changeset 3501:958bb4b7be49

Merge
author asaha
date Tue, 10 Apr 2012 10:42:34 -0700
parents e778c29768e6 (current diff) 3b449840d80c (diff)
children aa07e41a9f80
files src/share/vm/runtime/arguments.cpp
diffstat 366 files changed, 13100 insertions(+), 7548 deletions(-)
--- a/.hgtags	Wed Apr 04 20:44:38 2012 -0700
+++ b/.hgtags	Tue Apr 10 10:42:34 2012 -0700
@@ -213,3 +213,27 @@
 513351373923f74a7c91755748b95c9771e59f96 hs23-b10
 24727fb37561779077fdfa5a33342246f20e5c0f jdk8-b22
 dcc292399a39113957eebbd3e487b7e05e2c79fc hs23-b11
+e850d8e7ea54b91c7aa656e297f0f9f38dd4c296 jdk8-b23
+9e177d44b10fe92ecffa965fef9c5ac5433c1b46 hs23-b12
+a80fd4f45d7aaa154ed2f86a129f3c9c4035ec7a jdk8-b24
+b22de824749922986ce4d442bed029916b832807 hs23-b13
+64b46f975ab82948c1e021e17775ff4fab8bc40e hs23-b14
+9ad8feb5afbddec46d3cfe29fb5f73c2e99d5a43 jdk8-b25
+d71e662fe03741b6de498ca2077220148405a978 hs23-b15
+fd3060701216a11c0df6dcd053c6fd7c2b17a42c jdk8-b26
+f92a171cf0071ca6c3fa8231d7d570377f8b2f4d hs23-b16
+f92a171cf0071ca6c3fa8231d7d570377f8b2f4d hs23-b16
+931e5f39e365a0d550d79148ff87a7f9e864d2e1 hs23-b16
+3b24e7e01d20ca590d0f86b1222bb7c3f1a2aa2d jdk8-b27
+975c4105f1e2ef1190a75b77124033f1fd4290b5 hs24-b01
+b183b0863611b85dbac16f3b08b40ba978756d19 jdk8-b28
+030b5306d60f140e822e4a6d301744cb110ff0c8 hs24-b02
+b45b5c564098c58ea69e7cff3f7d341f0254dd1d jdk8-b29
+d61761bf305031c94f7f8eca49abd978b7d3c5da jdk8-b30
+dfae0140457cfb2c381d7679735fbedbae862c62 hs24-b03
+f4767e53d6e0d5da7e3f1775904076cce54247c1 hs24-b04
+0cd147eaa673d1642b2f466f5dc257cf192db524 jdk8-b31
+27863e4586de38be7dd17da4163f542038f4d1d7 hs24-b05
+25410a347ebb0bef166c4338a90d9dea82463a20 jdk8-b32
+cd47da9383cd932cb2b659064057feafa2a91134 hs24-b06
+785bcf415ead2eaa5f6677aaf528481008140bac jdk8-b33
--- a/agent/src/os/linux/Makefile	Wed Apr 04 20:44:38 2012 -0700
+++ b/agent/src/os/linux/Makefile	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@
 
 LIBS     = -lthread_db
 
-CFLAGS   = -c -fPIC -g -D_GNU_SOURCE -D$(ARCH) $(INCLUDES)
+CFLAGS   = -c -fPIC -g -D_GNU_SOURCE -D$(ARCH) $(INCLUDES) -D_FILE_OFFSET_BITS=64
 
 LIBSA = $(ARCH)/libsaproc.so
 
--- a/agent/src/os/linux/libproc_impl.c	Wed Apr 04 20:44:38 2012 -0700
+++ b/agent/src/os/linux/libproc_impl.c	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,10 +50,6 @@
    char alt_path[PATH_MAX + 1];
 
    init_alt_root();
-   fd = open(name, O_RDONLY);
-   if (fd >= 0) {
-      return fd;
-   }
 
    if (alt_root_len > 0) {
       strcpy(alt_path, alt_root);
@@ -73,6 +69,11 @@
             return fd;
          }
       }
+   } else {
+      fd = open(name, O_RDONLY);
+      if (fd >= 0) {
+         return fd;
+      }
    }
 
    return -1;
--- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Wed Apr 04 20:44:38 2012 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,8 +49,12 @@
     static private long g1CommittedFieldOffset;
     // size_t _summary_bytes_used;
     static private CIntegerField summaryBytesUsedField;
-    // G1MonitoringSupport* _g1mm
+    // G1MonitoringSupport* _g1mm;
     static private AddressField g1mmField;
+    // MasterOldRegionSet _old_set;
+    static private long oldSetFieldOffset;
+    // MasterHumongousRegionSet _humongous_set;
+    static private long humongousSetFieldOffset;
 
     static {
         VM.registerVMInitializedObserver(new Observer() {
@@ -67,12 +71,14 @@
         g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
         summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
         g1mmField = type.getAddressField("_g1mm");
+        oldSetFieldOffset = type.getField("_old_set").getOffset();
+        humongousSetFieldOffset = type.getField("_humongous_set").getOffset();
     }
 
     public long capacity() {
         Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
-        MemRegion g1_committed = new MemRegion(g1CommittedAddr);
-        return g1_committed.byteSize();
+        MemRegion g1Committed = new MemRegion(g1CommittedAddr);
+        return g1Committed.byteSize();
     }
 
     public long used() {
@@ -94,6 +100,18 @@
         return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
     }
 
+    public HeapRegionSetBase oldSet() {
+        Address oldSetAddr = addr.addOffsetTo(oldSetFieldOffset);
+        return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,
+                                                             oldSetAddr);
+    }
+
+    public HeapRegionSetBase humongousSet() {
+        Address humongousSetAddr = addr.addOffsetTo(humongousSetFieldOffset);
+        return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,
+                                                             humongousSetAddr);
+    }
+
     private Iterator<HeapRegion> heapRegionIterator() {
         return hrs().heapRegionIterator();
     }
--- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1MonitoringSupport.java	Wed Apr 04 20:44:38 2012 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1MonitoringSupport.java	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,6 +77,10 @@
         return edenUsedField.getValue(addr);
     }
 
+    public long edenRegionNum() {
+        return edenUsed() / HeapRegion.grainBytes();
+    }
+
     public long survivorCommitted() {
         return survivorCommittedField.getValue(addr);
     }
@@ -85,6 +89,10 @@
         return survivorUsedField.getValue(addr);
     }
 
+    public long survivorRegionNum() {
+        return survivorUsed() / HeapRegion.grainBytes();
+    }
+
     public long oldCommitted() {
         return oldCommittedField.getValue(addr);
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSetBase.java	Tue Apr 10 10:42:34 2012 -0700
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Iterator;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for HeapRegionSetBase. Represents a group of regions.
+
+public class HeapRegionSetBase extends VMObject {
+    // size_t _length;
+    static private CIntegerField lengthField;
+    // size_t _region_num;
+    static private CIntegerField regionNumField;
+    // size_t _total_used_bytes;
+    static private CIntegerField totalUsedBytesField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("HeapRegionSetBase");
+
+        lengthField         = type.getCIntegerField("_length");
+        regionNumField      = type.getCIntegerField("_region_num");
+        totalUsedBytesField = type.getCIntegerField("_total_used_bytes");
+    }
+
+    public long length() {
+        return lengthField.getValue(addr);
+    }
+
+    public long regionNum() {
+        return regionNumField.getValue(addr);
+    }
+
+    public long totalUsedBytes() {
+        return totalUsedBytesField.getValue(addr);
+    }
+
+    public HeapRegionSetBase(Address addr) {
+        super(addr);
+    }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/ConnectorImpl.java	Wed Apr 04 20:44:38 2012 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/ConnectorImpl.java	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -217,8 +217,8 @@
     }
 
     protected void checkNativeLink(SecurityManager sm, String os) {
-        if (os.equals("SunOS") || os.equals("Linux")) {
-            // link "saproc" - SA native library on SunOS and Linux?
+        if (os.equals("SunOS") || os.equals("Linux") || os.contains("OS X")) {
+            // link "saproc" - SA native library on SunOS, Linux, and Mac OS X
             sm.checkLink("saproc");
         } else if (os.startsWith("Windows")) {
             // link "sawindbg" - SA native library on Windows.
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/LoaderConstraintTable.java	Wed Apr 04 20:44:38 2012 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/LoaderConstraintTable.java	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,15 +42,6 @@
 
   private static synchronized void initialize(TypeDataBase db) {
     Type type = db.lookupType("LoaderConstraintTable");
-    nofBuckets = db.lookupIntConstant("LoaderConstraintTable::_nof_buckets").intValue();
-  }
-
-  // Fields
-  private static int nofBuckets;
-
-  // Accessors
-  public static int getNumOfBuckets() {
-    return nofBuckets;
   }
 
   public LoaderConstraintTable(Address addr) {
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java	Wed Apr 04 20:44:38 2012 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,6 @@
   private static AddressField placeholdersField;
   private static AddressField loaderConstraintTableField;
   private static sun.jvm.hotspot.types.OopField javaSystemLoaderField;
-  private static int nofBuckets;
 
   private static sun.jvm.hotspot.types.OopField objectKlassField;
   private static sun.jvm.hotspot.types.OopField classLoaderKlassField;
@@ -62,7 +61,6 @@
     placeholdersField = type.getAddressField("_placeholders");
     loaderConstraintTableField = type.getAddressField("_loader_constraints");
     javaSystemLoaderField = type.getOopField("_java_system_loader");
-    nofBuckets = db.lookupIntConstant("SystemDictionary::_nof_buckets").intValue();
 
     objectKlassField = type.getOopField(WK_KLASS("Object_klass"));
     classLoaderKlassField = type.getOopField(WK_KLASS("ClassLoader_klass"));
@@ -142,10 +140,6 @@
     return newOop(javaSystemLoaderField.getValue());
   }
 
-  public static int getNumOfBuckets() {
-    return nofBuckets;
-  }
-
   private static Oop newOop(OopHandle handle) {
     return VM.getVM().getObjectHeap().newOop(handle);
   }
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Wed Apr 04 20:44:38 2012 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Tue Apr 10 10:42:34 2012 -0700
@@ -648,7 +648,12 @@
   }
 
   public void printValueOn(PrintStream tty) {
-    tty.print("ConstantPool for " + getPoolHolder().getName().asString());
+    Oop holder = poolHolder.getValue(this);
+    if (holder instanceof Klass) {
+      tty.print("ConstantPool for " + ((Klass)holder).getName().asString());
+    } else {
+      tty.print("ConstantPool for partially loaded class");
+    }
   }
 
   public long getObjectSize() {
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Wed Apr 04 20:44:38 2012 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Tue Apr 10 10:42:34 2012 -0700
@@ -359,6 +359,12 @@
     public static final int innerClassNextOffset = 4;
   };
 
+  public static interface EnclosingMethodAttributeOffset {
+    public static final int enclosing_method_class_index_offset = 0;
+    public static final int enclosing_method_method_index_offset = 1;
+    public static final int enclosing_method_attribute_size = 2;
+  };
+
   // refer to compute_modifier_flags in VM code.
   public long computeModifierFlags() {
     long access = getAccessFlags();
@@ -367,9 +373,14 @@
     int length = ( innerClassList == null)? 0 : (int) innerClassList.getLength();
     if (length > 0) {
        if (Assert.ASSERTS_ENABLED) {
-          Assert.that(length % InnerClassAttributeOffset.innerClassNextOffset == 0, "just checking");
+          Assert.that(length % InnerClassAttributeOffset.innerClassNextOffset == 0 ||
+                      length % InnerClassAttributeOffset.innerClassNextOffset == EnclosingMethodAttributeOffset.enclosing_method_attribute_size,
+                      "just checking");
        }
        for (int i = 0; i < length; i += InnerClassAttributeOffset.innerClassNextOffset) {
+          if (i == length - EnclosingMethodAttributeOffset.enclosing_method_attribute_size) {
+              break;
+          }
           int ioff = innerClassList.getShortAt(i +
                          InnerClassAttributeOffset.innerClassInnerClassInfoOffset);
           // 'ioff' can be zero.
@@ -419,9 +430,14 @@
     int length = ( innerClassList == null)? 0 : (int) innerClassList.getLength();
     if (length > 0) {
        if (Assert.ASSERTS_ENABLED) {
-         Assert.that(length % InnerClassAttributeOffset.innerClassNextOffset == 0, "just checking");
+         Assert.that(length % InnerClassAttributeOffset.innerClassNextOffset == 0 ||
+                     length % InnerClassAttributeOffset.innerClassNextOffset == EnclosingMethodAttributeOffset.enclosing_method_attribute_size,
+                     "just checking");
        }
        for (int i = 0; i < length; i += InnerClassAttributeOffset.innerClassNextOffset) {
+         if (i == length - EnclosingMethodAttributeOffset.enclosing_method_attribute_size) {
+             break;
+         }
          int ioff = innerClassList.getShortAt(i +
                         InnerClassAttributeOffset.innerClassInnerClassInfoOffset);
          // 'ioff' can be zero.
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Wed Apr 04 20:44:38 2012 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,6 +67,7 @@
       printValue("SurvivorRatio    = ", getFlagValue("SurvivorRatio", flagMap));
       printValMB("PermSize         = ", getFlagValue("PermSize", flagMap));
       printValMB("MaxPermSize      = ", getFlagValue("MaxPermSize", flagMap));
+      printValMB("G1HeapRegionSize = ", HeapRegion.grainBytes());
 
       System.out.println();
       System.out.println("Heap Usage:");
@@ -100,11 +101,20 @@
          } else if (sharedHeap instanceof G1CollectedHeap) {
              G1CollectedHeap g1h = (G1CollectedHeap) sharedHeap;
              G1MonitoringSupport g1mm = g1h.g1mm();
-             System.out.println("G1 Young Generation");
-             printG1Space("Eden Space:", g1mm.edenUsed(), g1mm.edenCommitted());
-             printG1Space("From Space:", g1mm.survivorUsed(), g1mm.survivorCommitted());
-             printG1Space("To Space:", 0, 0);
-             printG1Space("G1 Old Generation", g1mm.oldUsed(), g1mm.oldCommitted());
+             long edenRegionNum = g1mm.edenRegionNum();
+             long survivorRegionNum = g1mm.survivorRegionNum();
+             HeapRegionSetBase oldSet = g1h.oldSet();
+             HeapRegionSetBase humongousSet = g1h.humongousSet();
+             long oldRegionNum = oldSet.regionNum() + humongousSet.regionNum();
+             printG1Space("G1 Heap:", g1h.n_regions(),
+                          g1h.used(), g1h.capacity());
+             System.out.println("G1 Young Generation:");
+             printG1Space("Eden Space:", edenRegionNum,
+                          g1mm.edenUsed(), g1mm.edenCommitted());
+             printG1Space("Survivor Space:", survivorRegionNum,
+                          g1mm.survivorUsed(), g1mm.survivorCommitted());
+             printG1Space("G1 Old Generation:", oldRegionNum,
+                          g1mm.oldUsed(), g1mm.oldCommitted());
          } else {
              throw new RuntimeException("unknown SharedHeap type : " + heap.getClass());
          }
@@ -216,9 +226,11 @@
       System.out.println(alignment +  (double)space.used() * 100.0 / space.capacity() + "% used");
    }
 
-   private void printG1Space(String spaceName, long used, long capacity) {
+   private void printG1Space(String spaceName, long regionNum,
+                             long used, long capacity) {
       long free = capacity - used;
       System.out.println(spaceName);
+      printValue("regions  = ", regionNum);
       printValMB("capacity = ", capacity);
       printValMB("used     = ", used);
       printValMB("free     = ", free);
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java	Wed Apr 04 20:44:38 2012 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,7 @@
       return "bsd";
     } else if (os.equals("OpenBSD")) {
       return "bsd";
-    } else if (os.equals("Darwin") || os.startsWith("Mac OS X")) {
+    } else if (os.equals("Darwin") || os.contains("OS X")) {
       return "bsd";
     } else if (os.startsWith("Windows")) {
       return "win32";
@@ -52,17 +52,17 @@
     }
   }
 
-  /* Returns "sparc" if on SPARC, "x86" if on x86. */
+  /* Returns "sparc" for SPARC based platforms and "x86" for x86 based
+     platforms. Otherwise returns the value of os.arch.  If the value
+     is not recognized as supported, an exception is thrown instead. */
   public static String getCPU() throws UnsupportedPlatformException {
     String cpu = System.getProperty("os.arch");
-    if (cpu.equals("i386")) {
+    if (cpu.equals("i386") || cpu.equals("x86")) {
       return "x86";
-    } else if (cpu.equals("sparc") || cpu.equals("x86") || cpu.equals("ia64")) {
+    } else if (cpu.equals("sparc") || cpu.equals("sparcv9")) {
+      return "sparc";
+    } else if (cpu.equals("ia64") || cpu.equals("amd64") || cpu.equals("x86_64")) {
       return cpu;
-    } else if (cpu.equals("sparcv9")) {
-      return "sparc";
-    } else if (cpu.equals("x86_64") || cpu.equals("amd64")) {
-      return "amd64";
     } else {
       throw new UnsupportedPlatformException("CPU type " + cpu + " not yet supported");
     }
--- a/make/Makefile	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/Makefile	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -89,19 +89,31 @@
 ZERO_VM_TARGETS=productzero fastdebugzero optimizedzero jvmgzero
 SHARK_VM_TARGETS=productshark fastdebugshark optimizedshark jvmgshark
 
+COMMON_VM_PRODUCT_TARGETS=product product1 productkernel docs export_product
+COMMON_VM_FASTDEBUG_TARGETS=fastdebug fastdebug1 fastdebugkernel docs export_fastdebug
+COMMON_VM_DEBUG_TARGETS=jvmg jvmg1 jvmgkernel docs export_debug
+
 # JDK directory list
 JDK_DIRS=bin include jre lib demo
 
 all:           all_product all_fastdebug
-ifndef BUILD_CLIENT_ONLY
-all_product:   product product1 productkernel docs export_product
-all_fastdebug: fastdebug fastdebug1 fastdebugkernel docs export_fastdebug
-all_debug:     jvmg jvmg1 jvmgkernel docs export_debug
-else
+
+ifdef BUILD_CLIENT_ONLY
 all_product:   product1 docs export_product
 all_fastdebug: fastdebug1 docs export_fastdebug
 all_debug:     jvmg1 docs export_debug
+else
+ifeq ($(MACOSX_UNIVERSAL),true)
+all_product:   universal_product
+all_fastdebug: universal_fastdebug
+all_debug:     universal_debug
+else
+all_product:   $(COMMON_VM_PRODUCT_TARGETS)
+all_fastdebug: $(COMMON_VM_FASTDEBUG_TARGETS)
+all_debug:     $(COMMON_VM_DEBUG_TARGETS)
 endif
+endif
+
 all_optimized: optimized optimized1 optimizedkernel docs export_optimized
 
 allzero:           all_productzero all_fastdebugzero
@@ -232,20 +244,19 @@
 	$(MAKE) VM_SUBDIR=${VM_DEBUG} EXPORT_SUBDIR=/debug   generic_export
 export_optimized:
 	$(MAKE) VM_SUBDIR=optimized EXPORT_SUBDIR=/optimized generic_export
-export_product_jdk:
+export_product_jdk::
 	$(MAKE) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) \
 		VM_SUBDIR=product                            generic_export
-export_optimized_jdk:
+export_optimized_jdk::
 	$(MAKE) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) \
 		VM_SUBDIR=optimized                          generic_export
-export_fastdebug_jdk:
+export_fastdebug_jdk::
 	$(MAKE) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/fastdebug \
 		VM_SUBDIR=fastdebug                          generic_export
-export_debug_jdk:
+export_debug_jdk::
 	$(MAKE) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/debug \
 		VM_SUBDIR=${VM_DEBUG}                        generic_export
 
-
 # Export file copy rules
 XUSAGE=$(HS_SRC_DIR)/share/vm/Xusage.txt
 DOCS_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_docs
@@ -260,23 +271,25 @@
 ZERO_DIR=$(ZERO_BASE_DIR)/$(VM_SUBDIR)
 SHARK_DIR=$(SHARK_BASE_DIR)/$(VM_SUBDIR)
 
-# Misc files and generated files need to come from C1 or C2 area
-ifeq ($(ZERO_BUILD), true)
-ifeq ($(SHARK_BUILD), true)
-  MISC_DIR=$(SHARK_DIR)
-  GEN_DIR=$(SHARK_BASE_DIR)/generated
-else
-  MISC_DIR=$(ZERO_DIR)
-  GEN_DIR=$(ZERO_BASE_DIR)/generated
+ifeq ($(JVM_VARIANT_SERVER), true)
+    MISC_DIR=$(C2_DIR)
+    GEN_DIR=$(C2_BASE_DIR)/generated
+endif
+ifeq ($(JVM_VARIANT_CLIENT), true)
+    MISC_DIR=$(C1_DIR)
+    GEN_DIR=$(C1_BASE_DIR)/generated
 endif
-else
-ifeq ($(ARCH_DATA_MODEL), 32)
-  MISC_DIR=$(C1_DIR)
-  GEN_DIR=$(C1_BASE_DIR)/generated
-else
-  MISC_DIR=$(C2_DIR)
-  GEN_DIR=$(C2_BASE_DIR)/generated
+ifeq ($(JVM_VARIANT_KERNEL), true)
+    MISC_DIR=$(C2_DIR)
+    GEN_DIR=$(C2_BASE_DIR)/generated
 endif
+ifeq ($(JVM_VARIANT_ZEROSHARK), true)
+    MISC_DIR=$(SHARK_DIR)
+    GEN_DIR=$(SHARK_BASE_DIR)/generated
+endif
+ifeq ($(JVM_VARIANT_ZERO), true)
+    MISC_DIR=$(ZERO_DIR)
+    GEN_DIR=$(ZERO_BASE_DIR)/generated
 endif
 
 # Bin files (windows)
@@ -321,52 +334,55 @@
 
 # Shared Library
 ifneq ($(OSNAME),windows)
-  ifeq ($(ZERO_BUILD), true)
-    ifeq ($(SHARK_BUILD), true)
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
-	$(install-file)
-$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
-	$(install-file)
-    else
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
-	$(install-file)
-$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
-	$(install-file)
+    ifeq ($(JVM_VARIANT_SERVER), true)
+        $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX)
+		$(install-file)
+        $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(C2_DIR)/%.$(LIBRARY_SUFFIX)
+		$(install-file)
+        $(EXPORT_SERVER_DIR)/64/%.$(LIBRARY_SUFFIX):    $(C2_DIR)/%.$(LIBRARY_SUFFIX)
+		$(install-file)
+        $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: 		$(C2_DIR)/%.debuginfo
+		$(install-file)
+        $(EXPORT_SERVER_DIR)/%.debuginfo:       		$(C2_DIR)/%.debuginfo
+		$(install-file)
+        $(EXPORT_SERVER_DIR)/64/%.debuginfo:    		$(C2_DIR)/%.debuginfo
+		$(install-file)
     endif
-  else
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX)
-	$(install-file)
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX)
-	$(install-file)
-$(EXPORT_CLIENT_DIR)/%.$(LIBRARY_SUFFIX):       $(C1_DIR)/%.$(LIBRARY_SUFFIX)
-	$(install-file)
-$(EXPORT_CLIENT_DIR)/64/%.$(LIBRARY_SUFFIX):    $(C1_DIR)/%.$(LIBRARY_SUFFIX)
-	$(install-file)
-$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(C2_DIR)/%.$(LIBRARY_SUFFIX)
-	$(install-file)
-$(EXPORT_SERVER_DIR)/64/%.$(LIBRARY_SUFFIX):    $(C2_DIR)/%.$(LIBRARY_SUFFIX)
-	$(install-file)
-
-# Debug info for shared library
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo
-	$(install-file)
-$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C2_DIR)/%.debuginfo
-	$(install-file)
-$(EXPORT_CLIENT_DIR)/%.debuginfo:       $(C1_DIR)/%.debuginfo
-	$(install-file)
-$(EXPORT_CLIENT_DIR)/64/%.debuginfo:    $(C1_DIR)/%.debuginfo
-	$(install-file)
-$(EXPORT_SERVER_DIR)/%.debuginfo:       $(C2_DIR)/%.debuginfo
-	$(install-file)
-$(EXPORT_SERVER_DIR)/64/%.debuginfo:    $(C2_DIR)/%.debuginfo
-	$(install-file)
-  endif
+    ifeq ($(JVM_VARIANT_CLIENT), true)
+        $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX)
+		$(install-file)
+        $(EXPORT_CLIENT_DIR)/%.$(LIBRARY_SUFFIX):       $(C1_DIR)/%.$(LIBRARY_SUFFIX)
+		$(install-file)
+        $(EXPORT_CLIENT_DIR)/64/%.$(LIBRARY_SUFFIX):    $(C1_DIR)/%.$(LIBRARY_SUFFIX)
+		$(install-file)
+        $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: 		$(C1_DIR)/%.debuginfo
+		$(install-file)
+        $(EXPORT_CLIENT_DIR)/%.debuginfo:       		$(C1_DIR)/%.debuginfo
+		$(install-file)
+        $(EXPORT_CLIENT_DIR)/64/%.debuginfo:    		$(C1_DIR)/%.debuginfo
+		$(install-file)
+    endif
+    ifeq ($(JVM_VARIANT_ZEROSHARK), true)
+        $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
+		$(install-file)
+        $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
+		$(install-file)
+    endif
+    ifeq ($(JVM_VARIANT_ZERO), true)
+        $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
+		$(install-file)
+        $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
+		$(install-file)
+    endif
 endif
 
 # Jar file (sa-jdi.jar)
 $(EXPORT_LIB_DIR)/%.jar: $(GEN_DIR)/%.jar
 	$(install-file)
 
+$(EXPORT_JRE_LIB_DIR)/%.jar: $(GEN_DIR)/%.jar
+	$(install-file)
+
 # Include files (jvmti.h, jvmticmlr.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h, jfr.h)
 $(EXPORT_INCLUDE_DIR)/%: $(GEN_DIR)/jvmtifiles/%
 	$(install-file)
@@ -391,7 +407,6 @@
 	$(install-file)
 else
 $(EXPORT_INCLUDE_DIR)/jfr.h:
-	
 endif
 
 # Doc files (jvmti.html)
@@ -437,21 +452,28 @@
 	 ($(CD) $(JDK_IMAGE_DIR) && $(TAR) -xf -)
 
 test_jdk:
-  ifneq ($(ZERO_BUILD), true)
-    ifeq ($(ARCH_DATA_MODEL), 32)
-	$(JDK_IMAGE_DIR)/bin/java -client -version
-    endif
+  ifeq ($(JVM_VARIANT_CLIENT), true)
+	$(JDK_IMAGE_DIR)/bin/java -d$(ARCH_DATA_MODEL) -client -Xinternalversion
+	$(JDK_IMAGE_DIR)/bin/java -d$(ARCH_DATA_MODEL) -client -version
   endif
-	$(JDK_IMAGE_DIR)/bin/java -server -version
+  ifeq ($(findstring true, $(JVM_VARIANT_SERVER)\
+		$(JVM_VARIANT_ZERO)$(JVM_VARIANT_ZEROSHARK)), true)
+	$(JDK_IMAGE_DIR)/bin/java -d$(ARCH_DATA_MODEL) -server -Xinternalversion
+	$(JDK_IMAGE_DIR)/bin/java -d$(ARCH_DATA_MODEL) -server -version
+  endif
+  ifeq ($(JVM_VARIANT_KERNEL), true)
+	$(JDK_IMAGE_DIR)/bin/java -d$(ARCH_DATA_MODEL) -kernel -Xinternalversion
+	$(JDK_IMAGE_DIR)/bin/java -d$(ARCH_DATA_MODEL) -kernel -version
+  endif
 
-copy_product_jdk:
+copy_product_jdk::
 	$(RM) -r $(JDK_IMAGE_DIR)
 	$(MKDIR) -p $(JDK_IMAGE_DIR)
 	($(CD) $(JDK_IMPORT_PATH) && \
 	 $(TAR) -cf - $(JDK_DIRS)) | \
 	 ($(CD) $(JDK_IMAGE_DIR) && $(TAR) -xf -)
 
-copy_fastdebug_jdk:
+copy_fastdebug_jdk::
 	$(RM) -r $(JDK_IMAGE_DIR)/fastdebug
 	$(MKDIR) -p $(JDK_IMAGE_DIR)/fastdebug
 	if [ -d $(JDK_IMPORT_PATH)/fastdebug ] ; then \
@@ -464,7 +486,7 @@
 	   ($(CD) $(JDK_IMAGE_DIR)/fastdebug && $(TAR) -xf -) ; \
 	fi
 
-copy_debug_jdk:
+copy_debug_jdk::
 	$(RM) -r $(JDK_IMAGE_DIR)/debug
 	$(MKDIR) -p $(JDK_IMAGE_DIR)/debug
 	if [ -d $(JDK_IMPORT_PATH)/debug ] ; then \
@@ -481,36 +503,6 @@
 	   ($(CD) $(JDK_IMAGE_DIR)/debug && $(TAR) -xf -) ; \
 	fi
 
-# macosx universal builds
-
-ifeq ($(MACOSX_UNIVERSAL), true)
-$(UNIVERSAL_LIPO_LIST):
-	lipo -create -output $@ $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@)
-
-$(UNIVERSAL_COPY_LIST):
-	$(CP) $(EXPORT_JRE_LIB_DIR)/i386/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) $@
-
-universalize: $(UNIVERSAL_LIPO_LIST) $(UNIVERSAL_COPY_LIST)
-endif
-
-universal_product:
-	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 MACOSX_UNIVERSAL=true all_product
-	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 MACOSX_UNIVERSAL=true all_product
-	$(MKDIR) -p $(EXPORT_JRE_LIB_DIR)/{client,server}
-	$(QUIETLY) $(MAKE) MACOSX_UNIVERSAL=true universalize
-
-universal_fastdebug:
-	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 MACOSX_UNIVERSAL=true all_fastdebug
-	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 MACOSX_UNIVERSAL=true all_fastdebug
-	$(MKDIR) -p $(EXPORT_JRE_LIB_DIR)/{client,server}
-	$(QUIETLY) $(MAKE) MACOSX_UNIVERSAL=true universalize
-
-universal_debug:
-	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 MACOSX_UNIVERSAL=true all_debug
-	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 MACOSX_UNIVERSAL=true all_debug
-	$(MKDIR) -p $(EXPORT_JRE_LIB_DIR)/{client,server}
-	$(QUIETLY) $(MAKE) MACOSX_UNIVERSAL=true universalize
-
 #
 # Check target
 #
@@ -564,6 +556,7 @@
 OUTPUTDIR.desc             = Output directory, default is build/<osname>
 BOOTDIR.desc               = JDK used to compile agent java source and test with
 JDK_IMPORT_PATH.desc       = Promoted JDK to copy for 'create_jdk'
+JDK_IMAGE_DIR.desc         = Directory to place JDK to copy
 EXPORT_PATH.desc           = Directory to place files to export for JDK build
 
 # Make variables to print out (description and value)
@@ -572,6 +565,7 @@
     OUTPUTDIR                   \
     BOOTDIR                     \
     JDK_IMPORT_PATH             \
+    JDK_IMAGE_DIR               \
     EXPORT_PATH
 
 # Make variables that should refer to directories that exist
@@ -630,6 +624,13 @@
 	@$(ECHO) \
 "  $(MAKE) ALT_JDK_IMPORT_PATH=/opt/java/jdk$(JDK_VERSION)"
 
+# Universal build support
+ifeq ($(OS_VENDOR), Darwin)
+ifeq ($(MACOSX_UNIVERSAL),true)
+include $(GAMMADIR)/make/$(OSNAME)/makefiles/universal.gmk
+endif
+endif
+
 # JPRT rule to build this workspace
 include $(GAMMADIR)/make/jprt.gmk
 
@@ -639,6 +640,4 @@
 	export_product export_fastdebug export_debug export_optimized \
 	export_jdk_product export_jdk_fastdebug export_jdk_debug \
 	create_jdk copy_jdk update_jdk test_jdk \
-	copy_product_jdk copy_fastdebug_jdk copy_debug_jdk universalize \
-	universal_product
-
+	copy_product_jdk copy_fastdebug_jdk copy_debug_jdk 
--- a/make/bsd/Makefile	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/bsd/Makefile	Tue Apr 10 10:42:34 2012 -0700
@@ -188,7 +188,7 @@
 # in the build.sh script:
 TARGETS           = debug jvmg fastdebug optimized profiled product
 
-ifeq ($(ZERO_BUILD), true)
+ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
   SUBDIR_DOCS     = $(OSNAME)_$(VARIANTARCH)_docs
 else
   SUBDIR_DOCS     = $(OSNAME)_$(BUILDARCH)_docs
@@ -208,7 +208,7 @@
 TARGETS_SHARK     = $(addsuffix shark,$(TARGETS))
 
 BUILDTREE_MAKE    = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
-BUILDTREE_VARS    = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH)
+BUILDTREE_VARS    = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) LIBRARY_SUFFIX=$(LIBRARY_SUFFIX)
 BUILDTREE_VARS   += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
 
 BUILDTREE         = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS)
--- a/make/bsd/makefiles/adlc.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/bsd/makefiles/adlc.make	Tue Apr 10 10:42:34 2012 -0700
@@ -61,10 +61,10 @@
 INCLUDES += $(Src_Dirs_I:%=-I%)
 
 # set flags for adlc compilation
-CPPFLAGS = $(SYSDEFS) $(INCLUDES)
+CXXFLAGS = $(SYSDEFS) $(INCLUDES)
 
 # Force assertions on.
-CPPFLAGS += -DASSERT
+CXXFLAGS += -DASSERT
 
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 # Compiler warnings are treated as errors
@@ -111,7 +111,7 @@
 
 $(EXEC) : $(OBJECTS)
 	@echo Making adlc
-	$(QUIETLY) $(HOST.LINK_NOPROF.CC) -o $(EXEC) $(OBJECTS)
+	$(QUIETLY) $(HOST.LINK_NOPROF.CXX) -o $(EXEC) $(OBJECTS)
 
 # Random dependencies:
 $(OBJECTS): opcodes.hpp classes.hpp adlc.hpp adlcVMDeps.hpp adlparse.hpp archDesc.hpp arena.hpp dict2.hpp filebuff.hpp forms.hpp formsopt.hpp formssel.hpp
@@ -213,14 +213,14 @@
 $(OUTDIR)/%.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
-	$(QUIETLY) $(HOST.COMPILE.CC) -o $@ $< $(COMPILE_DONE)
+	$(QUIETLY) $(HOST.COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
 
 # Some object files are given a prefix, to disambiguate
 # them from objects of the same name built for the VM.
 $(OUTDIR)/adlc-%.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
-	$(QUIETLY) $(HOST.COMPILE.CC) -o $@ $< $(COMPILE_DONE)
+	$(QUIETLY) $(HOST.COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
 
 # #########################################################################
 
--- a/make/bsd/makefiles/buildtree.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/bsd/makefiles/buildtree.make	Tue Apr 10 10:42:34 2012 -0700
@@ -58,6 +58,7 @@
 # needs to be set here since this Makefile doesn't include defs.make
 OS_VENDOR:=$(shell uname -s)
 
+-include $(SPEC)
 include $(GAMMADIR)/make/scm.make
 include $(GAMMADIR)/make/altsrc.make
 
@@ -68,7 +69,7 @@
 # For now, until the compiler is less wobbly:
 TESTFLAGS	= -Xbatch -showversion
 
-ifeq ($(ZERO_BUILD), true)
+ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
   PLATFORM_FILE = $(shell dirname $(shell dirname $(shell pwd)))/platform_zero
 else
   ifdef USE_SUNCC
@@ -162,20 +163,6 @@
   endif
 endif
 
-ifeq ($(OS_VENDOR), Darwin)
-  # MACOSX FIXME: we should be able to run test_gamma (see MACOSX_PORT-214)
-  ifeq ($(ALWAYS_PASS_TEST_GAMMA),)
-    # ALWAYS_PASS_TEST_GAMMA wasn't set so we default to true on MacOS X
-    # until MACOSX_PORT-214 is fixed
-    ALWAYS_PASS_TEST_GAMMA=true
-  endif
-endif
-ifeq ($(ALWAYS_PASS_TEST_GAMMA), true)
-  TEST_GAMMA_STATUS= echo 'exit 0';
-else
-  TEST_GAMMA_STATUS=
-endif
-
 BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION=  JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
 
 BUILDTREE	= \
@@ -261,6 +248,8 @@
 	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
 	    echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \
 	echo; \
+	[ -n "$(SPEC)" ] && \
+	    echo "include $(SPEC)"; \
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(COMPILER).make"; \
 	) > $@
@@ -353,12 +342,10 @@
 	$(BUILDTREE_COMMENT); \
 	[ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \
 	{ \
-	echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
-	echo "DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
 	echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \
 	} | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \
 	echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \
-	echo "export JAVA_HOME LD_LIBRARY_PATH DYLD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \
+	echo "export JAVA_HOME CLASSPATH HOTSPOT_BUILD_USER"; \
 	) > $@
 
 env.csh: env.sh
@@ -412,7 +399,7 @@
 JAVA_FLAG/64 = -d64
 
 WRONG_DATA_MODE_MSG = \
-	echo "JAVA_HOME must point to $(DATA_MODE)bit JDK."
+	echo "JAVA_HOME must point to a $(DATA_MODE)-bit OpenJDK."
 
 CROSS_COMPILING_MSG = \
 	echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run."
@@ -420,20 +407,78 @@
 test_gamma:  $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
-	echo '#!/bin/sh'; \
+	echo "#!/bin/sh"; \
+	echo ""; \
 	$(BUILDTREE_COMMENT); \
-	echo '. ./env.sh'; \
-	echo "if [ \"$(CROSS_COMPILE_ARCH)\" != \"\" ]; then { $(CROSS_COMPILING_MSG); exit 0; }; fi"; \
-	echo "if [ -z \$$JAVA_HOME ]; then { $(NO_JAVA_HOME_MSG); exit 0; }; fi"; \
-	echo "if ! \$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion 2>&1 > /dev/null"; \
-	echo "then"; \
-	echo "  $(WRONG_DATA_MODE_MSG); exit 0;"; \
+	echo ""; \
+	echo "# Include environment settings for gamma run"; \
+	echo ""; \
+	echo ". ./env.sh"; \
+	echo ""; \
+	echo "# Do not run gamma test for cross compiles"; \
+	echo ""; \
+	echo "if [ -n \"$(CROSS_COMPILE_ARCH)\" ]; then "; \
+	echo "  $(CROSS_COMPILING_MSG)"; \
+	echo "  exit 0"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Make sure JAVA_HOME is set as it is required for gamma"; \
+	echo ""; \
+	echo "if [ -z \"\$${JAVA_HOME}\" ]; then "; \
+	echo "  $(NO_JAVA_HOME_MSG)"; \
+	echo "  exit 0"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Check JAVA_HOME version to be used for the test"; \
+	echo ""; \
+	echo "\$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion > /dev/null 2>&1"; \
+	echo "if [ \$$? -ne 0 ]; then "; \
+	echo "  $(WRONG_DATA_MODE_MSG)"; \
+	echo "  exit 0"; \
 	echo "fi"; \
+	echo ""; \
+	echo "# Use gamma_g if it exists"; \
+	echo ""; \
+	echo "GAMMA_PROG=gamma"; \
+	echo "if [ -f gamma_g ]; then "; \
+	echo "  GAMMA_PROG=gamma_g"; \
+	echo "fi"; \
+	echo ""; \
+	echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
+	echo "  # Ensure architecture for gamma and JAVA_HOME is the same."; \
+	echo "  # NOTE: gamma assumes the OpenJDK directory layout."; \
+	echo ""; \
+	echo "  GAMMA_ARCH=\"\`file \$${GAMMA_PROG} | awk '{print \$$NF}'\`\""; \
+	echo "  JVM_LIB=\"\$${JAVA_HOME}/jre/lib/libjava.$(LIBRARY_SUFFIX)\""; \
+	echo "  if [ ! -f \$${JVM_LIB} ]; then"; \
+	echo "    JVM_LIB=\"\$${JAVA_HOME}/jre/lib/$${LIBARCH}/libjava.$(LIBRARY_SUFFIX)\""; \
+	echo "  fi"; \
+	echo "  if [ ! -f \$${JVM_LIB} ] || [ -z \"\`file \$${JVM_LIB} | grep \$${GAMMA_ARCH}\`\" ]; then "; \
+	echo "    $(WRONG_DATA_MODE_MSG)"; \
+	echo "    exit 0"; \
+	echo "  fi"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Compile Queens program for test"; \
+	echo ""; \
 	echo "rm -f Queens.class"; \
 	echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \
-	echo '[ -f gamma_g ] && { gamma=gamma_g; }'; \
-	echo './$${gamma:-gamma} $(TESTFLAGS) Queens < /dev/null'; \
-	$(TEST_GAMMA_STATUS) \
+	echo ""; \
+	echo "# Set library path solely for gamma launcher test run"; \
+	echo ""; \
+	echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
+	echo "export LD_LIBRARY_PATH"; \
+	echo "unset LD_LIBRARY_PATH_32"; \
+	echo "unset LD_LIBRARY_PATH_64"; \
+	echo ""; \
+	echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
+	echo "  DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/native_threads:\$${JAVA_HOME}/jre/lib:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
+	echo "  export DYLD_LIBRARY_PATH"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Use the gamma launcher and JAVA_HOME to run the test"; \
+	echo ""; \
+	echo "./\$${GAMMA_PROG} $(TESTFLAGS) Queens < /dev/null"; \
 	) > $@
 	$(QUIETLY) chmod +x $@
 
--- a/make/bsd/makefiles/defs.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/bsd/makefiles/defs.make	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
 endif
 
 # zero
-ifeq ($(ZERO_BUILD), true)
+ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
   ifeq ($(ARCH_DATA_MODEL), 64)
     MAKE_ARGS      += LP64=1
   endif
@@ -124,6 +124,18 @@
   HS_ARCH          = ppc
 endif
 
+# On 32 bit bsd we build server and client, on 64 bit just server.
+ifeq ($(JVM_VARIANTS),)
+  ifeq ($(ARCH_DATA_MODEL), 32)
+    JVM_VARIANTS:=client,server
+    JVM_VARIANT_CLIENT:=true
+    JVM_VARIANT_SERVER:=true
+  else
+    JVM_VARIANTS:=server
+    JVM_VARIANT_SERVER:=true
+  endif
+endif
+
 JDK_INCLUDE_SUBDIR=bsd
 
 # Library suffix
@@ -142,18 +154,18 @@
 # client and server subdirectories have symbolic links to ../libjsig.so
 EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
 EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
+EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
 
-ifndef BUILD_CLIENT_ONLY
-EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
-EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
+EXPORT_LIST += $(EXPORT_JRE_LIB_DIR)/wb.jar
+
+ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
 endif
 
-ifneq ($(ZERO_BUILD), true)
-  ifeq ($(ARCH_DATA_MODEL), 32)
-    EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
-    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
-    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
-  endif
+ifeq ($(JVM_VARIANT_CLIENT),true)
+  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
+  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
 endif
 
 # Serviceability Binaries
@@ -171,10 +183,39 @@
 
 EXPORT_LIST += $(ADD_SA_BINARIES/$(HS_ARCH))
 
-UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX)
-UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX)
-UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX)
+# Universal build settings
+ifeq ($(OS_VENDOR), Darwin)
+  # Build universal binaries by default on Mac OS X
+  MACOSX_UNIVERSAL = true
+  ifneq ($(ALT_MACOSX_UNIVERSAL),)
+    MACOSX_UNIVERSAL = $(ALT_MACOSX_UNIVERSAL)
+  endif
+  MAKE_ARGS += MACOSX_UNIVERSAL=$(MACOSX_UNIVERSAL)
+
+  # Universal settings
+  ifeq ($(MACOSX_UNIVERSAL), true)
+
+    # Set universal export path but avoid using ARCH or PLATFORM subdirs
+    EXPORT_PATH=$(OUTPUTDIR)/export-universal$(EXPORT_SUBDIR)
+    ifneq ($(ALT_EXPORT_PATH),)
+      EXPORT_PATH=$(ALT_EXPORT_PATH)
+    endif
 
-UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/Xusage.txt
-UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/Xusage.txt
-UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX)
+    # Set universal image dir
+    JDK_IMAGE_DIR=$(OUTPUTDIR)/jdk-universal$(EXPORT_SUBDIR)
+    ifneq ($(ALT_JDK_IMAGE_DIR),)
+      JDK_IMAGE_DIR=$(ALT_JDK_IMAGE_DIR)
+    endif
+
+    # Binaries to 'universalize' if built
+    UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX)
+    UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX)
+    UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX)
+    UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX)
+
+    # Files to simply copy in place
+    UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/Xusage.txt
+    UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/Xusage.txt
+
+  endif
+endif
--- a/make/bsd/makefiles/dtrace.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/bsd/makefiles/dtrace.make	Tue Apr 10 10:42:34 2012 -0700
@@ -105,11 +105,11 @@
 
 lib$(GENOFFS).dylib: $(DTRACE_SRCDIR)/$(GENOFFS).cpp $(DTRACE_SRCDIR)/$(GENOFFS).h \
                   $(LIBJVM.o)
-	$(QUIETLY) $(CCC) $(CPPFLAGS) $(GENOFFS_CFLAGS) $(SHARED_FLAG) $(PICFLAG) \
+	$(QUIETLY) $(CXX) $(CXXFLAGS) $(GENOFFS_CFLAGS) $(SHARED_FLAG) $(PICFLAG) \
 		 $(LFLAGS_GENOFFS) -o $@ $(DTRACE_SRCDIR)/$(GENOFFS).cpp -ljvm
 
 $(GENOFFS): $(DTRACE_SRCDIR)/$(GENOFFS)Main.c lib$(GENOFFS).dylib
-	$(QUIETLY) $(LINK.CC) -o $@ $(DTRACE_SRCDIR)/$(GENOFFS)Main.c \
+	$(QUIETLY) $(LINK.CXX) -o $@ $(DTRACE_SRCDIR)/$(GENOFFS)Main.c \
 		./lib$(GENOFFS).dylib
 
 # $@.tmp is created first to avoid an empty $(JVMOFFS).h if an error occurs.
@@ -135,7 +135,7 @@
 	fi
 
 $(JVMOFFS.o): $(JVMOFFS).h $(JVMOFFS).cpp 
-	$(QUIETLY) $(CCC) -c -I. -o $@ $(ARCHFLAG) -D$(TYPE) $(JVMOFFS).cpp
+	$(QUIETLY) $(CXX) -c -I. -o $@ $(ARCHFLAG) -D$(TYPE) $(JVMOFFS).cpp
 
 $(LIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_DB_MAPFILE)
 	@echo Making $@
--- a/make/bsd/makefiles/gcc.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/bsd/makefiles/gcc.make	Tue Apr 10 10:42:34 2012 -0700
@@ -25,57 +25,59 @@
 OS_VENDOR = $(shell uname -s)
 
 #------------------------------------------------------------------------
-# CC, CPP & AS
+# CC, CXX & AS
+
+# If a SPEC is not set already, then use these defaults.
+ifeq ($(SPEC),)
+  # When cross-compiling the ALT_COMPILER_PATH points
+  # to the cross-compilation toolset
+  ifdef CROSS_COMPILE_ARCH
+    CXX = $(ALT_COMPILER_PATH)/g++
+    CC  = $(ALT_COMPILER_PATH)/gcc
+    HOSTCXX = g++
+    HOSTCC  = gcc
+  else ifneq ($(OS_VENDOR), Darwin)
+    CXX = g++
+    CC  = gcc
+    HOSTCXX = $(CXX)
+    HOSTCC  = $(CC)
+  endif
 
-# When cross-compiling the ALT_COMPILER_PATH points
-# to the cross-compilation toolset
-ifdef CROSS_COMPILE_ARCH
- CPP = $(ALT_COMPILER_PATH)/g++
- CC  = $(ALT_COMPILER_PATH)/gcc
- HOSTCPP = g++
- HOSTCC  = gcc
-else ifneq ($(OS_VENDOR), Darwin)
- CXX = g++
- CPP = $(CXX)
- CC  = gcc
- HOSTCPP = $(CPP)
- HOSTCC  = $(CC)
+  # i486 hotspot requires -mstackrealign on Darwin.
+  # llvm-gcc supports this in Xcode 3.2.6 and 4.0.
+  # gcc-4.0 supports this on earlier versions.
+  # Prefer llvm-gcc where available.
+  ifeq ($(OS_VENDOR), Darwin)
+    ifeq ($(origin CXX), default)
+      CXX = llvm-g++
+    endif
+    ifeq ($(origin CC), default)
+      CC  = llvm-gcc
+    endif
+
+    ifeq ($(ARCH), i486)
+      LLVM_SUPPORTS_STACKREALIGN := $(shell \
+       [ "0"`llvm-gcc -v 2>&1 | grep LLVM | sed -E "s/.*LLVM build ([0-9]+).*/\1/"` -gt "2333" ] \
+       && echo true || echo false)
+
+      ifeq ($(LLVM_SUPPORTS_STACKREALIGN), true)
+        CXX32 ?= llvm-g++
+        CC32  ?= llvm-gcc
+      else
+        CXX32 ?= g++-4.0
+        CC32  ?= gcc-4.0
+      endif
+      CXX = $(CXX32)
+      CC  = $(CC32)
+    endif
+
+    HOSTCXX = $(CXX)
+    HOSTCC  = $(CC)
+  endif
+
+  AS   = $(CC) -c -x assembler-with-cpp
 endif
 
-# i486 hotspot requires -mstackrealign on Darwin.
-# llvm-gcc supports this in Xcode 3.2.6 and 4.0.
-# gcc-4.0 supports this on earlier versions.
-# Prefer llvm-gcc where available.
-ifeq ($(OS_VENDOR), Darwin)
-  ifeq ($(origin CXX), default)
-   CXX = llvm-g++
-  endif
-  ifeq ($(origin CC), default)
-   CC  = llvm-gcc
-  endif
-  CPP  = $(CXX)
-
-  ifeq ($(ARCH), i486)
-  LLVM_SUPPORTS_STACKREALIGN := $(shell \
-   [ "0"`llvm-gcc -v 2>&1 | grep LLVM | sed -E "s/.*LLVM build ([0-9]+).*/\1/"` -gt "2333" ] \
-   && echo true || echo false)
-
-  ifeq ($(LLVM_SUPPORTS_STACKREALIGN), true)
-    CXX32 ?= llvm-g++
-    CC32  ?= llvm-gcc
-  else
-    CXX32 ?= g++-4.0
-    CC32  ?= gcc-4.0
-  endif
-  CPP = $(CXX32)
-  CC  = $(CC32)
-  endif
-
-  HOSTCPP = $(CPP)
-  HOSTCC  = $(CC)
-endif
-
-AS   = $(CC) -c -x assembler-with-cpp
 
 # -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only
 # prints the numbers (e.g. "2.95", "3.2.1")
@@ -103,11 +105,12 @@
 VM_PICFLAG/AOUT   =
 VM_PICFLAG        = $(VM_PICFLAG/$(LINK_INTO))
 
-ifeq ($(ZERO_BUILD), true)
-CFLAGS += $(LIBFFI_CFLAGS)
+ifeq ($(JVM_VARIANT_ZERO), true)
+  CFLAGS += $(LIBFFI_CFLAGS)
 endif
-ifeq ($(SHARK_BUILD), true)
-CFLAGS += $(LLVM_CFLAGS)
+ifeq ($(JVM_VARIANT_ZEROSHARK), true)
+  CFLAGS += $(LIBFFI_CFLAGS)
+  CFLAGS += $(LLVM_CFLAGS)
 endif
 CFLAGS += $(VM_PICFLAG)
 CFLAGS += -fno-rtti
--- a/make/bsd/makefiles/launcher.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/bsd/makefiles/launcher.make	Tue Apr 10 10:42:34 2012 -0700
@@ -50,14 +50,31 @@
   LIBS_LAUNCHER             += $(STATIC_STDCXX) $(LIBS)
 else
   LAUNCHER.o                 = launcher.o
-  LFLAGS_LAUNCHER           += -L`pwd`
+  LFLAGS_LAUNCHER           += -L`pwd` 
+
+  # The gamma launcher runs the JDK from $JAVA_HOME, overriding the JVM with a
+  # freshly built JVM at ./libjvm.{so|dylib}.  This is accomplished by setting 
+  # the library searchpath using ({DY}LD_LIBRARY_PATH) to find the local JVM 
+  # first.  Gamma dlopen()s libjava from $JAVA_HOME/jre/lib{/$arch}, which is
+  # statically linked with CoreFoundation framework libs. Unfortunately, gamma's
+  # unique searchpath results in some unresolved symbols in the framework 
+  # libraries, because JDK libraries are inadvertently discovered first on the
+  # searchpath, e.g. libjpeg.  On Mac OS X, filenames are case *insensitive*.
+  # So, the actual filename collision is libjpeg.dylib and libJPEG.dylib.
+  # To resolve this, gamma needs to also statically link with the CoreFoundation 
+  # framework libraries.
+
+  ifeq ($(OS_VENDOR),Darwin)
+    LFLAGS_LAUNCHER         += -framework CoreFoundation 
+  endif
+
   LIBS_LAUNCHER             += -l$(JVM) $(LIBS)
 endif
 
-LINK_LAUNCHER = $(LINK.c)
+LINK_LAUNCHER = $(LINK.CC)
 
-LINK_LAUNCHER/PRE_HOOK  = $(LINK_LIB.CC/PRE_HOOK)
-LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CC/POST_HOOK)
+LINK_LAUNCHER/PRE_HOOK  = $(LINK_LIB.CXX/PRE_HOOK)
+LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CXX/POST_HOOK)
 
 LAUNCHER_OUT = launcher
 
@@ -73,11 +90,11 @@
 
 $(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR_SHARE)/%.c
 	$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
-	$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CPPFLAGS)
+	$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
 
 $(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR)/%.c
 	$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
-	$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CPPFLAGS)
+	$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
 
 $(LAUNCHER): $(OBJS) $(LIBJVM) $(LAUNCHER_MAPFILE)
 	$(QUIETLY) echo Linking launcher...
--- a/make/bsd/makefiles/product.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/bsd/makefiles/product.make	Tue Apr 10 10:42:34 2012 -0700
@@ -55,4 +55,4 @@
 STRIP_AOUT   = $(STRIP) -x $@ || exit 1;
 
 # Don't strip in VM build; JDK build will strip libraries later
-# LINK_LIB.CC/POST_HOOK += $(STRIP_$(LINK_INTO))
+# LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO))
--- a/make/bsd/makefiles/rules.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/bsd/makefiles/rules.make	Tue Apr 10 10:42:34 2012 -0700
@@ -27,52 +27,39 @@
 # Tell make that .cpp is important
 .SUFFIXES: .cpp $(SUFFIXES)
 
-# For now.  Other makefiles use CPP as the c++ compiler, but that should really
-# name the preprocessor.
-ifeq    ($(CCC),)
-CCC             = $(CPP)
-endif
-
 DEMANGLER       = c++filt
 DEMANGLE        = $(DEMANGLER) < $@ > .$@ && mv -f .$@ $@
 
-# $(CC) is the c compiler (cc/gcc), $(CCC) is the c++ compiler (CC/g++).
-C_COMPILE       = $(CC) $(CPPFLAGS) $(CFLAGS)
-CC_COMPILE      = $(CCC) $(CPPFLAGS) $(CFLAGS)
+# $(CC) is the c compiler (cc/gcc), $(CXX) is the c++ compiler (CC/g++).
+CC_COMPILE       = $(CC) $(CXXFLAGS) $(CFLAGS)
+CXX_COMPILE      = $(CXX) $(CXXFLAGS) $(CFLAGS)
 
 AS.S            = $(AS) $(ASFLAGS)
 
-COMPILE.c       = $(C_COMPILE) -c
-GENASM.c        = $(C_COMPILE) -S
-LINK.c          = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
-LINK_LIB.c      = $(CC) $(LFLAGS) $(SHARED_FLAG)
-PREPROCESS.c    = $(C_COMPILE) -E
+COMPILE.CC       = $(CC_COMPILE) -c
+GENASM.CC        = $(CC_COMPILE) -S
+LINK.CC          = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
+LINK_LIB.CC      = $(CC) $(LFLAGS) $(SHARED_FLAG)
+PREPROCESS.CC    = $(CC_COMPILE) -E
 
-COMPILE.CC      = $(CC_COMPILE) -c
-GENASM.CC       = $(CC_COMPILE) -S
-LINK.CC         = $(CCC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
-LINK_NOPROF.CC  = $(CCC) $(LFLAGS) $(AOUT_FLAGS)
-LINK_LIB.CC     = $(CCC) $(LFLAGS) $(SHARED_FLAG)
-PREPROCESS.CC   = $(CC_COMPILE) -E
+COMPILE.CXX      = $(CXX_COMPILE) -c
+GENASM.CXX       = $(CXX_COMPILE) -S
+LINK.CXX         = $(CXX) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
+LINK_NOPROF.CXX  = $(CXX) $(LFLAGS) $(AOUT_FLAGS)
+LINK_LIB.CXX     = $(CXX) $(LFLAGS) $(SHARED_FLAG)
+PREPROCESS.CXX   = $(CXX_COMPILE) -E
 
 # cross compiling the jvm with c2 requires host compilers to build
 # adlc tool
 
-HOST.CC_COMPILE      = $(HOSTCPP) $(CPPFLAGS) $(CFLAGS)
-HOST.COMPILE.CC      = $(HOST.CC_COMPILE) -c
-HOST.LINK_NOPROF.CC  = $(HOSTCPP) $(LFLAGS) $(AOUT_FLAGS)
+HOST.CXX_COMPILE      = $(HOSTCXX) $(CXXFLAGS) $(CFLAGS)
+HOST.COMPILE.CXX      = $(HOST.CXX_COMPILE) -c
+HOST.LINK_NOPROF.CXX  = $(HOSTCXX) $(LFLAGS) $(AOUT_FLAGS)
 
 
 # Effect of REMOVE_TARGET is to delete out-of-date files during "gnumake -k".
 REMOVE_TARGET   = rm -f $@
 
-# Synonyms.
-COMPILE.cpp     = $(COMPILE.CC)
-GENASM.cpp      = $(GENASM.CC)
-LINK.cpp        = $(LINK.CC)
-LINK_LIB.cpp    = $(LINK_LIB.CC)
-PREPROCESS.cpp  = $(PREPROCESS.CC)
-
 # Note use of ALT_BOOTDIR to explicitly specify location of java and
 # javac; this is the same environment variable used in the J2SE build
 # process for overriding the default spec, which is BOOTDIR.
@@ -161,14 +148,14 @@
 %.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
-	$(QUIETLY) $(COMPILE.CC) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
+	$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
 else
 %.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
 	$(QUIETLY) $(if $(findstring $@, $(NONPIC_OBJ_FILES)), \
-	   $(subst $(VM_PICFLAG), ,$(COMPILE.CC)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
-	   $(COMPILE.CC) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
+	   $(subst $(VM_PICFLAG), ,$(COMPILE.CXX)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
+	   $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
 endif
 
 %.o: %.s
@@ -178,13 +165,13 @@
 
 %.s: %.cpp
 	@echo Generating assembly for $<
-	$(QUIETLY) $(GENASM.CC) -o $@ $<
+	$(QUIETLY) $(GENASM.CXX) -o $@ $<
 	$(QUIETLY) $(DEMANGLE) $(COMPILE_DONE)
 
 # Intermediate files (for debugging macros)
 %.i: %.cpp
 	@echo Preprocessing $< to $@
-	$(QUIETLY) $(PREPROCESS.CC) $< > $@ $(COMPILE_DONE)
+	$(QUIETLY) $(PREPROCESS.CXX) $< > $@ $(COMPILE_DONE)
 
 #  Override gnumake built-in rules which do sccs get operations badly.
 #  (They put the checked out code in the current directory, not in the
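The rules.make rewrite above retires the old CPP/CCC spelling in favor of the conventional CC/CXX pair: $(CC) drives C compiles and links, $(CXX) drives C++ ones, the old *.c macros become *.CC, the old *.CC macros become *.CXX, and the .cpp synonyms are dropped. A minimal sketch of the resulting convention, assuming a plain gcc/g++ toolchain (values illustrative, not part of the patch):

CC  = gcc
CXX = g++

CXX_COMPILE  = $(CXX) $(CXXFLAGS) $(CFLAGS)
COMPILE.CXX  = $(CXX_COMPILE) -c
LINK_LIB.CXX = $(CXX) $(LFLAGS) $(SHARED_FLAG)

# C++ sources build through the CXX-based macros; plain C goes through the CC ones.
%.o: %.cpp
	$(COMPILE.CXX) -o $@ $<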
--- a/make/bsd/makefiles/sparcWorks.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/bsd/makefiles/sparcWorks.make	Tue Apr 10 10:42:34 2012 -0700
@@ -23,14 +23,17 @@
 #
 
 #------------------------------------------------------------------------
-# CC, CPP & AS
+# CC, CXX & AS
 
-CPP = CC
-CC  = cc
-AS  = $(CC) -c
+# If a SPEC is not set already, then use these defaults.
+ifeq ($(SPEC),)
+  CXX = CC
+  CC  = cc
+  AS  = $(CC) -c
 
-HOSTCPP = $(CPP)
-HOSTCC  = $(CC)
+  HOSTCXX = $(CXX)
+  HOSTCC  = $(CC)
+endif
 
 ARCHFLAG = $(ARCHFLAG/$(BUILDARCH))
 ARCHFLAG/i486    = -m32
--- a/make/bsd/makefiles/top.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/bsd/makefiles/top.make	Tue Apr 10 10:42:34 2012 -0700
@@ -124,8 +124,8 @@
 	@$(UpdatePCH)
 	@$(MAKE) -f vm.make $(MFLAGS-adjusted)
 
-install: the_vm
-	@$(MAKE) -f vm.make install
+install gamma: the_vm
+	@$(MAKE) -f vm.make $@
 
 # next rules support "make foo.[ois]"
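The top.make hunk above folds the gamma target into the install rule by forwarding the requested goal to vm.make through $@. A minimal sketch of the dispatch pattern, assuming vm.make provides matching install and gamma targets:

.PHONY: install gamma
install gamma: the_vm
	@$(MAKE) -f vm.make $@

Running "make install" re-invokes vm.make with the install goal, "make gamma" re-invokes it with gamma, and both build the_vm first.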
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/bsd/makefiles/universal.gmk	Tue Apr 10 10:42:34 2012 -0700
@@ -0,0 +1,113 @@
+#
+# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# macosx universal builds
+universal_product:
+	$(MAKE) MACOSX_UNIVERSAL=true all_product_universal
+universal_fastdebug:
+	$(MAKE) MACOSX_UNIVERSAL=true all_fastdebug_universal
+universal_debug:
+	$(MAKE) MACOSX_UNIVERSAL=true all_debug_universal
+
+
+# Universal builds include 1 or more architectures in a single binary
+all_product_universal:
+#	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_PRODUCT_TARGETS)
+	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_PRODUCT_TARGETS)
+	$(QUIETLY) $(MAKE) EXPORT_SUBDIR= universalize
+all_fastdebug_universal:
+#	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_FASTDEBUG_TARGETS)
+	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_FASTDEBUG_TARGETS)
+	$(QUIETLY) $(MAKE) EXPORT_SUBDIR=/fastdebug universalize
+all_debug_universal:
+#	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_DEBUG_TARGETS)
+	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_DEBUG_TARGETS)
+	$(QUIETLY) $(MAKE) EXPORT_SUBDIR=/debug universalize
+
+
+# Consolidate architecture builds into a single Universal binary
+universalize: $(UNIVERSAL_LIPO_LIST) $(UNIVERSAL_COPY_LIST)
+	$(RM) -r $(EXPORT_PATH)/jre/lib/{i386,amd64}
+
+
+# Package built libraries in a universal binary
+$(UNIVERSAL_LIPO_LIST):
+	BUILT_LIPO_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`"; \
+	if [ -n "$${BUILT_LIPO_FILES}" ]; then \
+	  $(MKDIR) -p $(shell dirname $@); \
+	  lipo -create -output $@ $${BUILT_LIPO_FILES}; \
+	fi	
+
+
+# Copy built non-universal binaries in place
+$(UNIVERSAL_COPY_LIST):
+	BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`"; \
+	if [ -n "$${BUILT_COPY_FILES}" ]; then \
+	  for i in $${BUILT_COPY_FILES}; do \
+	    if [ -f $${i} ]; then \
+	      $(MKDIR) -p $(shell dirname $@); \
+	      $(CP) $${i} $@; \
+	    fi; \
+	  done; \
+	fi
+
+
+# Replace arch specific binaries with universal binaries
+export_universal:
+	$(RM) -r $(EXPORT_PATH)/jre/lib/{i386,amd64}
+	$(RM) -r $(JDK_IMAGE_DIR)/jre/lib/{i386,amd64}
+	$(RM) $(JDK_IMAGE_DIR)/jre/lib/{client,server}/libjsig.$(LIBRARY_SUFFIX)
+	($(CD) $(EXPORT_PATH) && \
+	  $(TAR) -cf - *) | \
+	  ($(CD) $(JDK_IMAGE_DIR) && $(TAR) -xpf -)
+
+
+# Overlay universal binaries
+copy_universal:
+	$(RM) -r $(JDK_IMAGE_DIR)$(COPY_SUBDIR)/jre/lib/{i386,amd64}
+	$(RM) $(JDK_IMAGE_DIR)$(COPY_SUBDIR)/jre/lib/{client,server}/libjsig.$(LIBRARY_SUFFIX)
+	($(CD) $(EXPORT_PATH)$(COPY_SUBDIR) && \
+	  $(TAR) -cf - *) | \
+	  ($(CD) $(JDK_IMAGE_DIR)$(COPY_SUBDIR) && $(TAR) -xpf -)
+
+
+# Additional processing for universal builds
+export_product_jdk::
+	$(MAKE) EXPORT_SUBDIR=           export_universal
+export_optimized_jdk::
+	$(MAKE) EXPORT_SUBDIR=           export_universal
+export_fastdebug_jdk::
+	$(MAKE) EXPORT_SUBDIR=/fastdebug export_universal
+export_debug_jdk::
+	$(MAKE) EXPORT_SUBDIR=/debug     export_universal
+copy_product_jdk::
+	$(MAKE) COPY_SUBDIR=             copy_universal
+copy_fastdebug_jdk::
+	$(MAKE) COPY_SUBDIR=/fastdebug   copy_universal
+copy_debug_jdk::
+	$(MAKE) COPY_SUBDIR=/debug       copy_universal
+
+.PHONY:	universal_product universal_fastdebug universal_debug \
+	all_product_universal all_fastdebug_universal all_debug_universal \
+	universalize export_universal copy_universal
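universal.gmk packages the per-architecture builds into macOS universal binaries: each entry in UNIVERSAL_LIPO_LIST is recreated with lipo from whatever i386/amd64 slices were actually built, entries in UNIVERSAL_COPY_LIST are copied as-is, and the per-arch jre/lib/{i386,amd64} directories are then removed. A minimal sketch of the lipo step on a single hypothetical library (paths illustrative):

libjvm.dylib: i386/libjvm.dylib amd64/libjvm.dylib
	lipo -create -output $@ $^
	lipo -info $@    # should report both architectures in the fat file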
--- a/make/bsd/makefiles/vm.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/bsd/makefiles/vm.make	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
 -include $(DEP_DIR)/*.d
 
 # read machine-specific adjustments (%%% should do this via buildtree.make?)
-ifeq ($(ZERO_BUILD), true)
+ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
   include $(MAKEFILES_DIR)/zeroshark.make
 else
   include $(MAKEFILES_DIR)/$(BUILDARCH).make
@@ -82,18 +82,22 @@
 BUILD_USER    = -DHOTSPOT_BUILD_USER="\"$(HOTSPOT_BUILD_USER)\""
 VM_DISTRO     = -DHOTSPOT_VM_DISTRO="\"$(HOTSPOT_VM_DISTRO)\""
 
-CPPFLAGS =           \
+CXXFLAGS =           \
   ${SYSDEFS}         \
   ${INCLUDES}        \
   ${BUILD_VERSION}   \
   ${BUILD_TARGET}    \
   ${BUILD_USER}      \
   ${HS_LIB_ARCH}     \
-  ${JRE_VERSION}     \
   ${VM_DISTRO}
 
+# This is VERY important! The version define must only be supplied to vm_version.o
+# If not, ccache will not re-use the cache at all, since the version string might contain
+# a time and date. 
+vm_version.o: CXXFLAGS += ${JRE_VERSION} 
+
 ifdef DEFAULT_LIBPATH
-CPPFLAGS += -DDEFAULT_LIBPATH="\"$(DEFAULT_LIBPATH)\""
+CXXFLAGS += -DDEFAULT_LIBPATH="\"$(DEFAULT_LIBPATH)\""
 endif
 
 ifndef JAVASE_EMBEDDED
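The hunk above renames the VM flags variable from CPPFLAGS to CXXFLAGS and, more importantly, moves ${JRE_VERSION} into a target-specific variable so that only vm_version.o sees the potentially date-stamped version defines while every other compile line stays stable and ccache-friendly. A minimal sketch of the GNU make target-specific append technique, with hypothetical names:

CXXFLAGS = -O2
# Only stamp.o gets the volatile define; all other objects keep a stable command line.
stamp.o: CXXFLAGS += -DBUILD_STAMP="\"$(shell date)\""

%.o: %.cpp
	$(CXX) $(CXXFLAGS) -c -o $@ $<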
@@ -260,19 +264,19 @@
   ifeq ($(STATIC_CXX), true)
     LFLAGS_VM              += $(STATIC_LIBGCC)
     LIBS_VM                += $(STATIC_STDCXX)
-    LINK_VM                = $(LINK_LIB.c)
+    LINK_VM                = $(LINK_LIB.CC)
   else
-    LINK_VM                = $(LINK_LIB.CC)
+    LINK_VM                = $(LINK_LIB.CXX)
   endif
 
   LIBS_VM                  += $(LIBS)
 endif
-ifeq ($(ZERO_BUILD), true)
+ifeq ($(JVM_VARIANT_ZERO), true)
   LIBS_VM += $(LIBFFI_LIBS)
 endif
-ifeq ($(SHARK_BUILD), true)
+ifeq ($(JVM_VARIANT_ZEROSHARK), true)
+  LIBS_VM   += $(LIBFFI_LIBS) $(LLVM_LIBS)
   LFLAGS_VM += $(LLVM_LDFLAGS)
-  LIBS_VM   += $(LLVM_LIBS)
 endif
 
 
@@ -280,7 +284,7 @@
 $(PRECOMPILED_HEADER):
 	$(QUIETLY) echo Generating precompiled header $@
 	$(QUIETLY) mkdir -p $(PRECOMPILED_HEADER_DIR)
-	$(QUIETLY) $(COMPILE.CC) $(DEPFLAGS) -x c++-header $(PRECOMPILED_HEADER_SRC) -o $@ $(COMPILE_DONE)
+	$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -x c++-header $(PRECOMPILED_HEADER_SRC) -o $@ $(COMPILE_DONE)
 
 # making the library:
 
@@ -305,10 +309,10 @@
 $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
 	$(QUIETLY) {                                                    \
 	    echo Linking vm...;                                         \
-	    $(LINK_LIB.CC/PRE_HOOK)                                     \
+	    $(LINK_LIB.CXX/PRE_HOOK)                                     \
 	    $(LINK_VM) $(LD_SCRIPT_FLAG)                                \
 		       $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM);       \
-	    $(LINK_LIB.CC/POST_HOOK)                                    \
+	    $(LINK_LIB.CXX/POST_HOOK)                                    \
 	    rm -f $@.1; ln -s $@ $@.1;                                  \
 	    [ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
 	}
@@ -331,17 +335,20 @@
 # Serviceability agent
 include $(MAKEFILES_DIR)/saproc.make
 
+# Whitebox testing API
+include $(MAKEFILES_DIR)/wb.make
+
 #----------------------------------------------------------------------
 
 ifeq ($(OS_VENDOR), Darwin)
 $(LIBJVM).dSYM: $(LIBJVM)
 	dsymutil $(LIBJVM)
 
-# no launcher or libjvm_db for macosx
-build: $(LIBJVM) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck $(LIBJVM).dSYM
+# no libjvm_db for macosx
+build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck $(LIBJVM).dSYM $(WB_JAR)
 	echo "Doing vm.make build:"
 else
-build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC)
+build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC) $(WB_JAR)
 endif
 
 install: install_jvm install_jsig install_saproc
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/bsd/makefiles/wb.make	Tue Apr 10 10:42:34 2012 -0700
@@ -0,0 +1,46 @@
+#
+# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Rules to build whitebox testing library, used by vm.make
+WB = wb
+
+WBSRCDIR = $(GAMMADIR)/src/share/tools/whitebox
+
+WB_JAR = $(GENERATED)/$(WB).jar
+
+WB_JAVA_SRCS = $(shell find $(WBSRCDIR) -name '*.java')
+WB_JAVA_CLASSDIR = $(GENERATED)/wb/classes
+
+WB_JAVA_CLASSES  = $(patsubst $(WBSRCDIR)/%,$(WB_JAVA_CLASSDIR)/%, \
+	$(patsubst %.java,%.class,$(WB_JAVA_SRCS)))
+
+$(WB_JAVA_CLASSDIR)/%.class: $(WBSRCDIR)/%.java $(WB_JAVA_CLASSDIR)
+	$(REMOTE) $(COMPILE.JAVAC) -nowarn -d $(WB_JAVA_CLASSDIR) $<
+
+$(WB_JAR): $(WB_JAVA_CLASSES)
+	$(QUIETLY) $(REMOTE) $(RUN.JAR) cf $@ -C $(WB_JAVA_CLASSDIR)/ .
+
+$(WB_JAVA_CLASSDIR):
+	$(QUIETLY) mkdir -p $@
+
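wb.make compiles the new whitebox testing API sources and packs them into wb.jar for the build to export. The nested patsubst calls map each source under the whitebox tree to its class file under $(GENERATED)/wb/classes; a sketch of the mapping with a hypothetical source file:

WB_JAVA_SRCS    = $(WBSRCDIR)/sun/hotspot/WhiteBox.java   # hypothetical example
WB_JAVA_CLASSES = $(patsubst $(WBSRCDIR)/%,$(WB_JAVA_CLASSDIR)/%, \
	$(patsubst %.java,%.class,$(WB_JAVA_SRCS)))
# -> $(WB_JAVA_CLASSDIR)/sun/hotspot/WhiteBox.class, which the pattern rule
#    builds with javac and which $(WB_JAR) then packs with jar cf.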
--- a/make/defs.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/defs.make	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,11 @@
 
 # The common definitions for hotspot builds.
 
+# Optionally include SPEC file generated by configure.
+ifneq ($(SPEC),)
+  include $(SPEC)
+endif
+
 # Default to verbose build logs (show all compile lines):
 MAKE_VERBOSE=y
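defs.make now optionally pulls in a SPEC file generated by configure before anything else, which is why the compiler makefiles (gcc.make, sparcWorks.make) wrap their toolchain defaults in ifeq ($(SPEC),) guards: values such as CC, CXX, HOSTCC and HOSTCXX coming from the spec win, and the hard-coded defaults only apply when no spec is given. A minimal sketch of the hand-off (spec file name and path hypothetical):

# a configure-driven build would be invoked roughly as:
#   make SPEC=/path/to/output/spec.gmk product
ifneq ($(SPEC),)
  include $(SPEC)        # may predefine CC, CXX, HOSTCC, HOSTCXX, STRIP, ...
endif
ifeq ($(SPEC),)
  CXX = g++              # fall-back defaults, as in gcc.make
  CC  = gcc
endif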
 
@@ -50,6 +55,27 @@
 @$(RM) $@
 endef
 
+# Default values for JVM_VARIANT* variables if configure hasn't set
+# it already.
+ifeq ($(JVM_VARIANTS),)
+  ifeq ($(ZERO_BUILD), true)
+    ifeq ($(SHARK_BUILD), true)
+      JVM_VARIANTS:=zeroshark
+      JVM_VARIANT_ZEROSHARK:=true
+    else
+      JVM_VARIANTS:=zero
+      JVM_VARIANT_ZERO:=true
+    endif
+  else
+    # A default is needed
+    ifeq ($(BUILD_CLIENT_ONLY), true)
+      JVM_VARIANTS:=client
+      JVM_VARIANT_CLIENT:=true
+    endif
+    # Further defaults are platform and arch specific
+  endif
+endif
+
 # Directory paths and user name
 # Unless GAMMADIR is set on the command line, search upward from
 # the current directory for a parent directory containing "src/share/vm".
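The block above translates the legacy build flags into the new JVM_VARIANT* scheme whenever configure has not already set JVM_VARIANTS; the per-variant booleans are then tested throughout the makefiles with the findstring idiom seen in the vm.make and Makefile hunks. Roughly:

#   make ZERO_BUILD=true                   -> JVM_VARIANTS=zero       JVM_VARIANT_ZERO=true
#   make ZERO_BUILD=true SHARK_BUILD=true  -> JVM_VARIANTS=zeroshark  JVM_VARIANT_ZEROSHARK=true
#   make BUILD_CLIENT_ONLY=true            -> JVM_VARIANTS=client     JVM_VARIANT_CLIENT=true

# The replacement for the old "ifeq ($(ZERO_BUILD), true)" test accepts either zero variant:
ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
  # taken when at least one of the two variables is set to true
endif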
@@ -185,6 +211,18 @@
   BOOTDIR=$(ALT_BOOTDIR)
 endif
 
+# Select name of the export directory and honor ALT overrides
+EXPORT_PATH=$(OUTPUTDIR)/export-$(PLATFORM)$(EXPORT_SUBDIR)
+ifneq ($(ALT_EXPORT_PATH),)
+  EXPORT_PATH=$(ALT_EXPORT_PATH)
+endif
+
+# Default jdk image if one is created for you with create_jdk
+JDK_IMAGE_DIR=$(OUTPUTDIR)/jdk-$(PLATFORM)
+ifneq ($(ALT_JDK_IMAGE_DIR),)
+  JDK_IMAGE_DIR=$(ALT_JDK_IMAGE_DIR)
+endif
+
 # The platform dependent defs.make defines platform specific variable such 
 # as ARCH, EXPORT_LIST etc. We must place the include here after BOOTDIR is defined.
 include $(GAMMADIR)/make/$(OSNAME)/makefiles/defs.make
@@ -263,15 +301,6 @@
 # includes this make/defs.make file.
 MAKE_ARGS += HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION)
 
-# Select name of export directory
-EXPORT_PATH=$(OUTPUTDIR)/export-$(PLATFORM)$(EXPORT_SUBDIR)
-ifneq ($(ALT_EXPORT_PATH),)
-  EXPORT_PATH=$(ALT_EXPORT_PATH)
-endif
-
-# Default jdk image if one is created for you with create_jdk
-JDK_IMAGE_DIR=$(OUTPUTDIR)/jdk-$(PLATFORM)
-
 # Various export sub directories
 EXPORT_INCLUDE_DIR = $(EXPORT_PATH)/include
 EXPORT_DOCS_DIR = $(EXPORT_PATH)/docs
--- a/make/hotspot_version	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/hotspot_version	Tue Apr 10 10:42:34 2012 -0700
@@ -33,9 +33,9 @@
 # Don't put quotes (fail windows build).
 HOTSPOT_VM_COPYRIGHT=Copyright 2011
 
-HS_MAJOR_VER=23
+HS_MAJOR_VER=24
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=11
+HS_BUILD_NUMBER=06
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/make/jprt.properties	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/jprt.properties	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,9 @@
 
 # This tells jprt what default release we want to build
 
-jprt.tools.default.release=${jprt.submit.release}
+jprt.hotspot.default.release=jdk7
+
+jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}}
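The release fallback above appears to use jprt's conditional property syntax: if a release was selected when the job was submitted, that value is used, otherwise jprt.hotspot.default.release (now jdk7) applies. Schematically:

#   release option given at submit time -> jprt.tools.default.release = <that release>
#   no release option given             -> jprt.tools.default.release = jdk7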
 
 # Disable syncing the source after builds and tests are done.
 
@@ -52,154 +54,72 @@
 # Define the Solaris platforms we want for the various releases
 jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10
 jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
-jprt.my.solaris.sparc.jdk7b107=solaris_sparc_5.10
-jprt.my.solaris.sparc.jdk7temp=solaris_sparc_5.10
-jprt.my.solaris.sparc.jdk6=solaris_sparc_5.8
-jprt.my.solaris.sparc.jdk6perf=solaris_sparc_5.8
-jprt.my.solaris.sparc.jdk6u10=solaris_sparc_5.8
-jprt.my.solaris.sparc.jdk6u14=solaris_sparc_5.8
-jprt.my.solaris.sparc.jdk6u18=solaris_sparc_5.8
-jprt.my.solaris.sparc.jdk6u20=solaris_sparc_5.8
-jprt.my.solaris.sparc.ejdk7=${jprt.my.solaris.sparc.jdk7}
-jprt.my.solaris.sparc.ejdk6=${jprt.my.solaris.sparc.jdk6}
+jprt.my.solaris.sparc.jdk7u4=${jprt.my.solaris.sparc.jdk7}
 jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
 
 jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
-jprt.my.solaris.sparcv9.jdk7b107=solaris_sparcv9_5.10
-jprt.my.solaris.sparcv9.jdk7temp=solaris_sparcv9_5.10
-jprt.my.solaris.sparcv9.jdk6=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.jdk6perf=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.jdk6u10=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.jdk6u14=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.jdk6u18=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.jdk6u20=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.ejdk7=${jprt.my.solaris.sparcv9.jdk7}
-jprt.my.solaris.sparcv9.ejdk6=${jprt.my.solaris.sparcv9.jdk6}
+jprt.my.solaris.sparcv9.jdk7u4=${jprt.my.solaris.sparcv9.jdk7}
 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
 
 jprt.my.solaris.i586.jdk8=solaris_i586_5.10
 jprt.my.solaris.i586.jdk7=solaris_i586_5.10
-jprt.my.solaris.i586.jdk7b107=solaris_i586_5.10
-jprt.my.solaris.i586.jdk7temp=solaris_i586_5.10
-jprt.my.solaris.i586.jdk6=solaris_i586_5.8
-jprt.my.solaris.i586.jdk6perf=solaris_i586_5.8
-jprt.my.solaris.i586.jdk6u10=solaris_i586_5.8
-jprt.my.solaris.i586.jdk6u14=solaris_i586_5.8
-jprt.my.solaris.i586.jdk6u18=solaris_i586_5.8
-jprt.my.solaris.i586.jdk6u20=solaris_i586_5.8
-jprt.my.solaris.i586.ejdk7=${jprt.my.solaris.i586.jdk7}
-jprt.my.solaris.i586.ejdk6=${jprt.my.solaris.i586.jdk6}
+jprt.my.solaris.i586.jdk7u4=${jprt.my.solaris.i586.jdk7}
 jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
 
 jprt.my.solaris.x64.jdk8=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
-jprt.my.solaris.x64.jdk7b107=solaris_x64_5.10
-jprt.my.solaris.x64.jdk7temp=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6perf=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6u10=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6u14=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6u18=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6u20=solaris_x64_5.10
-jprt.my.solaris.x64.ejdk7=${jprt.my.solaris.x64.jdk7}
-jprt.my.solaris.x64.ejdk6=${jprt.my.solaris.x64.jdk6}
+jprt.my.solaris.x64.jdk7u4=${jprt.my.solaris.x64.jdk7}
 jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
 
 jprt.my.linux.i586.jdk8=linux_i586_2.6
 jprt.my.linux.i586.jdk7=linux_i586_2.6
-jprt.my.linux.i586.jdk7b107=linux_i586_2.6
-jprt.my.linux.i586.jdk7temp=linux_i586_2.6
-jprt.my.linux.i586.jdk6=linux_i586_2.4
-jprt.my.linux.i586.jdk6perf=linux_i586_2.4
-jprt.my.linux.i586.jdk6u10=linux_i586_2.4
-jprt.my.linux.i586.jdk6u14=linux_i586_2.4
-jprt.my.linux.i586.jdk6u18=linux_i586_2.4
-jprt.my.linux.i586.jdk6u20=linux_i586_2.4
-jprt.my.linux.i586.ejdk7=linux_i586_2.6
-jprt.my.linux.i586.ejdk6=linux_i586_2.6
+jprt.my.linux.i586.jdk7u4=${jprt.my.linux.i586.jdk7}
 jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
 
 jprt.my.linux.x64.jdk8=linux_x64_2.6
 jprt.my.linux.x64.jdk7=linux_x64_2.6
-jprt.my.linux.x64.jdk7b107=linux_x64_2.6
-jprt.my.linux.x64.jdk7temp=linux_x64_2.6
-jprt.my.linux.x64.jdk6=linux_x64_2.4
-jprt.my.linux.x64.jdk6perf=linux_x64_2.4
-jprt.my.linux.x64.jdk6u10=linux_x64_2.4
-jprt.my.linux.x64.jdk6u14=linux_x64_2.4
-jprt.my.linux.x64.jdk6u18=linux_x64_2.4
-jprt.my.linux.x64.jdk6u20=linux_x64_2.4
-jprt.my.linux.x64.ejdk7=${jprt.my.linux.x64.jdk7}
-jprt.my.linux.x64.ejdk6=${jprt.my.linux.x64.jdk6}
+jprt.my.linux.x64.jdk7u4=${jprt.my.linux.x64.jdk7}
 jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
 
 jprt.my.linux.ppc.jdk8=linux_ppc_2.6
 jprt.my.linux.ppc.jdk7=linux_ppc_2.6
-jprt.my.linux.ppc.jdk7b107=linux_ppc_2.6
-jprt.my.linux.ppc.jdk7temp=linux_ppc_2.6
-jprt.my.linux.ppc.ejdk6=linux_ppc_2.6
-jprt.my.linux.ppc.ejdk7=linux_ppc_2.6
+jprt.my.linux.ppc.jdk7u4=${jprt.my.linux.ppc.jdk7}
 jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
 
 jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6
 jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.jdk7b107=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.jdk7temp=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.ejdk6=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.ejdk7=linux_ppcv2_2.6
+jprt.my.linux.ppcv2.jdk7u4=${jprt.my.linux.ppcv2.jdk7}
 jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
 
 jprt.my.linux.ppcsflt.jdk8=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.jdk7b107=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.jdk7temp=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.ejdk6=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.ejdk7=linux_ppcsflt_2.6
+jprt.my.linux.ppcsflt.jdk7u4=${jprt.my.linux.ppcsflt.jdk7}
 jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}
 
 jprt.my.linux.armvfp.jdk8=linux_armvfp_2.6
 jprt.my.linux.armvfp.jdk7=linux_armvfp_2.6
-jprt.my.linux.armvfp.jdk7b107=linux_armvfp_2.6
-jprt.my.linux.armvfp.jdk7temp=linux_armvfp_2.6
-jprt.my.linux.armvfp.ejdk6=linux_armvfp_2.6
-jprt.my.linux.armvfp.ejdk7=linux_armvfp_2.6
+jprt.my.linux.armvfp.jdk7u4=${jprt.my.linux.armvfp.jdk7}
 jprt.my.linux.armvfp=${jprt.my.linux.armvfp.${jprt.tools.default.release}}
 
 jprt.my.linux.armsflt.jdk8=linux_armsflt_2.6
 jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
-jprt.my.linux.armsflt.jdk7b107=linux_armsflt_2.6
-jprt.my.linux.armsflt.jdk7temp=linux_armsflt_2.6
-jprt.my.linux.armsflt.ejdk6=linux_armsflt_2.6
-jprt.my.linux.armsflt.ejdk7=linux_armsflt_2.6
+jprt.my.linux.armsflt.jdk7u4=${jprt.my.linux.armsflt.jdk7}
 jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
 
+jprt.my.macosx.x64.jdk8=macosx_x64_10.7
+jprt.my.macosx.x64.jdk7=macosx_x64_10.7
+jprt.my.macosx.x64.jdk7u4=${jprt.my.macosx.x64.jdk7}
+jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
+
 jprt.my.windows.i586.jdk8=windows_i586_5.1
 jprt.my.windows.i586.jdk7=windows_i586_5.1
-jprt.my.windows.i586.jdk7b107=windows_i586_5.0
-jprt.my.windows.i586.jdk7temp=windows_i586_5.0
-jprt.my.windows.i586.jdk6=windows_i586_5.0
-jprt.my.windows.i586.jdk6perf=windows_i586_5.0
-jprt.my.windows.i586.jdk6u10=windows_i586_5.0
-jprt.my.windows.i586.jdk6u14=windows_i586_5.0
-jprt.my.windows.i586.jdk6u18=windows_i586_5.0
-jprt.my.windows.i586.jdk6u20=windows_i586_5.0
-jprt.my.windows.i586.ejdk7=${jprt.my.windows.i586.jdk7}
-jprt.my.windows.i586.ejdk6=${jprt.my.windows.i586.jdk6}
+jprt.my.windows.i586.jdk7u4=${jprt.my.windows.i586.jdk7}
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
 
 jprt.my.windows.x64.jdk8=windows_x64_5.2
 jprt.my.windows.x64.jdk7=windows_x64_5.2
-jprt.my.windows.x64.jdk7b107=windows_x64_5.2
-jprt.my.windows.x64.jdk7temp=windows_x64_5.2
-jprt.my.windows.x64.jdk6=windows_x64_5.2
-jprt.my.windows.x64.jdk6perf=windows_x64_5.2
-jprt.my.windows.x64.jdk6u10=windows_x64_5.2
-jprt.my.windows.x64.jdk6u14=windows_x64_5.2
-jprt.my.windows.x64.jdk6u18=windows_x64_5.2
-jprt.my.windows.x64.jdk6u20=windows_x64_5.2
-jprt.my.windows.x64.ejdk7=${jprt.my.windows.x64.jdk7}
-jprt.my.windows.x64.ejdk6=${jprt.my.windows.x64.jdk6}
+jprt.my.windows.x64.jdk7u4=${jprt.my.windows.x64.jdk7}
 jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
 
 # Standard list of jprt build targets for this source tree
@@ -211,6 +131,7 @@
     ${jprt.my.solaris.x64}-{product|fastdebug|debug}, \
     ${jprt.my.linux.i586}-{product|fastdebug|debug}, \
     ${jprt.my.linux.x64}-{product|fastdebug}, \
+    ${jprt.my.macosx.x64}-{product|fastdebug|debug}, \
     ${jprt.my.windows.i586}-{product|fastdebug|debug}, \
     ${jprt.my.windows.x64}-{product|fastdebug|debug}
 
@@ -232,16 +153,7 @@
 
 jprt.build.targets.jdk8=${jprt.build.targets.all}
 jprt.build.targets.jdk7=${jprt.build.targets.all}
-jprt.build.targets.jdk7temp=${jprt.build.targets.all}
-jprt.build.targets.jdk7b107=${jprt.build.targets.all}
-jprt.build.targets.jdk6=${jprt.build.targets.standard}
-jprt.build.targets.jdk6perf=${jprt.build.targets.standard}
-jprt.build.targets.jdk6u10=${jprt.build.targets.standard}
-jprt.build.targets.jdk6u14=${jprt.build.targets.standard}
-jprt.build.targets.jdk6u18=${jprt.build.targets.standard}
-jprt.build.targets.jdk6u20=${jprt.build.targets.standard}
-jprt.build.targets.ejdk6=${jprt.build.targets.all}
-jprt.build.targets.ejdk7=${jprt.build.targets.all}
+jprt.build.targets.jdk7u4=${jprt.build.targets.all}
 jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
 
 # Subset lists of test targets for this source tree
@@ -416,6 +328,30 @@
     ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \
     ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParOldGC
 
+jprt.my.macosx.x64.test.targets = \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-scimark, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_default, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_G1, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_default, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_CMS, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_G1, \
+    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC
+#    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default, \
+#    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
+#    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
+#    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \
+#    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParOldGC
+
 jprt.my.windows.i586.test.targets = \
     ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
     ${jprt.my.windows.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
@@ -492,6 +428,7 @@
   ${jprt.my.solaris.x64.test.targets}, \
   ${jprt.my.linux.i586.test.targets}, \
   ${jprt.my.linux.x64.test.targets}, \
+  ${jprt.my.macosx.x64.test.targets}, \
   ${jprt.my.windows.i586.test.targets}, \
   ${jprt.my.windows.x64.test.targets}, \
   ${jprt.test.targets.open}
@@ -509,16 +446,7 @@
 
 jprt.test.targets.jdk8=${jprt.test.targets.standard}
 jprt.test.targets.jdk7=${jprt.test.targets.standard}
-jprt.test.targets.jdk7temp=${jprt.test.targets.standard}
-jprt.test.targets.jdk7b105=${jprt.test.targets.standard}
-jprt.test.targets.jdk6=${jprt.test.targets.standard}
-jprt.test.targets.jdk6perf=${jprt.test.targets.standard}
-jprt.test.targets.jdk6u10=${jprt.test.targets.standard}
-jprt.test.targets.jdk6u14=${jprt.test.targets.standard}
-jprt.test.targets.jdk6u18=${jprt.test.targets.standard}
-jprt.test.targets.jdk6u20=${jprt.test.targets.standard}
-jprt.test.targets.ejdk6=${jprt.test.targets.embedded}
-jprt.test.targets.ejdk7=${jprt.test.targets.embedded}
+jprt.test.targets.jdk7u4=${jprt.test.targets.jdk7}
 jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
 
 # The default test/Makefile targets that should be run
@@ -538,6 +466,7 @@
   ${jprt.my.solaris.x64}-*-c2-servertest, \
   ${jprt.my.linux.i586}-*-c2-servertest, \
   ${jprt.my.linux.x64}-*-c2-servertest, \
+  ${jprt.my.macosx.x64}-*-c2-servertest, \
   ${jprt.my.windows.i586}-*-c2-servertest, \
   ${jprt.my.windows.x64}-*-c2-servertest
 
@@ -548,28 +477,35 @@
   ${jprt.my.solaris.x64}-fastdebug-c2-internalvmtests, \
   ${jprt.my.linux.i586}-fastdebug-c2-internalvmtests, \
   ${jprt.my.linux.x64}-fastdebug-c2-internalvmtests, \
+  ${jprt.my.macosx.x64}-fastdebug-c2-internalvmtests, \
   ${jprt.my.windows.i586}-fastdebug-c2-internalvmtests, \
   ${jprt.my.windows.x64}-fastdebug-c2-internalvmtests
-  
+
+jprt.make.rule.test.targets.standard.wbapi = \
+  ${jprt.my.solaris.sparc}-{product|fastdebug}-c2-wbapitest, \
+  ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-wbapitest, \
+  ${jprt.my.solaris.i586}-{product|fastdebug}-c2-wbapitest, \
+  ${jprt.my.solaris.x64}-{product|fastdebug}-c2-wbapitest, \
+  ${jprt.my.linux.i586}-{product|fastdebug}-c2-wbapitest, \
+  ${jprt.my.linux.x64}-{product|fastdebug}-c2-wbapitest, \
+  ${jprt.my.windows.i586}-{product|fastdebug}-c2-wbapitest, \
+  ${jprt.my.windows.x64}-{product|fastdebug}-c2-wbapitest, \
+  ${jprt.my.solaris.sparc}-{product|fastdebug}-c1-wbapitest, \
+  ${jprt.my.solaris.i586}-{product|fastdebug}-c1-wbapitest, \
+  ${jprt.my.linux.i586}-{product|fastdebug}-c1-wbapitest, \
+  ${jprt.my.windows.i586}-{product|fastdebug}-c1-wbapitest
+
 jprt.make.rule.test.targets.standard = \
   ${jprt.make.rule.test.targets.standard.client}, \
   ${jprt.make.rule.test.targets.standard.server}, \
-  ${jprt.make.rule.test.targets.standard.internalvmtests}
+  ${jprt.make.rule.test.targets.standard.internalvmtests}, \
+  ${jprt.make.rule.test.targets.standard.wbapi}
 
 jprt.make.rule.test.targets.embedded = \
   ${jprt.make.rule.test.targets.standard.client}
 
 jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk7temp=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk7b107=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6perf=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6u10=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6u14=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6u18=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6u20=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.ejdk6=${jprt.make.rule.test.targets.embedded}
-jprt.make.rule.test.targets.ejdk7=${jprt.make.rule.test.targets.embedded}
+jprt.make.rule.test.targets.jdk7u4=${jprt.make.rule.test.targets.jdk7}
 jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
 
--- a/make/linux/Makefile	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/linux/Makefile	Tue Apr 10 10:42:34 2012 -0700
@@ -188,7 +188,7 @@
 # in the build.sh script:
 TARGETS           = debug jvmg fastdebug optimized profiled product
 
-ifeq ($(ZERO_BUILD), true)
+ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
   SUBDIR_DOCS     = $(OSNAME)_$(VARIANTARCH)_docs
 else
   SUBDIR_DOCS     = $(OSNAME)_$(BUILDARCH)_docs
--- a/make/linux/makefiles/adlc.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/linux/makefiles/adlc.make	Tue Apr 10 10:42:34 2012 -0700
@@ -61,10 +61,10 @@
 INCLUDES += $(Src_Dirs_I:%=-I%)
 
 # set flags for adlc compilation
-CPPFLAGS = $(SYSDEFS) $(INCLUDES)
+CXXFLAGS = $(SYSDEFS) $(INCLUDES)
 
 # Force assertions on.
-CPPFLAGS += -DASSERT
+CXXFLAGS += -DASSERT
 
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 # Compiler warnings are treated as errors
@@ -109,7 +109,7 @@
 
 $(EXEC) : $(OBJECTS)
 	@echo Making adlc
-	$(QUIETLY) $(HOST.LINK_NOPROF.CC) -o $(EXEC) $(OBJECTS)
+	$(QUIETLY) $(HOST.LINK_NOPROF.CXX) -o $(EXEC) $(OBJECTS)
 
 # Random dependencies:
 $(OBJECTS): opcodes.hpp classes.hpp adlc.hpp adlcVMDeps.hpp adlparse.hpp archDesc.hpp arena.hpp dict2.hpp filebuff.hpp forms.hpp formsopt.hpp formssel.hpp
@@ -211,14 +211,14 @@
 $(OUTDIR)/%.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
-	$(QUIETLY) $(HOST.COMPILE.CC) -o $@ $< $(COMPILE_DONE)
+	$(QUIETLY) $(HOST.COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
 
 # Some object files are given a prefix, to disambiguate
 # them from objects of the same name built for the VM.
 $(OUTDIR)/adlc-%.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
-	$(QUIETLY) $(HOST.COMPILE.CC) -o $@ $< $(COMPILE_DONE)
+	$(QUIETLY) $(HOST.COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
 
 # #########################################################################
 
--- a/make/linux/makefiles/buildtree.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/linux/makefiles/buildtree.make	Tue Apr 10 10:42:34 2012 -0700
@@ -55,6 +55,7 @@
 # The makefiles are split this way so that "make foo" will run faster by not
 # having to read the dependency files for the vm.
 
+-include $(SPEC)
 include $(GAMMADIR)/make/scm.make
 include $(GAMMADIR)/make/altsrc.make
 
@@ -65,7 +66,7 @@
 # For now, until the compiler is less wobbly:
 TESTFLAGS	= -Xbatch -showversion
 
-ifeq ($(ZERO_BUILD), true)
+ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
   PLATFORM_FILE = $(shell dirname $(shell dirname $(shell pwd)))/platform_zero
 else
   ifdef USE_SUNCC
@@ -244,6 +245,8 @@
 	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
 	    echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \
 	echo; \
+	[ -n "$(SPEC)" ] && \
+	    echo "include $(SPEC)"; \
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(COMPILER).make"; \
 	) > $@
@@ -326,11 +329,10 @@
 	$(BUILDTREE_COMMENT); \
 	[ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \
 	{ \
-	echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
 	echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \
 	} | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \
 	echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \
-	echo "export JAVA_HOME LD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \
+	echo "export JAVA_HOME CLASSPATH HOTSPOT_BUILD_USER"; \
 	) > $@
 
 env.csh: env.sh
@@ -384,7 +386,7 @@
 JAVA_FLAG/64 = -d64
 
 WRONG_DATA_MODE_MSG = \
-	echo "JAVA_HOME must point to $(DATA_MODE)bit JDK."
+	echo "JAVA_HOME must point to a $(DATA_MODE)-bit OpenJDK."
 
 CROSS_COMPILING_MSG = \
 	echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run."
@@ -392,19 +394,78 @@
 test_gamma:  $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
-	echo '#!/bin/sh'; \
+	echo "#!/bin/sh"; \
+	echo ""; \
 	$(BUILDTREE_COMMENT); \
-	echo '. ./env.sh'; \
-	echo "if [ \"$(CROSS_COMPILE_ARCH)\" != \"\" ]; then { $(CROSS_COMPILING_MSG); exit 0; }; fi"; \
-	echo "if [ -z \$$JAVA_HOME ]; then { $(NO_JAVA_HOME_MSG); exit 0; }; fi"; \
-	echo "if ! \$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion 2>&1 > /dev/null"; \
-	echo "then"; \
-	echo "  $(WRONG_DATA_MODE_MSG); exit 0;"; \
+	echo ""; \
+	echo "# Include environment settings for gamma run"; \
+	echo ""; \
+	echo ". ./env.sh"; \
+	echo ""; \
+	echo "# Do not run gamma test for cross compiles"; \
+	echo ""; \
+	echo "if [ -n \"$(CROSS_COMPILE_ARCH)\" ]; then "; \
+	echo "  $(CROSS_COMPILING_MSG)"; \
+	echo "  exit 0"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Make sure JAVA_HOME is set as it is required for gamma"; \
+	echo ""; \
+	echo "if [ -z \"\$${JAVA_HOME}\" ]; then "; \
+	echo "  $(NO_JAVA_HOME_MSG)"; \
+	echo "  exit 0"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Check JAVA_HOME version to be used for the test"; \
+	echo ""; \
+	echo "\$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion > /dev/null 2>&1"; \
+	echo "if [ \$$? -ne 0 ]; then "; \
+	echo "  $(WRONG_DATA_MODE_MSG)"; \
+	echo "  exit 0"; \
 	echo "fi"; \
+	echo ""; \
+	echo "# Use gamma_g if it exists"; \
+	echo ""; \
+	echo "GAMMA_PROG=gamma"; \
+	echo "if [ -f gamma_g ]; then "; \
+	echo "  GAMMA_PROG=gamma_g"; \
+	echo "fi"; \
+	echo ""; \
+	echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
+	echo "  # Ensure architecture for gamma and JAVA_HOME is the same."; \
+	echo "  # NOTE: gamma assumes the OpenJDK directory layout."; \
+	echo ""; \
+	echo "  GAMMA_ARCH=\"\`file \$${GAMMA_PROG} | awk '{print \$$NF}'\`\""; \
+	echo "  JVM_LIB=\"\$${JAVA_HOME}/jre/lib/libjava.$(LIBRARY_SUFFIX)\""; \
+	echo "  if [ ! -f \$${JVM_LIB} ]; then"; \
+	echo "    JVM_LIB=\"\$${JAVA_HOME}/jre/lib/$${LIBARCH}/libjava.$(LIBRARY_SUFFIX)\""; \
+	echo "  fi"; \
+	echo "  if [ ! -f \$${JVM_LIB} ] || [ -z \"\`file \$${JVM_LIB} | grep \$${GAMMA_ARCH}\`\" ]; then "; \
+	echo "    $(WRONG_DATA_MODE_MSG)"; \
+	echo "    exit 0"; \
+	echo "  fi"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Compile Queens program for test"; \
+	echo ""; \
 	echo "rm -f Queens.class"; \
 	echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \
-	echo '[ -f gamma_g ] && { gamma=gamma_g; }'; \
-	echo './$${gamma:-gamma} $(TESTFLAGS) Queens < /dev/null'; \
+	echo ""; \
+	echo "# Set library path solely for gamma launcher test run"; \
+	echo ""; \
+	echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
+	echo "export LD_LIBRARY_PATH"; \
+	echo "unset LD_LIBRARY_PATH_32"; \
+	echo "unset LD_LIBRARY_PATH_64"; \
+	echo ""; \
+	echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
+	echo "  DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/native_threads:\$${JAVA_HOME}/jre/lib:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
+	echo "  export DYLD_LIBRARY_PATH"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Use the gamma launcher and JAVA_HOME to run the test"; \
+	echo ""; \
+	echo "./\$${GAMMA_PROG} $(TESTFLAGS) Queens < /dev/null"; \
 	) > $@
 	$(QUIETLY) chmod +x $@
 
--- a/make/linux/makefiles/defs.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/linux/makefiles/defs.make	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
 endif
 
 # zero
-ifeq ($(ZERO_BUILD), true)
+ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
   ifeq ($(ARCH_DATA_MODEL), 64)
     MAKE_ARGS      += LP64=1
   endif
@@ -114,6 +114,18 @@
   HS_ARCH          = ppc
 endif
 
+# On 32 bit linux we build server and client, on 64 bit just server.
+ifeq ($(JVM_VARIANTS),)
+  ifeq ($(ARCH_DATA_MODEL), 32)
+    JVM_VARIANTS:=client,server
+    JVM_VARIANT_CLIENT:=true
+    JVM_VARIANT_SERVER:=true
+  else
+    JVM_VARIANTS:=server
+    JVM_VARIANT_SERVER:=true
+  endif
+endif
+
 # determine if HotSpot is being built in JDK6 or earlier version
 JDK6_OR_EARLIER=0
 ifeq "$(shell expr \( '$(JDK_MAJOR_VERSION)' != '' \& '$(JDK_MINOR_VERSION)' != '' \& '$(JDK_MICRO_VERSION)' != '' \))" "1"
@@ -193,22 +205,22 @@
 EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
 EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
 
-ifndef BUILD_CLIENT_ONLY
-EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
-EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
+EXPORT_LIST += $(EXPORT_JRE_LIB_DIR)/wb.jar
+
+ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
   ifneq ($(OBJCOPY),)
     EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.debuginfo
   endif
 endif
 
-ifneq ($(ZERO_BUILD), true)
-  ifeq ($(ARCH_DATA_MODEL), 32)
-    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
-    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
-    ifneq ($(OBJCOPY),)
-      EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
-    endif
-  endif
+ifeq ($(JVM_VARIANT_CLIENT),true)
+  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
+  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
+  ifneq ($(OBJCOPY),)
+    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
+  endif 
 endif
 
 # Serviceability Binaries
--- a/make/linux/makefiles/gcc.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/linux/makefiles/gcc.make	Tue Apr 10 10:42:34 2012 -0700
@@ -23,23 +23,28 @@
 #
 
 #------------------------------------------------------------------------
-# CC, CPP & AS
+# CC, CXX & AS
 
-# When cross-compiling the ALT_COMPILER_PATH points
-# to the cross-compilation toolset
-ifdef CROSS_COMPILE_ARCH
-CPP = $(ALT_COMPILER_PATH)/g++
-CC  = $(ALT_COMPILER_PATH)/gcc
-HOSTCPP = g++
-HOSTCC  = gcc
-else
-CPP = g++
-CC  = gcc
-HOSTCPP = $(CPP)
-HOSTCC  = $(CC)
+# If a SPEC is not set already, then use these defaults.
+ifeq ($(SPEC),)
+  # When cross-compiling the ALT_COMPILER_PATH points
+  # to the cross-compilation toolset
+  ifdef CROSS_COMPILE_ARCH
+    CXX = $(ALT_COMPILER_PATH)/g++
+    CC  = $(ALT_COMPILER_PATH)/gcc
+    HOSTCXX = g++
+    HOSTCC  = gcc
+    STRIP = $(ALT_COMPILER_PATH)/strip
+  else
+    CXX = g++
+    CC  = gcc
+    HOSTCXX = $(CXX)
+    HOSTCC  = $(CC)
+    STRIP = strip
+  endif
+  AS  = $(CC) -c
 endif
 
-AS  = $(CC) -c
 
 # -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only
 # prints the numbers (e.g. "2.95", "3.2.1")
@@ -67,10 +72,11 @@
 VM_PICFLAG/AOUT   =
 VM_PICFLAG        = $(VM_PICFLAG/$(LINK_INTO))
 
-ifeq ($(ZERO_BUILD), true)
+ifeq ($(JVM_VARIANT_ZERO), true)
 CFLAGS += $(LIBFFI_CFLAGS)
 endif
-ifeq ($(SHARK_BUILD), true)
+ifeq ($(JVM_VARIANT_ZEROSHARK), true)
+CFLAGS += $(LIBFFI_CFLAGS)
 CFLAGS += $(LLVM_CFLAGS)
 endif
 CFLAGS += $(VM_PICFLAG)
@@ -261,9 +267,3 @@
 ifdef MINIMIZE_RAM_USAGE
 CFLAGS += -DMINIMIZE_RAM_USAGE
 endif
-
-ifdef CROSS_COMPILE_ARCH
-  STRIP = $(ALT_COMPILER_PATH)/strip
-else
-  STRIP = strip
-endif
--- a/make/linux/makefiles/launcher.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/linux/makefiles/launcher.make	Tue Apr 10 10:42:34 2012 -0700
@@ -54,10 +54,10 @@
   LIBS_LAUNCHER             += -l$(JVM) $(LIBS)
 endif
 
-LINK_LAUNCHER = $(LINK.c)
+LINK_LAUNCHER = $(LINK.CC)
 
-LINK_LAUNCHER/PRE_HOOK  = $(LINK_LIB.CC/PRE_HOOK)
-LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CC/POST_HOOK)
+LINK_LAUNCHER/PRE_HOOK  = $(LINK_LIB.CXX/PRE_HOOK)
+LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CXX/POST_HOOK)
 
 LAUNCHER_OUT = launcher
 
@@ -73,11 +73,11 @@
 
 $(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR_SHARE)/%.c
 	$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
-	$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CPPFLAGS)
+	$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
 
 $(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR)/%.c
 	$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
-	$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CPPFLAGS)
+	$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
 
 $(LAUNCHER): $(OBJS) $(LIBJVM) $(LAUNCHER_MAPFILE)
 	$(QUIETLY) echo Linking launcher...
--- a/make/linux/makefiles/ppc.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/linux/makefiles/ppc.make	Tue Apr 10 10:42:34 2012 -0700
@@ -28,3 +28,6 @@
 # Must also specify if CPU is big endian
 CFLAGS += -DVM_BIG_ENDIAN
 
+ifdef E500V2
+ASFLAGS += -Wa,-mspe -Wa,--defsym -Wa,E500V2=1 
+endif
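The new E500V2 block passes SPE-specific options through the gcc driver to the assembler; each -Wa,<opt> is handed to as unchanged. A sketch of what the resulting assembler invocation amounts to (source file name hypothetical):

# gcc driver:  $(CC) -c lowlevel.s -Wa,-mspe -Wa,--defsym -Wa,E500V2=1
# as then sees:  as -mspe --defsym E500V2=1 lowlevel.s
# i.e. SPE instructions are accepted and the assembler symbol E500V2 is predefined.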
--- a/make/linux/makefiles/product.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/linux/makefiles/product.make	Tue Apr 10 10:42:34 2012 -0700
@@ -52,4 +52,4 @@
 
 # If we can create .debuginfo files, then the VM is stripped in vm.make
 # and this macro is not used.
-# LINK_LIB.CC/POST_HOOK += $(STRIP_$(LINK_INTO))
+# LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO))
--- a/make/linux/makefiles/rules.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/linux/makefiles/rules.make	Tue Apr 10 10:42:34 2012 -0700
@@ -27,52 +27,39 @@
 # Tell make that .cpp is important
 .SUFFIXES: .cpp $(SUFFIXES)
 
-# For now.  Other makefiles use CPP as the c++ compiler, but that should really
-# name the preprocessor.
-ifeq    ($(CCC),)
-CCC             = $(CPP)
-endif
-
 DEMANGLER       = c++filt
 DEMANGLE        = $(DEMANGLER) < $@ > .$@ && mv -f .$@ $@
 
-# $(CC) is the c compiler (cc/gcc), $(CCC) is the c++ compiler (CC/g++).
-C_COMPILE       = $(CC) $(CPPFLAGS) $(CFLAGS)
-CC_COMPILE      = $(CCC) $(CPPFLAGS) $(CFLAGS)
+# $(CC) is the c compiler (cc/gcc), $(CXX) is the c++ compiler (CC/g++).
+CC_COMPILE       = $(CC) $(CXXFLAGS) $(CFLAGS)
+CXX_COMPILE      = $(CXX) $(CXXFLAGS) $(CFLAGS)
 
 AS.S            = $(AS) $(ASFLAGS)
 
-COMPILE.c       = $(C_COMPILE) -c
-GENASM.c        = $(C_COMPILE) -S
-LINK.c          = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
-LINK_LIB.c      = $(CC) $(LFLAGS) $(SHARED_FLAG)
-PREPROCESS.c    = $(C_COMPILE) -E
+COMPILE.CC       = $(CC_COMPILE) -c
+GENASM.CC        = $(CC_COMPILE) -S
+LINK.CC          = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
+LINK_LIB.CC      = $(CC) $(LFLAGS) $(SHARED_FLAG)
+PREPROCESS.CC    = $(CC_COMPILE) -E
 
-COMPILE.CC      = $(CC_COMPILE) -c
-GENASM.CC       = $(CC_COMPILE) -S
-LINK.CC         = $(CCC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
-LINK_NOPROF.CC  = $(CCC) $(LFLAGS) $(AOUT_FLAGS)
-LINK_LIB.CC     = $(CCC) $(LFLAGS) $(SHARED_FLAG)
-PREPROCESS.CC   = $(CC_COMPILE) -E
+COMPILE.CXX      = $(CXX_COMPILE) -c
+GENASM.CXX       = $(CXX_COMPILE) -S
+LINK.CXX         = $(CXX) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
+LINK_NOPROF.CXX  = $(CXX) $(LFLAGS) $(AOUT_FLAGS)
+LINK_LIB.CXX     = $(CXX) $(LFLAGS) $(SHARED_FLAG)
+PREPROCESS.CXX   = $(CXX_COMPILE) -E
 
 # cross compiling the jvm with c2 requires host compilers to build
 # adlc tool
 
-HOST.CC_COMPILE      = $(HOSTCPP) $(CPPFLAGS) $(CFLAGS)
-HOST.COMPILE.CC      = $(HOST.CC_COMPILE) -c
-HOST.LINK_NOPROF.CC  = $(HOSTCPP) $(LFLAGS) $(AOUT_FLAGS)
+HOST.CXX_COMPILE      = $(HOSTCXX) $(CXXFLAGS) $(CFLAGS)
+HOST.COMPILE.CXX      = $(HOST.CXX_COMPILE) -c
+HOST.LINK_NOPROF.CXX  = $(HOSTCXX) $(LFLAGS) $(AOUT_FLAGS)
 
 
 # Effect of REMOVE_TARGET is to delete out-of-date files during "gnumake -k".
 REMOVE_TARGET   = rm -f $@
 
-# Synonyms.
-COMPILE.cpp     = $(COMPILE.CC)
-GENASM.cpp      = $(GENASM.CC)
-LINK.cpp        = $(LINK.CC)
-LINK_LIB.cpp    = $(LINK_LIB.CC)
-PREPROCESS.cpp  = $(PREPROCESS.CC)
-
 # Note use of ALT_BOOTDIR to explicitly specify location of java and
 # javac; this is the same environment variable used in the J2SE build
 # process for overriding the default spec, which is BOOTDIR.
@@ -161,14 +148,14 @@
 %.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
-	$(QUIETLY) $(COMPILE.CC) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
+	$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
 else
 %.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
 	$(QUIETLY) $(if $(findstring $@, $(NONPIC_OBJ_FILES)), \
-	   $(subst $(VM_PICFLAG), ,$(COMPILE.CC)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
-	   $(COMPILE.CC) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
+	   $(subst $(VM_PICFLAG), ,$(COMPILE.CXX)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
+	   $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
 endif
 
 %.o: %.s
@@ -178,13 +165,13 @@
 
 %.s: %.cpp
 	@echo Generating assembly for $<
-	$(QUIETLY) $(GENASM.CC) -o $@ $<
+	$(QUIETLY) $(GENASM.CXX) -o $@ $<
 	$(QUIETLY) $(DEMANGLE) $(COMPILE_DONE)
 
 # Intermediate files (for debugging macros)
 %.i: %.cpp
 	@echo Preprocessing $< to $@
-	$(QUIETLY) $(PREPROCESS.CC) $< > $@ $(COMPILE_DONE)
+	$(QUIETLY) $(PREPROCESS.CXX) $< > $@ $(COMPILE_DONE)
 
 #  Override gnumake built-in rules which do sccs get operations badly.
 #  (They put the checked out code in the current directory, not in the
--- a/make/linux/makefiles/saproc.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/linux/makefiles/saproc.make	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -75,6 +75,7 @@
 	fi
 	@echo Making SA debugger back-end...
 	$(QUIETLY) $(CC) -D$(BUILDARCH) -D_GNU_SOURCE                   \
+		   -D_FILE_OFFSET_BITS=64                               \
                    $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG)     \
 	           -I$(SASRCDIR)                                        \
 	           -I$(GENERATED)                                       \
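The added -D_FILE_OFFSET_BITS=64 switches 32-bit glibc builds of the SA back-end to the 64-bit file API (off_t, open, mmap and friends become 64-bit capable), presumably so the serviceability agent can handle core files larger than 2 GB. A quick way to confirm the effect of the define on a 32-bit target, assuming a host gcc (output path illustrative):

#   echo 'int main(void){ return sizeof(off_t) == 8 ? 0 : 1; }' | \
#     gcc -D_FILE_OFFSET_BITS=64 -include sys/types.h -x c - -o /tmp/off_t_check && /tmp/off_t_check
#   (exits 0 when off_t is 8 bytes)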
--- a/make/linux/makefiles/sparcWorks.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/linux/makefiles/sparcWorks.make	Tue Apr 10 10:42:34 2012 -0700
@@ -23,14 +23,17 @@
 #
 
 #------------------------------------------------------------------------
-# CC, CPP & AS
+# CC, CXX & AS
 
-CPP = CC
-CC  = cc
-AS  = $(CC) -c
+# If a SPEC is not set already, then use these defaults.
+ifeq ($(SPEC),)
+  CXX = CC
+  CC  = cc
+  AS  = $(CC) -c
 
-HOSTCPP = $(CPP)
-HOSTCC  = $(CC)
+  HOSTCXX = $(CXX)
+  HOSTCC  = $(CC)
+endif
 
 ARCHFLAG = $(ARCHFLAG/$(BUILDARCH))
 ARCHFLAG/i486    = -m32
--- a/make/linux/makefiles/top.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/linux/makefiles/top.make	Tue Apr 10 10:42:34 2012 -0700
@@ -115,8 +115,8 @@
 	@$(UpdatePCH)
 	@$(MAKE) -f vm.make $(MFLAGS-adjusted)
 
-install: the_vm
-	@$(MAKE) -f vm.make install
+install gamma: the_vm
+	@$(MAKE) -f vm.make $@
 
 # next rules support "make foo.[ois]"
 
--- a/make/linux/makefiles/vm.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/linux/makefiles/vm.make	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
 -include $(DEP_DIR)/*.d
 
 # read machine-specific adjustments (%%% should do this via buildtree.make?)
-ifeq ($(ZERO_BUILD), true)
+ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
   include $(MAKEFILES_DIR)/zeroshark.make
 else
   include $(MAKEFILES_DIR)/$(BUILDARCH).make
@@ -88,16 +88,20 @@
 BUILD_USER    = -DHOTSPOT_BUILD_USER="\"$(HOTSPOT_BUILD_USER)\""
 VM_DISTRO     = -DHOTSPOT_VM_DISTRO="\"$(HOTSPOT_VM_DISTRO)\""
 
-CPPFLAGS =           \
+CXXFLAGS =           \
   ${SYSDEFS}         \
   ${INCLUDES}        \
   ${BUILD_VERSION}   \
   ${BUILD_TARGET}    \
   ${BUILD_USER}      \
   ${HS_LIB_ARCH}     \
-  ${JRE_VERSION}     \
   ${VM_DISTRO}
 
+# This is VERY important! The version define must only be supplied to vm_version.o
+# If not, ccache will not re-use the cache at all, since the version string might contain
+# a time and date. 
+vm_version.o: CXXFLAGS += ${JRE_VERSION}
+
 ifndef JAVASE_EMBEDDED
 CFLAGS += -DINCLUDE_TRACE
 endif
@@ -232,7 +236,7 @@
 vm.def: $(Res_Files) $(Obj_Files)
 	sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@
 
-ifeq ($(SHARK_BUILD), true)
+ifeq ($(JVM_VARIANT_ZEROSHARK), true)
   STATIC_CXX = false
 else
   ifeq ($(ZERO_LIBARCH), ppc64)
@@ -264,21 +268,21 @@
 
   LIBS_VM                  += $(LIBS)
 endif
-ifeq ($(ZERO_BUILD), true)
+ifeq ($(JVM_VARIANT_ZERO), true)
   LIBS_VM += $(LIBFFI_LIBS)
 endif
-ifeq ($(SHARK_BUILD), true)
+ifeq ($(JVM_VARIANT_ZEROSHARK), true)
+  LIBS_VM   += $(LIBFFI_LIBS) $(LLVM_LIBS)
   LFLAGS_VM += $(LLVM_LDFLAGS)
-  LIBS_VM   += $(LLVM_LIBS)
 endif
 
-LINK_VM = $(LINK_LIB.c)
+LINK_VM = $(LINK_LIB.CC)
 
 # rule for building precompiled header
 $(PRECOMPILED_HEADER):
 	$(QUIETLY) echo Generating precompiled header $@
 	$(QUIETLY) mkdir -p $(PRECOMPILED_HEADER_DIR)
-	$(QUIETLY) $(COMPILE.CC) $(DEPFLAGS) -x c++-header $(PRECOMPILED_HEADER_SRC) -o $@ $(COMPILE_DONE)
+	$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -x c++-header $(PRECOMPILED_HEADER_SRC) -o $@ $(COMPILE_DONE)
 
 # making the library:
 
@@ -308,10 +312,10 @@
 $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
 	$(QUIETLY) {                                                    \
 	    echo Linking vm...;                                         \
-	    $(LINK_LIB.CC/PRE_HOOK)                                     \
+	    $(LINK_LIB.CXX/PRE_HOOK)                                     \
 	    $(LINK_VM) $(LD_SCRIPT_FLAG)                                \
 		       $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM);       \
-	    $(LINK_LIB.CC/POST_HOOK)                                    \
+	    $(LINK_LIB.CXX/POST_HOOK)                                    \
 	    rm -f $@.1; ln -s $@ $@.1;                                  \
 	    [ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
             if [ \"$(CROSS_COMPILE_ARCH)\" = \"\" ] ; then                    \
@@ -364,9 +368,12 @@
 # Serviceability agent
 include $(MAKEFILES_DIR)/saproc.make
 
+# Whitebox testing API
+include $(MAKEFILES_DIR)/wb.make
+
 #----------------------------------------------------------------------
 
-build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC)
+build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC) $(WB_JAR)
 
 install: install_jvm install_jsig install_saproc
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/linux/makefiles/wb.make	Tue Apr 10 10:42:34 2012 -0700
@@ -0,0 +1,46 @@
+#
+# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Rules to build whitebox testing library, used by vm.make
+WB = wb
+
+WBSRCDIR = $(GAMMADIR)/src/share/tools/whitebox
+
+WB_JAR = $(GENERATED)/$(WB).jar
+
+WB_JAVA_SRCS = $(shell find $(WBSRCDIR) -name '*.java')
+WB_JAVA_CLASSDIR = $(GENERATED)/wb/classes
+
+WB_JAVA_CLASSES  = $(patsubst $(WBSRCDIR)/%,$(WB_JAVA_CLASSDIR)/%, \
+	$(patsubst %.java,%.class,$(WB_JAVA_SRCS)))
+
+$(WB_JAVA_CLASSDIR)/%.class: $(WBSRCDIR)/%.java $(WB_JAVA_CLASSDIR)
+	$(REMOTE) $(COMPILE.JAVAC) -nowarn -d $(WB_JAVA_CLASSDIR) $<
+
+$(WB_JAR): $(WB_JAVA_CLASSES)
+	$(QUIETLY) $(REMOTE) $(RUN.JAR) cf $@ -C $(WB_JAVA_CLASSDIR)/ .
+
+$(WB_JAVA_CLASSDIR):
+	$(QUIETLY) mkdir -p $@
+
--- a/make/solaris/makefiles/adlc.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/solaris/makefiles/adlc.make	Tue Apr 10 10:42:34 2012 -0700
@@ -62,10 +62,10 @@
 INCLUDES += $(Src_Dirs_I:%=-I%)
 
 # set flags for adlc compilation
-CPPFLAGS = $(SYSDEFS) $(INCLUDES)
+CXXFLAGS = $(SYSDEFS) $(INCLUDES)
 
 # Force assertions on.
-CPPFLAGS += -DASSERT
+CXXFLAGS += -DASSERT
 
 ifndef USE_GCC
   # We need libCstd.so for adlc 
@@ -130,7 +130,7 @@
 
 $(EXEC) : $(OBJECTS)
 	@echo Making adlc
-	$(QUIETLY) $(LINK_NOPROF.CC) -o $(EXEC) $(OBJECTS)
+	$(QUIETLY) $(LINK_NOPROF.CXX) -o $(EXEC) $(OBJECTS)
 
 # Random dependencies:
 $(OBJECTS): opcodes.hpp classes.hpp adlc.hpp adlcVMDeps.hpp adlparse.hpp archDesc.hpp arena.hpp dict2.hpp filebuff.hpp forms.hpp formsopt.hpp formssel.hpp
@@ -228,14 +228,14 @@
 $(OUTDIR)/%.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
-	$(QUIETLY) $(COMPILE.CC) -o $@ $< $(COMPILE_DONE)
+	$(QUIETLY) $(COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
 
 # Some object files are given a prefix, to disambiguate
 # them from objects of the same name built for the VM.
 $(OUTDIR)/adlc-%.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
-	$(QUIETLY) $(COMPILE.CC) -o $@ $< $(COMPILE_DONE)
+	$(QUIETLY) $(COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
 
 # #########################################################################
 
--- a/make/solaris/makefiles/buildtree.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/solaris/makefiles/buildtree.make	Tue Apr 10 10:42:34 2012 -0700
@@ -55,6 +55,7 @@
 # The makefiles are split this way so that "make foo" will run faster by not
 # having to read the dependency files for the vm.
 
+-include $(SPEC)
 include $(GAMMADIR)/make/scm.make
 include $(GAMMADIR)/make/altsrc.make
 
@@ -118,7 +119,7 @@
 BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
 
 BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \
-        env.ksh env.csh jdkpath.sh .dbxrc test_gamma
+        env.sh env.csh jdkpath.sh .dbxrc test_gamma
 
 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
 	ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
@@ -237,6 +238,8 @@
 	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
 	    echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \
 	echo; \
+	[ -n "$(SPEC)" ] && \
+	    echo "include $(SPEC)"; \
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(COMPILER).make"; \
 	) > $@
@@ -313,22 +316,19 @@
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
 	) > $@
 
-env.ksh: $(BUILDTREE_MAKE)
+env.sh: $(BUILDTREE_MAKE)
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
 	$(BUILDTREE_COMMENT); \
 	[ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \
 	{ \
-	echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
-	echo "unset LD_LIBRARY_PATH_32"; \
-	echo "unset LD_LIBRARY_PATH_64"; \
 	echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \
 	} | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \
 	echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \
 	echo "export JAVA_HOME LD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \
 	) > $@
 
-env.csh: env.ksh
+env.csh: env.sh
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
 	$(BUILDTREE_COMMENT); \
@@ -384,23 +384,86 @@
 JAVA_FLAG/64 = -d64
 
 WRONG_DATA_MODE_MSG = \
-	echo "JAVA_HOME must point to $(DATA_MODE)bit JDK."
+	echo "JAVA_HOME must point to a $(DATA_MODE)-bit OpenJDK."
+
+CROSS_COMPILING_MSG = \
+	echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run."
 
 test_gamma:  $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
-	echo '#!/bin/ksh'; \
+	echo "#!/bin/sh"; \
+	echo ""; \
 	$(BUILDTREE_COMMENT); \
-	echo '. ./env.ksh'; \
-	echo "if [ -z \$$JAVA_HOME ]; then { $(NO_JAVA_HOME_MSG); exit 0; }; fi"; \
-	echo "if ! \$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion 2>&1 > /dev/null"; \
-	echo "then"; \
-	echo "  $(WRONG_DATA_MODE_MSG); exit 0;"; \
+	echo ""; \
+	echo "# Include environment settings for gamma run"; \
+	echo ""; \
+	echo ". ./env.sh"; \
+	echo ""; \
+	echo "# Do not run gamma test for cross compiles"; \
+	echo ""; \
+	echo "if [ -n \"$(CROSS_COMPILE_ARCH)\" ]; then "; \
+	echo "  $(CROSS_COMPILING_MSG)"; \
+	echo "  exit 0"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Make sure JAVA_HOME is set as it is required for gamma"; \
+	echo ""; \
+	echo "if [ -z \"\$${JAVA_HOME}\" ]; then "; \
+	echo "  $(NO_JAVA_HOME_MSG)"; \
+	echo "  exit 0"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Check JAVA_HOME version to be used for the test"; \
+	echo ""; \
+	echo "\$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion > /dev/null 2>&1"; \
+	echo "if [ \$$? -ne 0 ]; then "; \
+	echo "  $(WRONG_DATA_MODE_MSG)"; \
+	echo "  exit 0"; \
 	echo "fi"; \
+	echo ""; \
+	echo "# Use gamma_g if it exists"; \
+	echo ""; \
+	echo "GAMMA_PROG=gamma"; \
+	echo "if [ -f gamma_g ]; then "; \
+	echo "  GAMMA_PROG=gamma_g"; \
+	echo "fi"; \
+	echo ""; \
+	echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
+	echo "  # Ensure architecture for gamma and JAVA_HOME is the same."; \
+	echo "  # NOTE: gamma assumes the OpenJDK directory layout."; \
+	echo ""; \
+	echo "  GAMMA_ARCH=\"\`file \$${GAMMA_PROG} | awk '{print \$$NF}'\`\""; \
+	echo "  JVM_LIB=\"\$${JAVA_HOME}/jre/lib/libjava.$(LIBRARY_SUFFIX)\""; \
+	echo "  if [ ! -f \$${JVM_LIB} ]; then"; \
+	echo "    JVM_LIB=\"\$${JAVA_HOME}/jre/lib/$${LIBARCH}/libjava.$(LIBRARY_SUFFIX)\""; \
+	echo "  fi"; \
+	echo "  if [ ! -f \$${JVM_LIB} ] || [ -z \"\`file \$${JVM_LIB} | grep \$${GAMMA_ARCH}\`\" ]; then "; \
+	echo "    $(WRONG_DATA_MODE_MSG)"; \
+	echo "    exit 0"; \
+	echo "  fi"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Compile Queens program for test"; \
+	echo ""; \
 	echo "rm -f Queens.class"; \
 	echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \
-	echo '[ -f gamma_g ] && { gamma=gamma_g; }'; \
-	echo './$${gamma:-gamma} $(TESTFLAGS) Queens < /dev/null'; \
+	echo ""; \
+	echo "# Set library path solely for gamma launcher test run"; \
+	echo ""; \
+	echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
+	echo "export LD_LIBRARY_PATH"; \
+	echo "unset LD_LIBRARY_PATH_32"; \
+	echo "unset LD_LIBRARY_PATH_64"; \
+	echo ""; \
+	echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
+	echo "  DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/native_threads:\$${JAVA_HOME}/jre/lib:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
+	echo "  export DYLD_LIBRARY_PATH"; \
+	echo "fi"; \
+	echo ""; \
+	echo "# Use the gamma launcher and JAVA_HOME to run the test"; \
+	echo ""; \
+	echo "./\$${GAMMA_PROG} $(TESTFLAGS) Queens < /dev/null"; \
 	) > $@
 	$(QUIETLY) chmod +x $@
 
--- a/make/solaris/makefiles/defs.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/solaris/makefiles/defs.make	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -59,6 +59,18 @@
   endif
 endif
 
+# On 32-bit Solaris we build server and client, on 64-bit just server.
+ifeq ($(JVM_VARIANTS),)
+  ifeq ($(ARCH_DATA_MODEL), 32)
+    JVM_VARIANTS:=client,server
+    JVM_VARIANT_CLIENT:=true
+    JVM_VARIANT_SERVER:=true
+  else
+    JVM_VARIANTS:=server
+    JVM_VARIANT_SERVER:=true
+  endif
+endif
+
 # determine if HotSpot is being built in JDK6 or earlier version
 JDK6_OR_EARLIER=0
 ifeq "$(shell expr \( '$(JDK_MAJOR_VERSION)' != '' \& '$(JDK_MINOR_VERSION)' != '' \& '$(JDK_MICRO_VERSION)' != '' \))" "1"
@@ -148,40 +160,42 @@
   EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
 endif
 
+EXPORT_LIST += $(EXPORT_JRE_LIB_DIR)/wb.jar
+
 EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
 EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
 
-ifneq ($(BUILD_CLIENT_ONLY),true)
-EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
-EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
-EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_db.$(LIBRARY_SUFFIX)
-EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_dtrace.$(LIBRARY_SUFFIX)
+ifeq ($(JVM_VARIANT_SERVER),true)
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_db.$(LIBRARY_SUFFIX)
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_dtrace.$(LIBRARY_SUFFIX)
+  ifeq ($(ARCH_DATA_MODEL),32)
+    EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.$(LIBRARY_SUFFIX)
+    EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.$(LIBRARY_SUFFIX)
+  endif
   ifneq ($(OBJCOPY),)
     EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.debuginfo
     EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_db.debuginfo
     EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_dtrace.debuginfo
   endif
 endif
-ifeq ($(ARCH_DATA_MODEL), 32)
+ifeq ($(JVM_VARIANT_CLIENT),true)
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX) 
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.$(LIBRARY_SUFFIX) 
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.$(LIBRARY_SUFFIX)
-  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.$(LIBRARY_SUFFIX)
-  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.$(LIBRARY_SUFFIX)
+  ifeq ($(ARCH_DATA_MODEL),32)
+    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.$(LIBRARY_SUFFIX)
+    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.$(LIBRARY_SUFFIX)
+  endif
   ifneq ($(OBJCOPY),)
     EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
     EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.debuginfo
     EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.debuginfo
-    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.debuginfo
-    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.debuginfo
-  endif
-  ifneq ($(BUILD_CLIENT_ONLY), true)
-    EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.$(LIBRARY_SUFFIX)
-    EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.$(LIBRARY_SUFFIX)
-    ifneq ($(OBJCOPY),)
-      EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.debuginfo
-      EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.debuginfo
+    ifeq ($(ARCH_DATA_MODEL),32)
+      EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.debuginfo
+      EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.debuginfo
     endif
   endif
 endif
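The JVM_VARIANTS defaults and the variant-keyed export lists above only apply when nothing has set JVM_VARIANTS beforehand; a configure-generated SPEC that presets it is also expected to set the matching JVM_VARIANT_* flags, since the ifeq guard skips the whole default block. Illustrative invocations (hypothetical, not part of the patch):

# 32-bit build, no preset: defaults to client,server and exports both variant dirs
make ARCH_DATA_MODEL=32
# 64-bit build, no preset: defaults to server only
make ARCH_DATA_MODEL=64
# explicit selection from a spec or the command line: the default block is skipped entirely
make JVM_VARIANTS=server JVM_VARIANT_SERVER=true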
--- a/make/solaris/makefiles/dtrace.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/solaris/makefiles/dtrace.make	Tue Apr 10 10:42:34 2012 -0700
@@ -150,11 +150,11 @@
 
 lib$(GENOFFS).so: $(DTRACE_SRCDIR)/$(GENOFFS).cpp $(DTRACE_SRCDIR)/$(GENOFFS).h \
                   $(LIBJVM.o)
-	$(QUIETLY) $(CCC) $(CPPFLAGS) $(GENOFFS_CFLAGS) $(SHARED_FLAG) $(PICFLAG) \
+	$(QUIETLY) $(CXX) $(CXXFLAGS) $(GENOFFS_CFLAGS) $(SHARED_FLAG) $(PICFLAG) \
 		 $(LFLAGS_GENOFFS) -o $@ $(DTRACE_SRCDIR)/$(GENOFFS).cpp -lc
 
 $(GENOFFS): $(DTRACE_SRCDIR)/$(GENOFFS)Main.c lib$(GENOFFS).so
-	$(QUIETLY) $(LINK.CC) -z nodefs -o $@ $(DTRACE_SRCDIR)/$(GENOFFS)Main.c \
+	$(QUIETLY) $(LINK.CXX) -z nodefs -o $@ $(DTRACE_SRCDIR)/$(GENOFFS)Main.c \
 		./lib$(GENOFFS).so
 
 CONDITIONALLY_UPDATE_JVMOFFS_TARGET = \
@@ -178,7 +178,7 @@
 	$(QUIETLY) $(CONDITIONALLY_UPDATE_JVMOFFS_TARGET)
 
 $(JVMOFFS.o): $(JVMOFFS).h $(JVMOFFS).cpp 
-	$(QUIETLY) $(CCC) -c -I. -o $@ $(ARCHFLAG) -D$(TYPE) $(JVMOFFS).cpp
+	$(QUIETLY) $(CXX) -c -I. -o $@ $(ARCHFLAG) -D$(TYPE) $(JVMOFFS).cpp
 
 $(LIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_DB_MAPFILE)
 	@echo Making $@
--- a/make/solaris/makefiles/gcc.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/solaris/makefiles/gcc.make	Tue Apr 10 10:42:34 2012 -0700
@@ -23,11 +23,15 @@
 #
 
 #------------------------------------------------------------------------
-# CC, CPP & AS
+# CC, CXX & AS
 
-CPP = g++
-CC  = gcc
-AS  = $(CC) -c
+# If a SPEC is not set already, then use these defaults.
+ifeq ($(SPEC),)
+  CXX = g++
+  CC  = gcc
+  AS  = $(CC) -c
+  MCS = /usr/ccs/bin/mcs
+endif
 
 Compiler = gcc
 
@@ -36,12 +40,12 @@
 CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1)
 CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2)
 
-# Check for the versions of C++ and C compilers ($CPP and $CC) used. 
+# Check for the versions of C++ and C compilers ($CXX and $CC) used. 
 
 # Get the last thing on the line that looks like x.x+ (x is a digit).
 COMPILER_REV := \
-$(shell $(CPP) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1)
-C_COMPILER_REV := \
+$(shell $(CXX) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1)
+CC_COMPILER_REV := \
 $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2)
 
 
@@ -193,5 +197,3 @@
 ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),) 
 DEBUG_CFLAGS += -gstabs 
 endif 
-
-MCS = /usr/ccs/bin/mcs
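gcc.make now falls back to g++/gcc/as/mcs only when no SPEC is in effect, matching the -include $(SPEC) hook added to buildtree.make earlier in this change. A minimal sketch, with hypothetical paths, of how a configure-generated spec file would take precedence over these defaults:

# spec.gmk (hypothetical, written by configure)
CC  := /opt/gcc/bin/gcc
CXX := /opt/gcc/bin/g++
AS  := /opt/gcc/bin/gcc -c
MCS := /usr/ccs/bin/mcs

# invoking the build with the spec; the ifeq ($(SPEC),) block above is then skipped
make SPEC=/path/to/spec.gmk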
--- a/make/solaris/makefiles/launcher.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/solaris/makefiles/launcher.make	Tue Apr 10 10:42:34 2012 -0700
@@ -52,10 +52,10 @@
   LIBS_LAUNCHER             += -l$(JVM) $(LIBS)
 endif
 
-LINK_LAUNCHER = $(LINK.CC)
+LINK_LAUNCHER = $(LINK.CXX)
 
-LINK_LAUNCHER/PRE_HOOK  = $(LINK_LIB.CC/PRE_HOOK)
-LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CC/POST_HOOK)
+LINK_LAUNCHER/PRE_HOOK  = $(LINK_LIB.CXX/PRE_HOOK)
+LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CXX/POST_HOOK)
 
 ifeq ("${Platform_compiler}", "sparcWorks")
 # Enable the following LAUNCHERFLAGS addition if you need to compare the
@@ -86,11 +86,11 @@
 
 $(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR_SHARE)/%.c
 	$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
-	$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CPPFLAGS)
+	$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
 
 $(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR)/%.c
 	$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
-	$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CPPFLAGS)
+	$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
 
 $(LAUNCHER): $(OBJS) $(LIBJVM) $(LAUNCHER_MAPFILE)
 ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)
--- a/make/solaris/makefiles/product.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/solaris/makefiles/product.make	Tue Apr 10 10:42:34 2012 -0700
@@ -70,7 +70,7 @@
 
 # If we can create .debuginfo files, then the VM is stripped in vm.make
 # and this macro is not used.
-# LINK_LIB.CC/POST_HOOK += $(STRIP_LIB.CC/POST_HOOK)
+# LINK_LIB.CXX/POST_HOOK += $(STRIP_LIB.CXX/POST_HOOK)
 
 G_SUFFIX =
 SYSDEFS += -DPRODUCT
--- a/make/solaris/makefiles/rules.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/solaris/makefiles/rules.make	Tue Apr 10 10:42:34 2012 -0700
@@ -27,44 +27,31 @@
 # Tell make that .cpp is important
 .SUFFIXES: .cpp $(SUFFIXES)
 
-# For now.  Other makefiles use CPP as the c++ compiler, but that should really
-# name the preprocessor.
-ifeq    ($(CCC),)
-CCC             = $(CPP)
-endif
-
 DEMANGLER       = c++filt
 DEMANGLE        = $(DEMANGLER) < $@ > .$@ && mv -f .$@ $@
 
-# $(CC) is the c compiler (cc/gcc), $(CCC) is the c++ compiler (CC/g++).
-C_COMPILE       = $(CC) $(CPPFLAGS) $(CFLAGS)
-CC_COMPILE      = $(CCC) $(CPPFLAGS) $(CFLAGS)
+# $(CC) is the c compiler (cc/gcc), $(CXX) is the c++ compiler (CC/g++).
+CC_COMPILE       = $(CC) $(CXXFLAGS) $(CFLAGS)
+CXX_COMPILE      = $(CXX) $(CXXFLAGS) $(CFLAGS)
 
 AS.S            = $(AS) $(ASFLAGS)
 
-COMPILE.c       = $(C_COMPILE) -c
-GENASM.c        = $(C_COMPILE) -S
-LINK.c          = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
-LINK_LIB.c      = $(CC) $(LFLAGS) $(SHARED_FLAG)
-PREPROCESS.c    = $(C_COMPILE) -E
+COMPILE.CC       = $(CC_COMPILE) -c
+GENASM.CC        = $(CC_COMPILE) -S
+LINK.CC          = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
+LINK_LIB.CC      = $(CC) $(LFLAGS) $(SHARED_FLAG)
+PREPROCESS.CC    = $(CC_COMPILE) -E
 
-COMPILE.CC      = $(CC_COMPILE) -c
-GENASM.CC       = $(CC_COMPILE) -S
-LINK.CC         = $(CCC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
-LINK_NOPROF.CC  = $(CCC) $(LFLAGS) $(AOUT_FLAGS)
-LINK_LIB.CC     = $(CCC) $(LFLAGS) $(SHARED_FLAG)
-PREPROCESS.CC   = $(CC_COMPILE) -E
+COMPILE.CXX      = $(CXX_COMPILE) -c
+GENASM.CXX       = $(CXX_COMPILE) -S
+LINK.CXX         = $(CXX) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
+LINK_NOPROF.CXX  = $(CXX) $(LFLAGS) $(AOUT_FLAGS)
+LINK_LIB.CXX     = $(CXX) $(LFLAGS) $(SHARED_FLAG)
+PREPROCESS.CXX   = $(CXX_COMPILE) -E
 
 # Effect of REMOVE_TARGET is to delete out-of-date files during "gnumake -k".
 REMOVE_TARGET   = rm -f $@
 
-# Synonyms.
-COMPILE.cpp     = $(COMPILE.CC)
-GENASM.cpp      = $(GENASM.CC)
-LINK.cpp        = $(LINK.CC)
-LINK_LIB.cpp    = $(LINK_LIB.CC)
-PREPROCESS.cpp  = $(PREPROCESS.CC)
-
 # Note use of ALT_BOOTDIR to explicitly specify location of java and
 # javac; this is the same environment variable used in the J2SE build
 # process for overriding the default spec, which is BOOTDIR.
@@ -153,14 +140,14 @@
 %.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
-	$(QUIETLY) $(COMPILE.CC) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
+	$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
 else
 %.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
 	$(QUIETLY) $(if $(findstring $@, $(NONPIC_OBJ_FILES)), \
-	   $(subst $(VM_PICFLAG), ,$(COMPILE.CC)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
-	   $(COMPILE.CC) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
+	   $(subst $(VM_PICFLAG), ,$(COMPILE.CXX)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
+	   $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
 endif
 
 %.o: %.s
@@ -170,13 +157,13 @@
 
 %.s: %.cpp
 	@echo Generating assembly for $<
-	$(QUIETLY) $(GENASM.CC) -o $@ $<
+	$(QUIETLY) $(GENASM.CXX) -o $@ $<
 	$(QUIETLY) $(DEMANGLE) $(COMPILE_DONE)
 
 # Intermediate files (for debugging macros)
 %.i: %.cpp
 	@echo Preprocessing $< to $@
-	$(QUIETLY) $(PREPROCESS.CC) $< > $@ $(COMPILE_DONE)
+	$(QUIETLY) $(PREPROCESS.CXX) $< > $@ $(COMPILE_DONE)
 
 #  Override gnumake built-in rules which do sccs get operations badly.
 #  (They put the checked out code in the current directory, not in the
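The rules.make rewrite above retires the old CPP/CCC naming: $(CC)-based macros now carry the .CC suffix, $(CXX)-based ones the .CXX suffix, and the .cpp synonyms are dropped. With the gcc toolchain from gcc.make, the new macros expand roughly as follows (flag values left symbolic):

CC  := gcc
CXX := g++
# per rules.make: CC_COMPILE = $(CC) $(CXXFLAGS) $(CFLAGS), CXX_COMPILE = $(CXX) $(CXXFLAGS) $(CFLAGS)
#   COMPILE.CC   -> gcc $(CXXFLAGS) $(CFLAGS) -c
#   COMPILE.CXX  -> g++ $(CXXFLAGS) $(CFLAGS) -c
#   LINK.CXX     -> g++ $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
#   LINK_LIB.CXX -> g++ $(LFLAGS) $(SHARED_FLAG)
# so the %.o: %.cpp rule effectively runs
#   g++ $(CXXFLAGS) $(CFLAGS) -c $(DEPFLAGS) -o foo.o foo.cpp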
--- a/make/solaris/makefiles/saproc.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/solaris/makefiles/saproc.make	Tue Apr 10 10:42:34 2012 -0700
@@ -93,7 +93,7 @@
 	  exit 1; \
 	fi
 	@echo Making SA debugger back-end...
-	$(QUIETLY) $(CPP)                                               \
+	$(QUIETLY) $(CXX)                                               \
                    $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG)     \
 	           -I$(SASRCDIR)                                        \
 	           -I$(GENERATED)                                       \
--- a/make/solaris/makefiles/sparcWorks.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/solaris/makefiles/sparcWorks.make	Tue Apr 10 10:42:34 2012 -0700
@@ -22,38 +22,42 @@
 #  
 #
 
-# Compiler-specific flags for sparcworks.
-
-# tell make which C and C++ compilers to use
-CC	= cc
-CPP	= CC
+# If a SPEC is not set already, then use these defaults.
+ifeq ($(SPEC),)
+  # Compiler-specific flags for sparcworks.
+  CC	= cc
+  CXX	= CC
 
-# Note that this 'as' is an older version of the Sun Studio 'fbe', and will
-#   use the older style options. The 'fbe' options will match 'cc' and 'CC'.
-AS	= /usr/ccs/bin/as
+  # Note that this 'as' is an older version of the Sun Studio 'fbe', and will
+  #   use the older style options. The 'fbe' options will match 'cc' and 'CC'.
+  AS	= /usr/ccs/bin/as
 
-NM	= /usr/ccs/bin/nm
-NAWK    = /bin/nawk
+  NM    = /usr/ccs/bin/nm
+  NAWK  = /bin/nawk
+
+  MCS	= /usr/ccs/bin/mcs
+  STRIP	= /usr/ccs/bin/strip
+endif
 
 REORDER_FLAG = -xF
 
-# Check for the versions of C++ and C compilers ($CPP and $CC) used. 
+# Check for the versions of C++ and C compilers ($CXX and $CC) used. 
 
 # Get the last thing on the line that looks like x.x+ (x is a digit).
 COMPILER_REV := \
-$(shell $(CPP) -V 2>&1 | sed -n 's/^.*[ ,\t]C++[ ,\t]\([1-9]\.[0-9][0-9]*\).*/\1/p')
-C_COMPILER_REV := \
+$(shell $(CXX) -V 2>&1 | sed -n 's/^.*[ ,\t]C++[ ,\t]\([1-9]\.[0-9][0-9]*\).*/\1/p')
+CC_COMPILER_REV := \
 $(shell $(CC) -V 2>&1 | sed -n 's/^.*[ ,\t]C[ ,\t]\([1-9]\.[0-9][0-9]*\).*/\1/p')
 
 # Pick which compiler is validated
 ifeq ($(JRE_RELEASE_VER),1.6.0)
   # Validated compiler for JDK6 is SS11 (5.8)
   VALIDATED_COMPILER_REVS   := 5.8
-  VALIDATED_C_COMPILER_REVS := 5.8
+  VALIDATED_CC_COMPILER_REVS := 5.8
 else
   # Validated compiler for JDK7 is SS12 update 1 + patches (5.10)
   VALIDATED_COMPILER_REVS   := 5.10
-  VALIDATED_C_COMPILER_REVS := 5.10
+  VALIDATED_CC_COMPILER_REVS := 5.10
 endif
 
 # Warning messages about not using the above validated versions
@@ -67,13 +71,13 @@
 	warning.)
 endif
 
-ENFORCE_C_COMPILER_REV${ENFORCE_C_COMPILER_REV} := $(strip ${VALIDATED_C_COMPILER_REVS})
-ifeq ($(filter ${ENFORCE_C_COMPILER_REV},${C_COMPILER_REV}),)
-PRINTABLE_C_REVS := $(subst $(shell echo ' '), or ,${ENFORCE_C_COMPILER_REV})
+ENFORCE_CC_COMPILER_REV${ENFORCE_CC_COMPILER_REV} := $(strip ${VALIDATED_CC_COMPILER_REVS})
+ifeq ($(filter ${ENFORCE_CC_COMPILER_REV},${CC_COMPILER_REV}),)
+PRINTABLE_C_REVS := $(subst $(shell echo ' '), or ,${ENFORCE_CC_COMPILER_REV})
 dummy_var_to_enforce_c_compiler_rev := $(shell \
-	echo >&2 WARNING: You are using cc version ${C_COMPILER_REV} and \
+	echo >&2 WARNING: You are using cc version ${CC_COMPILER_REV} and \
 	should be using version ${PRINTABLE_C_REVS}.; \
-	echo >&2 Set ENFORCE_C_COMPILER_REV=${C_COMPILER_REV} to avoid this \
+	echo >&2 Set ENFORCE_CC_COMPILER_REV=${CC_COMPILER_REV} to avoid this \
 	warning.)
 endif
 
@@ -98,7 +102,7 @@
                        } \
 	      END      { exit rc; }'
 
-LINK_LIB.CC/PRE_HOOK += $(JVM_CHECK_SYMBOLS) || exit 1;
+LINK_LIB.CXX/PRE_HOOK += $(JVM_CHECK_SYMBOLS) || exit 1;
 
 # New architecture options started in SS12 (5.9), we need both styles to build.
 #   The older arch options for SS11 (5.8) or older and also for /usr/ccs/bin/as.
@@ -518,7 +522,7 @@
 #FASTDEBUG_CFLAGS += -Qoption ccfe -xglobalstatic
 
 ifeq	(${COMPILER_REV_NUMERIC}, 502)
-COMPILER_DATE := $(shell $(CPP) -V 2>&1 | sed -n '/^.*[ ]C++[ ]\([1-9]\.[0-9][0-9]*\)/p' | awk '{ print $$NF; }')
+COMPILER_DATE := $(shell $(CXX) -V 2>&1 | sed -n '/^.*[ ]C++[ ]\([1-9]\.[0-9][0-9]*\)/p' | awk '{ print $$NF; }')
 ifeq	(${COMPILER_DATE}, 2001/01/31)
 # disable -g0 in fastdebug since SC6.1 dated 2001/01/31 seems to be buggy
 # use an innocuous value because it will get -g if it's empty
@@ -557,9 +561,6 @@
 #LINK_INTO = LIBJVM
 endif
 
-MCS	= /usr/ccs/bin/mcs
-STRIP	= /usr/ccs/bin/strip
-
 # Solaris platforms collect lots of redundant file-ident lines,
 # to the point of wasting a significant percentage of file space.
 # (The text is stored in ELF .comment sections, contributed by
@@ -568,7 +569,7 @@
 # removing repeated lines.  The data can be extracted from
 # binaries in the field by using "mcs -p libjvm.so" or the older
 # command "what libjvm.so".
-LINK_LIB.CC/POST_HOOK += $(MCS) -c $@ || exit 1;
+LINK_LIB.CXX/POST_HOOK += $(MCS) -c $@ || exit 1;
 # (The exit 1 is necessary to cause a build failure if the command fails and
 # multiple commands are strung together, and the final semicolon is necessary
 # since the hook must terminate itself as a valid command.)
@@ -576,7 +577,7 @@
 # Also, strip debug and line number information (worth about 1.7Mb).
 # If we can create .debuginfo files, then the VM is stripped in vm.make
 # and this macro is not used.
-STRIP_LIB.CC/POST_HOOK = $(STRIP) -x $@ || exit 1;
-# STRIP_LIB.CC/POST_HOOK is incorporated into LINK_LIB.CC/POST_HOOK
+STRIP_LIB.CXX/POST_HOOK = $(STRIP) -x $@ || exit 1;
+# STRIP_LIB.CXX/POST_HOOK is incorporated into LINK_LIB.CXX/POST_HOOK
 # in certain configurations, such as product.make.  Other configurations,
 # such as debug.make, do not include the strip operation.
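sparcWorks.make keeps its compiler-version gate but renames the C-side variables from C_COMPILER_REV to CC_COMPILER_REV. The ENFORCE_*_COMPILER_REV trick relies on variable-name construction: when the override is unset, the assignment defines the enforcement variable itself; when a caller supplies one, the assignment lands on an unrelated name and the caller's value survives. A minimal sketch with invented names and versions:

VALIDATED_REV := 5.10
ACTUAL_REV    := 5.9

# unset ENFORCE_REV: the name below is plain ENFORCE_REV, so the validated default applies;
# "make ENFORCE_REV=5.9": this line defines ENFORCE_REV5.9 instead, keeping the caller's 5.9
ENFORCE_REV${ENFORCE_REV} := $(strip ${VALIDATED_REV})

ifeq ($(filter ${ENFORCE_REV},${ACTUAL_REV}),)
  $(warning compiler rev ${ACTUAL_REV} is not in the validated set: ${ENFORCE_REV})
endif

all: ; @true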
--- a/make/solaris/makefiles/top.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/solaris/makefiles/top.make	Tue Apr 10 10:42:34 2012 -0700
@@ -107,8 +107,8 @@
 the_vm: vm_build_preliminaries $(adjust-mflags)
 	@$(MAKE) -f vm.make $(MFLAGS-adjusted)
 
-install: the_vm
-	@$(MAKE) -f vm.make install
+install gamma: the_vm
+	@$(MAKE) -f vm.make $@
 
 # next rules support "make foo.[oi]"
 
--- a/make/solaris/makefiles/vm.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/solaris/makefiles/vm.make	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -76,16 +76,20 @@
 BUILD_USER    = -DHOTSPOT_BUILD_USER="\"$(HOTSPOT_BUILD_USER)\""
 VM_DISTRO     = -DHOTSPOT_VM_DISTRO="\"$(HOTSPOT_VM_DISTRO)\""
 
-CPPFLAGS =           \
+CXXFLAGS =           \
   ${SYSDEFS}         \
   ${INCLUDES}        \
   ${BUILD_VERSION}   \
   ${BUILD_TARGET}    \
   ${BUILD_USER}      \
   ${HS_LIB_ARCH}     \
-  ${JRE_VERSION}     \
   ${VM_DISTRO}
 
+# This is VERY important! The version define must only be supplied to vm_version.o
+# If not, ccache will not re-use the cache at all, since the version string might contain
+# a time and date. 
+vm_version.o: CXXFLAGS += ${JRE_VERSION} 
+
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 CFLAGS += $(CFLAGS_WARN)
 
@@ -265,17 +269,17 @@
 endif
 
 ifdef USE_GCC
-LINK_VM = $(LINK_LIB.c)
+LINK_VM = $(LINK_LIB.CC)
 else
-LINK_VM = $(LINK_LIB.CC)
+LINK_VM = $(LINK_LIB.CXX)
 endif
 # making the library:
 $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) 
 ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)
 	@echo Linking vm...
-	$(QUIETLY) $(LINK_LIB.CC/PRE_HOOK)
+	$(QUIETLY) $(LINK_LIB.CXX/PRE_HOOK)
 	$(QUIETLY) $(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM)
-	$(QUIETLY) $(LINK_LIB.CC/POST_HOOK)
+	$(QUIETLY) $(LINK_LIB.CXX/POST_HOOK)
 	$(QUIETLY) rm -f $@.1 && ln -s $@ $@.1
 	$(QUIETLY) [ -f $(LIBJVM_G) ] || ln -s $@ $(LIBJVM_G)
 	$(QUIETLY) [ -f $(LIBJVM_G).1 ] || ln -s $@.1 $(LIBJVM_G).1
@@ -317,9 +321,12 @@
 # Serviceability agent
 include $(MAKEFILES_DIR)/saproc.make
 
+# Whitebox testing API
+include $(MAKEFILES_DIR)/wb.make
+
 #----------------------------------------------------------------------
 
-build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(LIBJVM_DTRACE) $(BUILDLIBSAPROC) dtraceCheck
+build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(LIBJVM_DTRACE) $(BUILDLIBSAPROC) dtraceCheck $(WB_JAR)
 
 install: install_jvm install_jsig install_saproc
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/solaris/makefiles/wb.make	Tue Apr 10 10:42:34 2012 -0700
@@ -0,0 +1,46 @@
+#
+# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+# Rules to build whitebox testing library, used by vm.make
+
+WB = wb
+
+WBSRCDIR = $(GAMMADIR)/src/share/tools/whitebox
+
+WB_JAR = $(GENERATED)/$(WB).jar
+
+WB_JAVA_SRCS = $(shell find $(WBSRCDIR) -name '*.java')
+WB_JAVA_CLASSDIR = $(GENERATED)/wb/classes
+
+WB_JAVA_CLASSES  = $(patsubst $(WBSRCDIR)/%,$(WB_JAVA_CLASSDIR)/%, \
+	$(patsubst %.java,%.class,$(WB_JAVA_SRCS)))
+
+$(WB_JAVA_CLASSDIR)/%.class: $(WBSRCDIR)/%.java $(WB_JAVA_CLASSDIR)
+	$(REMOTE) $(COMPILE.JAVAC) -nowarn -d $(WB_JAVA_CLASSDIR) $<
+
+$(WB_JAR): $(WB_JAVA_CLASSES)
+	$(QUIETLY) $(REMOTE) $(RUN.JAR) cf $@ -C $(WB_JAVA_CLASSDIR)/ .
+
+$(WB_JAVA_CLASSDIR):
+	$(QUIETLY) mkdir -p $@
+
--- a/make/windows/build.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/windows/build.make	Tue Apr 10 10:42:34 2012 -0700
@@ -297,6 +297,10 @@
 	@ echo BUILDARCH=$(BUILDARCH)         			>> $@
 	@ echo Platform_arch=$(Platform_arch)        		>> $@
 	@ echo Platform_arch_model=$(Platform_arch_model)	>> $@
+	@ echo CXX=$(CXX)					>> $@
+	@ echo LD=$(LD)						>> $@
+	@ echo MT=$(MT)						>> $@
+	@ echo RC=$(RC)						>> $@
 	@ sh $(WorkSpace)/make/windows/get_msc_ver.sh		>> $@
 
 checks: checkVariant checkWorkSpace checkSA
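build.make now records the configure-provided CXX, LD, MT and RC alongside the platform settings it already writes into the generated makefile fragment that the nmake builds later include, and get_msc_ver.sh appends the compiler and linker versions to the same file. A hypothetical tail of that generated fragment (all values illustrative):

BUILDARCH=i486
Platform_arch=x86
Platform_arch_model=x86_32
CXX=cl.exe
LD=link.exe
MT=mt.exe
RC=rc.exe
MSC_VER=1600
LD_VER=1000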
--- a/make/windows/build_vm_def.sh	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/windows/build_vm_def.sh	Tue Apr 10 10:42:34 2012 -0700
@@ -57,10 +57,10 @@
 
 # When called from the IDE, the first param should contain the link version; otherwise it may be nil
 if [ "x$1" != "x" ]; then
-LINK_VER="$1"
+LD_VER="$1"
 fi
 
-if [ "x$LINK_VER" != "x800" -a  "x$LINK_VER" != "x900" -a "x$LINK_VER" != "x1000" ]; then
+if [ "x$LD_VER" != "x800" -a  "x$LD_VER" != "x900" -a "x$LD_VER" != "x1000" ]; then
 $DUMPBIN /symbols *.obj | "$GREP" "??_7.*@@6B@" | "$GREP" -v "type_info" | "$AWK" '{print $7}' | "$SORT" | "$UNIQ" > vm2.def
 else
 # Can't use pipes when calling cl.exe or link.exe from IDE. Using transit file vm3.def
--- a/make/windows/get_msc_ver.sh	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/windows/get_msc_ver.sh	Tue Apr 10 10:42:34 2012 -0700
@@ -72,8 +72,8 @@
   echo "MSC_VER_RAW=$MSC_VER_RAW"
 fi
 
-if [ "x$FORCE_LINK_VER" != "x" ]; then
-  echo "LINK_VER=$FORCE_LINK_VER"
+if [ "x$FORCE_LD_VER" != "x" ]; then
+  echo "LD_VER=$FORCE_LD_VER"
 else
   # use the "link" command that is co-located with the "cl" command
   cl_cmd=`which cl`
@@ -83,11 +83,11 @@
     # which can't find "cl" so just use which ever "link" we find
     link_cmd="link"
   fi
-  LINK_VER_RAW=`"$link_cmd" 2>&1 | "$HEAD" -n 1 | "$SED" 's/.*Version[\ ]*\([0-9][0-9.]*\).*/\1/'`
-  LINK_VER_MAJOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f1`
-  LINK_VER_MINOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f2`
-  LINK_VER_MICRO=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f3`
-  LINK_VER=`"$EXPR" $LINK_VER_MAJOR \* 100 + $LINK_VER_MINOR`
-  echo "LINK_VER=$LINK_VER"
-  echo "LINK_VER_RAW=$LINK_VER_RAW"
+  LD_VER_RAW=`"$link_cmd" 2>&1 | "$HEAD" -n 1 | "$SED" 's/.*Version[\ ]*\([0-9][0-9.]*\).*/\1/'`
+  LD_VER_MAJOR=`"$ECHO" $LD_VER_RAW | "$CUT" -d'.' -f1`
+  LD_VER_MINOR=`"$ECHO" $LD_VER_RAW | "$CUT" -d'.' -f2`
+  LD_VER_MICRO=`"$ECHO" $LD_VER_RAW | "$CUT" -d'.' -f3`
+  LD_VER=`"$EXPR" $LD_VER_MAJOR \* 100 + $LD_VER_MINOR`
+  echo "LD_VER=$LD_VER"
+  echo "LD_VER_RAW=$LD_VER_RAW"
 fi
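With the LINK_VER to LD_VER rename, the probe can still be overridden from the environment. A hypothetical run (version numbers are illustrative; the MSC_VER lines printed earlier by the script are omitted):

$ sh make/windows/get_msc_ver.sh
LD_VER=1000
LD_VER_RAW=10.00.30319.01
$ FORCE_LD_VER=900 sh make/windows/get_msc_ver.sh     # skips probing link.exe
LD_VER=900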
--- a/make/windows/makefiles/adlc.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/windows/makefiles/adlc.make	Tue Apr 10 10:42:34 2012 -0700
@@ -45,9 +45,9 @@
 ADLCFLAGS=-q -T -U_LP64
 !endif
 
-ADLC_CPP_FLAGS=$(CPP_FLAGS) /D _CRT_SECURE_NO_WARNINGS /D _CRT_SECURE_NO_DEPRECATE
+ADLC_CXX_FLAGS=$(CXX_FLAGS) /D _CRT_SECURE_NO_WARNINGS /D _CRT_SECURE_NO_DEPRECATE
 
-CPP_INCLUDE_DIRS=\
+CXX_INCLUDE_DIRS=\
   /I "..\generated" \
   /I "$(WorkSpace)\src\share\vm" \
   /I "$(WorkSpace)\src\os\windows\vm" \
@@ -94,14 +94,14 @@
   $(AdlcOutDir)\dfa_$(Platform_arch_model).cpp
 
 {$(WorkSpace)\src\share\vm\adlc}.cpp.obj::
-        $(CPP) $(ADLC_CPP_FLAGS) $(EXH_FLAGS) $(CPP_INCLUDE_DIRS) /c $<
+        $(CXX) $(ADLC_CXX_FLAGS) $(EXH_FLAGS) $(CXX_INCLUDE_DIRS) /c $<
 
 {$(WorkSpace)\src\share\vm\opto}.cpp.obj::
-        $(CPP) $(ADLC_CPP_FLAGS) $(EXH_FLAGS) $(CPP_INCLUDE_DIRS) /c $<
+        $(CXX) $(ADLC_CXX_FLAGS) $(EXH_FLAGS) $(CXX_INCLUDE_DIRS) /c $<
 
 adlc.exe: main.obj adlparse.obj archDesc.obj arena.obj dfa.obj dict2.obj filebuff.obj \
           forms.obj formsopt.obj formssel.obj opcodes.obj output_c.obj output_h.obj
-	$(LINK) $(LINK_FLAGS) /subsystem:console /out:$@ $**
+	$(LD) $(LD_FLAGS) /subsystem:console /out:$@ $**
 !if "$(MT)" != ""
 # The previous link command created a .manifest file that we want to
 # insert into the linked artifact so we do not need to track it
--- a/make/windows/makefiles/compile.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/windows/makefiles/compile.make	Tue Apr 10 10:42:34 2012 -0700
@@ -23,9 +23,11 @@
 #
 
 # Generic compiler settings
-CPP=cl.exe
+!if "x$(CXX)" == "x"
+CXX=cl.exe
+!endif
 
-# CPP Flags: (these vary slightly from VC6->VS2003->VS2005 compilers)
+# CXX Flags: (these vary slightly from VC6->VS2003->VS2005 compilers)
 #   /nologo   Suppress copyright message at every cl.exe startup
 #   /W3       Warning level 3
 #   /Zi       Include debugging information
@@ -50,47 +52,47 @@
 # improving the quality of crash log stack traces involving jvm.dll.
 
 # These are always used in all compiles
-CPP_FLAGS=/nologo /W3 /WX
+CXX_FLAGS=/nologo /W3 /WX
 
 # Let's add debug information always too.
-CPP_FLAGS=$(CPP_FLAGS) /Zi
+CXX_FLAGS=$(CXX_FLAGS) /Zi
 
 # Based on BUILDARCH we add some flags and select the default compiler name
 !if "$(BUILDARCH)" == "ia64"
 MACHINE=IA64
 DEFAULT_COMPILER_NAME=VS2003
-CPP_FLAGS=$(CPP_FLAGS) /D "CC_INTERP" /D "_LP64" /D "IA64"
+CXX_FLAGS=$(CXX_FLAGS) /D "CC_INTERP" /D "_LP64" /D "IA64"
 !endif
 
 !if "$(BUILDARCH)" == "amd64"
 MACHINE=AMD64
 DEFAULT_COMPILER_NAME=VS2005
-CPP_FLAGS=$(CPP_FLAGS) /D "_LP64" /D "AMD64"
+CXX_FLAGS=$(CXX_FLAGS) /D "_LP64" /D "AMD64"
 LP64=1
 !endif
 
 !if "$(BUILDARCH)" == "i486"
 MACHINE=I386
 DEFAULT_COMPILER_NAME=VS2003
-CPP_FLAGS=$(CPP_FLAGS) /D "IA32"
+CXX_FLAGS=$(CXX_FLAGS) /D "IA32"
 !endif
 
 # Sanity check, this is the default if not amd64, ia64, or i486
 !ifndef DEFAULT_COMPILER_NAME
-CPP=ARCH_ERROR
+CXX=ARCH_ERROR
 !endif
 
-CPP_FLAGS=$(CPP_FLAGS) /D "WIN32" /D "_WINDOWS"
+CXX_FLAGS=$(CXX_FLAGS) /D "WIN32" /D "_WINDOWS"
 # Must specify this for sharedRuntimeTrig.cpp
-CPP_FLAGS=$(CPP_FLAGS) /D "VM_LITTLE_ENDIAN"
+CXX_FLAGS=$(CXX_FLAGS) /D "VM_LITTLE_ENDIAN"
 
 # Used for platform dispatching
-CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_FAMILY_windows
-CPP_FLAGS=$(CPP_FLAGS) /D TARGET_ARCH_$(Platform_arch)
-CPP_FLAGS=$(CPP_FLAGS) /D TARGET_ARCH_MODEL_$(Platform_arch_model)
-CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_ARCH_windows_$(Platform_arch)
-CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_ARCH_MODEL_windows_$(Platform_arch_model)
-CPP_FLAGS=$(CPP_FLAGS) /D TARGET_COMPILER_visCPP
+CXX_FLAGS=$(CXX_FLAGS) /D TARGET_OS_FAMILY_windows
+CXX_FLAGS=$(CXX_FLAGS) /D TARGET_ARCH_$(Platform_arch)
+CXX_FLAGS=$(CXX_FLAGS) /D TARGET_ARCH_MODEL_$(Platform_arch_model)
+CXX_FLAGS=$(CXX_FLAGS) /D TARGET_OS_ARCH_windows_$(Platform_arch)
+CXX_FLAGS=$(CXX_FLAGS) /D TARGET_OS_ARCH_MODEL_windows_$(Platform_arch_model)
+CXX_FLAGS=$(CXX_FLAGS) /D TARGET_COMPILER_visCPP
 
 
 # MSC_VER is a 4 digit number that tells us what compiler is being used
@@ -150,14 +152,14 @@
 # Always add the _STATIC_CPPLIB flag
 STATIC_CPPLIB_OPTION = /D _STATIC_CPPLIB /D _DISABLE_DEPRECATE_STATIC_CPPLIB
 MS_RUNTIME_OPTION = $(MS_RUNTIME_OPTION) $(STATIC_CPPLIB_OPTION)
-CPP_FLAGS=$(CPP_FLAGS) $(MS_RUNTIME_OPTION)
+CXX_FLAGS=$(CXX_FLAGS) $(MS_RUNTIME_OPTION)
 
 # How /GX option is spelled
 GX_OPTION = /GX
 
 # Optimization settings for various versions of the compilers and types of
 #    builds. Three basic sets of settings: product, fastdebug, and debug.
-#    These get added into CPP_FLAGS as needed by other makefiles.
+#    These get added into CXX_FLAGS as needed by other makefiles.
 !if "$(COMPILER_NAME)" == "VC6"
 PRODUCT_OPT_OPTION   = /Ox /Os /Gy /GF
 FASTDEBUG_OPT_OPTION = /Ox /Os /Gy /GF
@@ -180,34 +182,40 @@
 #    externals at link time. Even with /GS-, you need bufferoverflowU.lib.
 #    NOTE: Currently we decided to not use /GS-
 BUFFEROVERFLOWLIB = bufferoverflowU.lib
-LINK_FLAGS = /manifest $(LINK_FLAGS) $(BUFFEROVERFLOWLIB)
+LD_FLAGS = /manifest $(LD_FLAGS) $(BUFFEROVERFLOWLIB)
 # Manifest Tool - used in VS2005 and later to adjust manifests stored
 # as resources inside build artifacts.
+!if "x$(MT)" == "x"
 MT=mt.exe
 !endif
+!endif
 
 !if "$(COMPILER_NAME)" == "VS2008"
 PRODUCT_OPT_OPTION   = /O2 /Oy-
 FASTDEBUG_OPT_OPTION = /O2 /Oy-
 DEBUG_OPT_OPTION     = /Od
 GX_OPTION = /EHsc
-LINK_FLAGS = /manifest $(LINK_FLAGS)
+LD_FLAGS = /manifest $(LD_FLAGS)
 # Manifest Tool - used in VS2005 and later to adjust manifests stored
 # as resources inside build artifacts.
+!if "x$(MT)" == "x"
 MT=mt.exe
 !endif
+!endif
 
 !if "$(COMPILER_NAME)" == "VS2010"
 PRODUCT_OPT_OPTION   = /O2 /Oy-
 FASTDEBUG_OPT_OPTION = /O2 /Oy-
 DEBUG_OPT_OPTION     = /Od
 GX_OPTION = /EHsc
-LINK_FLAGS = /manifest $(LINK_FLAGS)
+LD_FLAGS = /manifest $(LD_FLAGS)
 # Manifest Tool - used in VS2005 and later to adjust manifests stored
 # as resources inside build artifacts.
+!if "x$(MT)" == "x"
 MT=mt.exe
+!endif
 !if "$(BUILDARCH)" == "i486"
-LINK_FLAGS = /SAFESEH $(LINK_FLAGS)
+LD_FLAGS = /SAFESEH $(LD_FLAGS)
 !endif
 !endif
 
@@ -225,19 +233,23 @@
 !endif
 
 # Generic linker settings
-LINK=link.exe
-LINK_FLAGS= $(LINK_FLAGS) kernel32.lib user32.lib gdi32.lib winspool.lib \
+!if "x$(LD)" == "x"
+LD=link.exe
+!endif
+LD_FLAGS= $(LD_FLAGS) kernel32.lib user32.lib gdi32.lib winspool.lib \
  comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib \
  uuid.lib Wsock32.lib winmm.lib /nologo /machine:$(MACHINE) /opt:REF \
  /opt:ICF,8 /map /debug
 
 
 !if $(MSC_VER) >= 1600 
-LINK_FLAGS= $(LINK_FLAGS) psapi.lib
+LD_FLAGS= $(LD_FLAGS) psapi.lib
 !endif
 
 # Resource compiler settings
+!if "x$(RC)" == "x"
 RC=rc.exe
+!endif
 RC_FLAGS=/D "HS_VER=$(HS_VER)" \
 	 /D "HS_DOTVER=$(HS_DOTVER)" \
 	 /D "HS_BUILD_ID=$(HS_BUILD_ID)" \
@@ -250,7 +262,7 @@
 	 /D "HS_INTERNAL_NAME=$(HS_INTERNAL_NAME)" \
 	 /D "HS_NAME=$(HS_NAME)"
 
-# Need this to match the CPP_FLAGS settings
+# Need this to match the CXX_FLAGS settings
 !if "$(MFC_DEBUG)" == "true"
 RC_FLAGS = $(RC_FLAGS) /D "_DEBUG"
 !endif
--- a/make/windows/makefiles/debug.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/windows/makefiles/debug.make	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -33,12 +33,12 @@
 BUILD_PCH_FILE=_build_pch_file.obj
 !endif
 
-default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA
+default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA wb
 
 !include ../local.make
 !include compile.make
 
-CPP_FLAGS=$(CPP_FLAGS) $(DEBUG_OPT_OPTION)
+CXX_FLAGS=$(CXX_FLAGS) $(DEBUG_OPT_OPTION)
 
 !include $(WorkSpace)/make/windows/makefiles/vm.make
 !include local.make
@@ -52,8 +52,8 @@
 	sh $(WorkSpace)/make/windows/build_vm_def.sh
 
 $(AOUT): $(Res_Files) $(Obj_Files) vm.def
-	$(LINK) @<<
-  $(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
+	$(LD) @<<
+  $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
 <<
 !if "$(MT)" != ""
 # The previous link command created a .manifest file that we want to
@@ -65,3 +65,4 @@
 !include $(WorkSpace)/make/windows/makefiles/shared.make
 !include $(WorkSpace)/make/windows/makefiles/sa.make
 !include $(WorkSpace)/make/windows/makefiles/launcher.make
+!include $(WorkSpace)/make/windows/makefiles/wb.make
--- a/make/windows/makefiles/defs.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/windows/makefiles/defs.make	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -107,6 +107,19 @@
   endif
 endif
 
+# On 32-bit Windows we build server, client and kernel, on 64-bit just server.
+ifeq ($(JVM_VARIANTS),)
+  ifeq ($(ARCH_DATA_MODEL), 32)
+    JVM_VARIANTS:=client,server,kernel
+    JVM_VARIANT_CLIENT:=true
+    JVM_VARIANT_SERVER:=true
+    JVM_VARIANT_KERNEL:=true
+  else
+    JVM_VARIANTS:=server
+    JVM_VARIANT_SERVER:=true
+  endif
+endif
+
 JDK_INCLUDE_SUBDIR=win32
 
 # Library suffix
@@ -177,23 +190,28 @@
 EXPORT_CLIENT_DIR = $(EXPORT_JRE_BIN_DIR)/client
 EXPORT_KERNEL_DIR = $(EXPORT_JRE_BIN_DIR)/kernel
 
-EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
-EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.$(LIBRARY_SUFFIX)
-EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.pdb
-EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.map
-EXPORT_LIST += $(EXPORT_LIB_DIR)/jvm.lib
-ifeq ($(ARCH_DATA_MODEL), 32)
+ifeq ($(JVM_VARIANT_SERVER),true)
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.$(LIBRARY_SUFFIX)
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.pdb
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/jvm.map
+  EXPORT_LIST += $(EXPORT_LIB_DIR)/jvm.lib
+endif
+ifeq ($(JVM_VARIANT_CLIENT),true)
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/jvm.$(LIBRARY_SUFFIX)
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/jvm.pdb
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/jvm.map
-  # kernel vm
+endif
+ifeq ($(JVM_VARIANT_KERNEL),true)
   EXPORT_LIST += $(EXPORT_KERNEL_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_KERNEL_DIR)/jvm.$(LIBRARY_SUFFIX)
   EXPORT_LIST += $(EXPORT_KERNEL_DIR)/jvm.pdb
   EXPORT_LIST += $(EXPORT_KERNEL_DIR)/jvm.map
 endif
 
+EXPORT_LIST += $(EXPORT_JRE_LIB_DIR)/wb.jar
+
 ifeq ($(BUILD_WIN_SA), 1)
   EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.$(LIBRARY_SUFFIX)
   EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.pdb
@@ -202,3 +220,19 @@
   # Must pass this down to nmake.
   MAKE_ARGS += BUILD_WIN_SA=1
 endif
+
+# Propagate compiler and tools paths from configure to nmake. 
+# Need to make sure they contain \\ and not /.
+ifneq ($(SPEC),)
+  ifeq ($(USING_CYGWIN), true)
+    MAKE_ARGS += CXX="$(subst /,\\,$(shell /bin/cygpath -s -m -a $(CXX)))"
+    MAKE_ARGS += LD="$(subst /,\\,$(shell /bin/cygpath -s -m -a $(LD)))"
+    MAKE_ARGS += RC="$(subst /,\\,$(shell /bin/cygpath -s -m -a $(RC)))"
+    MAKE_ARGS += MT="$(subst /,\\,$(shell /bin/cygpath -s -m -a $(MT)))"
+  else
+    MAKE_ARGS += CXX="$(subst /,\\,$(CXX))"
+    MAKE_ARGS += LD="$(subst /,\\,$(LD))"
+    MAKE_ARGS += RC="$(subst /,\\,$(RC))"
+    MAKE_ARGS += MT="$(subst /,\\,$(MT))"
+  endif
+endif
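The MAKE_ARGS block above hands the configure-selected tools to nmake with backslashes doubled, since nmake cannot take the forward-slash Cygwin paths directly. For illustration only (the installation path and 8.3 short names are hypothetical): a CXX of /cygdrive/c/Program Files (x86)/Microsoft Visual Studio 10.0/VC/bin/cl.exe is first turned into a short mixed-mode path by cygpath -s -m -a, roughly C:/PROGRA~2/MICROS~1.0/VC/bin/cl.exe, and the subst then yields

MAKE_ARGS += CXX="C:\\PROGRA~2\\MICROS~1.0\\VC\\bin\\cl.exe"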
--- a/make/windows/makefiles/fastdebug.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/windows/makefiles/fastdebug.make	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -33,12 +33,12 @@
 BUILD_PCH_FILE=_build_pch_file.obj
 !endif
 
-default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA
+default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA wb
 
 !include ../local.make
 !include compile.make
 
-CPP_FLAGS=$(CPP_FLAGS) $(FASTDEBUG_OPT_OPTION)
+CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION)
 
 !include $(WorkSpace)/make/windows/makefiles/vm.make
 !include local.make
@@ -52,8 +52,8 @@
 	sh $(WorkSpace)/make/windows/build_vm_def.sh
 
 $(AOUT): $(Res_Files) $(Obj_Files) vm.def
-	$(LINK) @<<
-  $(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
+	$(LD) @<<
+  $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
 <<
 !if "$(MT)" != ""
 # The previous link command created a .manifest file that we want to
@@ -65,3 +65,4 @@
 !include $(WorkSpace)/make/windows/makefiles/shared.make
 !include $(WorkSpace)/make/windows/makefiles/sa.make
 !include $(WorkSpace)/make/windows/makefiles/launcher.make
+!include $(WorkSpace)/make/windows/makefiles/wb.make
--- a/make/windows/makefiles/launcher.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/windows/makefiles/launcher.make	Tue Apr 10 10:42:34 2012 -0700
@@ -23,7 +23,7 @@
 #
 
 
-LAUNCHER_FLAGS=$(CPP_FLAGS) $(ARCHFLAG) \
+LAUNCHER_FLAGS=$(CXX_FLAGS) $(ARCHFLAG) \
 	/D FULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \
 	/D JDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \
 	/D JDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \
@@ -39,18 +39,18 @@
 	/I $(WorkSpace)\src\cpu\$(Platform_arch)\vm \
 	/I $(WorkSpace)\src\os\windows\vm
 
-LINK_FLAGS=/manifest $(HS_INTERNAL_NAME).lib kernel32.lib user32.lib /nologo /machine:$(MACHINE) /map /debug /subsystem:console 
+LD_FLAGS=/manifest $(HS_INTERNAL_NAME).lib kernel32.lib user32.lib /nologo /machine:$(MACHINE) /map /debug /subsystem:console 
 
 !if "$(COMPILER_NAME)" == "VS2005"
 # This VS2005 compiler has /GS as a default and requires bufferoverflowU.lib
 #    on the link command line, otherwise we get missing __security_check_cookie
 #    externals at link time. Even with /GS-, you need bufferoverflowU.lib.
 BUFFEROVERFLOWLIB = bufferoverflowU.lib
-LINK_FLAGS = $(LINK_FLAGS) $(BUFFEROVERFLOWLIB)
+LD_FLAGS = $(LD_FLAGS) $(BUFFEROVERFLOWLIB)
 !endif
 
 !if "$(COMPILER_NAME)" == "VS2010" && "$(BUILDARCH)" == "i486"
-LINK_FLAGS = /SAFESEH $(LINK_FLAGS)
+LD_FLAGS = /SAFESEH $(LD_FLAGS)
 !endif
 
 LAUNCHERDIR = $(WorkSpace)/src/os/windows/launcher
@@ -60,14 +60,14 @@
 
 {$(LAUNCHERDIR)}.c{$(OUTDIR)}.obj:
 	-mkdir $(OUTDIR) 2>NUL >NUL
-        $(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $<
+        $(CXX) $(LAUNCHER_FLAGS) /c /Fo$@ $<
 
 {$(LAUNCHERDIR_SHARE)}.c{$(OUTDIR)}.obj:
 	-mkdir $(OUTDIR) 2>NUL >NUL
-        $(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $<
+        $(CXX) $(LAUNCHER_FLAGS) /c /Fo$@ $<
 
 $(OUTDIR)\*.obj: $(LAUNCHERDIR)\*.c $(LAUNCHERDIR)\*.h $(LAUNCHERDIR_SHARE)\*.c $(LAUNCHERDIR_SHARE)\*.h
 
 launcher: $(OUTDIR)\java.obj $(OUTDIR)\java_md.obj $(OUTDIR)\jli_util.obj
 	echo $(JAVA_HOME) > jdkpath.txt  
-	$(LINK) $(LINK_FLAGS) /out:hotspot.exe $**
+	$(LD) $(LD_FLAGS) /out:hotspot.exe $**
--- a/make/windows/makefiles/product.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/windows/makefiles/product.make	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,12 @@
 BUILD_PCH_FILE=_build_pch_file.obj
 !endif
 
-default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA
+default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA wb
 
 !include ../local.make
 !include compile.make
 
-CPP_FLAGS=$(CPP_FLAGS) $(PRODUCT_OPT_OPTION)
+CXX_FLAGS=$(CXX_FLAGS) $(PRODUCT_OPT_OPTION)
 
 RELEASE=
 
@@ -54,16 +54,16 @@
 # Kernel doesn't need exported vtbl symbols.
 !if "$(Variant)" == "kernel"
 $(AOUT): $(Res_Files) $(Obj_Files)
-	$(LINK) @<<
-  $(LINK_FLAGS) /out:$@ /implib:$*.lib $(Obj_Files) $(Res_Files)
+	$(LD) @<<
+  $(LD_FLAGS) /out:$@ /implib:$*.lib $(Obj_Files) $(Res_Files)
 <<
 !else
 vm.def: $(Obj_Files)
 	sh $(WorkSpace)/make/windows/build_vm_def.sh
 
 $(AOUT): $(Res_Files) $(Obj_Files) vm.def
-	$(LINK) @<<
-  $(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
+	$(LD) @<<
+  $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
 <<
 !endif
 !if "$(MT)" != ""
@@ -76,3 +76,4 @@
 !include $(WorkSpace)/make/windows/makefiles/shared.make
 !include $(WorkSpace)/make/windows/makefiles/sa.make
 !include $(WorkSpace)/make/windows/makefiles/launcher.make
+!include $(WorkSpace)/make/windows/makefiles/wb.make
--- a/make/windows/makefiles/projectcreator.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/windows/makefiles/projectcreator.make	Tue Apr 10 10:42:34 2012 -0700
@@ -89,7 +89,7 @@
         -jdkTargetRoot $(HOTSPOTJDKDIST) \
         -define ALIGN_STACK_FRAMES \
         -define VM_LITTLE_ENDIAN \
-        -prelink  "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b	set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME)	set JAVA_HOME=$(HOTSPOTJDKDIST)	$(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LINK_VER)" \
+        -prelink  "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b	set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME)	set JAVA_HOME=$(HOTSPOTJDKDIST)	$(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LD_VER)" \
         -postbuild "" "Building hotspot.exe..." "cd $(HOTSPOTBUILDSPACE)\%f\%b	set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME)	nmake -f $(HOTSPOTWORKSPACE)\make\windows\projectfiles\common\Makefile LOCAL_MAKE=$(HOTSPOTBUILDSPACE)\%f\local.make JAVA_HOME=$(HOTSPOTJDKDIST) launcher" \
         -ignoreFile jsig.c \
         -ignoreFile jvmtiEnvRecommended.cpp \
--- a/make/windows/makefiles/sa.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/windows/makefiles/sa.make	Tue Apr 10 10:42:34 2012 -0700
@@ -91,16 +91,16 @@
 !if "$(COMPILER_NAME)" == "VS2005"
 # On amd64, VS2005 compiler requires bufferoverflowU.lib on the link command line, 
 # otherwise we get missing __security_check_cookie externals at link time. 
-SA_LINK_FLAGS = bufferoverflowU.lib
+SA_LD_FLAGS = bufferoverflowU.lib
 !endif
 !else
 SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 /Gm $(GX_OPTION) /ZI /Od /D "WIN32" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
 !endif
 !if "$(MT)" != ""
-SA_LINK_FLAGS = /manifest $(SA_LINK_FLAGS)
+SA_LD_FLAGS = /manifest $(SA_LD_FLAGS)
 !endif
 SASRCFILE = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp
-SA_LFLAGS = $(SA_LINK_FLAGS) /nologo /subsystem:console /map /debug /machine:$(MACHINE)
+SA_LFLAGS = $(SA_LD_FLAGS) /nologo /subsystem:console /map /debug /machine:$(MACHINE)
 
 # Note that we do not keep sawindbg.obj around as it would then
 # get included in the dumpbin command in build_vm_def.sh
@@ -110,14 +110,14 @@
 # Use ";#2" for .dll and ";#1" for .exe in the MT command below:
 $(SAWINDBG): $(SASRCFILE)
 	set INCLUDE=$(SA_INCLUDE)$(INCLUDE)
-	$(CPP) @<<
+	$(CXX) @<<
 	  /I"$(BootStrapDir)/include" /I"$(BootStrapDir)/include/win32" 
 	  /I"$(GENERATED)" $(SA_CFLAGS)
 	  $(SASRCFILE)
 	  /out:sawindbg.obj
 <<
 	set LIB=$(SA_LIB)$(LIB)
-	$(LINK) /out:$@ /DLL sawindbg.obj dbgeng.lib $(SA_LFLAGS)
+	$(LD) /out:$@ /DLL sawindbg.obj dbgeng.lib $(SA_LFLAGS)
 !if "$(MT)" != ""
 	$(MT) /manifest $(@F).manifest /outputresource:$(@F);#2
 !endif
--- a/make/windows/makefiles/sanity.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/windows/makefiles/sanity.make	Tue Apr 10 10:42:34 2012 -0700
@@ -31,5 +31,5 @@
 	echo *** WARNING *** unrecognized cl.exe version $(MSC_VER) ($(RAW_MSC_VER)).  Use FORCE_MSC_VER to override automatic detection.
 
 checkLink:
-	@ if "$(LINK_VER)" NEQ "710" if "$(LINK_VER)" NEQ "800" if "$(LINK_VER)" NEQ "900" if "$(LINK_VER)" NEQ "1000" \
-	echo *** WARNING *** unrecognized link.exe version $(LINK_VER) ($(RAW_LINK_VER)).  Use FORCE_LINK_VER to override automatic detection.
+	@ if "$(LD_VER)" NEQ "710" if "$(LD_VER)" NEQ "800" if "$(LD_VER)" NEQ "900" if "$(LD_VER)" NEQ "1000" \
+	echo *** WARNING *** unrecognized link.exe version $(LD_VER) ($(RAW_LD_VER)).  Use FORCE_LD_VER to override automatic detection.
--- a/make/windows/makefiles/shared.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/windows/makefiles/shared.make	Tue Apr 10 10:42:34 2012 -0700
@@ -30,8 +30,8 @@
 DIR=.
 !endif
 
-!ifndef CPP
-CPP=cl.exe
+!ifndef CXX
+CXX=cl.exe
 !endif
 
 
--- a/make/windows/makefiles/vm.make	Wed Apr 04 20:44:38 2012 -0700
+++ b/make/windows/makefiles/vm.make	Tue Apr 10 10:42:34 2012 -0700
@@ -32,12 +32,12 @@
 
 !ifdef RELEASE
 !ifdef DEVELOP
-CPP_FLAGS=$(CPP_FLAGS) /D "DEBUG"
+CXX_FLAGS=$(CXX_FLAGS) /D "DEBUG"
 !else
-CPP_FLAGS=$(CPP_FLAGS) /D "PRODUCT"
+CXX_FLAGS=$(CXX_FLAGS) /D "PRODUCT"
 !endif
 !else
-CPP_FLAGS=$(CPP_FLAGS) /D "ASSERT"
+CXX_FLAGS=$(CXX_FLAGS) /D "ASSERT"
 !endif
 
 !if "$(Variant)" == "core"
@@ -45,19 +45,19 @@
 !endif
 
 !if "$(Variant)" == "kernel"
-CPP_FLAGS=$(CPP_FLAGS) /D "KERNEL"
+CXX_FLAGS=$(CXX_FLAGS) /D "KERNEL"
 !endif
 
 !if "$(Variant)" == "compiler1"
-CPP_FLAGS=$(CPP_FLAGS) /D "COMPILER1"
+CXX_FLAGS=$(CXX_FLAGS) /D "COMPILER1"
 !endif
 
 !if "$(Variant)" == "compiler2"
-CPP_FLAGS=$(CPP_FLAGS) /D "COMPILER2"
+CXX_FLAGS=$(CXX_FLAGS) /D "COMPILER2"
 !endif
 
 !if "$(Variant)" == "tiered"
-CPP_FLAGS=$(CPP_FLAGS) /D "COMPILER1" /D "COMPILER2"
+CXX_FLAGS=$(CXX_FLAGS) /D "COMPILER1" /D "COMPILER2"
 !endif
 
 !if "$(BUILDARCH)" == "i486"
@@ -67,21 +67,21 @@
 !endif
 
 # The following variables are defined in the generated local.make file.
-CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_RELEASE_VERSION=\"$(HS_BUILD_VER)\""
-CPP_FLAGS=$(CPP_FLAGS) /D "JRE_RELEASE_VERSION=\"$(JRE_RELEASE_VER)\""
-CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_LIB_ARCH=\"$(HOTSPOT_LIB_ARCH)\""
-CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_TARGET=\"$(BUILD_FLAVOR)\""
-CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\""
-CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\""
+CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_RELEASE_VERSION=\"$(HS_BUILD_VER)\""
+CXX_FLAGS=$(CXX_FLAGS) /D "JRE_RELEASE_VERSION=\"$(JRE_RELEASE_VER)\""
+CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_LIB_ARCH=\"$(HOTSPOT_LIB_ARCH)\""
+CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_BUILD_TARGET=\"$(BUILD_FLAVOR)\""
+CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\""
+CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\""
 
 !ifndef JAVASE_EMBEDDED
-CPP_FLAGS=$(CPP_FLAGS) /D "INCLUDE_TRACE"
+CXX_FLAGS=$(CXX_FLAGS) /D "INCLUDE_TRACE"
 !endif
 
-CPP_FLAGS=$(CPP_FLAGS) $(CPP_INCLUDE_DIRS)
+CXX_FLAGS=$(CXX_FLAGS) $(CXX_INCLUDE_DIRS)
 
 # Define that so jni.h is on correct side
-CPP_FLAGS=$(CPP_FLAGS) /D "_JNI_IMPLEMENTATION_"
+CXX_FLAGS=$(CXX_FLAGS) /D "_JNI_IMPLEMENTATION_"
 
 !if "$(BUILDARCH)" == "ia64"
 STACK_SIZE="/STACK:1048576,262144"
@@ -102,7 +102,7 @@
 
 # If you modify exports below please do the corresponding changes in
 # src/share/tools/ProjectCreator/WinGammaPlatformVC7.java
-LINK_FLAGS=$(LINK_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 \
+LD_FLAGS=$(LD_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 \
   /export:JNI_GetDefaultJavaVMInitArgs       \
   /export:JNI_CreateJavaVM                   \
   /export:JVM_FindClassFromBootLoader        \
@@ -118,25 +118,25 @@
   /export:JVM_GetThreadStateValues           \
   /export:JVM_InitAgentProperties
 
-CPP_INCLUDE_DIRS=/I "..\generated"
+CXX_INCLUDE_DIRS=/I "..\generated"
 
 !if exists($(ALTSRC)\share\vm)
-CPP_INCLUDE_DIRS=$(CPP_INCLUDE_DIRS) /I "$(ALTSRC)\share\vm"
+CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\share\vm"
 !endif
 
 !if exists($(ALTSRC)\os\windows\vm)
-CPP_INCLUDE_DIRS=$(CPP_INCLUDE_DIRS) /I "$(ALTSRC)\os\windows\vm"
+CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\os\windows\vm"
 !endif
 
 !if exists($(ALTSRC)\os_cpu\windows_$(Platform_arch)\vm)
-CPP_INCLUDE_DIRS=$(CPP_INCLUDE_DIRS) /I "$(ALTSRC)\os_cpu\windows_$(Platform_arch)\vm"
+CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\os_cpu\windows_$(Platform_arch)\vm"
 !endif
 
 !if exists($(ALTSRC)\cpu\$(Platform_arch)\vm)
-CPP_INCLUDE_DIRS=$(CPP_INCLUDE_DIRS) /I "$(ALTSRC)\cpu\$(Platform_arch)\vm"
+CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\cpu\$(Platform_arch)\vm"
 !endif
 
-CPP_INCLUDE_DIRS=$(CPP_INCLUDE_DIRS) \
+CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) \
   /I "$(COMMONSRC)\share\vm" \
   /I "$(COMMONSRC)\share\vm\precompiled" \
   /I "$(COMMONSRC)\share\vm\prims" \
@@ -144,12 +144,12 @@
   /I "$(COMMONSRC)\os_cpu\windows_$(Platform_arch)\vm" \
   /I "$(COMMONSRC)\cpu\$(Platform_arch)\vm"
 
-CPP_DONT_USE_PCH=/D DONT_USE_PRECOMPILED_HEADER
+CXX_DONT_USE_PCH=/D DONT_USE_PRECOMPILED_HEADER
 
 !if "$(USE_PRECOMPILED_HEADER)" != "0"
-CPP_USE_PCH=/Fp"vm.pch" /Yu"precompiled.hpp"
+CXX_USE_PCH=/Fp"vm.pch" /Yu"precompiled.hpp"
 !else
-CPP_USE_PCH=$(CPP_DONT_USE_PCH)
+CXX_USE_PCH=$(CXX_DONT_USE_PCH)
 !endif
 
 # Where to find the source code for the virtual machine (is this used?)
@@ -194,101 +194,101 @@
 # Special case files not using precompiled header files.
 
 c1_RInfo_$(Platform_arch).obj: $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp 
-	 $(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp
+	 $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp
 
 os_windows.obj: $(WorkSpace)\src\os\windows\vm\os_windows.cpp
-        $(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\os_windows.cpp
+        $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\os_windows.cpp
 
 os_windows_$(Platform_arch).obj: $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\os_windows_$(Platform_arch).cpp
-        $(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\os_windows_$(Platform_arch).cpp
+        $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\os_windows_$(Platform_arch).cpp
 
 osThread_windows.obj: $(WorkSpace)\src\os\windows\vm\osThread_windows.cpp
-        $(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\osThread_windows.cpp
+        $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\osThread_windows.cpp
 
 conditionVar_windows.obj: $(WorkSpace)\src\os\windows\vm\conditionVar_windows.cpp
-        $(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\conditionVar_windows.cpp
+        $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\conditionVar_windows.cpp
 
 getThread_windows_$(Platform_arch).obj: $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\getThread_windows_$(Platform_arch).cpp
-        $(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\getThread_windows_$(Platform_arch).cpp
+        $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\getThread_windows_$(Platform_arch).cpp
 
 opcodes.obj: $(WorkSpace)\src\share\vm\opto\opcodes.cpp
-        $(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\share\vm\opto\opcodes.cpp
+        $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\share\vm\opto\opcodes.cpp
 
 bytecodeInterpreter.obj: $(WorkSpace)\src\share\vm\interpreter\bytecodeInterpreter.cpp
-        $(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\share\vm\interpreter\bytecodeInterpreter.cpp
+        $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\share\vm\interpreter\bytecodeInterpreter.cpp
 
 bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWithChecks.cpp
-        $(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c ..\generated\jvmtifiles\bytecodeInterpreterWithChecks.cpp
+        $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c ..\generated\jvmtifiles\bytecodeInterpreterWithChecks.cpp
 
 # Default rules for the Virtual Machine
 {$(COMMONSRC)\share\vm\c1}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\compiler}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\code}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\interpreter}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\ci}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\classfile}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\gc_implementation\parallelScavenge}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\gc_implementation\shared}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\gc_implementation\parNew}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\gc_implementation\concurrentMarkSweep}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\gc_implementation\g1}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\gc_interface}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\asm}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\memory}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\oops}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\prims}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\runtime}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\services}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\trace}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\utilities}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\libadt}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\share\vm\opto}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\os\windows\vm}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 # This guy should remain a single colon rule because
 # otherwise we can't specify the output filename.
@@ -296,113 +296,113 @@
         @$(RC) $(RC_FLAGS) /fo"$@" $<
 
 {$(COMMONSRC)\cpu\$(Platform_arch)\vm}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(COMMONSRC)\os_cpu\windows_$(Platform_arch)\vm}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\c1}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\compiler}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\code}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\interpreter}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\ci}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\classfile}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\gc_implementation\parallelScavenge}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\gc_implementation\shared}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\gc_implementation\parNew}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\gc_implementation\concurrentMarkSweep}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\gc_implementation\g1}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\gc_interface}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\asm}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\memory}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\oops}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\prims}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\runtime}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\services}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\trace}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\utilities}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\libadt}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\opto}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\os\windows\vm}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 # otherwise we can't specify the output filename.
 {$(ALTSRC)\os\windows\vm}.rc.res:
         @$(RC) $(RC_FLAGS) /fo"$@" $<
 
 {$(ALTSRC)\cpu\$(Platform_arch)\vm}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\os_cpu\windows_$(Platform_arch)\vm}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {..\generated\incls}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {..\generated\adfiles}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {..\generated\jvmtifiles}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\jfr}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\jfr\agent}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\jfr\agent\isolated_deps\util}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 {$(ALTSRC)\share\vm\jfr\jvm}.cpp.obj::
-        $(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 default::
 
 _build_pch_file.obj:
         @echo #include "precompiled.hpp" > ../generated/_build_pch_file.cpp
-        $(CPP) $(CPP_FLAGS) /Fp"vm.pch" /Yc"precompiled.hpp" /c ../generated/_build_pch_file.cpp
+        $(CXX) $(CXX_FLAGS) /Fp"vm.pch" /Yc"precompiled.hpp" /c ../generated/_build_pch_file.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/windows/makefiles/wb.make	Tue Apr 10 10:42:34 2012 -0700
@@ -0,0 +1,54 @@
+#
+# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# This makefile is used to build the whitebox testing lib
+# and compile the tests which use it
+
+!include $(WorkSpace)/make/windows/makefiles/rules.make
+
+WBSRCDIR = $(WorkSpace)/src/share/tools/whitebox
+
+# turn GENERATED into a windows path to get sane dependencies
+WB_CLASSES=$(GENERATED:/=\)\wb\classes
+WB_JAR=$(GENERATED:/=\)\wb.jar
+
+# call recursive make to do wildcard expansion
+.SUFFIXES : .java .class
+wb_java_srcs: $(WorkSpace)\src\share\tools\whitebox\sun\hotspot\*.java $(WB_CLASSES)
+	$(MAKE) -f $(WorkSpace)\make\windows\makefiles\$(BUILD_FLAVOR).make $(**:.java=.class)
+
+
+{$(WorkSpace)\src\share\tools\whitebox\sun\hotspot}.java.class::
+	$(COMPILE_JAVAC) -d $(WB_CLASSES) $<
+
+$(WB_JAR): wb_java_srcs
+	$(RUN_JAR) cf $@ -C $(WB_CLASSES) .
+
+# turn $@ into a unix path because mkdir in PATH is cygwin/mks mkdir
+$(WB_CLASSES):
+	mkdir -p $(@:\=/)
+
+# main target to build wb
+wb: $(WB_JAR)
+
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2134,6 +2134,7 @@
   // address pseudos: make these names unlike instruction names to avoid confusion
   inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
   inline void load_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
+  inline void load_bool_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
   inline void load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
   inline void store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
   inline void store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
@@ -2220,7 +2221,7 @@
   // traps as per trap.h (SPARC ABI?)
 
   void breakpoint_trap();
-  void breakpoint_trap(Condition c, CC cc = icc);
+  void breakpoint_trap(Condition c, CC cc);
   void flush_windows_trap();
   void clean_windows_trap();
   void get_psr_trap();
@@ -2249,7 +2250,7 @@
   // this platform we assume byte size
 
   inline void stbool(Register d, const Address& a) { stb(d, a); }
-  inline void ldbool(const Address& a, Register d) { ldsb(a, d); }
+  inline void ldbool(const Address& a, Register d) { ldub(a, d); }
   inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }
 
   // klass oop manipulations if compressed
--- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -692,6 +692,17 @@
 }
 
 
+inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
+  assert_not_delayed();
+  if (ForceUnreachable) {
+    patchable_sethi(addrlit, d);
+  } else {
+    sethi(addrlit, d);
+  }
+  ldub(d, addrlit.low10() + offset, d);
+}
+
+
 inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
   assert_not_delayed();
   if (ForceUnreachable) {
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -472,7 +472,7 @@
   __ load_klass(src_reg, tmp_reg);
 
   Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
-  __ ld(ref_type_adr, tmp_reg);
+  __ ldub(ref_type_adr, tmp_reg);
 
   // _reference_type field is of type ReferenceType (enum)
   assert(REF_NONE == 0, "check this code");
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -3231,6 +3231,26 @@
   // no-op on TSO
 }
 
+void LIR_Assembler::membar_loadload() {
+  // no-op
+  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
+}
+
+void LIR_Assembler::membar_storestore() {
+  // no-op
+  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
+}
+
+void LIR_Assembler::membar_loadstore() {
+  // no-op
+  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
+}
+
+void LIR_Assembler::membar_storeload() {
+  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+}
+
+
 // Pack two sequential registers containing 32 bit values
 // into a single 64 bit register.
 // src and src->successor() are packed into dst
--- a/src/cpu/sparc/vm/c2_globals_sparc.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
 #else
 define_pd_global(bool, ProfileInterpreter,           true);
 #endif // CC_INTERP
-define_pd_global(bool, TieredCompilation,            true);
+define_pd_global(bool, TieredCompilation,            trueInTiered);
 define_pd_global(intx, CompileThreshold,             10000);
 define_pd_global(intx, BackEdgeThreshold,            140000);
 
--- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1187,7 +1187,7 @@
 
   #ifdef ASSERT
     __ tst(O1);
-    __ breakpoint_trap(Assembler::zero);
+    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
   #endif // ASSERT
 
     const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;
--- a/src/cpu/sparc/vm/frame_sparc.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/sparc/vm/frame_sparc.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "oops/markOop.hpp"
 #include "oops/methodOop.hpp"
 #include "oops/oop.inline.hpp"
+#include "prims/methodHandles.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
@@ -810,7 +811,7 @@
 }
 
 
-#ifdef ASSERT
+#ifndef PRODUCT
 
 #define DESCRIBE_FP_OFFSET(name) \
   values.describe(frame_no, fp() + frame::name##_offset, #name)
@@ -820,11 +821,19 @@
     values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1);
   }
 
-  if (is_interpreted_frame()) {
+  if (is_ricochet_frame()) {
+    MethodHandles::RicochetFrame::describe(this, values, frame_no);
+  } else if (is_interpreted_frame()) {
     DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
     DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
     DESCRIBE_FP_OFFSET(interpreter_frame_padding);
     DESCRIBE_FP_OFFSET(interpreter_frame_oop_temp);
+
+    // esp, according to Lesp (e.g. not depending on bci), if it seems valid
+    intptr_t* esp = *interpreter_frame_esp_addr();
+    if ((esp >= sp()) && (esp < fp())) {
+      values.describe(-1, esp, "*Lesp");
+    }
   }
 
   if (!is_compiled_frame()) {
@@ -844,4 +853,3 @@
   // unused... but returns fp() to minimize changes introduced by 7087445
   return fp();
 }
-
--- a/src/cpu/sparc/vm/globals_sparc.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/sparc/vm/globals_sparc.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -56,14 +56,15 @@
 // Stack slots are 2X larger in LP64 than in the 32 bit VM.
 define_pd_global(intx, ThreadStackSize,       1024);
 define_pd_global(intx, VMThreadStackSize,     1024);
+define_pd_global(intx, StackShadowPages, 10 DEBUG_ONLY(+1));
 #else
 define_pd_global(intx, ThreadStackSize,       512);
 define_pd_global(intx, VMThreadStackSize,     512);
+define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
 #endif
 
 define_pd_global(intx, StackYellowPages, 2);
 define_pd_global(intx, StackRedPages, 1);
-define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
 
 define_pd_global(intx, PreInflateSpin,       40);  // Determined by running design center
 
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -177,7 +177,7 @@
   BLOCK_COMMENT("ricochet_blob.bounce");
 
   if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
-  trace_method_handle(_masm, "ricochet_blob.bounce");
+  trace_method_handle(_masm, "return/ricochet_blob.bounce");
 
   __ JMP(L1_continuation, 0);
   __ delayed()->nop();
@@ -268,14 +268,16 @@
 }
 
 // Emit code to verify that FP is pointing at a valid ricochet frame.
-#ifdef ASSERT
+#ifndef PRODUCT
 enum {
   ARG_LIMIT = 255, SLOP = 45,
   // use this parameter for checking for garbage stack movements:
   UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
   // the slop defends against false alarms due to fencepost errors
 };
+#endif
 
+#ifdef ASSERT
 void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
   // The stack should look like this:
   //    ... keep1 | dest=42 | keep2 | magic | handler | magic | recursive args | [RF]
@@ -1001,31 +1003,142 @@
 }
 
 #ifndef PRODUCT
+void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no)  {
+    RicochetFrame* rf = new RicochetFrame(*fr);
+
+    // ricochet slots (kept in registers for sparc)
+    values.describe(frame_no, rf->register_addr(I5_savedSP), err_msg("exact_sender_sp reg for #%d", frame_no));
+    values.describe(frame_no, rf->register_addr(L5_conversion), err_msg("conversion reg for #%d", frame_no));
+    values.describe(frame_no, rf->register_addr(L4_saved_args_base), err_msg("saved_args_base reg for #%d", frame_no));
+    values.describe(frame_no, rf->register_addr(L3_saved_args_layout), err_msg("saved_args_layout reg for #%d", frame_no));
+    values.describe(frame_no, rf->register_addr(L2_saved_target), err_msg("saved_target reg for #%d", frame_no));
+    values.describe(frame_no, rf->register_addr(L1_continuation), err_msg("continuation reg for #%d", frame_no));
+
+    // relevant ricochet targets (in caller frame)
+    values.describe(-1, rf->saved_args_base(),  err_msg("*saved_args_base for #%d", frame_no));
+    values.describe(-1, (intptr_t *)(STACK_BIAS+(uintptr_t)rf->exact_sender_sp()),  err_msg("*exact_sender_sp+STACK_BIAS for #%d", frame_no));
+}
+#endif // PRODUCT
+
+#ifndef PRODUCT
 extern "C" void print_method_handle(oop mh);
 void trace_method_handle_stub(const char* adaptername,
                               oopDesc* mh,
-                              intptr_t* saved_sp) {
+                              intptr_t* saved_sp,
+                              intptr_t* args,
+                              intptr_t* tracing_fp) {
   bool has_mh = (strstr(adaptername, "return/") == NULL);  // return adapters don't have mh
-  tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp);
-  if (has_mh)
+
+  tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp, args);
+
+  if (Verbose) {
+    // dumping last frame with frame::describe
+
+    JavaThread* p = JavaThread::active();
+
+    ResourceMark rm;
+    PRESERVE_EXCEPTION_MARK; // may not be needed, but is safer and inexpensive here
+    FrameValues values;
+
+    // Note: We want to allow trace_method_handle from any call site.
+    // While trace_method_handle creates a frame, it may be entered
+    // without a valid return PC in O7 (e.g. not just after a call).
+    // Walking that frame could lead to failures due to that invalid PC.
+    // => carefully detect that frame when doing the stack walking
+
+    // walk up to the right frame using the "tracing_fp" argument
+    intptr_t* cur_sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
+    frame cur_frame(cur_sp, frame::unpatchable, NULL);
+
+    while (cur_frame.fp() != (intptr_t *)(STACK_BIAS+(uintptr_t)tracing_fp)) {
+      cur_frame = os::get_sender_for_C_frame(&cur_frame);
+    }
+
+    // safely create a frame and call frame::describe
+    intptr_t *dump_sp = cur_frame.sender_sp();
+    intptr_t *dump_fp = cur_frame.link();
+
+    bool walkable = has_mh; // whether the traced frame should be walkable
+
+    // the sender for cur_frame is the caller of trace_method_handle
+    if (walkable) {
+      // The previous definition of walkable may have to be refined
+      // if new call sites cause the next frame constructor to start
+      // failing. Alternatively, frame constructors could be
+      // modified to support the current or future non walkable
+      // frames (but this is more intrusive and is not considered as
+      // part of this RFE, which will instead use a simpler output).
+      frame dump_frame = frame(dump_sp,
+                               cur_frame.sp(), // younger_sp
+                               false); // no adaptation
+      dump_frame.describe(values, 1);
+    } else {
+      // Robust dump for frames which cannot be constructed from sp/younger_sp
+      // Add descriptions without building a Java frame to avoid issues
+      values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
+      values.describe(-1, dump_sp, "sp");
+    }
+
+    bool has_args = has_mh; // whether Gargs is meaningful
+
+    // mark args, if they seem valid (may not be valid for some adapters)
+    if (has_args) {
+      if ((args >= dump_sp) && (args < dump_fp)) {
+        values.describe(-1, args, "*G4_args");
+      }
+    }
+
+    // mark saved_sp, if it seems valid (may not be valid for some adapters)
+    intptr_t *unbiased_sp = (intptr_t *)(STACK_BIAS+(uintptr_t)saved_sp);
+    if ((unbiased_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (unbiased_sp < dump_fp)) {
+      values.describe(-1, unbiased_sp, "*saved_sp+STACK_BIAS");
+    }
+
+    // Note: the unextended_sp may not be correct
+    tty->print_cr("  stack layout:");
+    values.print(p);
+  }
+
+  if (has_mh) {
     print_method_handle(mh);
+  }
 }
+
 void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
   if (!TraceMethodHandles)  return;
   BLOCK_COMMENT("trace_method_handle {");
   // save: Gargs, O5_savedSP
-  __ save_frame(16);
+  __ save_frame(16); // need space for saving required FPU state
+
   __ set((intptr_t) adaptername, O0);
   __ mov(G3_method_handle, O1);
   __ mov(I5_savedSP, O2);
+  __ mov(Gargs, O3);
+  __ mov(I6, O4); // frame identifier for safe stack walking
+
+  // Save scratched registers that might be needed. Robustness is more
+  // important than optimizing the saves for this debug only code.
+
+  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
+  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
+  __ stf(FloatRegisterImpl::D, Ftos_d, d_save);
+  // Safely save all globals but G2 (handled by call_VM_leaf) and G7
+  // (OS reserved).
   __ mov(G3_method_handle, L3);
   __ mov(Gargs, L4);
   __ mov(G5_method_type, L5);
-  __ call_VM_leaf(L7, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
+  __ mov(G6, L6);
+  __ mov(G1, L1);
+
+  __ call_VM_leaf(L2 /* for G2 */, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
 
   __ mov(L3, G3_method_handle);
   __ mov(L4, Gargs);
   __ mov(L5, G5_method_type);
+  __ mov(L6, G6);
+  __ mov(L1, G1);
+  __ ldf(FloatRegisterImpl::D, d_save, Ftos_d);
+
   __ restore();
   BLOCK_COMMENT("} trace_method_handle");
 }
@@ -1250,7 +1363,7 @@
         move_typed_arg(_masm, arg_type, false,
                        prim_value_addr,
                        Address(O0_argslot, 0),
-                       O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
+                      O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
       }
 
       if (direct_to_method) {
--- a/src/cpu/sparc/vm/methodHandles_sparc.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/sparc/vm/methodHandles_sparc.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -145,6 +145,8 @@
   }
 
   static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
+
+  static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN;
 };
 
 // Additional helper methods for MethodHandles code generation:
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -321,6 +321,16 @@
   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 }
 
+static VMRegPair reg64_to_VMRegPair(Register r) {
+  VMRegPair ret;
+  if (wordSize == 8) {
+    ret.set2(r->as_VMReg());
+  } else {
+    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
+  }
+  return ret;
+}
+
 // ---------------------------------------------------------------------------
 // Read the array of BasicTypes from a signature, and compute where the
 // arguments should go.  Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
@@ -1444,6 +1454,25 @@
 }
 
 
+static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
+  if (src.first()->is_stack()) {
+    if (dst.first()->is_stack()) {
+      // stack to stack
+      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
+      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
+    } else {
+      // stack to reg
+      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
+    }
+  } else if (dst.first()->is_stack()) {
+    // reg to stack
+    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
+  } else {
+    __ mov(src.first()->as_Register(), dst.first()->as_Register());
+  }
+}
+
+
 // An oop arg. Must pass a handle not the oop itself
 static void object_move(MacroAssembler* masm,
                         OopMap* map,
@@ -1748,6 +1777,166 @@
   }
 }
 
+
+static void save_or_restore_arguments(MacroAssembler* masm,
+                                      const int stack_slots,
+                                      const int total_in_args,
+                                      const int arg_save_area,
+                                      OopMap* map,
+                                      VMRegPair* in_regs,
+                                      BasicType* in_sig_bt) {
+  // if map is non-NULL then the code should store the values,
+  // otherwise it should load them.
+  if (map != NULL) {
+    // Fill in the map
+    for (int i = 0; i < total_in_args; i++) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        if (in_regs[i].first()->is_stack()) {
+          int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
+          map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
+        } else if (in_regs[i].first()->is_Register()) {
+          map->set_oop(in_regs[i].first());
+        } else {
+          ShouldNotReachHere();
+        }
+      }
+    }
+  }
+
+  // Save or restore double word values
+  int handle_index = 0;
+  for (int i = 0; i < total_in_args; i++) {
+    int slot = handle_index + arg_save_area;
+    int offset = slot * VMRegImpl::stack_slot_size;
+    if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
+      const Register reg = in_regs[i].first()->as_Register();
+      if (reg->is_global()) {
+        handle_index += 2;
+        assert(handle_index <= stack_slots, "overflow");
+        if (map != NULL) {
+          __ stx(reg, SP, offset + STACK_BIAS);
+        } else {
+          __ ldx(SP, offset + STACK_BIAS, reg);
+        }
+      }
+    } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
+      handle_index += 2;
+      assert(handle_index <= stack_slots, "overflow");
+      if (map != NULL) {
+        __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
+      } else {
+        __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
+      }
+    }
+  }
+  // Save floats
+  for (int i = 0; i < total_in_args; i++) {
+    int slot = handle_index + arg_save_area;
+    int offset = slot * VMRegImpl::stack_slot_size;
+    if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
+      handle_index++;
+      assert(handle_index <= stack_slots, "overflow");
+      if (map != NULL) {
+        __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
+      } else {
+        __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
+      }
+    }
+  }
+
+}
+
+
+// Check GC_locker::needs_gc and enter the runtime if it's true.  This
+// keeps a new JNI critical region from starting until a GC has been
+// forced.  Save down any oops in registers and describe them in an
+// OopMap.
+static void check_needs_gc_for_critical_native(MacroAssembler* masm,
+                                               const int stack_slots,
+                                               const int total_in_args,
+                                               const int arg_save_area,
+                                               OopMapSet* oop_maps,
+                                               VMRegPair* in_regs,
+                                               BasicType* in_sig_bt) {
+  __ block_comment("check GC_locker::needs_gc");
+  Label cont;
+  AddressLiteral sync_state(GC_locker::needs_gc_address());
+  __ load_bool_contents(sync_state, G3_scratch);
+  __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
+  __ delayed()->nop();
+
+  // Save down any values that are live in registers and call into the
+  // runtime to halt for a GC
+  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+  save_or_restore_arguments(masm, stack_slots, total_in_args,
+                            arg_save_area, map, in_regs, in_sig_bt);
+
+  __ mov(G2_thread, L7_thread_cache);
+
+  __ set_last_Java_frame(SP, noreg);
+
+  __ block_comment("block_for_jni_critical");
+  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
+  __ delayed()->mov(L7_thread_cache, O0);
+  oop_maps->add_gc_map( __ offset(), map);
+
+  __ restore_thread(L7_thread_cache); // restore G2_thread
+  __ reset_last_Java_frame();
+
+  // Reload all the register arguments
+  save_or_restore_arguments(masm, stack_slots, total_in_args,
+                            arg_save_area, NULL, in_regs, in_sig_bt);
+
+  __ bind(cont);
+#ifdef ASSERT
+  if (StressCriticalJNINatives) {
+    // Stress register saving
+    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+    save_or_restore_arguments(masm, stack_slots, total_in_args,
+                              arg_save_area, map, in_regs, in_sig_bt);
+    // Destroy argument registers
+    for (int i = 0; i < total_in_args; i++) {
+      if (in_regs[i].first()->is_Register()) {
+        const Register reg = in_regs[i].first()->as_Register();
+        if (reg->is_global()) {
+          __ mov(G0, reg);
+        }
+      } else if (in_regs[i].first()->is_FloatRegister()) {
+        __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
+      }
+    }
+
+    save_or_restore_arguments(masm, stack_slots, total_in_args,
+                              arg_save_area, NULL, in_regs, in_sig_bt);
+  }
+#endif
+}
+
+// Unpack an array argument into a pointer to the body and the length
+// if the array is non-null, otherwise pass 0 for both.
+static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
+  // Pass the length, ptr pair
+  Label is_null, done;
+  if (reg.first()->is_stack()) {
+    VMRegPair tmp  = reg64_to_VMRegPair(L2);
+    // Load the arg up from the stack
+    move_ptr(masm, reg, tmp);
+    reg = tmp;
+  }
+  __ cmp(reg.first()->as_Register(), G0);
+  __ brx(Assembler::equal, false, Assembler::pt, is_null);
+  __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
+  move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
+  __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
+  move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
+  __ ba_short(done);
+  __ bind(is_null);
+  // Pass zeros
+  move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
+  move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
+  __ bind(done);
+}
+
 // ---------------------------------------------------------------------------
 // Generate a native wrapper for a given method.  The method takes arguments
 // in the Java compiled code convention, marshals them to the native
@@ -1762,6 +1951,13 @@
                                                 BasicType *in_sig_bt,
                                                 VMRegPair *in_regs,
                                                 BasicType ret_type) {
+  bool is_critical_native = true;
+  address native_func = method->critical_native_function();
+  if (native_func == NULL) {
+    native_func = method->native_function();
+    is_critical_native = false;
+  }
+  assert(native_func != NULL, "must have function");
 
   // Native nmethod wrappers never take possession of the oop arguments.
   // So the caller will gc the arguments. The only thing we need an
@@ -1841,22 +2037,70 @@
   // we convert the java signature to a C signature by inserting
   // the hidden arguments as arg[0] and possibly arg[1] (static method)
 
-  int total_c_args = total_in_args + 1;
-  if (method->is_static()) {
-    total_c_args++;
+  int total_c_args = total_in_args;
+  int total_save_slots = 6 * VMRegImpl::slots_per_word;
+  if (!is_critical_native) {
+    total_c_args += 1;
+    if (method->is_static()) {
+      total_c_args++;
+    }
+  } else {
+    for (int i = 0; i < total_in_args; i++) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        // These have to be saved and restored across the safepoint
+        total_c_args++;
+      }
+    }
   }
 
   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
-  VMRegPair  * out_regs   = NEW_RESOURCE_ARRAY(VMRegPair,   total_c_args);
+  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
+  BasicType* in_elem_bt = NULL;
 
   int argc = 0;
-  out_sig_bt[argc++] = T_ADDRESS;
-  if (method->is_static()) {
-    out_sig_bt[argc++] = T_OBJECT;
-  }
-
-  for (int i = 0; i < total_in_args ; i++ ) {
-    out_sig_bt[argc++] = in_sig_bt[i];
+  if (!is_critical_native) {
+    out_sig_bt[argc++] = T_ADDRESS;
+    if (method->is_static()) {
+      out_sig_bt[argc++] = T_OBJECT;
+    }
+
+    for (int i = 0; i < total_in_args ; i++ ) {
+      out_sig_bt[argc++] = in_sig_bt[i];
+    }
+  } else {
+    Thread* THREAD = Thread::current();
+    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
+    SignatureStream ss(method->signature());
+    for (int i = 0; i < total_in_args ; i++ ) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        // Arrays are passed as int, elem* pair
+        out_sig_bt[argc++] = T_INT;
+        out_sig_bt[argc++] = T_ADDRESS;
+        Symbol* atype = ss.as_symbol(CHECK_NULL);
+        const char* at = atype->as_C_string();
+        if (strlen(at) == 2) {
+          assert(at[0] == '[', "must be");
+          switch (at[1]) {
+            case 'B': in_elem_bt[i]  = T_BYTE; break;
+            case 'C': in_elem_bt[i]  = T_CHAR; break;
+            case 'D': in_elem_bt[i]  = T_DOUBLE; break;
+            case 'F': in_elem_bt[i]  = T_FLOAT; break;
+            case 'I': in_elem_bt[i]  = T_INT; break;
+            case 'J': in_elem_bt[i]  = T_LONG; break;
+            case 'S': in_elem_bt[i]  = T_SHORT; break;
+            case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
+            default: ShouldNotReachHere();
+          }
+        }
+      } else {
+        out_sig_bt[argc++] = in_sig_bt[i];
+        in_elem_bt[i] = T_VOID;
+      }
+      if (in_sig_bt[i] != T_VOID) {
+        assert(in_sig_bt[i] == ss.type(), "must match");
+        ss.next();
+      }
+    }
   }
 
   // Now figure out where the args must be stored and how much stack space
@@ -1866,6 +2110,35 @@
   int out_arg_slots;
   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
 
+  if (is_critical_native) {
+    // Critical natives may have to call out so they need a save area
+    // for register arguments.
+    int double_slots = 0;
+    int single_slots = 0;
+    for ( int i = 0; i < total_in_args; i++) {
+      if (in_regs[i].first()->is_Register()) {
+        const Register reg = in_regs[i].first()->as_Register();
+        switch (in_sig_bt[i]) {
+          case T_ARRAY:
+          case T_BOOLEAN:
+          case T_BYTE:
+          case T_SHORT:
+          case T_CHAR:
+          case T_INT:  assert(reg->is_in(), "don't need to save these"); break;
+          case T_LONG: if (reg->is_global()) double_slots++; break;
+          default:  ShouldNotReachHere();
+        }
+      } else if (in_regs[i].first()->is_FloatRegister()) {
+        switch (in_sig_bt[i]) {
+          case T_FLOAT:  single_slots++; break;
+          case T_DOUBLE: double_slots++; break;
+          default:  ShouldNotReachHere();
+        }
+      }
+    }
+    total_save_slots = double_slots * 2 + single_slots;
+  }
+
   // Compute framesize for the wrapper.  We need to handlize all oops in
   // registers. We must create space for them here that is disjoint from
   // the windowed save area because we have no control over when we might
@@ -1885,12 +2158,11 @@
 
   // Now the space for the inbound oop handle area
 
-  int oop_handle_offset = stack_slots;
-  stack_slots += 6*VMRegImpl::slots_per_word;
+  int oop_handle_offset = round_to(stack_slots, 2);
+  stack_slots += total_save_slots;
 
   // Now any space we need for handlizing a klass if static method
 
-  int oop_temp_slot_offset = 0;
   int klass_slot_offset = 0;
   int klass_offset = -1;
   int lock_slot_offset = 0;
@@ -1954,6 +2226,10 @@
 
   __ verify_thread();
 
+  if (is_critical_native) {
+    check_needs_gc_for_critical_native(masm, stack_slots,  total_in_args,
+                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
+  }
 
   //
   // We immediately shuffle the arguments so that any vm call we have to
@@ -1982,7 +2258,6 @@
   // caller.
   //
   OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
-  int c_arg = total_c_args - 1;
   // Record sp-based slot for receiver on stack for non-static methods
   int receiver_offset = -1;
 
@@ -2002,7 +2277,7 @@
 
 #endif /* ASSERT */
 
-  for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
+  for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {
 
 #ifdef ASSERT
     if (in_regs[i].first()->is_Register()) {
@@ -2019,7 +2294,13 @@
 
     switch (in_sig_bt[i]) {
       case T_ARRAY:
+        if (is_critical_native) {
+          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
+          c_arg--;
+          break;
+        }
       case T_OBJECT:
+        assert(!is_critical_native, "no oop arguments");
         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                     ((i == 0) && (!is_static)),
                     &receiver_offset);
@@ -2029,7 +2310,7 @@
 
       case T_FLOAT:
         float_move(masm, in_regs[i], out_regs[c_arg]);
-          break;
+        break;
 
       case T_DOUBLE:
         assert( i + 1 < total_in_args &&
@@ -2051,7 +2332,7 @@
 
   // Pre-load a static method's oop into O1.  Used both by locking code and
   // the normal JNI call code.
-  if (method->is_static()) {
+  if (method->is_static() && !is_critical_native) {
     __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);
 
     // Now handlize the static class mirror in O1.  It's known not-null.
@@ -2064,13 +2345,13 @@
   const Register L6_handle = L6;
 
   if (method->is_synchronized()) {
+    assert(!is_critical_native, "unhandled");
     __ mov(O1, L6_handle);
   }
 
   // We have all of the arguments setup at this point. We MUST NOT touch any Oregs
   // except O6/O7. So if we must call out we must push a new frame. We immediately
   // push a new frame and flush the windows.
-
 #ifdef _LP64
   intptr_t thepc = (intptr_t) __ pc();
   {
@@ -2202,32 +2483,28 @@
   }
 
   // get JNIEnv* which is first argument to native
-
-  __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
+  if (!is_critical_native) {
+    __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
+  }
 
   // Use that pc we placed in O7 a while back as the current frame anchor
-
   __ set_last_Java_frame(SP, O7);
 
+  // We flushed the windows ages ago; now mark them as flushed before transitioning.
+  __ set(JavaFrameAnchor::flushed, G3_scratch);
+  __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
+
   // Transition from _thread_in_Java to _thread_in_native.
   __ set(_thread_in_native, G3_scratch);
-  __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
-
-  // We flushed the windows ages ago now mark them as flushed
-
-  // mark windows as flushed
-  __ set(JavaFrameAnchor::flushed, G3_scratch);
-
-  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
 
 #ifdef _LP64
-  AddressLiteral dest(method->native_function());
+  AddressLiteral dest(native_func);
   __ relocate(relocInfo::runtime_call_type);
   __ jumpl_to(dest, O7, O7);
 #else
-  __ call(method->native_function(), relocInfo::runtime_call_type);
+  __ call(native_func, relocInfo::runtime_call_type);
 #endif
-  __ delayed()->st(G3_scratch, flags);
+  __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
 
   __ restore_thread(L7_thread_cache); // restore G2_thread
 
@@ -2259,6 +2536,7 @@
     ShouldNotReachHere();
   }
 
+  Label after_transition;
   // must we block?
 
   // Block, if necessary, before resuming in _thread_in_Java state.
@@ -2303,22 +2581,34 @@
     // a distinct one for this pc
     //
     save_native_result(masm, ret_type, stack_slots);
-    __ call_VM_leaf(L7_thread_cache,
-                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
-                    G2_thread);
+    if (!is_critical_native) {
+      __ call_VM_leaf(L7_thread_cache,
+                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
+                      G2_thread);
+    } else {
+      __ call_VM_leaf(L7_thread_cache,
+                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
+                      G2_thread);
+    }
 
     // Restore any method result value
     restore_native_result(masm, ret_type, stack_slots);
+
+    if (is_critical_native) {
+      // The call above performed the transition to thread_in_Java so
+      // skip the transition logic below.
+      __ ba(after_transition);
+      __ delayed()->nop();
+    }
+
     __ bind(no_block);
   }
 
   // thread state is thread_in_native_trans. Any safepoint blocking has already
   // happened so we can now change state to _thread_in_Java.
-
-
   __ set(_thread_in_Java, G3_scratch);
   __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
-
+  __ bind(after_transition);
 
   Label no_reguard;
   __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
@@ -2416,12 +2706,14 @@
       __ verify_oop(I0);
   }
 
-  // reset handle block
-  __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
-  __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
-
-  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
-  check_forward_pending_exception(masm, G3_scratch);
+  if (!is_critical_native) {
+    // reset handle block
+    __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
+    __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
+
+    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
+    check_forward_pending_exception(masm, G3_scratch);
+  }
 
 
   // Return
@@ -2450,6 +2742,10 @@
                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                             in_ByteSize(lock_offset),
                                             oop_maps);
+
+  if (is_critical_native) {
+    nm->set_lazy_critical_native(true);
+  }
   return nm;
 
 }
@@ -2473,17 +2769,6 @@
 static int  fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
 static bool offsets_initialized = false;
 
-static VMRegPair reg64_to_VMRegPair(Register r) {
-  VMRegPair ret;
-  if (wordSize == 8) {
-    ret.set2(r->as_VMReg());
-  } else {
-    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
-  }
-  return ret;
-}
-
-
 nmethod *SharedRuntime::generate_dtrace_nmethod(
     MacroAssembler *masm, methodHandle method) {
 
@@ -3040,7 +3325,7 @@
   // make sure that the frames are aligned properly
 #ifndef _LP64
   __ btst(wordSize*2-1, SP);
-  __ breakpoint_trap(Assembler::notZero);
+  __ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
 #endif
   #endif
 
@@ -3122,7 +3407,7 @@
 #ifdef ASSERT
   // make sure that there is at least one entry in the array
   __ tst(O4array_size);
-  __ breakpoint_trap(Assembler::zero);
+  __ breakpoint_trap(Assembler::zero, Assembler::icc);
 #endif
 
   // Now push the new interpreter frames
@@ -3146,6 +3431,9 @@
   ResourceMark rm;
   // setup code generation tools
   int pad = VerifyThread ? 512 : 0;// Extra slop space for more verify code
+  if (UseStackBanging) {
+    pad += StackShadowPages*16 + 32;
+  }
 #ifdef _LP64
   CodeBuffer buffer("deopt_blob", 2100+pad, 512);
 #else
@@ -3365,6 +3653,9 @@
   ResourceMark rm;
   // setup code generation tools
   int pad = VerifyThread ? 512 : 0;
+  if (UseStackBanging) {
+    pad += StackShadowPages*16 + 32;
+  }
 #ifdef _LP64
   CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
 #else
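
A note on the critical-native support added to sharedRuntime_sparc.cpp above: when method->critical_native_function() is non-NULL, the wrapper calls that entry point instead of the regular native function, omits the JNIEnv*/jclass arguments, and, following unpack_array_argument() and the T_ARRAY handling in the signature loop, passes each primitive-array argument as an (int length, element pointer) pair. The sketch below shows what such a pair of entry points could look like; the class and method names and the JavaCritical_ prefix are assumptions for illustration only, not text from this changeset.

#include <jni.h>

// Standard JNI entry point: receives JNIEnv*/jclass and must reach the
// array body through JNI calls.
extern "C" JNIEXPORT jint JNICALL
Java_Example_sum(JNIEnv* env, jclass clazz, jintArray data) {
  jint  len  = env->GetArrayLength(data);
  jint* body = env->GetIntArrayElements(data, NULL);
  jint  s    = 0;
  for (jint i = 0; i < len; i++) s += body[i];
  env->ReleaseIntArrayElements(data, body, JNI_ABORT);
  return s;
}

// Hypothetical critical counterpart: no JNIEnv*/jclass, the array arrives as a
// (length, raw body) pair, and the wrapper above has already checked
// GC_locker::needs_gc() before entering this code, so the body can be used directly.
extern "C" jint
JavaCritical_Example_sum(jint length, jint* body) {
  jint s = 0;
  for (jint i = 0; i < length; i++) s += body[i];
  return s;
}
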
--- a/src/cpu/sparc/vm/sparc.ad	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/sparc/vm/sparc.ad	Tue Apr 10 10:42:34 2012 -0700
@@ -1832,6 +1832,8 @@
   case Op_CountLeadingZerosL:
   case Op_CountTrailingZerosI:
   case Op_CountTrailingZerosL:
+  case Op_PopCountI:
+  case Op_PopCountL:
     if (!UsePopCountInstruction)
       return false;
     break;
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -379,7 +379,7 @@
 
 #ifdef ASSERT
     __ tst(O0);
-    __ breakpoint_trap(Assembler::zero);
+    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
 #endif // ASSERT
 
     __ bind(done);
@@ -2050,7 +2050,7 @@
   AddressLiteral stop_at(&StopInterpreterAt);
   __ load_ptr_contents(stop_at, G4_scratch);
   __ cmp(G3_scratch, G4_scratch);
-  __ breakpoint_trap(Assembler::equal);
+  __ breakpoint_trap(Assembler::equal, Assembler::icc);
 }
 #endif // not PRODUCT
 #endif // !CC_INTERP
--- a/src/cpu/x86/vm/assembler_x86.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -236,6 +236,16 @@
   }
 }
 
+// Force generation of a 4 byte immediate value even if it fits into 8bit
+void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
+  assert(isByte(op1) && isByte(op2), "wrong opcode");
+  assert((op1 & 0x01) == 1, "should be 32bit operation");
+  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
+  emit_byte(op1);
+  emit_byte(op2 | encode(dst));
+  emit_long(imm32);
+}
+
 // immediate-to-memory forms
 void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
   assert((op1 & 0x01) == 1, "should be 32bit operation");
@@ -939,6 +949,7 @@
 }
 
 void Assembler::addr_nop_4() {
+  assert(UseAddressNop, "no CPU support");
   // 4 bytes: NOP DWORD PTR [EAX+0]
   emit_byte(0x0F);
   emit_byte(0x1F);
@@ -947,6 +958,7 @@
 }
 
 void Assembler::addr_nop_5() {
+  assert(UseAddressNop, "no CPU support");
   // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
   emit_byte(0x0F);
   emit_byte(0x1F);
@@ -956,6 +968,7 @@
 }
 
 void Assembler::addr_nop_7() {
+  assert(UseAddressNop, "no CPU support");
   // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
   emit_byte(0x0F);
   emit_byte(0x1F);
@@ -964,6 +977,7 @@
 }
 
 void Assembler::addr_nop_8() {
+  assert(UseAddressNop, "no CPU support");
   // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
   emit_byte(0x0F);
   emit_byte(0x1F);
@@ -2769,6 +2783,12 @@
   emit_arith(0x81, 0xE8, dst, imm32);
 }
 
+// Force generation of a 4 byte immediate value even if it fits into 8bit
+void Assembler::subl_imm32(Register dst, int32_t imm32) {
+  prefix(dst);
+  emit_arith_imm32(0x81, 0xE8, dst, imm32);
+}
+
 void Assembler::subl(Register dst, Address src) {
   InstructionMark im(this);
   prefix(src, dst);
@@ -4760,6 +4780,12 @@
   emit_arith(0x81, 0xE8, dst, imm32);
 }
 
+// Force generation of a 4 byte immediate value even if it fits into 8bit
+void Assembler::subq_imm32(Register dst, int32_t imm32) {
+  (void) prefixq_and_encode(dst->encoding());
+  emit_arith_imm32(0x81, 0xE8, dst, imm32);
+}
+
 void Assembler::subq(Register dst, Address src) {
   InstructionMark im(this);
   prefixq(src, dst);
@@ -5101,15 +5127,6 @@
   }
 }
 
-void MacroAssembler::fat_nop() {
-  // A 5 byte nop that is safe for patching (see patch_verified_entry)
-  emit_byte(0x26); // es:
-  emit_byte(0x2e); // cs:
-  emit_byte(0x64); // fs:
-  emit_byte(0x65); // gs:
-  emit_byte(0x90);
-}
-
 void MacroAssembler::jC2(Register tmp, Label& L) {
   // set parity bit if FPU flag C2 is set (via rax)
   save_rax(tmp);
@@ -5704,17 +5721,6 @@
   /* else */      { subq(dst, value)       ; return; }
 }
 
-void MacroAssembler::fat_nop() {
-  // A 5 byte nop that is safe for patching (see patch_verified_entry)
-  // Recommened sequence from 'Software Optimization Guide for the AMD
-  // Hammer Processor'
-  emit_byte(0x66);
-  emit_byte(0x66);
-  emit_byte(0x90);
-  emit_byte(0x66);
-  emit_byte(0x90);
-}
-
 void MacroAssembler::incrementq(Register reg, int value) {
   if (value == min_jint) { addq(reg, value); return; }
   if (value <  0) { decrementq(reg, -value); return; }
@@ -6766,6 +6772,19 @@
   mov(rbp, rsp);
 }
 
+// A 5 byte nop that is safe for patching (see patch_verified_entry)
+void MacroAssembler::fat_nop() {
+  if (UseAddressNop) {
+    addr_nop_5();
+  } else {
+    emit_byte(0x26); // es:
+    emit_byte(0x2e); // cs:
+    emit_byte(0x64); // fs:
+    emit_byte(0x65); // gs:
+    emit_byte(0x90);
+  }
+}
+
 void MacroAssembler::fcmp(Register tmp) {
   fcmp(tmp, 1, true, true);
 }
@@ -7825,6 +7844,11 @@
   LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
 }
 
+// Force generation of a 4 byte immediate value even if it fits into 8bit
+void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
+  LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
+}
+
 void MacroAssembler::subptr(Register dst, Register src) {
   LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
 }
@@ -9292,6 +9316,80 @@
 }
 #endif // _LP64
 
+
+// C2 compiled method's prolog code.
+void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode_24b) {
+
+  // WARNING: Initial instruction MUST be 5 bytes or longer so that
+  // NativeJump::patch_verified_entry will be able to patch out the entry
+  // code safely. The push to verify stack depth is ok at 5 bytes,
+  // the frame allocation can be either 3 or 6 bytes. So if we don't do
+  // stack bang then we must use the 6 byte frame allocation even if
+  // we have no frame. :-(
+
+  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
+  // Remove word for return addr
+  framesize -= wordSize;
+
+  // Calls to C2R adapters often do not accept exceptional returns.
+  // We require that their callers must bang for them.  But be careful, because
+  // some VM calls (such as call site linkage) can use several kilobytes of
+  // stack.  But the stack safety zone should account for that.
+  // See bugs 4446381, 4468289, 4497237.
+  if (stack_bang) {
+    generate_stack_overflow_check(framesize);
+
+    // We always push rbp so that on return to the interpreter rbp will be
+    // restored correctly and we can correct the stack.
+    push(rbp);
+    // Remove word for ebp
+    framesize -= wordSize;
+
+    // Create frame
+    if (framesize) {
+      subptr(rsp, framesize);
+    }
+  } else {
+    // Create frame (force generation of a 4 byte immediate value)
+    subptr_imm32(rsp, framesize);
+
+    // Save RBP register now.
+    framesize -= wordSize;
+    movptr(Address(rsp, framesize), rbp);
+  }
+
+  if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
+    framesize -= wordSize;
+    movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
+  }
+
+#ifndef _LP64
+  // If method sets FPU control word do it now
+  if (fp_mode_24b) {
+    fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
+  }
+  if (UseSSE >= 2 && VerifyFPU) {
+    verify_FPU(0, "FPU stack must be clean on entry");
+  }
+#endif
+
+#ifdef ASSERT
+  if (VerifyStackAtCalls) {
+    Label L;
+    push(rax);
+    mov(rax, rsp);
+    andptr(rax, StackAlignmentInBytes-1);
+    cmpptr(rax, StackAlignmentInBytes-wordSize);
+    pop(rax);
+    jcc(Assembler::equal, L);
+    stop("Stack is not properly aligned!");
+    bind(L);
+  }
+#endif
+
+}
+
+
 // IndexOf for constant substrings with size >= 8 chars
 // which don't need to be loaded through stack.
 void MacroAssembler::string_indexofC8(Register str1, Register str2,
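The additions above (emit_arith_imm32, subl_imm32/subq_imm32, the UseAddressNop asserts, and the relocated fat_nop()/verified_entry()) all serve the same constraint: the first instruction at a verified entry must be at least 5 bytes long so that NativeJump::patch_verified_entry can later overwrite it with a 5-byte jump. A standalone sketch, using the conventional 32-bit encodings (illustrative only, not HotSpot code), of the instruction sizes involved:

// Illustrative sketch: why forcing a 32-bit immediate (opcode 0x81) instead
// of the short sign-extended form (opcode 0x83) keeps the frame-allocation
// instruction long enough to be patched by a 5-byte jump.
#include <cstdio>
#include <cstddef>

static void dump(const char* what, const unsigned char* bytes, size_t n) {
  std::printf("%-36s", what);
  for (size_t i = 0; i < n; i++) std::printf(" %02x", bytes[i]);
  std::printf("   (%zu bytes)\n", n);
}

int main() {
  // Short form the assembler would normally pick for a small frame.
  const unsigned char sub_short[] = { 0x83, 0xEC, 0x08 };                // sub esp, 8
  // Forced long form, what subl_imm32() emits via emit_arith_imm32().
  const unsigned char sub_long[]  = { 0x81, 0xEC, 0x08, 0x00, 0x00, 0x00 };
  // Recommended 5-byte nop emitted by addr_nop_5() when UseAddressNop is set.
  const unsigned char nop5[]      = { 0x0F, 0x1F, 0x44, 0x00, 0x00 };
  // The 5-byte jump (opcode 0xE9 + rel32) used when patching the entry.
  const unsigned char jmp5[]      = { 0xE9, 0x00, 0x00, 0x00, 0x00 };

  dump("sub esp, 8 (short, 0x83)",        sub_short, sizeof(sub_short));
  dump("sub esp, 8 (forced imm32, 0x81)", sub_long,  sizeof(sub_long));
  dump("addr_nop_5 / fat_nop",            nop5,      sizeof(nop5));
  dump("jmp rel32 patch",                 jmp5,      sizeof(jmp5));
  // Only the 6-byte form and the 5-byte nop can safely be overwritten in
  // place by the 5-byte jump; the 3-byte form cannot.
  return 0;
}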
--- a/src/cpu/x86/vm/assembler_x86.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -667,6 +667,8 @@
   void emit_arith_b(int op1, int op2, Register dst, int imm8);
 
   void emit_arith(int op1, int op2, Register dst, int32_t imm32);
+  // Force generation of a 4 byte immediate value even if it fits into 8bit
+  void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32);
   // only 32bit??
   void emit_arith(int op1, int op2, Register dst, jobject obj);
   void emit_arith(int op1, int op2, Register dst, Register src);
@@ -1526,6 +1528,9 @@
   void subq(Register dst, Address src);
   void subq(Register dst, Register src);
 
+  // Force generation of a 4 byte immediate value even if it fits into 8bit
+  void subl_imm32(Register dst, int32_t imm32);
+  void subq_imm32(Register dst, int32_t imm32);
 
   // Subtract Scalar Double-Precision Floating-Point Values
   void subsd(XMMRegister dst, Address src);
@@ -1763,8 +1768,8 @@
   // Alignment
   void align(int modulus);
 
-  // Misc
-  void fat_nop(); // 5 byte nop
+  // A 5 byte nop that is safe for patching (see patch_verified_entry)
+  void fat_nop();
 
   // Stack frame creation/removal
   void enter();
@@ -2275,6 +2280,8 @@
 
   void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
   void subptr(Register dst, int32_t src);
+  // Force generation of a 4 byte immediate value even if it fits into 8bit
+  void subptr_imm32(Register dst, int32_t src);
   void subptr(Register dst, Register src);
   void subptr(Register dst, RegisterOrConstant src) {
     if (src.is_constant()) subptr(dst, (int) src.as_constant());
@@ -2566,6 +2573,9 @@
   void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
   void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
 
+  // C2 compiled method's prolog code.
+  void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b);
+
   // IndexOf strings.
   // Small strings are loaded through stack if they cross page boundary.
   void string_indexof(Register str1, Register str2,
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -520,7 +520,7 @@
   __ load_klass(tmp_reg, src_reg);
 
   Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
-  __ cmpl(ref_type_adr, REF_NONE);
+  __ cmpb(ref_type_adr, REF_NONE);
   __ jcc(Assembler::equal, _continuation);
 
   // Is marking active?
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -3713,6 +3713,25 @@
   // __ store_fence();
 }
 
+void LIR_Assembler::membar_loadload() {
+  // no-op
+  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
+}
+
+void LIR_Assembler::membar_storestore() {
+  // no-op
+  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
+}
+
+void LIR_Assembler::membar_loadstore() {
+  // no-op
+  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
+}
+
+void LIR_Assembler::membar_storeload() {
+  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+}
+
 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
   assert(result_reg->is_register(), "check");
 #ifdef _LP64
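The new LIR_Assembler membar hooks above are no-ops except for membar_storeload: on x86, load-load, load-store and store-store ordering is already guaranteed by the hardware, and only a younger load can be reordered ahead of an older store, so only the StoreLoad case needs a serializing instruction. A minimal sketch of that mapping (plain C++, using ad-hoc names rather than HotSpot's LIR types):

// Sketch: which abstract memory barriers require a real instruction on x86.
#include <cstdio>

enum MembarKind { LoadLoad, LoadStore, StoreStore, StoreLoad };

// Only StoreLoad needs an mfence or a lock-prefixed instruction; the other
// three orderings are already provided by the x86 memory model.
static bool needs_fence_on_x86(MembarKind kind) {
  return kind == StoreLoad;
}

int main() {
  const char* names[] = { "loadload", "loadstore", "storestore", "storeload" };
  for (int k = LoadLoad; k <= StoreLoad; ++k) {
    std::printf("membar_%-10s -> %s\n", names[k],
                needs_fence_on_x86(static_cast<MembarKind>(k)) ? "emit fence" : "no-op");
  }
  return 0;
}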
--- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -381,6 +381,16 @@
 
 
 void C1_MacroAssembler::verified_entry() {
+  if (C1Breakpoint || VerifyFPU || !UseStackBanging) {
+    // Verified Entry first instruction should be 5 bytes long for correct
+    // patching by patch_verified_entry().
+    //
+    // C1Breakpoint and VerifyFPU have a one-byte first instruction.
+    // Also, the first instruction will be the one-byte "push(rbp)" if stack
+    // banging code is not generated (see build_frame() above).
+    // In all these cases generate the long instruction first.
+    fat_nop();
+  }
   if (C1Breakpoint)int3();
   // build frame
   verify_FPU(0, "method_entry");
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -47,6 +47,12 @@
   assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different");
   assert(oop_result1 != thread && oop_result2 != thread, "registers must be different");
   assert(args_size >= 0, "illegal args_size");
+  bool align_stack = false;
+#ifdef _LP64
+  // At a method handle call, the stack may not be properly aligned
+  // when returning with an exception.
+  align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
+#endif
 
 #ifdef _LP64
   mov(c_rarg0, thread);
@@ -59,11 +65,21 @@
   push(thread);
 #endif // _LP64
 
-  set_last_Java_frame(thread, noreg, rbp, NULL);
+  int call_offset;
+  if (!align_stack) {
+    set_last_Java_frame(thread, noreg, rbp, NULL);
+  } else {
+    address the_pc = pc();
+    call_offset = offset();
+    set_last_Java_frame(thread, noreg, rbp, the_pc);
+    andptr(rsp, -(StackAlignmentInBytes));    // Align stack
+  }
 
   // do the call
   call(RuntimeAddress(entry));
-  int call_offset = offset();
+  if (!align_stack) {
+    call_offset = offset();
+  }
   // verify callee-saved register
 #ifdef ASSERT
   guarantee(thread != rax, "change this code");
@@ -78,7 +94,7 @@
   }
   pop(rax);
 #endif
-  reset_last_Java_frame(thread, true, false);
+  reset_last_Java_frame(thread, true, align_stack);
 
   // discard thread and arguments
   NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));
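The call_RT change above records the PC and aligns rsp before calling into the runtime when handling an exception from a method handle call site, because such call sites may leave the stack misaligned. Rounding a pointer down to a power-of-two boundary with a negated mask is the standard trick; a tiny sketch, assuming the usual 16-byte StackAlignmentInBytes on x86-64:

// Sketch: what andptr(rsp, -(StackAlignmentInBytes)) computes, assuming a
// 16-byte alignment requirement.
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t StackAlignmentInBytes = 16;
  uintptr_t sp = 0x7ffdf234;                              // pretend misaligned stack pointer
  uintptr_t aligned = sp & ~(StackAlignmentInBytes - 1);  // same effect as sp & -16
  std::printf("sp      = 0x%lx\n", (unsigned long)sp);
  std::printf("aligned = 0x%lx\n", (unsigned long)aligned);
  return 0;
}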
--- a/src/cpu/x86/vm/c2_globals_x86.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/c2_globals_x86.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@
 #else
 define_pd_global(bool, ProfileInterpreter,           true);
 #endif // CC_INTERP
-define_pd_global(bool, TieredCompilation,            true);
+define_pd_global(bool, TieredCompilation,            trueInTiered);
 define_pd_global(intx, CompileThreshold,             10000);
 define_pd_global(intx, BackEdgeThreshold,            100000);
 
--- a/src/cpu/x86/vm/frame_x86.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/frame_x86.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -28,6 +28,7 @@
 #include "oops/markOop.hpp"
 #include "oops/methodOop.hpp"
 #include "oops/oop.inline.hpp"
+#include "prims/methodHandles.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
@@ -651,13 +652,15 @@
   return &interpreter_frame_tos_address()[index];
 }
 
-#ifdef ASSERT
+#ifndef PRODUCT
 
 #define DESCRIBE_FP_OFFSET(name) \
   values.describe(frame_no, fp() + frame::name##_offset, #name)
 
 void frame::describe_pd(FrameValues& values, int frame_no) {
-  if (is_interpreted_frame()) {
+  if (is_ricochet_frame()) {
+    MethodHandles::RicochetFrame::describe(this, values, frame_no);
+  } else if (is_interpreted_frame()) {
     DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
     DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
     DESCRIBE_FP_OFFSET(interpreter_frame_method);
@@ -667,7 +670,6 @@
     DESCRIBE_FP_OFFSET(interpreter_frame_bcx);
     DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
   }
-
 }
 #endif
 
--- a/src/cpu/x86/vm/globals_x86.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/globals_x86.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -60,9 +60,9 @@
 #ifdef AMD64
 // Very large C++ stack frames using solaris-amd64 optimized builds
 // due to lack of optimization caused by C++ compiler bugs
-define_pd_global(intx, StackShadowPages, SOLARIS_ONLY(20) NOT_SOLARIS(6) DEBUG_ONLY(+2));
+define_pd_global(intx, StackShadowPages, NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2));
 #else
-define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+5));
+define_pd_global(intx, StackShadowPages, 4 DEBUG_ONLY(+5));
 #endif // AMD64
 
 define_pd_global(intx, PreInflateSpin,           10);
--- a/src/cpu/x86/vm/methodHandles_x86.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -279,14 +279,16 @@
 }
 
 // Emit code to verify that RBP is pointing at a valid ricochet frame.
-#ifdef ASSERT
+#ifndef PRODUCT
 enum {
   ARG_LIMIT = 255, SLOP = 4,
   // use this parameter for checking for garbage stack movements:
   UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
   // the slop defends against false alarms due to fencepost errors
 };
+#endif
 
+#ifdef ASSERT
 void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
   // The stack should look like this:
   //    ... keep1 | dest=42 | keep2 | RF | magic | handler | magic | recursive args |
@@ -990,53 +992,103 @@
   BLOCK_COMMENT("} move_return_value");
 }
 
+#ifndef PRODUCT
+#define DESCRIBE_RICOCHET_OFFSET(rf, name) \
+  values.describe(frame_no, (intptr_t *) (((uintptr_t)rf) + MethodHandles::RicochetFrame::name##_offset_in_bytes()), #name)
+
+void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no)  {
+    address bp = (address) fr->fp();
+    RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes());
+
+    // ricochet slots
+    DESCRIBE_RICOCHET_OFFSET(rf, exact_sender_sp);
+    DESCRIBE_RICOCHET_OFFSET(rf, conversion);
+    DESCRIBE_RICOCHET_OFFSET(rf, saved_args_base);
+    DESCRIBE_RICOCHET_OFFSET(rf, saved_args_layout);
+    DESCRIBE_RICOCHET_OFFSET(rf, saved_target);
+    DESCRIBE_RICOCHET_OFFSET(rf, continuation);
+
+    // relevant ricochet targets (in caller frame)
+    values.describe(-1, rf->saved_args_base(),  err_msg("*saved_args_base for #%d", frame_no));
+}
+#endif // PRODUCT
 
 #ifndef PRODUCT
 extern "C" void print_method_handle(oop mh);
 void trace_method_handle_stub(const char* adaptername,
                               oop mh,
                               intptr_t* saved_regs,
-                              intptr_t* entry_sp,
-                              intptr_t* saved_sp,
-                              intptr_t* saved_bp) {
+                              intptr_t* entry_sp) {
   // called as a leaf from native code: do not block the JVM!
   bool has_mh = (strstr(adaptername, "return/") == NULL);  // return adapters don't have rcx_mh
-  intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
-  intptr_t* base_sp = last_sp;
-  typedef MethodHandles::RicochetFrame RicochetFrame;
-  RicochetFrame* rfp = (RicochetFrame*)((address)saved_bp - RicochetFrame::sender_link_offset_in_bytes());
-  if (Universe::heap()->is_in((address) rfp->saved_args_base())) {
-    // Probably an interpreter frame.
-    base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
-  }
-  intptr_t    mh_reg = (intptr_t)mh;
-  const char* mh_reg_name = "rcx_mh";
-  if (!has_mh)  mh_reg_name = "rcx";
-  tty->print_cr("MH %s %s="PTR_FORMAT" sp=("PTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="PTR_FORMAT,
-                adaptername, mh_reg_name, mh_reg,
-                (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
+  const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
+  tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT, adaptername, mh_reg_name, mh, entry_sp);
+
   if (Verbose) {
-    tty->print(" reg dump: ");
-    int saved_regs_count = (entry_sp-1) - saved_regs;
-    // 32 bit: rdi rsi rbp rsp; rbx rdx rcx (*) rax
-    int i;
-    for (i = 0; i <= saved_regs_count; i++) {
-      if (i > 0 && i % 4 == 0 && i != saved_regs_count) {
+    tty->print_cr("Registers:");
+    const int saved_regs_count = RegisterImpl::number_of_registers;
+    for (int i = 0; i < saved_regs_count; i++) {
+      Register r = as_Register(i);
+      // The registers are stored in reverse order on the stack (by pusha).
+      tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[((saved_regs_count - 1) - i)]);
+      if ((i + 1) % 4 == 0) {
         tty->cr();
-        tty->print("   + dump: ");
+      } else {
+        tty->print(", ");
       }
-      tty->print(" %d: "PTR_FORMAT, i, saved_regs[i]);
     }
     tty->cr();
-    if (last_sp != saved_sp && last_sp != NULL)
-      tty->print_cr("*** last_sp="PTR_FORMAT, (intptr_t)last_sp);
-    int stack_dump_count = 16;
-    if (stack_dump_count < (int)(saved_bp + 2 - saved_sp))
-      stack_dump_count = (int)(saved_bp + 2 - saved_sp);
-    if (stack_dump_count > 64)  stack_dump_count = 48;
-    for (i = 0; i < stack_dump_count; i += 4) {
-      tty->print_cr(" dump at SP[%d] "PTR_FORMAT": "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT,
-                    i, (intptr_t) &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]);
+
+    {
+     // dumping last frame with frame::describe
+
+      JavaThread* p = JavaThread::active();
+
+      ResourceMark rm;
+      PRESERVE_EXCEPTION_MARK; // may not be needed, but it is safer and inexpensive here
+      FrameValues values;
+
+      // Note: We want to allow trace_method_handle from any call site.
+      // While trace_method_handle creates a frame, it may be entered
+      // without a PC on the stack top (e.g. not just after a call).
+      // Walking that frame could lead to failures due to that invalid PC.
+      // => carefully detect that frame when doing the stack walking
+
+      // Current C frame
+      frame cur_frame = os::current_frame();
+
+      // Robust search of trace_calling_frame (independent of inlining).
+      // Assumes saved_regs comes from a pusha in the trace_calling_frame.
+      assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
+      frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
+      while (trace_calling_frame.fp() < saved_regs) {
+        trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
+      }
+
+      // safely create a frame and call frame::describe
+      intptr_t *dump_sp = trace_calling_frame.sender_sp();
+      intptr_t *dump_fp = trace_calling_frame.link();
+
+      bool walkable = has_mh; // whether the traced frame should be walkable
+
+      if (walkable) {
+        // The previous definition of walkable may have to be refined
+        // if new call sites cause the next frame constructor to start
+        // failing. Alternatively, frame constructors could be
+        // modified to support the current or future non walkable
+        // frames (but this is more intrusive and is not considered as
+        // part of this RFE, which will instead use a simpler output).
+        frame dump_frame = frame(dump_sp, dump_fp);
+        dump_frame.describe(values, 1);
+      } else {
+        // Stack may not be walkable (invalid PC above FP):
+        // Add descriptions without building a Java frame to avoid issues
+        values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
+        values.describe(-1, dump_sp, "sp for #1");
+      }
+
+      tty->print_cr("Stack layout:");
+      values.print(p);
     }
     if (has_mh)
       print_method_handle(mh);
@@ -1051,41 +1103,58 @@
   oopDesc* mh;
   intptr_t* saved_regs;
   intptr_t* entry_sp;
-  intptr_t* saved_sp;
-  intptr_t* saved_bp;
 };
 void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
   trace_method_handle_stub(args->adaptername,
                            args->mh,
                            args->saved_regs,
-                           args->entry_sp,
-                           args->saved_sp,
-                           args->saved_bp);
+                           args->entry_sp);
 }
 
 void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
   if (!TraceMethodHandles)  return;
   BLOCK_COMMENT("trace_method_handle {");
-  __ push(rax);
-  __ lea(rax, Address(rsp, wordSize * NOT_LP64(6) LP64_ONLY(14))); // entry_sp  __ pusha();
+  __ enter();
+  __ andptr(rsp, -16); // align stack if needed for FPU state
   __ pusha();
-  __ mov(rbx, rsp);
-  __ enter();
-  // incoming state:
+  __ mov(rbx, rsp); // for retrieving saved_regs
+  // Note: saved_regs must be in the entered frame for the
+  // robust stack walking implemented in trace_method_handle_stub.
+
+  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
+  __ increment(rsp, -2 * wordSize);
+  if  (UseSSE >= 2) {
+    __ movdbl(Address(rsp, 0), xmm0);
+  } else if (UseSSE == 1) {
+    __ movflt(Address(rsp, 0), xmm0);
+  } else {
+    __ fst_d(Address(rsp, 0));
+  }
+
+  // Incoming state:
   // rcx: method handle
-  // r13 or rsi: saved sp
-  // To avoid calling convention issues, build a record on the stack and pass the pointer to that instead.
-  __ push(rbp);               // saved_bp
-  __ push(rsi);               // saved_sp
-  __ push(rax);               // entry_sp
+  //
+  // To avoid calling convention issues, build a record on the stack
+  // and pass the pointer to that instead.
+  __ push(rbp);               // entry_sp (with extra align space)
   __ push(rbx);               // pusha saved_regs
   __ push(rcx);               // mh
-  __ push(rcx);               // adaptername
+  __ push(rcx);               // slot for adaptername
   __ movptr(Address(rsp, 0), (intptr_t) adaptername);
   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
-  __ leave();
+  __ increment(rsp, sizeof(MethodHandleStubArguments));
+
+  if  (UseSSE >= 2) {
+    __ movdbl(xmm0, Address(rsp, 0));
+  } else if (UseSSE == 1) {
+    __ movflt(xmm0, Address(rsp, 0));
+  } else {
+    __ fld_d(Address(rsp, 0));
+  }
+  __ increment(rsp, 2 * wordSize);
+
   __ popa();
-  __ pop(rax);
+  __ leave();
   BLOCK_COMMENT("} trace_method_handle");
 }
 #endif //PRODUCT
@@ -2267,23 +2336,19 @@
 
       // grab another temp
       Register rsi_temp = rsi;
-      { if (rsi_temp == saved_last_sp)  __ push(saved_last_sp); }
-      // (preceding push must be done after argslot address is taken!)
-#define UNPUSH_RSI \
-      { if (rsi_temp == saved_last_sp)  __ pop(saved_last_sp); }
 
       // arx_argslot points both to the array and to the first output arg
       vmarg = Address(rax_argslot, 0);
 
       // Get the array value.
-      Register  rsi_array       = rsi_temp;
+      Register  rdi_array       = rdi_temp;
       Register  rdx_array_klass = rdx_temp;
       BasicType elem_type = ek_adapter_opt_spread_type(ek);
       int       elem_slots = type2size[elem_type];  // 1 or 2
       int       array_slots = 1;  // array is always a T_OBJECT
       int       length_offset   = arrayOopDesc::length_offset_in_bytes();
       int       elem0_offset    = arrayOopDesc::base_offset_in_bytes(elem_type);
-      __ movptr(rsi_array, vmarg);
+      __ movptr(rdi_array, vmarg);
 
       Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done;
       if (length_can_be_zero) {
@@ -2294,12 +2359,30 @@
           __ testl(rbx_temp, rbx_temp);
           __ jcc(Assembler::notZero, L_skip);
         }
-        __ testptr(rsi_array, rsi_array);
-        __ jcc(Assembler::zero, L_array_is_empty);
+        __ testptr(rdi_array, rdi_array);
+        __ jcc(Assembler::notZero, L_skip);
+
+        // If 'rsi' contains the 'saved_last_sp' (this is only the
+        // case in a 32-bit version of the VM) we have to save 'rsi'
+        // on the stack because later on (at 'L_array_is_empty') 'rsi'
+        // will be overwritten.
+        { if (rsi_temp == saved_last_sp)  __ push(saved_last_sp); }
+        // Also prepare a handy macro which restores 'rsi' if required.
+#define UNPUSH_RSI                                                      \
+        { if (rsi_temp == saved_last_sp)  __ pop(saved_last_sp); }
+
+        __ jmp(L_array_is_empty);
         __ bind(L_skip);
       }
-      __ null_check(rsi_array, oopDesc::klass_offset_in_bytes());
-      __ load_klass(rdx_array_klass, rsi_array);
+      __ null_check(rdi_array, oopDesc::klass_offset_in_bytes());
+      __ load_klass(rdx_array_klass, rdi_array);
+
+      // Save 'rsi' if required (see comment above).  Do this only
+      // after the null check such that the exception handler which is
+      // called in the case of a null pointer exception will not be
+      // confused by the extra value on the stack (it expects the
+      // return pointer on top of the stack)
+      { if (rsi_temp == saved_last_sp)  __ push(saved_last_sp); }
 
       // Check the array type.
       Register rbx_klass = rbx_temp;
@@ -2307,18 +2390,18 @@
       load_klass_from_Class(_masm, rbx_klass);
 
       Label ok_array_klass, bad_array_klass, bad_array_length;
-      __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi_temp, ok_array_klass);
+      __ check_klass_subtype(rdx_array_klass, rbx_klass, rsi_temp, ok_array_klass);
       // If we get here, the type check failed!
       __ jmp(bad_array_klass);
       __ BIND(ok_array_klass);
 
       // Check length.
       if (length_constant >= 0) {
-        __ cmpl(Address(rsi_array, length_offset), length_constant);
+        __ cmpl(Address(rdi_array, length_offset), length_constant);
       } else {
         Register rbx_vminfo = rbx_temp;
         load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion);
-        __ cmpl(rbx_vminfo, Address(rsi_array, length_offset));
+        __ cmpl(rbx_vminfo, Address(rdi_array, length_offset));
       }
       __ jcc(Assembler::notEqual, bad_array_length);
 
@@ -2330,9 +2413,9 @@
         __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize));
         // 'stack_move' is negative number of words to insert
         // This number already accounts for elem_slots.
-        Register rdi_stack_move = rdi_temp;
-        load_stack_move(_masm, rdi_stack_move, rcx_recv, true);
-        __ cmpptr(rdi_stack_move, 0);
+        Register rsi_stack_move = rsi_temp;
+        load_stack_move(_masm, rsi_stack_move, rcx_recv, true);
+        __ cmpptr(rsi_stack_move, 0);
         assert(stack_move_unit() < 0, "else change this comparison");
         __ jcc(Assembler::less, L_insert_arg_space);
         __ jcc(Assembler::equal, L_copy_args);
@@ -2343,12 +2426,12 @@
         __ jmp(L_args_done);  // no spreading to do
         __ BIND(L_insert_arg_space);
         // come here in the usual case, stack_move < 0 (2 or more spread arguments)
-        Register rsi_temp = rsi_array;  // spill this
-        insert_arg_slots(_masm, rdi_stack_move,
-                         rax_argslot, rbx_temp, rsi_temp);
+        Register rdi_temp = rdi_array;  // spill this
+        insert_arg_slots(_masm, rsi_stack_move,
+                         rax_argslot, rbx_temp, rdi_temp);
         // reload the array since rsi was killed
         // reload from rdx_argslot_limit since rax_argslot is now decremented
-        __ movptr(rsi_array, Address(rdx_argslot_limit, -Interpreter::stackElementSize));
+        __ movptr(rdi_array, Address(rdx_argslot_limit, -Interpreter::stackElementSize));
       } else if (length_constant >= 1) {
         int new_slots = (length_constant * elem_slots) - array_slots;
         insert_arg_slots(_masm, new_slots * stack_move_unit(),
@@ -2371,16 +2454,16 @@
       if (length_constant == -1) {
         // [rax_argslot, rdx_argslot_limit) is the area we are inserting into.
         // Array element [0] goes at rdx_argslot_limit[-wordSize].
-        Register rsi_source = rsi_array;
-        __ lea(rsi_source, Address(rsi_array, elem0_offset));
+        Register rdi_source = rdi_array;
+        __ lea(rdi_source, Address(rdi_array, elem0_offset));
         Register rdx_fill_ptr = rdx_argslot_limit;
         Label loop;
         __ BIND(loop);
         __ addptr(rdx_fill_ptr, -Interpreter::stackElementSize * elem_slots);
         move_typed_arg(_masm, elem_type, true,
-                       Address(rdx_fill_ptr, 0), Address(rsi_source, 0),
-                       rbx_temp, rdi_temp);
-        __ addptr(rsi_source, type2aelembytes(elem_type));
+                       Address(rdx_fill_ptr, 0), Address(rdi_source, 0),
+                       rbx_temp, rsi_temp);
+        __ addptr(rdi_source, type2aelembytes(elem_type));
         __ cmpptr(rdx_fill_ptr, rax_argslot);
         __ jcc(Assembler::above, loop);
       } else if (length_constant == 0) {
@@ -2391,8 +2474,8 @@
         for (int index = 0; index < length_constant; index++) {
           slot_offset -= Interpreter::stackElementSize * elem_slots;  // fill backward
           move_typed_arg(_masm, elem_type, true,
-                         Address(rax_argslot, slot_offset), Address(rsi_array, elem_offset),
-                         rbx_temp, rdi_temp);
+                         Address(rax_argslot, slot_offset), Address(rdi_array, elem_offset),
+                         rbx_temp, rsi_temp);
           elem_offset += type2aelembytes(elem_type);
         }
       }
--- a/src/cpu/x86/vm/methodHandles_x86.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/methodHandles_x86.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -224,6 +224,8 @@
   }
 
   static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
+
+  static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN;
 };
 
 // Additional helper methods for MethodHandles code generation:
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1091,12 +1091,238 @@
   }
 }
 
+
+static void save_or_restore_arguments(MacroAssembler* masm,
+                                      const int stack_slots,
+                                      const int total_in_args,
+                                      const int arg_save_area,
+                                      OopMap* map,
+                                      VMRegPair* in_regs,
+                                      BasicType* in_sig_bt) {
+  // if map is non-NULL then the code should store the values,
+  // otherwise it should load them.
+  int handle_index = 0;
+  // Save down double word first
+  for ( int i = 0; i < total_in_args; i++) {
+    if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
+      int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
+      int offset = slot * VMRegImpl::stack_slot_size;
+      handle_index += 2;
+      assert(handle_index <= stack_slots, "overflow");
+      if (map != NULL) {
+        __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
+      } else {
+        __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
+      }
+    }
+    if (in_regs[i].first()->is_Register() && in_sig_bt[i] == T_LONG) {
+      int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
+      int offset = slot * VMRegImpl::stack_slot_size;
+      handle_index += 2;
+      assert(handle_index <= stack_slots, "overflow");
+      if (map != NULL) {
+        __ movl(Address(rsp, offset), in_regs[i].first()->as_Register());
+        if (in_regs[i].second()->is_Register()) {
+          __ movl(Address(rsp, offset + 4), in_regs[i].second()->as_Register());
+        }
+      } else {
+        __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
+        if (in_regs[i].second()->is_Register()) {
+          __ movl(in_regs[i].second()->as_Register(), Address(rsp, offset + 4));
+        }
+      }
+    }
+  }
+  // Save or restore single word registers
+  for ( int i = 0; i < total_in_args; i++) {
+    if (in_regs[i].first()->is_Register()) {
+      int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
+      int offset = slot * VMRegImpl::stack_slot_size;
+      assert(handle_index <= stack_slots, "overflow");
+      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
+        map->set_oop(VMRegImpl::stack2reg(slot));
+      }
+
+      // Value is in an input register, so we must flush it to the stack
+      const Register reg = in_regs[i].first()->as_Register();
+      switch (in_sig_bt[i]) {
+        case T_ARRAY:
+          if (map != NULL) {
+            __ movptr(Address(rsp, offset), reg);
+          } else {
+            __ movptr(reg, Address(rsp, offset));
+          }
+          break;
+        case T_BOOLEAN:
+        case T_CHAR:
+        case T_BYTE:
+        case T_SHORT:
+        case T_INT:
+          if (map != NULL) {
+            __ movl(Address(rsp, offset), reg);
+          } else {
+            __ movl(reg, Address(rsp, offset));
+          }
+          break;
+        case T_OBJECT:
+        default: ShouldNotReachHere();
+      }
+    } else if (in_regs[i].first()->is_XMMRegister()) {
+      if (in_sig_bt[i] == T_FLOAT) {
+        int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
+        int offset = slot * VMRegImpl::stack_slot_size;
+        assert(handle_index <= stack_slots, "overflow");
+        if (map != NULL) {
+          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
+        } else {
+          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
+        }
+      }
+    } else if (in_regs[i].first()->is_stack()) {
+      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
+        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
+        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
+      }
+    }
+  }
+}
+
+// Check GC_locker::needs_gc and enter the runtime if it's true.  This
+// keeps a new JNI critical region from starting until a GC has been
+// forced.  Save down any oops in registers and describe them in an
+// OopMap.
+static void check_needs_gc_for_critical_native(MacroAssembler* masm,
+                                               Register thread,
+                                               int stack_slots,
+                                               int total_c_args,
+                                               int total_in_args,
+                                               int arg_save_area,
+                                               OopMapSet* oop_maps,
+                                               VMRegPair* in_regs,
+                                               BasicType* in_sig_bt) {
+  __ block_comment("check GC_locker::needs_gc");
+  Label cont;
+  __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
+  __ jcc(Assembler::equal, cont);
+
+  // Save down any incoming oops and call into the runtime to halt for a GC
+
+  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+
+  save_or_restore_arguments(masm, stack_slots, total_in_args,
+                            arg_save_area, map, in_regs, in_sig_bt);
+
+  address the_pc = __ pc();
+  oop_maps->add_gc_map( __ offset(), map);
+  __ set_last_Java_frame(thread, rsp, noreg, the_pc);
+
+  __ block_comment("block_for_jni_critical");
+  __ push(thread);
+  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
+  __ increment(rsp, wordSize);
+
+  __ get_thread(thread);
+  __ reset_last_Java_frame(thread, false, true);
+
+  save_or_restore_arguments(masm, stack_slots, total_in_args,
+                            arg_save_area, NULL, in_regs, in_sig_bt);
+
+  __ bind(cont);
+#ifdef ASSERT
+  if (StressCriticalJNINatives) {
+    // Stress register saving
+    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+    save_or_restore_arguments(masm, stack_slots, total_in_args,
+                              arg_save_area, map, in_regs, in_sig_bt);
+    // Destroy argument registers
+    for (int i = 0; i < total_in_args - 1; i++) {
+      if (in_regs[i].first()->is_Register()) {
+        const Register reg = in_regs[i].first()->as_Register();
+        __ xorptr(reg, reg);
+      } else if (in_regs[i].first()->is_XMMRegister()) {
+        __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
+      } else if (in_regs[i].first()->is_FloatRegister()) {
+        ShouldNotReachHere();
+      } else if (in_regs[i].first()->is_stack()) {
+        // Nothing to do
+      } else {
+        ShouldNotReachHere();
+      }
+      if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
+        i++;
+      }
+    }
+
+    save_or_restore_arguments(masm, stack_slots, total_in_args,
+                              arg_save_area, NULL, in_regs, in_sig_bt);
+  }
+#endif
+}
+
+// Unpack an array argument into a pointer to the body and the length
+// if the array is non-null, otherwise pass 0 for both.
+static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
+  Register tmp_reg = rax;
+  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
+         "possible collision");
+  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
+         "possible collision");
+
+  // Pass the length, ptr pair
+  Label is_null, done;
+  VMRegPair tmp(tmp_reg->as_VMReg());
+  if (reg.first()->is_stack()) {
+    // Load the arg up from the stack
+    simple_move32(masm, reg, tmp);
+    reg = tmp;
+  }
+  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
+  __ jccb(Assembler::equal, is_null);
+  __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
+  simple_move32(masm, tmp, body_arg);
+  // load the length relative to the body.
+  __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
+                           arrayOopDesc::base_offset_in_bytes(in_elem_type)));
+  simple_move32(masm, tmp, length_arg);
+  __ jmpb(done);
+  __ bind(is_null);
+  // Pass zeros
+  __ xorptr(tmp_reg, tmp_reg);
+  simple_move32(masm, tmp, body_arg);
+  simple_move32(masm, tmp, length_arg);
+  __ bind(done);
+}
+
+
 // ---------------------------------------------------------------------------
 // Generate a native wrapper for a given method.  The method takes arguments
 // in the Java compiled code convention, marshals them to the native
 // convention (handlizes oops, etc), transitions to native, makes the call,
 // returns to java state (possibly blocking), unhandlizes any result and
 // returns.
+//
+// Critical native functions are a shorthand for the use of
+// GetPrimitiveArrayCritical and disallow the use of any other JNI
+// functions.  The wrapper is expected to unpack the arguments before
+// passing them to the callee and to perform checks before and after the
+// native call to ensure that the GC_locker
+// lock_critical/unlock_critical semantics are followed.  Some other
+// parts of JNI setup are skipped, like the tear down of the JNI handle
+// block and the check for pending exceptions, since it's impossible for
+// them to be thrown.
+//
+// They are roughly structured like this:
+//    if (GC_locker::needs_gc())
+//      SharedRuntime::block_for_jni_critical();
+//    transition to thread_in_native
+//    unpack array arguments and call native entry point
+//    check for safepoint in progress
+//    check if any thread suspend flags are set
+//      call into JVM and possibly unlock the JNI critical
+//      if a GC was suppressed while in the critical native.
+//    transition back to thread_in_Java
+//    return to caller
+//
 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                                 methodHandle method,
                                                 int compile_id,
@@ -1105,6 +1331,13 @@
                                                 BasicType *in_sig_bt,
                                                 VMRegPair *in_regs,
                                                 BasicType ret_type) {
+  bool is_critical_native = true;
+  address native_func = method->critical_native_function();
+  if (native_func == NULL) {
+    native_func = method->native_function();
+    is_critical_native = false;
+  }
+  assert(native_func != NULL, "must have function");
 
   // An OopMap for lock (and class if static)
   OopMapSet *oop_maps = new OopMapSet();
@@ -1115,30 +1348,72 @@
   // we convert the java signature to a C signature by inserting
   // the hidden arguments as arg[0] and possibly arg[1] (static method)
 
-  int total_c_args = total_in_args + 1;
-  if (method->is_static()) {
-    total_c_args++;
+  int total_c_args = total_in_args;
+  if (!is_critical_native) {
+    total_c_args += 1;
+    if (method->is_static()) {
+      total_c_args++;
+    }
+  } else {
+    for (int i = 0; i < total_in_args; i++) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        total_c_args++;
+      }
+    }
   }
 
   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
-  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair,   total_c_args);
+  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
+  BasicType* in_elem_bt = NULL;
 
   int argc = 0;
-  out_sig_bt[argc++] = T_ADDRESS;
-  if (method->is_static()) {
-    out_sig_bt[argc++] = T_OBJECT;
+  if (!is_critical_native) {
+    out_sig_bt[argc++] = T_ADDRESS;
+    if (method->is_static()) {
+      out_sig_bt[argc++] = T_OBJECT;
+    }
+
+    for (int i = 0; i < total_in_args ; i++ ) {
+      out_sig_bt[argc++] = in_sig_bt[i];
+    }
+  } else {
+    Thread* THREAD = Thread::current();
+    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
+    SignatureStream ss(method->signature());
+    for (int i = 0; i < total_in_args ; i++ ) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        // Arrays are passed as int, elem* pair
+        out_sig_bt[argc++] = T_INT;
+        out_sig_bt[argc++] = T_ADDRESS;
+        Symbol* atype = ss.as_symbol(CHECK_NULL);
+        const char* at = atype->as_C_string();
+        if (strlen(at) == 2) {
+          assert(at[0] == '[', "must be");
+          switch (at[1]) {
+            case 'B': in_elem_bt[i]  = T_BYTE; break;
+            case 'C': in_elem_bt[i]  = T_CHAR; break;
+            case 'D': in_elem_bt[i]  = T_DOUBLE; break;
+            case 'F': in_elem_bt[i]  = T_FLOAT; break;
+            case 'I': in_elem_bt[i]  = T_INT; break;
+            case 'J': in_elem_bt[i]  = T_LONG; break;
+            case 'S': in_elem_bt[i]  = T_SHORT; break;
+            case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
+            default: ShouldNotReachHere();
+          }
+        }
+      } else {
+        out_sig_bt[argc++] = in_sig_bt[i];
+        in_elem_bt[i] = T_VOID;
+      }
+      if (in_sig_bt[i] != T_VOID) {
+        assert(in_sig_bt[i] == ss.type(), "must match");
+        ss.next();
+      }
+    }
   }
 
-  int i;
-  for (i = 0; i < total_in_args ; i++ ) {
-    out_sig_bt[argc++] = in_sig_bt[i];
-  }
-
-
   // Now figure out where the args must be stored and how much stack space
-  // they require (neglecting out_preserve_stack_slots but space for storing
-  // the 1st six register arguments). It's weird see int_stk_helper.
-  //
+  // they require.
   int out_arg_slots;
   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
 
@@ -1151,9 +1426,44 @@
   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
 
   // Now the space for the inbound oop handle area
+  int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
+  if (is_critical_native) {
+    // Critical natives may have to call out so they need a save area
+    // for register arguments.
+    int double_slots = 0;
+    int single_slots = 0;
+    for ( int i = 0; i < total_in_args; i++) {
+      if (in_regs[i].first()->is_Register()) {
+        const Register reg = in_regs[i].first()->as_Register();
+        switch (in_sig_bt[i]) {
+          case T_ARRAY:
+          case T_BOOLEAN:
+          case T_BYTE:
+          case T_SHORT:
+          case T_CHAR:
+          case T_INT:  single_slots++; break;
+          case T_LONG: double_slots++; break;
+          default:  ShouldNotReachHere();
+        }
+      } else if (in_regs[i].first()->is_XMMRegister()) {
+        switch (in_sig_bt[i]) {
+          case T_FLOAT:  single_slots++; break;
+          case T_DOUBLE: double_slots++; break;
+          default:  ShouldNotReachHere();
+        }
+      } else if (in_regs[i].first()->is_FloatRegister()) {
+        ShouldNotReachHere();
+      }
+    }
+    total_save_slots = double_slots * 2 + single_slots;
+    // align the save area
+    if (double_slots != 0) {
+      stack_slots = round_to(stack_slots, 2);
+    }
+  }
 
   int oop_handle_offset = stack_slots;
-  stack_slots += 2*VMRegImpl::slots_per_word;
+  stack_slots += total_save_slots;
 
   // Now any space we need for handlizing a klass if static method
 
@@ -1161,7 +1471,6 @@
   int klass_offset = -1;
   int lock_slot_offset = 0;
   bool is_static = false;
-  int oop_temp_slot_offset = 0;
 
   if (method->is_static()) {
     klass_slot_offset = stack_slots;
@@ -1221,7 +1530,7 @@
   // First thing make an ic check to see if we should even be here
 
   // We are free to use all registers as temps without saving them and
-  // restoring them except rbp,. rbp, is the only callee save register
+  // restoring them except rbp. rbp is the only callee save register
   // as far as the interpreter and the compiler(s) are concerned.
 
 
@@ -1230,7 +1539,6 @@
   Label hit;
   Label exception_pending;
 
-
   __ verify_oop(receiver);
   __ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
   __ jcc(Assembler::equal, hit);
@@ -1292,11 +1600,10 @@
 
   // Generate a new frame for the wrapper.
   __ enter();
-  // -2 because return address is already present and so is saved rbp,
+  // -2 because return address is already present and so is saved rbp
   __ subptr(rsp, stack_size - 2*wordSize);
 
-  // Frame is now completed as far a size and linkage.
-
+  // Frame is now completed as far as size and linkage.
   int frame_complete = ((intptr_t)__ pc()) - start;
 
   // Calculate the difference between rsp and rbp,. We need to know it
@@ -1319,7 +1626,6 @@
   // Compute the rbp, offset for any slots used after the jni call
 
   int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
-  int oop_temp_slot_rbp_offset = (oop_temp_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
 
   // We use rdi as a thread pointer because it is callee save and
   // if we load it once it is usable thru the entire wrapper
@@ -1332,6 +1638,10 @@
 
   __ get_thread(thread);
 
+  if (is_critical_native) {
+    check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
+                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
+  }
 
   //
   // We immediately shuffle the arguments so that any vm call we have to
@@ -1353,7 +1663,7 @@
   // vectors we have in our possession. We simply walk the java vector to
   // get the source locations and the c vector to get the destinations.
 
-  int c_arg = method->is_static() ? 2 : 1 ;
+  int c_arg = is_critical_native ? 0 : (method->is_static() ? 2 : 1 );
 
   // Record rsp-based slot for receiver on stack for non-static methods
   int receiver_offset = -1;
@@ -1373,10 +1683,16 @@
   // Are free to temporaries if we have to do  stack to steck moves.
   // All inbound args are referenced based on rbp, and all outbound args via rsp.
 
-  for (i = 0; i < total_in_args ; i++, c_arg++ ) {
+  for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
     switch (in_sig_bt[i]) {
       case T_ARRAY:
+        if (is_critical_native) {
+          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
+          c_arg++;
+          break;
+        }
       case T_OBJECT:
+        assert(!is_critical_native, "no oop arguments");
         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                     ((i == 0) && (!is_static)),
                     &receiver_offset);
@@ -1408,7 +1724,7 @@
 
   // Pre-load a static method's oop into rsi.  Used both by locking code and
   // the normal JNI call code.
-  if (method->is_static()) {
+  if (method->is_static() && !is_critical_native) {
 
     //  load opp into a register
     __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
@@ -1463,6 +1779,7 @@
 
   // Lock a synchronized method
   if (method->is_synchronized()) {
+    assert(!is_critical_native, "unhandled");
 
 
     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
@@ -1529,14 +1846,15 @@
 
 
   // get JNIEnv* which is first argument to native
-
-  __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
-  __ movptr(Address(rsp, 0), rdx);
+  if (!is_critical_native) {
+    __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
+    __ movptr(Address(rsp, 0), rdx);
+  }
 
   // Now set thread in native
   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
 
-  __ call(RuntimeAddress(method->native_function()));
+  __ call(RuntimeAddress(native_func));
 
   // WARNING - on Windows Java Natives use pascal calling convention and pop the
   // arguments off of the stack. We could just re-adjust the stack pointer here
@@ -1591,6 +1909,8 @@
     __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
   }
 
+  Label after_transition;
+
   // check for safepoint operation in progress and/or pending suspend requests
   { Label Continue;
 
@@ -1611,17 +1931,29 @@
     //
     save_native_result(masm, ret_type, stack_slots);
     __ push(thread);
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
-                                            JavaThread::check_special_condition_for_native_trans)));
+    if (!is_critical_native) {
+      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
+                                              JavaThread::check_special_condition_for_native_trans)));
+    } else {
+      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
+                                              JavaThread::check_special_condition_for_native_trans_and_transition)));
+    }
     __ increment(rsp, wordSize);
     // Restore any method result value
     restore_native_result(masm, ret_type, stack_slots);
 
+    if (is_critical_native) {
+      // The call above performed the transition to thread_in_Java so
+      // skip the transition logic below.
+      __ jmpb(after_transition);
+    }
+
     __ bind(Continue);
   }
 
   // change thread state
   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
+  __ bind(after_transition);
 
   Label reguard;
   Label reguard_done;
@@ -1710,15 +2042,15 @@
       __ verify_oop(rax);
   }
 
-  // reset handle block
-  __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
-
-  __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
-
-  // Any exception pending?
-  __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
-  __ jcc(Assembler::notEqual, exception_pending);
-
+  if (!is_critical_native) {
+    // reset handle block
+    __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
+    __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
+
+    // Any exception pending?
+    __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
+    __ jcc(Assembler::notEqual, exception_pending);
+  }
 
   // no exception, we're almost done
 
@@ -1829,16 +2161,18 @@
 
   // BEGIN EXCEPTION PROCESSING
 
-  // Forward  the exception
-  __ bind(exception_pending);
-
-  // remove possible return value from FPU register stack
-  __ empty_FPU_stack();
-
-  // pop our frame
-  __ leave();
-  // and forward the exception
-  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+  if (!is_critical_native) {
+    // Forward  the exception
+    __ bind(exception_pending);
+
+    // remove possible return value from FPU register stack
+    __ empty_FPU_stack();
+
+    // pop our frame
+    __ leave();
+    // and forward the exception
+    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+  }
 
   __ flush();
 
@@ -1851,6 +2185,11 @@
                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
                                             oop_maps);
+
+  if (is_critical_native) {
+    nm->set_lazy_critical_native(true);
+  }
+
   return nm;
 
 }
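Taken together, the 32-bit wrapper changes above add support for critical natives: generate_native_wrapper now prefers method->critical_native_function() when it exists, skips the JNIEnv and handle-block setup, guards entry with check_needs_gc_for_critical_native, and passes each primitive array as a (length, body pointer) pair built by unpack_array_argument. A hedged sketch of how the two entry points might differ on the C side; the JavaCritical_ naming and the example class are illustrative assumptions, not something this changeset defines:

// Sketch only: a regular JNI native next to its hypothetical critical
// variant, to show the unpacked (length, pointer) array convention the
// wrapper code above sets up.  Class and method names are made up.
#include <jni.h>

// Regular JNI entry point: gets JNIEnv*, jclass and the array as a handle.
extern "C" JNIEXPORT jint JNICALL
Java_example_Checksum_update(JNIEnv* env, jclass, jbyteArray data) {
  jint len = env->GetArrayLength(data);
  jbyte* body = (jbyte*) env->GetPrimitiveArrayCritical(data, NULL);
  jint sum = 0;
  for (jint i = 0; i < len; i++) sum += body[i];
  env->ReleasePrimitiveArrayCritical(data, body, JNI_ABORT);
  return sum;
}

// Critical variant: no JNIEnv*/jclass and no object arguments; the array
// arrives already unpacked as an int length plus a raw element pointer,
// which is what unpack_array_argument materializes above.
extern "C" JNIEXPORT jint JNICALL
JavaCritical_example_Checksum_update(jint len, jbyte* body) {
  jint sum = 0;
  for (jint i = 0; i < len; i++) sum += body[i];
  return sum;
}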
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -938,6 +938,25 @@
   }
 }
 
+static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
+  if (src.first()->is_stack()) {
+    if (dst.first()->is_stack()) {
+      // stack to stack
+      __ movq(rax, Address(rbp, reg2offset_in(src.first())));
+      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
+    } else {
+      // stack to reg
+      __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
+    }
+  } else if (dst.first()->is_stack()) {
+    // reg to stack
+    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
+  } else {
+    if (dst.first() != src.first()) {
+      __ movq(dst.first()->as_Register(), src.first()->as_Register());
+    }
+  }
+}
 
 // An oop arg. Must pass a handle not the oop itself
 static void object_move(MacroAssembler* masm,
@@ -1152,6 +1171,368 @@
     }
 }
 
+
+static void save_or_restore_arguments(MacroAssembler* masm,
+                                      const int stack_slots,
+                                      const int total_in_args,
+                                      const int arg_save_area,
+                                      OopMap* map,
+                                      VMRegPair* in_regs,
+                                      BasicType* in_sig_bt) {
+  // if map is non-NULL then the code should store the values,
+  // otherwise it should load them.
+  int slot = arg_save_area;
+  // Save down double word first
+  for ( int i = 0; i < total_in_args; i++) {
+    if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
+      int offset = slot * VMRegImpl::stack_slot_size;
+      slot += VMRegImpl::slots_per_word;
+      assert(slot <= stack_slots, "overflow");
+      if (map != NULL) {
+        __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
+      } else {
+        __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
+      }
+    }
+    if (in_regs[i].first()->is_Register() &&
+        (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
+      int offset = slot * VMRegImpl::stack_slot_size;
+      if (map != NULL) {
+        __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
+        if (in_sig_bt[i] == T_ARRAY) {
+          map->set_oop(VMRegImpl::stack2reg(slot));
+        }
+      } else {
+        __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
+      }
+      slot += VMRegImpl::slots_per_word;
+    }
+  }
+  // Save or restore single word registers
+  for ( int i = 0; i < total_in_args; i++) {
+    if (in_regs[i].first()->is_Register()) {
+      int offset = slot * VMRegImpl::stack_slot_size;
+      slot++;
+      assert(slot <= stack_slots, "overflow");
+
+      // Value is in an input register, so we must flush it to the stack
+      const Register reg = in_regs[i].first()->as_Register();
+      switch (in_sig_bt[i]) {
+        case T_BOOLEAN:
+        case T_CHAR:
+        case T_BYTE:
+        case T_SHORT:
+        case T_INT:
+          if (map != NULL) {
+            __ movl(Address(rsp, offset), reg);
+          } else {
+            __ movl(reg, Address(rsp, offset));
+          }
+          break;
+        case T_ARRAY:
+        case T_LONG:
+          // handled above
+          break;
+        case T_OBJECT:
+        default: ShouldNotReachHere();
+      }
+    } else if (in_regs[i].first()->is_XMMRegister()) {
+      if (in_sig_bt[i] == T_FLOAT) {
+        int offset = slot * VMRegImpl::stack_slot_size;
+        slot++;
+        assert(slot <= stack_slots, "overflow");
+        if (map != NULL) {
+          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
+        } else {
+          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
+        }
+      }
+    } else if (in_regs[i].first()->is_stack()) {
+      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
+        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
+        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
+      }
+    }
+  }
+}
+
+
+// Check GC_locker::needs_gc and enter the runtime if it's true.  This
+// keeps a new JNI critical region from starting until a GC has been
+// forced.  Save down any oops in registers and describe them in an
+// OopMap.
+static void check_needs_gc_for_critical_native(MacroAssembler* masm,
+                                               int stack_slots,
+                                               int total_c_args,
+                                               int total_in_args,
+                                               int arg_save_area,
+                                               OopMapSet* oop_maps,
+                                               VMRegPair* in_regs,
+                                               BasicType* in_sig_bt) {
+  __ block_comment("check GC_locker::needs_gc");
+  Label cont;
+  __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
+  __ jcc(Assembler::equal, cont);
+
+  // Save down any incoming oops and call into the runtime to halt for a GC
+
+  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+  save_or_restore_arguments(masm, stack_slots, total_in_args,
+                            arg_save_area, map, in_regs, in_sig_bt);
+
+  address the_pc = __ pc();
+  oop_maps->add_gc_map( __ offset(), map);
+  __ set_last_Java_frame(rsp, noreg, the_pc);
+
+  __ block_comment("block_for_jni_critical");
+  __ movptr(c_rarg0, r15_thread);
+  __ mov(r12, rsp); // remember sp
+  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+  __ andptr(rsp, -16); // align stack as required by ABI
+  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
+  __ mov(rsp, r12); // restore sp
+  __ reinit_heapbase();
+
+  __ reset_last_Java_frame(false, true);
+
+  save_or_restore_arguments(masm, stack_slots, total_in_args,
+                            arg_save_area, NULL, in_regs, in_sig_bt);
+
+  __ bind(cont);
+#ifdef ASSERT
+  if (StressCriticalJNINatives) {
+    // Stress register saving
+    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+    save_or_restore_arguments(masm, stack_slots, total_in_args,
+                              arg_save_area, map, in_regs, in_sig_bt);
+    // Destroy argument registers
+    for (int i = 0; i < total_in_args - 1; i++) {
+      if (in_regs[i].first()->is_Register()) {
+        const Register reg = in_regs[i].first()->as_Register();
+        __ xorptr(reg, reg);
+      } else if (in_regs[i].first()->is_XMMRegister()) {
+        __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
+      } else if (in_regs[i].first()->is_FloatRegister()) {
+        ShouldNotReachHere();
+      } else if (in_regs[i].first()->is_stack()) {
+        // Nothing to do
+      } else {
+        ShouldNotReachHere();
+      }
+      if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
+        i++;
+      }
+    }
+
+    save_or_restore_arguments(masm, stack_slots, total_in_args,
+                              arg_save_area, NULL, in_regs, in_sig_bt);
+  }
+#endif
+}
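In outline, the block above is a guarded runtime call: test the GC_locker flag, and if a collection is pending, spill every (possibly oop-holding) argument register into a save area described by an OopMap, block in the runtime until the forced GC has happened, then reload the arguments and fall through into the critical region. A rough C++ sketch of that control flow, with placeholder names (needs_gc, spill_arguments, reload_arguments are illustrative, not the VM's API):

    // Sketch only: the generated assembly does this with explicit register
    // spills plus an OopMap so a GC triggered here can still find the oops.
    void guard_critical_entry(JavaThread* thread) {
      if (needs_gc()) {                  // cheap flag test (GC_locker::needs_gc)
        spill_arguments(thread);         // save args; oop slots recorded in an OopMap
        block_for_jni_critical(thread);  // runtime call: wait for the forced GC
        reload_arguments(thread);        // restore args before entering the region
      }
      // fall through into the JNI critical region
    }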
+
+// Unpack an array argument into a pointer to the body and the length
+// if the array is non-null, otherwise pass 0 for both.
+static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
+  Register tmp_reg = rax;
+  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
+         "possible collision");
+  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
+         "possible collision");
+
+  // Pass the length, ptr pair
+  Label is_null, done;
+  VMRegPair tmp;
+  tmp.set_ptr(tmp_reg->as_VMReg());
+  if (reg.first()->is_stack()) {
+    // Load the arg up from the stack
+    move_ptr(masm, reg, tmp);
+    reg = tmp;
+  }
+  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
+  __ jccb(Assembler::equal, is_null);
+  __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
+  move_ptr(masm, tmp, body_arg);
+  // load the length relative to the body.
+  __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
+                           arrayOopDesc::base_offset_in_bytes(in_elem_type)));
+  move32_64(masm, tmp, length_arg);
+  __ jmpb(done);
+  __ bind(is_null);
+  // Pass zeros
+  __ xorptr(tmp_reg, tmp_reg);
+  move_ptr(masm, tmp, body_arg);
+  move32_64(masm, tmp, length_arg);
+  __ bind(done);
+}
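Each Java array argument is thus expanded into two C arguments, a 32-bit length followed by a raw pointer to the array body, with zero passed for both when the reference is null. A hedged sketch of the kind of entry point this convention targets (the JavaCritical_ prefix, the missing JNIEnv*/jclass parameters, and the method name are the convention assumed here, not something spelled out in this patch):

    #include <jni.h>

    // Hypothetical critical counterpart of:  static native int sum(byte[] data);
    // The array arrives as a (length, body) pair instead of a jbyteArray handle.
    extern "C" jint JavaCritical_pkg_Foo_sum(jint data_len, jbyte* data) {
      jint s = 0;
      for (jint i = 0; i < data_len; i++) {
        s += data[i];                 // data is NULL only when data_len == 0
      }
      return s;
    }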
+
+
+class ComputeMoveOrder: public StackObj {
+  class MoveOperation: public ResourceObj {
+    friend class ComputeMoveOrder;
+   private:
+    VMRegPair        _src;
+    VMRegPair        _dst;
+    int              _src_index;
+    int              _dst_index;
+    bool             _processed;
+    MoveOperation*  _next;
+    MoveOperation*  _prev;
+
+    static int get_id(VMRegPair r) {
+      return r.first()->value();
+    }
+
+   public:
+    MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
+      _src(src)
+    , _src_index(src_index)
+    , _dst(dst)
+    , _dst_index(dst_index)
+    , _next(NULL)
+    , _prev(NULL)
+    , _processed(false) {
+    }
+
+    VMRegPair src() const              { return _src; }
+    int src_id() const                 { return get_id(src()); }
+    int src_index() const              { return _src_index; }
+    VMRegPair dst() const              { return _dst; }
+    void set_dst(int i, VMRegPair dst) { _dst_index = i, _dst = dst; }
+    int dst_index() const              { return _dst_index; }
+    int dst_id() const                 { return get_id(dst()); }
+    MoveOperation* next() const       { return _next; }
+    MoveOperation* prev() const       { return _prev; }
+    void set_processed()               { _processed = true; }
+    bool is_processed() const          { return _processed; }
+
+    // Insert a new store to break a cycle through the temp register
+    void break_cycle(VMRegPair temp_register) {
+      // create a new store following the last store
+      // to move from the temp_register to the original
+      MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
+
+      // break the cycle of links and insert new_store at the end
+      // break the reverse link.
+      MoveOperation* p = prev();
+      assert(p->next() == this, "must be");
+      _prev = NULL;
+      p->_next = new_store;
+      new_store->_prev = p;
+
+      // change the original store to save its value in the temp.
+      set_dst(-1, temp_register);
+    }
+
+    void link(GrowableArray<MoveOperation*>& killer) {
+      // link this store in front of the store that it depends on
+      MoveOperation* n = killer.at_grow(src_id(), NULL);
+      if (n != NULL) {
+        assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
+        _next = n;
+        n->_prev = this;
+      }
+    }
+  };
+
+ private:
+  GrowableArray<MoveOperation*> edges;
+
+ public:
+  ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
+                    BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
+    // Move operations where the dest is the stack can all be
+    // scheduled first since they can't interfere with the other moves.
+    for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        c_arg--;
+        if (out_regs[c_arg].first()->is_stack() &&
+            out_regs[c_arg + 1].first()->is_stack()) {
+          arg_order.push(i);
+          arg_order.push(c_arg);
+        } else {
+          if (out_regs[c_arg].first()->is_stack() ||
+              in_regs[i].first() == out_regs[c_arg].first()) {
+            add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
+          } else {
+            add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
+          }
+        }
+      } else if (in_sig_bt[i] == T_VOID) {
+        arg_order.push(i);
+        arg_order.push(c_arg);
+      } else {
+        if (out_regs[c_arg].first()->is_stack() ||
+            in_regs[i].first() == out_regs[c_arg].first()) {
+          arg_order.push(i);
+          arg_order.push(c_arg);
+        } else {
+          add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
+        }
+      }
+    }
+    // Break any cycles in the register moves and emit them in the
+    // proper order.
+    GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
+    for (int i = 0; i < stores->length(); i++) {
+      arg_order.push(stores->at(i)->src_index());
+      arg_order.push(stores->at(i)->dst_index());
+    }
+  }
+
+  // Collect all the move operations
+  void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
+    if (src.first() == dst.first()) return;
+    edges.append(new MoveOperation(src_index, src, dst_index, dst));
+  }
+
+  // Walk the edges breaking cycles between moves.  The result list
+  // can be walked in order to produce the proper set of loads
+  GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
+    // Record which moves kill which values
+    GrowableArray<MoveOperation*> killer;
+    for (int i = 0; i < edges.length(); i++) {
+      MoveOperation* s = edges.at(i);
+      assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
+      killer.at_put_grow(s->dst_id(), s, NULL);
+    }
+    assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
+           "make sure temp isn't in the registers that are killed");
+
+    // create links between loads and stores
+    for (int i = 0; i < edges.length(); i++) {
+      edges.at(i)->link(killer);
+    }
+
+    // at this point, all the move operations are chained together
+    // in a doubly linked list.  Processing it backwards finds
+    // the beginning of the chain, forwards finds the end.  If there's
+    // a cycle it can be broken at any point,  so pick an edge and walk
+    // backward until the list ends or we end where we started.
+    GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
+    for (int e = 0; e < edges.length(); e++) {
+      MoveOperation* s = edges.at(e);
+      if (!s->is_processed()) {
+        MoveOperation* start = s;
+        // search for the beginning of the chain or cycle
+        while (start->prev() != NULL && start->prev() != s) {
+          start = start->prev();
+        }
+        if (start->prev() == s) {
+          start->break_cycle(temp_register);
+        }
+        // walk the chain forward inserting to store list
+        while (start != NULL) {
+          stores->append(start);
+          start->set_processed();
+          start = start->next();
+        }
+      }
+    }
+    return stores;
+  }
+};
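ComputeMoveOrder is solving the classic parallel-move problem: each incoming argument must travel to its outgoing location, a move may clobber a register that a later move still reads, and a pure cycle (rdi -> rsi -> rdi, say) can only be resolved through a temporary, which is what break_cycle() does with tmp_vmreg. A self-contained sketch of the same idea over integer register ids (illustrative only, not the VM's implementation; identity moves are assumed to be filtered out, as add_edge does above):

    #include <utility>
    #include <vector>

    // Order parallel moves (dst <- src over integer "register" ids) so that no
    // source is clobbered before it is read; 'temp' breaks cycles, playing the
    // role of tmp_vmreg (rbx) in the wrapper code.
    std::vector<std::pair<int, int> > order_moves(std::vector<std::pair<int, int> > pending, int temp) {
      std::vector<std::pair<int, int> > emitted;   // (dst, src) in safe emission order
      auto is_pending_source = [&](int r) {
        for (size_t i = 0; i < pending.size(); i++) {
          if (pending[i].second == r) return true;
        }
        return false;
      };
      while (!pending.empty()) {
        bool progress = false;
        for (size_t i = 0; i < pending.size(); ) {
          if (!is_pending_source(pending[i].first)) {   // dst no longer feeds another move
            emitted.push_back(pending[i]);              // safe to emit now
            pending.erase(pending.begin() + i);
            progress = true;
          } else {
            i++;
          }
        }
        if (!progress) {                                // only cycles remain
          int victim = pending.front().first;           // a dst that is still a live source
          emitted.push_back(std::make_pair(temp, victim));   // save its value in the temp
          for (size_t i = 0; i < pending.size(); i++) {
            if (pending[i].second == victim) pending[i].second = temp;  // read it from temp
          }
        }
      }
      return emitted;
    }

For the two-element cycle {rsi <- rdi, rdi <- rsi} this emits temp <- rsi, rsi <- rdi, rdi <- temp, which is the same shape break_cycle() produces with the temp register.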
+
+
 // ---------------------------------------------------------------------------
 // Generate a native wrapper for a given method.  The method takes arguments
 // in the Java compiled code convention, marshals them to the native
@@ -1166,10 +1547,14 @@
                                                 BasicType *in_sig_bt,
                                                 VMRegPair *in_regs,
                                                 BasicType ret_type) {
-  // Native nmethod wrappers never take possesion of the oop arguments.
-  // So the caller will gc the arguments. The only thing we need an
-  // oopMap for is if the call is static
-  //
+  bool is_critical_native = true;
+  address native_func = method->critical_native_function();
+  if (native_func == NULL) {
+    native_func = method->native_function();
+    is_critical_native = false;
+  }
+  assert(native_func != NULL, "must have function");
+
   // An OopMap for lock (and class if static)
   OopMapSet *oop_maps = new OopMapSet();
   intptr_t start = (intptr_t)__ pc();
@@ -1180,27 +1565,72 @@
   // we convert the java signature to a C signature by inserting
   // the hidden arguments as arg[0] and possibly arg[1] (static method)
 
-  int total_c_args = total_in_args + 1;
-  if (method->is_static()) {
-    total_c_args++;
+  int total_c_args = total_in_args;
+  if (!is_critical_native) {
+    total_c_args += 1;
+    if (method->is_static()) {
+      total_c_args++;
+    }
+  } else {
+    for (int i = 0; i < total_in_args; i++) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        total_c_args++;
+      }
+    }
   }
 
   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
-  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair,   total_c_args);
+  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
+  BasicType* in_elem_bt = NULL;
 
   int argc = 0;
-  out_sig_bt[argc++] = T_ADDRESS;
-  if (method->is_static()) {
-    out_sig_bt[argc++] = T_OBJECT;
-  }
-
-  for (int i = 0; i < total_in_args ; i++ ) {
-    out_sig_bt[argc++] = in_sig_bt[i];
+  if (!is_critical_native) {
+    out_sig_bt[argc++] = T_ADDRESS;
+    if (method->is_static()) {
+      out_sig_bt[argc++] = T_OBJECT;
+    }
+
+    for (int i = 0; i < total_in_args ; i++ ) {
+      out_sig_bt[argc++] = in_sig_bt[i];
+    }
+  } else {
+    Thread* THREAD = Thread::current();
+    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
+    SignatureStream ss(method->signature());
+    for (int i = 0; i < total_in_args ; i++ ) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        // Arrays are passed as int, elem* pair
+        out_sig_bt[argc++] = T_INT;
+        out_sig_bt[argc++] = T_ADDRESS;
+        Symbol* atype = ss.as_symbol(CHECK_NULL);
+        const char* at = atype->as_C_string();
+        if (strlen(at) == 2) {
+          assert(at[0] == '[', "must be");
+          switch (at[1]) {
+            case 'B': in_elem_bt[i]  = T_BYTE; break;
+            case 'C': in_elem_bt[i]  = T_CHAR; break;
+            case 'D': in_elem_bt[i]  = T_DOUBLE; break;
+            case 'F': in_elem_bt[i]  = T_FLOAT; break;
+            case 'I': in_elem_bt[i]  = T_INT; break;
+            case 'J': in_elem_bt[i]  = T_LONG; break;
+            case 'S': in_elem_bt[i]  = T_SHORT; break;
+            case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
+            default: ShouldNotReachHere();
+          }
+        }
+      } else {
+        out_sig_bt[argc++] = in_sig_bt[i];
+        in_elem_bt[i] = T_VOID;
+      }
+      if (in_sig_bt[i] != T_VOID) {
+        assert(in_sig_bt[i] == ss.type(), "must match");
+        ss.next();
+      }
+    }
   }
 
   // Now figure out where the args must be stored and how much stack space
   // they require.
-  //
   int out_arg_slots;
   out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
 
@@ -1213,13 +1643,47 @@
   int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
 
   // Now the space for the inbound oop handle area
+  int total_save_slots = 6 * VMRegImpl::slots_per_word;  // 6 arguments passed in registers
+  if (is_critical_native) {
+    // Critical natives may have to call out so they need a save area
+    // for register arguments.
+    int double_slots = 0;
+    int single_slots = 0;
+    for ( int i = 0; i < total_in_args; i++) {
+      if (in_regs[i].first()->is_Register()) {
+        const Register reg = in_regs[i].first()->as_Register();
+        switch (in_sig_bt[i]) {
+          case T_BOOLEAN:
+          case T_BYTE:
+          case T_SHORT:
+          case T_CHAR:
+          case T_INT:  single_slots++; break;
+          case T_ARRAY:
+          case T_LONG: double_slots++; break;
+          default:  ShouldNotReachHere();
+        }
+      } else if (in_regs[i].first()->is_XMMRegister()) {
+        switch (in_sig_bt[i]) {
+          case T_FLOAT:  single_slots++; break;
+          case T_DOUBLE: double_slots++; break;
+          default:  ShouldNotReachHere();
+        }
+      } else if (in_regs[i].first()->is_FloatRegister()) {
+        ShouldNotReachHere();
+      }
+    }
+    total_save_slots = double_slots * 2 + single_slots;
+    // align the save area
+    if (double_slots != 0) {
+      stack_slots = round_to(stack_slots, 2);
+    }
+  }
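The accounting above just counts VMRegImpl stack slots for the register-resident arguments: 64-bit values (T_LONG, T_DOUBLE, and T_ARRAY, whose base pointer sits in a 64-bit register) cost two slots each, int-sized values and floats cost one, and the frame is rounded to an even slot count whenever doubles are present. A worked example with a hypothetical argument mix, not taken from this patch:

    // Register arguments of a critical native taking (byte[], int, double):
    //   T_ARRAY  -> double_slots++   (array base pointer in a 64-bit GPR)
    //   T_INT    -> single_slots++
    //   T_DOUBLE -> double_slots++   (XMM register)
    // total_save_slots = double_slots * 2 + single_slots = 2 * 2 + 1 = 5
    // and stack_slots is rounded up to an even count because double_slots != 0.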
 
   int oop_handle_offset = stack_slots;
-  stack_slots += 6*VMRegImpl::slots_per_word;
+  stack_slots += total_save_slots;
 
   // Now any space we need for handlizing a klass if static method
 
-  int oop_temp_slot_offset = 0;
   int klass_slot_offset = 0;
   int klass_offset = -1;
   int lock_slot_offset = 0;
@@ -1272,7 +1736,6 @@
 
   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
 
-
   // First thing make an ic check to see if we should even be here
 
   // We are free to use all registers as temps without saving them and
@@ -1283,22 +1746,22 @@
   const Register ic_reg = rax;
   const Register receiver = j_rarg0;
 
-  Label ok;
+  Label hit;
   Label exception_pending;
 
   assert_different_registers(ic_reg, receiver, rscratch1);
   __ verify_oop(receiver);
   __ load_klass(rscratch1, receiver);
   __ cmpq(ic_reg, rscratch1);
-  __ jcc(Assembler::equal, ok);
+  __ jcc(Assembler::equal, hit);
 
   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 
-  __ bind(ok);
-
   // Verified entry point must be aligned
   __ align(8);
 
+  __ bind(hit);
+
   int vep_offset = ((intptr_t)__ pc()) - start;
 
   // The instruction at the verified entry point must be 5 bytes or longer
@@ -1319,9 +1782,8 @@
   // -2 because return address is already present and so is saved rbp
   __ subptr(rsp, stack_size - 2*wordSize);
 
-    // Frame is now completed as far as size and linkage.
-
-    int frame_complete = ((intptr_t)__ pc()) - start;
+  // Frame is now completed as far as size and linkage.
+  int frame_complete = ((intptr_t)__ pc()) - start;
 
 #ifdef ASSERT
     {
@@ -1341,7 +1803,10 @@
 
   const Register oop_handle_reg = r14;
 
-
+  if (is_critical_native) {
+    check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
+                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
+  }
 
   //
   // We immediately shuffle the arguments so that any vm call we have to
@@ -1390,9 +1855,43 @@
 
 #endif /* ASSERT */
 
-
-  int c_arg = total_c_args - 1;
-  for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
+  // This may iterate in two different directions depending on the
+  // kind of native it is.  The reason is that for regular JNI natives
+  // the incoming and outgoing registers are offset upwards and for
+  // critical natives they are offset down.
+  GrowableArray<int> arg_order(2 * total_in_args);
+  VMRegPair tmp_vmreg;
+  tmp_vmreg.set1(rbx->as_VMReg());
+
+  if (!is_critical_native) {
+    for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
+      arg_order.push(i);
+      arg_order.push(c_arg);
+    }
+  } else {
+    // Compute a valid move order, using tmp_vmreg to break any cycles
+    ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
+  }
+
+  int temploc = -1;
+  for (int ai = 0; ai < arg_order.length(); ai += 2) {
+    int i = arg_order.at(ai);
+    int c_arg = arg_order.at(ai + 1);
+    __ block_comment(err_msg("move %d -> %d", i, c_arg));
+    if (c_arg == -1) {
+      assert(is_critical_native, "should only be required for critical natives");
+      // This arg needs to be moved to a temporary
+      __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
+      in_regs[i] = tmp_vmreg;
+      temploc = i;
+      continue;
+    } else if (i == -1) {
+      assert(is_critical_native, "should only be required for critical natives");
+      // Read from the temporary location
+      assert(temploc != -1, "must be valid");
+      i = temploc;
+      temploc = -1;
+    }
 #ifdef ASSERT
     if (in_regs[i].first()->is_Register()) {
       assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
@@ -1407,7 +1906,20 @@
 #endif /* ASSERT */
     switch (in_sig_bt[i]) {
       case T_ARRAY:
+        if (is_critical_native) {
+          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
+          c_arg++;
+#ifdef ASSERT
+          if (out_regs[c_arg].first()->is_Register()) {
+            reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
+          } else if (out_regs[c_arg].first()->is_XMMRegister()) {
+            freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
+          }
+#endif
+          break;
+        }
       case T_OBJECT:
+        assert(!is_critical_native, "no oop arguments");
         object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                     ((i == 0) && (!is_static)),
                     &receiver_offset);
@@ -1439,11 +1951,11 @@
 
   // point c_arg at the first arg that is already loaded in case we
   // need to spill before we call out
-  c_arg++;
+  int c_arg = total_c_args - total_in_args;
 
   // Pre-load a static method's oop into r14.  Used both by locking code and
   // the normal JNI call code.
-  if (method->is_static()) {
+  if (method->is_static() && !is_critical_native) {
 
     //  load oop into a register
     __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
@@ -1509,6 +2021,7 @@
   Label lock_done;
 
   if (method->is_synchronized()) {
+    assert(!is_critical_native, "unhandled");
 
 
     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
@@ -1572,13 +2085,14 @@
 
 
   // get JNIEnv* which is first argument to native
-
-  __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
+  if (!is_critical_native) {
+    __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
+  }
 
   // Now set thread in native
   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
 
-  __ call(RuntimeAddress(method->native_function()));
+  __ call(RuntimeAddress(native_func));
 
     // Either restore the MXCSR register after returning from the JNI Call
     // or verify that it wasn't changed.
@@ -1634,6 +2148,7 @@
     }
   }
 
+  Label after_transition;
 
   // check for safepoint operation in progress and/or pending suspend requests
   {
@@ -1659,16 +2174,28 @@
     __ mov(r12, rsp); // remember sp
     __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
     __ andptr(rsp, -16); // align stack as required by ABI
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
+    if (!is_critical_native) {
+      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
+    } else {
+      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
+    }
     __ mov(rsp, r12); // restore sp
     __ reinit_heapbase();
     // Restore any method result value
     restore_native_result(masm, ret_type, stack_slots);
+
+    if (is_critical_native) {
+      // The call above performed the transition to thread_in_Java so
+      // skip the transition logic below.
+      __ jmpb(after_transition);
+    }
+
     __ bind(Continue);
   }
 
   // change thread state
   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
+  __ bind(after_transition);
 
   Label reguard;
   Label reguard_done;
@@ -1746,17 +2273,21 @@
       __ verify_oop(rax);
   }
 
-  // reset handle block
-  __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
-  __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
+  if (!is_critical_native) {
+    // reset handle block
+    __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
+    __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
+  }
 
   // pop our frame
 
   __ leave();
 
-  // Any exception pending?
-  __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
-  __ jcc(Assembler::notEqual, exception_pending);
+  if (!is_critical_native) {
+    // Any exception pending?
+    __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
+    __ jcc(Assembler::notEqual, exception_pending);
+  }
 
   // Return
 
@@ -1764,12 +2295,13 @@
 
   // Unexpected paths are out of line and go here
 
-  // forward the exception
-  __ bind(exception_pending);
-
-  // and forward the exception
-  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
-
+  if (!is_critical_native) {
+    // forward the exception
+    __ bind(exception_pending);
+
+    // and forward the exception
+    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+  }
 
   // Slow path locking & unlocking
   if (method->is_synchronized()) {
@@ -1876,6 +2408,11 @@
                                             (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                             in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
                                             oop_maps);
+
+  if (is_critical_native) {
+    nm->set_lazy_critical_native(true);
+  }
+
   return nm;
 
 }
@@ -3255,8 +3792,12 @@
   //
   // address OptoRuntime::handle_exception_C(JavaThread* thread)
 
-  __ set_last_Java_frame(noreg, noreg, NULL);
+  // At a method handle call, the stack may not be properly aligned
+  // when returning with an exception.
+  address the_pc = __ pc();
+  __ set_last_Java_frame(noreg, noreg, the_pc);
   __ mov(c_rarg0, r15_thread);
+  __ andptr(rsp, -(StackAlignmentInBytes));    // Align stack
   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
 
   // Set an oopmap for the call site.  This oopmap will only be used if we
@@ -3267,9 +3808,9 @@
 
   OopMapSet* oop_maps = new OopMapSet();
 
-  oop_maps->add_gc_map( __ pc()-start, new OopMap(SimpleRuntimeFrame::framesize, 0));
-
-  __ reset_last_Java_frame(false, false);
+  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
+
+  __ reset_last_Java_frame(false, true);
 
   // Restore callee-saved registers
 
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -710,6 +710,21 @@
     return start;
   }
 
+  // Support for intptr_t get_previous_sp()
+  //
+  // This routine is used to find the previous stack pointer for the
+  // caller.
+  address generate_get_previous_sp() {
+    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
+    address start = __ pc();
+
+    __ movptr(rax, rsp);
+    __ addptr(rax, 8); // return address is at the top of the stack.
+    __ ret(0);
+
+    return start;
+  }
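Because the stub is reached via CALL, the return address sits at the top of the stack on entry, so the caller's stack pointer is simply the stub's own rsp plus one word; that is all the movptr/addptr pair computes. A conceptual C++ equivalent (assuming 64-bit words):

    // Sketch of what generate_get_previous_sp() returns in rax.
    inline intptr_t* previous_sp_from(intptr_t* rsp_at_stub_entry) {
      return (intptr_t*)((char*)rsp_at_stub_entry + 8);  // skip the pushed return address
    }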
+
   //----------------------------------------------------------------------------------------------------
   // Support for void verify_mxcsr()
   //
@@ -2978,7 +2993,9 @@
     int frame_complete = __ pc() - start;
 
     // Set up last_Java_sp and last_Java_fp
-    __ set_last_Java_frame(rsp, rbp, NULL);
+    address the_pc = __ pc();
+    __ set_last_Java_frame(rsp, rbp, the_pc);
+    __ andptr(rsp, -(StackAlignmentInBytes));    // Align stack
 
     // Call runtime
     if (arg1 != noreg) {
@@ -2995,9 +3012,9 @@
     // Generate oop map
     OopMap* map = new OopMap(framesize, 0);
 
-    oop_maps->add_gc_map(__ pc() - start, map);
-
-    __ reset_last_Java_frame(true, false);
+    oop_maps->add_gc_map(the_pc - start, map);
+
+    __ reset_last_Java_frame(true, true);
 
     __ leave(); // required for proper stackwalking of RuntimeStub frame
 
@@ -3058,6 +3075,7 @@
 
     // platform dependent
     StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
+    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
 
     StubRoutines::x86::_verify_mxcsr_entry    = generate_verify_mxcsr();
 
--- a/src/cpu/x86/vm/stubRoutines_x86_64.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/stubRoutines_x86_64.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -43,6 +43,7 @@
 // a description of how to extend it, see the stubRoutines.hpp file.
 
 address StubRoutines::x86::_get_previous_fp_entry = NULL;
+address StubRoutines::x86::_get_previous_sp_entry = NULL;
 
 address StubRoutines::x86::_verify_mxcsr_entry = NULL;
 
--- a/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -41,6 +41,7 @@
 
  private:
   static address _get_previous_fp_entry;
+  static address _get_previous_sp_entry;
   static address _verify_mxcsr_entry;
 
   static address _f2i_fixup;
@@ -61,6 +62,11 @@
     return _get_previous_fp_entry;
   }
 
+  static address get_previous_sp_entry()
+  {
+    return _get_previous_sp_entry;
+  }
+
   static address verify_mxcsr_entry()
   {
     return _verify_mxcsr_entry;
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,8 +65,8 @@
 
   address generate_getPsrInfo() {
     // Flags to test CPU type.
-    const uint32_t EFL_AC           = 0x40000;
-    const uint32_t EFL_ID           = 0x200000;
+    const uint32_t HS_EFL_AC           = 0x40000;
+    const uint32_t HS_EFL_ID           = 0x200000;
     // Values for when we don't have a CPUID instruction.
     const int      CPU_FAMILY_SHIFT = 8;
     const uint32_t CPU_FAMILY_386   = (3 << CPU_FAMILY_SHIFT);
@@ -100,7 +100,7 @@
     //
     // if we are unable to change the AC flag, we have a 386
     //
-    __ xorl(rax, EFL_AC);
+    __ xorl(rax, HS_EFL_AC);
     __ push(rax);
     __ popf();
     __ pushf();
@@ -118,7 +118,7 @@
     //
     __ bind(detect_486);
     __ mov(rax, rcx);
-    __ xorl(rax, EFL_ID);
+    __ xorl(rax, HS_EFL_ID);
     __ push(rax);
     __ popf();
     __ pushf();
--- a/src/cpu/x86/vm/vm_version_x86.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -249,13 +249,18 @@
 
   enum {
     // AMD
-    CPU_FAMILY_AMD_11H       = 17,
+    CPU_FAMILY_AMD_11H       = 0x11,
     // Intel
     CPU_FAMILY_INTEL_CORE    = 6,
-    CPU_MODEL_NEHALEM_EP     = 26,
-    CPU_MODEL_WESTMERE_EP    = 44,
-//  CPU_MODEL_IVYBRIDGE_EP   = ??, TODO - get real value
-    CPU_MODEL_SANDYBRIDGE_EP = 45
+    CPU_MODEL_NEHALEM        = 0x1e,
+    CPU_MODEL_NEHALEM_EP     = 0x1a,
+    CPU_MODEL_NEHALEM_EX     = 0x2e,
+    CPU_MODEL_WESTMERE       = 0x25,
+    CPU_MODEL_WESTMERE_EP    = 0x2c,
+    CPU_MODEL_WESTMERE_EX    = 0x2f,
+    CPU_MODEL_SANDYBRIDGE    = 0x2a,
+    CPU_MODEL_SANDYBRIDGE_EP = 0x2d,
+    CPU_MODEL_IVYBRIDGE_EP   = 0x3a
   } cpuExtendedFamily;
 
   // cpuid information block.  All info derived from executing cpuid with
@@ -325,7 +330,7 @@
     uint32_t proc_name_4, proc_name_5, proc_name_6, proc_name_7;
     uint32_t proc_name_8, proc_name_9, proc_name_10,proc_name_11;
 
-    // cpuid function 0x80000005 //AMD L1, Intel reserved
+    // cpuid function 0x80000005 // AMD L1, Intel reserved
     uint32_t     ext_cpuid5_eax; // unused currently
     uint32_t     ext_cpuid5_ebx; // reserved
     ExtCpuid5Ex  ext_cpuid5_ecx; // L1 data cache info (AMD)
@@ -547,15 +552,15 @@
   static bool is_intel_tsc_synched_at_init()  {
     if (is_intel_family_core()) {
       uint32_t ext_model = extended_cpu_model();
-      if (ext_model == CPU_MODEL_NEHALEM_EP   ||
-          ext_model == CPU_MODEL_WESTMERE_EP  ||
-// TODO   ext_model == CPU_MODEL_IVYBRIDGE_EP ||
-          ext_model == CPU_MODEL_SANDYBRIDGE_EP) {
-        // 2-socket invtsc support. EX versions with 4 sockets are not
-        // guaranteed to synchronize tscs at initialization via a double
-        // handshake. The tscs can be explicitly set in software.  Code
-        // that uses tsc values must be prepared for them to arbitrarily
-        // jump backward or forward.
+      if (ext_model == CPU_MODEL_NEHALEM_EP     ||
+          ext_model == CPU_MODEL_WESTMERE_EP    ||
+          ext_model == CPU_MODEL_SANDYBRIDGE_EP ||
+          ext_model == CPU_MODEL_IVYBRIDGE_EP) {
+        // <= 2-socket invariant tsc support. EX versions are usually used
+        // in > 2-socket systems and likely don't synchronize tscs at
+        // initialization.
+        // Code that uses tsc values must be prepared for them to arbitrarily
+        // jump forward or backward.
         return true;
       }
     }
--- a/src/cpu/x86/vm/x86.ad	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/x86.ad	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -37,10 +37,87 @@
   static address double_signmask() { return (address)double_signmask_pool; }
   static address double_signflip() { return (address)double_signflip_pool; }
 #endif
+
+#ifndef PRODUCT
+  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
+    st->print("nop \t# %d bytes pad for loops and calls", _count);
+  }
+#endif
+
+  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
+    MacroAssembler _masm(&cbuf);
+    __ nop(_count);
+  }
+
+  uint MachNopNode::size(PhaseRegAlloc*) const {
+    return _count;
+  }
+
+#ifndef PRODUCT
+  void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const {
+    st->print("# breakpoint");
+  }
+#endif
+
+  void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
+    MacroAssembler _masm(&cbuf);
+    __ int3();
+  }
+
+  uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
+    return MachNode::size(ra_);
+  }
+
+%}
+
+encode %{
+
+  enc_class preserve_SP %{
+    debug_only(int off0 = cbuf.insts_size());
+    MacroAssembler _masm(&cbuf);
+    // RBP is preserved across all calls, even compiled calls.
+    // Use it to preserve RSP in places where the callee might change the SP.
+    __ movptr(rbp_mh_SP_save, rsp);
+    debug_only(int off1 = cbuf.insts_size());
+    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
+  %}
+
+  enc_class restore_SP %{
+    MacroAssembler _masm(&cbuf);
+    __ movptr(rsp, rbp_mh_SP_save);
+  %}
+
+  enc_class call_epilog %{
+    if (VerifyStackAtCalls) {
+      // Check that stack depth is unchanged: find majik cookie on stack
+      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));
+      MacroAssembler _masm(&cbuf);
+      Label L;
+      __ cmpptr(Address(rsp, framesize), (int32_t)0xbadb100d);
+      __ jccb(Assembler::equal, L);
+      // Die if stack mismatch
+      __ int3();
+      __ bind(L);
+    }
+  %}
+
 %}
 
 // INSTRUCTIONS -- Platform independent definitions (same for 32- and 64-bit)
 
+// ============================================================================
+
+instruct ShouldNotReachHere() %{
+  match(Halt);
+  format %{ "int3\t# ShouldNotReachHere" %}
+  ins_encode %{
+    __ int3();
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// ============================================================================
+
 instruct addF_reg(regF dst, regF src) %{
   predicate((UseSSE>=1) && (UseAVX == 0));
   match(Set dst (AddF dst src));
--- a/src/cpu/x86/vm/x86_32.ad	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/x86_32.ad	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -341,12 +341,6 @@
   return round_to(current_offset, alignment_required()) - current_offset;
 }
 
-#ifndef PRODUCT
-void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
-  st->print("INT3");
-}
-#endif
-
 // EMIT_RM()
 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
   unsigned char c = (unsigned char)((f1 << 6) | (f2 << 3) | f3);
@@ -550,118 +544,66 @@
 
 //=============================================================================
 #ifndef PRODUCT
-void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
+void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
   Compile* C = ra_->C;
-  if( C->in_24_bit_fp_mode() ) {
-    st->print("FLDCW  24 bit fpu control word");
-    st->print_cr(""); st->print("\t");
-  }
 
   int framesize = C->frame_slots() << LogBytesPerInt;
   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
-  // Remove two words for return addr and rbp,
-  framesize -= 2*wordSize;
-
-  // Calls to C2R adapters often do not accept exceptional returns.
-  // We require that their callers must bang for them.  But be careful, because
-  // some VM calls (such as call site linkage) can use several kilobytes of
-  // stack.  But the stack safety zone should account for that.
-  // See bugs 4446381, 4468289, 4497237.
+  // Remove wordSize for return addr which is already pushed.
+  framesize -= wordSize;
+
   if (C->need_stack_bang(framesize)) {
-    st->print_cr("# stack bang"); st->print("\t");
-  }
-  st->print_cr("PUSHL  EBP"); st->print("\t");
-
-  if( VerifyStackAtCalls ) { // Majik cookie to verify stack depth
-    st->print("PUSH   0xBADB100D\t# Majik cookie for stack depth check");
-    st->print_cr(""); st->print("\t");
     framesize -= wordSize;
-  }
-
-  if ((C->in_24_bit_fp_mode() || VerifyStackAtCalls ) && framesize < 128 ) {
+    st->print("# stack bang");
+    st->print("\n\t");
+    st->print("PUSH   EBP\t# Save EBP");
     if (framesize) {
-      st->print("SUB    ESP,%d\t# Create frame",framesize);
+      st->print("\n\t");
+      st->print("SUB    ESP, #%d\t# Create frame",framesize);
     }
   } else {
-    st->print("SUB    ESP,%d\t# Create frame",framesize);
+    st->print("SUB    ESP, #%d\t# Create frame",framesize);
+    st->print("\n\t");
+    framesize -= wordSize;
+    st->print("MOV    [ESP + #%d], EBP\t# Save EBP",framesize);
+  }
+
+  if (VerifyStackAtCalls) {
+    st->print("\n\t");
+    framesize -= wordSize;
+    st->print("MOV    [ESP + #%d], 0xBADB100D\t# Majik cookie for stack depth check",framesize);
   }
+
+  if( C->in_24_bit_fp_mode() ) {
+    st->print("\n\t");
+    st->print("FLDCW  \t# load 24 bit fpu control word");
+  }
+  if (UseSSE >= 2 && VerifyFPU) {
+    st->print("\n\t");
+    st->print("# verify FPU stack (must be clean on entry)");
+  }
+
+#ifdef ASSERT
+  if (VerifyStackAtCalls) {
+    st->print("\n\t");
+    st->print("# stack alignment check");
+  }
+#endif
+  st->cr();
 }
 #endif
 
 
 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   Compile* C = ra_->C;
-
-  if (UseSSE >= 2 && VerifyFPU) {
-    MacroAssembler masm(&cbuf);
-    masm.verify_FPU(0, "FPU stack must be clean on entry");
-  }
-
-  // WARNING: Initial instruction MUST be 5 bytes or longer so that
-  // NativeJump::patch_verified_entry will be able to patch out the entry
-  // code safely. The fldcw is ok at 6 bytes, the push to verify stack
-  // depth is ok at 5 bytes, the frame allocation can be either 3 or
-  // 6 bytes. So if we don't do the fldcw or the push then we must
-  // use the 6 byte frame allocation even if we have no frame. :-(
-  // If method sets FPU control word do it now
-  if( C->in_24_bit_fp_mode() ) {
-    MacroAssembler masm(&cbuf);
-    masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
-  }
+  MacroAssembler _masm(&cbuf);
 
   int framesize = C->frame_slots() << LogBytesPerInt;
-  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
-  // Remove two words for return addr and rbp,
-  framesize -= 2*wordSize;
-
-  // Calls to C2R adapters often do not accept exceptional returns.
-  // We require that their callers must bang for them.  But be careful, because
-  // some VM calls (such as call site linkage) can use several kilobytes of
-  // stack.  But the stack safety zone should account for that.
-  // See bugs 4446381, 4468289, 4497237.
-  if (C->need_stack_bang(framesize)) {
-    MacroAssembler masm(&cbuf);
-    masm.generate_stack_overflow_check(framesize);
-  }
-
-  // We always push rbp, so that on return to interpreter rbp, will be
-  // restored correctly and we can correct the stack.
-  emit_opcode(cbuf, 0x50 | EBP_enc);
-
-  if( VerifyStackAtCalls ) { // Majik cookie to verify stack depth
-    emit_opcode(cbuf, 0x68); // push 0xbadb100d
-    emit_d32(cbuf, 0xbadb100d);
-    framesize -= wordSize;
-  }
-
-  if ((C->in_24_bit_fp_mode() || VerifyStackAtCalls ) && framesize < 128 ) {
-    if (framesize) {
-      emit_opcode(cbuf, 0x83);   // sub  SP,#framesize
-      emit_rm(cbuf, 0x3, 0x05, ESP_enc);
-      emit_d8(cbuf, framesize);
-    }
-  } else {
-    emit_opcode(cbuf, 0x81);   // sub  SP,#framesize
-    emit_rm(cbuf, 0x3, 0x05, ESP_enc);
-    emit_d32(cbuf, framesize);
-  }
+
+  __ verified_entry(framesize, C->need_stack_bang(framesize), C->in_24_bit_fp_mode());
+
   C->set_frame_complete(cbuf.insts_size());
 
-#ifdef ASSERT
-  if (VerifyStackAtCalls) {
-    Label L;
-    MacroAssembler masm(&cbuf);
-    masm.push(rax);
-    masm.mov(rax, rsp);
-    masm.andptr(rax, StackAlignmentInBytes-1);
-    masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
-    masm.pop(rax);
-    masm.jcc(Assembler::equal, L);
-    masm.stop("Stack is not properly aligned!");
-    masm.bind(L);
-  }
-#endif
-
   if (C->has_mach_constant_base_node()) {
     // NOTE: We set the table base offset here because users might be
     // emitted before MachConstantBaseNode.
@@ -1169,7 +1111,7 @@
 }
 
 #ifndef PRODUCT
-void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
+void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const {
   implementation( NULL, ra_, false, st );
 }
 #endif
@@ -1182,22 +1124,6 @@
   return implementation( NULL, ra_, true, NULL );
 }
 
-//=============================================================================
-#ifndef PRODUCT
-void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
-  st->print("NOP \t# %d bytes pad for loops and calls", _count);
-}
-#endif
-
-void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
-  MacroAssembler _masm(&cbuf);
-  __ nop(_count);
-}
-
-uint MachNopNode::size(PhaseRegAlloc *) const {
-  return _count;
-}
-
 
 //=============================================================================
 #ifndef PRODUCT
@@ -1367,6 +1293,14 @@
   if (!has_match_rule(opcode))
     return false;
 
+  switch (opcode) {
+    case Op_PopCountI:
+    case Op_PopCountL:
+      if (!UsePopCountInstruction)
+        return false;
+    break;
+  }
+  
   return true;  // Per default match rules are supported.
 }
 
@@ -1883,21 +1817,6 @@
     }
   %}
 
-  enc_class preserve_SP %{
-    debug_only(int off0 = cbuf.insts_size());
-    MacroAssembler _masm(&cbuf);
-    // RBP is preserved across all calls, even compiled calls.
-    // Use it to preserve RSP in places where the callee might change the SP.
-    __ movptr(rbp_mh_SP_save, rsp);
-    debug_only(int off1 = cbuf.insts_size());
-    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
-  %}
-
-  enc_class restore_SP %{
-    MacroAssembler _masm(&cbuf);
-    __ movptr(rsp, rbp_mh_SP_save);
-  %}
-
   enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
     // who we intended to call.
@@ -3846,9 +3765,9 @@
   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
   // Otherwise, it is above the locks and verification slot and alignment word
   return_addr(STACK - 1 +
-              round_to(1+VerifyStackAtCalls+
-              Compile::current()->fixed_slots(),
-              (StackAlignmentInBytes/wordSize)));
+              round_to((Compile::current()->in_preserve_stack_slots() +
+                        Compile::current()->fixed_slots()),
+                       stack_alignment_in_slots()));
 
   // Body of function which returns an integer array locating
   // arguments either in registers or in stack slots.  Passed an array
@@ -13476,6 +13395,25 @@
   ins_pipe( ialu_reg_mem );
 %}
 
+
+// ============================================================================
+// This name is KNOWN by the ADLC and cannot be changed.
+// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
+// for this guy.
+instruct tlsLoadP(eRegP dst, eFlagsReg cr) %{
+  match(Set dst (ThreadLocal));
+  effect(DEF dst, KILL cr);
+
+  format %{ "MOV    $dst, Thread::current()" %}
+  ins_encode %{
+    Register dstReg = as_Register($dst$$reg);
+    __ get_thread(dstReg);
+  %}
+  ins_pipe( ialu_reg_fat );
+%}
+
+
+
 //----------PEEPHOLE RULES-----------------------------------------------------
 // These must follow all instruction definitions as they use the names
 // defined in the instructions definitions.
--- a/src/cpu/x86/vm/x86_64.ad	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/x86/vm/x86_64.ad	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -610,13 +610,6 @@
   return round_to(current_offset, alignment_required()) - current_offset;
 }
 
-#ifndef PRODUCT
-void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const
-{
-  st->print("INT3");
-}
-#endif
-
 // EMIT_RM()
 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
   unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3);
@@ -853,121 +846,53 @@
 
 //=============================================================================
 #ifndef PRODUCT
-void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const
-{
+void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
   Compile* C = ra_->C;
 
   int framesize = C->frame_slots() << LogBytesPerInt;
   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
-  // Remove wordSize for return adr already pushed
-  // and another for the RBP we are going to save
-  framesize -= 2*wordSize;
-  bool need_nop = true;
-
-  // Calls to C2R adapters often do not accept exceptional returns.
-  // We require that their callers must bang for them.  But be
-  // careful, because some VM calls (such as call site linkage) can
-  // use several kilobytes of stack.  But the stack safety zone should
-  // account for that.  See bugs 4446381, 4468289, 4497237.
+  // Remove wordSize for return addr which is already pushed.
+  framesize -= wordSize;
+
   if (C->need_stack_bang(framesize)) {
-    st->print_cr("# stack bang"); st->print("\t");
-    need_nop = false;
+    framesize -= wordSize;
+    st->print("# stack bang");
+    st->print("\n\t");
+    st->print("pushq   rbp\t# Save rbp");
+    if (framesize) {
+      st->print("\n\t");
+      st->print("subq    rsp, #%d\t# Create frame",framesize);
+    }
+  } else {
+    st->print("subq    rsp, #%d\t# Create frame",framesize);
+    st->print("\n\t");
+    framesize -= wordSize;
+    st->print("movq    [rsp + #%d], rbp\t# Save rbp",framesize);
   }
-  st->print_cr("pushq   rbp"); st->print("\t");
 
   if (VerifyStackAtCalls) {
-    // Majik cookie to verify stack depth
-    st->print_cr("pushq   0xffffffffbadb100d"
-                  "\t# Majik cookie for stack depth check");
-    st->print("\t");
-    framesize -= wordSize; // Remove 2 for cookie
-    need_nop = false;
+    st->print("\n\t");
+    framesize -= wordSize;
+    st->print("movq    [rsp + #%d], 0xbadb100d\t# Majik cookie for stack depth check",framesize);
+#ifdef ASSERT
+    st->print("\n\t");
+    st->print("# stack alignment check");
+#endif
   }
-
-  if (framesize) {
-    st->print("subq    rsp, #%d\t# Create frame", framesize);
-    if (framesize < 0x80 && need_nop) {
-      st->print("\n\tnop\t# nop for patch_verified_entry");
-    }
-  }
+  st->cr();
 }
 #endif
 
-void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
-{
+void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   Compile* C = ra_->C;
-
-  // WARNING: Initial instruction MUST be 5 bytes or longer so that
-  // NativeJump::patch_verified_entry will be able to patch out the entry
-  // code safely. The fldcw is ok at 6 bytes, the push to verify stack
-  // depth is ok at 5 bytes, the frame allocation can be either 3 or
-  // 6 bytes. So if we don't do the fldcw or the push then we must
-  // use the 6 byte frame allocation even if we have no frame. :-(
-  // If method sets FPU control word do it now
+  MacroAssembler _masm(&cbuf);
 
   int framesize = C->frame_slots() << LogBytesPerInt;
-  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
-  // Remove wordSize for return adr already pushed
-  // and another for the RBP we are going to save
-  framesize -= 2*wordSize;
-  bool need_nop = true;
-
-  // Calls to C2R adapters often do not accept exceptional returns.
-  // We require that their callers must bang for them.  But be
-  // careful, because some VM calls (such as call site linkage) can
-  // use several kilobytes of stack.  But the stack safety zone should
-  // account for that.  See bugs 4446381, 4468289, 4497237.
-  if (C->need_stack_bang(framesize)) {
-    MacroAssembler masm(&cbuf);
-    masm.generate_stack_overflow_check(framesize);
-    need_nop = false;
-  }
-
-  // We always push rbp so that on return to interpreter rbp will be
-  // restored correctly and we can correct the stack.
-  emit_opcode(cbuf, 0x50 | RBP_enc);
-
-  if (VerifyStackAtCalls) {
-    // Majik cookie to verify stack depth
-    emit_opcode(cbuf, 0x68); // pushq (sign-extended) 0xbadb100d
-    emit_d32(cbuf, 0xbadb100d);
-    framesize -= wordSize; // Remove 2 for cookie
-    need_nop = false;
-  }
-
-  if (framesize) {
-    emit_opcode(cbuf, Assembler::REX_W);
-    if (framesize < 0x80) {
-      emit_opcode(cbuf, 0x83);   // sub  SP,#framesize
-      emit_rm(cbuf, 0x3, 0x05, RSP_enc);
-      emit_d8(cbuf, framesize);
-      if (need_nop) {
-        emit_opcode(cbuf, 0x90); // nop
-      }
-    } else {
-      emit_opcode(cbuf, 0x81);   // sub  SP,#framesize
-      emit_rm(cbuf, 0x3, 0x05, RSP_enc);
-      emit_d32(cbuf, framesize);
-    }
-  }
+
+  __ verified_entry(framesize, C->need_stack_bang(framesize), false);
 
   C->set_frame_complete(cbuf.insts_size());
 
-#ifdef ASSERT
-  if (VerifyStackAtCalls) {
-    Label L;
-    MacroAssembler masm(&cbuf);
-    masm.push(rax);
-    masm.mov(rax, rsp);
-    masm.andptr(rax, StackAlignmentInBytes-1);
-    masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
-    masm.pop(rax);
-    masm.jcc(Assembler::equal, L);
-    masm.stop("Stack is not properly aligned!");
-    masm.bind(L);
-  }
-#endif
-
   if (C->has_mach_constant_base_node()) {
     // NOTE: We set the table base offset here because users might be
     // emitted before MachConstantBaseNode.
@@ -1598,26 +1523,6 @@
 
 //=============================================================================
 #ifndef PRODUCT
-void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const
-{
-  st->print("nop \t# %d bytes pad for loops and calls", _count);
-}
-#endif
-
-void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const
-{
-  MacroAssembler _masm(&cbuf);
-  __ nop(_count);
-}
-
-uint MachNopNode::size(PhaseRegAlloc*) const
-{
-  return _count;
-}
-
-
-//=============================================================================
-#ifndef PRODUCT
 void BoxLockNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 {
   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
@@ -1809,6 +1714,14 @@
   if (!has_match_rule(opcode))
     return false;
 
+  switch (opcode) {
+    case Op_PopCountI:
+    case Op_PopCountL:
+      if (!UsePopCountInstruction)
+        return false;
+    break;
+  }
+
   return true;  // Per default match rules are supported.
 }
 
@@ -2323,21 +2236,6 @@
                    RELOC_DISP32);
   %}
 
-  enc_class preserve_SP %{
-    debug_only(int off0 = cbuf.insts_size());
-    MacroAssembler _masm(&cbuf);
-    // RBP is preserved across all calls, even compiled calls.
-    // Use it to preserve RSP in places where the callee might change the SP.
-    __ movptr(rbp_mh_SP_save, rsp);
-    debug_only(int off1 = cbuf.insts_size());
-    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
-  %}
-
-  enc_class restore_SP %{
-    MacroAssembler _masm(&cbuf);
-    __ movptr(rsp, rbp_mh_SP_save);
-  %}
-
   enc_class Java_Static_Call(method meth)
   %{
     // JAVA STATIC CALL
@@ -3276,9 +3174,9 @@
   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
   // Otherwise, it is above the locks and verification slot and alignment word
   return_addr(STACK - 2 +
-              round_to(2 + 2 * VerifyStackAtCalls +
-                       Compile::current()->fixed_slots(),
-                       WordsPerLong * 2));
+              round_to((Compile::current()->in_preserve_stack_slots() +
+                        Compile::current()->fixed_slots()),
+                       stack_alignment_in_slots()));
 
   // Body of function which returns an integer array locating
   // arguments either in registers or in stack slots.  Passed an array
@@ -11736,6 +11634,21 @@
 %}
 
 
+// ============================================================================
+// This name is KNOWN by the ADLC and cannot be changed.
+// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
+// for this guy.
+instruct tlsLoadP(r15_RegP dst) %{
+  match(Set dst (ThreadLocal));
+  effect(DEF dst);
+
+  size(0);
+  format %{ "# TLS is in R15" %}
+  ins_encode( /*empty encoding*/ );
+  ins_pipe(ialu_reg_reg);
+%}
+
+
 //----------PEEPHOLE RULES-----------------------------------------------------
 // These must follow all instruction definitions as they use the names
 // defined in the instructions definitions.
--- a/src/cpu/zero/vm/frame_zero.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/cpu/zero/vm/frame_zero.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -418,7 +418,7 @@
   }
 }
 
-#ifdef ASSERT
+#ifndef PRODUCT
 
 void frame::describe_pd(FrameValues& values, int frame_no) {
 
--- a/src/os/bsd/vm/attachListener_bsd.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os/bsd/vm/attachListener_bsd.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -206,10 +206,15 @@
   // put in listen mode, set permissions, and rename into place
   res = ::listen(listener, 5);
   if (res == 0) {
-      RESTARTABLE(::chmod(initial_path, S_IREAD|S_IWRITE), res);
+    RESTARTABLE(::chmod(initial_path, S_IREAD|S_IWRITE), res);
+    if (res == 0) {
+      // make sure the file is owned by the effective user and effective group
+      // (this is the default on linux, but not on mac os)
+      RESTARTABLE(::chown(initial_path, geteuid(), getegid()), res);
       if (res == 0) {
-          res = ::rename(initial_path, path);
+        res = ::rename(initial_path, path);
       }
+    }
   }
   if (res == -1) {
     RESTARTABLE(::close(listener), res);
--- a/src/os/bsd/vm/decoder_machO.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os/bsd/vm/decoder_machO.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,8 +29,9 @@
 
 #include "utilities/decoder.hpp"
 
-// Just a placehold for now
-class MachODecoder: public NullDecoder {
+// Just a placeholder for now; a real implementation should derive
+// from AbstractDecoder
+class MachODecoder : public NullDecoder {
 public:
   MachODecoder() { }
   ~MachODecoder() { }
--- a/src/os/bsd/vm/osThread_bsd.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os/bsd/vm/osThread_bsd.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,11 @@
 
 void OSThread::pd_initialize() {
   assert(this != NULL, "check");
+#ifdef __APPLE__
+  _thread_id        = 0;
+#else
   _thread_id        = NULL;
+#endif
   _pthread_id       = NULL;
   _siginfo = NULL;
   _ucontext = NULL;
--- a/src/os/bsd/vm/osThread_bsd.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os/bsd/vm/osThread_bsd.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,10 +40,17 @@
  private:
 
 #ifdef _ALLBSD_SOURCE
-  // _thread_id and _pthread_id are the same on BSD
-  // keep both to minimize code divergence in os_bsd.cpp
+
+#ifdef __APPLE__
+  thread_t  _thread_id;
+#else
   pthread_t _thread_id;
+#endif
+
+  // _pthread_id is the pthread id, which is used by library calls
+  // (e.g. pthread_kill).
   pthread_t _pthread_id;
+
 #else
   // _thread_id is kernel thread id (similar to LWP id on Solaris). Each
   // thread has a unique thread_id (BsdThreads or NPTL). It can be used
@@ -64,9 +71,15 @@
   void    set_caller_sigmask(sigset_t sigmask)  { _caller_sigmask = sigmask; }
 
 #ifdef _ALLBSD_SOURCE
+#ifdef __APPLE__
+  thread_t thread_id() const {
+    return _thread_id;
+  }
+#else
   pthread_t thread_id() const {
     return _thread_id;
   }
+#endif
 #else
   pid_t thread_id() const {
     return _thread_id;
@@ -84,9 +97,15 @@
   }
 #endif // ASSERT
 #ifdef _ALLBSD_SOURCE
+#ifdef __APPLE__
+  void set_thread_id(thread_t id) {
+    _thread_id = id;
+  }
+#else
   void set_thread_id(pthread_t id) {
     _thread_id = id;
   }
+#endif
 #else
   void set_thread_id(pid_t id) {
     _thread_id = id;
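The osThread_bsd changes above split the two ids on Mac OS X: the OSThread id becomes a Mach thread_t (the port name that thread_info() and the Serviceability Agent want), while a pthread_t is kept alongside it for library calls such as pthread_kill(). A minimal sketch of the same split, assuming only the standard Mach and pthread headers:

#include <pthread.h>
#ifdef __APPLE__
#include <mach/mach.h>
#endif

struct ThreadIds {
#ifdef __APPLE__
  thread_t  os_id;       // Mach port name, usable with thread_info() etc.
#else
  pthread_t os_id;       // on other BSDs the pthread id doubles as the OS id
#endif
  pthread_t pthread_id;  // always kept for pthread_kill() and friends
};

static ThreadIds current_ids() {
  ThreadIds ids;
#ifdef __APPLE__
  ids.os_id = mach_thread_self();   // returns a Mach port for the current thread
#else
  ids.os_id = pthread_self();
#endif
  ids.pthread_id = pthread_self();
  return ids;
}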
--- a/src/os/bsd/vm/os_bsd.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os/bsd/vm/os_bsd.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -301,6 +301,12 @@
 #error Add appropriate cpu_arch setting
 #endif
 
+// Compiler variant
+#ifdef COMPILER2
+#define COMPILER_VARIANT "server"
+#else
+#define COMPILER_VARIANT "client"
+#endif
 
 #ifndef _ALLBSD_SOURCE
 // pid_t gettid()
@@ -562,6 +568,25 @@
             sprintf(ld_library_path, "%s:%s", v, t);
             free(t);
         }
+
+#ifdef __APPLE__
+        // Apple's Java6 has "." at the beginning of java.library.path.
+        // OpenJDK on Windows has "." at the end of java.library.path.
+        // OpenJDK on Linux and Solaris don't have "." in java.library.path
+        // at all. To ease the transition from Apple's Java6 to OpenJDK7,
+        // "." is appended to the end of java.library.path. Yes, this
+        // could cause a change in behavior, but Apple's Java6 behavior
+        // can be achieved by putting "." at the beginning of the
+        // JAVA_LIBRARY_PATH environment variable.
+        {
+            char *t = ld_library_path;
+            // that's +3 for appending ":." and the trailing '\0'
+            ld_library_path = (char *) malloc(strlen(t) + 3);
+            sprintf(ld_library_path, "%s:%s", t, ".");
+            free(t);
+        }
+#endif
+
         Arguments::set_library_path(ld_library_path);
     }
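The Apple-only block above grows the freshly built ld_library_path by exactly two characters (the ":" separator and ".") plus the terminator, then re-joins the pieces with sprintf and frees the old buffer. A minimal sketch of that append, assuming the input string was heap-allocated with malloc as in the surrounding code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Return a new malloc'd copy of 'path' with ":." appended; frees the original.
// (Error handling for a failed malloc is omitted, as in the code above.)
static char* append_dot(char* path) {
  // +3 covers the ':' separator, the '.' and the trailing '\0'.
  char* result = (char*) malloc(strlen(path) + 3);
  sprintf(result, "%s:%s", path, ".");
  free(path);
  return result;
}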
 
@@ -973,8 +998,13 @@
   }
 
 #ifdef _ALLBSD_SOURCE
+#ifdef __APPLE__
+  // thread_id is mach thread on macos
+  osthread->set_thread_id(::mach_thread_self());
+#else
   // thread_id is pthread_id on BSD
   osthread->set_thread_id(::pthread_self());
+#endif
 #else
   // thread_id is kernel thread id (similar to Solaris LWP id)
   osthread->set_thread_id(os::Bsd::gettid());
@@ -1165,7 +1195,11 @@
 
   // Store pthread info into the OSThread
 #ifdef _ALLBSD_SOURCE
+#ifdef __APPLE__
+  osthread->set_thread_id(::mach_thread_self());
+#else
   osthread->set_thread_id(::pthread_self());
+#endif
 #else
   osthread->set_thread_id(os::Bsd::gettid());
 #endif
@@ -1782,7 +1816,13 @@
   return n;
 }
 
-intx os::current_thread_id() { return (intx)pthread_self(); }
+intx os::current_thread_id() {
+#ifdef __APPLE__
+  return (intx)::mach_thread_self();
+#else
+  return (intx)::pthread_self();
+#endif
+}
 int os::current_process_id() {
 
   // Under the old bsd thread library, bsd gives each thread
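With the change above, os::current_thread_id() on Mac OS returns the Mach thread port rather than the pthread pointer, matching the id now stored in the OSThread. A standalone sketch of the same #ifdef (the fact that mach_thread_self() returns a fresh port reference on every call is ignored here, as it is beside the point of the illustration):

#include <stdint.h>
#include <pthread.h>
#ifdef __APPLE__
#include <mach/mach.h>
#endif

static intptr_t current_thread_id() {
#ifdef __APPLE__
  return (intptr_t)mach_thread_self();   // Mach port name of the calling thread
#else
  return (intptr_t)pthread_self();       // pthread id elsewhere
#endif
}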
@@ -2507,7 +2547,7 @@
 
 static char saved_jvm_path[MAXPATHLEN] = {0};
 
-// Find the full path to the current module, libjvm.so or libjvm_g.so
+// Find the full path to the current module, libjvm or libjvm_g
 void os::jvm_path(char *buf, jint buflen) {
   // Error checking.
   if (buflen < MAXPATHLEN) {
@@ -2532,11 +2572,11 @@
 
   if (Arguments::created_by_gamma_launcher()) {
     // Support for the gamma launcher.  Typical value for buf is
-    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
+    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm".  If "/jre/lib/" appears at
     // the right place in the string, then assume we are installed in a JDK and
-    // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
-    // up the path so it looks like libjvm.so is installed there (append a
-    // fake suffix hotspot/libjvm.so).
+    // we're done.  Otherwise, check for a JAVA_HOME environment variable and
+    // construct a path to the JVM being overridden.
+
     const char *p = buf + strlen(buf) - 1;
     for (int count = 0; p > buf && count < 5; ++count) {
       for (--p; p > buf && *p != '/'; --p)
@@ -2550,7 +2590,7 @@
         char* jrelib_p;
         int len;
 
-        // Check the current module name "libjvm.so" or "libjvm_g.so".
+        // Check the current module name "libjvm" or "libjvm_g".
         p = strrchr(buf, '/');
         assert(strstr(p, "/libjvm") == p, "invalid library name");
         p = strstr(p, "_g") ? "_g" : "";
@@ -2563,19 +2603,32 @@
         // modules image doesn't have "jre" subdirectory
         len = strlen(buf);
         jrelib_p = buf + len;
-        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
+
+        // Add the appropriate library subdir
+        snprintf(jrelib_p, buflen-len, "/jre/lib");
         if (0 != access(buf, F_OK)) {
-          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
+          snprintf(jrelib_p, buflen-len, "/lib");
         }
 
+        // Add the appropriate client or server subdir
+        len = strlen(buf);
+        jrelib_p = buf + len;
+        snprintf(jrelib_p, buflen-len, "/%s", COMPILER_VARIANT);
+        if (0 != access(buf, F_OK)) {
+          snprintf(jrelib_p, buflen-len, "");
+        }
+
+        // If the path exists within JAVA_HOME, add the JVM library name
+        // to complete the path to JVM being overridden.  Otherwise fallback
+        // to the path to the current library.
         if (0 == access(buf, F_OK)) {
-          // Use current module name "libjvm[_g].so" instead of
-          // "libjvm"debug_only("_g")".so" since for fastdebug version
-          // we should have "libjvm.so" but debug_only("_g") adds "_g"!
+          // Use current module name "libjvm[_g]" instead of
+          // "libjvm"debug_only("_g")"" since for fastdebug version
+          // we should have "libjvm" but debug_only("_g") adds "_g"!
           len = strlen(buf);
-          snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
+          snprintf(buf + len, buflen-len, "/libjvm%s%s", p, JNI_LIB_SUFFIX);
         } else {
-          // Go back to path of .so
+          // Fall back to path of current library
           rp = realpath(dli_fname, buf);
           if (rp == NULL)
             return;
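The rewritten gamma-launcher logic above builds the JVM path by probing with access(F_OK): try "<JAVA_HOME>/jre/lib", fall back to "<JAVA_HOME>/lib"; append the compiler-variant subdirectory and drop it again if it does not exist; and only if the resulting directory exists append "libjvm" plus the platform suffix, otherwise fall back to the path of the library currently loaded. A simplified sketch of the probe-and-append pattern, where VARIANT and LIB_SUFFIX are stand-ins for COMPILER_VARIANT and JNI_LIB_SUFFIX:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define VARIANT    "server"    /* stand-in for COMPILER_VARIANT */
#define LIB_SUFFIX ".dylib"    /* stand-in for JNI_LIB_SUFFIX on Mac OS */

// Build "<java_home>{/jre,}/lib[/VARIANT]/libjvm<suffix>" into buf if the
// directory exists; return 0 on success, -1 if no candidate directory exists.
static int probe_jvm_path(const char* java_home, char* buf, size_t buflen) {
  snprintf(buf, buflen, "%s/jre/lib", java_home);
  if (access(buf, F_OK) != 0) {
    snprintf(buf, buflen, "%s/lib", java_home);
    if (access(buf, F_OK) != 0) return -1;
  }

  size_t len = strlen(buf);
  snprintf(buf + len, buflen - len, "/%s", VARIANT);
  if (access(buf, F_OK) != 0) {
    buf[len] = '\0';                      /* no variant subdir, drop it again */
  }

  len = strlen(buf);
  snprintf(buf + len, buflen - len, "/libjvm%s", LIB_SUFFIX);
  return 0;
}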
@@ -3570,26 +3623,28 @@
 // It is only used when ThreadPriorityPolicy=1 and requires root privilege.
 
 #if defined(_ALLBSD_SOURCE) && !defined(__APPLE__)
-int os::java_to_os_priority[MaxPriority + 1] = {
+int os::java_to_os_priority[CriticalPriority + 1] = {
   19,              // 0 Entry should never be used
 
    0,              // 1 MinPriority
    3,              // 2
    6,              // 3
 
-   10,              // 4
-   15,              // 5 NormPriority
-   18,              // 6
-
-   21,              // 7
-   25,              // 8
-   28,              // 9 NearMaxPriority
-
-   31              // 10 MaxPriority
+  10,              // 4
+  15,              // 5 NormPriority
+  18,              // 6
+
+  21,              // 7
+  25,              // 8
+  28,              // 9 NearMaxPriority
+
+  31,              // 10 MaxPriority
+
+  31               // 11 CriticalPriority
 };
 #elif defined(__APPLE__)
 /* Using Mach high-level priority assignments */
-int os::java_to_os_priority[MaxPriority + 1] = {
+int os::java_to_os_priority[CriticalPriority + 1] = {
    0,              // 0 Entry should never be used (MINPRI_USER)
 
   27,              // 1 MinPriority
@@ -3604,10 +3659,12 @@
   34,              // 8
   35,              // 9 NearMaxPriority
 
-  36               // 10 MaxPriority
+  36,              // 10 MaxPriority
+
+  36               // 11 CriticalPriority
 };
 #else
-int os::java_to_os_priority[MaxPriority + 1] = {
+int os::java_to_os_priority[CriticalPriority + 1] = {
   19,              // 0 Entry should never be used
 
    4,              // 1 MinPriority
@@ -3622,7 +3679,9 @@
   -3,              // 8
   -4,              // 9 NearMaxPriority
 
-  -5               // 10 MaxPriority
+  -5,              // 10 MaxPriority
+
+  -5               // 11 CriticalPriority
 };
 #endif
 
@@ -3638,6 +3697,9 @@
       ThreadPriorityPolicy = 0;
     }
   }
+  if (UseCriticalJavaThreadPriority) {
+    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
+  }
   return 0;
 }
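Each platform's java_to_os_priority table now carries one extra slot, index 11 (CriticalPriority), and prio_init() simply copies that slot over the MaxPriority entry when -XX:+UseCriticalJavaThreadPriority is set, so the rest of the priority code stays unchanged. A tiny sketch of the remapping with illustrative OS values:

#include <cstdio>

enum { MinPriority = 1, NormPriority = 5, MaxPriority = 10, CriticalPriority = 11 };

// Example table, one OS value per Java priority (values are illustrative only).
static int java_to_os_priority[CriticalPriority + 1] =
  { -1, 0, 3, 6, 10, 15, 18, 21, 25, 28, 31, /* CriticalPriority */ 31 };

static void prio_init(bool use_critical) {
  if (use_critical) {
    // Threads asking for MaxPriority get the critical mapping instead.
    java_to_os_priority[MaxPriority] = java_to_os_priority[CriticalPriority];
  }
  std::printf("MaxPriority now maps to %d\n", java_to_os_priority[MaxPriority]);
}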
 
@@ -5105,9 +5167,9 @@
   struct thread_basic_info tinfo;
   mach_msg_type_number_t tcount = THREAD_INFO_MAX;
   kern_return_t kr;
-  mach_port_t mach_thread;
-
-  mach_thread = pthread_mach_thread_np(thread->osthread()->thread_id());
+  thread_t mach_thread;
+
+  mach_thread = thread->osthread()->thread_id();
   kr = thread_info(mach_thread, THREAD_BASIC_INFO, (thread_info_t)&tinfo, &tcount);
   if (kr != KERN_SUCCESS)
     return -1;
--- a/src/os/linux/vm/os_linux.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2547,7 +2547,14 @@
 }
 
 void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
-  commit_memory(addr, bytes, alignment_hint, false);
+  // This method works by doing an mmap over an existing mmaping and effectively discarding
+  // the existing pages. However it won't work for SHM-based large pages that cannot be
+  // uncommitted at all. We don't do anything in this case to avoid creating a segment with
+  // small pages on top of the SHM segment. This method always works for small pages, so we
+  // allow that in any case.
+  if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
+    commit_memory(addr, bytes, alignment_hint, false);
+  }
 }
 
 void os::numa_make_global(char *addr, size_t bytes) {
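The comment added to os::free_memory above spells out the mechanism: committing over an already committed range maps fresh anonymous pages on top of the old ones, which discards their contents, and that trick cannot be applied to SysV-SHM large pages, so those are now left untouched. A minimal sketch of "discard by re-mapping", assuming ordinary small pages and a page-aligned region that is already mapped:

#include <sys/mman.h>
#include <cstddef>
#include <cassert>

// Throw away the contents of an existing anonymous mapping by mapping new
// zero-filled pages over it (roughly what committing over committed memory
// does); addr and bytes must be page aligned.
static void discard_pages(char* addr, size_t bytes) {
  void* res = mmap(addr, bytes, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  assert(res == addr && "re-mapping over the region failed");
}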
@@ -3383,7 +3390,7 @@
 // this reason, the code should not be used as default (ThreadPriorityPolicy=0).
 // It is only used when ThreadPriorityPolicy=1 and requires root privilege.
 
-int os::java_to_os_priority[MaxPriority + 1] = {
+int os::java_to_os_priority[CriticalPriority + 1] = {
   19,              // 0 Entry should never be used
 
    4,              // 1 MinPriority
@@ -3398,7 +3405,9 @@
   -3,              // 8
   -4,              // 9 NearMaxPriority
 
-  -5               // 10 MaxPriority
+  -5,              // 10 MaxPriority
+
+  -5               // 11 CriticalPriority
 };
 
 static int prio_init() {
@@ -3413,6 +3422,9 @@
       ThreadPriorityPolicy = 0;
     }
   }
+  if (UseCriticalJavaThreadPriority) {
+    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
+  }
   return 0;
 }
 
@@ -4685,14 +4697,12 @@
                      char *addr, size_t bytes, bool read_only,
                      bool allow_exec) {
   int prot;
-  int flags;
+  int flags = MAP_PRIVATE;
 
   if (read_only) {
     prot = PROT_READ;
-    flags = MAP_SHARED;
   } else {
     prot = PROT_READ | PROT_WRITE;
-    flags = MAP_PRIVATE;
   }
 
   if (allow_exec) {
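The hunk above switches read-only file mappings from MAP_SHARED to MAP_PRIVATE; copy-on-write semantics are sufficient when the pages are never written back to the file, and the exec bit is still OR-ed into prot afterwards. A minimal sketch of a private, read-only file mapping:

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <stddef.h>

// Map 'bytes' of 'path' read-only and private, so nothing is ever written
// back to the file.  Returns NULL on failure.
static void* map_file_readonly(const char* path, size_t bytes) {
  int fd = open(path, O_RDONLY);
  if (fd < 0) return NULL;
  void* base = mmap(NULL, bytes, PROT_READ, MAP_PRIVATE, fd, 0);
  close(fd);                              // the mapping stays valid after close
  return base == MAP_FAILED ? NULL : base;
}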
--- a/src/os/posix/launcher/java_md.c	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os/posix/launcher/java_md.c	Tue Apr 10 10:42:34 2012 -0700
@@ -701,6 +701,14 @@
     char libjava[MAXPATHLEN];
 
     if (GetApplicationHome(path, pathsize)) {
+
+        /* Is the JRE universal, i.e. no arch dir? */
+        sprintf(libjava, "%s/jre/lib/" JAVA_DLL, path);
+        if (access(libjava, F_OK) == 0) {
+            strcat(path, "/jre");
+            goto found;
+        }
+
         /* Is JRE co-located with the application? */
         sprintf(libjava, "%s/lib/%s/" JAVA_DLL, path, arch);
         if (access(libjava, F_OK) == 0) {
@@ -734,7 +742,7 @@
     ifn->GetDefaultJavaVMInitArgs = JNI_GetDefaultJavaVMInitArgs;
     return JNI_TRUE;
 #else
-   Dl_info dlinfo;
+    Dl_info dlinfo;
     void *libjvm;
 
     if (_launcher_debug) {
--- a/src/os/solaris/vm/osThread_solaris.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os/solaris/vm/osThread_solaris.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,17 +28,17 @@
 // This is embedded via include into the class OSThread
 
  private:
-
-  thread_t _thread_id;      // Solaris thread id
-  unsigned int  _lwp_id;    // lwp ID, only used with bound threads
-  sigset_t _caller_sigmask; // Caller's signal mask
-  bool _vm_created_thread;  // true if the VM create this thread
-                            // false if primary thread or attached thread
+  thread_t _thread_id;         // Solaris thread id
+  uint     _lwp_id;            // lwp ID, only used with bound threads
+  int      _native_priority;   // Saved native priority when starting
+                               // a bound thread
+  sigset_t _caller_sigmask;    // Caller's signal mask
+  bool     _vm_created_thread; // true if the VM created this thread,
+                               // false if primary thread or attached thread
  public:
-
-  thread_t thread_id() const      { return _thread_id; }
-
-  unsigned int lwp_id() const     { return _lwp_id; }
+  thread_t thread_id() const       { return _thread_id; }
+  uint     lwp_id() const          { return _lwp_id; }
+  int      native_priority() const { return _native_priority; }
 
   // Set and get state of _vm_created_thread flag
   void set_vm_created()           { _vm_created_thread = true; }
@@ -62,8 +62,9 @@
     return true;
   }
 #endif
-  void set_thread_id(thread_t id) { _thread_id = id;   }
-  void set_lwp_id(unsigned int id){ _lwp_id = id;   }
+  void set_thread_id(thread_t id)    { _thread_id = id; }
+  void set_lwp_id(uint id)           { _lwp_id = id; }
+  void set_native_priority(int prio) { _native_priority = prio; }
 
  // ***************************************************************
  // interrupt support.  interrupts (using signals) are used to get
--- a/src/os/solaris/vm/os_solaris.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os/solaris/vm/os_solaris.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -114,6 +114,7 @@
 # include <sys/rtpriocntl.h>
 # include <sys/tspriocntl.h>
 # include <sys/iapriocntl.h>
+# include <sys/fxpriocntl.h>
 # include <sys/loadavg.h>
 # include <string.h>
 # include <stdio.h>
@@ -129,8 +130,8 @@
 #ifdef _GNU_SOURCE
 // See bug #6514594
 extern "C" int madvise(caddr_t, size_t, int);
-extern "C"  int memcntl(caddr_t addr, size_t len, int cmd, caddr_t  arg,
-     int attr, int mask);
+extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg,
+                       int attr, int mask);
 #endif //_GNU_SOURCE
 
 /*
@@ -215,8 +216,9 @@
 #define MaximumPriority 127
 
 // Values for ThreadPriorityPolicy == 1
-int prio_policy1[MaxPriority+1] = { -99999, 0, 16, 32, 48, 64,
-                                        80, 96, 112, 124, 127 };
+int prio_policy1[CriticalPriority+1] = {
+  -99999,  0, 16,  32,  48,  64,
+          80, 96, 112, 124, 127, 127 };
 
 // System parameters used internally
 static clock_t clock_tics_per_sec = 100;
@@ -1011,15 +1013,6 @@
   // use debugger to set breakpoint here
 }
 
-// Returns an estimate of the current stack pointer. Result must be guaranteed to
-// point into the calling threads stack, and be no lower than the current stack
-// pointer.
-address os::current_stack_pointer() {
-  volatile int dummy;
-  address sp = (address)&dummy + 8;     // %%%% need to confirm if this is right
-  return sp;
-}
-
 static thread_t main_thread;
 
 // Thread start routine for all new Java threads
@@ -1048,15 +1041,22 @@
   }
 
   // If the creator called set priority before we started,
-  // we need to call set priority now that we have an lwp.
-  // Get the priority from libthread and set the priority
-  // for the new Solaris lwp.
+  // we need to call set_native_priority now that we have an lwp.
+  // We used to get the priority from thr_getprio (we called
+  // thr_setprio way back in create_thread) and pass it to
+  // set_native_priority, but Solaris scales the priority
+  // in java_to_os_priority, so when we read it back here,
+  // we pass trash to set_native_priority instead of what's
+  // in java_to_os_priority. So we save the native priority
+  // in the osThread and recall it here.
+
   if ( osthr->thread_id() != -1 ) {
     if ( UseThreadPriorities ) {
-      thr_getprio(osthr->thread_id(), &prio);
+      int prio = osthr->native_priority();
       if (ThreadPriorityVerbose) {
-        tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT ", setting priority: %d\n",
-                      osthr->thread_id(), osthr->lwp_id(), prio );
+        tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
+                      INTPTR_FORMAT ", setting priority: %d\n",
+                      osthr->thread_id(), osthr->lwp_id(), prio);
       }
       os::set_native_priority(thread, prio);
     }
@@ -1353,13 +1353,12 @@
   // Remember that we created this thread so we can set priority on it
   osthread->set_vm_created();
 
-  // Set the default thread priority otherwise use NormalPriority
-
-  if ( UseThreadPriorities ) {
-     thr_setprio(tid, (DefaultThreadPriority == -1) ?
+  // Set the default thread priority.  If using bound threads, setting
+  // lwp priority will be delayed until thread start.
+  set_native_priority(thread,
+                      DefaultThreadPriority == -1 ?
                         java_to_os_priority[NormPriority] :
                         DefaultThreadPriority);
-  }
 
   // Initial thread state is INITIALIZED, not SUSPENDED
   osthread->set_state(INITIALIZED);
@@ -3728,7 +3727,7 @@
 } SchedInfo;
 
 
-static SchedInfo tsLimits, iaLimits, rtLimits;
+static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
 
 #ifdef ASSERT
 static int  ReadBackValidate = 1;
@@ -3739,6 +3738,8 @@
 static int  myCur       = 0;
 static bool priocntl_enable = false;
 
+static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
+static int java_MaxPriority_to_os_priority = 0; // Saved mapping
 
 // Call the version of priocntl suitable for all supported versions
 // of Solaris. We need to call through this wrapper so that we can
@@ -3783,19 +3784,27 @@
   if (os::Solaris::T2_libthread() || UseBoundThreads) {
     // If ThreadPriorityPolicy is 1, switch tables
     if (ThreadPriorityPolicy == 1) {
-      for (i = 0 ; i < MaxPriority+1; i++)
+      for (i = 0 ; i < CriticalPriority+1; i++)
         os::java_to_os_priority[i] = prio_policy1[i];
     }
+    if (UseCriticalJavaThreadPriority) {
+      // MaxPriority always maps to the FX scheduling class and criticalPrio.
+      // See set_native_priority() and set_lwp_class_and_priority().
+      // Save original MaxPriority mapping in case attempt to
+      // use critical priority fails.
+      java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
+      // Set negative to distinguish from other priorities
+      os::java_to_os_priority[MaxPriority] = -criticalPrio;
+    }
   }
   // Not using Bound Threads, set to ThreadPolicy 1
   else {
-    for ( i = 0 ; i < MaxPriority+1; i++ ) {
+    for ( i = 0 ; i < CriticalPriority+1; i++ ) {
       os::java_to_os_priority[i] = prio_policy1[i];
     }
     return 0;
   }
 
-
   // Get IDs for a set of well-known scheduling classes.
   // TODO-FIXME: GETCLINFO returns the current # of classes in the
   // the system.  We should have a loop that iterates over the
@@ -3828,24 +3837,33 @@
   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
   rtLimits.minPrio = 0;
 
+  strcpy(ClassInfo.pc_clname, "FX");
+  ClassInfo.pc_cid = -1;
+  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
+  if (rslt < 0) return errno;
+  assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
+  fxLimits.schedPolicy = ClassInfo.pc_cid;
+  fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
+  fxLimits.minPrio = 0;
 
   // Query our "current" scheduling class.
-  // This will normally be IA,TS or, rarely, RT.
-  memset (&ParmInfo, 0, sizeof(ParmInfo));
+  // This will normally be IA, TS or, rarely, FX or RT.
+  memset(&ParmInfo, 0, sizeof(ParmInfo));
   ParmInfo.pc_cid = PC_CLNULL;
-  rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo );
-  if ( rslt < 0 ) return errno;
+  rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
+  if (rslt < 0) return errno;
   myClass = ParmInfo.pc_cid;
 
   // We now know our scheduling classId, get specific information
-  // the class.
+  // about the class.
   ClassInfo.pc_cid = myClass;
   ClassInfo.pc_clname[0] = 0;
-  rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo );
-  if ( rslt < 0 ) return errno;
-
-  if (ThreadPriorityVerbose)
-    tty->print_cr ("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
+  rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
+  if (rslt < 0) return errno;
+
+  if (ThreadPriorityVerbose) {
+    tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
+  }
 
   memset(&ParmInfo, 0, sizeof(pcparms_t));
   ParmInfo.pc_cid = PC_CLNULL;
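The new FX block above follows the pattern already used for TS, IA and RT: look the scheduling class up by name with PC_GETCID, remember its class id, and read the maximum user priority out of the class-specific info. A condensed, Solaris-only sketch of that lookup, assuming the plain libc priocntl() rather than HotSpot's versioned wrapper:

#include <sys/priocntl.h>
#include <sys/fxpriocntl.h>
#include <string.h>

// Return the class id of the FX scheduling class, and its maximum user
// priority through *max_prio; returns (id_t)-1 if the lookup fails.
static id_t fx_class_id(int* max_prio) {
  pcinfo_t info;
  memset(&info, 0, sizeof(info));
  strcpy(info.pc_clname, "FX");
  if (priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&info) == -1) return (id_t)-1;
  *max_prio = ((fxinfo_t*)info.pc_clinfo)->fx_maxupri;
  return info.pc_cid;
}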
@@ -3865,6 +3883,11 @@
     myMin = tsLimits.minPrio;
     myMax = tsLimits.maxPrio;
     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
+  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
+    fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
+    myMin = fxLimits.minPrio;
+    myMax = fxLimits.maxPrio;
+    myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
   } else {
     // No clue - punt
     if (ThreadPriorityVerbose)
@@ -3872,8 +3895,9 @@
     return EINVAL;      // no clue, punt
   }
 
-  if (ThreadPriorityVerbose)
-        tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
+  if (ThreadPriorityVerbose) {
+    tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
+  }
 
   priocntl_enable = true;  // Enable changing priorities
   return 0;
@@ -3882,6 +3906,7 @@
 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
+#define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
 
 
 // scale_to_lwp_priority
@@ -3900,13 +3925,13 @@
 }
 
 
-// set_lwp_priority
+// set_lwp_class_and_priority
 //
-// Set the priority of the lwp.  This call should only be made
-// when using bound threads (T2 threads are bound by default).
+// Set the class and priority of the lwp.  This call should only
+// be made when using bound threads (T2 threads are bound by default).
 //
-int     set_lwp_priority (int ThreadID, int lwpid, int newPrio )
-{
+int set_lwp_class_and_priority(int ThreadID, int lwpid,
+                               int newPrio, int new_class, bool scale) {
   int rslt;
   int Actual, Expected, prv;
   pcparms_t ParmInfo;                   // for GET-SET
@@ -3927,19 +3952,20 @@
     return EINVAL;
   }
 
-
   // If lwp hasn't started yet, just return
   // the _start routine will call us again.
   if ( lwpid <= 0 ) {
     if (ThreadPriorityVerbose) {
-      tty->print_cr ("deferring the set_lwp_priority of thread " INTPTR_FORMAT " to %d, lwpid not set",
+      tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
+                     INTPTR_FORMAT " to %d, lwpid not set",
                      ThreadID, newPrio);
     }
     return 0;
   }
 
   if (ThreadPriorityVerbose) {
-    tty->print_cr ("set_lwp_priority(" INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
+    tty->print_cr ("set_lwp_class_and_priority("
+                   INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
                    ThreadID, lwpid, newPrio);
   }
 
@@ -3948,40 +3974,69 @@
   rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
   if (rslt < 0) return errno;
 
-  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
+  int cur_class = ParmInfo.pc_cid;
+  ParmInfo.pc_cid = (id_t)new_class;
+
+  if (new_class == rtLimits.schedPolicy) {
     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
-    rtInfo->rt_pri     = scale_to_lwp_priority (rtLimits.minPrio, rtLimits.maxPrio, newPrio);
+    rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
+                                                       rtLimits.maxPrio, newPrio)
+                               : newPrio;
     rtInfo->rt_tqsecs  = RT_NOCHANGE;
     rtInfo->rt_tqnsecs = RT_NOCHANGE;
     if (ThreadPriorityVerbose) {
       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
     }
-  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
-    iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
-    int maxClamped     = MIN2(iaLimits.maxPrio, (int)iaInfo->ia_uprilim);
-    iaInfo->ia_upri    = scale_to_lwp_priority(iaLimits.minPrio, maxClamped, newPrio);
-    iaInfo->ia_uprilim = IA_NOCHANGE;
+  } else if (new_class == iaLimits.schedPolicy) {
+    iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
+    int maxClamped     = MIN2(iaLimits.maxPrio,
+                              cur_class == new_class
+                                ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
+    iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
+                                                       maxClamped, newPrio)
+                               : newPrio;
+    iaInfo->ia_uprilim = cur_class == new_class
+                           ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
     iaInfo->ia_mode    = IA_NOCHANGE;
     if (ThreadPriorityVerbose) {
-      tty->print_cr ("IA: [%d...%d] %d->%d\n",
-               iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
+      tty->print_cr("IA: [%d...%d] %d->%d\n",
+                    iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
     }
-  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
-    tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
-    int maxClamped     = MIN2(tsLimits.maxPrio, (int)tsInfo->ts_uprilim);
-    prv                = tsInfo->ts_upri;
-    tsInfo->ts_upri    = scale_to_lwp_priority(tsLimits.minPrio, maxClamped, newPrio);
-    tsInfo->ts_uprilim = IA_NOCHANGE;
+  } else if (new_class == tsLimits.schedPolicy) {
+    tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
+    int maxClamped     = MIN2(tsLimits.maxPrio,
+                              cur_class == new_class
+                                ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
+    tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
+                                                       maxClamped, newPrio)
+                               : newPrio;
+    tsInfo->ts_uprilim = cur_class == new_class
+                           ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
     if (ThreadPriorityVerbose) {
-      tty->print_cr ("TS: %d [%d...%d] %d->%d\n",
-               prv, tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
+      tty->print_cr("TS: [%d...%d] %d->%d\n",
+                    tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
     }
-    if (prv == tsInfo->ts_upri) return 0;
+  } else if (new_class == fxLimits.schedPolicy) {
+    fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
+    int maxClamped     = MIN2(fxLimits.maxPrio,
+                              cur_class == new_class
+                                ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
+    fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
+                                                       maxClamped, newPrio)
+                               : newPrio;
+    fxInfo->fx_uprilim = cur_class == new_class
+                           ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
+    fxInfo->fx_tqsecs  = FX_NOCHANGE;
+    fxInfo->fx_tqnsecs = FX_NOCHANGE;
+    if (ThreadPriorityVerbose) {
+      tty->print_cr("FX: [%d...%d] %d->%d\n",
+                    fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
+    }
   } else {
-    if ( ThreadPriorityVerbose ) {
-      tty->print_cr ("Unknown scheduling class\n");
+    if (ThreadPriorityVerbose) {
+      tty->print_cr("Unknown new scheduling class %d\n", new_class);
     }
-      return EINVAL;    // no clue, punt
+    return EINVAL;    // no clue, punt
   }
 
   rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
@@ -4016,16 +4071,20 @@
   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
     Actual   = TSPRI(ReadBack)->ts_upri;
     Expected = TSPRI(ParmInfo)->ts_upri;
+  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
+    Actual   = FXPRI(ReadBack)->fx_upri;
+    Expected = FXPRI(ParmInfo)->fx_upri;
   } else {
-    if ( ThreadPriorityVerbose ) {
-      tty->print_cr("set_lwp_priority: unexpected class in readback: %d\n", ParmInfo.pc_cid);
+    if (ThreadPriorityVerbose) {
+      tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
+                    ParmInfo.pc_cid);
     }
   }
 
   if (Actual != Expected) {
-    if ( ThreadPriorityVerbose ) {
-      tty->print_cr ("set_lwp_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
-             lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
+    if (ThreadPriorityVerbose) {
+      tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
+                     lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
     }
   }
 #endif
@@ -4033,8 +4092,6 @@
   return 0;
 }
 
-
-
 // Solaris only gives access to 128 real priorities at a time,
 // so we expand Java's ten to fill this range.  This would be better
 // if we dynamically adjusted relative priorities.
@@ -4055,8 +4112,7 @@
 // which do not explicitly alter their thread priorities.
 //
 
-
-int os::java_to_os_priority[MaxPriority + 1] = {
+int os::java_to_os_priority[CriticalPriority + 1] = {
   -99999,         // 0 Entry should never be used
 
   0,              // 1 MinPriority
@@ -4071,17 +4127,51 @@
   127,            // 8
   127,            // 9 NearMaxPriority
 
-  127             // 10 MaxPriority
+  127,            // 10 MaxPriority
+
+  -criticalPrio   // 11 CriticalPriority
 };
 
-
 OSReturn os::set_native_priority(Thread* thread, int newpri) {
+  OSThread* osthread = thread->osthread();
+
+  // Save requested priority in case the thread hasn't been started
+  osthread->set_native_priority(newpri);
+
+  // Check for critical priority request
+  bool fxcritical = false;
+  if (newpri == -criticalPrio) {
+    fxcritical = true;
+    newpri = criticalPrio;
+  }
+
   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
-  if ( !UseThreadPriorities ) return OS_OK;
-  int status = thr_setprio(thread->osthread()->thread_id(), newpri);
-  if ( os::Solaris::T2_libthread() || (UseBoundThreads && thread->osthread()->is_vm_created()) )
-    status |= (set_lwp_priority (thread->osthread()->thread_id(),
-                    thread->osthread()->lwp_id(), newpri ));
+  if (!UseThreadPriorities) return OS_OK;
+
+  int status = 0;
+
+  if (!fxcritical) {
+    // Use thr_setprio only if we have a priority that thr_setprio understands
+    status = thr_setprio(thread->osthread()->thread_id(), newpri);
+  }
+
+  if (os::Solaris::T2_libthread() ||
+      (UseBoundThreads && osthread->is_vm_created())) {
+    int lwp_status =
+      set_lwp_class_and_priority(osthread->thread_id(),
+                                 osthread->lwp_id(),
+                                 newpri,
+                                 fxcritical ? fxLimits.schedPolicy : myClass,
+                                 !fxcritical);
+    if (lwp_status != 0 && fxcritical) {
+      // Try again, this time without changing the scheduling class
+      newpri = java_MaxPriority_to_os_priority;
+      lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
+                                              osthread->lwp_id(),
+                                              newpri, myClass, false);
+    }
+    status |= lwp_status;
+  }
   return (status == 0) ? OS_OK : OS_ERR;
 }
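On Solaris the critical priority is encoded as a negative sentinel (-criticalPrio) in the mapping table; set_native_priority() recognizes it, skips thr_setprio (which only understands ordinary priorities), requests the FX class at priority 60 without scaling, and if that call fails retries with the saved ordinary MaxPriority mapping in the previously queried class. A control-flow sketch of the sentinel handling, with set_lwp() standing in for set_lwp_class_and_priority() and placeholder class ids:

// Illustrative control flow only; the priocntl work is stubbed out.
static const int criticalPrio = 60;
static int saved_max_priority_mapping = 127;   // java_MaxPriority_to_os_priority
enum { FX_CLASS = 1, CURRENT_CLASS = 2 };

static int set_lwp(int lwp_id, int prio, int sched_class, bool scale) {
  (void)lwp_id; (void)prio; (void)sched_class; (void)scale;
  return 0;                                    // stub: pretend the call succeeded
}

static int set_native_priority(int lwp_id, int newpri) {
  bool fxcritical = (newpri == -criticalPrio); // negative value marks a critical request
  if (fxcritical) newpri = criticalPrio;

  int status = set_lwp(lwp_id, newpri,
                       fxcritical ? FX_CLASS : CURRENT_CLASS,
                       /* scale = */ !fxcritical);
  if (status != 0 && fxcritical) {
    // FX/60 was refused; fall back to the ordinary MaxPriority mapping.
    status = set_lwp(lwp_id, saved_max_priority_mapping, CURRENT_CLASS, false);
  }
  return status;
}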
 
--- a/src/os/windows/vm/decoder_windows.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os/windows/vm/decoder_windows.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -36,7 +36,7 @@
 typedef BOOL  (WINAPI *pfn_SymGetSymFromAddr64)(HANDLE, DWORD64, PDWORD64, PIMAGEHLP_SYMBOL64);
 typedef DWORD (WINAPI *pfn_UndecorateSymbolName)(const char*, char*, DWORD, DWORD);
 
-class WindowsDecoder: public NullDecoder {
+class WindowsDecoder : public AbstractDecoder {
 
 public:
   WindowsDecoder();
--- a/src/os/windows/vm/os_windows.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -324,16 +324,6 @@
   os::breakpoint();
 }
 
-// Returns an estimate of the current stack pointer. Result must be guaranteed
-// to point into the calling threads stack, and be no lower than the current
-// stack pointer.
-
-address os::current_stack_pointer() {
-  int dummy;
-  address sp = (address)&dummy;
-  return sp;
-}
-
 // os::current_stack_base()
 //
 //   Returns the base of the stack, which is the stack's
@@ -2088,7 +2078,6 @@
 #elif _M_AMD64
   PCONTEXT ctx = exceptionInfo->ContextRecord;
   address pc = (address)ctx->Rip;
-  NOT_PRODUCT(Events::log("idiv overflow exception at " INTPTR_FORMAT , pc));
   assert(pc[0] == 0xF7, "not an idiv opcode");
   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
   assert(ctx->Rax == min_jint, "unexpected idiv exception");
@@ -2100,7 +2089,6 @@
 #else
   PCONTEXT ctx = exceptionInfo->ContextRecord;
   address pc = (address)ctx->Eip;
-  NOT_PRODUCT(Events::log("idiv overflow exception at " INTPTR_FORMAT , pc));
   assert(pc[0] == 0xF7, "not an idiv opcode");
   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
   assert(ctx->Eax == min_jint, "unexpected idiv exception");
@@ -3296,7 +3284,7 @@
 // so we compress Java's ten down to seven.  It would be better
 // if we dynamically adjusted relative priorities.
 
-int os::java_to_os_priority[MaxPriority + 1] = {
+int os::java_to_os_priority[CriticalPriority + 1] = {
   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
   THREAD_PRIORITY_LOWEST,                       // 2
@@ -3307,10 +3295,11 @@
   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
-  THREAD_PRIORITY_HIGHEST                       // 10 MaxPriority
+  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
+  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
 };
 
-int prio_policy1[MaxPriority + 1] = {
+int prio_policy1[CriticalPriority + 1] = {
   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
   THREAD_PRIORITY_LOWEST,                       // 2
@@ -3321,17 +3310,21 @@
   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
   THREAD_PRIORITY_HIGHEST,                      // 8
   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
-  THREAD_PRIORITY_TIME_CRITICAL                 // 10 MaxPriority
+  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
+  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
 };
 
 static int prio_init() {
   // If ThreadPriorityPolicy is 1, switch tables
   if (ThreadPriorityPolicy == 1) {
     int i;
-    for (i = 0; i < MaxPriority + 1; i++) {
+    for (i = 0; i < CriticalPriority + 1; i++) {
       os::java_to_os_priority[i] = prio_policy1[i];
     }
   }
+  if (UseCriticalJavaThreadPriority) {
+    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority] ;
+  }
   return 0;
 }
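On Windows the extra CriticalPriority slot maps to the same THREAD_PRIORITY_HIGHEST as MaxPriority (or THREAD_PRIORITY_TIME_CRITICAL under ThreadPriorityPolicy=1), and the remap in prio_init() works exactly as on the other platforms. A minimal sketch of applying such a mapped value to the current thread with the Win32 API:

#include <windows.h>
#include <cstdio>

int main() {
  // Apply the value a remapped MaxPriority entry would contain.
  if (!SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_HIGHEST)) {
    std::printf("SetThreadPriority failed: %lu\n", GetLastError());
    return 1;
  }
  std::printf("thread priority is now %d\n",
              GetThreadPriority(GetCurrentThread()));
  return 0;
}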
 
@@ -5331,4 +5324,3 @@
 }
 
 #endif
-
--- a/src/os_cpu/bsd_x86/vm/bsd_x86_32.ad	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/bsd_x86/vm/bsd_x86_32.ad	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -24,137 +24,3 @@
 
 // X86 Bsd Architecture Description File
 
-//----------OS-DEPENDENT ENCODING BLOCK-----------------------------------------------------
-// This block specifies the encoding classes used by the compiler to output
-// byte streams.  Encoding classes generate functions which are called by
-// Machine Instruction Nodes in order to generate the bit encoding of the
-// instruction.  Operands specify their base encoding interface with the
-// interface keyword.  There are currently supported four interfaces,
-// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
-// operand to generate a function which returns its register number when
-// queried.   CONST_INTER causes an operand to generate a function which
-// returns the value of the constant when queried.  MEMORY_INTER causes an
-// operand to generate four functions which return the Base Register, the
-// Index Register, the Scale Value, and the Offset Value of the operand when
-// queried.  COND_INTER causes an operand to generate six functions which
-// return the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional instruction.
-// Instructions specify two basic values for encoding.  They use the
-// ins_encode keyword to specify their encoding class (which must be one of
-// the class names specified in the encoding block), and they use the
-// opcode keyword to specify, in order, their primary, secondary, and
-// tertiary opcode.  Only the opcode sections which a particular instruction
-// needs for encoding need to be specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-
-  enc_class bsd_tlsencode (eRegP dst) %{
-    Register dstReg = as_Register($dst$$reg);
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-      masm->get_thread(dstReg);
-  %}
-
-  enc_class bsd_breakpoint  %{
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-    masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-  %}
-
-  enc_class call_epilog %{
-    if( VerifyStackAtCalls ) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP,-3*VMRegImpl::slots_per_word));
-      if(framesize >= 128) {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0xBC);
-        emit_d8(cbuf,0x24);
-        emit_d32(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      else {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0x7C);
-        emit_d8(cbuf,0x24);
-        emit_d8(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      // jmp EQ around INT3
-      // QQQ TODO
-      const int jump_around = 5; // size of call to breakpoint, 1 for CC
-      emit_opcode(cbuf,0x74);
-      emit_d8(cbuf, jump_around);
-      // QQQ temporary
-      emit_break(cbuf);
-      // Die if stack mismatch
-      // emit_opcode(cbuf,0xCC);
-    }
-  %}
-
-%}
-
-// INSTRUCTIONS -- Platform dependent
-
-//----------OS and Locking Instructions----------------------------------------
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-instruct tlsLoadP(eRegP dst, eFlagsReg cr) %{
-  match(Set dst (ThreadLocal));
-  effect(DEF dst, KILL cr);
-
-  format %{ "MOV    $dst, Thread::current()" %}
-  ins_encode( bsd_tlsencode(dst) );
-  ins_pipe( ialu_reg_fat );
-%}
-
-instruct TLS(eRegP dst) %{
-  match(Set dst (ThreadLocal));
-
-  expand %{
-    tlsLoadP(dst);
-  %}
-%}
-
-// Die now
-instruct ShouldNotReachHere( )
-%{
-  match(Halt);
-
-  // Use the following format syntax
-  format %{ "INT3   ; ShouldNotReachHere" %}
-  // QQQ TODO for now call breakpoint
-  // opcode(0xCC);
-  // ins_encode(Opc);
-  ins_encode(bsd_breakpoint);
-  ins_pipe( pipe_slow );
-%}
-
-
-
-// Platform dependent source
-
-source %{
-
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer &cbuf) {
-
-  // Debugger doesn't really catch this but best we can do so far QQQ
-  MacroAssembler* masm = new MacroAssembler(&cbuf);
-  masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
-  emit_break(cbuf);
-}
-
-
-uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
-  return 5;
-}
-
-%}
--- a/src/os_cpu/bsd_x86/vm/bsd_x86_64.ad	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/bsd_x86/vm/bsd_x86_64.ad	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -55,8 +55,7 @@
   // adding a syntax that specifies the sizes of fields in an order,
   // so that the adlc can build the emit functions automagically
 
-  enc_class Java_To_Runtime(method meth)
-  %{
+  enc_class Java_To_Runtime(method meth) %{
     // No relocation needed
 
     // movq r10, <meth>
@@ -70,104 +69,15 @@
     emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
   %}
 
-  enc_class bsd_breakpoint
-  %{
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-    masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-  %}
-
-  enc_class call_epilog
-  %{
-    if (VerifyStackAtCalls) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize =
-        ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));
-      if (framesize) {
-        if (framesize < 0x80) {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0x7C);
-          emit_d8(cbuf, 0x24);
-          emit_d8(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        } else {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0xBC);
-          emit_d8(cbuf, 0x24);
-          emit_d32(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        }
-      }
-      // jmp EQ around INT3
-      // QQQ TODO
-      const int jump_around = 5; // size of call to breakpoint, 1 for CC
-      emit_opcode(cbuf, 0x74);
-      emit_d8(cbuf, jump_around);
-      // QQQ temporary
-      emit_break(cbuf);
-      // Die if stack mismatch
-      // emit_opcode(cbuf,0xCC);
-    }
-  %}
-
-%}
-
-// INSTRUCTIONS -- Platform dependent
-
-//----------OS and Locking Instructions----------------------------------------
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-instruct tlsLoadP(r15_RegP dst)
-%{
-  match(Set dst (ThreadLocal));
-  effect(DEF dst);
-
-  size(0);
-  format %{ "# TLS is in R15" %}
-  ins_encode( /*empty encoding*/ );
-  ins_pipe(ialu_reg_reg);
-%}
-
-// Die now
-instruct ShouldNotReachHere()
-%{
-  match(Halt);
-
-  // Use the following format syntax
-  format %{ "int3\t# ShouldNotReachHere" %}
-  // QQQ TODO for now call breakpoint
-  // opcode(0xCC);
-  // ins_encode(Opc);
-  ins_encode(bsd_breakpoint);
-  ins_pipe(pipe_slow);
 %}
 
 
 // Platform dependent source
 
-source
-%{
+source %{
 
 int MachCallRuntimeNode::ret_addr_offset() {
   return 13; // movq r10,#addr; callq (r10)
 }
 
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer& cbuf) {
-  // Debugger doesn't really catch this but best we can do so far QQQ
-  MacroAssembler* masm = new MacroAssembler(&cbuf);
-  masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MachBreakpointNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
-  emit_break(cbuf);
-}
-
-uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
-  return 5;
-}
-
 %}
--- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -362,7 +362,7 @@
 }
 
 intptr_t* _get_previous_fp() {
-#if defined(SPARC_WORKS) || defined(__clang__)
+#if defined(SPARC_WORKS) || defined(__clang__) || defined(__llvm__)
   register intptr_t **ebp;
   __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
 #else
@@ -1126,3 +1126,8 @@
                       : "r" (fpu_cntrl) : "memory");
 #endif // !AMD64
 }
+
+#ifndef PRODUCT
+void os::verify_stack_alignment() {
+}
+#endif
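The _get_previous_fp change above extends the inline-asm path, which reads the frame-pointer register directly, to builds that define __llvm__ as well as __clang__. For comparison, GCC and Clang also expose the frame pointer through the __builtin_frame_address intrinsic; a minimal sketch (level 1 is only reliable when frame pointers are not omitted):

#include <cstdio>

// __builtin_frame_address(0) is the current frame, (1) is the caller's frame.
static void* caller_frame_pointer() {
  return __builtin_frame_address(1);
}

int main() {
  std::printf("current fp: %p, caller fp: %p\n",
              __builtin_frame_address(0), caller_frame_pointer());
  return 0;
}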
--- a/src/os_cpu/bsd_x86/vm/vmStructs_bsd_x86.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/bsd_x86/vm/vmStructs_bsd_x86.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,12 +29,18 @@
 // constants required by the Serviceability Agent. This file is
 // referenced by vmStructs.cpp.
 
+#ifdef __APPLE__
+#define OS_THREAD_ID_TYPE thread_t
+#else
+#define OS_THREAD_ID_TYPE pthread_t
+#endif
+
 #define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field, last_entry) \
                                                                                                                                      \
   /******************************/                                                                                                   \
   /* Threads (NOTE: incomplete) */                                                                                                   \
   /******************************/                                                                                                   \
-  nonstatic_field(OSThread,                      _thread_id,                                      pthread_t)                             \
+  nonstatic_field(OSThread,                      _thread_id,                                      OS_THREAD_ID_TYPE)                 \
   nonstatic_field(OSThread,                      _pthread_id,                                     pthread_t)                         \
   /* This must be the last entry, and must be present */                                                                             \
   last_entry()
@@ -46,7 +52,7 @@
   /* Posix Thread IDs   */                                                \
   /**********************/                                                \
                                                                           \
-  declare_integer_type(pid_t)                                             \
+  declare_unsigned_integer_type(thread_t)                                 \
   declare_unsigned_integer_type(pthread_t)                                \
                                                                           \
   /* This must be the last entry, and must be present */                  \
--- a/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -562,3 +562,8 @@
   }
 };
 #endif // !_LP64
+
+#ifndef PRODUCT
+void os::verify_stack_alignment() {
+}
+#endif
--- a/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -756,3 +756,8 @@
   // guard page, only enable glibc guard page for non-Java threads.
   return (thr_type == java_thread ? 0 : page_size());
 }
+
+#ifndef PRODUCT
+void os::verify_stack_alignment() {
+}
+#endif
--- a/src/os_cpu/linux_x86/vm/linux_x86_32.ad	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/linux_x86/vm/linux_x86_32.ad	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -24,137 +24,3 @@
 
 // X86 Linux Architecture Description File
 
-//----------OS-DEPENDENT ENCODING BLOCK-----------------------------------------------------
-// This block specifies the encoding classes used by the compiler to output
-// byte streams.  Encoding classes generate functions which are called by
-// Machine Instruction Nodes in order to generate the bit encoding of the
-// instruction.  Operands specify their base encoding interface with the
-// interface keyword.  There are currently supported four interfaces,
-// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
-// operand to generate a function which returns its register number when
-// queried.   CONST_INTER causes an operand to generate a function which
-// returns the value of the constant when queried.  MEMORY_INTER causes an
-// operand to generate four functions which return the Base Register, the
-// Index Register, the Scale Value, and the Offset Value of the operand when
-// queried.  COND_INTER causes an operand to generate six functions which
-// return the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional instruction.
-// Instructions specify two basic values for encoding.  They use the
-// ins_encode keyword to specify their encoding class (which must be one of
-// the class names specified in the encoding block), and they use the
-// opcode keyword to specify, in order, their primary, secondary, and
-// tertiary opcode.  Only the opcode sections which a particular instruction
-// needs for encoding need to be specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-
-  enc_class linux_tlsencode (eRegP dst) %{
-    Register dstReg = as_Register($dst$$reg);
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-      masm->get_thread(dstReg);
-  %}
-
-  enc_class linux_breakpoint  %{
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-    masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-  %}
-
-  enc_class call_epilog %{
-    if( VerifyStackAtCalls ) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP,-3*VMRegImpl::slots_per_word));
-      if(framesize >= 128) {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0xBC);
-        emit_d8(cbuf,0x24);
-        emit_d32(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      else {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0x7C);
-        emit_d8(cbuf,0x24);
-        emit_d8(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      // jmp EQ around INT3
-      // QQQ TODO
-      const int jump_around = 5; // size of call to breakpoint, 1 for CC
-      emit_opcode(cbuf,0x74);
-      emit_d8(cbuf, jump_around);
-      // QQQ temporary
-      emit_break(cbuf);
-      // Die if stack mismatch
-      // emit_opcode(cbuf,0xCC);
-    }
-  %}
-
-%}
-
-// INSTRUCTIONS -- Platform dependent
-
-//----------OS and Locking Instructions----------------------------------------
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-instruct tlsLoadP(eRegP dst, eFlagsReg cr) %{
-  match(Set dst (ThreadLocal));
-  effect(DEF dst, KILL cr);
-
-  format %{ "MOV    $dst, Thread::current()" %}
-  ins_encode( linux_tlsencode(dst) );
-  ins_pipe( ialu_reg_fat );
-%}
-
-instruct TLS(eRegP dst) %{
-  match(Set dst (ThreadLocal));
-
-  expand %{
-    tlsLoadP(dst);
-  %}
-%}
-
-// Die now
-instruct ShouldNotReachHere( )
-%{
-  match(Halt);
-
-  // Use the following format syntax
-  format %{ "INT3   ; ShouldNotReachHere" %}
-  // QQQ TODO for now call breakpoint
-  // opcode(0xCC);
-  // ins_encode(Opc);
-  ins_encode(linux_breakpoint);
-  ins_pipe( pipe_slow );
-%}
-
-
-
-// Platform dependent source
-
-source %{
-
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer &cbuf) {
-
-  // Debugger doesn't really catch this but best we can do so far QQQ
-  MacroAssembler* masm = new MacroAssembler(&cbuf);
-  masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
-  emit_break(cbuf);
-}
-
-
-uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
-  return MachNode::size(ra_);
-}
-
-%}
--- a/src/os_cpu/linux_x86/vm/linux_x86_64.ad	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/linux_x86/vm/linux_x86_64.ad	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -55,8 +55,7 @@
   // adding a syntax that specifies the sizes of fields in an order,
   // so that the adlc can build the emit functions automagically
 
-  enc_class Java_To_Runtime(method meth)
-  %{
+  enc_class Java_To_Runtime(method meth) %{
     // No relocation needed
 
     // movq r10, <meth>
@@ -70,105 +69,15 @@
     emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
   %}
 
-  enc_class linux_breakpoint
-  %{
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-    masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-  %}
-
-  enc_class call_epilog
-  %{
-    if (VerifyStackAtCalls) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize =
-        ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));
-      if (framesize) {
-        if (framesize < 0x80) {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0x7C);
-          emit_d8(cbuf, 0x24);
-          emit_d8(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        } else {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0xBC);
-          emit_d8(cbuf, 0x24);
-          emit_d32(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        }
-      }
-      // jmp EQ around INT3
-      // QQQ TODO
-      const int jump_around = 5; // size of call to breakpoint, 1 for CC
-      emit_opcode(cbuf, 0x74);
-      emit_d8(cbuf, jump_around);
-      // QQQ temporary
-      emit_break(cbuf);
-      // Die if stack mismatch
-      // emit_opcode(cbuf,0xCC);
-    }
-  %}
-
-%}
-
-// INSTRUCTIONS -- Platform dependent
-
-//----------OS and Locking Instructions----------------------------------------
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-instruct tlsLoadP(r15_RegP dst)
-%{
-  match(Set dst (ThreadLocal));
-  effect(DEF dst);
-
-  size(0);
-  format %{ "# TLS is in R15" %}
-  ins_encode( /*empty encoding*/ );
-  ins_pipe(ialu_reg_reg);
-%}
-
-// Die now
-instruct ShouldNotReachHere()
-%{
-  match(Halt);
-
-  // Use the following format syntax
-  format %{ "int3\t# ShouldNotReachHere" %}
-  // QQQ TODO for now call breakpoint
-  // opcode(0xCC);
-  // ins_encode(Opc);
-  ins_encode(linux_breakpoint);
-  ins_pipe(pipe_slow);
 %}
 
 
 // Platform dependent source
 
-source
-%{
+source %{
 
 int MachCallRuntimeNode::ret_addr_offset() {
   return 13; // movq r10,#addr; callq (r10)
 }
 
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer& cbuf) {
-  // Debugger doesn't really catch this but best we can do so far QQQ
-  MacroAssembler* masm = new MacroAssembler(&cbuf);
-  masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MachBreakpointNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
-  emit_break(cbuf);
-}
-
-uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
-  // distance could be far and requires load and call through register
-  return MachNode::size(ra_);
-}
-
 %}
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -862,3 +862,11 @@
                       : "r" (fpu_cntrl) : "memory");
 #endif // !AMD64
 }
+
+#ifndef PRODUCT
+void os::verify_stack_alignment() {
+#ifdef AMD64
+  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
+#endif
+}
+#endif
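
Note: the verify_stack_alignment hook added above masks the stack pointer against StackAlignmentInBytes-1 and asserts the result is zero. A minimal, self-contained sketch of that power-of-two alignment test (kStackAlignment and is_aligned are illustrative names, not HotSpot symbols):

    #include <cassert>
    #include <cstdint>

    // Sketch only: kStackAlignment stands in for StackAlignmentInBytes,
    // which is 16 on the x86-64 ABIs this assert targets.
    static const std::intptr_t kStackAlignment = 16;

    static bool is_aligned(const void* p) {
      // For a power-of-two alignment, the low bits of the address are zero.
      return (reinterpret_cast<std::intptr_t>(p) & (kStackAlignment - 1)) == 0;
    }

    int main() {
      assert((kStackAlignment & (kStackAlignment - 1)) == 0);  // power of two
      alignas(16) char buf[16];
      assert(is_aligned(buf));
      return 0;
    }
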
--- a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -506,3 +506,8 @@
   }
 };
 #endif // !_LP64
+
+#ifndef PRODUCT
+void os::verify_stack_alignment() {
+}
+#endif
--- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -251,6 +251,15 @@
   return frame(fr->sender_sp(), frame::unpatchable, fr->sender_pc());
 }
 
+// Returns an estimate of the current stack pointer. Result must be guaranteed to
+// point into the calling threads stack, and be no lower than the current stack
+// pointer.
+address os::current_stack_pointer() {
+  volatile int dummy;
+  address sp = (address)&dummy + 8;     // %%%% need to confirm if this is right
+  return sp;
+}
+
 frame os::current_frame() {
   intptr_t* sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
   frame myframe(sp, frame::unpatchable,
@@ -815,3 +824,8 @@
    __asm__ __volatile__ ("wr %%g0, 0, %%fprs \n\t" : : :);
   }
 #endif //defined(__sparc) && defined(COMPILER2)
+
+#ifndef PRODUCT
+void os::verify_stack_alignment() {
+}
+#endif
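
Note: where no precise intrinsic is available (here on SPARC, and in the 32-bit Windows hunk further down), os::current_stack_pointer() only estimates the stack pointer from the address of a local variable; the stated contract is merely that the result lies within the calling thread's stack and is not below the real SP. A hedged sketch of that idiom, with the caveat that the value is an approximation and is never dereferenced:

    #include <cstdio>

    typedef unsigned char* address;

    // Sketch only: estimate the stack pointer from a local's address.
    // Stacks grow downward, so any live slot of the current frame sits at
    // or above the hardware SP. Compilers may warn about returning a
    // local's address; the VM code above intentionally uses this pattern.
    static address estimate_sp() {
      volatile int dummy = 0;          // volatile forces a real stack slot
      return (address)&dummy;
    }

    int main() {
      std::printf("approximate sp: %p\n", (void*)estimate_sp());
      return 0;
    }
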
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -237,6 +237,12 @@
   return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
 }
 
+extern "C" intptr_t *_get_current_sp();  // in .il file
+
+address os::current_stack_pointer() {
+  return (address)_get_current_sp();
+}
+
 extern "C" intptr_t *_get_current_fp();  // in .il file
 
 frame os::current_frame() {
@@ -954,3 +960,11 @@
   _solaris_raw_setup_fpu(fpu_cntrl);
 }
 #endif // AMD64
+
+#ifndef PRODUCT
+void os::verify_stack_alignment() {
+#ifdef AMD64
+  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
+#endif
+}
+#endif
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -24,144 +24,3 @@
 
 // X86 Solaris Architecture Description File
 
-//----------OS-DEPENDENT ENCODING BLOCK-----------------------------------------------------
-// This block specifies the encoding classes used by the compiler to output
-// byte streams.  Encoding classes generate functions which are called by
-// Machine Instruction Nodes in order to generate the bit encoding of the
-// instruction.  Operands specify their base encoding interface with the
-// interface keyword.  There are currently supported four interfaces,
-// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
-// operand to generate a function which returns its register number when
-// queried.   CONST_INTER causes an operand to generate a function which
-// returns the value of the constant when queried.  MEMORY_INTER causes an
-// operand to generate four functions which return the Base Register, the
-// Index Register, the Scale Value, and the Offset Value of the operand when
-// queried.  COND_INTER causes an operand to generate six functions which
-// return the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional instruction.
-// Instructions specify two basic values for encoding.  They use the
-// ins_encode keyword to specify their encoding class (which must be one of
-// the class names specified in the encoding block), and they use the
-// opcode keyword to specify, in order, their primary, secondary, and
-// tertiary opcode.  Only the opcode sections which a particular instruction
-// needs for encoding need to be specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-
-  enc_class solaris_tlsencode (eRegP dst) %{
-    Register dstReg = as_Register($dst$$reg);
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-    masm->get_thread(dstReg);
-  %}
-
-  enc_class solaris_breakpoint  %{
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-    // Really need to fix this
-    masm->push(rax);
-    masm->push(rcx);
-    masm->push(rdx);
-    masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-    masm->pop(rdx);
-    masm->pop(rcx);
-    masm->pop(rax);
-  %}
-
-  enc_class call_epilog %{
-    if( VerifyStackAtCalls ) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP,-3*VMRegImpl::slots_per_word));
-      if(framesize >= 128) {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0xBC);
-        emit_d8(cbuf,0x24);
-        emit_d32(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      else {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0x7C);
-        emit_d8(cbuf,0x24);
-        emit_d8(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      // jmp EQ around INT3
-      // QQQ TODO
-      const int jump_around = 11; // size of call to breakpoint (and register preserve), 1 for CC
-      emit_opcode(cbuf,0x74);
-      emit_d8(cbuf, jump_around);
-      // QQQ temporary
-      emit_break(cbuf);
-      // Die if stack mismatch
-      // emit_opcode(cbuf,0xCC);
-    }
-  %}
-
-%}
-
-// INSTRUCTIONS -- Platform dependent
-
-//----------OS and Locking Instructions----------------------------------------
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-instruct tlsLoadP(eRegP dst, eFlagsReg cr) %{
-  match(Set dst (ThreadLocal));
-  effect(DEF dst, KILL cr);
-
-  format %{ "MOV    $dst, Thread::current()" %}
-  ins_encode( solaris_tlsencode(dst) );
-  ins_pipe( ialu_reg_fat );
-%}
-
-instruct TLS(eRegP dst) %{
-  match(Set dst (ThreadLocal));
-
-  expand %{
-    tlsLoadP(dst);
-  %}
-%}
-
-// Die now
-instruct ShouldNotReachHere( )
-%{
-  match(Halt);
-
-  // Use the following format syntax
-  format %{ "INT3   ; ShouldNotReachHere" %}
-  // QQQ TODO for now call breakpoint
-  // opcode(0xCC);
-  // ins_encode(Opc);
-  ins_encode(solaris_breakpoint);
-  ins_pipe( pipe_slow );
-%}
-
-
-
-// Platform dependent source
-
-source %{
-
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer &cbuf) {
-
-  // Debugger doesn't really catch this but best we can do so far QQQ
-  MacroAssembler* masm = new MacroAssembler(&cbuf);
-  masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
-  emit_break(cbuf);
-}
-
-
-uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
-  return MachNode::size(ra_);
-}
-
-%}
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.il	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.il	Tue Apr 10 10:42:34 2012 -0700
@@ -37,6 +37,12 @@
       movl     %gs:0, %eax 
       .end
 
+  // Get current sp
+      .inline _get_current_sp,0
+      .volatile
+      movl     %esp, %eax 
+      .end
+
   // Get current fp
       .inline _get_current_fp,0
       .volatile
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2004, 2006, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -55,8 +55,7 @@
   // adding a syntax that specifies the sizes of fields in an order,
   // so that the adlc can build the emit functions automagically
 
-  enc_class Java_To_Runtime(method meth)
-  %{
+  enc_class Java_To_Runtime(method meth) %{
     // No relocation needed
 
     // movq r10, <meth>
@@ -70,118 +69,24 @@
     emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
   %}
 
-  enc_class solaris_breakpoint
-  %{
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-    masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-  %}
-
-  enc_class call_epilog
-  %{
-    if (VerifyStackAtCalls) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize =
-        ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));
-      if (framesize) {
-        if (framesize < 0x80) {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0x7C);
-          emit_d8(cbuf, 0x24);
-          emit_d8(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        } else {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0xBC);
-          emit_d8(cbuf, 0x24);
-          emit_d32(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        }
-      }
-      // jmp EQ around INT3
-      // QQQ TODO
-      const int jump_around = 5; // size of call to breakpoint, 1 for CC
-      emit_opcode(cbuf, 0x74);
-      emit_d8(cbuf, jump_around);
-      // QQQ temporary
-      emit_break(cbuf);
-      // Die if stack mismatch
-      // emit_opcode(cbuf,0xCC);
+  enc_class post_call_verify_mxcsr %{
+    MacroAssembler _masm(&cbuf);
+    if (RestoreMXCSROnJNICalls) {
+      __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
+    }
+    else if (CheckJNICalls) {
+      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry())));
     }
   %}
-
-  enc_class post_call_verify_mxcsr %{
-    MacroAssembler masm(&cbuf);
-    if (RestoreMXCSROnJNICalls) {
-      masm.ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
-    }
-    else if (CheckJNICalls) {
-      masm.call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry())));
-    }
-  %}
-%}
-
-// INSTRUCTIONS -- Platform dependent
-
-//----------OS and Locking Instructions----------------------------------------
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-instruct tlsLoadP(r15_RegP dst)
-%{
-  match(Set dst (ThreadLocal));
-  effect(DEF dst);
-
-  size(0);
-  format %{ "# TLS is in R15" %}
-  ins_encode( /*empty encoding*/ );
-  ins_pipe(ialu_reg_reg);
-%}
-
-// Die now
-instruct ShouldNotReachHere()
-%{
-  match(Halt);
-
-  // Use the following format syntax
-  format %{ "int3\t# ShouldNotReachHere" %}
-  // QQQ TODO for now call breakpoint
-  // opcode(0xCC);
-  // ins_encode(Opc);
-  ins_encode(solaris_breakpoint);
-  ins_pipe(pipe_slow);
 %}
 
 
 // Platform dependent source
 
-source
-%{
+source %{
 
-int MachCallRuntimeNode::ret_addr_offset()
-{
+int MachCallRuntimeNode::ret_addr_offset() {
   return 13; // movq r10,#addr; callq (r10)
 }
 
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer& cbuf)
-{
-  // Debugger doesn't really catch this but best we can do so far QQQ
-  MacroAssembler* masm = new MacroAssembler(&cbuf);
-  masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MachBreakpointNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
-{
-  emit_break(cbuf);
-}
-
-uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const
-{
-  // distance could be far and requires load and call through register
-  return MachNode::size(ra_);
-}
-
 %}
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_64.il	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_64.il	Tue Apr 10 10:42:34 2012 -0700
@@ -30,6 +30,12 @@
       movq     %fs:0, %rax 
       .end
 
+  // Get current sp
+      .inline _get_current_sp,0
+      .volatile
+      movq     %rsp, %rax 
+      .end
+
   // Get current fp
       .inline _get_current_fp,0
       .volatile
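
Note: the Solaris Studio .il fragments above expose the raw stack pointer to C++ as _get_current_sp(). On GCC-style toolchains the same read is usually a one-line extended-asm statement; a hedged x86-64 equivalent (not part of this change) might look like:

    #include <cstdio>

    // Sketch only: read the stack pointer with GCC/Clang extended asm.
    // The Solaris build uses a .il inline template instead, because the
    // Studio compilers take their inline assembly from .il files.
    static inline void* current_sp() {
    #if defined(__x86_64__)
      void* sp;
      __asm__ volatile("movq %%rsp, %0" : "=r"(sp));
      return sp;
    #else
      return 0;   // other targets would use a port-specific read or an estimate
    #endif
    }

    int main() {
      std::printf("sp: %p\n", current_sp());
      return 0;
    }
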
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -370,6 +370,26 @@
   return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
 }
 
+#ifndef AMD64
+// Returns an estimate of the current stack pointer. Result must be guaranteed
+// to point into the calling threads stack, and be no lower than the current
+// stack pointer.
+address os::current_stack_pointer() {
+  int dummy;
+  address sp = (address)&dummy;
+  return sp;
+}
+#else
+// Returns the current stack pointer. Accurate value needed for
+// os::verify_stack_alignment().
+address os::current_stack_pointer() {
+  typedef address get_sp_func();
+  get_sp_func* func = CAST_TO_FN_PTR(get_sp_func*,
+                                     StubRoutines::x86::get_previous_sp_entry());
+  return (*func)();
+}
+#endif
+
 
 #ifndef AMD64
 intptr_t* _get_previous_fp() {
@@ -546,3 +566,11 @@
   __asm fldcw fpu_cntrl_word;
 #endif // !AMD64
 }
+
+#ifndef PRODUCT
+void os::verify_stack_alignment() {
+#ifdef AMD64
+  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
+#endif
+}
+#endif
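
Note: 64-bit Windows has no inline assembler, so the exact stack pointer is fetched by calling a tiny runtime-generated stub through a typedef'd function pointer (the CAST_TO_FN_PTR call above). A small sketch of that call-through-a-code-address pattern; fake_stub stands in for the generated code and the names are illustrative, not StubRoutines internals:

    #include <cstdio>
    #include <cstdint>

    typedef std::uintptr_t address_word;      // raw code address as an integer
    typedef void* get_sp_func();

    static void* fake_stub() { return 0; }    // stand-in for a generated stub

    static void* call_through(address_word entry) {
      // Mirrors the CAST_TO_FN_PTR idea: treat a raw code address as a
      // typed function pointer and call it.
      get_sp_func* f = reinterpret_cast<get_sp_func*>(entry);
      return (*f)();
    }

    int main() {
      address_word entry = reinterpret_cast<address_word>(&fake_stub);
      std::printf("%p\n", call_through(entry));
      return 0;
    }
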
--- a/src/os_cpu/windows_x86/vm/windows_x86_32.ad	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/windows_x86/vm/windows_x86_32.ad	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -24,134 +24,3 @@
 
 // X86 Win32 Architecture Description File
 
-//----------OS-DEPENDENT ENCODING BLOCK-----------------------------------------------------
-// This block specifies the encoding classes used by the compiler to output
-// byte streams.  Encoding classes generate functions which are called by
-// Machine Instruction Nodes in order to generate the bit encoding of the
-// instruction.  Operands specify their base encoding interface with the
-// interface keyword.  There are currently supported four interfaces,
-// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
-// operand to generate a function which returns its register number when
-// queried.   CONST_INTER causes an operand to generate a function which
-// returns the value of the constant when queried.  MEMORY_INTER causes an
-// operand to generate four functions which return the Base Register, the
-// Index Register, the Scale Value, and the Offset Value of the operand when
-// queried.  COND_INTER causes an operand to generate six functions which
-// return the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional instruction.
-// Instructions specify two basic values for encoding.  They use the
-// ins_encode keyword to specify their encoding class (which must be one of
-// the class names specified in the encoding block), and they use the
-// opcode keyword to specify, in order, their primary, secondary, and
-// tertiary opcode.  Only the opcode sections which a particular instruction
-// needs for encoding need to be specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-
-  enc_class tlsencode (eRegP dst, eRegP src) %{
-    emit_rm(cbuf, 0x2, $dst$$reg, $src$$reg);
-    emit_d32(cbuf, ThreadLocalStorage::get_thread_ptr_offset() );
-  %}
-
-  enc_class call_epilog %{
-    if( VerifyStackAtCalls ) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP,-3*VMRegImpl::slots_per_word));
-      if(framesize >= 128) {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0xBC);
-        emit_d8(cbuf,0x24);
-        emit_d32(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      else {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0x7C);
-        emit_d8(cbuf,0x24);
-        emit_d8(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      // jmp EQ around INT3
-      emit_opcode(cbuf,0x74);
-      emit_d8(cbuf,1);
-      // Die if stack mismatch
-      emit_opcode(cbuf,0xCC);
-    }
-  %}
-
-%}
-
-// INSTRUCTIONS -- Platform dependent
-
-
-//----------OS and Locking Instructions----------------------------------------
-
-// The prefix of this name is KNOWN by the ADLC and cannot be changed.
-instruct tlsLoadP_prefixLoadP(eRegP t1) %{
-  effect(DEF t1);
-
-  format %{ "MOV    $t1,FS:[0x00] "%}
-  opcode(0x8B, 0x64);
-  ins_encode(OpcS, OpcP, conmemref(t1));
-  ins_pipe( ialu_reg_fat );
-%}
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-// %%% Should do this with a clause like:  bottom_type(TypeRawPtr::BOTTOM);
-instruct tlsLoadP(eRegP dst, eRegP t1) %{
-  effect(DEF dst, USE t1);
-
-  format %{ "MOV    $dst,[$t1 + TLS::thread_ptr_offset()]" %}
-  opcode(0x8B);
-  ins_encode(OpcP, tlsencode(dst, t1));
-  ins_pipe( ialu_reg_reg_fat );
-%}
-
-instruct TLS(eRegP dst) %{
-  match(Set dst (ThreadLocal));
-  expand %{
-    eRegP t1;
-    tlsLoadP_prefixLoadP(t1);
-    tlsLoadP(dst, t1);
-  %}
-%}
-
-// Die now
-instruct ShouldNotReachHere( )
-%{
-  match(Halt);
-  // Use the following format syntax
-  format %{ "INT3   ; ShouldNotReachHere" %}
-  opcode(0xCC);
-  ins_encode(OpcP);
-  ins_pipe( pipe_slow );
-%}
-
-//
-// Platform dependent source
-//
-source %{
-
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer &cbuf) {
-  cbuf.insts()->emit_int8((unsigned char) 0xcc);
-}
-
-void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
-  emit_break(cbuf);
-}
-
-
-uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
-  return 1;
-}
-
-
-%}
--- a/src/os_cpu/windows_x86/vm/windows_x86_64.ad	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/os_cpu/windows_x86/vm/windows_x86_64.ad	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -67,69 +67,6 @@
     emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
   %}
 
-  enc_class call_epilog %{
-    if (VerifyStackAtCalls) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize =
-        ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));
-      if (framesize) {
-        if (framesize < 0x80) {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0x7C);
-          emit_d8(cbuf, 0x24);
-          emit_d8(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        } else {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0xBC);
-          emit_d8(cbuf, 0x24);
-          emit_d32(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        }
-      }
-      // jmp EQ around INT3
-      // QQQ TODO
-      const int jump_around = 5; // size of call to breakpoint, 1 for CC
-      emit_opcode(cbuf, 0x74);
-      emit_d8(cbuf, jump_around);
-      // QQQ temporary
-      emit_break(cbuf);
-      // Die if stack mismatch
-      // emit_opcode(cbuf,0xCC);
-    }
-  %}
-%}
-
-// INSTRUCTIONS -- Platform dependent
-
-
-//----------OS and Locking Instructions----------------------------------------
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-instruct tlsLoadP(r15_RegP dst)
-%{
-  match(Set dst (ThreadLocal));
-  effect(DEF dst);
-
-  size(0);
-  format %{ "# TLS is in R15" %}
-  ins_encode( /*empty encoding*/ );
-  ins_pipe(ialu_reg_reg);
-%}
-
-// Die now
-instruct ShouldNotReachHere( )
-%{
-  match(Halt);
-  // Use the following format syntax
-  format %{ "INT3   ; ShouldNotReachHere" %}
-  opcode(0xCC);
-  ins_encode(OpcP);
-  ins_pipe( pipe_slow );
 %}
 
 //
@@ -142,17 +79,4 @@
   return 13; // movq r10,#addr; callq (r10)
 }
 
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer &cbuf) {
-  cbuf.insts()->emit_int8((unsigned char) 0xcc);
-}
-
-void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
-  emit_break(cbuf);
-}
-
-uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
-  return 1;
-}
-
 %}
--- a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/Compilation.java	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/Compilation.java	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
     private boolean osr;
     private Method method;
     private CallSite call = new CallSite();
+    private CallSite lateInlineCall = new CallSite();
     private int osrBci;
     private String icount;
     private String bcount;
@@ -80,6 +81,13 @@
             sb.append(site);
             sb.append("\n");
         }
+        if (getLateInlineCall().getCalls() != null) {
+            sb.append("late inline:\n");
+            for (CallSite site : getLateInlineCall().getCalls()) {
+                sb.append(site);
+                sb.append("\n");
+            }
+        }
         return sb.toString();
     }
 
@@ -115,6 +123,12 @@
                     site.print(stream, indent + 2);
                 }
             }
+            if (printInlining && lateInlineCall.getCalls() != null) {
+                stream.println("late inline:");
+                for (CallSite site : lateInlineCall.getCalls()) {
+                    site.print(stream, indent + 2);
+                }
+            }
         }
     }
 
@@ -215,7 +229,11 @@
     }
 
     public void setMethod(Method method) {
-        this.method = method;
+        // Don't change method if it is already set to avoid changing
+        // it by post parse inlining info.
+        if (getMethod() == null) {
+            this.method = method;
+        }
     }
 
     public CallSite getCall() {
@@ -226,6 +244,10 @@
         this.call = call;
     }
 
+    public CallSite getLateInlineCall() {
+        return lateInlineCall;
+    }
+
     public double getElapsedTime() {
         return end - start;
     }
--- a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -146,6 +146,7 @@
     private CallSite site;
     private Stack<Phase> phaseStack = new Stack<Phase>();
     private UncommonTrapEvent currentTrap;
+    private Stack<CallSite> late_inline_scope;
 
     long parseLong(String l) {
         try {
@@ -302,6 +303,7 @@
             }
             events.add(compile);
             compiles.put(makeId(atts), compile);
+            site = compile.getCall();
         } else if (qname.equals("type")) {
             type(search(atts, "id"), search(atts, "name"));
         } else if (qname.equals("bc")) {
@@ -360,12 +362,22 @@
                 // uncommon trap inserted during parsing.
                 // ignore for now
             }
+        } else if (qname.equals("late_inline")) {
+            late_inline_scope = new Stack<CallSite>();
+            site = new CallSite(-999, method(search(atts, "method")));
+            late_inline_scope.push(site);
         } else if (qname.equals("jvms")) {
             // <jvms bci='4' method='java/io/DataInputStream readChar ()C' bytes='40' count='5815' iicount='20815'/>
             if (currentTrap != null) {
                 currentTrap.addJVMS(atts.getValue("method"), Integer.parseInt(atts.getValue("bci")));
+            } else if (late_inline_scope != null) {
+                bci = Integer.parseInt(search(atts, "bci"));
+                site = new CallSite(bci, method(search(atts, "method")));
+                late_inline_scope.push(site);
             } else {
-                // Ignore <eliminate_allocation type='667'> and <eliminate_lock lock='1'>
+                // Ignore <eliminate_allocation type='667'>,
+                //        <eliminate_lock lock='1'>,
+                //        <replace_string_concat arguments='2' string_alloc='0' multiple='0'>
             }
         } else if (qname.equals("nmethod")) {
             String id = makeId(atts);
@@ -379,7 +391,7 @@
             Method m = method(search(atts, "method"));
             if (scopes.size() == 0) {
                 compile.setMethod(m);
-                scopes.push(compile.getCall());
+                scopes.push(site);
             } else {
                 if (site.getMethod() == m) {
                     scopes.push(site);
@@ -393,7 +405,7 @@
             }
         } else if (qname.equals("parse_done")) {
             CallSite call = scopes.pop();
-            call.setEndNodes(Integer.parseInt(search(atts, "nodes")));
+            call.setEndNodes(Integer.parseInt(search(atts, "nodes", "1")));
             call.setTimeStamp(Double.parseDouble(search(atts, "stamp")));
             scopes.push(call);
         }
@@ -408,6 +420,43 @@
             scopes.pop();
         } else if (qname.equals("uncommon_trap")) {
             currentTrap = null;
+        } else if (qname.equals("late_inline")) {
+            // Populate late inlining info.
+
+            // late_inline scopes are specified in reverse order:
+            // compiled method should be on top of stack.
+            CallSite caller = late_inline_scope.pop();
+            Method m = compile.getMethod();
+            if (m != caller.getMethod()) {
+                System.out.println(m);
+                System.out.println(caller.getMethod() + " bci: " + bci);
+                throw new InternalError("call site and late_inline info don't match");
+            }
+
+            // late_inline contains caller+bci info, convert it
+            // to bci+callee info used by LogCompilation.
+            site = compile.getLateInlineCall();
+            do {
+                bci = caller.getBci();
+                // Next inlined call.
+                caller = late_inline_scope.pop();
+                CallSite callee =  new CallSite(bci, caller.getMethod());
+                site.add(callee);
+                site = callee;
+            } while (!late_inline_scope.empty());
+
+            if (caller.getBci() != -999) {
+                System.out.println(caller.getMethod());
+                throw new InternalError("broken late_inline info");
+            }
+            if (site.getMethod() != caller.getMethod()) {
+                System.out.println(site.getMethod());
+                System.out.println(caller.getMethod());
+                throw new InternalError("call site and late_inline info don't match");
+            }
+            // late_inline is followed by parse with scopes.size() == 0,
+            // 'site' will be pushed to scopes.
+            late_inline_scope = null;
         } else if (qname.equals("task")) {
             types.clear();
             methods.clear();
--- a/src/share/tools/ProjectCreator/BuildConfig.java	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/tools/ProjectCreator/BuildConfig.java	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -243,6 +243,7 @@
         sysDefines.add("_WINDOWS");
         sysDefines.add("HOTSPOT_BUILD_USER=\\\""+System.getProperty("user.name")+"\\\"");
         sysDefines.add("HOTSPOT_BUILD_TARGET=\\\""+get("Build")+"\\\"");
+        sysDefines.add("INCLUDE_TRACE");
         sysDefines.add("_JNI_IMPLEMENTATION_");
         if (vars.get("PlatformName").equals("Win32")) {
             sysDefines.add("HOTSPOT_LIB_ARCH=\\\"i386\\\"");
--- a/src/share/tools/hsdis/hsdis.c	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/tools/hsdis/hsdis.c	Tue Apr 10 10:42:34 2012 -0700
@@ -356,7 +356,7 @@
       if (plen > mach_size)  plen = mach_size;
       strncpy(mach_option, p, plen);
       mach_option[plen] = '\0';
-    } else if (plen > 6 && strncmp(p, "hsdis-", 6)) {
+    } else if (plen > 6 && strncmp(p, "hsdis-", 6) == 0) {
       // do not pass these to the next level
     } else {
       /* just copy it; {i386,sparc}-dis.c might like to see it  */
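
Note: the one-character hsdis.c fix above is easy to miss: strncmp() returns 0 on a match, so the old condition strncmp(p, "hsdis-", 6) was true exactly when the option did NOT start with "hsdis-", inverting the filter that is supposed to consume hsdis-specific options. A small sketch of a prefix test that makes the comparison explicit:

    #include <cstring>
    #include <cassert>

    // Sketch: strncmp() yields 0 on a match, so wrap it to avoid the
    // classic inverted-condition bug fixed above.
    static bool has_prefix(const char* s, const char* prefix) {
      return std::strncmp(s, prefix, std::strlen(prefix)) == 0;
    }

    int main() {
      assert(has_prefix("hsdis-print-bytes", "hsdis-"));
      assert(!has_prefix("i386", "hsdis-"));
      return 0;
    }
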
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/tools/whitebox/sun/hotspot/WhiteBox.java	Tue Apr 10 10:42:34 2012 -0700
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.hotspot;
+import java.security.BasicPermission;
+
+public class WhiteBox {
+
+  @SuppressWarnings("serial")
+  public static class WhiteBoxPermission extends BasicPermission {
+    public WhiteBoxPermission(String s) {
+      super(s);
+    }
+  }
+
+  private WhiteBox() {}
+  private static final WhiteBox instance = new WhiteBox();
+  private static native void registerNatives();
+
+  /**
+   * Returns the singleton WhiteBox instance.
+   *
+   * The returned WhiteBox object should be carefully guarded
+   * by the caller, since it can be used to read and write data
+   * at arbitrary memory addresses. It must never be passed to
+   * untrusted code.
+   */
+  public synchronized static WhiteBox getWhiteBox() {
+    SecurityManager sm = System.getSecurityManager();
+    if (sm != null) {
+      sm.checkPermission(new WhiteBoxPermission("getInstance"));
+    }
+    return instance;
+  }
+
+  static {
+    registerNatives();
+  }
+
+  // Memory
+  public native long getObjectAddress(Object o);
+  public native int  getHeapOopSize();
+
+  // G1
+  public native boolean g1InConcurrentMark();
+  public native boolean g1IsHumongous(Object o);
+  public native long    g1NumFreeRegions();
+  public native int     g1RegionSize();
+}
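
Note: WhiteBox.java declares its test hooks as native methods and binds them from a static initializer via registerNatives(). The VM side of that binding is not part of this hunk; what follows is only a hedged sketch of how such a registration is conventionally done with the standard JNI RegisterNatives call. The entry shown and its placeholder body are illustrative, not the actual HotSpot implementation:

    #include <jni.h>

    // Illustrative native; the real white-box natives live inside the VM.
    static jint WB_GetHeapOopSize(JNIEnv* env, jobject wb) {
      (void)env; (void)wb;
      return 8;   // placeholder value
    }

    static JNINativeMethod methods[] = {
      { (char*)"getHeapOopSize", (char*)"()I", (void*)&WB_GetHeapOopSize },
    };

    extern "C" JNIEXPORT void JNICALL
    Java_sun_hotspot_WhiteBox_registerNatives(JNIEnv* env, jclass wbclass) {
      // Bind the Java native declarations to their C++ implementations.
      env->RegisterNatives(wbclass, methods,
                           sizeof(methods) / sizeof(methods[0]));
    }
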
--- a/src/share/vm/c1/c1_Canonicalizer.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_Canonicalizer.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -594,6 +594,13 @@
   return false;
 }
 
+static bool is_safepoint(BlockEnd* x, BlockBegin* sux) {
+  // An Instruction with multiple successors, x, is replaced by a Goto
+  // to a single successor, sux. Is a safepoint check needed = was the
+  // instruction being replaced a safepoint and the single remaining
+  // successor a back branch?
+  return x->is_safepoint() && (sux->bci() < x->state_before()->bci());
+}
 
 void Canonicalizer::do_If(If* x) {
   // move const to right
@@ -614,7 +621,7 @@
     case If::geq: sux = x->sux_for(true);  break;
     }
     // If is a safepoint then the debug information should come from the state_before of the If.
-    set_canonical(new Goto(sux, x->state_before(), x->is_safepoint()));
+    set_canonical(new Goto(sux, x->state_before(), is_safepoint(x, sux)));
     return;
   }
 
@@ -626,7 +633,7 @@
                                                        x->sux_for(false));
       if (sux != NULL) {
         // If is a safepoint then the debug information should come from the state_before of the If.
-        set_canonical(new Goto(sux, x->state_before(), x->is_safepoint()));
+        set_canonical(new Goto(sux, x->state_before(), is_safepoint(x, sux)));
       }
     }
   } else if (rt->as_IntConstant() != NULL) {
@@ -694,10 +701,12 @@
     }
   } else if (rt == objectNull && (l->as_NewInstance() || l->as_NewArray())) {
     if (x->cond() == Instruction::eql) {
-      set_canonical(new Goto(x->fsux(), x->state_before(), x->is_safepoint()));
+      BlockBegin* sux = x->fsux();
+      set_canonical(new Goto(sux, x->state_before(), is_safepoint(x, sux)));
     } else {
       assert(x->cond() == Instruction::neq, "only other valid case");
-      set_canonical(new Goto(x->tsux(), x->state_before(), x->is_safepoint()));
+      BlockBegin* sux = x->tsux();
+      set_canonical(new Goto(sux, x->state_before(), is_safepoint(x, sux)));
     }
   }
 }
@@ -710,7 +719,7 @@
     if (v >= x->lo_key() && v <= x->hi_key()) {
       sux = x->sux_at(v - x->lo_key());
     }
-    set_canonical(new Goto(sux, x->state_before(), x->is_safepoint()));
+    set_canonical(new Goto(sux, x->state_before(), is_safepoint(x, sux)));
   } else if (x->number_of_sux() == 1) {
     // NOTE: Code permanently disabled for now since the switch statement's
     //       tag expression may produce side-effects in which case it must
@@ -741,7 +750,7 @@
         sux = x->sux_at(i);
       }
     }
-    set_canonical(new Goto(sux, x->state_before(), x->is_safepoint()));
+    set_canonical(new Goto(sux, x->state_before(), is_safepoint(x, sux)));
   } else if (x->number_of_sux() == 1) {
     // NOTE: Code permanently disabled for now since the switch statement's
     //       tag expression may produce side-effects in which case it must
@@ -899,3 +908,4 @@
 void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
 void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
 void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {}
+void Canonicalizer::do_MemBar(MemBar* x) {}
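
Note: the new is_safepoint() helper narrows when a canonicalized Goto keeps the original branch's safepoint: only when the surviving successor is a backward branch (its bci is below the bci of the instruction being replaced), since only back edges need a safepoint poll to keep loops interruptible. A tiny sketch of that predicate in isolation (the struct is a stand-in, not the C1 classes):

    #include <cassert>

    // Hypothetical stand-ins for the BlockEnd/BlockBegin queries used above.
    struct Branch {
      bool had_safepoint;   // the multi-successor instruction was a safepoint
      int  branch_bci;      // bci of the instruction being replaced
      int  target_bci;      // bci of the single surviving successor
    };

    static bool keep_safepoint(const Branch& b) {
      // Keep the poll only for back edges: a forward goto cannot form a loop.
      return b.had_safepoint && b.target_bci < b.branch_bci;
    }

    int main() {
      Branch back    = { true, 40, 10 };   // loop back edge: keep the safepoint
      Branch forward = { true, 40, 60 };   // forward branch: drop it
      assert(keep_safepoint(back));
      assert(!keep_safepoint(forward));
      return 0;
    }
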
--- a/src/share/vm/c1/c1_Canonicalizer.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_Canonicalizer.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -104,6 +104,7 @@
   virtual void do_ProfileCall    (ProfileCall*     x);
   virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   virtual void do_RuntimeCall    (RuntimeCall*     x);
+  virtual void do_MemBar         (MemBar*          x);
 };
 
 #endif // SHARE_VM_C1_C1_CANONICALIZER_HPP
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1181,6 +1181,11 @@
   bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
   Instruction *i = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb));
 
+  assert(i->as_Goto() == NULL ||
+         (i->as_Goto()->sux_at(0) == tsux  && i->as_Goto()->is_safepoint() == tsux->bci() < stream()->cur_bci()) ||
+         (i->as_Goto()->sux_at(0) == fsux  && i->as_Goto()->is_safepoint() == fsux->bci() < stream()->cur_bci()),
+         "safepoint state of Goto returned by canonicalizer incorrect");
+
   if (is_profiling()) {
     If* if_node = i->as_If();
     if (if_node != NULL) {
@@ -1301,9 +1306,19 @@
       if (sw.dest_offset_at(i) < 0) has_bb = true;
     }
     // add default successor
+    if (sw.default_offset() < 0) has_bb = true;
     sux->at_put(i, block_at(bci() + sw.default_offset()));
     ValueStack* state_before = has_bb ? copy_state_before() : NULL;
-    append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
+    Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
+#ifdef ASSERT
+    if (res->as_Goto()) {
+      for (i = 0; i < l; i++) {
+        if (sux->at(i) == res->as_Goto()->sux_at(0)) {
+          assert(res->as_Goto()->is_safepoint() == sw.dest_offset_at(i) < 0, "safepoint state of Goto returned by canonicalizer incorrect");
+        }
+      }
+    }
+#endif
   }
 }
 
@@ -1336,9 +1351,19 @@
       keys->at_put(i, pair.match());
     }
     // add default successor
+    if (sw.default_offset() < 0) has_bb = true;
     sux->at_put(i, block_at(bci() + sw.default_offset()));
     ValueStack* state_before = has_bb ? copy_state_before() : NULL;
-    append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
+    Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
+#ifdef ASSERT
+    if (res->as_Goto()) {
+      for (i = 0; i < l; i++) {
+        if (sux->at(i) == res->as_Goto()->sux_at(0)) {
+          assert(res->as_Goto()->is_safepoint() == sw.pair_at(i).offset() < 0, "safepoint state of Goto returned by canonicalizer incorrect");
+        }
+      }
+    }
+#endif
   }
 }
 
@@ -1395,6 +1420,12 @@
     call_register_finalizer();
   }
 
+  bool need_mem_bar = false;
+  if (method()->name() == ciSymbol::object_initializer_name() &&
+      scope()->wrote_final()) {
+    need_mem_bar = true;
+  }
+
   // Check to see whether we are inlining. If so, Return
   // instructions become Gotos to the continuation point.
   if (continuation() != NULL) {
@@ -1414,6 +1445,10 @@
       monitorexit(state()->lock_at(0), SynchronizationEntryBCI);
     }
 
+    if (need_mem_bar) {
+      append(new MemBar(lir_membar_storestore));
+    }
+
     // State at end of inlined method is the state of the caller
     // without the method parameters on stack, including the
     // return value, if any, of the inlined method on operand stack.
@@ -1433,7 +1468,6 @@
     // the continuation point.
     append_with_bci(goto_callee, scope_data()->continuation()->bci());
     incr_num_returns();
-
     return;
   }
 
@@ -1449,6 +1483,10 @@
     append_split(new MonitorExit(receiver, state()->unlock()));
   }
 
+  if (need_mem_bar) {
+      append(new MemBar(lir_membar_storestore));
+  }
+
   append(new Return(x));
 }
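
Note: the MemBar(lir_membar_storestore) appends inserted above implement the Java memory model's final-field guarantee: stores made by a constructor that wrote a final field must become visible before the reference to the new object is published. A minimal C++ sketch of the ordering the barrier enforces; it illustrates the semantics only, not the C1 code path:

    #include <atomic>

    struct Box { int payload; };

    std::atomic<Box*> published{nullptr};

    void construct_and_publish() {
      Box* b = new Box;
      b->payload = 42;                              // the "final field" store
      // StoreStore ordering: the field store above must not be reordered
      // past the publishing store below; a release fence expresses that.
      std::atomic_thread_fence(std::memory_order_release);
      published.store(b, std::memory_order_relaxed);
    }

    int main() {
      construct_and_publish();
      return 0;
    }
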
 
@@ -1481,6 +1519,9 @@
     }
   }
 
+  if (field->is_final() && (code == Bytecodes::_putfield)) {
+    scope()->set_wrote_final();
+  }
 
   const int offset = !needs_patching ? field->offset() : -1;
   switch (code) {
@@ -1592,6 +1633,7 @@
   // this happened while running the JCK invokevirtual tests under doit.  TKR
   ciMethod* cha_monomorphic_target = NULL;
   ciMethod* exact_target = NULL;
+  Value better_receiver = NULL;
   if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
       !target->is_method_handle_invoke()) {
     Value receiver = NULL;
@@ -1653,6 +1695,18 @@
       ciInstanceKlass* singleton = NULL;
       if (target->holder()->nof_implementors() == 1) {
         singleton = target->holder()->implementor(0);
+
+        assert(holder->is_interface(), "invokeinterface to non interface?");
+        ciInstanceKlass* decl_interface = (ciInstanceKlass*)holder;
+        // the number of implementors for decl_interface is less or
+        // equal to the number of implementors for target->holder() so
+        // if number of implementors of target->holder() == 1 then
+        // number of implementors for decl_interface is 0 or 1. If
+        // it's 0 then no class implements decl_interface and there's
+        // no point in inlining.
+        if (!holder->is_loaded() || decl_interface->nof_implementors() != 1) {
+          singleton = NULL;
+        }
       }
       if (singleton) {
         cha_monomorphic_target = target->find_monomorphic_target(calling_klass, target->holder(), singleton);
@@ -1667,7 +1721,9 @@
           CheckCast* c = new CheckCast(klass, receiver, copy_state_for_exception());
           c->set_incompatible_class_change_check();
           c->set_direct_compare(klass->is_final());
-          append_split(c);
+          // pass the result of the checkcast so that the compiler has
+          // more accurate type info in the inlinee
+          better_receiver = append_split(c);
         }
       }
     }
@@ -1709,7 +1765,7 @@
       }
       if (!success) {
         // static binding => check if callee is ok
-        success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
+        success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), better_receiver);
       }
       CHECK_BAILOUT();
 
@@ -3034,7 +3090,7 @@
 }
 
 
-bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
+bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Value receiver) {
   // Clear out any existing inline bailout condition
   clear_inline_bailout();
 
@@ -3056,7 +3112,7 @@
   } else if (callee->is_abstract()) {
     INLINE_BAILOUT("abstract")
   } else {
-    return try_inline_full(callee, holder_known);
+    return try_inline_full(callee, holder_known, NULL, receiver);
   }
 }
 
@@ -3405,7 +3461,7 @@
 }
 
 
-bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBegin* cont_block) {
+bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBegin* cont_block, Value receiver) {
   assert(!callee->is_native(), "callee must not be native");
   if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) {
     INLINE_BAILOUT("inlining prohibited by policy");
@@ -3541,6 +3597,9 @@
       Value  arg = caller_state->stack_at_inc(i);
       // NOTE: take base() of arg->type() to avoid problems storing
       // constants
+      if (receiver != NULL && par_no == 0) {
+        arg = receiver;
+      }
       store_local(callee_state, arg, arg->type()->base(), par_no);
     }
   }
@@ -3683,56 +3742,61 @@
       // Get the two MethodHandle inputs from the Phi.
       Value op1 = phi->operand_at(0);
       Value op2 = phi->operand_at(1);
-      ciMethodHandle* mh1 = op1->type()->as_ObjectType()->constant_value()->as_method_handle();
-      ciMethodHandle* mh2 = op2->type()->as_ObjectType()->constant_value()->as_method_handle();
-
-      // Set the callee to have access to the class and signature in
-      // the MethodHandleCompiler.
-      mh1->set_callee(callee);
-      mh1->set_caller(method());
-      mh2->set_callee(callee);
-      mh2->set_caller(method());
-
-      // Get adapters for the MethodHandles.
-      ciMethod* mh1_adapter = mh1->get_method_handle_adapter();
-      ciMethod* mh2_adapter = mh2->get_method_handle_adapter();
-
-      if (mh1_adapter != NULL && mh2_adapter != NULL) {
-        set_inline_cleanup_info();
-
-        // Build the If guard
-        BlockBegin* one = new BlockBegin(next_bci());
-        BlockBegin* two = new BlockBegin(next_bci());
-        BlockBegin* end = new BlockBegin(next_bci());
-        Instruction* iff = append(new If(phi, If::eql, false, op1, one, two, NULL, false));
-        block()->set_end(iff->as_BlockEnd());
-
-        // Connect up the states
-        one->merge(block()->end()->state());
-        two->merge(block()->end()->state());
-
-        // Save the state for the second inlinee
-        ValueStack* state_before = copy_state_before();
-
-        // Parse first adapter
-        _last = _block = one;
-        if (!try_inline_full(mh1_adapter, /*holder_known=*/ true, end)) {
-          restore_inline_cleanup_info();
-          block()->clear_end();  // remove appended iff
-          return false;
+      ObjectType* op1type = op1->type()->as_ObjectType();
+      ObjectType* op2type = op2->type()->as_ObjectType();
+
+      if (op1type->is_constant() && op2type->is_constant()) {
+        ciMethodHandle* mh1 = op1type->constant_value()->as_method_handle();
+        ciMethodHandle* mh2 = op2type->constant_value()->as_method_handle();
+
+        // Set the callee to have access to the class and signature in
+        // the MethodHandleCompiler.
+        mh1->set_callee(callee);
+        mh1->set_caller(method());
+        mh2->set_callee(callee);
+        mh2->set_caller(method());
+
+        // Get adapters for the MethodHandles.
+        ciMethod* mh1_adapter = mh1->get_method_handle_adapter();
+        ciMethod* mh2_adapter = mh2->get_method_handle_adapter();
+
+        if (mh1_adapter != NULL && mh2_adapter != NULL) {
+          set_inline_cleanup_info();
+
+          // Build the If guard
+          BlockBegin* one = new BlockBegin(next_bci());
+          BlockBegin* two = new BlockBegin(next_bci());
+          BlockBegin* end = new BlockBegin(next_bci());
+          Instruction* iff = append(new If(phi, If::eql, false, op1, one, two, NULL, false));
+          block()->set_end(iff->as_BlockEnd());
+
+          // Connect up the states
+          one->merge(block()->end()->state());
+          two->merge(block()->end()->state());
+
+          // Save the state for the second inlinee
+          ValueStack* state_before = copy_state_before();
+
+          // Parse first adapter
+          _last = _block = one;
+          if (!try_inline_full(mh1_adapter, /*holder_known=*/ true, end, NULL)) {
+            restore_inline_cleanup_info();
+            block()->clear_end();  // remove appended iff
+            return false;
+          }
+
+          // Parse second adapter
+          _last = _block = two;
+          _state = state_before;
+          if (!try_inline_full(mh2_adapter, /*holder_known=*/ true, end, NULL)) {
+            restore_inline_cleanup_info();
+            block()->clear_end();  // remove appended iff
+            return false;
+          }
+
+          connect_to_end(end);
+          return true;
         }
-
-        // Parse second adapter
-        _last = _block = two;
-        _state = state_before;
-        if (!try_inline_full(mh2_adapter, /*holder_known=*/ true, end)) {
-          restore_inline_cleanup_info();
-          block()->clear_end();  // remove appended iff
-          return false;
-        }
-
-        connect_to_end(end);
-        return true;
       }
     }
   }
--- a/src/share/vm/c1/c1_GraphBuilder.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -337,9 +337,9 @@
   void fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler = false);
 
   // inliners
-  bool try_inline(           ciMethod* callee, bool holder_known);
+  bool try_inline(           ciMethod* callee, bool holder_known, Value receiver = NULL);
   bool try_inline_intrinsics(ciMethod* callee);
-  bool try_inline_full(      ciMethod* callee, bool holder_known, BlockBegin* cont_block = NULL);
+  bool try_inline_full(      ciMethod* callee, bool holder_known, BlockBegin* cont_block, Value receiver);
   bool try_inline_jsr(int jsr_dest_bci);
 
   // JSR 292 support
--- a/src/share/vm/c1/c1_IR.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_IR.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -141,6 +141,7 @@
   _xhandlers          = new XHandlers(method);
   _number_of_locks    = 0;
   _monitor_pairing_ok = method->has_balanced_monitors();
+  _wrote_final        = false;
   _start              = NULL;
 
   if (osr_bci == -1) {
--- a/src/share/vm/c1/c1_IR.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_IR.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -149,6 +149,7 @@
   XHandlers*    _xhandlers;                      // the exception handlers
   int           _number_of_locks;                // the number of monitor lock slots needed
   bool          _monitor_pairing_ok;             // the monitor pairing info
+  bool          _wrote_final;                    // has written final field
   BlockBegin*   _start;                          // the start block, successsors are method entries
 
   BitMap        _requires_phi_function;          // bit is set if phi functions at loop headers are necessary for a local variable
@@ -181,6 +182,8 @@
   void          set_min_number_of_locks(int n)   { if (n > _number_of_locks) _number_of_locks = n; }
   bool          monitor_pairing_ok() const       { return _monitor_pairing_ok; }
   BlockBegin*   start() const                    { return _start; }
+  void          set_wrote_final()                { _wrote_final = true; }
+  bool          wrote_final    () const          { return _wrote_final; }
 };
 
 
--- a/src/share/vm/c1/c1_Instruction.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_Instruction.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -107,6 +107,7 @@
 class   ProfileCall;
 class   ProfileInvoke;
 class   RuntimeCall;
+class   MemBar;
 
 // A Value is a reference to the instruction creating the value
 typedef Instruction* Value;
@@ -204,6 +205,7 @@
   virtual void do_ProfileCall    (ProfileCall*     x) = 0;
   virtual void do_ProfileInvoke  (ProfileInvoke*   x) = 0;
   virtual void do_RuntimeCall    (RuntimeCall*     x) = 0;
+  virtual void do_MemBar         (MemBar*          x) = 0;
 };
 
 
@@ -2351,6 +2353,23 @@
   virtual void state_values_do(ValueVisitor*);
 };
 
+LEAF(MemBar, Instruction)
+ private:
+  LIR_Code _code;
+
+ public:
+  MemBar(LIR_Code code)
+    : Instruction(voidType)
+    , _code(code)
+  {
+    pin();
+  }
+
+  LIR_Code code()           { return _code; }
+
+  virtual void input_values_do(ValueVisitor*)   {}
+};
+
 class BlockPair: public CompilationResourceObj {
  private:
   BlockBegin* _from;
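
The LEAF(MemBar, Instruction) node added above is the HIR half of the new barrier support: it only wraps a LIR barrier code and pins itself so value numbering cannot drop it (the _wrote_final flag added in c1_IR.hpp above suggests the graph builder uses it to append a storestore barrier after constructors that write final fields). A minimal standalone sketch of the same shape, using plain C++ stand-ins rather than the real C1 types and enum values:

// Illustrative sketch only; Instruction and LIR_Code are simplified stand-ins.
#include <cstdio>

enum LIR_Code { lir_membar, lir_membar_acquire, lir_membar_release,
                lir_membar_storestore };

struct Instruction {
  bool pinned = false;
  void pin() { pinned = true; }        // pinned nodes survive value numbering
  virtual ~Instruction() {}
};

struct MemBar : Instruction {
  LIR_Code _code;
  explicit MemBar(LIR_Code code) : _code(code) { pin(); }
  LIR_Code code() const { return _code; }
};

int main() {
  MemBar mb(lir_membar_storestore);    // e.g. emitted after a final-field store
  std::printf("code=%d pinned=%d\n", (int)mb.code(), (int)mb.pinned);
}
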
--- a/src/share/vm/c1/c1_InstructionPrinter.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_InstructionPrinter.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -855,4 +855,20 @@
   output()->put(')');
 }
 
+void InstructionPrinter::do_MemBar(MemBar* x) {
+  if (os::is_MP()) {
+    LIR_Code code = x->code();
+    switch (code) {
+      case lir_membar_acquire   : output()->print("membar_acquire"); break;
+      case lir_membar_release   : output()->print("membar_release"); break;
+      case lir_membar           : output()->print("membar"); break;
+      case lir_membar_loadload  : output()->print("membar_loadload"); break;
+      case lir_membar_storestore: output()->print("membar_storestore"); break;
+      case lir_membar_loadstore : output()->print("membar_loadstore"); break;
+      case lir_membar_storeload : output()->print("membar_storeload"); break;
+      default                   : ShouldNotReachHere(); break;
+    }
+  }
+}
+
 #endif // PRODUCT
--- a/src/share/vm/c1/c1_InstructionPrinter.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_InstructionPrinter.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -132,6 +132,7 @@
   virtual void do_ProfileCall    (ProfileCall*     x);
   virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   virtual void do_RuntimeCall    (RuntimeCall*     x);
+  virtual void do_MemBar         (MemBar*          x);
 };
 #endif // PRODUCT
 
--- a/src/share/vm/c1/c1_LIR.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_LIR.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -464,6 +464,10 @@
     case lir_membar:                   // result and info always invalid
     case lir_membar_acquire:           // result and info always invalid
     case lir_membar_release:           // result and info always invalid
+    case lir_membar_loadload:          // result and info always invalid
+    case lir_membar_storestore:        // result and info always invalid
+    case lir_membar_loadstore:         // result and info always invalid
+    case lir_membar_storeload:         // result and info always invalid
     {
       assert(op->as_Op0() != NULL, "must be");
       assert(op->_info == NULL, "info not used by this instruction");
@@ -1607,6 +1611,10 @@
      case lir_membar:                s = "membar";        break;
      case lir_membar_acquire:        s = "membar_acquire"; break;
      case lir_membar_release:        s = "membar_release"; break;
+     case lir_membar_loadload:       s = "membar_loadload";   break;
+     case lir_membar_storestore:     s = "membar_storestore"; break;
+     case lir_membar_loadstore:      s = "membar_loadstore";  break;
+     case lir_membar_storeload:      s = "membar_storeload";  break;
      case lir_word_align:            s = "word_align";    break;
      case lir_label:                 s = "label";         break;
      case lir_nop:                   s = "nop";           break;
--- a/src/share/vm/c1/c1_LIR.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_LIR.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -866,6 +866,10 @@
       , lir_membar
       , lir_membar_acquire
       , lir_membar_release
+      , lir_membar_loadload
+      , lir_membar_storestore
+      , lir_membar_loadstore
+      , lir_membar_storeload
       , lir_get_thread
   , end_op0
   , begin_op1
@@ -1354,9 +1358,10 @@
   CodeStub*     _stub;   // if this is a branch to a stub, this is the stub
 
  public:
-  LIR_OpBranch(LIR_Condition cond, Label* lbl)
+  LIR_OpBranch(LIR_Condition cond, BasicType type, Label* lbl)
     : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
     , _cond(cond)
+    , _type(type)
     , _label(lbl)
     , _block(NULL)
     , _ublock(NULL)
@@ -1917,6 +1922,10 @@
   void membar()                                  { append(new LIR_Op0(lir_membar)); }
   void membar_acquire()                          { append(new LIR_Op0(lir_membar_acquire)); }
   void membar_release()                          { append(new LIR_Op0(lir_membar_release)); }
+  void membar_loadload()                         { append(new LIR_Op0(lir_membar_loadload)); }
+  void membar_storestore()                       { append(new LIR_Op0(lir_membar_storestore)); }
+  void membar_loadstore()                        { append(new LIR_Op0(lir_membar_loadstore)); }
+  void membar_storeload()                        { append(new LIR_Op0(lir_membar_storeload)); }
 
   void nop()                                     { append(new LIR_Op0(lir_nop)); }
   void build_frame()                             { append(new LIR_Op0(lir_build_frame)); }
@@ -2053,7 +2062,7 @@
   void jump(CodeStub* stub) {
     append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
   }
-  void branch(LIR_Condition cond, Label* lbl)        { append(new LIR_OpBranch(cond, lbl)); }
+  void branch(LIR_Condition cond, BasicType type, Label* lbl)        { append(new LIR_OpBranch(cond, type, lbl)); }
   void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
     assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
     append(new LIR_OpBranch(cond, type, block));
--- a/src/share/vm/c1/c1_LIRAssembler.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -664,6 +664,22 @@
       membar_release();
       break;
 
+    case lir_membar_loadload:
+      membar_loadload();
+      break;
+
+    case lir_membar_storestore:
+      membar_storestore();
+      break;
+
+    case lir_membar_loadstore:
+      membar_loadstore();
+      break;
+
+    case lir_membar_storeload:
+      membar_storeload();
+      break;
+
     case lir_get_thread:
       get_thread(op->result_opr());
       break;
--- a/src/share/vm/c1/c1_LIRAssembler.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -241,6 +241,10 @@
   void membar();
   void membar_acquire();
   void membar_release();
+  void membar_loadload();
+  void membar_storestore();
+  void membar_loadstore();
+  void membar_storeload();
   void get_thread(LIR_Opr result);
 
   void verify_oop_map(CodeEmitInfo* info);
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -2350,7 +2350,7 @@
     } else {
       LabelObj* L = new LabelObj();
       __ cmp(lir_cond_less, value, low_key);
-      __ branch(lir_cond_less, L->label());
+      __ branch(lir_cond_less, T_INT, L->label());
       __ cmp(lir_cond_lessEqual, value, high_key);
       __ branch(lir_cond_lessEqual, T_INT, dest);
       __ branch_destination(L->label());
@@ -3165,3 +3165,20 @@
   }
   return result;
 }
+
+void LIRGenerator::do_MemBar(MemBar* x) {
+  if (os::is_MP()) {
+    LIR_Code code = x->code();
+    switch(code) {
+      case lir_membar_acquire   : __ membar_acquire(); break;
+      case lir_membar_release   : __ membar_release(); break;
+      case lir_membar           : __ membar(); break;
+      case lir_membar_loadload  : __ membar_loadload(); break;
+      case lir_membar_storestore: __ membar_storestore(); break;
+      case lir_membar_loadstore : __ membar_loadstore(); break;
+      case lir_membar_storeload : __ membar_storeload(); break;
+      default                   : ShouldNotReachHere(); break;
+    }
+  }
+}
+
--- a/src/share/vm/c1/c1_LIRGenerator.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_LIRGenerator.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -525,6 +525,7 @@
   virtual void do_ProfileCall    (ProfileCall*     x);
   virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   virtual void do_RuntimeCall    (RuntimeCall*     x);
+  virtual void do_MemBar         (MemBar*          x);
 };
 
 
--- a/src/share/vm/c1/c1_LinearScan.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_LinearScan.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1884,7 +1884,7 @@
 
   if (move_resolver.has_mappings()) {
     // insert moves after first instruction
-    move_resolver.set_insert_position(block->lir(), 1);
+    move_resolver.set_insert_position(block->lir(), 0);
     move_resolver.resolve_and_append_moves();
   }
 }
@@ -2464,12 +2464,15 @@
 
 
 // frequently used constants
-ConstantOopWriteValue LinearScan::_oop_null_scope_value = ConstantOopWriteValue(NULL);
-ConstantIntValue      LinearScan::_int_m1_scope_value = ConstantIntValue(-1);
-ConstantIntValue      LinearScan::_int_0_scope_value =  ConstantIntValue(0);
-ConstantIntValue      LinearScan::_int_1_scope_value =  ConstantIntValue(1);
-ConstantIntValue      LinearScan::_int_2_scope_value =  ConstantIntValue(2);
-LocationValue         _illegal_value = LocationValue(Location());
+// Allocate them with new so they are never destroyed (otherwise, a
+// forced exit could destroy these objects while they are still in
+// use).
+ConstantOopWriteValue* LinearScan::_oop_null_scope_value = new (ResourceObj::C_HEAP) ConstantOopWriteValue(NULL);
+ConstantIntValue*      LinearScan::_int_m1_scope_value = new (ResourceObj::C_HEAP) ConstantIntValue(-1);
+ConstantIntValue*      LinearScan::_int_0_scope_value =  new (ResourceObj::C_HEAP) ConstantIntValue(0);
+ConstantIntValue*      LinearScan::_int_1_scope_value =  new (ResourceObj::C_HEAP) ConstantIntValue(1);
+ConstantIntValue*      LinearScan::_int_2_scope_value =  new (ResourceObj::C_HEAP) ConstantIntValue(2);
+LocationValue*         _illegal_value = new (ResourceObj::C_HEAP) LocationValue(Location());
 
 void LinearScan::init_compute_debug_info() {
   // cache for frequently used scope values
@@ -2508,7 +2511,7 @@
     case T_OBJECT: {
       jobject value = c->as_jobject();
       if (value == NULL) {
-        scope_values->append(&_oop_null_scope_value);
+        scope_values->append(_oop_null_scope_value);
       } else {
         scope_values->append(new ConstantOopWriteValue(c->as_jobject()));
       }
@@ -2519,10 +2522,10 @@
     case T_FLOAT: {
       int value = c->as_jint_bits();
       switch (value) {
-        case -1: scope_values->append(&_int_m1_scope_value); break;
-        case 0:  scope_values->append(&_int_0_scope_value); break;
-        case 1:  scope_values->append(&_int_1_scope_value); break;
-        case 2:  scope_values->append(&_int_2_scope_value); break;
+        case -1: scope_values->append(_int_m1_scope_value); break;
+        case 0:  scope_values->append(_int_0_scope_value); break;
+        case 1:  scope_values->append(_int_1_scope_value); break;
+        case 2:  scope_values->append(_int_2_scope_value); break;
         default: scope_values->append(new ConstantIntValue(c->as_jint_bits())); break;
       }
       return 1;
@@ -2531,7 +2534,7 @@
     case T_LONG: // fall through
     case T_DOUBLE: {
 #ifdef _LP64
-      scope_values->append(&_int_0_scope_value);
+      scope_values->append(_int_0_scope_value);
       scope_values->append(new ConstantLongValue(c->as_jlong_bits()));
 #else
       if (hi_word_offset_in_bytes > lo_word_offset_in_bytes) {
@@ -2657,7 +2660,7 @@
       }
       // Does this reverse on x86 vs. sparc?
       first =  new LocationValue(loc1);
-      second = &_int_0_scope_value;
+      second = _int_0_scope_value;
 #else
       Location loc1, loc2;
       if (!frame_map()->locations_for_slot(opr->double_stack_ix(), Location::normal, &loc1, &loc2)) {
@@ -2671,7 +2674,7 @@
 #ifdef _LP64
       VMReg rname_first = opr->as_register_lo()->as_VMReg();
       first = new LocationValue(Location::new_reg_loc(Location::lng, rname_first));
-      second = &_int_0_scope_value;
+      second = _int_0_scope_value;
 #else
       VMReg rname_first = opr->as_register_lo()->as_VMReg();
       VMReg rname_second = opr->as_register_hi()->as_VMReg();
@@ -2694,7 +2697,7 @@
       VMReg rname_first  = opr->as_xmm_double_reg()->as_VMReg();
 #  ifdef _LP64
       first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
-      second = &_int_0_scope_value;
+      second = _int_0_scope_value;
 #  else
       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
       // %%% This is probably a waste but we'll keep things as they were for now
@@ -2741,7 +2744,7 @@
 
 #ifdef _LP64
       first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
-      second = &_int_0_scope_value;
+      second = _int_0_scope_value;
 #else
       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
       // %%% This is probably a waste but we'll keep things as they were for now
@@ -2822,7 +2825,7 @@
     }
   } else {
     // append a dummy value because real value not needed
-    scope_values->append(&_illegal_value);
+    scope_values->append(_illegal_value);
     return 1;
   }
 }
@@ -2865,7 +2868,7 @@
     nof_locals = cur_scope->method()->max_locals();
     locals = new GrowableArray<ScopeValue*>(nof_locals);
     for(int i = 0; i < nof_locals; i++) {
-      locals->append(&_illegal_value);
+      locals->append(_illegal_value);
     }
   }
 
--- a/src/share/vm/c1/c1_LinearScan.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_LinearScan.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -160,11 +160,11 @@
   // TODO: cached scope values for registers could be static
   ScopeValueArray           _scope_value_cache;
 
-  static ConstantOopWriteValue _oop_null_scope_value;
-  static ConstantIntValue    _int_m1_scope_value;
-  static ConstantIntValue    _int_0_scope_value;
-  static ConstantIntValue    _int_1_scope_value;
-  static ConstantIntValue    _int_2_scope_value;
+  static ConstantOopWriteValue* _oop_null_scope_value;
+  static ConstantIntValue*    _int_m1_scope_value;
+  static ConstantIntValue*    _int_0_scope_value;
+  static ConstantIntValue*    _int_1_scope_value;
+  static ConstantIntValue*    _int_2_scope_value;
 
   // accessors
   IR*           ir() const                       { return _ir; }
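
The two LinearScan hunks above replace statically constructed scope-value objects with pointers allocated via new (ResourceObj::C_HEAP), so a forced VM exit can never run their destructors while debug info is still being emitted. A tiny sketch of the same allocate-once-never-destroy idiom in plain C++ (ConstantIntValue here is a stand-in, not the HotSpot class):

// Sketch of the "allocate with new so it is never destroyed" idiom.
struct ConstantIntValue {
  int value;
  explicit ConstantIntValue(int v) : value(v) {}
};

// Objects created with new and never deleted are untouched by static
// destruction, so shutdown-time code can still use them safely.
static ConstantIntValue* int_0_scope_value() {
  static ConstantIntValue* v = new ConstantIntValue(0);
  return v;
}

int main() { return int_0_scope_value()->value; }
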
--- a/src/share/vm/c1/c1_Optimizer.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_Optimizer.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -509,6 +509,7 @@
   void do_ProfileCall    (ProfileCall*     x);
   void do_ProfileInvoke  (ProfileInvoke*   x);
   void do_RuntimeCall    (RuntimeCall*     x);
+  void do_MemBar         (MemBar*          x);
 };
 
 
@@ -678,6 +679,7 @@
 void NullCheckVisitor::do_ProfileCall    (ProfileCall*     x) { nce()->clear_last_explicit_null_check(); }
 void NullCheckVisitor::do_ProfileInvoke  (ProfileInvoke*   x) {}
 void NullCheckVisitor::do_RuntimeCall    (RuntimeCall*     x) {}
+void NullCheckVisitor::do_MemBar         (MemBar*          x) {}
 
 
 void NullCheckEliminator::visit(Value* p) {
--- a/src/share/vm/c1/c1_Runtime1.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -413,8 +413,9 @@
     }
     bci = branch_bci + offset;
   }
-
+  assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
   osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
+  assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");
   return osr_nm;
 }
 
@@ -596,7 +597,6 @@
 
 JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* thread, int index))
   NOT_PRODUCT(_throw_range_check_exception_count++;)
-  Events::log("throw_range_check");
   char message[jintAsStringSize];
   sprintf(message, "%d", index);
   SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
@@ -605,7 +605,6 @@
 
 JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* thread, int index))
   NOT_PRODUCT(_throw_index_exception_count++;)
-  Events::log("throw_index");
   char message[16];
   sprintf(message, "%d", index);
   SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
@@ -803,11 +802,7 @@
   // Note also that in the presence of inlining it is not guaranteed
   // that caller_method() == caller_code->method()
 
-
   int bci = vfst.bci();
-
-  Events::log("patch_code @ " INTPTR_FORMAT , caller_frame.pc());
-
   Bytecodes::Code code = caller_method()->java_code_at(bci);
 
 #ifndef PRODUCT
--- a/src/share/vm/c1/c1_ValueMap.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_ValueMap.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -125,6 +125,7 @@
             // otherwise it is possible that they are not evaluated
             f->pin(Instruction::PinGlobalValueNumbering);
           }
+          assert(x->type()->tag() == f->type()->tag(), "should have same type");
 
           return f;
 
--- a/src/share/vm/c1/c1_ValueMap.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/c1/c1_ValueMap.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -200,6 +200,7 @@
   void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ }
   void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ };
   void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
+  void do_MemBar         (MemBar*          x) { /* nothing to do */ };
 };
 
 
--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -359,7 +359,7 @@
       case Bytecodes::_nop:
         break;
       case Bytecodes::_aconst_null:
-        state.apush(empty_map);
+        state.apush(unknown_obj);
         break;
       case Bytecodes::_iconst_m1:
       case Bytecodes::_iconst_0:
@@ -392,6 +392,8 @@
         if (tag.is_long() || tag.is_double()) {
           // Only longs and doubles use 2 stack slots.
           state.lpush();
+        } else if (tag.basic_type() == T_OBJECT) {
+          state.apush(unknown_obj);
         } else {
           state.spush();
         }
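
Both bcEscapeAnalyzer fixes above concern the analyzer's abstract operand stack: aconst_null and an ldc of a reference constant must push an object slot (conservatively unknown_obj), not a plain value slot, or the later reference bookkeeping drifts out of sync. A simplified sketch of that distinction, with illustrative types rather than the real state machinery:

#include <vector>
#include <cstdio>

enum SlotKind { kValue, kUnknownObject };

struct AbstractStack {
  std::vector<SlotKind> slots;
  void spush() { slots.push_back(kValue); }          // int/float-sized constant
  void apush() { slots.push_back(kUnknownObject); }  // any reference, including null
};

// Push a constant-pool entry: reference constants go on as object slots.
void push_constant(AbstractStack& s, bool is_reference) {
  if (is_reference) s.apush(); else s.spush();
}

int main() {
  AbstractStack s;
  push_constant(s, true);   // aconst_null, or ldc of a String/Class constant
  push_constant(s, false);  // iconst_0
  std::printf("%zu slots\n", s.slots.size());
}
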
--- a/src/share/vm/ci/ciEnv.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/ci/ciEnv.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -284,6 +284,20 @@
   // Return state of appropriate compilability
   int compilable() { return _compilable; }
 
+  const char* retry_message() const {
+    switch (_compilable) {
+      case ciEnv::MethodCompilable_not_at_tier:
+        return "retry at different tier";
+      case ciEnv::MethodCompilable_never:
+        return "not retryable";
+      case ciEnv::MethodCompilable:
+        return NULL;
+      default:
+        ShouldNotReachHere();
+        return NULL;
+    }
+  }
+
   bool break_at_compile() { return _break_at_compile; }
   void set_break_at_compile(bool z) { _break_at_compile = z; }
 
--- a/src/share/vm/classfile/classFileParser.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/classfile/classFileParser.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -2315,13 +2315,32 @@
 #define RECOGNIZED_INNER_CLASS_MODIFIERS (JVM_RECOGNIZED_CLASS_MODIFIERS | JVM_ACC_PRIVATE | JVM_ACC_PROTECTED | JVM_ACC_STATIC)
 
 // Return number of classes in the inner classes attribute table
-u2 ClassFileParser::parse_classfile_inner_classes_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS) {
+u2 ClassFileParser::parse_classfile_inner_classes_attribute(u1* inner_classes_attribute_start,
+                                                            bool parsed_enclosingmethod_attribute,
+                                                            u2 enclosing_method_class_index,
+                                                            u2 enclosing_method_method_index,
+                                                            constantPoolHandle cp,
+                                                            instanceKlassHandle k, TRAPS) {
   ClassFileStream* cfs = stream();
-  cfs->guarantee_more(2, CHECK_0);  // length
-  u2 length = cfs->get_u2_fast();
-
-  // 4-tuples of shorts [inner_class_info_index, outer_class_info_index, inner_name_index, inner_class_access_flags]
-  typeArrayOop ic = oopFactory::new_permanent_shortArray(length*4, CHECK_0);
+  u1* current_mark = cfs->current();
+  u2 length = 0;
+  if (inner_classes_attribute_start != NULL) {
+    cfs->set_current(inner_classes_attribute_start);
+    cfs->guarantee_more(2, CHECK_0);  // length
+    length = cfs->get_u2_fast();
+  }
+
+  // 4-tuples of shorts of inner classes data and 2 shorts of enclosing
+  // method data:
+  //   [inner_class_info_index,
+  //    outer_class_info_index,
+  //    inner_name_index,
+  //    inner_class_access_flags,
+  //    ...
+  //    enclosing_method_class_index,
+  //    enclosing_method_method_index]
+  int size = length * 4 + (parsed_enclosingmethod_attribute ? 2 : 0);
+  typeArrayOop ic = oopFactory::new_permanent_shortArray(size, CHECK_0);
   typeArrayHandle inner_classes(THREAD, ic);
   int index = 0;
   int cp_size = cp->length();
@@ -2372,8 +2391,8 @@
 
   // 4347400: make sure there's no duplicate entry in the classes array
   if (_need_verify && _major_version >= JAVA_1_5_VERSION) {
-    for(int i = 0; i < inner_classes->length(); i += 4) {
-      for(int j = i + 4; j < inner_classes->length(); j += 4) {
+    for(int i = 0; i < length * 4; i += 4) {
+      for(int j = i + 4; j < length * 4; j += 4) {
         guarantee_property((inner_classes->ushort_at(i)   != inner_classes->ushort_at(j) ||
                             inner_classes->ushort_at(i+1) != inner_classes->ushort_at(j+1) ||
                             inner_classes->ushort_at(i+2) != inner_classes->ushort_at(j+2) ||
@@ -2384,8 +2403,19 @@
     }
   }
 
+  // Set EnclosingMethod class and method indexes.
+  if (parsed_enclosingmethod_attribute) {
+    inner_classes->short_at_put(index++, enclosing_method_class_index);
+    inner_classes->short_at_put(index++, enclosing_method_method_index);
+  }
+  assert(index == size, "wrong size");
+
   // Update instanceKlass with inner class info.
   k->set_inner_classes(inner_classes());
+
+  // Restore buffer's current position.
+  cfs->set_current(current_mark);
+
   return length;
 }
 
@@ -2490,6 +2520,10 @@
   int runtime_visible_annotations_length = 0;
   u1* runtime_invisible_annotations = NULL;
   int runtime_invisible_annotations_length = 0;
+  u1* inner_classes_attribute_start = NULL;
+  u4  inner_classes_attribute_length = 0;
+  u2  enclosing_method_class_index = 0;
+  u2  enclosing_method_method_index = 0;
   // Iterate over attributes
   while (attributes_count--) {
     cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
@@ -2522,11 +2556,9 @@
       } else {
         parsed_innerclasses_attribute = true;
       }
-      u2 num_of_classes = parse_classfile_inner_classes_attribute(cp, k, CHECK);
-      if (_need_verify && _major_version >= JAVA_1_5_VERSION) {
-        guarantee_property(attribute_length == sizeof(num_of_classes) + 4 * sizeof(u2) * num_of_classes,
-                          "Wrong InnerClasses attribute length in class file %s", CHECK);
-      }
+      inner_classes_attribute_start = cfs->get_u1_buffer();
+      inner_classes_attribute_length = attribute_length;
+      cfs->skip_u1(inner_classes_attribute_length, CHECK);
     } else if (tag == vmSymbols::tag_synthetic()) {
       // Check for Synthetic tag
       // Shouldn't we check that the synthetic flags wasn't already set? - not required in spec
@@ -2568,22 +2600,21 @@
           parsed_enclosingmethod_attribute = true;
         }
         cfs->guarantee_more(4, CHECK);  // class_index, method_index
-        u2 class_index  = cfs->get_u2_fast();
-        u2 method_index = cfs->get_u2_fast();
-        if (class_index == 0) {
+        enclosing_method_class_index  = cfs->get_u2_fast();
+        enclosing_method_method_index = cfs->get_u2_fast();
+        if (enclosing_method_class_index == 0) {
           classfile_parse_error("Invalid class index in EnclosingMethod attribute in class file %s", CHECK);
         }
         // Validate the constant pool indices and types
-        if (!cp->is_within_bounds(class_index) ||
-            !is_klass_reference(cp, class_index)) {
+        if (!cp->is_within_bounds(enclosing_method_class_index) ||
+            !is_klass_reference(cp, enclosing_method_class_index)) {
           classfile_parse_error("Invalid or out-of-bounds class index in EnclosingMethod attribute in class file %s", CHECK);
         }
-        if (method_index != 0 &&
-            (!cp->is_within_bounds(method_index) ||
-             !cp->tag_at(method_index).is_name_and_type())) {
+        if (enclosing_method_method_index != 0 &&
+            (!cp->is_within_bounds(enclosing_method_method_index) ||
+             !cp->tag_at(enclosing_method_method_index).is_name_and_type())) {
           classfile_parse_error("Invalid or out-of-bounds method index in EnclosingMethod attribute in class file %s", CHECK);
         }
-        k->set_enclosing_method_indices(class_index, method_index);
       } else if (tag == vmSymbols::tag_bootstrap_methods() &&
                  _major_version >= Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
         if (parsed_bootstrap_methods_attribute)
@@ -2606,6 +2637,20 @@
                                                      CHECK);
   k->set_class_annotations(annotations());
 
+  if (parsed_innerclasses_attribute || parsed_enclosingmethod_attribute) {
+    u2 num_of_classes = parse_classfile_inner_classes_attribute(
+                            inner_classes_attribute_start,
+                            parsed_innerclasses_attribute,
+                            enclosing_method_class_index,
+                            enclosing_method_method_index,
+                            cp, k, CHECK);
+    if (parsed_innerclasses_attribute &&_need_verify && _major_version >= JAVA_1_5_VERSION) {
+      guarantee_property(
+        inner_classes_attribute_length == sizeof(num_of_classes) + 4 * sizeof(u2) * num_of_classes,
+        "Wrong InnerClasses attribute length in class file %s", CHECK);
+    }
+  }
+
   if (_max_bootstrap_specifier_index >= 0) {
     guarantee_property(parsed_bootstrap_methods_attribute,
                        "Missing BootstrapMethods attribute in class file %s", CHECK);
--- a/src/share/vm/classfile/classFileParser.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/classfile/classFileParser.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -130,7 +130,11 @@
   void parse_classfile_sourcefile_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
   void parse_classfile_source_debug_extension_attribute(constantPoolHandle cp,
                                                 instanceKlassHandle k, int length, TRAPS);
-  u2   parse_classfile_inner_classes_attribute(constantPoolHandle cp,
+  u2   parse_classfile_inner_classes_attribute(u1* inner_classes_attribute_start,
+                                               bool parsed_enclosingmethod_attribute,
+                                               u2 enclosing_method_class_index,
+                                               u2 enclosing_method_method_index,
+                                               constantPoolHandle cp,
                                                instanceKlassHandle k, TRAPS);
   void parse_classfile_attributes(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
   void parse_classfile_synthetic_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
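
The new parse_classfile_inner_classes_attribute signature above matches the classFileParser.cpp rework: the parser now defers the InnerClasses body (recording only its start and length) and emits a single short array holding the 4-tuples plus, when an EnclosingMethod attribute was seen, its two indices at the end. A small sketch of that combined layout, where u2 and std::vector stand in for the typeArrayOop of shorts:

#include <vector>
#include <cassert>

typedef unsigned short u2;

// Build the combined array: 4 shorts per inner class, then (optionally) the
// EnclosingMethod class/method indices as the trailing pair.
std::vector<u2> build_inner_classes_array(const std::vector<u2>& tuples,
                                          bool has_enclosing_method,
                                          u2 enclosing_class_index,
                                          u2 enclosing_method_index) {
  assert(tuples.size() % 4 == 0);   // [info, outer, name, flags] per entry
  std::vector<u2> out(tuples);
  if (has_enclosing_method) {
    out.push_back(enclosing_class_index);
    out.push_back(enclosing_method_index);
  }
  return out;
}

int main() {
  std::vector<u2> one_entry = {5, 0, 7, 0x0008};   // one inner class
  std::vector<u2> combined = build_inner_classes_array(one_entry, true, 12, 13);
  assert(combined.size() == 6);                    // 4 tuple shorts + 2 trailing indices
  return 0;
}
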
--- a/src/share/vm/classfile/dictionary.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/classfile/dictionary.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -618,7 +618,8 @@
   ResourceMark rm;
   HandleMark   hm;
 
-  tty->print_cr("Java system dictionary (classes=%d)", number_of_entries());
+  tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
+                 table_size(), number_of_entries());
   tty->print_cr("^ indicates that initiating loader is different from "
                 "defining loader");
 
--- a/src/share/vm/classfile/javaClasses.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/classfile/javaClasses.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1347,7 +1347,13 @@
     return _backtrace();
   }
 
-  inline void push(methodOop method, short bci, TRAPS) {
+  inline void push(methodOop method, int bci, TRAPS) {
+    // Smear the -1 bci to 0 since the array only holds unsigned
+    // shorts.  The later line number lookup would just smear the -1
+    // to a 0 even if it could be recorded.
+    if (bci == SynchronizationEntryBCI) bci = 0;
+    assert(bci == (jushort)bci, "doesn't fit");
+
     if (_index >= trace_chunk_size) {
       methodHandle mhandle(THREAD, method);
       expand(CHECK);
@@ -1574,8 +1580,13 @@
   int chunk_count = 0;
 
   for (;!st.at_end(); st.next()) {
-    // add element
-    bcis->ushort_at_put(chunk_count, st.bci());
+    // Add entry and smear the -1 bci to 0 since the array only holds
+    // unsigned shorts.  The later line number lookup would just smear
+    // the -1 to a 0 even if it could be recorded.
+    int bci = st.bci();
+    if (bci == SynchronizationEntryBCI) bci = 0;
+    assert(bci == (jushort)bci, "doesn't fit");
+    bcis->ushort_at_put(chunk_count, bci);
     methods->obj_at_put(chunk_count, st.method());
 
     chunk_count++;
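
Both javaClasses.cpp hunks above clamp the synthetic -1 bci (SynchronizationEntryBCI) to 0 before storing it, because the backtrace bci array only holds unsigned shorts. A one-function sketch of that smearing, assuming the HotSpot convention that SynchronizationEntryBCI is -1:

#include <cassert>
typedef unsigned short jushort;
const int SynchronizationEntryBCI = -1;   // mirrors the HotSpot constant

// Map the synthetic -1 bci to 0 so it fits in the unsigned short array;
// a later line-number lookup would smear it to 0 anyway.
jushort smear_bci(int bci) {
  if (bci == SynchronizationEntryBCI) bci = 0;
  assert(bci == (jushort)bci && "doesn't fit");
  return (jushort)bci;
}

int main() { return smear_bci(SynchronizationEntryBCI); }  // returns 0
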
--- a/src/share/vm/classfile/systemDictionary.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/classfile/systemDictionary.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,9 @@
 
 
 int         SystemDictionary::_number_of_modifications = 0;
+int         SystemDictionary::_sdgeneration               = 0;
+const int   SystemDictionary::_primelist[_prime_array_size] = {1009,2017,4049,5051,10103,
+              20201,40423,99991};
 
 oop         SystemDictionary::_system_loader_lock_obj     =  NULL;
 
@@ -1178,8 +1181,8 @@
 
 klassOop SystemDictionary::find_shared_class(Symbol* class_name) {
   if (shared_dictionary() != NULL) {
-    unsigned int d_hash = dictionary()->compute_hash(class_name, Handle());
-    int d_index = dictionary()->hash_to_index(d_hash);
+    unsigned int d_hash = shared_dictionary()->compute_hash(class_name, Handle());
+    int d_index = shared_dictionary()->hash_to_index(d_hash);
     return shared_dictionary()->find_shared_class(d_index, d_hash, class_name);
   } else {
     return NULL;
@@ -1750,7 +1753,21 @@
   placeholders()->oops_do(blk);
 }
 
-
+// Calculate a "good" systemdictionary size based
+// on predicted or current loaded classes count
+int SystemDictionary::calculate_systemdictionary_size(int classcount) {
+  int newsize = _old_default_sdsize;
+  if ((classcount > 0)  && !DumpSharedSpaces) {
+    int desiredsize = classcount/_average_depth_goal;
+    for (newsize = _primelist[_sdgeneration]; _sdgeneration < _prime_array_size -1;
+         newsize = _primelist[++_sdgeneration]) {
+      if (desiredsize <=  newsize) {
+        break;
+      }
+    }
+  }
+  return newsize;
+}
 bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
   bool result = dictionary()->do_unloading(is_alive);
   constraints()->purge_loader_constraints(is_alive);
@@ -1873,7 +1890,8 @@
   // Allocate arrays
   assert(dictionary() == NULL,
          "SystemDictionary should only be initialized once");
-  _dictionary          = new Dictionary(_nof_buckets);
+  _sdgeneration        = 0;
+  _dictionary          = new Dictionary(calculate_systemdictionary_size(PredictedLoadedClassCount));
   _placeholders        = new PlaceholderTable(_nof_buckets);
   _number_of_modifications = 0;
   _loader_constraints  = new LoaderConstraintTable(_loader_constraint_size);
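
calculate_systemdictionary_size() above walks a fixed list of primes until it finds the first one large enough to keep the expected bucket chain length at or below the depth goal, falling back to the old default of 1009. A standalone sketch of that selection, simplified to drop the _sdgeneration bookkeeping and the DumpSharedSpaces check:

#include <cstdio>

// Constants mirror the values added to systemDictionary.hpp below.
const int prime_array_size   = 8;
const int primelist[prime_array_size] = {1009, 2017, 4049, 5051, 10103,
                                         20201, 40423, 99991};
const int old_default_sdsize = 1009;
const int average_depth_goal = 3;

int calculate_size(int classcount) {
  int newsize = old_default_sdsize;
  if (classcount > 0) {
    int desired = classcount / average_depth_goal;
    for (int g = 0; g < prime_array_size; ++g) {
      newsize = primelist[g];
      if (desired <= newsize) break;   // first prime big enough for the goal
    }
  }
  return newsize;
}

int main() { std::printf("%d\n", calculate_size(30000)); }  // prints 10103
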
--- a/src/share/vm/classfile/systemDictionary.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/classfile/systemDictionary.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -346,6 +346,8 @@
   // loaders.  Returns "true" iff something was unloaded.
   static bool do_unloading(BoolObjectClosure* is_alive);
 
+  static int calculate_systemdictionary_size(int loadedclasses);
+
   // Applies "f->do_oop" to all root oops in the system dictionary.
   static void oops_do(OopClosure* f);
 
@@ -538,12 +540,20 @@
     _loader_constraint_size = 107,                     // number of entries in constraint table
     _resolution_error_size  = 107,                     // number of entries in resolution error table
     _invoke_method_size     = 139,                     // number of entries in invoke method table
-    _nof_buckets            = 1009                     // number of buckets in hash table
+    _nof_buckets            = 1009,                    // number of buckets in hash table for placeholders
+    _old_default_sdsize     = 1009,                    // backward compat for system dictionary size
+    _prime_array_size       = 8,                       // array of primes for system dictionary size
+    _average_depth_goal     = 3                        // goal for lookup length
   };
 
 
   // Static variables
 
+  // hashtable sizes for system dictionary to allow growth
+  // prime numbers for system dictionary size
+  static int                     _sdgeneration;
+  static const int               _primelist[_prime_array_size];
+
   // Hashtable holding loaded classes.
   static Dictionary*            _dictionary;
 
--- a/src/share/vm/classfile/vmSymbols.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/classfile/vmSymbols.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -284,6 +284,7 @@
   template(run_method_name,                           "run")                                      \
   template(exit_method_name,                          "exit")                                     \
   template(add_method_name,                           "add")                                      \
+  template(remove_method_name,                        "remove")                                   \
   template(parent_name,                               "parent")                                   \
   template(threads_name,                              "threads")                                  \
   template(groups_name,                               "groups")                                   \
@@ -508,6 +509,9 @@
   template(clear_name,                                 "clear")                                                   \
   template(trigger_method_signature,                   "(ILjava/lang/management/MemoryUsage;)V")                                                 \
   template(startAgent_name,                            "startAgent")                                              \
+  template(startRemoteAgent_name,                      "startRemoteManagementAgent")                              \
+  template(startLocalAgent_name,                       "startLocalManagementAgent")                               \
+  template(stopRemoteAgent_name,                       "stopRemoteManagementAgent")                               \
   template(java_lang_management_ThreadInfo_constructor_signature, "(Ljava/lang/Thread;ILjava/lang/Object;Ljava/lang/Thread;JJJJ[Ljava/lang/StackTraceElement;)V") \
   template(java_lang_management_ThreadInfo_with_locks_constructor_signature, "(Ljava/lang/Thread;ILjava/lang/Object;Ljava/lang/Thread;JJJJ[Ljava/lang/StackTraceElement;[Ljava/lang/Object;[I[Ljava/lang/Object;)V") \
   template(long_long_long_long_void_signature,         "(JJJJ)V")                                                 \
--- a/src/share/vm/code/compiledIC.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/code/compiledIC.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -165,7 +165,6 @@
                    instruction_address(), method->print_value_string(), entry);
   }
 
-  Events::log("compiledIC " INTPTR_FORMAT " --> megamorphic " INTPTR_FORMAT, this, (address)method());
   // We can't check this anymore. With lazy deopt we could have already
   // cleaned this IC entry before we even return. This is possible if
   // we ran out of space in the inline cache buffer trying to do the
--- a/src/share/vm/code/nmethod.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/code/nmethod.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -462,6 +462,7 @@
   _speculatively_disconnected = 0;
   _has_unsafe_access          = 0;
   _has_method_handle_invokes  = 0;
+  _lazy_critical_native       = 0;
   _marked_for_deoptimization  = 0;
   _lock_count                 = 0;
   _stack_traversal_mark       = 0;
@@ -704,7 +705,6 @@
       xtty->tail("print_native_nmethod");
     }
   }
-  Events::log("Create nmethod " INTPTR_FORMAT, this);
 }
 
 // For dtrace wrappers
@@ -781,7 +781,6 @@
       xtty->tail("print_dtrace_nmethod");
     }
   }
-  Events::log("Create nmethod " INTPTR_FORMAT, this);
 }
 #endif // def HAVE_DTRACE_H
 
@@ -889,13 +888,6 @@
   if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
     print_nmethod(printnmethods);
   }
-
-  // Note: Do not verify in here as the CodeCache_lock is
-  //       taken which would conflict with the CompiledIC_lock
-  //       which taken during the verification of call sites.
-  //       (was bug - gri 10/25/99)
-
-  Events::log("Create nmethod " INTPTR_FORMAT, this);
 }
 
 
@@ -1386,7 +1378,7 @@
   assert_locked_or_safepoint(CodeCache_lock);
 
   // completely deallocate this method
-  EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
+  Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
   if (PrintMethodFlushing) {
     tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
         _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
--- a/src/share/vm/code/nmethod.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/code/nmethod.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -175,6 +175,7 @@
   // set during construction
   unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
   unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
+  unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
 
   // Protected by Patching_lock
   unsigned char _state;                      // {alive, not_entrant, zombie, unloaded}
@@ -430,7 +431,10 @@
   void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }
 
   bool  is_speculatively_disconnected() const     { return _speculatively_disconnected; }
-  void  set_speculatively_disconnected(bool z)     { _speculatively_disconnected = z; }
+  void  set_speculatively_disconnected(bool z)    { _speculatively_disconnected = z; }
+
+  bool  is_lazy_critical_native() const           { return _lazy_critical_native; }
+  void  set_lazy_critical_native(bool z)          { _lazy_critical_native = z; }
 
   int   comp_level() const                        { return _comp_level; }
 
--- a/src/share/vm/compiler/compileBroker.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/compiler/compileBroker.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/sweeper.hpp"
 #include "utilities/dtrace.hpp"
+#include "utilities/events.hpp"
 #ifdef COMPILER1
 #include "c1/c1_Compiler.hpp"
 #endif
@@ -189,6 +190,44 @@
 GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
 
 
+class CompilationLog : public StringEventLog {
+ public:
+  CompilationLog() : StringEventLog("Compilation events") {
+  }
+
+  void log_compile(JavaThread* thread, CompileTask* task) {
+    StringLogMessage lm;
+    stringStream msg = lm.stream();
+    // msg.time_stamp().update_to(tty->time_stamp().ticks());
+    task->print_compilation(&msg, true);
+    log(thread, "%s", (const char*)lm);
+  }
+
+  void log_nmethod(JavaThread* thread, nmethod* nm) {
+    log(thread, "nmethod %d%s " INTPTR_FORMAT " code ["INTPTR_FORMAT ", " INTPTR_FORMAT "]",
+        nm->compile_id(), nm->is_osr_method() ? "%" : "",
+        nm, nm->code_begin(), nm->code_end());
+  }
+
+  void log_failure(JavaThread* thread, CompileTask* task, const char* reason, const char* retry_message) {
+    StringLogMessage lm;
+    lm.print("%4d   COMPILE SKIPPED: %s", task->compile_id(), reason);
+    if (retry_message != NULL) {
+      lm.append(" (%s)", retry_message);
+    }
+    lm.print("\n");
+    log(thread, "%s", (const char*)lm);
+  }
+};
+
+static CompilationLog* _compilation_log = NULL;
+
+void compileBroker_init() {
+  if (LogEvents) {
+    _compilation_log = new CompilationLog();
+  }
+}
+
 CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) {
   CompilerThread* thread = CompilerThread::current();
   thread->set_task(task);
@@ -326,8 +365,12 @@
 
 // ------------------------------------------------------------------
 // CompileTask::print_compilation_impl
-void CompileTask::print_compilation_impl(outputStream* st, methodOop method, int compile_id, int comp_level, bool is_osr_method, int osr_bci, bool is_blocking, const char* msg) {
-  st->print("%7d ", (int) st->time_stamp().milliseconds());  // print timestamp
+void CompileTask::print_compilation_impl(outputStream* st, methodOop method, int compile_id, int comp_level,
+                                         bool is_osr_method, int osr_bci, bool is_blocking,
+                                         const char* msg, bool short_form) {
+  if (!short_form) {
+    st->print("%7d ", (int) st->time_stamp().milliseconds());  // print timestamp
+  }
   st->print("%4d ", compile_id);    // print compilation number
 
   // For unloaded methods the transition to zombie occurs after the
@@ -370,7 +413,9 @@
   if (msg != NULL) {
     st->print("   %s", msg);
   }
-  st->cr();
+  if (!short_form) {
+    st->cr();
+  }
 }
 
 // ------------------------------------------------------------------
@@ -426,12 +471,12 @@
 
 // ------------------------------------------------------------------
 // CompileTask::print_compilation
-void CompileTask::print_compilation(outputStream* st) {
+void CompileTask::print_compilation(outputStream* st, bool short_form) {
   oop rem = JNIHandles::resolve(method_handle());
   assert(rem != NULL && rem->is_method(), "must be");
   methodOop method = (methodOop) rem;
   bool is_osr_method = osr_bci() != InvocationEntryBci;
-  print_compilation_impl(st, method, compile_id(), comp_level(), is_osr_method, osr_bci(), is_blocking());
+  print_compilation_impl(st, method, compile_id(), comp_level(), is_osr_method, osr_bci(), is_blocking(), NULL, short_form);
 }
 
 // ------------------------------------------------------------------
@@ -855,23 +900,23 @@
     // Note that this only sets the JavaThread _priority field, which by
     // definition is limited to Java priorities and not OS priorities.
     // The os-priority is set in the CompilerThread startup code itself
+
     java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
-    // CLEANUP PRIORITIES: This -if- statement hids a bug whereby the compiler
-    // threads never have their OS priority set.  The assumption here is to
-    // enable the Performance group to do flag tuning, figure out a suitable
-    // CompilerThreadPriority, and then remove this 'if' statement (and
-    // comment) and unconditionally set the priority.
+
+    // Note that we cannot call os::set_priority because it expects Java
+    // priorities and we are *explicitly* using OS priorities so that it's
+    // possible to set the compiler thread priority higher than any Java
+    // thread.
 
-    // Compiler Threads should be at the highest Priority
-    if ( CompilerThreadPriority != -1 )
-      os::set_native_priority( compiler_thread, CompilerThreadPriority );
-    else
-      os::set_native_priority( compiler_thread, os::java_to_os_priority[NearMaxPriority]);
-
-      // Note that I cannot call os::set_priority because it expects Java
-      // priorities and I am *explicitly* using OS priorities so that it's
-      // possible to set the compiler thread priority higher than any Java
-      // thread.
+    int native_prio = CompilerThreadPriority;
+    if (native_prio == -1) {
+      if (UseCriticalCompilerThreadPriority) {
+        native_prio = os::java_to_os_priority[CriticalPriority];
+      } else {
+        native_prio = os::java_to_os_priority[NearMaxPriority];
+      }
+    }
+    os::set_native_priority(compiler_thread, native_prio);
 
     java_lang_Thread::set_daemon(thread_oop());
 
@@ -879,6 +924,7 @@
     Threads::add(compiler_thread);
     Thread::start(compiler_thread);
   }
+
   // Let go of Threads_lock before yielding
   os::yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
 
@@ -961,7 +1007,7 @@
                                         methodHandle hot_method,
                                         int hot_count,
                                         const char* comment,
-                                        TRAPS) {
+                                        Thread* thread) {
   // do nothing if compiler thread(s) is not available
   if (!_initialized ) {
     return;
@@ -1037,7 +1083,7 @@
 
   // Acquire our lock.
   {
-    MutexLocker locker(queue->lock(), THREAD);
+    MutexLocker locker(queue->lock(), thread);
 
     // Make sure the method has not slipped into the queues since
     // last we checked; note that those checks were "fast bail-outs".
@@ -1119,7 +1165,7 @@
 nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
                                        int comp_level,
                                        methodHandle hot_method, int hot_count,
-                                       const char* comment, TRAPS) {
+                                       const char* comment, Thread* THREAD) {
   // make sure arguments make sense
   assert(method->method_holder()->klass_part()->oop_is_instance(), "not an instance method");
   assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
@@ -1173,10 +1219,10 @@
   assert(!HAS_PENDING_EXCEPTION, "No exception should be present");
   // some prerequisites that are compiler specific
   if (compiler(comp_level)->is_c2() || compiler(comp_level)->is_shark()) {
-    method->constants()->resolve_string_constants(CHECK_0);
+    method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NULL);
     // Resolve all classes seen in the signature of the method
     // we are compiling.
-    methodOopDesc::load_signature_classes(method, CHECK_0);
+    methodOopDesc::load_signature_classes(method, CHECK_AND_CLEAR_NULL);
   }
 
   // If the method is native, do the lookup in the thread requesting
@@ -1230,7 +1276,7 @@
       return NULL;
     }
   } else {
-    compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, CHECK_0);
+    compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, THREAD);
   }
 
   // return requested nmethod
@@ -1648,6 +1694,10 @@
   CompilerThread* thread = CompilerThread::current();
   ResourceMark rm(thread);
 
+  if (LogEvents) {
+    _compilation_log->log_compile(thread, task);
+  }
+
   // Common flags.
   uint compile_id = task->compile_id();
   int osr_bci = task->osr_bci();
@@ -1716,22 +1766,30 @@
       ci_env.record_method_not_compilable("compile failed", !TieredCompilation);
     }
 
+    // Copy this bit to the enclosing block:
+    compilable = ci_env.compilable();
+
     if (ci_env.failing()) {
-      // Copy this bit to the enclosing block:
-      compilable = ci_env.compilable();
+      const char* retry_message = ci_env.retry_message();
+      if (_compilation_log != NULL) {
+        _compilation_log->log_failure(thread, task, ci_env.failure_reason(), retry_message);
+      }
       if (PrintCompilation) {
-        const char* reason = ci_env.failure_reason();
-        if (compilable == ciEnv::MethodCompilable_not_at_tier) {
-          tty->print_cr("%4d   COMPILE SKIPPED: %s (retry at different tier)", compile_id, reason);
-        } else if (compilable == ciEnv::MethodCompilable_never) {
-          tty->print_cr("%4d   COMPILE SKIPPED: %s (not retryable)", compile_id, reason);
-        } else if (compilable == ciEnv::MethodCompilable) {
-          tty->print_cr("%4d   COMPILE SKIPPED: %s", compile_id, reason);
+        tty->print("%4d   COMPILE SKIPPED: %s", compile_id, ci_env.failure_reason());
+        if (retry_message != NULL) {
+          tty->print(" (%s)", retry_message);
         }
+        tty->cr();
       }
     } else {
       task->mark_success();
       task->set_num_inlined_bytecodes(ci_env.num_inlined_bytecodes());
+      if (_compilation_log != NULL) {
+        nmethod* code = task->code();
+        if (code != NULL) {
+          _compilation_log->log_nmethod(thread, code);
+        }
+      }
     }
   }
   pop_jni_handle_block();
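
The CompilationLog above records recent compilations, generated nmethods, and bailouts through the events framework so they can be dumped into crash reports. A rough, self-contained sketch of a bounded string event log in that spirit (an illustration only; the real StringEventLog/StringLogMessage API lives in utilities/events.hpp and differs in detail):

#include <cstdio>
#include <cstdarg>
#include <string>
#include <vector>

// Keep the newest N formatted messages in a ring buffer.
class SimpleEventLog {
  std::vector<std::string> _ring;
  size_t _next;
 public:
  explicit SimpleEventLog(size_t capacity) : _ring(capacity), _next(0) {}

  void log(const char* fmt, ...) {
    char buf[256];
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(buf, sizeof(buf), fmt, ap);
    va_end(ap);
    _ring[_next++ % _ring.size()] = buf;   // overwrite the oldest entry
  }

  void dump() const {
    for (size_t i = 0; i < _ring.size(); i++) {
      if (!_ring[i].empty()) std::printf("%s\n", _ring[i].c_str());
    }
  }
};

int main() {
  SimpleEventLog log(4);
  log.log("%4d   COMPILE SKIPPED: %s (%s)", 42, "compile failed", "retry at different tier");
  log.dump();
}
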
--- a/src/share/vm/compiler/compileBroker.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/compiler/compileBroker.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -98,12 +98,16 @@
   void         set_prev(CompileTask* prev)       { _prev = prev; }
 
 private:
-  static void  print_compilation_impl(outputStream* st, methodOop method, int compile_id, int comp_level, bool is_osr_method = false, int osr_bci = -1, bool is_blocking = false, const char* msg = NULL);
+  static void  print_compilation_impl(outputStream* st, methodOop method, int compile_id, int comp_level,
+                                      bool is_osr_method = false, int osr_bci = -1, bool is_blocking = false,
+                                      const char* msg = NULL, bool short_form = false);
 
 public:
-  void         print_compilation(outputStream* st = tty);
+  void         print_compilation(outputStream* st = tty, bool short_form = false);
   static void  print_compilation(outputStream* st, const nmethod* nm, const char* msg = NULL) {
-    print_compilation_impl(st, nm->method(), nm->compile_id(), nm->comp_level(), nm->is_osr_method(), nm->is_osr_method() ? nm->osr_entry_bci() : -1, /*is_blocking*/ false, msg);
+    print_compilation_impl(st, nm->method(), nm->compile_id(), nm->comp_level(),
+                           nm->is_osr_method(), nm->is_osr_method() ? nm->osr_entry_bci() : -1, /*is_blocking*/ false,
+                           msg);
   }
 
   static void  print_inlining(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg = NULL);
@@ -333,7 +337,7 @@
                                   methodHandle hot_method,
                                   int hot_count,
                                   const char* comment,
-                                  TRAPS);
+                                  Thread* thread);
   static CompileQueue* compile_queue(int comp_level) {
     if (is_c2_compile(comp_level)) return _c2_method_queue;
     if (is_c1_compile(comp_level)) return _c1_method_queue;
@@ -363,7 +367,7 @@
                                  int comp_level,
                                  methodHandle hot_method,
                                  int hot_count,
-                                 const char* comment, TRAPS);
+                                 const char* comment, Thread* thread);
 
   static void compiler_thread_loop();
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -5594,6 +5594,7 @@
     GenCollectedHeap::StrongRootsScope srs(gch);
     workers->run_task(&tsk);
   } else {
+    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
     GenCollectedHeap::StrongRootsScope srs(gch);
     tsk.work(0);
   }
@@ -5608,6 +5609,8 @@
   ResourceMark rm;
   HandleMark   hm;
   GenCollectedHeap* gch = GenCollectedHeap::heap();
+  ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
+
   MarkRefsIntoAndScanClosure
     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
              &_markStack, &_revisitStack, this,
@@ -6089,7 +6092,11 @@
   _inter_sweep_timer.reset();
   _inter_sweep_timer.start();
 
-  update_time_of_last_gc(os::javaTimeMillis());
+  // We need to use a monotonically non-decreasing time in ms
+  // or we will see time-warp warnings and os::javaTimeMillis()
+  // does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  update_time_of_last_gc(now);
 
   // NOTE on abstract state transitions:
   // Mutators allocate-live and/or mark the mod-union table dirty
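
The sweep-completion hunk above derives the "time of last GC" from os::javaTimeNanos() rather than os::javaTimeMillis(), because only the former is guaranteed monotonic. A trivial sketch of the conversion, assuming NANOSECS_PER_MILLISEC is 1,000,000 as in HotSpot:

#include <cstdint>

const int64_t NANOSECS_PER_MILLISEC = 1000000;

// Convert a monotonic nanosecond reading to milliseconds; using the wall
// clock here instead could move backwards and trigger time-warp warnings.
int64_t monotonic_millis(int64_t java_time_nanos) {
  return java_time_nanos / NANOSECS_PER_MILLISEC;
}

int main() { return monotonic_millis(5000000) == 5 ? 0 : 1; }
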
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,10 +75,25 @@
   set_name("Concurrent Mark-Sweep GC Thread");
 
   if (os::create_thread(this, os::cgc_thread)) {
-    // XXX: need to set this to low priority
-    // unless "agressive mode" set; priority
-    // should be just less than that of VMThread.
-    os::set_priority(this, NearMaxPriority);
+    // An old comment here said: "Priority should be just less
+    // than that of VMThread".  Since the VMThread runs at
+    // NearMaxPriority, the old comment was inaccurate, but
+    // changing the default priority to NearMaxPriority-1
+    // could change current behavior, so the default of
+    // NearMaxPriority stays in place.
+    //
+    // Note that there's a possibility of the VMThread
+    // starving if UseCriticalCMSThreadPriority is on.
+    // That won't happen on Solaris for various reasons,
+    // but may well happen on non-Solaris platforms.
+    int native_prio;
+    if (UseCriticalCMSThreadPriority) {
+      native_prio = os::java_to_os_priority[CriticalPriority];
+    } else {
+      native_prio = os::java_to_os_priority[NearMaxPriority];
+    }
+    os::set_native_priority(this, native_prio);
+
     if (!DisableStartThread) {
       os::start_thread(this);
     }
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,6 +48,8 @@
 
 #ifndef PRODUCT
 bool CSetChooserCache::verify() {
+  guarantee(false, "CSetChooserCache::verify(): don't call this any more");
+
   int index = _first;
   HeapRegion *prev = NULL;
   for (int i = 0; i < _occupancy; ++i) {
@@ -75,6 +77,8 @@
 #endif // PRODUCT
 
 void CSetChooserCache::insert(HeapRegion *hr) {
+  guarantee(false, "CSetChooserCache::insert(): don't call this any more");
+
   assert(!is_full(), "cache should not be empty");
   hr->calc_gc_efficiency();
 
@@ -104,6 +108,9 @@
 }
 
 HeapRegion *CSetChooserCache::remove_first() {
+  guarantee(false, "CSetChooserCache::remove_first(): "
+                   "don't call this any more");
+
   if (_occupancy > 0) {
     assert(_cache[_first] != NULL, "cache should have at least one region");
     HeapRegion *ret = _cache[_first];
@@ -118,16 +125,35 @@
   }
 }
 
-static inline int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
+// Even though we don't use the GC efficiency in our heuristics as
+// much as we used to, we still order according to GC efficiency. This
+// will cause regions with a lot of live objects and large RSets to
+// end up at the end of the array. Given that we might skip collecting
+// the last few old regions if, after a few mixed GCs, the remaining
+// ones have reclaimable bytes under a certain threshold, the hope is
+// that the regions we skip will be those with both large RSets and a
+// lot of live objects, rather than just the ones with a lot of live
+// objects, which is what we would end up skipping if we ordered by
+// the amount of reclaimable bytes per region.
+static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
   if (hr1 == NULL) {
-    if (hr2 == NULL) return 0;
-    else return 1;
+    if (hr2 == NULL) {
+      return 0;
+    } else {
+      return 1;
+    }
   } else if (hr2 == NULL) {
     return -1;
   }
-  if (hr2->gc_efficiency() < hr1->gc_efficiency()) return -1;
-  else if (hr1->gc_efficiency() < hr2->gc_efficiency()) return 1;
-  else return 0;
+
+  double gc_eff1 = hr1->gc_efficiency();
+  double gc_eff2 = hr2->gc_efficiency();
+  if (gc_eff1 > gc_eff2) {
+    return -1;
+  } else if (gc_eff1 < gc_eff2) {
+    return 1;
+  } else {
+    return 0;
+  }
 }
 
 static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
@@ -151,51 +177,61 @@
   //
   _markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
                                              ResourceObj::C_HEAP),
-                  100),
-                 true),
-  _curMarkedIndex(0),
-  _numMarkedRegions(0),
-  _unmarked_age_1_returned_as_new(false),
-  _first_par_unreserved_idx(0)
-{}
-
-
+                  100), true /* C_Heap */),
+    _curr_index(0), _length(0),
+    _regionLiveThresholdBytes(0), _remainingReclaimableBytes(0),
+    _first_par_unreserved_idx(0) {
+  _regionLiveThresholdBytes =
+    HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100;
+}
 
 #ifndef PRODUCT
 bool CollectionSetChooser::verify() {
+  guarantee(_length >= 0, err_msg("_length: %d", _length));
+  guarantee(0 <= _curr_index && _curr_index <= _length,
+            err_msg("_curr_index: %d _length: %d", _curr_index, _length));
   int index = 0;
-  guarantee(_curMarkedIndex <= _numMarkedRegions,
-            "_curMarkedIndex should be within bounds");
-  while (index < _curMarkedIndex) {
-    guarantee(_markedRegions.at(index++) == NULL,
-              "all entries before _curMarkedIndex should be NULL");
+  size_t sum_of_reclaimable_bytes = 0;
+  while (index < _curr_index) {
+    guarantee(_markedRegions.at(index) == NULL,
+              "all entries before _curr_index should be NULL");
+    index += 1;
   }
   HeapRegion *prev = NULL;
-  while (index < _numMarkedRegions) {
+  while (index < _length) {
     HeapRegion *curr = _markedRegions.at(index++);
     guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
     int si = curr->sort_index();
     guarantee(!curr->is_young(), "should not be young!");
+    guarantee(!curr->isHumongous(), "should not be humongous!");
     guarantee(si > -1 && si == (index-1), "sort index invariant");
     if (prev != NULL) {
-      guarantee(orderRegions(prev, curr) != 1, "regions should be sorted");
+      guarantee(orderRegions(prev, curr) != 1,
+                err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
+                        prev->gc_efficiency(), curr->gc_efficiency()));
     }
+    sum_of_reclaimable_bytes += curr->reclaimable_bytes();
     prev = curr;
   }
-  return _cache.verify();
+  guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes,
+            err_msg("reclaimable bytes inconsistent, "
+                    "remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT,
+                    _remainingReclaimableBytes, sum_of_reclaimable_bytes));
+  return true;
 }
 #endif
 
-void
-CollectionSetChooser::fillCache() {
-  while (!_cache.is_full() && (_curMarkedIndex < _numMarkedRegions)) {
-    HeapRegion* hr = _markedRegions.at(_curMarkedIndex);
+void CollectionSetChooser::fillCache() {
+  guarantee(false, "fillCache: don't call this any more");
+
+  while (!_cache.is_full() && (_curr_index < _length)) {
+    HeapRegion* hr = _markedRegions.at(_curr_index);
     assert(hr != NULL,
            err_msg("Unexpected NULL hr in _markedRegions at index %d",
-                   _curMarkedIndex));
-    _curMarkedIndex += 1;
+                   _curr_index));
+    _curr_index += 1;
     assert(!hr->is_young(), "should not be young!");
-    assert(hr->sort_index() == _curMarkedIndex-1, "sort_index invariant");
+    assert(hr->sort_index() == _curr_index-1, "sort_index invariant");
     _markedRegions.at_put(hr->sort_index(), NULL);
     _cache.insert(hr);
     assert(!_cache.is_empty(), "cache should not be empty");
@@ -203,9 +239,7 @@
   assert(verify(), "cache should be consistent");
 }
 
-void
-CollectionSetChooser::sortMarkedHeapRegions() {
-  guarantee(_cache.is_empty(), "cache should be empty");
+void CollectionSetChooser::sortMarkedHeapRegions() {
   // First trim any unused portion of the top in the parallel case.
   if (_first_par_unreserved_idx > 0) {
     if (G1PrintParCleanupStats) {
@@ -217,43 +251,78 @@
     _markedRegions.trunc_to(_first_par_unreserved_idx);
   }
   _markedRegions.sort(orderRegions);
-  assert(_numMarkedRegions <= _markedRegions.length(), "Requirement");
-  assert(_numMarkedRegions == 0
-         || _markedRegions.at(_numMarkedRegions-1) != NULL,
-         "Testing _numMarkedRegions");
-  assert(_numMarkedRegions == _markedRegions.length()
-         || _markedRegions.at(_numMarkedRegions) == NULL,
-         "Testing _numMarkedRegions");
+  assert(_length <= _markedRegions.length(), "Requirement");
+  assert(_length == 0 || _markedRegions.at(_length - 1) != NULL,
+         "Testing _length");
+  assert(_length == _markedRegions.length() ||
+                        _markedRegions.at(_length) == NULL, "Testing _length");
   if (G1PrintParCleanupStats) {
-    gclog_or_tty->print_cr("     Sorted %d marked regions.", _numMarkedRegions);
+    gclog_or_tty->print_cr("     Sorted %d marked regions.", _length);
   }
-  for (int i = 0; i < _numMarkedRegions; i++) {
+  for (int i = 0; i < _length; i++) {
     assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
     _markedRegions.at(i)->set_sort_index(i);
   }
   if (G1PrintRegionLivenessInfo) {
     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
-    for (int i = 0; i < _numMarkedRegions; ++i) {
+    for (int i = 0; i < _length; ++i) {
       HeapRegion* r = _markedRegions.at(i);
       cl.doHeapRegion(r);
     }
   }
-  assert(verify(), "should now be sorted");
+  assert(verify(), "CSet chooser verification");
 }
 
-void
-CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
+size_t CollectionSetChooser::calcMinOldCSetLength() {
+  // The min old CSet region bound is based on the maximum desired
+  // number of mixed GCs after a cycle. I.e., even if some old regions
+  // look expensive, we should add them to the CSet anyway to make
+  // sure we go through the available old regions in no more than the
+  // maximum desired number of mixed GCs.
+  //
+  // The calculation is based on the number of marked regions we added
+  // to the CSet chooser in the first place, not how many remain, so
+  // that the result is the same during all mixed GCs that follow a cycle.
+
+  const size_t region_num = (size_t) _length;
+  const size_t gc_num = (size_t) G1MixedGCCountTarget;
+  size_t result = region_num / gc_num;
+  // emulate ceiling
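+  // e.g. (illustrative values): region_num = 10, gc_num = 4 gives
+  // 10 / 4 = 2, and since 2 * 4 = 8 < 10 the result is bumped to 3.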
+  if (result * gc_num < region_num) {
+    result += 1;
+  }
+  return result;
+}
+
+size_t CollectionSetChooser::calcMaxOldCSetLength() {
+  // The max old CSet region bound is based on the threshold expressed
+  // as a percentage of the heap size. I.e., it should bound the
+  // number of old regions added to the CSet irrespective of how many
+  // of them are available.
+
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  const size_t region_num = g1h->n_regions();
+  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
+  size_t result = region_num * perc / 100;
+  // emulate ceiling
+  if (100 * result < region_num * perc) {
+    result += 1;
+  }
+  return result;
+}
+
+void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
   assert(!hr->isHumongous(),
          "Humongous regions shouldn't be added to the collection set");
   assert(!hr->is_young(), "should not be young!");
   _markedRegions.append(hr);
-  _numMarkedRegions++;
+  _length++;
+  _remainingReclaimableBytes += hr->reclaimable_bytes();
   hr->calc_gc_efficiency();
 }
 
-void
-CollectionSetChooser::
-prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) {
+void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions,
+                                                             size_t chunkSize) {
   _first_par_unreserved_idx = 0;
   int n_threads = ParallelGCThreads;
   if (UseDynamicNumberOfGCThreads) {
@@ -274,8 +343,7 @@
   _markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL);
 }
 
-jint
-CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
+jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
   // Don't do this assert because this can be called at a point
   // where the loop up stream will not execute again but might
   // try to claim more chunks (loop test has not been done yet).
@@ -287,83 +355,37 @@
   return res - n_regions;
 }
 
-void
-CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
+void CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
   assert(_markedRegions.at(index) == NULL, "precondition");
   assert(!hr->is_young(), "should not be young!");
   _markedRegions.at_put(index, hr);
   hr->calc_gc_efficiency();
 }
 
-void
-CollectionSetChooser::incNumMarkedHeapRegions(jint inc_by) {
-  (void)Atomic::add(inc_by, &_numMarkedRegions);
-}
-
-void
-CollectionSetChooser::clearMarkedHeapRegions(){
-  for (int i = 0; i < _markedRegions.length(); i++) {
-    HeapRegion* r =   _markedRegions.at(i);
-    if (r != NULL) r->set_sort_index(-1);
+void CollectionSetChooser::updateTotals(jint region_num,
+                                        size_t reclaimable_bytes) {
+  // Only take the lock if we actually need to update the totals.
+  if (region_num > 0) {
+    assert(reclaimable_bytes > 0, "invariant");
+    // We could have just used atomics instead of taking the
+    // lock. However, we currently don't have an atomic add for size_t.
+    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
+    _length += (int) region_num;
+    _remainingReclaimableBytes += reclaimable_bytes;
+  } else {
+    assert(reclaimable_bytes == 0, "invariant");
   }
-  _markedRegions.clear();
-  _curMarkedIndex = 0;
-  _numMarkedRegions = 0;
-  _cache.clear();
-};
-
-void
-CollectionSetChooser::updateAfterFullCollection() {
-  clearMarkedHeapRegions();
 }
 
-// if time_remaining < 0.0, then this method should try to return
-// a region, whether it fits within the remaining time or not
-HeapRegion*
-CollectionSetChooser::getNextMarkedRegion(double time_remaining,
-                                          double avg_prediction) {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
-  fillCache();
-  if (_cache.is_empty()) {
-    assert(_curMarkedIndex == _numMarkedRegions,
-           "if cache is empty, list should also be empty");
-    ergo_verbose0(ErgoCSetConstruction,
-                  "stop adding old regions to CSet",
-                  ergo_format_reason("cache is empty"));
-    return NULL;
-  }
-
-  HeapRegion *hr = _cache.get_first();
-  assert(hr != NULL, "if cache not empty, first entry should be non-null");
-  double predicted_time = g1h->predict_region_elapsed_time_ms(hr, false);
-
-  if (g1p->adaptive_young_list_length()) {
-    if (time_remaining - predicted_time < 0.0) {
-      g1h->check_if_region_is_too_expensive(predicted_time);
-      ergo_verbose2(ErgoCSetConstruction,
-                    "stop adding old regions to CSet",
-                    ergo_format_reason("predicted old region time higher than remaining time")
-                    ergo_format_ms("predicted old region time")
-                    ergo_format_ms("remaining time"),
-                    predicted_time, time_remaining);
-      return NULL;
-    }
-  } else {
-    double threshold = 2.0 * avg_prediction;
-    if (predicted_time > threshold) {
-      ergo_verbose2(ErgoCSetConstruction,
-                    "stop adding old regions to CSet",
-                    ergo_format_reason("predicted old region time higher than threshold")
-                    ergo_format_ms("predicted old region time")
-                    ergo_format_ms("threshold"),
-                    predicted_time, threshold);
-      return NULL;
+void CollectionSetChooser::clearMarkedHeapRegions() {
+  for (int i = 0; i < _markedRegions.length(); i++) {
+    HeapRegion* r = _markedRegions.at(i);
+    if (r != NULL) {
+      r->set_sort_index(-1);
     }
   }
-
-  HeapRegion *hr2 = _cache.remove_first();
-  assert(hr == hr2, "cache contents should not have changed");
-
-  return hr;
-}
+  _markedRegions.clear();
+  _curr_index = 0;
+  _length = 0;
+  _remainingReclaimableBytes = 0;
+};
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,28 +28,6 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "utilities/growableArray.hpp"
 
-// We need to sort heap regions by collection desirability.
-// This sorting is currently done in two "stages". An initial sort is
-// done following a cleanup pause as soon as all of the marked but
-// non-empty regions have been identified and the completely empty
-// ones reclaimed.
-// This gives us a global sort on a GC efficiency metric
-// based on predictive data available at that time. However,
-// any of these regions that are collected will only be collected
-// during a future GC pause, by which time it is possible that newer
-// data might allow us to revise and/or refine the earlier
-// pause predictions, leading to changes in expected gc efficiency
-// order. To somewhat mitigate this obsolescence, more so in the
-// case of regions towards the end of the list, which will be
-// picked later, these pre-sorted regions from the _markedRegions
-// array are not used as is, but a small prefix thereof is
-// insertion-sorted again into a small cache, based on more
-// recent remembered set information. Regions are then drawn
-// from this cache to construct the collection set at each
-// incremental GC.
-// This scheme and/or its implementation may be subject to
-// revision in the future.
-
 class CSetChooserCache VALUE_OBJ_CLASS_SPEC {
 private:
   enum {
@@ -103,24 +81,82 @@
 class CollectionSetChooser: public CHeapObj {
 
   GrowableArray<HeapRegion*> _markedRegions;
-  int _curMarkedIndex;
-  int _numMarkedRegions;
-  CSetChooserCache _cache;
+
+  // The index of the next candidate old region to be considered for
+  // addition to the CSet.
+  int _curr_index;
+
+  // The number of candidate old regions added to the CSet chooser.
+  int _length;
 
-  // True iff last collection pause ran of out new "age 0" regions, and
-  // returned an "age 1" region.
-  bool _unmarked_age_1_returned_as_new;
+  CSetChooserCache _cache;
+  jint _first_par_unreserved_idx;
 
-  jint _first_par_unreserved_idx;
+  // If a region has more live bytes than this threshold, it will not
+  // be added to the CSet chooser and will not be a candidate for
+  // collection.
+  size_t _regionLiveThresholdBytes;
+
+  // The sum of reclaimable bytes over all the regions in the CSet chooser.
+  size_t _remainingReclaimableBytes;
 
 public:
 
-  HeapRegion* getNextMarkedRegion(double time_so_far, double avg_prediction);
+  // Return the current candidate region to be considered for
+  // collection without removing it from the CSet chooser.
+  HeapRegion* peek() {
+    HeapRegion* res = NULL;
+    if (_curr_index < _length) {
+      res = _markedRegions.at(_curr_index);
+      assert(res != NULL,
+             err_msg("Unexpected NULL hr in _markedRegions at index %d",
+                     _curr_index));
+    }
+    return res;
+  }
+
+  // Remove the given region from the CSet chooser and move to the
+  // next one. The given region should be the current candidate region
+  // in the CSet chooser.
+  void remove_and_move_to_next(HeapRegion* hr) {
+    assert(hr != NULL, "pre-condition");
+    assert(_curr_index < _length, "pre-condition");
+    assert(_markedRegions.at(_curr_index) == hr, "pre-condition");
+    hr->set_sort_index(-1);
+    _markedRegions.at_put(_curr_index, NULL);
+    assert(hr->reclaimable_bytes() <= _remainingReclaimableBytes,
+           err_msg("remaining reclaimable bytes inconsistent "
+                   "from region: "SIZE_FORMAT" remaining: "SIZE_FORMAT,
+                   hr->reclaimable_bytes(), _remainingReclaimableBytes));
+    _remainingReclaimableBytes -= hr->reclaimable_bytes();
+    _curr_index += 1;
+  }
 
   CollectionSetChooser();
 
   void sortMarkedHeapRegions();
   void fillCache();
+
+  // Determine whether to add the given region to the CSet chooser or
+  // not. Currently, we skip humongous regions (we never add them to
+  // the CSet, we only reclaim them during cleanup) and regions whose
+  // live bytes are over the threshold.
+  bool shouldAdd(HeapRegion* hr) {
+    assert(hr->is_marked(), "pre-condition");
+    assert(!hr->is_young(), "should never consider young regions");
+    return !hr->isHumongous() &&
+            hr->live_bytes() < _regionLiveThresholdBytes;
+  }
+
+  // Calculate the minimum number of old regions we'll add to the CSet
+  // during a mixed GC.
+  size_t calcMinOldCSetLength();
+
+  // Calculate the maximum number of old regions we'll add to the CSet
+  // during a mixed GC.
+  size_t calcMaxOldCSetLength();
+
+  // Serial version.
   void addMarkedHeapRegion(HeapRegion *hr);
 
   // Must be called before calls to getParMarkedHeapRegionChunk.
@@ -133,14 +169,21 @@
   // Set the marked array entry at index to hr.  Careful to claim the index
   // first if in parallel.
   void setMarkedHeapRegion(jint index, HeapRegion* hr);
-  // Atomically increment the number of claimed regions by "inc_by".
-  void incNumMarkedHeapRegions(jint inc_by);
+  // Atomically increment the number of added regions by region_num
+  // and the amount of reclaimable bytes by reclaimable_bytes.
+  void updateTotals(jint region_num, size_t reclaimable_bytes);
 
   void clearMarkedHeapRegions();
 
-  void updateAfterFullCollection();
+  // Return the number of candidate regions that remain to be collected.
+  size_t remainingRegions() { return _length - _curr_index; }
 
-  bool unmarked_age_1_returned_as_new() { return _unmarked_age_1_returned_as_new; }
+  // Determine whether the CSet chooser has more candidate regions or not.
+  bool isEmpty() { return remainingRegions() == 0; }
+
+  // Return the reclaimable bytes that remain to be collected on
+  // all the candidate regions in the CSet chooser.
+  size_t remainingReclaimableBytes() { return _remainingReclaimableBytes; }
 
   // Returns true if the used portion of "_markedRegions" is properly
   // sorted, otherwise asserts false.
@@ -148,9 +191,17 @@
   bool verify(void);
   bool regionProperlyOrdered(HeapRegion* r) {
     int si = r->sort_index();
-    return (si == -1) ||
-      (si > -1 && _markedRegions.at(si) == r) ||
-      (si < -1 && _cache.region_in_cache(r));
+    if (si > -1) {
+      guarantee(_curr_index <= si && si < _length,
+                err_msg("curr: %d sort index: %d: length: %d",
+                        _curr_index, si, _length));
+      guarantee(_markedRegions.at(si) == r,
+                err_msg("sort index: %d at: "PTR_FORMAT" r: "PTR_FORMAT,
+                        si, _markedRegions.at(si), r));
+    } else {
+      guarantee(si == -1, err_msg("sort index: %d", si));
+    }
+    return true;
   }
 #endif
 
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -42,8 +42,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
 
-//
-// CMS Bit Map Wrapper
+// Concurrent marking bit map wrapper
 
 CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter) :
   _bm((uintptr_t*)NULL,0),
@@ -53,13 +52,13 @@
   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
 
-  guarantee(brs.is_reserved(), "couldn't allocate CMS bit map");
+  guarantee(brs.is_reserved(), "couldn't allocate concurrent marking bit map");
   // For now we'll just commit all of the bit map up fromt.
   // Later on we'll try to be more parsimonious with swap.
   guarantee(_virtual_space.initialize(brs, brs.size()),
-            "couldn't reseve backing store for CMS bit map");
+            "couldn't reseve backing store for concurrent marking bit map");
   assert(_virtual_space.committed_size() == brs.size(),
-         "didn't reserve backing store for all of CMS bit map?");
+         "didn't reserve backing store for all of concurrent marking bit map?");
   _bm.set_map((uintptr_t*)_virtual_space.low());
   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
          _bmWordSize, "inconsistency in bit map sizing");
@@ -104,17 +103,6 @@
   return (int) (diff >> _shifter);
 }
 
-bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
-  HeapWord* left  = MAX2(_bmStartWord, mr.start());
-  HeapWord* right = MIN2(_bmStartWord + _bmWordSize, mr.end());
-  if (right > left) {
-    // Right-open interval [leftOffset, rightOffset).
-    return _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
-  } else {
-    return true;
-  }
-}
-
 void CMBitMapRO::mostly_disjoint_range_union(BitMap*   from_bitmap,
                                              size_t    from_start_index,
                                              HeapWord* to_start_word,
@@ -431,8 +419,6 @@
     assert(newOop->is_oop(), "Expected an oop");
     assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
            "only grey objects on this stack");
-    // iterate over the oops in this oop, marking and pushing
-    // the ones in CMS generation.
     newOop->oop_iterate(cl);
     if (yield_after && _cm->do_yield_check()) {
       res = false;
@@ -474,6 +460,84 @@
               && !nextMarkBitMap()->isMarked((HeapWord*)obj)));
 }
 
+CMRootRegions::CMRootRegions() :
+  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
+  _should_abort(false),  _next_survivor(NULL) { }
+
+void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
+  _young_list = g1h->young_list();
+  _cm = cm;
+}
+
+void CMRootRegions::prepare_for_scan() {
+  assert(!scan_in_progress(), "pre-condition");
+
+  // Currently, only survivors can be root regions.
+  assert(_next_survivor == NULL, "pre-condition");
+  _next_survivor = _young_list->first_survivor_region();
+  _scan_in_progress = (_next_survivor != NULL);
+  _should_abort = false;
+}
+
+HeapRegion* CMRootRegions::claim_next() {
+  if (_should_abort) {
+    // If someone has set the should_abort flag, we return NULL to
+    // force the caller to bail out of their loop.
+    return NULL;
+  }
+
+  // Currently, only survivors can be root regions.
+  HeapRegion* res = _next_survivor;
+  if (res != NULL) {
+    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
+    // Read it again in case it changed while we were waiting for the lock.
+    res = _next_survivor;
+    if (res != NULL) {
+      if (res == _young_list->last_survivor_region()) {
+        // We just claimed the last survivor so store NULL to indicate
+        // that we're done.
+        _next_survivor = NULL;
+      } else {
+        _next_survivor = res->get_next_young_region();
+      }
+    } else {
+      // Someone else claimed the last survivor while we were trying
+      // to take the lock so nothing else to do.
+    }
+  }
+  assert(res == NULL || res->is_survivor(), "post-condition");
+
+  return res;
+}
+
+void CMRootRegions::scan_finished() {
+  assert(scan_in_progress(), "pre-condition");
+
+  // Currently, only survivors can be root regions.
+  if (!_should_abort) {
+    assert(_next_survivor == NULL, "we should have claimed all survivors");
+  }
+  _next_survivor = NULL;
+
+  {
+    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
+    _scan_in_progress = false;
+    RootRegionScan_lock->notify_all();
+  }
+}
+
+bool CMRootRegions::wait_until_scan_finished() {
+  if (!scan_in_progress()) return false;
+
+  {
+    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
+    while (scan_in_progress()) {
+      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
+    }
+  }
+  return true;
+}
+
 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif // _MSC_VER
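
The CMRootRegions code above coordinates claiming and completion through RootRegionScan_lock: claim_next() re-reads _next_survivor under the lock, scan_finished() clears the in-progress flag and notifies, and wait_until_scan_finished() blocks until the flag drops. A minimal standard-C++ analogue of the finish/wait half, using std::mutex and std::condition_variable purely for illustration (HotSpot itself uses its own Monitor/MutexLockerEx primitives):

    #include <condition_variable>
    #include <mutex>

    struct RootRegionScanState {
      std::mutex              lock;
      std::condition_variable done_cv;
      bool                    scan_in_progress = false;

      void scan_finished() {
        std::lock_guard<std::mutex> g(lock);
        scan_in_progress = false;
        done_cv.notify_all();                 // wake every waiter, like notify_all() above
      }

      void wait_until_scan_finished() {
        std::unique_lock<std::mutex> g(lock);
        done_cv.wait(g, [this] { return !scan_in_progress; });
      }
    };
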
@@ -498,6 +562,7 @@
   _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),
+
   _prevMarkBitMap(&_markBitMap1),
   _nextMarkBitMap(&_markBitMap2),
   _at_least_one_mark_complete(false),
@@ -526,7 +591,11 @@
   _cleanup_times(),
   _total_counting_time(0.0),
   _total_rs_scrub_time(0.0),
-  _parallel_workers(NULL) {
+
+  _parallel_workers(NULL),
+
+  _count_card_bitmaps(NULL),
+  _count_marked_bytes(NULL) {
   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
   if (verbose_level < no_verbose) {
     verbose_level = no_verbose;
@@ -557,9 +626,16 @@
   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
   satb_qs.set_buffer_size(G1SATBBufferSize);
 
+  _root_regions.init(_g1h, this);
+
   _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num);
   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num);
 
+  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_task_num);
+  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_task_num);
+
+  BitMap::idx_t card_bm_size = _card_bm.size();
+
   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
   _active_tasks = _max_task_num;
   for (int i = 0; i < (int) _max_task_num; ++i) {
@@ -567,10 +643,26 @@
     task_queue->initialize();
     _task_queues->register_queue(i, task_queue);
 
-    _tasks[i] = new CMTask(i, this, task_queue, _task_queues);
+    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
+    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions);
+
+    _tasks[i] = new CMTask(i, this,
+                           _count_marked_bytes[i],
+                           &_count_card_bitmaps[i],
+                           task_queue, _task_queues);
+
     _accum_task_vtime[i] = 0.0;
   }
 
+  // Calculate the card number for the bottom of the heap. Used
+  // in biasing indexes into the accounting card bitmaps.
+  _heap_bottom_card_num =
+    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
+                                CardTableModRefBS::card_shift);
+
+  // Clear all the liveness counting data
+  clear_all_count_data();
+
   if (ConcGCThreads > ParallelGCThreads) {
     vm_exit_during_initialization("Can't have more ConcGCThreads "
                                   "than ParallelGCThreads.");
@@ -750,11 +842,6 @@
   ShouldNotReachHere();
 }
 
-// This closure is used to mark refs into the g1 generation
-// from external roots in the CMS bit map.
-// Called at the first checkpoint.
-//
-
 void ConcurrentMark::clearNextBitmap() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1CollectorPolicy* g1p = g1h->g1_policy();
@@ -794,6 +881,9 @@
     assert(!g1h->mark_in_progress(), "invariant");
   }
 
+  // Clear the liveness counting data
+  clear_all_count_data();
+
   // Repeat the asserts from above.
   guarantee(cmThread()->during_cycle(), "invariant");
   guarantee(!g1h->mark_in_progress(), "invariant");
@@ -854,6 +944,8 @@
   satb_mq_set.set_active_all_threads(true, /* new active value */
                                      false /* expected_active */);
 
+  _root_regions.prepare_for_scan();
+
   // update_g1_committed() will be called at the end of an evac pause
   // when marking is on. So, it's also called at the end of the
   // initial-mark pause to update the heap end, if the heap expands
@@ -1147,6 +1239,69 @@
   return 0;
 }
 
+void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
+  // Currently, only survivors can be root regions.
+  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
+  G1RootRegionScanClosure cl(_g1h, this, worker_id);
+
+  const uintx interval = PrefetchScanIntervalInBytes;
+  HeapWord* curr = hr->bottom();
+  const HeapWord* end = hr->top();
+  while (curr < end) {
+    Prefetch::read(curr, interval);
+    oop obj = oop(curr);
+    int size = obj->oop_iterate(&cl);
+    assert(size == obj->size(), "sanity");
+    curr += size;
+  }
+}
+
+class CMRootRegionScanTask : public AbstractGangTask {
+private:
+  ConcurrentMark* _cm;
+
+public:
+  CMRootRegionScanTask(ConcurrentMark* cm) :
+    AbstractGangTask("Root Region Scan"), _cm(cm) { }
+
+  void work(uint worker_id) {
+    assert(Thread::current()->is_ConcurrentGC_thread(),
+           "this should only be done by a conc GC thread");
+
+    CMRootRegions* root_regions = _cm->root_regions();
+    HeapRegion* hr = root_regions->claim_next();
+    while (hr != NULL) {
+      _cm->scanRootRegion(hr, worker_id);
+      hr = root_regions->claim_next();
+    }
+  }
+};
+
+void ConcurrentMark::scanRootRegions() {
+  // scan_in_progress() will have been set to true only if there was
+  // at least one root region to scan. So, if it's false, we
+  // should not attempt to do any further work.
+  if (root_regions()->scan_in_progress()) {
+    _parallel_marking_threads = calc_parallel_marking_threads();
+    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
+           "Maximum number of marking threads exceeded");
+    uint active_workers = MAX2(1U, parallel_marking_threads());
+
+    CMRootRegionScanTask task(this);
+    if (parallel_marking_threads() > 0) {
+      _parallel_workers->set_active_workers((int) active_workers);
+      _parallel_workers->run_task(&task);
+    } else {
+      task.work(0);
+    }
+
+    // It's possible that has_aborted() is true here without actually
+    // aborting the survivor scan earlier. This is OK as it's
+    // mainly used for sanity checking.
+    root_regions()->scan_finished();
+  }
+}
+
 void ConcurrentMark::markFromRoots() {
   // we might be tempted to assert that:
   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
@@ -1225,6 +1380,10 @@
       gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
     }
   } else {
+    // Aggregate the per-task counting data that we have accumulated
+    // while marking.
+    aggregate_count_data();
+
     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
     // We're done with marking.
     // This is the end of  the marking cycle, we're expected all
@@ -1262,48 +1421,41 @@
   g1p->record_concurrent_mark_remark_end();
 }
 
-#define CARD_BM_TEST_MODE 0
-
+// Used to calculate the # live objects per region
+// for verification purposes
 class CalcLiveObjectsClosure: public HeapRegionClosure {
 
   CMBitMapRO* _bm;
   ConcurrentMark* _cm;
-  bool _changed;
-  bool _yield;
-  size_t _words_done;
-  size_t _tot_live;
-  size_t _tot_used;
-  size_t _regions_done;
-  double _start_vtime_sec;
-
   BitMap* _region_bm;
   BitMap* _card_bm;
+
+  // Debugging
+  size_t _tot_words_done;
+  size_t _tot_live;
+  size_t _tot_used;
+
+  size_t _region_marked_bytes;
+
   intptr_t _bottom_card_num;
-  bool _final;
 
   void mark_card_num_range(intptr_t start_card_num, intptr_t last_card_num) {
-    for (intptr_t i = start_card_num; i <= last_card_num; i++) {
-#if CARD_BM_TEST_MODE
-      guarantee(_card_bm->at(i - _bottom_card_num), "Should already be set.");
-#else
-      _card_bm->par_at_put(i - _bottom_card_num, 1);
-#endif
+    assert(start_card_num <= last_card_num, "sanity");
+    BitMap::idx_t start_idx = start_card_num - _bottom_card_num;
+    BitMap::idx_t last_idx = last_card_num - _bottom_card_num;
+
+    for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
+      _card_bm->par_at_put(i, 1);
     }
   }
 
 public:
-  CalcLiveObjectsClosure(bool final,
-                         CMBitMapRO *bm, ConcurrentMark *cm,
+  CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
                          BitMap* region_bm, BitMap* card_bm) :
-    _bm(bm), _cm(cm), _changed(false), _yield(true),
-    _words_done(0), _tot_live(0), _tot_used(0),
-    _region_bm(region_bm), _card_bm(card_bm),_final(final),
-    _regions_done(0), _start_vtime_sec(0.0)
-  {
-    _bottom_card_num =
-      intptr_t(uintptr_t(G1CollectedHeap::heap()->reserved_region().start()) >>
-               CardTableModRefBS::card_shift);
-  }
+    _bm(bm), _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
+    _region_marked_bytes(0), _tot_words_done(0),
+    _tot_live(0), _tot_used(0),
+    _bottom_card_num(cm->heap_bottom_card_num()) { }
 
   // It takes a region that's not empty (i.e., it has at least one
   // live object in it and sets its corresponding bit on the region
@@ -1319,29 +1471,16 @@
       _region_bm->par_at_put((BitMap::idx_t) index, true);
     } else {
       // Starts humongous case: calculate how many regions are part of
-      // this humongous region and then set the bit range. It might
-      // have been a bit more efficient to look at the object that
-      // spans these humongous regions to calculate their number from
-      // the object's size. However, it's a good idea to calculate
-      // this based on the metadata itself, and not the region
-      // contents, so that this code is not aware of what goes into
-      // the humongous regions (in case this changes in the future).
+      // this humongous region and then set the bit range.
       G1CollectedHeap* g1h = G1CollectedHeap::heap();
-      size_t end_index = index + 1;
-      while (end_index < g1h->n_regions()) {
-        HeapRegion* chr = g1h->region_at(end_index);
-        if (!chr->continuesHumongous()) break;
-        end_index += 1;
-      }
+      HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
+      size_t end_index = last_hr->hrs_index() + 1;
       _region_bm->par_at_put_range((BitMap::idx_t) index,
                                    (BitMap::idx_t) end_index, true);
     }
   }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (!_final && _regions_done == 0) {
-      _start_vtime_sec = os::elapsedVTime();
-    }
 
     if (hr->continuesHumongous()) {
       // We will ignore these here and process them when their
@@ -1355,48 +1494,41 @@
     }
 
     HeapWord* nextTop = hr->next_top_at_mark_start();
-    HeapWord* start   = hr->top_at_conc_mark_count();
-    assert(hr->bottom() <= start && start <= hr->end() &&
-           hr->bottom() <= nextTop && nextTop <= hr->end() &&
-           start <= nextTop,
-           "Preconditions.");
-    // Otherwise, record the number of word's we'll examine.
+    HeapWord* start   = hr->bottom();
+
+    assert(start <= hr->end() && start <= nextTop && nextTop <= hr->end(),
+           err_msg("Preconditions not met - "
+                   "start: "PTR_FORMAT", nextTop: "PTR_FORMAT", end: "PTR_FORMAT,
+                   start, nextTop, hr->end()));
+
+    // Record the number of words we'll examine.
     size_t words_done = (nextTop - start);
+
     // Find the first marked object at or after "start".
     start = _bm->getNextMarkedWordAddress(start, nextTop);
+
     size_t marked_bytes = 0;
 
     // Below, the term "card num" means the result of shifting an address
     // by the card shift -- address 0 corresponds to card number 0.  One
     // must subtract the card num of the bottom of the heap to obtain a
     // card table index.
+
     // The first card num of the sequence of live cards currently being
     // constructed.  -1 ==> no sequence.
     intptr_t start_card_num = -1;
+
     // The last card num of the sequence of live cards currently being
     // constructed.  -1 ==> no sequence.
     intptr_t last_card_num = -1;
 
     while (start < nextTop) {
-      if (_yield && _cm->do_yield_check()) {
-        // We yielded.  It might be for a full collection, in which case
-        // all bets are off; terminate the traversal.
-        if (_cm->has_aborted()) {
-          _changed = false;
-          return true;
-        } else {
-          // Otherwise, it might be a collection pause, and the region
-          // we're looking at might be in the collection set.  We'll
-          // abandon this region.
-          return false;
-        }
-      }
       oop obj = oop(start);
       int obj_sz = obj->size();
+
       // The card num of the start of the current object.
       intptr_t obj_card_num =
         intptr_t(uintptr_t(start) >> CardTableModRefBS::card_shift);
-
       HeapWord* obj_last = start + obj_sz - 1;
       intptr_t obj_last_card_num =
         intptr_t(uintptr_t(obj_last) >> CardTableModRefBS::card_shift);
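
The "card num" convention described in the hunk above (shift an address by the card shift, then subtract the card num of the heap bottom to obtain a card table index) can be summarised by a small sketch; the card_shift value of 9, i.e. 512-byte cards, is only an illustrative assumption standing in for CardTableModRefBS::card_shift:

    #include <cstddef>
    #include <cstdint>

    static inline size_t card_index_for(uintptr_t addr,
                                        intptr_t heap_bottom_card_num,
                                        unsigned card_shift = 9) {
      intptr_t card_num = (intptr_t)(addr >> card_shift);  // address 0 -> card num 0
      return (size_t)(card_num - heap_bottom_card_num);    // bias into the card bitmap
    }
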
@@ -1414,110 +1546,404 @@
             start_card_num = obj_card_num;
           }
         }
-#if CARD_BM_TEST_MODE
-        /*
-        gclog_or_tty->print_cr("Setting bits from %d/%d.",
-                               obj_card_num - _bottom_card_num,
-                               obj_last_card_num - _bottom_card_num);
-        */
-        for (intptr_t j = obj_card_num; j <= obj_last_card_num; j++) {
-          _card_bm->par_at_put(j - _bottom_card_num, 1);
-        }
-#endif
       }
       // In any case, we set the last card num.
       last_card_num = obj_last_card_num;
 
       marked_bytes += (size_t)obj_sz * HeapWordSize;
+
       // Find the next marked object after this one.
       start = _bm->getNextMarkedWordAddress(start + 1, nextTop);
-      _changed = true;
     }
+
     // Handle the last range, if any.
     if (start_card_num != -1) {
       mark_card_num_range(start_card_num, last_card_num);
     }
-    if (_final) {
-      // Mark the allocated-since-marking portion...
-      HeapWord* tp = hr->top();
-      if (nextTop < tp) {
-        start_card_num =
-          intptr_t(uintptr_t(nextTop) >> CardTableModRefBS::card_shift);
-        last_card_num =
-          intptr_t(uintptr_t(tp) >> CardTableModRefBS::card_shift);
-        mark_card_num_range(start_card_num, last_card_num);
-        // This definitely means the region has live objects.
-        set_bit_for_region(hr);
-      }
+
+    // Mark the allocated-since-marking portion...
+    HeapWord* top = hr->top();
+    if (nextTop < top) {
+      start_card_num = intptr_t(uintptr_t(nextTop) >> CardTableModRefBS::card_shift);
+      last_card_num = intptr_t(uintptr_t(top) >> CardTableModRefBS::card_shift);
+
+      mark_card_num_range(start_card_num, last_card_num);
+
+      // This definitely means the region has live objects.
+      set_bit_for_region(hr);
     }
 
-    hr->add_to_marked_bytes(marked_bytes);
     // Update the live region bitmap.
     if (marked_bytes > 0) {
       set_bit_for_region(hr);
     }
-    hr->set_top_at_conc_mark_count(nextTop);
+
+    // Set the marked bytes for the current region so that
+    // it can be queried by a calling verification routine
+    _region_marked_bytes = marked_bytes;
+
     _tot_live += hr->next_live_bytes();
     _tot_used += hr->used();
-    _words_done = words_done;
-
-    if (!_final) {
-      ++_regions_done;
-      if (_regions_done % 10 == 0) {
-        double end_vtime_sec = os::elapsedVTime();
-        double elapsed_vtime_sec = end_vtime_sec - _start_vtime_sec;
-        if (elapsed_vtime_sec > (10.0 / 1000.0)) {
-          jlong sleep_time_ms =
-            (jlong) (elapsed_vtime_sec * _cm->cleanup_sleep_factor() * 1000.0);
-          os::sleep(Thread::current(), sleep_time_ms, false);
-          _start_vtime_sec = end_vtime_sec;
-        }
-      }
-    }
+    _tot_words_done = words_done;
 
     return false;
   }
 
-  bool changed() { return _changed;  }
-  void reset()   { _changed = false; _words_done = 0; }
-  void no_yield() { _yield = false; }
-  size_t words_done() { return _words_done; }
-  size_t tot_live() { return _tot_live; }
-  size_t tot_used() { return _tot_used; }
+  size_t region_marked_bytes() const { return _region_marked_bytes; }
+
+  // Debugging
+  size_t tot_words_done() const      { return _tot_words_done; }
+  size_t tot_live() const            { return _tot_live; }
+  size_t tot_used() const            { return _tot_used; }
+};
+
+// Heap region closure used for verifying the counting data
+// that was accumulated concurrently and aggregated during
+// the remark pause. This closure is applied to the heap
+// regions during the STW cleanup pause.
+
+class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
+  ConcurrentMark* _cm;
+  CalcLiveObjectsClosure _calc_cl;
+  BitMap* _region_bm;   // Region BM to be verified
+  BitMap* _card_bm;     // Card BM to be verified
+  bool _verbose;        // verbose output?
+
+  BitMap* _exp_region_bm; // Expected Region BM values
+  BitMap* _exp_card_bm;   // Expected card BM values
+
+  int _failures;
+
+public:
+  VerifyLiveObjectDataHRClosure(ConcurrentMark* cm,
+                                BitMap* region_bm,
+                                BitMap* card_bm,
+                                BitMap* exp_region_bm,
+                                BitMap* exp_card_bm,
+                                bool verbose) :
+    _cm(cm),
+    _calc_cl(_cm->nextMarkBitMap(), _cm, exp_region_bm, exp_card_bm),
+    _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
+    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
+    _failures(0) { }
+
+  int failures() const { return _failures; }
+
+  bool doHeapRegion(HeapRegion* hr) {
+    if (hr->continuesHumongous()) {
+      // We will ignore these here and process them when their
+      // associated "starts humongous" region is processed (see
+      // set_bit_for_heap_region()). Note that we cannot rely on their
+      // associated "starts humongous" region to have their bit set to
+      // 1 since, due to the region chunking in the parallel region
+      // iteration, a "continues humongous" region might be visited
+      // before its associated "starts humongous".
+      return false;
+    }
+
+    int failures = 0;
+
+    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
+    // this region and set the corresponding bits in the expected region
+    // and card bitmaps.
+    bool res = _calc_cl.doHeapRegion(hr);
+    assert(res == false, "should be continuing");
+
+    MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
+                    Mutex::_no_safepoint_check_flag);
+
+    // Verify that _top_at_conc_count == ntams
+    if (hr->top_at_conc_mark_count() != hr->next_top_at_mark_start()) {
+      if (_verbose) {
+        gclog_or_tty->print_cr("Region " SIZE_FORMAT ": top at conc count incorrect: "
+                               "expected " PTR_FORMAT ", actual: " PTR_FORMAT,
+                               hr->hrs_index(), hr->next_top_at_mark_start(),
+                               hr->top_at_conc_mark_count());
+      }
+      failures += 1;
+    }
+
+    // Verify the marked bytes for this region.
+    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
+    size_t act_marked_bytes = hr->next_marked_bytes();
+
+    // We're not OK if expected marked bytes > actual marked bytes. It means
+    // we have missed accounting some objects during the actual marking.
+    if (exp_marked_bytes > act_marked_bytes) {
+      if (_verbose) {
+        gclog_or_tty->print_cr("Region " SIZE_FORMAT ": marked bytes mismatch: "
+                               "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
+                               hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
+      }
+      failures += 1;
+    }
+
+    // Verify the bit, for this region, in the actual and expected
+    // (which was just calculated) region bit maps.
+    // We're not OK if the bit in the calculated expected region
+    // bitmap is set and the bit in the actual region bitmap is not.
+    BitMap::idx_t index = (BitMap::idx_t)hr->hrs_index();
+
+    bool expected = _exp_region_bm->at(index);
+    bool actual = _region_bm->at(index);
+    if (expected && !actual) {
+      if (_verbose) {
+        gclog_or_tty->print_cr("Region " SIZE_FORMAT ": region bitmap mismatch: "
+                               "expected: %d, actual: %d",
+                               hr->hrs_index(), expected, actual);
+      }
+      failures += 1;
+    }
+
+    // Verify that the card bit maps for the cards spanned by the current
+    // region match. We have an error if we have a set bit in the expected
+    // bit map and the corresponding bit in the actual bitmap is not set.
+
+    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
+    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
+
+    for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
+      expected = _exp_card_bm->at(i);
+      actual = _card_bm->at(i);
+
+      if (expected && !actual) {
+        if (_verbose) {
+          gclog_or_tty->print_cr("Region " SIZE_FORMAT ": card bitmap mismatch at " SIZE_FORMAT ": "
+                                 "expected: %d, actual: %d",
+                                 hr->hrs_index(), i, expected, actual);
+        }
+        failures += 1;
+      }
+    }
+
+    if (failures > 0 && _verbose)  {
+      gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
+                             "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
+                             HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start(),
+                             _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
+    }
+
+    _failures += failures;
+
+    // We could stop iteration over the heap when we
+    // find the first violating region by returning true.
+    return false;
+  }
 };
 
 
-void ConcurrentMark::calcDesiredRegions() {
-  _region_bm.clear();
-  _card_bm.clear();
-  CalcLiveObjectsClosure calccl(false /*final*/,
-                                nextMarkBitMap(), this,
-                                &_region_bm, &_card_bm);
-  G1CollectedHeap *g1h = G1CollectedHeap::heap();
-  g1h->heap_region_iterate(&calccl);
-
-  do {
-    calccl.reset();
-    g1h->heap_region_iterate(&calccl);
-  } while (calccl.changed());
-}
+class G1ParVerifyFinalCountTask: public AbstractGangTask {
+protected:
+  G1CollectedHeap* _g1h;
+  ConcurrentMark* _cm;
+  BitMap* _actual_region_bm;
+  BitMap* _actual_card_bm;
+
+  uint    _n_workers;
+
+  BitMap* _expected_region_bm;
+  BitMap* _expected_card_bm;
+
+  int  _failures;
+  bool _verbose;
+
+public:
+  G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
+                            BitMap* region_bm, BitMap* card_bm,
+                            BitMap* expected_region_bm, BitMap* expected_card_bm)
+    : AbstractGangTask("G1 verify final counting"),
+      _g1h(g1h), _cm(_g1h->concurrent_mark()),
+      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
+      _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
+      _failures(0), _verbose(false),
+      _n_workers(0) {
+    assert(VerifyDuringGC, "don't call this otherwise");
+
+    // Use the value already set as the number of active threads
+    // in the call to run_task().
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
+      assert( _g1h->workers()->active_workers() > 0,
+        "Should have been previously set");
+      _n_workers = _g1h->workers()->active_workers();
+    } else {
+      _n_workers = 1;
+    }
+
+    assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
+    assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
+
+    _verbose = _cm->verbose_medium();
+  }
+
+  void work(uint worker_id) {
+    assert(worker_id < _n_workers, "invariant");
+
+    VerifyLiveObjectDataHRClosure verify_cl(_cm,
+                                            _actual_region_bm, _actual_card_bm,
+                                            _expected_region_bm,
+                                            _expected_card_bm,
+                                            _verbose);
+
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
+      _g1h->heap_region_par_iterate_chunked(&verify_cl,
+                                            worker_id,
+                                            _n_workers,
+                                            HeapRegion::VerifyCountClaimValue);
+    } else {
+      _g1h->heap_region_iterate(&verify_cl);
+    }
+
+    Atomic::add(verify_cl.failures(), &_failures);
+  }
+
+  int failures() const { return _failures; }
+};
+
+// Final update of count data (during cleanup).
+// Adds [top_at_count, NTAMS) to the marked bytes for each
+// region. Sets the bits in the card bitmap corresponding
+// to the interval [top_at_count, top], and sets the
+// liveness bit for each region containing live data
+// in the region bitmap.
+
+class FinalCountDataUpdateClosure: public HeapRegionClosure {
+  ConcurrentMark* _cm;
+  BitMap* _region_bm;
+  BitMap* _card_bm;
+
+  size_t _total_live_bytes;
+  size_t _total_used_bytes;
+  size_t _total_words_done;
+
+  void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
+    assert(start_idx <= last_idx, "sanity");
+
+    // Set the inclusive bit range [start_idx, last_idx].
+    // For small ranges (up to 8 cards) use a simple loop; otherwise
+    // use par_at_put_range.
+    if ((last_idx - start_idx) <= 8) {
+      for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
+        _card_bm->par_set_bit(i);
+      }
+    } else {
+      assert(last_idx < _card_bm->size(), "sanity");
+      // Note BitMap::par_at_put_range() is exclusive.
+      _card_bm->par_at_put_range(start_idx, last_idx+1, true);
+    }
+  }
+
+  // It takes a region that's not empty (i.e., it has at least one
+  // live object in it) and sets its corresponding bit on the region
+  // bitmap to 1. If the region is "starts humongous" it will also set
+  // to 1 the bits on the region bitmap that correspond to its
+  // associated "continues humongous" regions.
+  void set_bit_for_region(HeapRegion* hr) {
+    assert(!hr->continuesHumongous(), "should have filtered those out");
+
+    size_t index = hr->hrs_index();
+    if (!hr->startsHumongous()) {
+      // Normal (non-humongous) case: just set the bit.
+      _region_bm->par_set_bit((BitMap::idx_t) index);
+    } else {
+      // Starts humongous case: calculate how many regions are part of
+      // this humongous region and then set the bit range.
+      G1CollectedHeap* g1h = G1CollectedHeap::heap();
+      HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
+      size_t end_index = last_hr->hrs_index() + 1;
+      _region_bm->par_at_put_range((BitMap::idx_t) index,
+                                   (BitMap::idx_t) end_index, true);
+    }
+  }
+
+ public:
+  FinalCountDataUpdateClosure(ConcurrentMark* cm,
+                              BitMap* region_bm,
+                              BitMap* card_bm) :
+    _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
+    _total_words_done(0), _total_live_bytes(0), _total_used_bytes(0) { }
+
+  bool doHeapRegion(HeapRegion* hr) {
+
+    if (hr->continuesHumongous()) {
+      // We will ignore these here and process them when their
+      // associated "starts humongous" region is processed (see
+      // set_bit_for_heap_region()). Note that we cannot rely on their
+      // associated "starts humongous" region to have their bit set to
+      // 1 since, due to the region chunking in the parallel region
+      // iteration, a "continues humongous" region might be visited
+      // before its associated "starts humongous".
+      return false;
+    }
+
+    HeapWord* start = hr->top_at_conc_mark_count();
+    HeapWord* ntams = hr->next_top_at_mark_start();
+    HeapWord* top   = hr->top();
+
+    assert(hr->bottom() <= start && start <= hr->end() &&
+           hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
+
+    size_t words_done = ntams - hr->bottom();
+
+    if (start < ntams) {
+      // Region was changed between remark and cleanup pauses
+      // We need to add (ntams - start) to the marked bytes
+      // for this region, and set bits for the range
+      // [ card_idx(start), card_idx(ntams) ) in the card bitmap.
+      size_t live_bytes = (ntams - start) * HeapWordSize;
+      hr->add_to_marked_bytes(live_bytes);
+
+      // Record the new top at conc count
+      hr->set_top_at_conc_mark_count(ntams);
+
+      // The setting of the bits in the card bitmap takes place below
+    }
+
+    // Mark the allocated-since-marking portion...
+    if (ntams < top) {
+      // This definitely means the region has live objects.
+      set_bit_for_region(hr);
+    }
+
+    // Now set the bits for [start, top]
+    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
+    BitMap::idx_t last_idx = _cm->card_bitmap_index_for(top);
+    set_card_bitmap_range(start_idx, last_idx);
+
+    // Set the bit for the region if it contains live data
+    if (hr->next_marked_bytes() > 0) {
+      set_bit_for_region(hr);
+    }
+
+    _total_words_done += words_done;
+    _total_used_bytes += hr->used();
+    _total_live_bytes += hr->next_marked_bytes();
+
+    return false;
+  }
+
+  size_t total_words_done() const { return _total_words_done; }
+  size_t total_live_bytes() const { return _total_live_bytes; }
+  size_t total_used_bytes() const { return _total_used_bytes; }
+};
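
The subtle part of set_card_bitmap_range() above is the inclusive-to-exclusive conversion: par_at_put_range() takes a half-open range, so the last index has to be bumped by one. A minimal standalone sketch of the same pattern, using std::vector<bool> instead of HotSpot's BitMap (only the 8-card threshold and the +1 conversion are carried over; everything else is illustrative):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Set the inclusive bit range [start_idx, last_idx] in 'bm'.
// Small ranges (up to 8 bits) use a simple loop; larger ranges use a
// bulk fill over the half-open range [start_idx, last_idx + 1).
static void set_bit_range_inclusive(std::vector<bool>& bm,
                                    std::size_t start_idx,
                                    std::size_t last_idx) {
  assert(start_idx <= last_idx && last_idx < bm.size());
  if (last_idx - start_idx <= 8) {
    for (std::size_t i = start_idx; i <= last_idx; ++i) {
      bm[i] = true;
    }
  } else {
    // Mirrors BitMap::par_at_put_range(start_idx, last_idx + 1, true),
    // which treats its upper bound as exclusive.
    std::fill(bm.begin() + start_idx, bm.begin() + last_idx + 1, true);
  }
}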
 
 class G1ParFinalCountTask: public AbstractGangTask {
 protected:
   G1CollectedHeap* _g1h;
-  CMBitMap* _bm;
+  ConcurrentMark* _cm;
+  BitMap* _actual_region_bm;
+  BitMap* _actual_card_bm;
+
   uint    _n_workers;
+
   size_t *_live_bytes;
   size_t *_used_bytes;
-  BitMap* _region_bm;
-  BitMap* _card_bm;
+
 public:
-  G1ParFinalCountTask(G1CollectedHeap* g1h, CMBitMap* bm,
-                      BitMap* region_bm, BitMap* card_bm)
-    : AbstractGangTask("G1 final counting"), _g1h(g1h),
-    _bm(bm), _region_bm(region_bm), _card_bm(card_bm),
-    _n_workers(0)
-  {
+  G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
+    : AbstractGangTask("G1 final counting"),
+      _g1h(g1h), _cm(_g1h->concurrent_mark()),
+      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
+      _n_workers(0) {
     // Use the value already set as the number of active threads
     // in the call to run_task().  Needed for the allocation of
     // _live_bytes and _used_bytes.
@@ -1539,29 +1965,32 @@
   }
 
   void work(uint worker_id) {
-    CalcLiveObjectsClosure calccl(true /*final*/,
-                                  _bm, _g1h->concurrent_mark(),
-                                  _region_bm, _card_bm);
-    calccl.no_yield();
+    assert(worker_id < _n_workers, "invariant");
+
+    FinalCountDataUpdateClosure final_update_cl(_cm,
+                                                _actual_region_bm,
+                                                _actual_card_bm);
+
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1h->heap_region_par_iterate_chunked(&calccl, worker_id,
-                                            (int) _n_workers,
+      _g1h->heap_region_par_iterate_chunked(&final_update_cl,
+                                            worker_id,
+                                            _n_workers,
                                             HeapRegion::FinalCountClaimValue);
     } else {
-      _g1h->heap_region_iterate(&calccl);
+      _g1h->heap_region_iterate(&final_update_cl);
     }
-    assert(calccl.complete(), "Shouldn't have yielded!");
-
-    assert(worker_id < _n_workers, "invariant");
-    _live_bytes[worker_id] = calccl.tot_live();
-    _used_bytes[worker_id] = calccl.tot_used();
-  }
+
+    _live_bytes[worker_id] = final_update_cl.total_live_bytes();
+    _used_bytes[worker_id] = final_update_cl.total_used_bytes();
+  }
+
   size_t live_bytes()  {
     size_t live_bytes = 0;
     for (uint i = 0; i < _n_workers; ++i)
       live_bytes += _live_bytes[i];
     return live_bytes;
   }
+
   size_t used_bytes()  {
     size_t used_bytes = 0;
     for (uint i = 0; i < _n_workers; ++i)
@@ -1724,8 +2153,7 @@
   G1ParScrubRemSetTask(G1CollectedHeap* g1h,
                        BitMap* region_bm, BitMap* card_bm) :
     AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
-    _region_bm(region_bm), _card_bm(card_bm)
-  {}
+    _region_bm(region_bm), _card_bm(card_bm) { }
 
   void work(uint worker_id) {
     if (G1CollectedHeap::use_parallel_gc_threads()) {
@@ -1772,11 +2200,10 @@
   uint n_workers;
 
   // Do counting once more with the world stopped for good measure.
-  G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
-                                        &_region_bm, &_card_bm);
+  G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
+
   if (G1CollectedHeap::use_parallel_gc_threads()) {
-    assert(g1h->check_heap_region_claim_values(
-                                               HeapRegion::InitialClaimValue),
+    assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
            "sanity check");
 
     g1h->set_par_threads();
@@ -1787,14 +2214,42 @@
     // Done with the parallel phase so reset to 0.
     g1h->set_par_threads(0);
 
-    assert(g1h->check_heap_region_claim_values(
-                                             HeapRegion::FinalCountClaimValue),
+    assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
            "sanity check");
   } else {
     n_workers = 1;
     g1_par_count_task.work(0);
   }
 
+  if (VerifyDuringGC) {
+    // Verify that the counting data accumulated during marking matches
+    // that calculated by walking the marking bitmap.
+
+    // Bitmaps to hold expected values
+    BitMap expected_region_bm(_region_bm.size(), false);
+    BitMap expected_card_bm(_card_bm.size(), false);
+
+    G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
+                                                 &_region_bm,
+                                                 &_card_bm,
+                                                 &expected_region_bm,
+                                                 &expected_card_bm);
+
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
+      g1h->set_par_threads((int)n_workers);
+      g1h->workers()->run_task(&g1_par_verify_task);
+      // Done with the parallel phase so reset to 0.
+      g1h->set_par_threads(0);
+
+      assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
+             "sanity check");
+    } else {
+      g1_par_verify_task.work(0);
+    }
+
+    guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
+  }
+
   size_t known_garbage_bytes =
     g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
   g1p->set_known_garbage_bytes(known_garbage_bytes);
@@ -1905,6 +2360,10 @@
   // races with it goes around and waits for completeCleanup to finish.
   g1h->increment_total_collections();
 
+  // We reclaimed old regions so we should calculate the sizes to make
+  // sure we update the old gen/space data.
+  g1h->g1mm()->update_sizes();
+
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(after)");
@@ -1983,12 +2442,11 @@
 class G1CMKeepAliveClosure: public OopClosure {
   G1CollectedHeap* _g1;
   ConcurrentMark*  _cm;
-  CMBitMap*        _bitMap;
  public:
-  G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
-                       CMBitMap* bitMap) :
-    _g1(g1), _cm(cm),
-    _bitMap(bitMap) {}
+  G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm) :
+    _g1(g1), _cm(cm) {
+    assert(Thread::current()->is_VM_thread(), "otherwise fix worker id");
+  }
 
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(      oop* p) { do_oop_work(p); }
@@ -2004,26 +2462,25 @@
     }
 
     if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) {
-      _bitMap->mark(addr);
+      _cm->mark_and_count(obj);
       _cm->mark_stack_push(obj);
     }
   }
 };
 
 class G1CMDrainMarkingStackClosure: public VoidClosure {
+  ConcurrentMark*               _cm;
   CMMarkStack*                  _markStack;
-  CMBitMap*                     _bitMap;
   G1CMKeepAliveClosure*         _oopClosure;
  public:
-  G1CMDrainMarkingStackClosure(CMBitMap* bitMap, CMMarkStack* markStack,
+  G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMMarkStack* markStack,
                                G1CMKeepAliveClosure* oopClosure) :
-    _bitMap(bitMap),
+    _cm(cm),
     _markStack(markStack),
-    _oopClosure(oopClosure)
-  {}
+    _oopClosure(oopClosure) { }
 
   void do_void() {
-    _markStack->drain((OopClosure*)_oopClosure, _bitMap, false);
+    _markStack->drain((OopClosure*)_oopClosure, _cm->nextMarkBitMap(), false);
   }
 };
 
@@ -2102,8 +2559,7 @@
   CMTask* _task;
  public:
   G1CMParDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task) :
-    _cm(cm), _task(task)
-  {}
+    _cm(cm), _task(task) { }
 
   void do_void() {
     do {
@@ -2242,9 +2698,9 @@
     rp->setup_policy(clear_all_soft_refs);
     assert(_markStack.isEmpty(), "mark stack should be empty");
 
-    G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
+    G1CMKeepAliveClosure g1_keep_alive(g1h, this);
     G1CMDrainMarkingStackClosure
-      g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
+      g1_drain_mark_stack(this, &_markStack, &g1_keep_alive);
 
     // We use the work gang from the G1CollectedHeap and we utilize all
     // the worker threads.
@@ -2616,18 +3072,6 @@
 // during an evacuation pause). This was a late change to the code and
 // is currently not being taken advantage of.
 
-class CMGlobalObjectClosure : public ObjectClosure {
-private:
-  ConcurrentMark* _cm;
-
-public:
-  void do_object(oop obj) {
-    _cm->deal_with_reference(obj);
-  }
-
-  CMGlobalObjectClosure(ConcurrentMark* cm) : _cm(cm) { }
-};
-
 void ConcurrentMark::deal_with_reference(oop obj) {
   if (verbose_high()) {
     gclog_or_tty->print_cr("[global] we're dealing with reference "PTR_FORMAT,
@@ -2672,6 +3116,18 @@
   }
 }
 
+class CMGlobalObjectClosure : public ObjectClosure {
+private:
+  ConcurrentMark* _cm;
+
+public:
+  void do_object(oop obj) {
+    _cm->deal_with_reference(obj);
+  }
+
+  CMGlobalObjectClosure(ConcurrentMark* cm) : _cm(cm) { }
+};
+
 void ConcurrentMark::drainAllSATBBuffers() {
   guarantee(false, "drainAllSATBBuffers(): don't call this any more");
 
@@ -2693,15 +3149,6 @@
   assert(satb_mq_set.completed_buffers_num() == 0, "invariant");
 }
 
-void ConcurrentMark::clear(oop p) {
-  assert(p != NULL && p->is_oop(), "expected an oop");
-  HeapWord* addr = (HeapWord*)p;
-  assert(addr >= _nextMarkBitMap->startWord() ||
-         addr < _nextMarkBitMap->endWord(), "in a region");
-
-  _nextMarkBitMap->clear(addr);
-}
-
 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
   // Note we are overriding the read-only view of the prev map here, via
   // the cast.
@@ -3015,6 +3462,192 @@
   }
 }
 
+// Aggregate the counting data that was constructed concurrently
+// with marking.
+class AggregateCountDataHRClosure: public HeapRegionClosure {
+  ConcurrentMark* _cm;
+  BitMap* _cm_card_bm;
+  size_t _max_task_num;
+
+ public:
+  AggregateCountDataHRClosure(ConcurrentMark *cm,
+                              BitMap* cm_card_bm,
+                              size_t max_task_num) :
+    _cm(cm), _cm_card_bm(cm_card_bm),
+    _max_task_num(max_task_num) { }
+
+  bool is_card_aligned(HeapWord* p) {
+    return ((uintptr_t(p) & (CardTableModRefBS::card_size - 1)) == 0);
+  }
+
+  bool doHeapRegion(HeapRegion* hr) {
+    if (hr->continuesHumongous()) {
+      // We will ignore these here and process them when their
+      // associated "starts humongous" region is processed.
+      // Note that we cannot rely on their associated
+      // "starts humongous" region to have their bit set to 1
+      // since, due to the region chunking in the parallel region
+      // iteration, a "continues humongous" region might be visited
+      // before its associated "starts humongous".
+      return false;
+    }
+
+    HeapWord* start = hr->bottom();
+    HeapWord* limit = hr->next_top_at_mark_start();
+    HeapWord* end = hr->end();
+
+    assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
+           err_msg("Preconditions not met - "
+                   "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
+                   "top: "PTR_FORMAT", end: "PTR_FORMAT,
+                   start, limit, hr->top(), hr->end()));
+
+    assert(hr->next_marked_bytes() == 0, "Precondition");
+
+    if (start == limit) {
+      // NTAMS of this region has not been set so nothing to do.
+      return false;
+    }
+
+    assert(is_card_aligned(start), "sanity");
+    assert(is_card_aligned(end), "sanity");
+
+    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
+    BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
+    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
+
+    // If ntams is not card aligned then we bump the index for
+    // limit so that we get the card spanning ntams.
+    if (!is_card_aligned(limit)) {
+      limit_idx += 1;
+    }
+
+    assert(limit_idx <= end_idx, "or else use atomics");
+
+    // Aggregate the "stripe" in the count data associated with hr.
+    size_t hrs_index = hr->hrs_index();
+    size_t marked_bytes = 0;
+
+    for (int i = 0; (size_t)i < _max_task_num; i += 1) {
+      size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
+      BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
+
+      // Fetch the marked_bytes in this region for task i and
+      // add it to the running total for this region.
+      marked_bytes += marked_bytes_array[hrs_index];
+
+      // Now union the bitmaps[0,max_task_num)[start_idx..limit_idx)
+      // into the global card bitmap.
+      BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
+
+      while (scan_idx < limit_idx) {
+        assert(task_card_bm->at(scan_idx) == true, "should be");
+        _cm_card_bm->set_bit(scan_idx);
+        assert(_cm_card_bm->at(scan_idx) == true, "should be");
+
+        // BitMap::get_next_one_offset() can handle the case when
+        // its left_offset parameter is greater than its right_offset
+        // parameter. It does, however, have an early exit if
+        // left_offset == right_offset. So let's limit the value
+        // passed in for left offset here.
+        BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
+        scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
+      }
+    }
+
+    // Update the marked bytes for this region.
+    hr->add_to_marked_bytes(marked_bytes);
+
+    // Now set the top at count to NTAMS.
+    hr->set_top_at_conc_mark_count(limit);
+
+    // Next heap region
+    return false;
+  }
+};
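
The per-region aggregation above reduces to two steps: sum each task's marked-bytes counter for the region, and OR each task's slice of the card bitmap into the global card bitmap. A standalone sketch of that reduction over word-sized bitmaps (the names and container types are illustrative, not HotSpot's):

#include <cstddef>
#include <cstdint>
#include <vector>

struct TaskCountData {
  std::vector<std::uint64_t> card_bm;      // per-task card bitmap (one word = 64 cards)
  std::vector<std::size_t>   marked_bytes; // per-region live bytes recorded by this task
};

// Aggregate the counting data for region 'region_index', whose cards occupy
// bitmap words [start_word, limit_word), into the global card bitmap, and
// return the total marked bytes recorded for that region across all tasks.
static std::size_t aggregate_region(const std::vector<TaskCountData>& tasks,
                                    std::vector<std::uint64_t>& global_card_bm,
                                    std::size_t region_index,
                                    std::size_t start_word,
                                    std::size_t limit_word) {
  std::size_t marked_bytes = 0;
  for (const TaskCountData& t : tasks) {
    marked_bytes += t.marked_bytes[region_index];
    for (std::size_t w = start_word; w < limit_word; ++w) {
      global_card_bm[w] |= t.card_bm[w];   // union the task's cards into the global map
    }
  }
  return marked_bytes;
}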
+
+class G1AggregateCountDataTask: public AbstractGangTask {
+protected:
+  G1CollectedHeap* _g1h;
+  ConcurrentMark* _cm;
+  BitMap* _cm_card_bm;
+  size_t _max_task_num;
+  int _active_workers;
+
+public:
+  G1AggregateCountDataTask(G1CollectedHeap* g1h,
+                           ConcurrentMark* cm,
+                           BitMap* cm_card_bm,
+                           size_t max_task_num,
+                           int n_workers) :
+    AbstractGangTask("Count Aggregation"),
+    _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
+    _max_task_num(max_task_num),
+    _active_workers(n_workers) { }
+
+  void work(uint worker_id) {
+    AggregateCountDataHRClosure cl(_cm, _cm_card_bm, _max_task_num);
+
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
+      _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
+                                            _active_workers,
+                                            HeapRegion::AggregateCountClaimValue);
+    } else {
+      _g1h->heap_region_iterate(&cl);
+    }
+  }
+};
+
+
+void ConcurrentMark::aggregate_count_data() {
+  int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+                        _g1h->workers()->active_workers() :
+                        1);
+
+  G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
+                                           _max_task_num, n_workers);
+
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
+    assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
+           "sanity check");
+    _g1h->set_par_threads(n_workers);
+    _g1h->workers()->run_task(&g1_par_agg_task);
+    _g1h->set_par_threads(0);
+
+    assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
+           "sanity check");
+    _g1h->reset_heap_region_claim_values();
+  } else {
+    g1_par_agg_task.work(0);
+  }
+}
+
+// Clear the per-worker arrays used to store the per-region counting data
+void ConcurrentMark::clear_all_count_data() {
+  // Clear the global card bitmap - it will be filled during
+  // liveness count aggregation (during remark) and the
+  // final counting task.
+  _card_bm.clear();
+
+  // Clear the global region bitmap - it will be filled as part
+  // of the final counting task.
+  _region_bm.clear();
+
+  size_t max_regions = _g1h->max_regions();
+  assert(_max_task_num != 0, "uninitialized");
+
+  for (int i = 0; (size_t) i < _max_task_num; i += 1) {
+    BitMap* task_card_bm = count_card_bitmap_for(i);
+    size_t* marked_bytes_array = count_marked_bytes_array_for(i);
+
+    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
+    assert(marked_bytes_array != NULL, "uninitialized");
+
+    memset(marked_bytes_array, 0, (max_regions * sizeof(size_t)));
+    task_card_bm->clear();
+  }
+}
+
 void ConcurrentMark::print_stats() {
   if (verbose_stats()) {
     gclog_or_tty->print_cr("---------------------------------------------------------------------");
@@ -3350,6 +3983,8 @@
 void ConcurrentMark::abort() {
   // Clear all marks to force marking thread to do nothing
   _nextMarkBitMap->clearAll();
+  // Clear the liveness counting data
+  clear_all_count_data();
   // Empty mark stack
   clear_marking_state();
   for (int i = 0; i < (int)_max_task_num; ++i) {
@@ -3402,23 +4037,15 @@
                          (_init_times.sum() + _remark_times.sum() +
                           _cleanup_times.sum())/1000.0);
   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
-                "(%8.2f s marking, %8.2f s counting).",
+                "(%8.2f s marking).",
                 cmThread()->vtime_accum(),
-                cmThread()->vtime_mark_accum(),
-                cmThread()->vtime_count_accum());
+                cmThread()->vtime_mark_accum());
 }
 
 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
   _parallel_workers->print_worker_threads_on(st);
 }
 
-// Closures
-// XXX: there seems to be a lot of code  duplication here;
-// should refactor and consolidate the shared code.
-
-// This closure is used to mark refs into the CMS generation in
-// the CMS bit map. Called at the first checkpoint.
-
 // We take a break if someone is trying to stop the world.
 bool ConcurrentMark::do_yield_check(uint worker_id) {
   if (should_yield()) {
@@ -4704,6 +5331,8 @@
 
 CMTask::CMTask(int task_id,
                ConcurrentMark* cm,
+               size_t* marked_bytes,
+               BitMap* card_bm,
                CMTaskQueue* task_queue,
                CMTaskQueueSet* task_queues)
   : _g1h(G1CollectedHeap::heap()),
@@ -4713,7 +5342,9 @@
     _task_queue(task_queue),
     _task_queues(task_queues),
     _cm_oop_closure(NULL),
-    _aborted_region(MemRegion()) {
+    _aborted_region(MemRegion()),
+    _marked_bytes_array(marked_bytes),
+    _card_bm(card_bm) {
   guarantee(task_queue != NULL, "invariant");
   guarantee(task_queues != NULL, "invariant");
 
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -84,8 +84,8 @@
   }
 
   // iteration
-  bool iterate(BitMapClosure* cl) { return _bm.iterate(cl); }
-  bool iterate(BitMapClosure* cl, MemRegion mr);
+  inline bool iterate(BitMapClosure* cl, MemRegion mr);
+  inline bool iterate(BitMapClosure* cl);
 
   // Return the address corresponding to the next marked bit at or after
   // "addr", and before "limit", if "limit" is non-NULL.  If there is no
@@ -349,10 +349,62 @@
   high_verbose       // per object verbose
 } CMVerboseLevel;
 
+class YoungList;
+
+// Root Regions are regions that are not empty at the beginning of a
+// marking cycle and which we might collect during an evacuation pause
+// while the cycle is active. Given that, during evacuation pauses, we
+// do not copy objects that are explicitly marked, what we have to do
+// for the root regions is to scan them and mark all objects reachable
+// from them. According to the SATB assumptions, we only need to visit
+// each object once during marking. So, as long as we finish this scan
+// before the next evacuation pause, we can copy the objects from the
+// root regions without having to mark them or do anything else to them.
+//
+// Currently, we only support root region scanning once (at the start
+// of the marking cycle) and the root regions are all the survivor
+// regions populated during the initial-mark pause.
+class CMRootRegions VALUE_OBJ_CLASS_SPEC {
+private:
+  YoungList*           _young_list;
+  ConcurrentMark*      _cm;
+
+  volatile bool        _scan_in_progress;
+  volatile bool        _should_abort;
+  HeapRegion* volatile _next_survivor;
+
+public:
+  CMRootRegions();
+  // We actually do most of the initialization in this method.
+  void init(G1CollectedHeap* g1h, ConcurrentMark* cm);
+
+  // Reset the claiming / scanning of the root regions.
+  void prepare_for_scan();
+
+  // Forces get_next() to return NULL so that the iteration aborts early.
+  void abort() { _should_abort = true; }
+
+  // Returns true if CM threads are actively scanning root regions,
+  // false otherwise.
+  bool scan_in_progress() { return _scan_in_progress; }
+
+  // Claim the next root region to scan atomically, or return NULL if
+  // all have been claimed.
+  HeapRegion* claim_next();
+
+  // Flag that we're done with root region scanning and notify anyone
+  // who's waiting on it. If aborted is false, assume that all regions
+  // have been claimed.
+  void scan_finished();
+
+  // If CM threads are still scanning root regions, wait until they
+  // are done. Return true if we had to wait, false otherwise.
+  bool wait_until_scan_finished();
+};
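
The implementation of claim_next() is not part of this hunk; the intent described above — each worker atomically claims a distinct root region until the list is exhausted — could be sketched as a lock-free pop like the following (std::atomic and the Region type are purely illustrative; the actual HotSpot code walks the survivor list under its own synchronization):

#include <atomic>

struct Region {
  Region* next;   // intrusive singly linked list of unclaimed root regions
};

// Illustrative claim_next(): atomically pop the head of the unclaimed list
// so each worker receives a distinct region; returns NULL once the list is
// exhausted.
static Region* claim_next(std::atomic<Region*>& next_region) {
  Region* cur = next_region.load(std::memory_order_acquire);
  while (cur != nullptr &&
         !next_region.compare_exchange_weak(cur, cur->next,
                                            std::memory_order_acq_rel,
                                            std::memory_order_acquire)) {
    // compare_exchange_weak reloads 'cur' on failure; just retry.
  }
  return cur;
}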
 
 class ConcurrentMarkThread;
 
-class ConcurrentMark: public CHeapObj {
+class ConcurrentMark : public CHeapObj {
   friend class ConcurrentMarkThread;
   friend class CMTask;
   friend class CMBitMapClosure;
@@ -386,7 +438,7 @@
 
   FreeRegionList        _cleanup_list;
 
-  // CMS marking support structures
+  // Concurrent marking support structures
   CMBitMap                _markBitMap1;
   CMBitMap                _markBitMap2;
   CMBitMapRO*             _prevMarkBitMap; // completed mark bitmap
@@ -400,6 +452,9 @@
   HeapWord*               _heap_start;
   HeapWord*               _heap_end;
 
+  // Root region tracking and claiming.
+  CMRootRegions           _root_regions;
+
   // For gray objects
   CMMarkStack             _markStack; // Grey objects behind global finger.
   CMRegionStack           _regionStack; // Grey regions behind global finger.
@@ -426,7 +481,6 @@
   WorkGangBarrierSync     _first_overflow_barrier_sync;
   WorkGangBarrierSync     _second_overflow_barrier_sync;
 
-
   // this is set by any task, when an overflow on the global data
   // structures is detected.
   volatile bool           _has_overflown;
@@ -554,9 +608,9 @@
   bool has_overflown()           { return _has_overflown; }
   void set_has_overflown()       { _has_overflown = true; }
   void clear_has_overflown()     { _has_overflown = false; }
+  bool restart_for_overflow()    { return _restart_for_overflow; }
 
   bool has_aborted()             { return _has_aborted; }
-  bool restart_for_overflow()    { return _restart_for_overflow; }
 
   // Methods to enter the two overflow sync barriers
   void enter_first_sync_barrier(int task_num);
@@ -578,6 +632,27 @@
     }
   }
 
+  // Live Data Counting data structures...
+  // These data structures are initialized at the start of
+  // marking. They are written to while marking is active.
+  // They are aggregated during remark; the aggregated values
+  // are then used to populate the _region_bm, _card_bm, and
+  // the total live bytes, which are subsequently updated
+  // during cleanup.
+
+  // An array of bitmaps (one bit map per task). Each bitmap
+  // is used to record the cards spanned by the live objects
+  // marked by that task/worker.
+  BitMap*  _count_card_bitmaps;
+
+  // Used to record the number of marked live bytes
+  // (for each region, by worker thread).
+  size_t** _count_marked_bytes;
+
+  // Card index of the bottom of the G1 heap. Used for biasing indices into
+  // the card bitmaps.
+  intptr_t _heap_bottom_card_num;
+
 public:
   // Manipulation of the global mark stack.
   // Notice that the first mark_stack_push is CAS-based, whereas the
@@ -671,6 +746,8 @@
   // Returns true if there are any aborted memory regions.
   bool has_aborted_regions();
 
+  CMRootRegions* root_regions() { return &_root_regions; }
+
   bool concurrent_marking_in_progress() {
     return _concurrent_marking_in_progress;
   }
@@ -703,6 +780,7 @@
 
   ConcurrentMark(ReservedSpace rs, int max_regions);
   ~ConcurrentMark();
+
   ConcurrentMarkThread* cmThread() { return _cmThread; }
 
   CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
@@ -720,8 +798,17 @@
   // G1CollectedHeap
 
   // This notifies CM that a root during initial-mark needs to be
-  // grayed. It is MT-safe.
-  inline void grayRoot(oop obj, size_t word_size);
+  // grayed. It is MT-safe. word_size is the size of the object in
+  // words. It is passed explicitly as sometimes we cannot calculate
+  // it from the given object because it might be in an inconsistent
+  // state (e.g., in to-space and being copied). So the caller is
+  // responsible for dealing with this issue (e.g., get the size from
+  // the from-space image when the to-space image might be
+  // inconsistent) and always passing the size. hr is the region that
+  // contains the object and it's passed optionally from callers who
+  // might already have it (no point in recalculating it).
+  inline void grayRoot(oop obj, size_t word_size,
+                       uint worker_id, HeapRegion* hr = NULL);
 
   // It's used during evacuation pauses to gray a region, if
   // necessary, and it's MT-safe. It assumes that the caller has
@@ -772,6 +859,13 @@
   void checkpointRootsInitialPre();
   void checkpointRootsInitialPost();
 
+  // Scan all the root regions and mark everything reachable from
+  // them.
+  void scanRootRegions();
+
+  // Scan a single root region and mark everything reachable from it.
+  void scanRootRegion(HeapRegion* hr, uint worker_id);
+
   // Do concurrent phase of marking, to a tentative transitive closure.
   void markFromRoots();
 
@@ -781,15 +875,13 @@
 
   void checkpointRootsFinal(bool clear_all_soft_refs);
   void checkpointRootsFinalWork();
-  void calcDesiredRegions();
   void cleanup();
   void completeCleanup();
 
   // Mark in the previous bitmap.  NB: this is usually read-only, so use
   // this carefully!
   inline void markPrev(oop p);
-  inline void markNext(oop p);
-  void clear(oop p);
+
   // Clears marks for all objects in the given range, for the prev,
   // next, or both bitmaps.  NB: the previous bitmap is usually
   // read-only, so use this carefully!
@@ -913,6 +1005,114 @@
   bool verbose_high() {
     return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
   }
+
+  // Counting data structure accessors
+
+  // Returns the card number of the bottom of the G1 heap.
+  // Used in biasing indices into accounting card bitmaps.
+  intptr_t heap_bottom_card_num() const {
+    return _heap_bottom_card_num;
+  }
+
+  // Returns the card bitmap for a given task or worker id.
+  BitMap* count_card_bitmap_for(uint worker_id) {
+    assert(0 <= worker_id && worker_id < _max_task_num, "oob");
+    assert(_count_card_bitmaps != NULL, "uninitialized");
+    BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
+    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
+    return task_card_bm;
+  }
+
+  // Returns the array containing the marked bytes for each region,
+  // for the given worker or task id.
+  size_t* count_marked_bytes_array_for(uint worker_id) {
+    assert(0 <= worker_id && worker_id < _max_task_num, "oob");
+    assert(_count_marked_bytes != NULL, "uninitialized");
+    size_t* marked_bytes_array = _count_marked_bytes[worker_id];
+    assert(marked_bytes_array != NULL, "uninitialized");
+    return marked_bytes_array;
+  }
+
+  // Returns the index in the liveness accounting card table bitmap
+  // for the given address
+  inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);
+
+  // Counts the size of the given memory region in the given
+  // marked_bytes array slot for the given HeapRegion.
+  // Sets the bits in the given card bitmap that are associated with the
+  // cards that are spanned by the memory region.
+  inline void count_region(MemRegion mr, HeapRegion* hr,
+                           size_t* marked_bytes_array,
+                           BitMap* task_card_bm);
+
+  // Counts the given memory region in the task/worker counting
+  // data structures for the given worker id.
+  inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);
+
+  // Counts the given memory region in the task/worker counting
+  // data structures for the given worker id.
+  inline void count_region(MemRegion mr, uint worker_id);
+
+  // Counts the given object in the given task/worker counting
+  // data structures.
+  inline void count_object(oop obj, HeapRegion* hr,
+                           size_t* marked_bytes_array,
+                           BitMap* task_card_bm);
+
+  // Counts the given object in the task/worker counting data
+  // structures for the given worker id.
+  inline void count_object(oop obj, HeapRegion* hr, uint worker_id);
+
+  // Attempts to mark the given object and, if successful, counts
+  // the object in the given task/worker counting structures.
+  inline bool par_mark_and_count(oop obj, HeapRegion* hr,
+                                 size_t* marked_bytes_array,
+                                 BitMap* task_card_bm);
+
+  // Attempts to mark the given object and, if successful, counts
+  // the object in the task/worker counting structures for the
+  // given worker id.
+  inline bool par_mark_and_count(oop obj, size_t word_size,
+                                 HeapRegion* hr, uint worker_id);
+
+  // Attempts to mark the given object and, if successful, counts
+  // the object in the task/worker counting structures for the
+  // given worker id.
+  inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);
+
+  // Similar to the above routine but we don't know the heap region that
+  // contains the object to be marked/counted, which this routine looks up.
+  inline bool par_mark_and_count(oop obj, uint worker_id);
+
+  // Similar to the above routine but there are times when we cannot
+  // safely calculate the size of obj due to races and we, therefore,
+  // pass the size in as a parameter. It is the caller's responsibility
+  // to ensure that the size passed in for obj is valid.
+  inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
+
+  // Unconditionally mark the given object, and unconditionally count
+  // the object in the counting structures for worker id 0.
+  // Should *not* be called from parallel code.
+  inline bool mark_and_count(oop obj, HeapRegion* hr);
+
+  // Similar to the above routine but we don't know the heap region that
+  // contains the object to be marked/counted, which this routine looks up.
+  // Should *not* be called from parallel code.
+  inline bool mark_and_count(oop obj);
+
+protected:
+  // Clear all the per-task bitmaps and arrays used to store the
+  // counting data.
+  void clear_all_count_data();
+
+  // Aggregates the counting data for each worker/task
+  // that was constructed while marking. Also sets
+  // the amount of marked bytes for each region and
+  // the top at concurrent mark count.
+  void aggregate_count_data();
+
+  // Verification routine
+  void verify_count_data();
 };
 
 // A class representing a marking task.
@@ -1031,6 +1231,12 @@
 
   TruncatedSeq                _marking_step_diffs_ms;
 
+  // Counting data structures. Embedding the task's marked_bytes_array
+  // and card bitmap into the actual task saves having to go through
+  // the ConcurrentMark object.
+  size_t*                     _marked_bytes_array;
+  BitMap*                     _card_bm;
+
   // LOTS of statistics related with this task
 #if _MARKING_STATS_
   NumberSeq                   _all_clock_intervals_ms;
@@ -1196,6 +1402,7 @@
   }
 
   CMTask(int task_num, ConcurrentMark *cm,
+         size_t* marked_bytes, BitMap* card_bm,
          CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
 
   // it prints statistics associated with this task
--- a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -28,6 +28,214 @@
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 
+// Returns the index in the liveness accounting card bitmap
+// for the given address
+inline BitMap::idx_t ConcurrentMark::card_bitmap_index_for(HeapWord* addr) {
+  // Below, the term "card num" means the result of shifting an address
+  // by the card shift -- address 0 corresponds to card number 0.  One
+  // must subtract the card num of the bottom of the heap to obtain a
+  // card table index.
+
+  intptr_t card_num = intptr_t(uintptr_t(addr) >> CardTableModRefBS::card_shift);
+  return card_num - heap_bottom_card_num();
+}
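
As a quick sanity check of the biasing above: with the usual 512-byte cards (a card shift of 9) and a heap bottom at, say, 0x80000000, an address 0x80000A00 lies 0xA00 bytes — five cards — past the bottom, so its bitmap index is 5. The same arithmetic as a standalone helper (the constants in the comments are illustrative):

#include <cstdint>

// Bias the card number of 'addr' by the card number of the heap bottom to
// obtain an index into a card bitmap whose bit 0 covers the bottom card.
static std::intptr_t card_index_for(std::uintptr_t addr,
                                    std::uintptr_t heap_bottom,
                                    unsigned card_shift /* 9 => 512-byte cards */) {
  std::intptr_t card_num        = static_cast<std::intptr_t>(addr >> card_shift);
  std::intptr_t bottom_card_num = static_cast<std::intptr_t>(heap_bottom >> card_shift);
  return card_num - bottom_card_num;
}

// card_index_for(0x80000A00, 0x80000000, 9) == 5
// (0xA00 bytes == 5 * 512-byte cards)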
+
+// Counts the given memory region in the given task/worker
+// counting data structures.
+inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
+                                         size_t* marked_bytes_array,
+                                         BitMap* task_card_bm) {
+  G1CollectedHeap* g1h = _g1h;
+  HeapWord* start = mr.start();
+  HeapWord* last = mr.last();
+  size_t region_size_bytes = mr.byte_size();
+  size_t index = hr->hrs_index();
+
+  assert(!hr->continuesHumongous(), "should not be HC region");
+  assert(hr == g1h->heap_region_containing(start), "sanity");
+  assert(hr == g1h->heap_region_containing(mr.last()), "sanity");
+  assert(marked_bytes_array != NULL, "pre-condition");
+  assert(task_card_bm != NULL, "pre-condition");
+
+  // Add to the task local marked bytes for this region.
+  marked_bytes_array[index] += region_size_bytes;
+
+  BitMap::idx_t start_idx = card_bitmap_index_for(start);
+  BitMap::idx_t last_idx = card_bitmap_index_for(last);
+
+  // The card bitmap is task/worker specific => no need to use 'par' routines.
+  // Set bits in the inclusive bit range [start_idx, last_idx].
+  //
+  // For small ranges use a simple loop; otherwise use set_range().
+  // The range covers the cards that are spanned by the object/region,
+  // so 8 cards allow objects/regions of up to 4K to be handled
+  // using the loop.
+  if ((last_idx - start_idx) <= 8) {
+    for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
+      task_card_bm->set_bit(i);
+    }
+  } else {
+    assert(last_idx < task_card_bm->size(), "sanity");
+    // Note: BitMap::set_range() is exclusive.
+    task_card_bm->set_range(start_idx, last_idx+1);
+  }
+}
+
+// Counts the given memory region in the task/worker counting
+// data structures for the given worker id.
+inline void ConcurrentMark::count_region(MemRegion mr,
+                                         HeapRegion* hr,
+                                         uint worker_id) {
+  size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
+  BitMap* task_card_bm = count_card_bitmap_for(worker_id);
+  count_region(mr, hr, marked_bytes_array, task_card_bm);
+}
+
+// Counts the given memory region, which may be a single object, in the
+// task/worker counting data structures for the given worker id.
+inline void ConcurrentMark::count_region(MemRegion mr, uint worker_id) {
+  HeapWord* addr = mr.start();
+  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
+  count_region(mr, hr, worker_id);
+}
+
+// Counts the given object in the given task/worker counting data structures.
+inline void ConcurrentMark::count_object(oop obj,
+                                         HeapRegion* hr,
+                                         size_t* marked_bytes_array,
+                                         BitMap* task_card_bm) {
+  MemRegion mr((HeapWord*)obj, obj->size());
+  count_region(mr, hr, marked_bytes_array, task_card_bm);
+}
+
+// Counts the given object in the task/worker counting data
+// structures for the given worker id.
+inline void ConcurrentMark::count_object(oop obj,
+                                         HeapRegion* hr,
+                                         uint worker_id) {
+  size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
+  BitMap* task_card_bm = count_card_bitmap_for(worker_id);
+  HeapWord* addr = (HeapWord*) obj;
+  count_object(obj, hr, marked_bytes_array, task_card_bm);
+}
+
+// Attempts to mark the given object and, if successful, counts
+// the object in the given task/worker counting structures.
+inline bool ConcurrentMark::par_mark_and_count(oop obj,
+                                               HeapRegion* hr,
+                                               size_t* marked_bytes_array,
+                                               BitMap* task_card_bm) {
+  HeapWord* addr = (HeapWord*)obj;
+  if (_nextMarkBitMap->parMark(addr)) {
+    // Update the task specific count data for the object.
+    count_object(obj, hr, marked_bytes_array, task_card_bm);
+    return true;
+  }
+  return false;
+}
+
+// Attempts to mark the given object and, if successful, counts
+// the object in the task/worker counting structures for the
+// given worker id.
+inline bool ConcurrentMark::par_mark_and_count(oop obj,
+                                               size_t word_size,
+                                               HeapRegion* hr,
+                                               uint worker_id) {
+  HeapWord* addr = (HeapWord*)obj;
+  if (_nextMarkBitMap->parMark(addr)) {
+    MemRegion mr(addr, word_size);
+    count_region(mr, hr, worker_id);
+    return true;
+  }
+  return false;
+}
+
+// Attempts to mark the given object and, if successful, counts
+// the object in the task/worker counting structures for the
+// given worker id.
+inline bool ConcurrentMark::par_mark_and_count(oop obj,
+                                               HeapRegion* hr,
+                                               uint worker_id) {
+  HeapWord* addr = (HeapWord*)obj;
+  if (_nextMarkBitMap->parMark(addr)) {
+    // Update the task specific count data for the object.
+    count_object(obj, hr, worker_id);
+    return true;
+  }
+  return false;
+}
+
+// As above - but we don't know the heap region containing the
+// object, so this routine looks it up.
+inline bool ConcurrentMark::par_mark_and_count(oop obj, uint worker_id) {
+  HeapWord* addr = (HeapWord*)obj;
+  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
+  return par_mark_and_count(obj, hr, worker_id);
+}
+
+// Similar to the above routine but we already know the size, in words, of
+// the object that we wish to mark/count
+inline bool ConcurrentMark::par_mark_and_count(oop obj,
+                                               size_t word_size,
+                                               uint worker_id) {
+  HeapWord* addr = (HeapWord*)obj;
+  if (_nextMarkBitMap->parMark(addr)) {
+    // Update the task specific count data for the object.
+    MemRegion mr(addr, word_size);
+    count_region(mr, worker_id);
+    return true;
+  }
+  return false;
+}
+
+// Unconditionally mark the given object, and unconditionally count
+// the object in the counting structures for worker id 0.
+// Should *not* be called from parallel code.
+inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) {
+  HeapWord* addr = (HeapWord*)obj;
+  _nextMarkBitMap->mark(addr);
+  // Update the task specific count data for the object.
+  count_object(obj, hr, 0 /* worker_id */);
+  return true;
+}
+
+// As above - but we don't have the heap region containing the
+// object, so we have to look it up.
+inline bool ConcurrentMark::mark_and_count(oop obj) {
+  HeapWord* addr = (HeapWord*)obj;
+  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
+  return mark_and_count(obj, hr);
+}
+
+inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
+  HeapWord* start_addr = MAX2(startWord(), mr.start());
+  HeapWord* end_addr = MIN2(endWord(), mr.end());
+
+  if (end_addr > start_addr) {
+    // Right-open interval [start-offset, end-offset).
+    BitMap::idx_t start_offset = heapWordToOffset(start_addr);
+    BitMap::idx_t end_offset = heapWordToOffset(end_addr);
+
+    start_offset = _bm.get_next_one_offset(start_offset, end_offset);
+    while (start_offset < end_offset) {
+      HeapWord* obj_addr = offsetToHeapWord(start_offset);
+      oop obj = (oop) obj_addr;
+      if (!cl->do_bit(start_offset)) {
+        return false;
+      }
+      HeapWord* next_addr = MIN2(obj_addr + obj->size(), end_addr);
+      BitMap::idx_t next_offset = heapWordToOffset(next_addr);
+      start_offset = _bm.get_next_one_offset(next_offset, end_offset);
+    }
+  }
+  return true;
+}
+
+inline bool CMBitMapRO::iterate(BitMapClosure* cl) {
+  MemRegion mr(startWord(), sizeInWords());
+  return iterate(cl, mr);
+}
+
 inline void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
@@ -84,7 +292,7 @@
 
   HeapWord* objAddr = (HeapWord*) obj;
   assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
- if (_g1h->is_in_g1_reserved(objAddr)) {
+  if (_g1h->is_in_g1_reserved(objAddr)) {
     assert(obj != NULL, "null check is implicit");
     if (!_nextMarkBitMap->isMarked(objAddr)) {
       // Only get the containing region if the object is not marked on the
@@ -98,9 +306,9 @@
         }
 
         // we need to mark it first
-        if (_nextMarkBitMap->parMark(objAddr)) {
+        if (_cm->par_mark_and_count(obj, hr, _marked_bytes_array, _card_bm)) {
           // No OrderAccess:store_load() is needed. It is implicit in the
-          // CAS done in parMark(objAddr) above
+          // CAS done in CMBitMap::parMark() call in the routine above.
           HeapWord* global_finger = _cm->finger();
 
 #if _CHECK_BOTH_FINGERS_
@@ -160,25 +368,20 @@
   ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
 }
 
-inline void ConcurrentMark::markNext(oop p) {
-  assert(!_nextMarkBitMap->isMarked((HeapWord*) p), "sanity");
-  _nextMarkBitMap->mark((HeapWord*) p);
-}
-
-inline void ConcurrentMark::grayRoot(oop obj, size_t word_size) {
+inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
+                                     uint worker_id, HeapRegion* hr) {
+  assert(obj != NULL, "pre-condition");
   HeapWord* addr = (HeapWord*) obj;
+  if (hr == NULL) {
+    hr = _g1h->heap_region_containing_raw(addr);
+  } else {
+    assert(hr->is_in(addr), "pre-condition");
+  }
+  assert(hr != NULL, "sanity");
+  // Given that we're looking for a region that contains an object
+  // header, it's impossible to get back a HC region.
+  assert(!hr->continuesHumongous(), "sanity");
 
-  // Currently we don't do anything with word_size but we will use it
-  // in the very near future in the liveness calculation piggy-backing
-  // changes.
-
-#ifdef ASSERT
-  HeapRegion* hr = _g1h->heap_region_containing(addr);
-  assert(hr != NULL, "sanity");
-  assert(!hr->is_survivor(), "should not allocate survivors during IM");
-  assert(addr < hr->next_top_at_mark_start(),
-         err_msg("addr: "PTR_FORMAT" hr: "HR_FORMAT" NTAMS: "PTR_FORMAT,
-                 addr, HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start()));
   // We cannot assert that word_size == obj->size() given that obj
   // might not be in a consistent state (another thread might be in
   // the process of copying it). So the best thing we can do is to
@@ -188,10 +391,11 @@
          err_msg("size: "SIZE_FORMAT" capacity: "SIZE_FORMAT" "HR_FORMAT,
                  word_size * HeapWordSize, hr->capacity(),
                  HR_FORMAT_PARAMS(hr)));
-#endif // ASSERT
 
-  if (!_nextMarkBitMap->isMarked(addr)) {
-    _nextMarkBitMap->parMark(addr);
+  if (addr < hr->next_top_at_mark_start()) {
+    if (!_nextMarkBitMap->isMarked(addr)) {
+      par_mark_and_count(obj, word_size, hr, worker_id);
+    }
   }
 }
 
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,9 +44,7 @@
   _started(false),
   _in_progress(false),
   _vtime_accum(0.0),
-  _vtime_mark_accum(0.0),
-  _vtime_count_accum(0.0)
-{
+  _vtime_mark_accum(0.0) {
   create_and_start();
 }
 
@@ -94,9 +92,36 @@
       ResourceMark rm;
       HandleMark   hm;
       double cycle_start = os::elapsedVTime();
-      double mark_start_sec = os::elapsedTime();
       char verbose_str[128];
 
+      // We have to ensure that we finish scanning the root regions
+      // before the next GC takes place. To ensure this we have to
+      // make sure that we do not join the STS until the root regions
+      // have been scanned. If we did, then it's possible that a
+      // subsequent GC could block us from joining the STS and proceed
+      // without the root regions having been scanned, which would be a
+      // correctness issue.
+
+      double scan_start = os::elapsedTime();
+      if (!cm()->has_aborted()) {
+        if (PrintGC) {
+          gclog_or_tty->date_stamp(PrintGCDateStamps);
+          gclog_or_tty->stamp(PrintGCTimeStamps);
+          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
+        }
+
+        _cm->scanRootRegions();
+
+        double scan_end = os::elapsedTime();
+        if (PrintGC) {
+          gclog_or_tty->date_stamp(PrintGCDateStamps);
+          gclog_or_tty->stamp(PrintGCTimeStamps);
+          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf]",
+                                 scan_end - scan_start);
+        }
+      }
+
+      double mark_start_sec = os::elapsedTime();
       if (PrintGC) {
         gclog_or_tty->date_stamp(PrintGCDateStamps);
         gclog_or_tty->stamp(PrintGCTimeStamps);
@@ -130,7 +155,7 @@
 
           CMCheckpointRootsFinalClosure final_cl(_cm);
           sprintf(verbose_str, "GC remark");
-          VM_CGC_Operation op(&final_cl, verbose_str);
+          VM_CGC_Operation op(&final_cl, verbose_str, true /* needs_pll */);
           VMThread::execute(&op);
         }
         if (cm()->restart_for_overflow() &&
@@ -148,36 +173,12 @@
         }
       } while (cm()->restart_for_overflow());
 
-      double counting_start_time = os::elapsedVTime();
-      if (!cm()->has_aborted()) {
-        double count_start_sec = os::elapsedTime();
-        if (PrintGC) {
-          gclog_or_tty->date_stamp(PrintGCDateStamps);
-          gclog_or_tty->stamp(PrintGCTimeStamps);
-          gclog_or_tty->print_cr("[GC concurrent-count-start]");
-        }
-
-        _sts.join();
-        _cm->calcDesiredRegions();
-        _sts.leave();
-
-        if (!cm()->has_aborted()) {
-          double count_end_sec = os::elapsedTime();
-          if (PrintGC) {
-            gclog_or_tty->date_stamp(PrintGCDateStamps);
-            gclog_or_tty->stamp(PrintGCTimeStamps);
-            gclog_or_tty->print_cr("[GC concurrent-count-end, %1.7lf]",
-                                   count_end_sec - count_start_sec);
-          }
-        }
-      }
-
       double end_time = os::elapsedVTime();
-      _vtime_count_accum += (end_time - counting_start_time);
       // Update the total virtual time before doing this, since it will try
       // to measure it to get the vtime for this marking.  We purposely
       // neglect the presumably-short "completeCleanup" phase here.
       _vtime_accum = (end_time - _vtime_start);
+
       if (!cm()->has_aborted()) {
         if (g1_policy->adaptive_young_list_length()) {
           double now = os::elapsedTime();
@@ -188,7 +189,7 @@
 
         CMCleanUp cl_cl(_cm);
         sprintf(verbose_str, "GC cleanup");
-        VM_CGC_Operation op(&cl_cl, verbose_str);
+        VM_CGC_Operation op(&cl_cl, verbose_str, false /* needs_pll */);
         VMThread::execute(&op);
       } else {
         // We don't want to update the marking status if a GC pause
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,6 @@
   double _vtime_accum;  // Accumulated virtual time.
 
   double _vtime_mark_accum;
-  double _vtime_count_accum;
 
  public:
   virtual void run();
@@ -69,8 +68,6 @@
   double vtime_accum();
   // Marking virtual time so far
   double vtime_mark_accum();
-  // Counting virtual time so far.
-  double vtime_count_accum() { return _vtime_count_accum; }
 
   ConcurrentMark* cm()     { return _cm; }
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -174,13 +174,10 @@
   }
 };
 
-YoungList::YoungList(G1CollectedHeap* g1h)
-  : _g1h(g1h), _head(NULL),
-    _length(0),
-    _last_sampled_rs_lengths(0),
-    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
-{
-  guarantee( check_list_empty(false), "just making sure..." );
+YoungList::YoungList(G1CollectedHeap* g1h) :
+    _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
+    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
+  guarantee(check_list_empty(false), "just making sure...");
 }
 
 void YoungList::push_region(HeapRegion *hr) {
@@ -961,7 +958,7 @@
         should_try_gc = false;
       } else {
         // Read the GC count while still holding the Heap_lock.
-        gc_count_before = SharedHeap::heap()->total_collections();
+        gc_count_before = total_collections();
         should_try_gc = true;
       }
     }
@@ -979,7 +976,7 @@
         // failed to allocate. No point in trying to allocate
         // further. We'll just return NULL.
         MutexLockerEx x(Heap_lock);
-        *gc_count_before_ret = SharedHeap::heap()->total_collections();
+        *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {
@@ -996,7 +993,7 @@
     // iteration (after taking the Heap_lock).
     result = _mutator_alloc_region.attempt_allocation(word_size,
                                                       false /* bot_updates */);
-    if (result != NULL ){
+    if (result != NULL) {
       return result;
     }
 
@@ -1029,6 +1026,16 @@
   assert(isHumongous(word_size), "attempt_allocation_humongous() "
          "should only be called for humongous allocations");
 
+  // Humongous objects can exhaust the heap quickly, so we should check if we
+  // need to start a marking cycle at each humongous object allocation. We do
+  // the check before we do the actual allocation. The reason for doing it
+  // before the allocation is that we avoid having to keep track of the newly
+  // allocated memory while we do a GC.
+  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
+                                           word_size)) {
+    collect(GCCause::_g1_humongous_allocation);
+  }
+
   // We will loop until a) we manage to successfully perform the
   // allocation or b) we successfully schedule a collection which
   // fails to perform the allocation. b) is the only case when we'll
@@ -1053,7 +1060,7 @@
         should_try_gc = false;
       } else {
         // Read the GC count while still holding the Heap_lock.
-        gc_count_before = SharedHeap::heap()->total_collections();
+        gc_count_before = total_collections();
         should_try_gc = true;
       }
     }
@@ -1075,7 +1082,7 @@
         // failed to allocate. No point in trying to allocate
         // further. We'll just return NULL.
         MutexLockerEx x(Heap_lock);
-        *gc_count_before_ret = SharedHeap::heap()->total_collections();
+        *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {
@@ -1111,7 +1118,11 @@
     return _mutator_alloc_region.attempt_allocation_locked(word_size,
                                                       false /* bot_updates */);
   } else {
-    return humongous_obj_allocate(word_size);
+    HeapWord* result = humongous_obj_allocate(word_size);
+    if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
+      g1_policy()->set_initiate_conc_mark_if_possible();
+    }
+    return result;
   }
 
   ShouldNotReachHere();
@@ -1228,9 +1239,7 @@
   SvcGCMarker sgcm(SvcGCMarker::FULL);
   ResourceMark rm;
 
-  if (PrintHeapAtGC) {
-    Universe::print_heap_before_gc();
-  }
+  print_heap_before_gc();
 
   HRSPhaseSetter x(HRSPhaseFullGC);
   verify_region_sets_optional();
@@ -1257,7 +1266,18 @@
     double start = os::elapsedTime();
     g1_policy()->record_full_collection_start();
 
+    // Note: When we have a more flexible GC logging framework that
+    // allows us to add optional attributes to a GC log record we
+    // could consider timing and reporting how long we wait in the
+    // following two methods.
     wait_while_free_regions_coming();
+    // If we start the compaction before the CM threads finish
+    // scanning the root regions, we might trip them up as we'll
+    // be moving objects / updating references. So let's wait until
+    // they are done. Telling them to abort should make them complete
+    // early.
+    _cm->root_regions()->abort();
+    _cm->root_regions()->wait_until_scan_finished();
     append_secondary_free_list_if_not_empty_with_lock();
 
     gc_prologue(true);
@@ -1286,7 +1306,8 @@
     ref_processor_cm()->verify_no_references_recorded();
 
     // Abandon current iterations of concurrent marking and concurrent
-    // refinement, if any are in progress.
+    // refinement, if any are in progress. We have to do this before
+    // wait_until_scan_finished() below.
     concurrent_mark()->abort();
 
     // Make sure we'll choose a new allocation region afterwards.
@@ -1470,9 +1491,7 @@
   _hrs.verify_optional();
   verify_region_sets_optional();
 
-  if (PrintHeapAtGC) {
-    Universe::print_heap_after_gc();
-  }
+  print_heap_after_gc();
   g1mm()->update_sizes();
   post_full_gc_dump();
 
@@ -2293,9 +2312,12 @@
 }
 
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
-  return
-    ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
-     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
+  switch (cause) {
+    case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
+    case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
+    case GCCause::_g1_humongous_allocation: return true;
+    default:                                return false;
+  }
 }
 
 #ifndef PRODUCT
@@ -2389,47 +2411,68 @@
 }
 
 void G1CollectedHeap::collect(GCCause::Cause cause) {
-  // The caller doesn't have the Heap_lock
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
+  assert_heap_not_locked();
 
   unsigned int gc_count_before;
   unsigned int full_gc_count_before;
-  {
-    MutexLocker ml(Heap_lock);
-
-    // Read the GC count while holding the Heap_lock
-    gc_count_before = SharedHeap::heap()->total_collections();
-    full_gc_count_before = SharedHeap::heap()->total_full_collections();
-  }
-
-  if (should_do_concurrent_full_gc(cause)) {
-    // Schedule an initial-mark evacuation pause that will start a
-    // concurrent cycle. We're setting word_size to 0 which means that
-    // we are not requesting a post-GC allocation.
-    VM_G1IncCollectionPause op(gc_count_before,
-                               0,     /* word_size */
-                               true,  /* should_initiate_conc_mark */
-                               g1_policy()->max_pause_time_ms(),
-                               cause);
-    VMThread::execute(&op);
-  } else {
-    if (cause == GCCause::_gc_locker
-        DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
-
-      // Schedule a standard evacuation pause. We're setting word_size
-      // to 0 which means that we are not requesting a post-GC allocation.
+  bool retry_gc;
+
+  do {
+    retry_gc = false;
+
+    {
+      MutexLocker ml(Heap_lock);
+
+      // Read the GC count while holding the Heap_lock
+      gc_count_before = total_collections();
+      full_gc_count_before = total_full_collections();
+    }
+
+    if (should_do_concurrent_full_gc(cause)) {
+      // Schedule an initial-mark evacuation pause that will start a
+      // concurrent cycle. We're setting word_size to 0 which means that
+      // we are not requesting a post-GC allocation.
       VM_G1IncCollectionPause op(gc_count_before,
                                  0,     /* word_size */
-                                 false, /* should_initiate_conc_mark */
+                                 true,  /* should_initiate_conc_mark */
                                  g1_policy()->max_pause_time_ms(),
                                  cause);
+
       VMThread::execute(&op);
+      if (!op.pause_succeeded()) {
+        if (full_gc_count_before == total_full_collections()) {
+          retry_gc = op.should_retry_gc();
+        } else {
+          // A Full GC happened while we were trying to schedule the
+          // initial-mark GC. No point in starting a new cycle given
+          // that the whole heap was collected anyway.
+        }
+
+        if (retry_gc) {
+          if (GC_locker::is_active_and_needs_gc()) {
+            GC_locker::stall_until_clear();
+          }
+        }
+      }
     } else {
-      // Schedule a Full GC.
-      VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
-      VMThread::execute(&op);
+      if (cause == GCCause::_gc_locker
+          DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
+
+        // Schedule a standard evacuation pause. We're setting word_size
+        // to 0 which means that we are not requesting a post-GC allocation.
+        VM_G1IncCollectionPause op(gc_count_before,
+                                   0,     /* word_size */
+                                   false, /* should_initiate_conc_mark */
+                                   g1_policy()->max_pause_time_ms(),
+                                   cause);
+        VMThread::execute(&op);
+      } else {
+        // Schedule a Full GC.
+        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
+        VMThread::execute(&op);
+      }
     }
-  }
+  } while (retry_gc);
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
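
collect() now loops instead of issuing a single VM operation: the GC counts are re-read under the Heap_lock on every iteration, a failed initial-mark request is retried only if no Full GC happened in the meantime, and the thread stalls on the GC locker before retrying. A compressed, self-contained sketch of that control flow (the helpers here are toy stand-ins, not VM_G1IncCollectionPause or GC_locker):

#include <cstdio>

// Toy stand-ins for the pieces involved; purely illustrative, not VM APIs.
struct PauseOp {
  bool pause_succeeded;
  bool should_retry_gc;
};

static unsigned g_total_full_collections = 0;
static bool     g_gc_locker_active       = false;

static PauseOp execute_initial_mark_pause() {
  // In the VM this is VMThread::execute() on a VM_G1IncCollectionPause;
  // here we just pretend the pause was refused and asked to be retried.
  return PauseOp{ /*pause_succeeded=*/ false, /*should_retry_gc=*/ true };
}

static void stall_until_gc_locker_clear() { g_gc_locker_active = false; }

// Mirrors the shape of the new retry loop in G1CollectedHeap::collect().
void collect_concurrent_cycle() {
  bool retry_gc;
  int  attempts = 0;
  do {
    retry_gc = false;

    // In the VM the counts are re-read under the Heap_lock each iteration.
    unsigned full_gc_count_before = g_total_full_collections;

    PauseOp op = execute_initial_mark_pause();
    if (!op.pause_succeeded) {
      if (full_gc_count_before == g_total_full_collections) {
        // No Full GC intervened, so honour the operation's retry request.
        retry_gc = op.should_retry_gc;
      }
      // Otherwise the whole heap was just collected; starting a new
      // cycle would be pointless, so give up quietly.

      if (retry_gc && g_gc_locker_active) {
        stall_until_gc_locker_clear();   // wait for the GC locker first
      }
    }
  } while (retry_gc && ++attempts < 3);  // bounded here only for the demo
}

int main() {
  collect_concurrent_cycle();
  std::puts("done");
  return 0;
}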
@@ -3130,12 +3173,12 @@
 
     // We apply the relevant closures to all the oops in the
     // system dictionary, the string table and the code cache.
-    const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+    const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
 
     process_strong_roots(true,      // activate StrongRootsScope
                          true,      // we set "collecting perm gen" to true,
                                     // so we don't reset the dirty cards in the perm gen.
-                         SharedHeap::ScanningOption(so),  // roots scanning options
+                         ScanningOption(so),  // roots scanning options
                          &rootsCl,
                          &blobsCl,
                          &rootsCl);
@@ -3406,16 +3449,6 @@
   }
 }
 
-double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
-                                                       bool young) {
-  return _g1_policy->predict_region_elapsed_time_ms(hr, young);
-}
-
-void G1CollectedHeap::check_if_region_is_too_expensive(double
-                                                           predicted_time_ms) {
-  _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
-}
-
 size_t G1CollectedHeap::pending_card_num() {
   size_t extra_cards = 0;
   JavaThread *curr = Threads::first();
@@ -3537,27 +3570,31 @@
   SvcGCMarker sgcm(SvcGCMarker::MINOR);
   ResourceMark rm;
 
-  if (PrintHeapAtGC) {
-    Universe::print_heap_before_gc();
-  }
+  print_heap_before_gc();
 
   HRSPhaseSetter x(HRSPhaseEvacuation);
   verify_region_sets_optional();
   verify_dirty_young_regions();
 
+  // This call will decide whether this pause is an initial-mark
+  // pause. If it is, during_initial_mark_pause() will return true
+  // for the duration of this pause.
+  g1_policy()->decide_on_conc_mark_initiation();
+
+  // We do not allow initial-mark to be piggy-backed on a mixed GC.
+  assert(!g1_policy()->during_initial_mark_pause() ||
+          g1_policy()->gcs_are_young(), "sanity");
+
+  // We also do not allow mixed GCs during marking.
+  assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
+
+  // Record whether this pause is an initial mark. By the time the
+  // current thread has completed its logging output and it is safe to
+  // signal the CM thread, the flag's value in the policy will have
+  // been reset.
+  bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
+
+  // Inner scope for scope based logging, timers, and stats collection
   {
-    // This call will decide whether this pause is an initial-mark
-    // pause. If it is, during_initial_mark_pause() will return true
-    // for the duration of this pause.
-    g1_policy()->decide_on_conc_mark_initiation();
-
-    // We do not allow initial-mark to be piggy-backed on a mixed GC.
-    assert(!g1_policy()->during_initial_mark_pause() ||
-            g1_policy()->gcs_are_young(), "sanity");
-
-    // We also do not allow mixed GCs during marking.
-    assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
-
     char verbose_str[128];
     sprintf(verbose_str, "GC pause ");
     if (g1_policy()->gcs_are_young()) {
@@ -3613,7 +3650,6 @@
         Universe::verify(/* allow dirty */ false,
                          /* silent      */ false,
                          /* option      */ VerifyOption_G1UsePrevMarking);
-
       }
 
       COMPILER2_PRESENT(DerivedPointerTable::clear());
@@ -3656,6 +3692,18 @@
         g1_policy()->record_collection_pause_start(start_time_sec,
                                                    start_used_bytes);
 
+        double scan_wait_start = os::elapsedTime();
+        // We have to wait until the CM threads finish scanning the
+        // root regions as it's the only way to ensure that all the
+        // objects on them have been correctly scanned before we start
+        // moving them during the GC.
+        bool waited = _cm->root_regions()->wait_until_scan_finished();
+        if (waited) {
+          double scan_wait_end = os::elapsedTime();
+          double wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
+          g1_policy()->record_root_region_scan_wait_time(wait_time_ms);
+        }
+
 #if YOUNG_LIST_VERBOSE
         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
         _young_list->print();
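
The scan wait above is timed only when wait_until_scan_finished() reports that the caller actually blocked; the result later feeds the new "Root Region Scan Waiting" line and is subtracted from the pause's "Other" time. A small sketch of the same conditional-timing pattern, with std::chrono standing in for os::elapsedTime():

#include <chrono>
#include <cstdio>

// Placeholder for the root-region wait: returns whether the caller blocked.
static bool wait_until_scan_finished() { return true; }

static double g_root_region_scan_wait_time_ms = 0.0;  // reset to 0.0 each pause

void time_root_region_scan_wait() {
  auto scan_wait_start = std::chrono::steady_clock::now();
  bool waited = wait_until_scan_finished();
  if (waited) {
    auto scan_wait_end = std::chrono::steady_clock::now();
    g_root_region_scan_wait_time_ms =
        std::chrono::duration<double, std::milli>(scan_wait_end - scan_wait_start).count();
    // Recorded in the policy, subtracted from "Other" time, and printed
    // as "Root Region Scan Waiting" only when non-zero.
    std::printf("   [Root Region Scan Waiting: %.1f ms]\n",
                g_root_region_scan_wait_time_ms);
  }
}

int main() { time_root_region_scan_wait(); }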
@@ -3672,12 +3720,12 @@
         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-        g1_policy()->choose_collection_set(target_pause_time_ms);
+        g1_policy()->finalize_cset(target_pause_time_ms);
 
         _cm->note_start_of_gc();
         // We should not verify the per-thread SATB buffers given that
         // we have not filtered them yet (we'll do so during the
-        // GC). We also call this after choose_collection_set() to
+        // GC). We also call this after finalize_cset() to
         // ensure that the CSet has been finalized.
         _cm->verify_no_cset_oops(true  /* verify_stacks */,
                                  true  /* verify_enqueued_buffers */,
@@ -3765,16 +3813,14 @@
         }
 
         if (g1_policy()->during_initial_mark_pause()) {
+          // We have to do this before we notify the CM threads that
+          // they can start working to make sure that all the
+          // appropriate initialization is done on the CM object.
           concurrent_mark()->checkpointRootsInitialPost();
           set_marking_started();
-          // CAUTION: after the doConcurrentMark() call below,
-          // the concurrent marking thread(s) could be running
-          // concurrently with us. Make sure that anything after
-          // this point does not assume that we are the only GC thread
-          // running. Note: of course, the actual marking work will
-          // not start until the safepoint itself is released in
-          // ConcurrentGCThread::safepoint_desynchronize().
-          doConcurrentMark();
+          // Note that we don't actually trigger the CM thread at
+          // this point. We do that later when we're sure that
+          // the current thread has completed its logging output.
         }
 
         allocate_dummy_regions();
@@ -3884,15 +3930,22 @@
     }
   }
 
+  // The closing of the inner scope, immediately above, will complete
+  // the PrintGC logging output. The record_collection_pause_end() call
+  // above will complete the logging output of PrintGCDetails.
+  //
+  // It is not yet safe, however, to tell the concurrent mark to
+  // start as we have some optional output below. We don't want the
+  // output from the concurrent mark thread interfering with this
+  // logging output either.
+
   _hrs.verify_optional();
   verify_region_sets_optional();
 
   TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
 
-  if (PrintHeapAtGC) {
-    Universe::print_heap_after_gc();
-  }
+  print_heap_after_gc();
   g1mm()->update_sizes();
 
   if (G1SummarizeRSetStats &&
@@ -3901,6 +3954,21 @@
     g1_rem_set()->print_summary_info();
   }
 
+  // It should now be safe to tell the concurrent mark thread to start
+  // without its logging output interfering with the logging output
+  // that came from the pause.
+
+  if (should_start_conc_mark) {
+    // CAUTION: after the doConcurrentMark() call below,
+    // the concurrent marking thread(s) could be running
+    // concurrently with us. Make sure that anything after
+    // this point does not assume that we are the only GC thread
+    // running. Note: of course, the actual marking work will
+    // not start until the safepoint itself is released in
+    // ConcurrentGCThread::safepoint_desynchronize().
+    doConcurrentMark();
+  }
+
   return true;
 }
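
The pause now records should_start_conc_mark up front, because during_initial_mark_pause() will have been reset by the time the logging has drained, and it only releases the concurrent mark thread after all pause output is finished. A minimal sketch of that capture-early / signal-late pattern, with hypothetical names:

#include <cstdio>

// Hypothetical names; only the control flow is the point here.
struct Policy {
  bool during_initial_mark_pause = false;
  void decide_on_conc_mark_initiation() { during_initial_mark_pause = true; }
  void record_pause_end()               { during_initial_mark_pause = false; }
};

static void do_concurrent_mark() { std::puts("CM thread released"); }

void evacuation_pause(Policy& policy) {
  policy.decide_on_conc_mark_initiation();

  // Capture the decision now: by the end of the pause the policy will
  // have reset the flag, but we still need it to release the CM thread.
  bool should_start_conc_mark = policy.during_initial_mark_pause;

  {
    // ... the pause proper, producing PrintGC / PrintGCDetails output ...
    policy.record_pause_end();
  }

  // ... optional output (verification, remembered-set summary) ...

  if (should_start_conc_mark) {
    // Only now is it safe to let the CM thread's own logging interleave.
    do_concurrent_mark();
  }
}

int main() {
  Policy p;
  evacuation_pause(p);
}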
 
@@ -4162,7 +4230,7 @@
 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
   ParGCAllocBuffer(gclab_word_size), _retired(false) { }
 
-G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
+G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
   : _g1h(g1h),
     _refs(g1h->task_queue(queue_num)),
     _dcq(&g1h->dirty_card_queue_set()),
@@ -4283,6 +4351,7 @@
                                      G1ParScanThreadState* par_scan_state) :
   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
   _par_scan_state(par_scan_state),
+  _worker_id(par_scan_state->queue_num()),
   _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
   _mark_in_progress(_g1->mark_in_progress()) { }
 
@@ -4294,7 +4363,7 @@
 #endif // ASSERT
 
   // We know that the object is not moving so it's safe to read its size.
-  _cm->grayRoot(obj, (size_t) obj->size());
+  _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
 }
 
 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
@@ -4316,7 +4385,7 @@
   // worker so we cannot trust that its to-space image is
   // well-formed. So we have to read its size from its from-space
   // image which we know should not be changing.
-  _cm->grayRoot(to_obj, (size_t) from_obj->size());
+  _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
 }
 
 oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
@@ -4406,6 +4475,8 @@
   assert(barrier != G1BarrierRS || obj != NULL,
          "Precondition: G1BarrierRS implies obj is non-NULL");
 
+  assert(_worker_id == _par_scan_state->queue_num(), "sanity");
+
   // here the null check is implicit in the cset_fast_test() test
   if (_g1->in_cset_fast_test(obj)) {
     oop forwardee;
@@ -4424,7 +4495,7 @@
 
     // When scanning the RS, we only care about objs in CS.
     if (barrier == G1BarrierRS) {
-      _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
+      _par_scan_state->update_rs(_from, p, _worker_id);
     }
   } else {
     // The object is not in collection set. If we're a root scanning
@@ -4436,7 +4507,7 @@
   }
 
   if (barrier == G1BarrierEvac && obj != NULL) {
-    _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
+    _par_scan_state->update_rs(_from, p, _worker_id);
   }
 
   if (do_gen_barrier && obj != NULL) {
@@ -4677,7 +4748,7 @@
 void
 G1CollectedHeap::
 g1_process_strong_roots(bool collecting_perm_gen,
-                        SharedHeap::ScanningOption so,
+                        ScanningOption so,
                         OopClosure* scan_non_heap_roots,
                         OopsInHeapRegionClosure* scan_rs,
                         OopsInGenClosure* scan_perm,
@@ -5666,16 +5737,6 @@
 
       // And the region is empty.
       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
-
-      // If marking is in progress then clear any objects marked in
-      // the current region. Note mark_in_progress() returns false,
-      // even during an initial mark pause, until the set_marking_started()
-      // call which takes place later in the pause.
-      if (mark_in_progress()) {
-        assert(!g1_policy()->during_initial_mark_pause(), "sanity");
-        _cm->nextMarkBitMap()->clearRange(used_mr);
-      }
-
       free_region(cur, &pre_used, &local_free_list, false /* par */);
     } else {
       cur->uninstall_surv_rate_group();
@@ -5742,8 +5803,9 @@
 }
 
 void G1CollectedHeap::reset_free_regions_coming() {
+  assert(free_regions_coming(), "pre-condition");
+
   {
-    assert(free_regions_coming(), "pre-condition");
     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
     _free_regions_coming = false;
     SecondaryFreeList_lock->notify_all();
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -355,6 +355,7 @@
   // explicitly started if:
   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
+  // (c) cause == _g1_humongous_allocation
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
   // Keeps track of how many "full collections" (i.e., Full GCs or
@@ -769,7 +770,7 @@
   // the "i" of the calling parallel worker thread's work(i) function.
   // In the sequential case this param will be ignored.
   void g1_process_strong_roots(bool collecting_perm_gen,
-                               SharedHeap::ScanningOption so,
+                               ScanningOption so,
                                OopClosure* scan_non_heap_roots,
                                OopsInHeapRegionClosure* scan_rs,
                                OopsInGenClosure* scan_perm,
@@ -1172,11 +1173,21 @@
     _old_set.remove(hr);
   }
 
+  size_t non_young_capacity_bytes() {
+    return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
+  }
+
   void set_free_regions_coming();
   void reset_free_regions_coming();
   bool free_regions_coming() { return _free_regions_coming; }
   void wait_while_free_regions_coming();
 
+  // Determine whether the given region is one that we are using as an
+  // old GC alloc region.
+  bool is_old_gc_alloc_region(HeapRegion* hr) {
+    return hr == _retained_old_gc_alloc_region;
+  }
+
   // Perform a collection of the heap; intended for use in implementing
   // "System.gc".  This probably implies as full a collection as the
   // "CollectedHeap" supports.
@@ -1657,8 +1668,6 @@
 public:
   void stop_conc_gc_threads();
 
-  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
-  void check_if_region_is_too_expensive(double predicted_time_ms);
   size_t pending_card_num();
   size_t max_pending_card_num();
   size_t cards_scanned();
@@ -1904,7 +1913,7 @@
   G1ParScanPartialArrayClosure* _partial_scan_cl;
 
   int _hash_seed;
-  int _queue_num;
+  uint _queue_num;
 
   size_t _term_attempts;
 
@@ -1948,7 +1957,7 @@
   }
 
 public:
-  G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num);
+  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
 
   ~G1ParScanThreadState() {
     FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
@@ -2040,7 +2049,7 @@
   }
 
   int* hash_seed() { return &_hash_seed; }
-  int  queue_num() { return _queue_num; }
+  uint queue_num() { return _queue_num; }
 
   size_t term_attempts() const  { return _term_attempts; }
   void note_term_attempt() { _term_attempts++; }
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -141,6 +141,7 @@
 
   _cur_clear_ct_time_ms(0.0),
   _mark_closure_time_ms(0.0),
+  _root_region_scan_wait_time_ms(0.0),
 
   _cur_ref_proc_time_ms(0.0),
   _cur_ref_enq_time_ms(0.0),
@@ -205,7 +206,6 @@
 
   _initiate_conc_mark_if_possible(false),
   _during_initial_mark_pause(false),
-  _should_revert_to_young_gcs(false),
   _last_young_gc(false),
   _last_gc_was_young(false),
 
@@ -213,8 +213,6 @@
   _survivor_bytes_before_gc(0),
   _capacity_before_gc(0),
 
-  _prev_collection_pause_used_at_end_bytes(0),
-
   _eden_cset_region_length(0),
   _survivor_cset_region_length(0),
   _old_cset_region_length(0),
@@ -296,9 +294,6 @@
   _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
   _par_last_gc_worker_other_times_ms = new double[_parallel_gc_threads];
 
-  // start conservatively
-  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;
-
   int index;
   if (ParallelGCThreads == 0)
     index = 0;
@@ -630,16 +625,9 @@
       // possible to maximize how many old regions we can add to it.
     }
   } else {
-    if (gcs_are_young()) {
-      young_list_target_length = _young_list_fixed_length;
-    } else {
-      // A bit arbitrary: during mixed GCs we allocate half
-      // the young regions to try to add old regions to the CSet.
-      young_list_target_length = _young_list_fixed_length / 2;
-      // We choose to accept that we might go under the desired min
-      // length given that we intentionally ask for a smaller young gen.
-      desired_min_length = absolute_min_length;
-    }
+    // The user asked for a fixed young gen so we'll fix the young gen
+    // whether the next GC is young or mixed.
+    young_list_target_length = _young_list_fixed_length;
   }
 
   // Make sure we don't go over the desired max length, nor under the
@@ -873,7 +861,6 @@
   // transitions and make sure we start with young GCs after the Full GC.
   set_gcs_are_young(true);
   _last_young_gc = false;
-  _should_revert_to_young_gcs = false;
   clear_initiate_conc_mark_if_possible();
   clear_during_initial_mark_pause();
   _known_garbage_bytes = 0;
@@ -890,7 +877,7 @@
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_target_length();
-  _collectionSetChooser->updateAfterFullCollection();
+  _collectionSetChooser->clearMarkedHeapRegions();
 }
 
 void G1CollectorPolicy::record_stop_world_start() {
@@ -905,19 +892,10 @@
     gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
   }
 
-  if (!during_initial_mark_pause()) {
-    // We only need to do this here as the policy will only be applied
-    // to the GC we're about to start. so, no point is calculating this
-    // every time we calculate / recalculate the target young length.
-    update_survivors_policy();
-  } else {
-    // The marking phase has a "we only copy implicitly live
-    // objects during marking" invariant. The easiest way to ensure it
-    // holds is not to allocate any survivor regions and tenure all
-    // objects. In the future we might change this and handle survivor
-    // regions specially during marking.
-    tenure_all_objects();
-  }
+  // We only need to do this here as the policy will only be applied
+  // to the GC we're about to start, so there is no point in calculating this
+  // every time we calculate / recalculate the target young length.
+  update_survivors_policy();
 
   assert(_g1->used() == _g1->recalculate_used(),
          err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
@@ -969,6 +947,9 @@
   // This is initialized to zero here and is set during
   // the evacuation pause if marking is in progress.
   _cur_satb_drain_time_ms = 0.0;
+  // This is initialized to zero here and is set during the evacuation
+  // pause if we actually waited for the root region scanning to finish.
+  _root_region_scan_wait_time_ms = 0.0;
 
   _last_gc_was_young = false;
 
@@ -1007,7 +988,6 @@
 }
 
 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
-  _should_revert_to_young_gcs = false;
   _last_young_gc = true;
   _in_marking_window = false;
 }
@@ -1140,6 +1120,50 @@
   return ret;
 }
 
+bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
+  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
+    return false;
+  }
+
+  size_t marking_initiating_used_threshold =
+    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
+  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
+  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
+
+  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
+    if (gcs_are_young()) {
+      ergo_verbose5(ErgoConcCycles,
+        "request concurrent cycle initiation",
+        ergo_format_reason("occupancy higher than threshold")
+        ergo_format_byte("occupancy")
+        ergo_format_byte("allocation request")
+        ergo_format_byte_perc("threshold")
+        ergo_format_str("source"),
+        cur_used_bytes,
+        alloc_byte_size,
+        marking_initiating_used_threshold,
+        (double) InitiatingHeapOccupancyPercent,
+        source);
+      return true;
+    } else {
+      ergo_verbose5(ErgoConcCycles,
+        "do not request concurrent cycle initiation",
+        ergo_format_reason("still doing mixed collections")
+        ergo_format_byte("occupancy")
+        ergo_format_byte("allocation request")
+        ergo_format_byte_perc("threshold")
+        ergo_format_str("source"),
+        cur_used_bytes,
+        alloc_byte_size,
+        marking_initiating_used_threshold,
+        (double) InitiatingHeapOccupancyPercent,
+        source);
+    }
+  }
+
+  return false;
+}
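
The new predicate compares non-young occupancy plus the pending allocation against InitiatingHeapOccupancyPercent of the committed heap, and only requests a cycle while GCs are still young. As a rough worked example with made-up numbers: on a 1 GB heap with a 45% threshold, 400 MB of non-young occupancy plus a 100 MB humongous request crosses the ~460.8 MB mark, so a cycle is requested. A standalone sketch of the check (illustrative, not the VM code):

#include <cstddef>
#include <cstdio>

// Mirrors the shape of the new check only; names and types are simplified.
bool need_to_start_conc_mark(size_t capacity_bytes,
                             size_t non_young_used_bytes,
                             size_t alloc_bytes,
                             unsigned initiating_occupancy_percent,
                             bool gcs_are_young,
                             bool marking_cycle_in_progress) {
  if (marking_cycle_in_progress) {
    return false;                     // a cycle is already running
  }
  size_t threshold = (capacity_bytes / 100) * initiating_occupancy_percent;
  if (non_young_used_bytes + alloc_bytes > threshold) {
    // Only request a new cycle while we are still doing young GCs;
    // during mixed GCs the situation is logged but no cycle is requested.
    return gcs_are_young;
  }
  return false;
}

int main() {
  const size_t MB = 1024 * 1024;
  bool start = need_to_start_conc_mark(1024 * MB,  // capacity
                                       400 * MB,   // non-young occupancy
                                       100 * MB,   // humongous request
                                       45,         // threshold percent
                                       true,       // still doing young GCs
                                       false);     // no cycle running
  std::printf("request concurrent cycle: %s\n", start ? "yes" : "no");
}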
+
 // Anything below that is considered to be zero
 #define MIN_TIMER_GRANULARITY 0.0000001
 
@@ -1166,45 +1190,15 @@
 #endif // PRODUCT
 
   last_pause_included_initial_mark = during_initial_mark_pause();
-  if (last_pause_included_initial_mark)
+  if (last_pause_included_initial_mark) {
     record_concurrent_mark_init_end(0.0);
-
-  size_t marking_initiating_used_threshold =
-    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
-
-  if (!_g1->mark_in_progress() && !_last_young_gc) {
-    assert(!last_pause_included_initial_mark, "invariant");
-    if (cur_used_bytes > marking_initiating_used_threshold) {
-      if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
-        assert(!during_initial_mark_pause(), "we should not see this here");
-
-        ergo_verbose3(ErgoConcCycles,
-                      "request concurrent cycle initiation",
-                      ergo_format_reason("occupancy higher than threshold")
-                      ergo_format_byte("occupancy")
-                      ergo_format_byte_perc("threshold"),
-                      cur_used_bytes,
-                      marking_initiating_used_threshold,
-                      (double) InitiatingHeapOccupancyPercent);
-
-        // Note: this might have already been set, if during the last
-        // pause we decided to start a cycle but at the beginning of
-        // this pause we decided to postpone it. That's OK.
-        set_initiate_conc_mark_if_possible();
-      } else {
-        ergo_verbose2(ErgoConcCycles,
-                  "do not request concurrent cycle initiation",
-                  ergo_format_reason("occupancy lower than previous occupancy")
-                  ergo_format_byte("occupancy")
-                  ergo_format_byte("previous occupancy"),
-                  cur_used_bytes,
-                  _prev_collection_pause_used_at_end_bytes);
-      }
-    }
+  } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
+    // Note: this might have already been set, if during the last
+    // pause we decided to start a cycle but at the beginning of
+    // this pause we decided to postpone it. That's OK.
+    set_initiate_conc_mark_if_possible();
   }
 
-  _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
-
   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
                           end_time_sec, false);
 
@@ -1257,6 +1251,10 @@
   // is in progress.
   other_time_ms -= _cur_satb_drain_time_ms;
 
+  // Subtract the root region scanning wait time. It's initialized to
+  // zero at the start of the pause.
+  other_time_ms -= _root_region_scan_wait_time_ms;
+
   if (parallel) {
     other_time_ms -= _cur_collection_par_time_ms;
   } else {
@@ -1289,6 +1287,8 @@
     // each other. Therefore we unconditionally record the SATB drain
     // time - even if it's zero.
     body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
+    body_summary->record_root_region_scan_wait_time_ms(
+                                               _root_region_scan_wait_time_ms);
 
     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
     body_summary->record_satb_filtering_time_ms(satb_filtering_time);
@@ -1385,6 +1385,9 @@
                            (last_pause_included_initial_mark) ? " (initial-mark)" : "",
                            elapsed_ms / 1000.0);
 
+    if (_root_region_scan_wait_time_ms > 0.0) {
+      print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
+    }
     if (parallel) {
       print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
       print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
@@ -1474,12 +1477,14 @@
   }
 
   if (_last_young_gc) {
+    // This is supposed to be the "last young GC" before we start
+    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
+
     if (!last_pause_included_initial_mark) {
-      ergo_verbose2(ErgoMixedGCs,
-                    "start mixed GCs",
-                    ergo_format_byte_perc("known garbage"),
-                    _known_garbage_bytes, _known_garbage_ratio * 100.0);
-      set_gcs_are_young(false);
+      if (next_gc_should_be_mixed("start mixed GCs",
+                                  "do not start mixed GCs")) {
+        set_gcs_are_young(false);
+      }
     } else {
       ergo_verbose0(ErgoMixedGCs,
                     "do not start mixed GCs",
@@ -1489,39 +1494,14 @@
   }
 
   if (!_last_gc_was_young) {
-    if (_should_revert_to_young_gcs) {
-      ergo_verbose2(ErgoMixedGCs,
-                    "end mixed GCs",
-                    ergo_format_reason("mixed GCs end requested")
-                    ergo_format_byte_perc("known garbage"),
-                    _known_garbage_bytes, _known_garbage_ratio * 100.0);
-      set_gcs_are_young(true);
-    } else if (_known_garbage_ratio < 0.05) {
-      ergo_verbose3(ErgoMixedGCs,
-               "end mixed GCs",
-               ergo_format_reason("known garbage percent lower than threshold")
-               ergo_format_byte_perc("known garbage")
-               ergo_format_perc("threshold"),
-               _known_garbage_bytes, _known_garbage_ratio * 100.0,
-               0.05 * 100.0);
-      set_gcs_are_young(true);
-    } else if (adaptive_young_list_length() &&
-              (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
-      ergo_verbose5(ErgoMixedGCs,
-                    "end mixed GCs",
-                    ergo_format_reason("current GC efficiency lower than "
-                                       "predicted young GC efficiency")
-                    ergo_format_double("GC efficiency factor")
-                    ergo_format_double("current GC efficiency")
-                    ergo_format_double("predicted young GC efficiency")
-                    ergo_format_byte_perc("known garbage"),
-                    get_gc_eff_factor(), cur_efficiency,
-                    predict_young_gc_eff(),
-                    _known_garbage_bytes, _known_garbage_ratio * 100.0);
+    // This is a mixed GC. Here we decide whether to continue doing
+    // mixed GCs or not.
+
+    if (!next_gc_should_be_mixed("continue mixed GCs",
+                                 "do not continue mixed GCs")) {
       set_gcs_are_young(true);
     }
   }
-  _should_revert_to_young_gcs = false;
 
   if (_last_gc_was_young && !_during_marking) {
     _young_gc_eff_seq->add(cur_efficiency);
@@ -1630,15 +1610,6 @@
 
     _pending_cards_seq->add((double) _pending_cards);
     _rs_lengths_seq->add((double) _max_rs_lengths);
-
-    double expensive_region_limit_ms =
-      (double) MaxGCPauseMillis - predict_constant_other_time_ms();
-    if (expensive_region_limit_ms < 0.0) {
-      // this means that the other time was predicted to be longer than
-      // than the max pause time
-      expensive_region_limit_ms = (double) MaxGCPauseMillis;
-    }
-    _expensive_region_limit_ms = expensive_region_limit_ms;
   }
 
   _in_marking_window = new_in_marking_window;
@@ -1820,13 +1791,11 @@
   if (hr->is_marked())
     bytes_to_copy = hr->max_live_bytes();
   else {
-    guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
-               "invariant" );
+    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
     int age = hr->age_in_surv_rate_group();
     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
   }
-
   return bytes_to_copy;
 }
 
@@ -1842,22 +1811,6 @@
   _recorded_rs_lengths = rs_lengths;
 }
 
-void G1CollectorPolicy::check_if_region_is_too_expensive(double
-                                                           predicted_time_ms) {
-  // I don't think we need to do this when in young GC mode since
-  // marking will be initiated next time we hit the soft limit anyway...
-  if (predicted_time_ms > _expensive_region_limit_ms) {
-    ergo_verbose2(ErgoMixedGCs,
-              "request mixed GCs end",
-              ergo_format_reason("predicted region time higher than threshold")
-              ergo_format_ms("predicted region time")
-              ergo_format_ms("threshold"),
-              predicted_time_ms, _expensive_region_limit_ms);
-    // no point in doing another mixed GC
-    _should_revert_to_young_gcs = true;
-  }
-}
-
 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                                double elapsed_ms) {
   _recent_gc_times_ms->add(elapsed_ms);
@@ -1988,6 +1941,7 @@
   if (summary->get_total_seq()->num() > 0) {
     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
     if (body_summary != NULL) {
+      print_summary(1, "Root Region Scan Wait", body_summary->get_root_region_scan_wait_seq());
       if (parallel) {
         print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
         print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
@@ -2029,15 +1983,17 @@
           // parallel
           NumberSeq* other_parts[] = {
             body_summary->get_satb_drain_seq(),
+            body_summary->get_root_region_scan_wait_seq(),
             body_summary->get_parallel_seq(),
             body_summary->get_clear_ct_seq()
           };
           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
-                                                3, other_parts);
+                                          4, other_parts);
         } else {
           // serial
           NumberSeq* other_parts[] = {
             body_summary->get_satb_drain_seq(),
+            body_summary->get_root_region_scan_wait_seq(),
             body_summary->get_update_rs_seq(),
             body_summary->get_ext_root_scan_seq(),
             body_summary->get_satb_filtering_seq(),
@@ -2045,7 +2001,7 @@
             body_summary->get_obj_copy_seq()
           };
           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
-                                                6, other_parts);
+                                          7, other_parts);
         }
         check_other_times(1,  summary->get_other_seq(), &calc_other_times_ms);
       }
@@ -2253,12 +2209,12 @@
 }
 
 class KnownGarbageClosure: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
   CollectionSetChooser* _hrSorted;
 
 public:
   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
-    _hrSorted(hrSorted)
-  {}
+    _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
 
   bool doHeapRegion(HeapRegion* r) {
     // We only include humongous regions in collection
@@ -2267,11 +2223,10 @@
 
     // Do we have any marking information for this region?
     if (r->is_marked()) {
-      // We don't include humongous regions in collection
-      // sets because we collect them immediately at the end of a marking
-      // cycle.  We also don't include young regions because we *must*
-      // include them in the next collection pause.
-      if (!r->isHumongous() && !r->is_young()) {
+      // We will skip any region that's currently used as an old GC
+      // alloc region (we should not consider those for collection
+      // before we fill them up).
+      if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
         _hrSorted->addMarkedHeapRegion(r);
       }
     }
@@ -2280,8 +2235,10 @@
 };
 
 class ParKnownGarbageHRClosure: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
   CollectionSetChooser* _hrSorted;
   jint _marked_regions_added;
+  size_t _reclaimable_bytes_added;
   jint _chunk_size;
   jint _cur_chunk_idx;
   jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
@@ -2299,6 +2256,7 @@
     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
     _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
     _marked_regions_added++;
+    _reclaimable_bytes_added += r->reclaimable_bytes();
     _cur_chunk_idx++;
   }
 
@@ -2306,10 +2264,10 @@
   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                            jint chunk_size,
                            int worker) :
-    _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
-    _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
-    _invokes(0)
-  {}
+      _g1h(G1CollectedHeap::heap()),
+      _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
+      _marked_regions_added(0), _reclaimable_bytes_added(0),
+      _cur_chunk_idx(0), _cur_chunk_end(0), _invokes(0) { }
 
   bool doHeapRegion(HeapRegion* r) {
     // We only include humongous regions in collection
@@ -2319,17 +2277,17 @@
 
     // Do we have any marking information for this region?
     if (r->is_marked()) {
-      // We don't include humongous regions in collection
-      // sets because we collect them immediately at the end of a marking
-      // cycle.
-      // We also do not include young regions in collection sets
-      if (!r->isHumongous() && !r->is_young()) {
+      // We will skip any region that's currently used as an old GC
+      // alloc region (we should not consider those for collection
+      // before we fill them up).
+      if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
         add_region(r);
       }
     }
     return false;
   }
   jint marked_regions_added() { return _marked_regions_added; }
+  size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
   int invokes() { return _invokes; }
 };
 
@@ -2341,8 +2299,7 @@
   ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
     AbstractGangTask("ParKnownGarbageTask"),
     _hrSorted(hrSorted), _chunk_size(chunk_size),
-    _g1(G1CollectedHeap::heap())
-  {}
+    _g1(G1CollectedHeap::heap()) { }
 
   void work(uint worker_id) {
     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
@@ -2353,7 +2310,9 @@
                                          _g1->workers()->active_workers(),
                                          HeapRegion::InitialClaimValue);
     jint regions_added = parKnownGarbageCl.marked_regions_added();
-    _hrSorted->incNumMarkedHeapRegions(regions_added);
+    size_t reclaimable_bytes_added =
+                                   parKnownGarbageCl.reclaimable_bytes_added();
+    _hrSorted->updateTotals(regions_added, reclaimable_bytes_added);
     if (G1PrintParCleanupStats) {
       gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
                  worker_id, parKnownGarbageCl.invokes(), regions_added);
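
Each worker now tallies the reclaimable bytes of the regions it adds alongside the region count, and publishes both with one updateTotals() call at the end of its chunk rather than per region. A sketch of that accumulate-locally / publish-once pattern, using std::atomic in place of the chooser's own synchronization (names are illustrative):

#include <atomic>
#include <cstddef>
#include <vector>

// Shared totals, updated once per worker rather than once per region.
struct CSetChooserTotals {
  std::atomic<size_t> regions{0};
  std::atomic<size_t> reclaimable_bytes{0};

  void update_totals(size_t regions_added, size_t bytes_added) {
    regions.fetch_add(regions_added, std::memory_order_relaxed);
    reclaimable_bytes.fetch_add(bytes_added, std::memory_order_relaxed);
  }
};

struct Region { size_t reclaimable_bytes; bool marked; };

// Work done by one worker over its claimed chunk of regions.
void scan_chunk(const std::vector<Region>& chunk, CSetChooserTotals& totals) {
  size_t regions_added = 0;
  size_t bytes_added   = 0;
  for (const Region& r : chunk) {
    if (r.marked) {
      regions_added += 1;
      bytes_added   += r.reclaimable_bytes;
    }
  }
  totals.update_totals(regions_added, bytes_added);
}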
@@ -2637,7 +2596,43 @@
 }
 #endif // !PRODUCT
 
-void G1CollectorPolicy::choose_collection_set(double target_pause_time_ms) {
+bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
+                                                const char* false_action_str) {
+  CollectionSetChooser* cset_chooser = _collectionSetChooser;
+  if (cset_chooser->isEmpty()) {
+    ergo_verbose0(ErgoMixedGCs,
+                  false_action_str,
+                  ergo_format_reason("candidate old regions not available"));
+    return false;
+  }
+  size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes();
+  size_t capacity_bytes = _g1->capacity();
+  double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
+  double threshold = (double) G1HeapWastePercent;
+  if (perc < threshold) {
+    ergo_verbose4(ErgoMixedGCs,
+              false_action_str,
+              ergo_format_reason("reclaimable percentage lower than threshold")
+              ergo_format_region("candidate old regions")
+              ergo_format_byte_perc("reclaimable")
+              ergo_format_perc("threshold"),
+              cset_chooser->remainingRegions(),
+              reclaimable_bytes, perc, threshold);
+    return false;
+  }
+
+  ergo_verbose4(ErgoMixedGCs,
+                true_action_str,
+                ergo_format_reason("candidate old regions available")
+                ergo_format_region("candidate old regions")
+                ergo_format_byte_perc("reclaimable")
+                ergo_format_perc("threshold"),
+                cset_chooser->remainingRegions(),
+                reclaimable_bytes, perc, threshold);
+  return true;
+}
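
Mixed GCs now start or continue only while the remaining candidate old regions are worth collecting: if their reclaimable bytes drop below G1HeapWastePercent of the heap, the mixed phase ends. For example (illustrative numbers, 5% threshold): on a 2 GB heap, 80 MB of remaining reclaimable space (~3.9%) stops mixed GCs, while 150 MB (~7.3%) continues them. A small standalone sketch of the test:

#include <cstddef>
#include <cstdio>

// In the spirit of next_gc_should_be_mixed(): keep doing mixed GCs only
// while the candidate old regions are worth collecting relative to the
// total heap capacity.
bool next_gc_should_be_mixed(size_t reclaimable_bytes,
                             size_t capacity_bytes,
                             double heap_waste_percent,
                             size_t remaining_candidate_regions) {
  if (remaining_candidate_regions == 0) {
    return false;                     // no candidate old regions left
  }
  double reclaimable_perc = 100.0 * (double) reclaimable_bytes
                                  / (double) capacity_bytes;
  return reclaimable_perc >= heap_waste_percent;
}

int main() {
  const size_t MB = 1024 * 1024;
  std::printf("80 MB reclaimable:  %d\n",
              next_gc_should_be_mixed( 80 * MB, 2048 * MB, 5.0, 40));  // 0
  std::printf("150 MB reclaimable: %d\n",
              next_gc_should_be_mixed(150 * MB, 2048 * MB, 5.0, 40));  // 1
}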
+
+void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   // Set this here - in case we're not doing young collections.
   double non_young_start_time_sec = os::elapsedTime();
 
@@ -2651,7 +2646,6 @@
 
   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
   double predicted_pause_time_ms = base_time_ms;
-
   double time_remaining_ms = target_pause_time_ms - base_time_ms;
 
   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
@@ -2661,22 +2655,6 @@
                 ergo_format_ms("target pause time"),
                 base_time_ms, time_remaining_ms, target_pause_time_ms);
 
-  // the 10% and 50% values are arbitrary...
-  double threshold = 0.10 * target_pause_time_ms;
-  if (time_remaining_ms < threshold) {
-    double prev_time_remaining_ms = time_remaining_ms;
-    time_remaining_ms = 0.50 * target_pause_time_ms;
-    ergo_verbose3(ErgoCSetConstruction,
-                  "adjust remaining time",
-                  ergo_format_reason("remaining time lower than threshold")
-                  ergo_format_ms("remaining time")
-                  ergo_format_ms("threshold")
-                  ergo_format_ms("adjusted remaining time"),
-                  prev_time_remaining_ms, threshold, time_remaining_ms);
-  }
-
-  size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;
-
   HeapRegion* hr;
   double young_start_time_sec = os::elapsedTime();
 
@@ -2731,78 +2709,97 @@
   non_young_start_time_sec = young_end_time_sec;
 
   if (!gcs_are_young()) {
-    bool should_continue = true;
-    NumberSeq seq;
-    double avg_prediction = 100000000000000000.0; // something very large
-
-    double prev_predicted_pause_time_ms = predicted_pause_time_ms;
-    do {
-      // Note that add_old_region_to_cset() increments the
-      // _old_cset_region_length field and cset_region_length() returns the
-      // sum of _eden_cset_region_length, _survivor_cset_region_length, and
-      // _old_cset_region_length. So, as old regions are added to the
-      // CSet, _old_cset_region_length will be incremented and
-      // cset_region_length(), which is used below, will always reflect
-      // the the total number of regions added up to this point to the CSet.
-
-      hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
-                                                      avg_prediction);
-      if (hr != NULL) {
-        _g1->old_set_remove(hr);
-        double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
-        time_remaining_ms -= predicted_time_ms;
-        predicted_pause_time_ms += predicted_time_ms;
-        add_old_region_to_cset(hr);
-        seq.add(predicted_time_ms);
-        avg_prediction = seq.avg() + seq.sd();
+    CollectionSetChooser* cset_chooser = _collectionSetChooser;
+    assert(cset_chooser->verify(), "CSet Chooser verification - pre");
+    const size_t min_old_cset_length = cset_chooser->calcMinOldCSetLength();
+    const size_t max_old_cset_length = cset_chooser->calcMaxOldCSetLength();
+
+    size_t expensive_region_num = 0;
+    bool check_time_remaining = adaptive_young_list_length();
+    HeapRegion* hr = cset_chooser->peek();
+    while (hr != NULL) {
+      if (old_cset_region_length() >= max_old_cset_length) {
+        // Added maximum number of old regions to the CSet.
+        ergo_verbose2(ErgoCSetConstruction,
+                      "finish adding old regions to CSet",
+                      ergo_format_reason("old CSet region num reached max")
+                      ergo_format_region("old")
+                      ergo_format_region("max"),
+                      old_cset_region_length(), max_old_cset_length);
+        break;
       }
 
-      should_continue = true;
-      if (hr == NULL) {
-        // No need for an ergo verbose message here,
-        // getNextMarkRegion() does this when it returns NULL.
-        should_continue = false;
+      double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
+      if (check_time_remaining) {
+        if (predicted_time_ms > time_remaining_ms) {
+          // Too expensive for the current CSet.
+
+          if (old_cset_region_length() >= min_old_cset_length) {
+            // We have added the minimum number of old regions to the CSet,
+            // we are done with this CSet.
+            ergo_verbose4(ErgoCSetConstruction,
+                          "finish adding old regions to CSet",
+                          ergo_format_reason("predicted time is too high")
+                          ergo_format_ms("predicted time")
+                          ergo_format_ms("remaining time")
+                          ergo_format_region("old")
+                          ergo_format_region("min"),
+                          predicted_time_ms, time_remaining_ms,
+                          old_cset_region_length(), min_old_cset_length);
+            break;
+          }
+
+          // We'll add it anyway given that we haven't reached the
+          // minimum number of old regions.
+          expensive_region_num += 1;
+        }
       } else {
-        if (adaptive_young_list_length()) {
-          if (time_remaining_ms < 0.0) {
-            ergo_verbose1(ErgoCSetConstruction,
-                          "stop adding old regions to CSet",
-                          ergo_format_reason("remaining time is lower than 0")
-                          ergo_format_ms("remaining time"),
-                          time_remaining_ms);
-            should_continue = false;
-          }
-        } else {
-          if (cset_region_length() >= _young_list_fixed_length) {
-            ergo_verbose2(ErgoCSetConstruction,
-                          "stop adding old regions to CSet",
-                          ergo_format_reason("CSet length reached target")
-                          ergo_format_region("CSet")
-                          ergo_format_region("young target"),
-                          cset_region_length(), _young_list_fixed_length);
-            should_continue = false;
-          }
+        if (old_cset_region_length() >= min_old_cset_length) {
+          // In the non-auto-tuning case, we'll finish adding regions
+          // to the CSet if we reach the minimum.
+          ergo_verbose2(ErgoCSetConstruction,
+                        "finish adding old regions to CSet",
+                        ergo_format_reason("old CSet region num reached min")
+                        ergo_format_region("old")
+                        ergo_format_region("min"),
+                        old_cset_region_length(), min_old_cset_length);
+          break;
         }
       }
-    } while (should_continue);
-
-    if (!adaptive_young_list_length() &&
-        cset_region_length() < _young_list_fixed_length) {
-      ergo_verbose2(ErgoCSetConstruction,
-                    "request mixed GCs end",
-                    ergo_format_reason("CSet length lower than target")
-                    ergo_format_region("CSet")
-                    ergo_format_region("young target"),
-                    cset_region_length(), _young_list_fixed_length);
-      _should_revert_to_young_gcs  = true;
+
+      // We will add this region to the CSet.
+      time_remaining_ms -= predicted_time_ms;
+      predicted_pause_time_ms += predicted_time_ms;
+      cset_chooser->remove_and_move_to_next(hr);
+      _g1->old_set_remove(hr);
+      add_old_region_to_cset(hr);
+
+      hr = cset_chooser->peek();
+    }
+    if (hr == NULL) {
+      ergo_verbose0(ErgoCSetConstruction,
+                    "finish adding old regions to CSet",
+                    ergo_format_reason("candidate old regions not available"));
     }
 
-    ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
-                  "add old regions to CSet",
-                  ergo_format_region("old")
-                  ergo_format_ms("predicted old region time"),
-                  old_cset_region_length(),
-                  predicted_pause_time_ms - prev_predicted_pause_time_ms);
+    if (expensive_region_num > 0) {
+      // We print the information once here at the end, predicated on
+      // whether we added any apparently expensive regions or not, to
+      // avoid generating output per region.
+      ergo_verbose4(ErgoCSetConstruction,
+                    "added expensive regions to CSet",
+                    ergo_format_reason("old CSet region num not reached min")
+                    ergo_format_region("old")
+                    ergo_format_region("expensive")
+                    ergo_format_region("min")
+                    ergo_format_ms("remaining time"),
+                    old_cset_region_length(),
+                    expensive_region_num,
+                    min_old_cset_length,
+                    time_remaining_ms);
+    }
+
+    assert(cset_chooser->verify(), "CSet Chooser verification - post");
   }
 
   stop_incremental_cset_building();
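
finalize_cset() replaces the old running-average cutoff with explicit bounds: never exceed a maximum number of old regions, always take at least a minimum (counting the ones that look too expensive), and otherwise stop as soon as a candidate's predicted time no longer fits the remaining pause budget; with a fixed young length it simply stops at the minimum. A condensed sketch of that selection loop (the candidate list and predictor are stand-ins, not the CollectionSetChooser):

#include <cstddef>
#include <deque>

struct Candidate { double predicted_time_ms; };

// Selects old regions for the collection set under a pause-time budget.
// Returns how many regions were taken from the sorted candidate list.
size_t select_old_regions(std::deque<Candidate>& candidates,
                          size_t min_old_regions,
                          size_t max_old_regions,
                          double time_remaining_ms,
                          bool   check_time_remaining) {
  size_t taken = 0;
  size_t expensive_taken = 0;

  while (!candidates.empty()) {
    if (taken >= max_old_regions) {
      break;                                  // hit the hard cap
    }
    const Candidate& c = candidates.front();
    if (check_time_remaining) {
      if (c.predicted_time_ms > time_remaining_ms) {
        if (taken >= min_old_regions) {
          break;                              // budget exhausted, minimum satisfied
        }
        // Below the minimum: take it anyway and note it was "expensive".
        expensive_taken++;
      }
    } else if (taken >= min_old_regions) {
      break;                                  // fixed young gen: stop at the minimum
    }
    time_remaining_ms -= c.predicted_time_ms;
    taken++;
    candidates.pop_front();
  }
  (void) expensive_taken;  // in the VM this feeds a single ergo log record
  return taken;
}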
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -65,6 +65,7 @@
 
 class MainBodySummary: public CHeapObj {
   define_num_seq(satb_drain) // optional
+  define_num_seq(root_region_scan_wait)
   define_num_seq(parallel) // parallel only
     define_num_seq(ext_root_scan)
     define_num_seq(satb_filtering)
@@ -177,7 +178,6 @@
   double _cur_collection_start_sec;
   size_t _cur_collection_pause_used_at_start_bytes;
   size_t _cur_collection_pause_used_regions_at_start;
-  size_t _prev_collection_pause_used_at_end_bytes;
   double _cur_collection_par_time_ms;
   double _cur_satb_drain_time_ms;
   double _cur_clear_ct_time_ms;
@@ -312,16 +312,13 @@
   double _recorded_non_young_free_cset_time_ms;
 
   double _sigma;
-  double _expensive_region_limit_ms;
 
   size_t _rs_lengths_prediction;
 
   size_t _known_garbage_bytes;
   double _known_garbage_ratio;
 
-  double sigma() {
-    return _sigma;
-  }
+  double sigma() { return _sigma; }
 
   // A function that prevents us putting too much stock in small sample
   // sets.  Returns a number between 2.0 and 1.0, depending on the number
@@ -491,8 +488,6 @@
            get_new_prediction(_non_young_other_cost_per_region_ms_seq);
   }
 
-  void check_if_region_is_too_expensive(double predicted_time_ms);
-
   double predict_young_collection_elapsed_time_ms(size_t adjustment);
   double predict_base_elapsed_time_ms(size_t pending_cards);
   double predict_base_elapsed_time_ms(size_t pending_cards,
@@ -707,7 +702,6 @@
   // initial-mark work.
   volatile bool _during_initial_mark_pause;
 
-  bool _should_revert_to_young_gcs;
   bool _last_young_gc;
 
   // This set of variables tracks the collector efficiency, in order to
@@ -716,6 +710,7 @@
   double _mark_remark_start_sec;
   double _mark_cleanup_start_sec;
   double _mark_closure_time_ms;
+  double _root_region_scan_wait_time_ms;
 
   // Update the young list target length either by setting it to the
   // desired fixed value or by calculating it using G1's pause
@@ -800,6 +795,8 @@
 
   GenRemSet::Name  rem_set_name()     { return GenRemSet::CardTable; }
 
+  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
+
   // Update the heuristic info to record a collection pause of the given
   // start time, where the given number of bytes were used at the start.
   // This may involve changing the desired size of a collection set.
@@ -816,6 +813,10 @@
     _mark_closure_time_ms = mark_closure_time_ms;
   }
 
+  void record_root_region_scan_wait_time(double time_ms) {
+    _root_region_scan_wait_time_ms = time_ms;
+  }
+
   void record_concurrent_mark_remark_start();
   void record_concurrent_mark_remark_end();
 
@@ -939,10 +940,16 @@
     return _bytes_copied_during_gc;
   }
 
+  // Determine whether there are candidate regions so that the
+  // next GC should be mixed. The two action strings are used
+  // in the ergo output when the method returns true or false.
+  bool next_gc_should_be_mixed(const char* true_action_str,
+                               const char* false_action_str);
+
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
   // the collection set are available via access methods.
-  void choose_collection_set(double target_pause_time_ms);
+  void finalize_cset(double target_pause_time_ms);
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
@@ -1146,11 +1153,6 @@
     _survivor_surv_rate_group->stop_adding_regions();
   }
 
-  void tenure_all_objects() {
-    _max_survivor_regions = 0;
-    _tenuring_threshold = 0;
-  }
-
   void record_survivor_regions(size_t      regions,
                                HeapRegion* head,
                                HeapRegion* tail) {
--- a/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -131,8 +131,8 @@
                              ", " _name_ ": "SIZE_FORMAT" bytes (%1.2f %%)"
 
 // Generates the format string
-#define ergo_format(_action_, _extra_format_)                   \
-  " %1.3f: [G1Ergonomics (%s) " _action_ _extra_format_ "]"
+#define ergo_format(_extra_format_)                           \
+  " %1.3f: [G1Ergonomics (%s) %s" _extra_format_ "]"
 
 // Conditionally, prints an ergonomic decision record. _extra_format_
 // is the format string for the optional items we'd like to print
@@ -145,20 +145,21 @@
 // them to the print method. For convenience, we have wrapper macros
 // below which take a specific number of arguments and set the rest to
 // a default value.
-#define ergo_verbose_common(_tag_, _action_, _extra_format_,            \
+#define ergo_verbose_common(_tag_, _action_, _extra_format_,                \
                             _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_) \
-  do {                                                                  \
-    if (G1ErgoVerbose::enabled((_tag_))) {                              \
-      gclog_or_tty->print_cr(ergo_format(_action_, _extra_format_),     \
-                             os::elapsedTime(),                         \
-                             G1ErgoVerbose::to_string((_tag_)),         \
-                             (_arg0_), (_arg1_), (_arg2_),              \
-                             (_arg3_), (_arg4_), (_arg5_));             \
-    }                                                                   \
+  do {                                                                      \
+    if (G1ErgoVerbose::enabled((_tag_))) {                                  \
+      gclog_or_tty->print_cr(ergo_format(_extra_format_),                   \
+                             os::elapsedTime(),                             \
+                             G1ErgoVerbose::to_string((_tag_)),             \
+                             (_action_),                                    \
+                             (_arg0_), (_arg1_), (_arg2_),                  \
+                             (_arg3_), (_arg4_), (_arg5_));                 \
+    }                                                                       \
   } while (0)
 
 
-#define ergo_verbose(_tag_, _action_)                           \
+#define ergo_verbose(_tag_, _action_)                                   \
   ergo_verbose_common(_tag_, _action_, "", 0, 0, 0, 0, 0, 0)
 
 #define ergo_verbose0(_tag_, _action_, _extra_format_)                  \
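
The ergo_format change stops pasting the action into the format string at the expansion site and prints it through an extra %s instead, which is what lets next_gc_should_be_mixed() pass its true/false action strings as ordinary variables. A tiny standalone illustration of the difference using plain printf (not the VM's gclog machinery):

#include <cstdio>

// Old style: the action had to be a string literal so it could be glued
// into the format string by the preprocessor.
#define ERGO_FORMAT_OLD(action, extra) " %1.3f: [G1Ergonomics (%s) " action extra "]"

// New style: the action is just another runtime argument.
#define ERGO_FORMAT_NEW(extra)         " %1.3f: [G1Ergonomics (%s) %s" extra "]"

int main() {
  const char* tag    = "Mixed GCs";
  const char* action = "start mixed GCs";  // may now come from a variable
  std::printf(ERGO_FORMAT_OLD("start mixed GCs", "") "\n", 0.123, tag);
  std::printf(ERGO_FORMAT_NEW("") "\n", 0.123, tag, action);
  // Both lines print: " 0.123: [G1Ergonomics (Mixed GCs) start mixed GCs]"
}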
--- a/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -70,16 +70,20 @@
   OopsInHeapRegionClosure *_update_rset_cl;
   bool _during_initial_mark;
   bool _during_conc_mark;
+  uint _worker_id;
+
 public:
   RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
                                  HeapRegion* hr,
                                  OopsInHeapRegionClosure* update_rset_cl,
                                  bool during_initial_mark,
-                                 bool during_conc_mark) :
+                                 bool during_conc_mark,
+                                 uint worker_id) :
     _g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
     _update_rset_cl(update_rset_cl),
     _during_initial_mark(during_initial_mark),
-    _during_conc_mark(during_conc_mark) { }
+    _during_conc_mark(during_conc_mark),
+    _worker_id(worker_id) { }
 
   size_t marked_bytes() { return _marked_bytes; }
 
@@ -123,7 +127,7 @@
         // explicitly and all objects in the CSet are considered
         // (implicitly) live. So, we won't mark them explicitly and
         // we'll leave them over NTAMS.
-        _cm->markNext(obj);
+        _cm->grayRoot(obj, obj_size, _worker_id, _hr);
       }
       _marked_bytes += (obj_size * HeapWordSize);
       obj->set_mark(markOopDesc::prototype());
@@ -155,12 +159,14 @@
   G1CollectedHeap* _g1h;
   ConcurrentMark* _cm;
   OopsInHeapRegionClosure *_update_rset_cl;
+  uint _worker_id;
 
 public:
   RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
-                                OopsInHeapRegionClosure* update_rset_cl) :
+                                OopsInHeapRegionClosure* update_rset_cl,
+                                uint worker_id) :
     _g1h(g1h), _update_rset_cl(update_rset_cl),
-    _cm(_g1h->concurrent_mark()) { }
+    _worker_id(worker_id), _cm(_g1h->concurrent_mark()) { }
 
   bool doHeapRegion(HeapRegion *hr) {
     bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
@@ -173,7 +179,8 @@
       if (hr->evacuation_failed()) {
         RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl,
                                             during_initial_mark,
-                                            during_conc_mark);
+                                            during_conc_mark,
+                                            _worker_id);
 
         MemRegion mr(hr->bottom(), hr->end());
         // We'll recreate the prev marking info so we'll first clear
@@ -226,7 +233,7 @@
       update_rset_cl = &immediate_update;
     }
 
-    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl);
+    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl, worker_id);
 
     HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
     _g1h->collection_set_iterate_from(hr, &rsfp_cl);
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -126,7 +126,6 @@
 void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                     bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  EventMark m("1 mark object");
   TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
   GenMarkSweep::trace(" 1");
 
@@ -292,7 +291,6 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   Generation* pg = g1h->perm_gen();
 
-  EventMark m("2 compute new addresses");
   TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
   GenMarkSweep::trace("2");
 
@@ -337,7 +335,6 @@
   Generation* pg = g1h->perm_gen();
 
   // Adjust the pointers to reflect the new locations
-  EventMark m("3 adjust pointers");
   TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
   GenMarkSweep::trace("3");
 
@@ -402,7 +399,6 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   Generation* pg = g1h->perm_gen();
 
-  EventMark m("4 compact heap");
   TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
   GenMarkSweep::trace("4");
 
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,9 @@
                G1MonitoringSupport::pad_capacity(0, 3) /* min_capacity */,
                G1MonitoringSupport::pad_capacity(g1mm->young_gen_max(), 3),
                G1MonitoringSupport::pad_capacity(0, 3) /* curr_capacity */) {
-  update_all();
+  if (UsePerfData) {
+    update_all();
+  }
 }
 
 G1OldGenerationCounters::G1OldGenerationCounters(G1MonitoringSupport* g1mm,
@@ -53,7 +55,9 @@
                G1MonitoringSupport::pad_capacity(0) /* min_capacity */,
                G1MonitoringSupport::pad_capacity(g1mm->old_gen_max()),
                G1MonitoringSupport::pad_capacity(0) /* curr_capacity */) {
-  update_all();
+  if (UsePerfData) {
+    update_all();
+  }
 }
 
 void G1YoungGenerationCounters::update_all() {
@@ -149,10 +153,6 @@
     pad_capacity(0) /* max_capacity */,
     pad_capacity(0) /* init_capacity */,
     _young_collection_counters);
-  // Given that this survivor space is not used, we update it here
-  // once to reflect that its used space is 0 so that we don't have to
-  // worry about updating it again later.
-  _from_counters->update_used(0);
 
   //  name "generation.0.space.2"
   // See _old_space_counters for additional counters
@@ -160,6 +160,13 @@
     pad_capacity(overall_reserved()) /* max_capacity */,
     pad_capacity(survivor_space_committed()) /* init_capacity */,
     _young_collection_counters);
+
+  if (UsePerfData) {
+    // Given that this survivor space is not used, we update it here
+    // once to reflect that its used space is 0 so that we don't have to
+    // worry about updating it again later.
+    _from_counters->update_used(0);
+  }
 }
 
 void G1MonitoringSupport::recalculate_sizes() {
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -89,16 +89,15 @@
 //
 // * Min Capacity
 //
-//    We set this to 0 for all spaces. We could consider setting the old
-//    min capacity to the min capacity of the heap (see 7078465).
+//    We set this to 0 for all spaces.
 //
 // * Max Capacity
 //
 //    For jstat, we set the max capacity of all spaces to heap_capacity,
-//    given that we don't always have a reasonably upper bound on how big
-//    each space can grow. For the memory pools, we actually make the max
-//    capacity undefined. We could consider setting the old max capacity
-//    to the max capacity of the heap (see 7078465).
+//    given that we don't always have a reasonable upper bound on how big
+//    each space can grow. For the memory pools, we make the max
+//    capacity undefined with the exception of the old memory pool for
+//    which we make the max capacity the same as the max heap capacity.
 //
 // If we had more accurate occupancy / capacity information per
 // region set the above calculations would be greatly simplified and
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -51,6 +51,7 @@
   G1RemSet* _g1_rem;
   ConcurrentMark* _cm;
   G1ParScanThreadState* _par_scan_state;
+  uint _worker_id;
   bool _during_initial_mark;
   bool _mark_in_progress;
 public:
@@ -219,6 +220,7 @@
 
 // Closure for iterating over object fields during concurrent marking
 class G1CMOopClosure : public OopClosure {
+private:
   G1CollectedHeap*   _g1h;
   ConcurrentMark*    _cm;
   CMTask*            _task;
@@ -229,4 +231,92 @@
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
+// Closure to scan the root regions during concurrent marking
+class G1RootRegionScanClosure : public OopClosure {
+private:
+  G1CollectedHeap* _g1h;
+  ConcurrentMark*  _cm;
+  uint _worker_id;
+public:
+  G1RootRegionScanClosure(G1CollectedHeap* g1h, ConcurrentMark* cm,
+                          uint worker_id) :
+    _g1h(g1h), _cm(cm), _worker_id(worker_id) { }
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(      oop* p) { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+};
+
+// Closure that applies the given two closures in sequence.
+// Used by the RSet refinement code (when updating RSets
+// during an evacuation pause) to record cards containing
+// pointers into the collection set.
+
+class G1Mux2Closure : public OopClosure {
+  OopClosure* _c1;
+  OopClosure* _c2;
+public:
+  G1Mux2Closure(OopClosure *c1, OopClosure *c2);
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p)        { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
+};
+
+// A closure that returns true if it is actually applied
+// to a reference
+
+class G1TriggerClosure : public OopClosure {
+  bool _triggered;
+public:
+  G1TriggerClosure();
+  bool triggered() const { return _triggered; }
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p)        { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
+};
+
+// A closure which uses a triggering closure to determine
+// whether to apply an oop closure.
+
+class G1InvokeIfNotTriggeredClosure: public OopClosure {
+  G1TriggerClosure* _trigger_cl;
+  OopClosure* _oop_cl;
+public:
+  G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t, OopClosure* oc);
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p)        { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
+};
+
+class G1UpdateRSOrPushRefOopClosure: public OopClosure {
+  G1CollectedHeap* _g1;
+  G1RemSet* _g1_rem_set;
+  HeapRegion* _from;
+  OopsInHeapRegionClosure* _push_ref_cl;
+  bool _record_refs_into_cset;
+  int _worker_i;
+
+public:
+  G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
+                                G1RemSet* rs,
+                                OopsInHeapRegionClosure* push_ref_cl,
+                                bool record_refs_into_cset,
+                                int worker_i = 0);
+
+  void set_from(HeapRegion* from) {
+    assert(from != NULL, "from region must be non-NULL");
+    _from = from;
+  }
+
+  bool self_forwarded(oop obj) {
+    bool result = (obj->is_forwarded() && (obj->forwardee() == obj));
+    return result;
+  }
+
+  bool apply_to_weak_ref_discovered_field() { return true; }
+
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+  virtual void do_oop(oop* p)       { do_oop_nv(p); }
+};
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
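
For illustration, the trigger/invoke-if-not-triggered/mux closures declared above are meant to be composed. A minimal sketch of that composition (not lines from the patch), assuming the same variable names as the g1RemSet.cpp hunk later in this change, with g1h and update_rs_oop_cl taken as given:

    G1TriggerClosure trigger_cl;                             // remembers whether it was ever applied
    FilterIntoCSClosure into_cs_cl(NULL, g1h, &trigger_cl);  // applies trigger_cl only to refs into the CSet
    G1InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);  // stops filtering once triggered
    G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);        // applies the filter and the RSet update in sequence
    // Scanning a card with mux both updates the RSet (or pushes refs into the
    // per-worker queue) and records, via trigger_cl.triggered(), whether the
    // card contains a reference into the collection set.
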
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,8 @@
 // perf-critical inner loop.
 #define FILTERINTOCSCLOSURE_DOHISTOGRAMCOUNT 0
 
-template <class T> inline void FilterIntoCSClosure::do_oop_nv(T* p) {
+template <class T>
+inline void FilterIntoCSClosure::do_oop_nv(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop) &&
       _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop))) {
@@ -53,7 +54,8 @@
 
 #define FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT 0
 
-template <class T> inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
+template <class T>
+inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     HeapWord* obj_hw = (HeapWord*)oopDesc::decode_heap_oop_not_null(heap_oop);
@@ -67,7 +69,8 @@
 }
 
 // This closure is applied to the fields of the objects that have just been copied.
-template <class T> inline void G1ParScanClosure::do_oop_nv(T* p) {
+template <class T>
+inline void G1ParScanClosure::do_oop_nv(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
 
   if (!oopDesc::is_null(heap_oop)) {
@@ -96,7 +99,8 @@
   }
 }
 
-template <class T> inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
+template <class T>
+inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
 
   if (!oopDesc::is_null(heap_oop)) {
@@ -111,7 +115,8 @@
   }
 }
 
-template <class T> inline void G1CMOopClosure::do_oop_nv(T* p) {
+template <class T>
+inline void G1CMOopClosure::do_oop_nv(T* p) {
   assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
   assert(!_g1h->is_on_master_free_list(
                     _g1h->heap_region_containing((HeapWord*) p)), "invariant");
@@ -125,4 +130,97 @@
   _task->deal_with_reference(obj);
 }
 
+template <class T>
+inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
+    if (hr != NULL) {
+      _cm->grayRoot(obj, obj->size(), _worker_id, hr);
+    }
+  }
+}
+
+template <class T>
+inline void G1Mux2Closure::do_oop_nv(T* p) {
+  // Apply first closure; then apply the second.
+  _c1->do_oop(p);
+  _c2->do_oop(p);
+}
+
+template <class T>
+inline void G1TriggerClosure::do_oop_nv(T* p) {
+  // Record that this closure was actually applied (triggered).
+  _triggered = true;
+}
+
+template <class T>
+inline void G1InvokeIfNotTriggeredClosure::do_oop_nv(T* p) {
+  if (!_trigger_cl->triggered()) {
+    _oop_cl->do_oop(p);
+  }
+}
+
+template <class T>
+inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
+  oop obj = oopDesc::load_decode_heap_oop(p);
+#ifdef ASSERT
+  // can't do because of races
+  // assert(obj == NULL || obj->is_oop(), "expected an oop");
+
+  // Do the safe subset of is_oop
+  if (obj != NULL) {
+#ifdef CHECK_UNHANDLED_OOPS
+    oopDesc* o = obj.obj();
+#else
+    oopDesc* o = obj;
+#endif // CHECK_UNHANDLED_OOPS
+    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
+    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
+  }
+#endif // ASSERT
+
+  assert(_from != NULL, "from region must be non-NULL");
+
+  HeapRegion* to = _g1->heap_region_containing(obj);
+  if (to != NULL && _from != to) {
+    // The _record_refs_into_cset flag is true during the RSet
+    // updating part of an evacuation pause. It is false at all
+    // other times:
+    //  * rebuilding the remembered sets after a full GC
+    //  * during concurrent refinement.
+    //  * updating the remembered sets of regions in the collection
+    //    set in the event of an evacuation failure (when deferred
+    //    updates are enabled).
+
+    if (_record_refs_into_cset && to->in_collection_set()) {
+      // We are recording references that point into the collection
+      // set and this particular reference does exactly that...
+      // If the referenced object has already been forwarded
+      // to itself, we are handling an evacuation failure and
+      // we have already visited/tried to copy this object
+      // there is no need to retry.
+      if (!self_forwarded(obj)) {
+        assert(_push_ref_cl != NULL, "should not be null");
+        // Push the reference in the refs queue of the G1ParScanThreadState
+        // instance for this worker thread.
+        _push_ref_cl->do_oop(p);
+      }
+
+      // Deferred updates to the CSet are either discarded (in the normal case),
+      // or processed (if an evacuation failure occurs) at the end
+      // of the collection.
+      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
+    } else {
+      // We either don't care about pushing references that point into the
+      // collection set (i.e. we're not during an evacuation pause) _or_
+      // the reference doesn't point into the collection set. Either way
+      // we add the reference directly to the RSet of the region containing
+      // the referenced object.
+      _g1_rem_set->par_write_ref(_from, p, _worker_i);
+    }
+  }
+}
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP
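
A short usage sketch for G1UpdateRSOrPushRefOopClosure (mirroring G1RemSet::concurrentRefineOneCard_impl in the g1RemSet.cpp hunk below; the surrounding variables are assumptions): the closure must be told which region the scanned card belongs to before it is applied, since par_write_ref() records references against that "from" region.

    G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(g1h,
                                                   g1h->g1_rem_set(),
                                                   oops_in_heap_closure,      // set when recording refs into the CSet
                                                   check_for_refs_into_cset,  // true only while updating RSets in a pause
                                                   worker_i);
    update_rs_oop_cl.set_from(r);  // r is the HeapRegion containing the card being refined
    // The closure is then applied to the fields of the objects on that card.
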
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -569,40 +569,26 @@
 
 static IntHistogram out_of_histo(50, 50);
 
-class TriggerClosure : public OopClosure {
-  bool _trigger;
-public:
-  TriggerClosure() : _trigger(false) { }
-  bool value() const { return _trigger; }
-  template <class T> void do_oop_nv(T* p) { _trigger = true; }
-  virtual void do_oop(oop* p)        { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
-};
+
+G1TriggerClosure::G1TriggerClosure() :
+  _triggered(false) { }
+
+G1InvokeIfNotTriggeredClosure::G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t_cl,
+                                                             OopClosure* oop_cl)  :
+  _trigger_cl(t_cl), _oop_cl(oop_cl) { }
 
-class InvokeIfNotTriggeredClosure: public OopClosure {
-  TriggerClosure* _t;
-  OopClosure* _oc;
-public:
-  InvokeIfNotTriggeredClosure(TriggerClosure* t, OopClosure* oc):
-    _t(t), _oc(oc) { }
-  template <class T> void do_oop_nv(T* p) {
-    if (!_t->value()) _oc->do_oop(p);
-  }
-  virtual void do_oop(oop* p)        { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
-};
+G1Mux2Closure::G1Mux2Closure(OopClosure *c1, OopClosure *c2) :
+  _c1(c1), _c2(c2) { }
 
-class Mux2Closure : public OopClosure {
-  OopClosure* _c1;
-  OopClosure* _c2;
-public:
-  Mux2Closure(OopClosure *c1, OopClosure *c2) : _c1(c1), _c2(c2) { }
-  template <class T> void do_oop_nv(T* p) {
-    _c1->do_oop(p); _c2->do_oop(p);
-  }
-  virtual void do_oop(oop* p)        { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
-};
+G1UpdateRSOrPushRefOopClosure::
+G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
+                              G1RemSet* rs,
+                              OopsInHeapRegionClosure* push_ref_cl,
+                              bool record_refs_into_cset,
+                              int worker_i) :
+  _g1(g1h), _g1_rem_set(rs), _from(NULL),
+  _record_refs_into_cset(record_refs_into_cset),
+  _push_ref_cl(push_ref_cl), _worker_i(worker_i) { }
 
 bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                                    bool check_for_refs_into_cset) {
@@ -629,17 +615,17 @@
     assert((size_t)worker_i < n_workers(), "index of worker larger than _cset_rs_update_cl[].length");
     oops_in_heap_closure = _cset_rs_update_cl[worker_i];
   }
-  UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
-                                               _g1->g1_rem_set(),
-                                               oops_in_heap_closure,
-                                               check_for_refs_into_cset,
-                                               worker_i);
+  G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
+                                                 _g1->g1_rem_set(),
+                                                 oops_in_heap_closure,
+                                                 check_for_refs_into_cset,
+                                                 worker_i);
   update_rs_oop_cl.set_from(r);
 
-  TriggerClosure trigger_cl;
+  G1TriggerClosure trigger_cl;
   FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
-  InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
-  Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);
+  G1InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
+  G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);
 
   FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
                         (check_for_refs_into_cset ?
@@ -688,7 +674,7 @@
     _conc_refine_cards++;
   }
 
-  return trigger_cl.value();
+  return trigger_cl.triggered();
 }
 
 bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
--- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -191,44 +191,5 @@
   virtual void do_oop(      oop* p) { do_oop_work(p); }
 };
 
-class UpdateRSOrPushRefOopClosure: public OopClosure {
-  G1CollectedHeap* _g1;
-  G1RemSet* _g1_rem_set;
-  HeapRegion* _from;
-  OopsInHeapRegionClosure* _push_ref_cl;
-  bool _record_refs_into_cset;
-  int _worker_i;
-
-  template <class T> void do_oop_work(T* p);
-
-public:
-  UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
-                              G1RemSet* rs,
-                              OopsInHeapRegionClosure* push_ref_cl,
-                              bool record_refs_into_cset,
-                              int worker_i = 0) :
-    _g1(g1h),
-    _g1_rem_set(rs),
-    _from(NULL),
-    _record_refs_into_cset(record_refs_into_cset),
-    _push_ref_cl(push_ref_cl),
-    _worker_i(worker_i) { }
-
-  void set_from(HeapRegion* from) {
-    assert(from != NULL, "from region must be non-NULL");
-    _from = from;
-  }
-
-  bool self_forwarded(oop obj) {
-    bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
-    return result;
-  }
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(oop* p)       { do_oop_work(p); }
-
-  bool apply_to_weak_ref_discovered_field() { return true; }
-};
-
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
--- a/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,66 +85,4 @@
   }
 }
 
-template <class T>
-inline void UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
-  oop obj = oopDesc::load_decode_heap_oop(p);
-#ifdef ASSERT
-  // can't do because of races
-  // assert(obj == NULL || obj->is_oop(), "expected an oop");
-
-  // Do the safe subset of is_oop
-  if (obj != NULL) {
-#ifdef CHECK_UNHANDLED_OOPS
-    oopDesc* o = obj.obj();
-#else
-    oopDesc* o = obj;
-#endif // CHECK_UNHANDLED_OOPS
-    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
-    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
-  }
-#endif // ASSERT
-
-  assert(_from != NULL, "from region must be non-NULL");
-
-  HeapRegion* to = _g1->heap_region_containing(obj);
-  if (to != NULL && _from != to) {
-    // The _record_refs_into_cset flag is true during the RSet
-    // updating part of an evacuation pause. It is false at all
-    // other times:
-    //  * rebuilding the rembered sets after a full GC
-    //  * during concurrent refinement.
-    //  * updating the remembered sets of regions in the collection
-    //    set in the event of an evacuation failure (when deferred
-    //    updates are enabled).
-
-    if (_record_refs_into_cset && to->in_collection_set()) {
-      // We are recording references that point into the collection
-      // set and this particular reference does exactly that...
-      // If the referenced object has already been forwarded
-      // to itself, we are handling an evacuation failure and
-      // we have already visited/tried to copy this object
-      // there is no need to retry.
-      if (!self_forwarded(obj)) {
-        assert(_push_ref_cl != NULL, "should not be null");
-        // Push the reference in the refs queue of the G1ParScanThreadState
-        // instance for this worker thread.
-        _push_ref_cl->do_oop(p);
-      }
-
-      // Deferred updates to the CSet are either discarded (in the normal case),
-      // or processed (if an evacuation failure occurs) at the end
-      // of the collection.
-      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
-    } else {
-      // We either don't care about pushing references that point into the
-      // collection set (i.e. we're not during an evacuation pause) _or_
-      // the reference doesn't point into the collection set. Either way
-      // we add the reference directly to the RSet of the region containing
-      // the referenced object.
-      _g1_rem_set->par_write_ref(_from, p, _worker_i);
-    }
-  }
-}
-
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -297,7 +297,22 @@
                                                                             \
   develop(uintx, G1DefaultMaxNewGenPercent, 80,                             \
           "Percentage (0-100) of the heap size to use as maximum "          \
-          "young gen size.")
+          "young gen size.")                                                \
+                                                                            \
+  develop(uintx, G1OldCSetRegionLiveThresholdPercent, 90,                   \
+          "Threshold for regions to be added to the collection set. "       \
+          "Regions with more live bytes that this will not be collected.")  \
+                                                                            \
+  product(uintx, G1HeapWastePercent, 5,                                     \
+          "Amount of space, expressed as a percentage of the heap size, "   \
+          "that G1 is willing not to collect to avoid expensive GCs.")      \
+                                                                            \
+  product(uintx, G1MixedGCCountTarget, 4,                                   \
+          "The target number of mixed GCs after a marking cycle.")          \
+                                                                            \
+  develop(uintx, G1OldCSetRegionThresholdPercent, 10,                       \
+          "An upper bound for the number of old CSet regions expressed "    \
+          "as a percentage of the heap size.")
 
 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
 
--- a/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,14 @@
 
 // Forward declarations.
 enum G1Barrier {
-  G1BarrierNone, G1BarrierRS, G1BarrierEvac
+  G1BarrierNone,
+  G1BarrierRS,
+  G1BarrierEvac
 };
 
-template<bool do_gen_barrier, G1Barrier barrier,
-         bool do_mark_object>
+template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
 class G1ParCopyClosure;
+
 class G1ParScanClosure;
 class G1ParPushHeapRSClosure;
 
@@ -46,6 +48,13 @@
 class FilterIntoCSClosure;
 class FilterOutOfRegionClosure;
 class G1CMOopClosure;
+class G1RootRegionScanClosure;
+
+// Specialized oop closures from g1RemSet.cpp
+class G1Mux2Closure;
+class G1TriggerClosure;
+class G1InvokeIfNotTriggeredClosure;
+class G1UpdateRSOrPushRefOopClosure;
 
 #ifdef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES
 #error "FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES already defined."
@@ -57,7 +66,12 @@
       f(G1ParPushHeapRSClosure,_nv)                     \
       f(FilterIntoCSClosure,_nv)                        \
       f(FilterOutOfRegionClosure,_nv)                   \
-      f(G1CMOopClosure,_nv)
+      f(G1CMOopClosure,_nv)                             \
+      f(G1RootRegionScanClosure,_nv)                    \
+      f(G1Mux2Closure,_nv)                              \
+      f(G1TriggerClosure,_nv)                           \
+      f(G1InvokeIfNotTriggeredClosure,_nv)              \
+      f(G1UpdateRSOrPushRefOopClosure,_nv)
 
 #ifdef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES
 #error "FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES already defined."
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -387,13 +387,12 @@
   ct_bs->clear(MemRegion(bottom(), end()));
 }
 
-// <PREDICTION>
 void HeapRegion::calc_gc_efficiency() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  _gc_efficiency = (double) garbage_bytes() /
-                            g1h->predict_region_elapsed_time_ms(this, false);
+  G1CollectorPolicy* g1p = g1h->g1_policy();
+  _gc_efficiency = (double) reclaimable_bytes() /
+                            g1p->predict_region_elapsed_time_ms(this, false);
 }
-// </PREDICTION>
 
 void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
   assert(!isHumongous(), "sanity / pre-condition");
@@ -659,7 +658,7 @@
   // If we're within a stop-world GC, then we might look at a card in a
   // GC alloc region that extends onto a GC LAB, which may not be
   // parseable.  Stop such at the "saved_mark" of the region.
-  if (G1CollectedHeap::heap()->is_gc_active()) {
+  if (g1h->is_gc_active()) {
     mr = mr.intersection(used_region_at_save_marks());
   } else {
     mr = mr.intersection(used_region());
@@ -688,53 +687,63 @@
     OrderAccess::storeload();
   }
 
+  // Cache the boundaries of the memory region in some const locals
+  HeapWord* const start = mr.start();
+  HeapWord* const end = mr.end();
+
   // We used to use "block_start_careful" here.  But we're actually happy
   // to update the BOT while we do this...
-  HeapWord* cur = block_start(mr.start());
-  assert(cur <= mr.start(), "Postcondition");
+  HeapWord* cur = block_start(start);
+  assert(cur <= start, "Postcondition");
+
+  oop obj;
 
-  while (cur <= mr.start()) {
-    if (oop(cur)->klass_or_null() == NULL) {
+  HeapWord* next = cur;
+  while (next <= start) {
+    cur = next;
+    obj = oop(cur);
+    if (obj->klass_or_null() == NULL) {
       // Ran into an unparseable point.
       return cur;
     }
     // Otherwise...
-    int sz = oop(cur)->size();
-    if (cur + sz > mr.start()) break;
-    // Otherwise, go on.
-    cur = cur + sz;
+    next = (cur + obj->size());
   }
-  oop obj;
-  obj = oop(cur);
-  // If we finish this loop...
-  assert(cur <= mr.start()
-         && obj->klass_or_null() != NULL
-         && cur + obj->size() > mr.start(),
+
+  // If we finish the above loop, we have a parseable object that
+  // begins on or before the start of the memory region, and ends
+  // inside or spans the entire region.
+
+  assert(obj == oop(cur), "sanity");
+  assert(cur <= start &&
+         obj->klass_or_null() != NULL &&
+         (cur + obj->size()) > start,
          "Loop postcondition");
+
   if (!g1h->is_obj_dead(obj)) {
     obj->oop_iterate(cl, mr);
   }
 
-  HeapWord* next;
-  while (cur < mr.end()) {
+  while (cur < end) {
     obj = oop(cur);
     if (obj->klass_or_null() == NULL) {
       // Ran into an unparseable point.
       return cur;
     };
+
     // Otherwise:
     next = (cur + obj->size());
+
     if (!g1h->is_obj_dead(obj)) {
-      if (next < mr.end()) {
+      if (next < end || !obj->is_objArray()) {
+        // This object either does not span the MemRegion
+        // boundary, or if it does it's not an array.
+        // Apply closure to whole object.
         obj->oop_iterate(cl);
       } else {
-        // this obj spans the boundary.  If it's an array, stop at the
-        // boundary.
-        if (obj->is_objArray()) {
-          obj->oop_iterate(cl, mr);
-        } else {
-          obj->oop_iterate(cl);
-        }
+        // This obj is an array that spans the boundary.
+        // Stop at the boundary.
+        obj->oop_iterate(cl, mr);
       }
     }
     cur = next;
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -374,7 +374,9 @@
     ParVerifyClaimValue        = 4,
     RebuildRSClaimValue        = 5,
     CompleteMarkCSetClaimValue = 6,
-    ParEvacFailureClaimValue   = 7
+    ParEvacFailureClaimValue   = 7,
+    AggregateCountClaimValue   = 8,
+    VerifyCountClaimValue      = 9
   };
 
   inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
@@ -413,6 +415,16 @@
     return used_at_mark_start_bytes - marked_bytes();
   }
 
+  // Return the amount of bytes we'll reclaim if we collect this
+  // region. This includes not only the known garbage bytes in the
+  // region but also any unallocated space in it, i.e., [top, end),
+  // since it will also be reclaimed if we collect the region.
+  size_t reclaimable_bytes() {
+    size_t known_live_bytes = live_bytes();
+    assert(known_live_bytes <= capacity(), "sanity");
+    return capacity() - known_live_bytes;
+  }
+
   // An upper bound on the number of live bytes in the region.
   size_t max_live_bytes() { return used() - garbage_bytes(); }
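
A quick worked example of the new helper (illustrative numbers only): for a region with a capacity of 1024 KB of which 300 KB is known to be live, reclaimable_bytes() reports 1024 KB - 300 KB = 724 KB; both the dead objects below top and the unallocated space in [top, end) count towards what collecting the region would get back.
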
 
@@ -646,10 +658,8 @@
     init_top_at_mark_start();
   }
 
-  // <PREDICTION>
   void calc_gc_efficiency(void);
   double gc_efficiency() { return _gc_efficiency;}
-  // </PREDICTION>
 
   bool is_young() const     { return _young_type != NotYoung; }
   bool is_survivor() const  { return _young_type == Survivor; }
--- a/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -72,10 +72,11 @@
 }
 
 inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
-  if (during_initial_mark) {
-    if (is_survivor()) {
-      assert(false, "should not allocate survivors during IM");
-    } else {
+  if (is_survivor()) {
+    // This is how we always allocate survivors.
+    assert(_next_top_at_mark_start == bottom(), "invariant");
+  } else {
+    if (during_initial_mark) {
       // During initial-mark we'll explicitly mark any objects on old
       // regions that are pointed to by roots. Given that explicit
       // marks only make sense under NTAMS it'd be nice if we could
@@ -84,11 +85,6 @@
       // NTAMS to the end of the region so all marks will be below
       // NTAMS. We'll set it to the actual top when we retire this region.
       _next_top_at_mark_start = end();
-    }
-  } else {
-    if (is_survivor()) {
-      // This is how we always allocate survivors.
-      assert(_next_top_at_mark_start == bottom(), "invariant");
     } else {
       // We could have re-used this old region as to-space over a
       // couple of GCs since the start of the concurrent marking
@@ -101,19 +97,15 @@
 }
 
 inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
-  if (during_initial_mark) {
-    if (is_survivor()) {
-      assert(false, "should not allocate survivors during IM");
-    } else {
+  if (is_survivor()) {
+    // This is how we always allocate survivors.
+    assert(_next_top_at_mark_start == bottom(), "invariant");
+  } else {
+    if (during_initial_mark) {
       // See the comment for note_start_of_copying() for the details
       // on this.
       assert(_next_top_at_mark_start == end(), "pre-condition");
       _next_top_at_mark_start = top();
-    }
-  } else {
-    if (is_survivor()) {
-      // This is how we always allocate survivors.
-      assert(_next_top_at_mark_start == bottom(), "invariant");
     } else {
       // See the comment for note_start_of_copying() for the details
       // on this.
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,6 +59,7 @@
 class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
   friend class hrs_ext_msg;
   friend class HRSPhaseSetter;
+  friend class VMStructs;
 
 protected:
   static size_t calculate_region_num(HeapRegion* hr);
--- a/src/share/vm/gc_implementation/g1/survRateGroup.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/survRateGroup.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,33 +38,36 @@
     _summary_surv_rates(NULL),
     _surv_rate(NULL),
     _accum_surv_rate_pred(NULL),
-    _surv_rate_pred(NULL)
-{
+    _surv_rate_pred(NULL),
+    _stats_arrays_length(0) {
   reset();
   if (summary_surv_rates_len > 0) {
     size_t length = summary_surv_rates_len;
-      _summary_surv_rates = NEW_C_HEAP_ARRAY(NumberSeq*, length);
-    if (_summary_surv_rates == NULL) {
-      vm_exit_out_of_memory(sizeof(NumberSeq*) * length,
-                            "Not enough space for surv rate summary");
+    _summary_surv_rates = NEW_C_HEAP_ARRAY(NumberSeq*, length);
+    for (size_t i = 0; i < length; ++i) {
+      _summary_surv_rates[i] = new NumberSeq();
     }
-    for (size_t i = 0; i < length; ++i)
-      _summary_surv_rates[i] = new NumberSeq();
   }
 
   start_adding_regions();
 }
 
-
-void SurvRateGroup::reset()
-{
+void SurvRateGroup::reset() {
   _all_regions_allocated = 0;
   _setup_seq_num         = 0;
-  _stats_arrays_length   = 0;
   _accum_surv_rate       = 0.0;
   _last_pred             = 0.0;
   // the following will set up the arrays with length 1
   _region_num            = 1;
+
+  // The call to stop_adding_regions() will use "new" to refill
+  // the _surv_rate_pred array, so we need to make sure to call
+  // "delete".
+  for (size_t i = 0; i < _stats_arrays_length; ++i) {
+    delete _surv_rate_pred[i];
+  }
+  _stats_arrays_length = 0;
+
   stop_adding_regions();
   guarantee( _stats_arrays_length == 1, "invariant" );
   guarantee( _surv_rate_pred[0] != NULL, "invariant" );
@@ -73,72 +76,47 @@
   _region_num = 0;
 }
 
-
 void
 SurvRateGroup::start_adding_regions() {
   _setup_seq_num   = _stats_arrays_length;
   _region_num      = 0;
   _accum_surv_rate = 0.0;
-
-#if 0
-  gclog_or_tty->print_cr("[%s] start adding regions, seq num %d, length %d",
-                         _name, _setup_seq_num, _region_num);
-#endif // 0
 }
 
 void
 SurvRateGroup::stop_adding_regions() {
-
-#if 0
-  gclog_or_tty->print_cr("[%s] stop adding regions, length %d", _name, _region_num);
-#endif // 0
-
   if (_region_num > _stats_arrays_length) {
     double* old_surv_rate = _surv_rate;
     double* old_accum_surv_rate_pred = _accum_surv_rate_pred;
     TruncatedSeq** old_surv_rate_pred = _surv_rate_pred;
 
     _surv_rate = NEW_C_HEAP_ARRAY(double, _region_num);
-    if (_surv_rate == NULL) {
-      vm_exit_out_of_memory(sizeof(double) * _region_num,
-                            "Not enough space for surv rate array.");
-    }
     _accum_surv_rate_pred = NEW_C_HEAP_ARRAY(double, _region_num);
-    if (_accum_surv_rate_pred == NULL) {
-      vm_exit_out_of_memory(sizeof(double) * _region_num,
-                         "Not enough space for accum surv rate pred array.");
-    }
     _surv_rate_pred = NEW_C_HEAP_ARRAY(TruncatedSeq*, _region_num);
-    if (_surv_rate == NULL) {
-      vm_exit_out_of_memory(sizeof(TruncatedSeq*) * _region_num,
-                            "Not enough space for surv rate pred array.");
-    }
 
-    for (size_t i = 0; i < _stats_arrays_length; ++i)
+    for (size_t i = 0; i < _stats_arrays_length; ++i) {
       _surv_rate_pred[i] = old_surv_rate_pred[i];
-
-#if 0
-    gclog_or_tty->print_cr("[%s] stop adding regions, new seqs %d to %d",
-                  _name, _array_length, _region_num - 1);
-#endif // 0
-
+    }
     for (size_t i = _stats_arrays_length; i < _region_num; ++i) {
       _surv_rate_pred[i] = new TruncatedSeq(10);
-      // _surv_rate_pred[i]->add(last_pred);
     }
 
     _stats_arrays_length = _region_num;
 
-    if (old_surv_rate != NULL)
+    if (old_surv_rate != NULL) {
       FREE_C_HEAP_ARRAY(double, old_surv_rate);
-    if (old_accum_surv_rate_pred != NULL)
+    }
+    if (old_accum_surv_rate_pred != NULL) {
       FREE_C_HEAP_ARRAY(double, old_accum_surv_rate_pred);
-    if (old_surv_rate_pred != NULL)
-      FREE_C_HEAP_ARRAY(NumberSeq*, old_surv_rate_pred);
+    }
+    if (old_surv_rate_pred != NULL) {
+      FREE_C_HEAP_ARRAY(TruncatedSeq*, old_surv_rate_pred);
+    }
   }
 
-  for (size_t i = 0; i < _stats_arrays_length; ++i)
+  for (size_t i = 0; i < _stats_arrays_length; ++i) {
     _surv_rate[i] = 0.0;
+  }
 }
 
 double
@@ -187,12 +165,6 @@
 SurvRateGroup::all_surviving_words_recorded(bool propagate) {
   if (propagate && _region_num > 0) { // conservative
     double surv_rate = _surv_rate_pred[_region_num-1]->last();
-
-#if 0
-    gclog_or_tty->print_cr("propagating %1.2lf from %d to %d",
-                  surv_rate, _curr_length, _array_length - 1);
-#endif // 0
-
     for (size_t i = _region_num; i < _stats_arrays_length; ++i) {
       guarantee( _surv_rate[i] <= 0.00001,
                  "the slot should not have been updated" );
--- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,8 @@
   nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
   nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
   nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
+  nonstatic_field(G1CollectedHeap, _old_set,            HeapRegionSetBase)    \
+  nonstatic_field(G1CollectedHeap, _humongous_set,      HeapRegionSetBase)    \
                                                                               \
   nonstatic_field(G1MonitoringSupport, _eden_committed,     size_t)           \
   nonstatic_field(G1MonitoringSupport, _eden_used,          size_t)           \
@@ -47,6 +49,10 @@
   nonstatic_field(G1MonitoringSupport, _survivor_used,      size_t)           \
   nonstatic_field(G1MonitoringSupport, _old_committed,      size_t)           \
   nonstatic_field(G1MonitoringSupport, _old_used,           size_t)           \
+                                                                              \
+  nonstatic_field(HeapRegionSetBase,   _length,             size_t)           \
+  nonstatic_field(HeapRegionSetBase,   _region_num,         size_t)           \
+  nonstatic_field(HeapRegionSetBase,   _total_used_bytes,   size_t)           \
 
 
 #define VM_TYPES_G1(declare_type, declare_toplevel_type)                      \
@@ -55,6 +61,7 @@
                                                                               \
   declare_type(HeapRegion, ContiguousSpace)                                   \
   declare_toplevel_type(HeapRegionSeq)                                        \
+  declare_toplevel_type(HeapRegionSetBase)                                    \
   declare_toplevel_type(G1MonitoringSupport)                                  \
                                                                               \
   declare_toplevel_type(G1CollectedHeap*)                                     \
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,8 @@
 VM_G1CollectForAllocation::VM_G1CollectForAllocation(
                                                   unsigned int gc_count_before,
                                                   size_t word_size)
-  : VM_G1OperationWithAllocRequest(gc_count_before, word_size) {
+  : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
+                                   GCCause::_allocation_failure) {
   guarantee(word_size > 0, "an allocation should always be requested");
 }
 
@@ -57,9 +58,10 @@
                                       bool           should_initiate_conc_mark,
                                       double         target_pause_time_ms,
                                       GCCause::Cause gc_cause)
-  : VM_G1OperationWithAllocRequest(gc_count_before, word_size),
+  : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
     _should_initiate_conc_mark(should_initiate_conc_mark),
     _target_pause_time_ms(target_pause_time_ms),
+    _should_retry_gc(false),
     _full_collections_completed_before(0) {
   guarantee(target_pause_time_ms > 0.0,
             err_msg("target_pause_time_ms = %1.6lf should be positive",
@@ -70,12 +72,29 @@
   _gc_cause = gc_cause;
 }
 
+bool VM_G1IncCollectionPause::doit_prologue() {
+  bool res = VM_GC_Operation::doit_prologue();
+  if (!res) {
+    if (_should_initiate_conc_mark) {
+      // The prologue can fail for a couple of reasons. The first is that another GC
+      // got scheduled and prevented the scheduling of the initial mark GC. The
+      // second is that the GC locker may be active and the heap can't be expanded.
+      // In both cases we want to retry the GC so that the initial mark pause is
+      // actually scheduled. In the second case, however, we should stall
+      // until the GC locker is no longer active and then retry the initial mark GC.
+      _should_retry_gc = true;
+    }
+  }
+  return res;
+}
+
 void VM_G1IncCollectionPause::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark ||
   ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
-   (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
-         "only a GC locker or a System.gc() induced GC should start a cycle");
+   (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
+    _gc_cause == GCCause::_g1_humongous_allocation),
+         "only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
@@ -105,11 +124,25 @@
     // next GC pause to be an initial mark; it returns false if a
     // marking cycle is already in progress.
     //
-    // If a marking cycle is already in progress just return and skip
-    // the pause - the requesting thread should block in doit_epilogue
-    // until the marking cycle is complete.
+    // If a marking cycle is already in progress just return and skip the
+    // pause below - if the reason for requesting this initial mark pause
+    // was due to a System.gc() then the requesting thread should block in
+    // doit_epilogue() until the marking cycle is complete.
+    //
+    // If this initial mark pause was requested as part of a humongous
+    // allocation then we know that the marking cycle must just have
+    // been started by another thread (possibly also allocating a humongous
+    // object) as there was no active marking cycle when the requesting
+    // thread checked before calling collect() in
+    // attempt_allocation_humongous(). Retrying the GC, in this case,
+    // will cause the requesting thread to spin inside collect() until the
+    // just started marking cycle is complete - which may be a while. So
+    // we do NOT retry the GC.
     if (!res) {
-      assert(_word_size == 0, "ExplicitGCInvokesConcurrent shouldn't be allocating");
+      assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
+      if (_gc_cause != GCCause::_g1_humongous_allocation) {
+        _should_retry_gc = true;
+      }
       return;
     }
   }
@@ -122,6 +155,13 @@
                                       true /* expect_null_cur_alloc_region */);
   } else {
     assert(_result == NULL, "invariant");
+    if (!_pause_succeeded) {
+      // Another possible reason for the pause to not be successful
+      // is that, again, the GC locker is active (and has become active
+      // since the prologue was executed). In this case we should retry
+      // the pause after waiting for the GC locker to become inactive.
+      _should_retry_gc = true;
+    }
   }
 }
 
@@ -167,6 +207,7 @@
 }
 
 void VM_CGC_Operation::acquire_pending_list_lock() {
+  assert(_needs_pll, "don't call this otherwise");
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
   ConcurrentMarkThread::slt()->
@@ -174,6 +215,7 @@
 }
 
 void VM_CGC_Operation::release_and_notify_pending_list_lock() {
+  assert(_needs_pll, "don't call this otherwise");
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
   ConcurrentMarkThread::slt()->
@@ -197,7 +239,9 @@
 bool VM_CGC_Operation::doit_prologue() {
   // Note the relative order of the locks must match that in
   // VM_GC_Operation::doit_prologue() or deadlocks can occur
-  acquire_pending_list_lock();
+  if (_needs_pll) {
+    acquire_pending_list_lock();
+  }
 
   Heap_lock->lock();
   SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
@@ -209,5 +253,7 @@
   // VM_GC_Operation::doit_epilogue()
   SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
   Heap_lock->unlock();
-  release_and_notify_pending_list_lock();
+  if (_needs_pll) {
+    release_and_notify_pending_list_lock();
+  }
 }
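
The new should_retry_gc() flag is only useful if the code that submits the operation loops on it. A hedged sketch of such a caller (not lines from the patch; the real submission loop lives outside this file, and gc_count_before, target_pause_time_ms and cause are assumed to be in scope):

    VM_G1IncCollectionPause op(gc_count_before,
                               0 /* word_size */,
                               true /* should_initiate_conc_mark */,
                               target_pause_time_ms,
                               cause);
    VMThread::execute(&op);
    if (!op.pause_succeeded() && op.should_retry_gc()) {
      // The prologue lost the race to another GC, a marking cycle was already
      // in progress, or the GC locker was active: wait as appropriate, re-read
      // the GC counts and resubmit the operation.
    }
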
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,8 +43,9 @@
 
 public:
   VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
-                                 size_t       word_size)
-    : VM_GC_Operation(gc_count_before, GCCause::_allocation_failure),
+                                 size_t       word_size,
+                                 GCCause::Cause gc_cause)
+    : VM_GC_Operation(gc_count_before, gc_cause),
       _word_size(word_size), _result(NULL), _pause_succeeded(false) { }
   HeapWord* result() { return _result; }
   bool pause_succeeded() { return _pause_succeeded; }
@@ -77,6 +78,7 @@
 class VM_G1IncCollectionPause: public VM_G1OperationWithAllocRequest {
 private:
   bool         _should_initiate_conc_mark;
+  bool         _should_retry_gc;
   double       _target_pause_time_ms;
   unsigned int _full_collections_completed_before;
 public:
@@ -86,11 +88,13 @@
                           double         target_pause_time_ms,
                           GCCause::Cause gc_cause);
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
+  virtual bool doit_prologue();
   virtual void doit();
   virtual void doit_epilogue();
   virtual const char* name() const {
     return "garbage-first incremental collection pause";
   }
+  bool should_retry_gc() const { return _should_retry_gc; }
 };
 
 // Concurrent GC stop-the-world operations such as remark and cleanup;
@@ -98,6 +102,7 @@
 class VM_CGC_Operation: public VM_Operation {
   VoidClosure* _cl;
   const char* _printGCMessage;
+  bool _needs_pll;
 
 protected:
   // java.lang.ref.Reference support
@@ -105,8 +110,8 @@
   void release_and_notify_pending_list_lock();
 
 public:
-  VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg)
-    : _cl(cl), _printGCMessage(printGCMsg) { }
+  VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg, bool needs_pll)
+    : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll) { }
   virtual VMOp_Type type() const { return VMOp_CGC_Operation; }
   virtual void doit();
   virtual bool doit_prologue();
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1042,7 +1042,11 @@
     size_policy->avg_survived()->sample(from()->used());
   }
 
-  update_time_of_last_gc(os::javaTimeMillis());
+  // We need to use a monotonically non-decreasing time in ms,
+  // or we will see time-warp warnings; os::javaTimeMillis()
+  // does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  update_time_of_last_gc(now);
 
   SpecializationStats::print();
 
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -418,25 +418,17 @@
       gc_count = Universe::heap()->total_collections();
 
       result = young_gen()->allocate(size);
-
-      // (1) If the requested object is too large to easily fit in the
-      //     young_gen, or
-      // (2) If GC is locked out via GCLocker, young gen is full and
-      //     the need for a GC already signalled to GCLocker (done
-      //     at a safepoint),
-      // ... then, rather than force a safepoint and (a potentially futile)
-      // collection (attempt) for each allocation, try allocation directly
-      // in old_gen. For case (2) above, we may in the future allow
-      // TLAB allocation directly in the old gen.
       if (result != NULL) {
         return result;
       }
-      if (size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
-        result = old_gen()->allocate(size);
-        if (result != NULL) {
-          return result;
-        }
+
+      // If certain conditions hold, try allocating from the old gen.
+      result = mem_allocate_old_gen(size);
+      if (result != NULL) {
+        return result;
       }
+
+      // Failed to allocate without a gc.
       if (GC_locker::is_active_and_needs_gc()) {
         // If this thread is not in a jni critical section, we stall
         // the requestor until the critical section has cleared and
@@ -460,7 +452,6 @@
     }
 
     if (result == NULL) {
-
       // Generate a VM operation
       VM_ParallelGCFailedAllocation op(size, gc_count);
       VMThread::execute(&op);
@@ -523,6 +514,42 @@
   return result;
 }
 
+// A "death march" is a series of ultra-slow allocations in which a full gc is
+// done before each allocation, and after the full gc the allocation still
+// cannot be satisfied from the young gen.  This routine detects that condition;
+// it should be called after a full gc has been done and the allocation
+// attempted from the young gen. The parameter 'addr' should be the result of
+// that young gen allocation attempt.
+void
+ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
+  if (addr != NULL) {
+    _death_march_count = 0;  // death march has ended
+  } else if (_death_march_count == 0) {
+    if (should_alloc_in_eden(size)) {
+      _death_march_count = 1;    // death march has started
+    }
+  }
+}
+
+HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
+  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
+    // Size is too big for eden, or gc is locked out.
+    return old_gen()->allocate(size);
+  }
+
+  // If a "death march" is in progress, allocate from the old gen a limited
+  // number of times before doing a GC.
+  if (_death_march_count > 0) {
+    if (_death_march_count < 64) {
+      ++_death_march_count;
+      return old_gen()->allocate(size);
+    } else {
+      _death_march_count = 0;
+    }
+  }
+  return NULL;
+}
+
 // Failed allocation policy. Must be called from the VM thread, and
 // only at a safepoint! Note that this method has policy for allocation
 // flow, and NOT collection policy. So we do not check for gc collection
@@ -535,27 +562,22 @@
   assert(!Universe::heap()->is_gc_active(), "not reentrant");
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 
-  size_t mark_sweep_invocation_count = total_invocations();
-
-  // We assume (and assert!) that an allocation at this point will fail
-  // unless we collect.
+  // We assume that allocation in eden will fail unless we collect.
 
   // First level allocation failure, scavenge and allocate in young gen.
   GCCauseSetter gccs(this, GCCause::_allocation_failure);
-  PSScavenge::invoke();
+  const bool invoked_full_gc = PSScavenge::invoke();
   HeapWord* result = young_gen()->allocate(size);
 
   // Second level allocation failure.
   //   Mark sweep and allocate in young generation.
-  if (result == NULL) {
-    // There is some chance the scavenge method decided to invoke mark_sweep.
-    // Don't mark sweep twice if so.
-    if (mark_sweep_invocation_count == total_invocations()) {
-      invoke_full_gc(false);
-      result = young_gen()->allocate(size);
-    }
+  if (result == NULL && !invoked_full_gc) {
+    invoke_full_gc(false);
+    result = young_gen()->allocate(size);
   }
 
+  death_march_check(result, size);
+
   // Third level allocation failure.
   //   After mark sweep and young generation allocation failure,
   //   allocate in old generation.
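The new death_march_check()/mem_allocate_old_gen() pair above throttles the "death march" case: when a full GC still cannot satisfy a young-gen allocation, a bounded number of requests (64 in the patch) are redirected to the old gen before another collection is forced. A minimal sketch of that throttle shape, with DeathMarchThrottle and its members as illustrative placeholders rather than the real heap fields:

// 0 means no death march is in progress.
struct DeathMarchThrottle {
  unsigned int count;

  DeathMarchThrottle() : count(0) {}

  // Called after a full GC followed by a young-gen allocation attempt.
  void check(bool young_alloc_succeeded, bool would_fit_in_eden) {
    if (young_alloc_succeeded) {
      count = 0;                       // death march has ended
    } else if (count == 0 && would_fit_in_eden) {
      count = 1;                       // death march has started
    }
  }

  // True if an old-gen allocation should be tried now instead of forcing
  // yet another full GC; the patch caps the run at 64 attempts.
  bool allow_old_gen_alloc() {
    if (count == 0) return false;
    if (count < 64) { ++count; return true; }
    count = 0;                         // give up; let the next GC happen
    return false;
  }
};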
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,7 @@
   // Collection of generations that are adjacent in the
   // space reserved for the heap.
   AdjoiningGenerations* _gens;
+  unsigned int _death_march_count;
 
   static GCTaskManager*          _gc_task_manager;      // The task manager.
 
@@ -71,8 +72,13 @@
   static inline size_t total_invocations();
   HeapWord* allocate_new_tlab(size_t size);
 
+  inline bool should_alloc_in_eden(size_t size) const;
+  inline void death_march_check(HeapWord* const result, size_t size);
+  HeapWord* mem_allocate_old_gen(size_t size);
+
  public:
   ParallelScavengeHeap() : CollectedHeap() {
+    _death_march_count = 0;
     set_alignment(_perm_gen_alignment, intra_heap_alignment());
     set_alignment(_young_gen_alignment, intra_heap_alignment());
     set_alignment(_old_gen_alignment, intra_heap_alignment());
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,12 @@
     PSMarkSweep::total_invocations();
 }
 
+inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const
+{
+  const size_t eden_size = young_gen()->eden_space()->capacity_in_words();
+  return size < eden_size / 2;
+}
+
 inline void ParallelScavengeHeap::invoke_scavenge()
 {
   PSScavenge::invoke();
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -100,12 +100,12 @@
 
 // This method contains no policy. You should probably
 // be calling invoke() instead.
-void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
+bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
   assert(ref_processor() != NULL, "Sanity");
 
   if (GC_locker::check_active_before_gc()) {
-    return;
+    return false;
   }
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -132,9 +132,7 @@
 
   AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
 
-  if (PrintHeapAtGC) {
-    Universe::print_heap_before_gc();
-  }
+  heap->print_heap_before_gc();
 
   // Fill in TLABs
   heap->accumulate_statistics_all_tlabs();
@@ -377,15 +375,15 @@
 
   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
 
-  if (PrintHeapAtGC) {
-    Universe::print_heap_after_gc();
-  }
+  heap->print_heap_after_gc();
 
   heap->post_full_gc_dump();
 
 #ifdef TRACESPINNING
   ParallelTaskTerminator::print_termination_counts();
 #endif
+
+  return true;
 }
 
 bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
@@ -504,7 +502,6 @@
 
 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  EventMark m("1 mark object");
   TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
   trace(" 1");
 
@@ -563,7 +560,6 @@
 
 
 void PSMarkSweep::mark_sweep_phase2() {
-  EventMark m("2 compute new addresses");
   TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
   trace("2");
 
@@ -608,7 +604,6 @@
 
 void PSMarkSweep::mark_sweep_phase3() {
   // Adjust the pointers to reflect the new locations
-  EventMark m("3 adjust pointers");
   TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
   trace("3");
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,7 +78,7 @@
 
  public:
   static void invoke(bool clear_all_softrefs);
-  static void invoke_no_policy(bool clear_all_softrefs);
+  static bool invoke_no_policy(bool clear_all_softrefs);
 
   static void initialize();
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -983,9 +983,7 @@
   // We need to track unique mark sweep invocations as well.
   _total_invocations++;
 
-  if (PrintHeapAtGC) {
-    Universe::print_heap_before_gc();
-  }
+  heap->print_heap_before_gc();
 
   // Fill in TLABs
   heap->accumulate_statistics_all_tlabs();
@@ -1838,7 +1836,6 @@
 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                       bool maximum_compaction)
 {
-  EventMark m("2 summarize");
   TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
   // trace("2");
 
@@ -1996,12 +1993,12 @@
 
 // This method contains no policy. You should probably
 // be calling invoke() instead.
-void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
+bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
   assert(ref_processor() != NULL, "Sanity");
 
   if (GC_locker::check_active_before_gc()) {
-    return;
+    return false;
   }
 
   TimeStamp marking_start;
@@ -2237,9 +2234,7 @@
 
   collection_exit.update();
 
-  if (PrintHeapAtGC) {
-    Universe::print_heap_after_gc();
-  }
+  heap->print_heap_after_gc();
   if (PrintGCTaskTimeStamps) {
     gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
                            INT64_FORMAT,
@@ -2253,6 +2248,8 @@
 #ifdef TRACESPINNING
   ParallelTaskTerminator::print_termination_counts();
 #endif
+
+  return true;
 }
 
 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
@@ -2352,7 +2349,6 @@
 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
                                       bool maximum_heap_compaction) {
   // Recursively traverse all live objects and mark them
-  EventMark m("1 mark object");
   TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
 
   ParallelScavengeHeap* heap = gc_heap();
@@ -2438,7 +2434,6 @@
 
 void PSParallelCompact::adjust_roots() {
   // Adjust the pointers to reflect the new locations
-  EventMark m("3 adjust roots");
   TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
 
   // General strong roots.
@@ -2469,7 +2464,6 @@
 }
 
 void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
-  EventMark m("4 compact perm");
   TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
   // trace("4");
 
@@ -2647,7 +2641,6 @@
 }
 
 void PSParallelCompact::compact() {
-  EventMark m("5 compact");
   // trace("5");
   TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
 
@@ -3502,4 +3495,3 @@
   _updated_int_array_klass_obj = (klassOop)
     summary_data().calc_new_pointer(Universe::intArrayKlassObj());
 }
-
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1057,7 +1057,7 @@
   }
 
   static void invoke(bool maximum_heap_compaction);
-  static void invoke_no_policy(bool maximum_heap_compaction);
+  static bool invoke_no_policy(bool maximum_heap_compaction);
 
   static void post_initialize();
   // Perform initialization for PSParallelCompact that requires
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -247,167 +247,6 @@
   }
 }
 
-//
-// This method is pretty bulky. It would be nice to split it up
-// into smaller submethods, but we need to be careful not to hurt
-// performance.
-//
-
-oop PSPromotionManager::copy_to_survivor_space(oop o) {
-  assert(PSScavenge::should_scavenge(&o), "Sanity");
-
-  oop new_obj = NULL;
-
-  // NOTE! We must be very careful with any methods that access the mark
-  // in o. There may be multiple threads racing on it, and it may be forwarded
-  // at any time. Do not use oop methods for accessing the mark!
-  markOop test_mark = o->mark();
-
-  // The same test as "o->is_forwarded()"
-  if (!test_mark->is_marked()) {
-    bool new_obj_is_tenured = false;
-    size_t new_obj_size = o->size();
-
-    // Find the objects age, MT safe.
-    int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
-      test_mark->displaced_mark_helper()->age() : test_mark->age();
-
-    // Try allocating obj in to-space (unless too old)
-    if (age < PSScavenge::tenuring_threshold()) {
-      new_obj = (oop) _young_lab.allocate(new_obj_size);
-      if (new_obj == NULL && !_young_gen_is_full) {
-        // Do we allocate directly, or flush and refill?
-        if (new_obj_size > (YoungPLABSize / 2)) {
-          // Allocate this object directly
-          new_obj = (oop)young_space()->cas_allocate(new_obj_size);
-        } else {
-          // Flush and fill
-          _young_lab.flush();
-
-          HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
-          if (lab_base != NULL) {
-            _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
-            // Try the young lab allocation again.
-            new_obj = (oop) _young_lab.allocate(new_obj_size);
-          } else {
-            _young_gen_is_full = true;
-          }
-        }
-      }
-    }
-
-    // Otherwise try allocating obj tenured
-    if (new_obj == NULL) {
-#ifndef PRODUCT
-      if (Universe::heap()->promotion_should_fail()) {
-        return oop_promotion_failed(o, test_mark);
-      }
-#endif  // #ifndef PRODUCT
-
-      new_obj = (oop) _old_lab.allocate(new_obj_size);
-      new_obj_is_tenured = true;
-
-      if (new_obj == NULL) {
-        if (!_old_gen_is_full) {
-          // Do we allocate directly, or flush and refill?
-          if (new_obj_size > (OldPLABSize / 2)) {
-            // Allocate this object directly
-            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
-          } else {
-            // Flush and fill
-            _old_lab.flush();
-
-            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
-            if(lab_base != NULL) {
-              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
-              // Try the old lab allocation again.
-              new_obj = (oop) _old_lab.allocate(new_obj_size);
-            }
-          }
-        }
-
-        // This is the promotion failed test, and code handling.
-        // The code belongs here for two reasons. It is slightly
-        // different thatn the code below, and cannot share the
-        // CAS testing code. Keeping the code here also minimizes
-        // the impact on the common case fast path code.
-
-        if (new_obj == NULL) {
-          _old_gen_is_full = true;
-          return oop_promotion_failed(o, test_mark);
-        }
-      }
-    }
-
-    assert(new_obj != NULL, "allocation should have succeeded");
-
-    // Copy obj
-    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
-
-    // Now we have to CAS in the header.
-    if (o->cas_forward_to(new_obj, test_mark)) {
-      // We won any races, we "own" this object.
-      assert(new_obj == o->forwardee(), "Sanity");
-
-      // Increment age if obj still in new generation. Now that
-      // we're dealing with a markOop that cannot change, it is
-      // okay to use the non mt safe oop methods.
-      if (!new_obj_is_tenured) {
-        new_obj->incr_age();
-        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
-      }
-
-      // Do the size comparison first with new_obj_size, which we
-      // already have. Hopefully, only a few objects are larger than
-      // _min_array_size_for_chunking, and most of them will be arrays.
-      // So, the is->objArray() test would be very infrequent.
-      if (new_obj_size > _min_array_size_for_chunking &&
-          new_obj->is_objArray() &&
-          PSChunkLargeArrays) {
-        // we'll chunk it
-        oop* const masked_o = mask_chunked_array_oop(o);
-        push_depth(masked_o);
-        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
-      } else {
-        // we'll just push its contents
-        new_obj->push_contents(this);
-      }
-    }  else {
-      // We lost, someone else "owns" this object
-      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
-
-      // Try to deallocate the space.  If it was directly allocated we cannot
-      // deallocate it, so we have to test.  If the deallocation fails,
-      // overwrite with a filler object.
-      if (new_obj_is_tenured) {
-        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
-          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
-        }
-      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
-        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
-      }
-
-      // don't update this before the unallocation!
-      new_obj = o->forwardee();
-    }
-  } else {
-    assert(o->is_forwarded(), "Sanity");
-    new_obj = o->forwardee();
-  }
-
-#ifdef DEBUG
-  // This code must come after the CAS test, or it will print incorrect
-  // information.
-  if (TraceScavenge) {
-    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
-       PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
-       new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
-  }
-#endif
-
-  return new_obj;
-}
-
 template <class T> void PSPromotionManager::process_array_chunk_work(
                                                  oop obj,
                                                  int start, int end) {
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -171,7 +171,7 @@
   void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }
 
   // Promotion methods
-  oop copy_to_survivor_space(oop o);
+  template<bool promote_immediately> oop copy_to_survivor_space(oop o);
   oop oop_promotion_failed(oop obj, markOop obj_mark);
 
   void reset();
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
 
+#include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 
@@ -61,6 +62,170 @@
   claim_or_forward_internal_depth(p);
 }
 
+//
+// This method is pretty bulky. It would be nice to split it up
+// into smaller submethods, but we need to be careful not to hurt
+// performance.
+//
+template<bool promote_immediately>
+oop PSPromotionManager::copy_to_survivor_space(oop o) {
+  assert(PSScavenge::should_scavenge(&o), "Sanity");
+
+  oop new_obj = NULL;
+
+  // NOTE! We must be very careful with any methods that access the mark
+  // in o. There may be multiple threads racing on it, and it may be forwarded
+  // at any time. Do not use oop methods for accessing the mark!
+  markOop test_mark = o->mark();
+
+  // The same test as "o->is_forwarded()"
+  if (!test_mark->is_marked()) {
+    bool new_obj_is_tenured = false;
+    size_t new_obj_size = o->size();
+
+    if (!promote_immediately) {
+      // Find the object's age, MT safe.
+      int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
+        test_mark->displaced_mark_helper()->age() : test_mark->age();
+
+      // Try allocating obj in to-space (unless too old)
+      if (age < PSScavenge::tenuring_threshold()) {
+        new_obj = (oop) _young_lab.allocate(new_obj_size);
+        if (new_obj == NULL && !_young_gen_is_full) {
+          // Do we allocate directly, or flush and refill?
+          if (new_obj_size > (YoungPLABSize / 2)) {
+            // Allocate this object directly
+            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
+          } else {
+            // Flush and fill
+            _young_lab.flush();
+
+            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
+            if (lab_base != NULL) {
+              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
+              // Try the young lab allocation again.
+              new_obj = (oop) _young_lab.allocate(new_obj_size);
+            } else {
+              _young_gen_is_full = true;
+            }
+          }
+        }
+      }
+    }
+
+    // Otherwise try allocating obj tenured
+    if (new_obj == NULL) {
+#ifndef PRODUCT
+      if (Universe::heap()->promotion_should_fail()) {
+        return oop_promotion_failed(o, test_mark);
+      }
+#endif  // #ifndef PRODUCT
+
+      new_obj = (oop) _old_lab.allocate(new_obj_size);
+      new_obj_is_tenured = true;
+
+      if (new_obj == NULL) {
+        if (!_old_gen_is_full) {
+          // Do we allocate directly, or flush and refill?
+          if (new_obj_size > (OldPLABSize / 2)) {
+            // Allocate this object directly
+            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
+          } else {
+            // Flush and fill
+            _old_lab.flush();
+
+            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
+            if(lab_base != NULL) {
+              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
+              // Try the old lab allocation again.
+              new_obj = (oop) _old_lab.allocate(new_obj_size);
+            }
+          }
+        }
+
+        // This is the promotion failed test, and code handling.
+        // The code belongs here for two reasons. It is slightly
+        // different than the code below, and cannot share the
+        // CAS testing code. Keeping the code here also minimizes
+        // the impact on the common case fast path code.
+
+        if (new_obj == NULL) {
+          _old_gen_is_full = true;
+          return oop_promotion_failed(o, test_mark);
+        }
+      }
+    }
+
+    assert(new_obj != NULL, "allocation should have succeeded");
+
+    // Copy obj
+    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
+
+    // Now we have to CAS in the header.
+    if (o->cas_forward_to(new_obj, test_mark)) {
+      // We won any races, we "own" this object.
+      assert(new_obj == o->forwardee(), "Sanity");
+
+      // Increment age if obj still in new generation. Now that
+      // we're dealing with a markOop that cannot change, it is
+      // okay to use the non mt safe oop methods.
+      if (!new_obj_is_tenured) {
+        new_obj->incr_age();
+        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
+      }
+
+      // Do the size comparison first with new_obj_size, which we
+      // already have. Hopefully, only a few objects are larger than
+      // _min_array_size_for_chunking, and most of them will be arrays.
+      // So, the is_objArray() test would be very infrequent.
+      if (new_obj_size > _min_array_size_for_chunking &&
+          new_obj->is_objArray() &&
+          PSChunkLargeArrays) {
+        // we'll chunk it
+        oop* const masked_o = mask_chunked_array_oop(o);
+        push_depth(masked_o);
+        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
+      } else {
+        // we'll just push its contents
+        new_obj->push_contents(this);
+      }
+    }  else {
+      // We lost, someone else "owns" this object
+      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
+
+      // Try to deallocate the space.  If it was directly allocated we cannot
+      // deallocate it, so we have to test.  If the deallocation fails,
+      // overwrite with a filler object.
+      if (new_obj_is_tenured) {
+        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
+          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
+        }
+      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
+        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
+      }
+
+      // don't update this before the unallocation!
+      new_obj = o->forwardee();
+    }
+  } else {
+    assert(o->is_forwarded(), "Sanity");
+    new_obj = o->forwardee();
+  }
+
+#ifdef DEBUG
+  // This code must come after the CAS test, or it will print incorrect
+  // information.
+  if (TraceScavenge) {
+    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
+       PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
+       new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
+  }
+#endif
+
+  return new_obj;
+}
+
+
 inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
   if (is_oop_masked(p)) {
     assert(PSChunkLargeArrays, "invariant");
@@ -69,9 +234,9 @@
   } else {
     if (p.is_narrow()) {
       assert(UseCompressedOops, "Error");
-      PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p);
+      PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
     } else {
-      PSScavenge::copy_and_push_safe_barrier(this, (oop*)p);
+      PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
     }
   }
 }
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/symbolTable.hpp"
+#include "code/codeCache.hpp"
 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
 #include "gc_implementation/parallelScavenge/generationSizer.hpp"
@@ -100,7 +101,7 @@
 
     // Weak refs may be visited more than once.
     if (PSScavenge::should_scavenge(p, _to_space)) {
-      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
+      PSScavenge::copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(_promotion_manager, p);
     }
   }
   virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
@@ -214,36 +215,41 @@
 //
 // Note that this method should only be called from the vm_thread while
 // at a safepoint!
-void PSScavenge::invoke() {
+bool PSScavenge::invoke() {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   assert(!Universe::heap()->is_gc_active(), "not reentrant");
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
   PSAdaptiveSizePolicy* policy = heap->size_policy();
   IsGCActiveMark mark;
 
-  bool scavenge_was_done = PSScavenge::invoke_no_policy();
+  const bool scavenge_done = PSScavenge::invoke_no_policy();
+  const bool need_full_gc = !scavenge_done ||
+    policy->should_full_GC(heap->old_gen()->free_in_bytes());
+  bool full_gc_done = false;
 
-  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
-  if (UsePerfData)
-    counters->update_full_follows_scavenge(0);
-  if (!scavenge_was_done ||
-      policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
-    if (UsePerfData)
-      counters->update_full_follows_scavenge(full_follows_scavenge);
+  if (UsePerfData) {
+    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
+    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
+    counters->update_full_follows_scavenge(ffs_val);
+  }
+
+  if (need_full_gc) {
     GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
     CollectorPolicy* cp = heap->collector_policy();
     const bool clear_all_softrefs = cp->should_clear_all_soft_refs();
 
     if (UseParallelOldGC) {
-      PSParallelCompact::invoke_no_policy(clear_all_softrefs);
+      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
     } else {
-      PSMarkSweep::invoke_no_policy(clear_all_softrefs);
+      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
     }
   }
+
+  return full_gc_done;
 }
 
 // This method contains no policy. You should probably
@@ -295,9 +301,7 @@
     heap->record_gen_tops_before_GC();
   }
 
-  if (PrintHeapAtGC) {
-    Universe::print_heap_before_gc();
-  }
+  heap->print_heap_before_gc();
 
   assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
   assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
@@ -604,6 +608,8 @@
 
     NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
 
+    CodeCache::prune_scavenge_root_nmethods();
+
     // Re-verify object start arrays
     if (VerifyObjectStartArray &&
         VerifyAfterGC) {
@@ -643,9 +649,7 @@
     Universe::verify(false);
   }
 
-  if (PrintHeapAtGC) {
-    Universe::print_heap_after_gc();
-  }
+  heap->print_heap_after_gc();
 
   if (ZapUnusedHeapArea) {
     young_gen->eden_space()->check_mangled_unused_area_complete();
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -117,10 +117,9 @@
   // Called by parallelScavengeHeap to init the tenuring threshold
   static void initialize();
 
-  // Scavenge entry point
-  static void invoke();
-  // Return true is a collection was done.  Return
-  // false if the collection was skipped.
+  // Scavenge entry point.  This may invoke a full gc; return true if so.
+  static bool invoke();
+  // Return true if a collection was done; false otherwise.
   static bool invoke_no_policy();
 
   // If an attempt to promote fails, this method is invoked
@@ -135,7 +134,8 @@
   template <class T> static inline bool should_scavenge(T* p, MutableSpace* to_space);
   template <class T> static inline bool should_scavenge(T* p, bool check_to_space);
 
-  template <class T> inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);
+  template <class T, bool promote_immediately>
+    inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);
 
   // Is an object in the young generation
   // This assumes that the HeapWord argument is in the heap,
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
+#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 
 inline void PSScavenge::save_to_space_top_before_gc() {
@@ -65,7 +66,7 @@
 // Attempt to "claim" oop at p via CAS, push the new obj if successful
 // This version tests the oop* to make sure it is within the heap before
 // attempting marking.
-template <class T>
+template <class T, bool promote_immediately>
 inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
                                                    T*                  p) {
   assert(should_scavenge(p, true), "revisiting object?");
@@ -73,7 +74,7 @@
   oop o = oopDesc::load_decode_heap_oop_not_null(p);
   oop new_obj = o->is_forwarded()
         ? o->forwardee()
-        : pm->copy_to_survivor_space(o);
+        : pm->copy_to_survivor_space<promote_immediately>(o);
   oopDesc::encode_store_heap_oop_not_null(p, new_obj);
 
   // We cannot mark without test, as some code passes us pointers
@@ -86,7 +87,8 @@
   }
 }
 
-class PSScavengeRootsClosure: public OopClosure {
+template<bool promote_immediately>
+class PSRootsClosure: public OopClosure {
  private:
   PSPromotionManager* _promotion_manager;
 
@@ -94,13 +96,16 @@
   template <class T> void do_oop_work(T *p) {
     if (PSScavenge::should_scavenge(p)) {
       // We never card mark roots, maybe call a func without test?
-      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
+      PSScavenge::copy_and_push_safe_barrier<T, promote_immediately>(_promotion_manager, p);
     }
   }
  public:
-  PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
-  void do_oop(oop* p)       { PSScavengeRootsClosure::do_oop_work(p); }
-  void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); }
+  PSRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
+  void do_oop(oop* p)       { PSRootsClosure::do_oop_work(p); }
+  void do_oop(narrowOop* p) { PSRootsClosure::do_oop_work(p); }
 };
 
+typedef PSRootsClosure</*promote_immediately=*/false> PSScavengeRootsClosure;
+typedef PSRootsClosure</*promote_immediately=*/true> PSPromoteRootsClosure;
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSSCAVENGE_INLINE_HPP
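The promote_immediately template parameter threaded through copy_to_survivor_space() and copy_and_push_safe_barrier() above lets the compiler build two variants of the promotion path: the ordinary scavenge variant (false) keeps the to-space fast path, while the code-cache roots variant (true) promotes straight to the old generation, with no runtime flag on the hot path. A stripped-down sketch of that compile-time dispatch, with copy_object as an illustrative stand-in for the real routine:

#include <cstdio>

// promote_immediately is resolved at compile time, so the dead branch is
// removed from each instantiation and the hot path carries no runtime test.
template <bool promote_immediately>
void* copy_object(void* obj) {
  if (!promote_immediately) {
    std::printf("try to-space first, then tenure\n");   // ordinary scavenge path (elided)
  } else {
    std::printf("promote straight to the old gen\n");   // code-cache roots path (elided)
  }
  return obj;
}

int main() {
  int x = 0;
  copy_object<false>(&x);   // analogous to PSScavengeRootsClosure
  copy_object<true>(&x);    // analogous to PSPromoteRootsClosure
  return 0;
}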
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,7 @@
 
   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   PSScavengeRootsClosure roots_closure(pm);
+  PSPromoteRootsClosure  roots_to_old_closure(pm);
 
   switch (_root_type) {
     case universe:
@@ -91,7 +92,7 @@
 
     case code_cache:
       {
-        CodeBlobToOopClosure each_scavengable_code_blob(&roots_closure, /*do_marking=*/ true);
+        CodeBlobToOopClosure each_scavengable_code_blob(&roots_to_old_closure, /*do_marking=*/ true);
         CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob);
       }
       break;
--- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,6 +1,6 @@
 
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -91,29 +91,37 @@
     MutableSpace *s = ls->space();
     if (s->top() < top()) { // For all spaces preceding the one containing top()
       if (s->free_in_words() > 0) {
-        size_t area_touched_words = pointer_delta(s->end(), s->top());
-        CollectedHeap::fill_with_object(s->top(), area_touched_words);
+        intptr_t cur_top = (intptr_t)s->top();
+        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
+        while (words_left_to_fill > 0) {
+          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
+          assert(words_to_fill >= CollectedHeap::min_fill_size(),
+            err_msg("Remaining size ("SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
+            words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size()));
+          CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
+          if (!os::numa_has_static_binding()) {
+            size_t touched_words = words_to_fill;
 #ifndef ASSERT
-        if (!ZapUnusedHeapArea) {
-          area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
-                                    area_touched_words);
-        }
+            if (!ZapUnusedHeapArea) {
+              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
+                touched_words);
+            }
 #endif
-        if (!os::numa_has_static_binding()) {
-          MemRegion invalid;
-          HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
-          HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
-                                                       os::vm_page_size());
-          if (crossing_start != crossing_end) {
-            // If object header crossed a small page boundary we mark the area
-            // as invalid rounding it to a page_size().
-            HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
-            HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
-                                 s->end());
-            invalid = MemRegion(start, end);
+            MemRegion invalid;
+            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
+            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
+            if (crossing_start != crossing_end) {
+              // If object header crossed a small page boundary we mark the area
+              // as invalid rounding it to a page_size().
+              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
+              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
+              invalid = MemRegion(start, end);
+            }
+
+            ls->add_invalid_region(invalid);
           }
-
-          ls->add_invalid_region(invalid);
+          cur_top = cur_top + (words_to_fill * HeapWordSize);
+          words_left_to_fill -= words_to_fill;
         }
       }
     } else {
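The rewritten loop above splits a large free range into several filler objects, each capped at CollectedHeap::filler_array_max_size(), instead of planting one oversized filler. A small sketch of that chunked fill, where kMaxFillerWords and fill_with_object are illustrative placeholders for the real constants and routine:

#include <algorithm>
#include <cstddef>

static const std::size_t kMaxFillerWords = 1024;   // illustrative cap

// Stand-in for the real routine, which plants an int-array filler at 'start'.
static void fill_with_object(std::size_t* start, std::size_t words) {
  (void)start; (void)words;
}

// Fill a free range with a sequence of capped filler objects.
static void fill_range(std::size_t* top, std::size_t words_left) {
  while (words_left > 0) {
    const std::size_t chunk = std::min(words_left, kMaxFillerWords);
    fill_with_object(top, chunk);
    top += chunk;             // advance past the filler just planted
    words_left -= chunk;
  }
}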
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,31 @@
 
 size_t CollectedHeap::_filler_array_max_size = 0;
 
+template <>
+void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
+  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
+  st->print_raw(m);
+}
+
+void GCHeapLog::log_heap(bool before) {
+  if (!should_log()) {
+    return;
+  }
+
+  double timestamp = fetch_timestamp();
+  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
+  int index = compute_log_index();
+  _records[index].thread = NULL; // It's the GC thread so it's not that interesting.
+  _records[index].timestamp = timestamp;
+  _records[index].data.is_before = before;
+  stringStream st(_records[index].data.buffer(), _records[index].data.size());
+  if (before) {
+    Universe::print_heap_before_gc(&st, true);
+  } else {
+    Universe::print_heap_after_gc(&st, true);
+  }
+}
+
 // Memory state functions.
 
 
@@ -60,7 +85,7 @@
   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
   const size_t elements_per_word = HeapWordSize / sizeof(jint);
   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
-                                             max_len * elements_per_word);
+                                             max_len / elements_per_word);
 
   _barrier_set = NULL;
   _is_gc_active = false;
@@ -81,6 +106,12 @@
                              80, GCCause::to_string(_gc_lastcause), CHECK);
   }
   _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
+  // Create the ring log
+  if (LogEvents) {
+    _gc_heap_log = new GCHeapLog();
+  } else {
+    _gc_heap_log = NULL;
+  }
 }
 
 void CollectedHeap::pre_initialize() {
@@ -272,10 +303,6 @@
   return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
 }
 
-size_t CollectedHeap::filler_array_max_size() {
-  return _filler_array_max_size;
-}
-
 #ifdef ASSERT
 void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
 {
@@ -302,10 +329,11 @@
 
   const size_t payload_size = words - filler_array_hdr_size();
   const size_t len = payload_size * HeapWordSize / sizeof(jint);
+  assert((int)len >= 0, err_msg("size too large " SIZE_FORMAT " becomes %d", words, (int)len));
 
   // Set the length first for concurrent GC.
   ((arrayOop)start)->set_length((int)len);
-  post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
+  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
   DEBUG_ONLY(zap_filler_array(start, words, zap);)
 }
 
@@ -318,8 +346,7 @@
     fill_with_array(start, words, zap);
   } else if (words > 0) {
     assert(words == min_fill_size(), "unaligned size");
-    post_allocation_setup_common(SystemDictionary::Object_klass(), start,
-                                 words);
+    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
   }
 }
 
@@ -449,7 +476,7 @@
     assert(ScavengeRootsInCode > 0, "must be");
     obj = common_mem_allocate_init(size, CHECK_NULL);
   }
-  post_allocation_setup_common(klass, obj, size);
+  post_allocation_setup_common(klass, obj);
   assert(Universe::is_bootstrapping() ||
          !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
 #include "runtime/handles.hpp"
 #include "runtime/perfData.hpp"
 #include "runtime/safepoint.hpp"
+#include "utilities/events.hpp"
 
 // A "CollectedHeap" is an implementation of a java heap for HotSpot.  This
 // is an abstract class: there may be many different kinds of heaps.  This
@@ -43,6 +44,29 @@
 class Thread;
 class CollectorPolicy;
 
+class GCMessage : public FormatBuffer<1024> {
+ public:
+  bool is_before;
+
+ public:
+  GCMessage() {}
+};
+
+class GCHeapLog : public EventLogBase<GCMessage> {
+ private:
+  void log_heap(bool before);
+
+ public:
+  GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}
+
+  void log_heap_before() {
+    log_heap(true);
+  }
+  void log_heap_after() {
+    log_heap(false);
+  }
+};
+
 //
 // CollectedHeap
 //   SharedHeap
@@ -62,6 +86,8 @@
   // Used for filler objects (static, but initialized in ctor).
   static size_t _filler_array_max_size;
 
+  GCHeapLog* _gc_heap_log;
+
   // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
   bool _defer_initial_card_mark;
 
@@ -102,7 +128,6 @@
   // Reinitialize tlabs before resuming mutators.
   virtual void resize_all_tlabs();
 
- protected:
   // Allocate from the current thread's TLAB, with broken-out slow path.
   inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
   static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size);
@@ -124,18 +149,14 @@
   inline static HeapWord* common_permanent_mem_allocate_init(size_t size, TRAPS);
 
   // Helper functions for (VM) allocation.
-  inline static void post_allocation_setup_common(KlassHandle klass,
-                                                  HeapWord* obj, size_t size);
+  inline static void post_allocation_setup_common(KlassHandle klass, HeapWord* obj);
   inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
-                                                            HeapWord* objPtr,
-                                                            size_t size);
+                                                            HeapWord* objPtr);
 
-  inline static void post_allocation_setup_obj(KlassHandle klass,
-                                               HeapWord* obj, size_t size);
+  inline static void post_allocation_setup_obj(KlassHandle klass, HeapWord* obj);
 
   inline static void post_allocation_setup_array(KlassHandle klass,
-                                                 HeapWord* obj, size_t size,
-                                                 int length);
+                                                 HeapWord* obj, int length);
 
   // Clears an allocated object.
   inline static void init_obj(HeapWord* obj, size_t size);
@@ -143,7 +164,6 @@
   // Filler object utilities.
   static inline size_t filler_array_hdr_size();
   static inline size_t filler_array_min_size();
-  static inline size_t filler_array_max_size();
 
   DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
   DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)
@@ -171,6 +191,10 @@
     G1CollectedHeap
   };
 
+  static inline size_t filler_array_max_size() {
+    return _filler_array_max_size;
+  }
+
   virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }
 
   /**
@@ -340,9 +364,7 @@
   inline static oop permanent_obj_allocate_no_klass_install(KlassHandle klass,
                                                             int size,
                                                             TRAPS);
-  inline static void post_allocation_install_obj_klass(KlassHandle klass,
-                                                       oop obj,
-                                                       int size);
+  inline static void post_allocation_install_obj_klass(KlassHandle klass, oop obj);
   inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS);
 
   // Raw memory allocation facilities
@@ -618,6 +640,24 @@
   // Default implementation does nothing.
   virtual void print_tracing_info() const = 0;
 
+  // If PrintHeapAtGC is set, call the appropriate routines.
+  void print_heap_before_gc() {
+    if (PrintHeapAtGC) {
+      Universe::print_heap_before_gc();
+    }
+    if (_gc_heap_log != NULL) {
+      _gc_heap_log->log_heap_before();
+    }
+  }
+  void print_heap_after_gc() {
+    if (PrintHeapAtGC) {
+      Universe::print_heap_after_gc();
+    }
+    if (_gc_heap_log != NULL) {
+      _gc_heap_log->log_heap_after();
+    }
+  }
+
   // Heap verification
   virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0;
 
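The print_heap_before_gc()/print_heap_after_gc() wrappers above now also feed a GCHeapLog, an EventLogBase-backed ring of recent heap snapshots. A toy sketch of that ring-buffer idea, assuming nothing about the real EventLogBase; HeapHistory and its fields are illustrative only:

#include <array>
#include <cstddef>
#include <string>

struct HeapSnapshot {
  bool        is_before;    // snapshot taken before or after the collection
  std::string text;         // formatted heap summary
};

class HeapHistory {
  std::array<HeapSnapshot, 8> records_;
  std::size_t next_;

 public:
  HeapHistory() : next_(0) {}

  // Oldest entries are overwritten once the ring is full.
  void log(bool before, const std::string& summary) {
    HeapSnapshot& rec = records_[next_ % records_.size()];
    rec.is_before = before;
    rec.text = summary;
    ++next_;
  }
};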
--- a/src/share/vm/gc_interface/collectedHeap.inline.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,15 +50,13 @@
 // Inline allocation implementations.
 
 void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
-                                                 HeapWord* obj,
-                                                 size_t size) {
-  post_allocation_setup_no_klass_install(klass, obj, size);
-  post_allocation_install_obj_klass(klass, oop(obj), (int) size);
+                                                 HeapWord* obj) {
+  post_allocation_setup_no_klass_install(klass, obj);
+  post_allocation_install_obj_klass(klass, oop(obj));
 }
 
 void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
-                                                           HeapWord* objPtr,
-                                                           size_t size) {
+                                                           HeapWord* objPtr) {
   oop obj = (oop)objPtr;
 
   assert(obj != NULL, "NULL object pointer");
@@ -71,8 +69,7 @@
 }
 
 void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
-                                                   oop obj,
-                                                   int size) {
+                                                   oop obj) {
   // These asserts are kind of complicated because of klassKlass
   // and the beginning of the world.
   assert(klass() != NULL || !Universe::is_fully_initialized(), "NULL klass");
@@ -101,9 +98,8 @@
 }
 
 void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
-                                              HeapWord* obj,
-                                              size_t size) {
-  post_allocation_setup_common(klass, obj, size);
+                                              HeapWord* obj) {
+  post_allocation_setup_common(klass, obj);
   assert(Universe::is_bootstrapping() ||
          !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
   // notify jvmti and dtrace
@@ -112,14 +108,13 @@
 
 void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
                                                 HeapWord* obj,
-                                                size_t size,
                                                 int length) {
   // Set array length before setting the _klass field
   // in post_allocation_setup_common() because the klass field
   // indicates that the object is parsable by concurrent GC.
   assert(length >= 0, "length should be non-negative");
   ((arrayOop)obj)->set_length(length);
-  post_allocation_setup_common(klass, obj, size);
+  post_allocation_setup_common(klass, obj);
   assert(((oop)obj)->blueprint()->oop_is_array(), "must be an array");
   // notify jvmti and dtrace (must be after length is set for dtrace)
   post_allocation_notify(klass, (oop)obj);
@@ -256,7 +251,7 @@
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
   HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
-  post_allocation_setup_obj(klass, obj, size);
+  post_allocation_setup_obj(klass, obj);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
   return (oop)obj;
 }
@@ -269,7 +264,7 @@
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
   HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
-  post_allocation_setup_array(klass, obj, size, length);
+  post_allocation_setup_array(klass, obj, length);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
   return (oop)obj;
 }
@@ -283,7 +278,7 @@
   assert(size >= 0, "int won't convert to size_t");
   HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL);
   ((oop)obj)->set_klass_gap(0);
-  post_allocation_setup_array(klass, obj, size, length);
+  post_allocation_setup_array(klass, obj, length);
 #ifndef PRODUCT
   const size_t hs = oopDesc::header_size()+1;
   Universe::heap()->check_for_non_bad_heap_word_value(obj+hs, size-hs);
@@ -293,7 +288,7 @@
 
 oop CollectedHeap::permanent_obj_allocate(KlassHandle klass, int size, TRAPS) {
   oop obj = permanent_obj_allocate_no_klass_install(klass, size, CHECK_NULL);
-  post_allocation_install_obj_klass(klass, obj, size);
+  post_allocation_install_obj_klass(klass, obj);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value((HeapWord*) obj,
                                                               size));
   return obj;
@@ -306,7 +301,7 @@
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
   HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
-  post_allocation_setup_no_klass_install(klass, obj, size);
+  post_allocation_setup_no_klass_install(klass, obj);
 #ifndef PRODUCT
   const size_t hs = oopDesc::header_size();
   Universe::heap()->check_for_bad_heap_word_value(obj+hs, size-hs);
@@ -322,7 +317,7 @@
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
   HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
-  post_allocation_setup_array(klass, obj, size, length);
+  post_allocation_setup_array(klass, obj, length);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
   return (oop)obj;
 }
--- a/src/share/vm/gc_interface/gcCause.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_interface/gcCause.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,6 +84,9 @@
     case _g1_inc_collection_pause:
       return "G1 Evacuation Pause";
 
+    case _g1_humongous_allocation:
+      return "G1 Humongous Allocation";
+
     case _last_ditch_collection:
       return "Last ditch collection";
 
--- a/src/share/vm/gc_interface/gcCause.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/gc_interface/gcCause.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,6 +66,7 @@
     _adaptive_size_policy,
 
     _g1_inc_collection_pause,
+    _g1_humongous_allocation,
 
     _last_ditch_collection,
     _last_gc_cause
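
The two hunks above add a dedicated GC cause for G1 pauses triggered by humongous allocations, together with its log string. A minimal sketch (not part of this changeset; the helper name is illustrative) of how a consumer of GCCause might branch on the new value and fall back to the shared to_string() mapping:

    #include "gc_interface/gcCause.hpp"
    #include "utilities/ostream.hpp"

    // Illustrative helper: print the textual cause, flagging the new
    // humongous-allocation case explicitly.
    static void print_gc_cause(outputStream* st, GCCause::Cause cause) {
      if (cause == GCCause::_g1_humongous_allocation) {
        // Pause requested because a humongous (>= half-region) object
        // could not be allocated without collecting first.
        st->print_cr("humongous allocation forced this pause");
      }
      st->print_cr("GC cause: %s", GCCause::to_string(cause));
    }
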
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -859,7 +859,9 @@
   const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
   const int bci = branch_bcp != NULL ? method->bci_from(fr.interpreter_frame_bcp()) : InvocationEntryBci;
 
+  assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
   nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, NULL, thread);
+  assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");
 
   if (osr_nm != NULL) {
     // We may need to do on-stack replacement which requires that no
--- a/src/share/vm/memory/cardTableModRefBS.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/memory/cardTableModRefBS.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,6 +72,9 @@
     CT_MR_BS_last_reserved      = 16
   };
 
+  // a word's worth (row) of clean card values
+  static const intptr_t clean_card_row = (intptr_t)(-1);
+
   // dirty and precleaned are equivalent wrt younger_refs_iter.
   static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
     return cv == dirty_card || cv == precleaned_card;
--- a/src/share/vm/memory/cardTableRS.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/memory/cardTableRS.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -173,6 +173,10 @@
             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
 }
 
+bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
+  return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
+}
+
 void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
   assert(mr.word_size() > 0, "Error");
   assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
@@ -194,6 +198,17 @@
         const MemRegion mrd(start_of_non_clean, end_of_non_clean);
         _dirty_card_closure->do_MemRegion(mrd);
       }
+
+      // fast-forward through a potentially contiguous whole-word run of clean cards beginning at a word boundary
+      if (is_word_aligned(cur_entry)) {
+        jbyte* cur_row = cur_entry - BytesPerWord;
+        while (cur_row >= limit && *((intptr_t*)cur_row) ==  CardTableRS::clean_card_row()) {
+          cur_row -= BytesPerWord;
+        }
+        cur_entry = cur_row + BytesPerWord;
+        cur_hw = _ct->addr_for(cur_entry);
+      }
+
       // Reset the dirty window, while continuing to look
       // for the next dirty card that will start a
       // new dirty window.
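
The hunk above lets the clean-card scan in ClearNoncleanCardWrapper::do_MemRegion() skip a whole word of card bytes per load once cur_entry is word aligned: clean_card_row is simply a word whose every byte is clean_card (all bits set), so one intptr_t compare rejects BytesPerWord cards at a time. A standalone sketch of the same idea, with illustrative names (the real code uses cur_entry/limit and CardTableRS::clean_card_row() as shown above):

    #include <cstdint>
    #include <cstddef>

    static const int8_t   clean_card     = -1;              // every bit set marks a clean card
    static const intptr_t clean_card_row = (intptr_t)(-1);  // a whole word of clean cards

    // Scan downward from a word-aligned entry, skipping words that contain
    // only clean cards. Returns cur_entry unchanged if the word just below
    // it is not entirely clean, otherwise the base of the lowest fully
    // clean word reached before hitting limit.
    static int8_t* skip_clean_card_words(int8_t* cur_entry, int8_t* limit) {
      const ptrdiff_t bytes_per_word = sizeof(intptr_t);
      int8_t* cur_row = cur_entry - bytes_per_word;
      while (cur_row >= limit && *(const intptr_t*)cur_row == clean_card_row) {
        cur_row -= bytes_per_word;
      }
      return cur_row + bytes_per_word;
    }
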
--- a/src/share/vm/memory/cardTableRS.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/memory/cardTableRS.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,10 @@
     return CardTableModRefBS::clean_card;
   }
 
+  static intptr_t clean_card_row() {
+    return CardTableModRefBS::clean_card_row;
+  }
+
   static bool
   card_is_dirty_wrt_gen_iter(jbyte cv) {
     return CardTableModRefBS::card_is_dirty_wrt_gen_iter(cv);
@@ -176,6 +180,8 @@
   // Work methods called by the clear_card()
   inline bool clear_card_serial(jbyte* entry);
   inline bool clear_card_parallel(jbyte* entry);
+  // check alignment of pointer
+  bool is_word_aligned(jbyte* entry);
 
 public:
   ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct);
--- a/src/share/vm/memory/compactingPermGenGen.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/memory/compactingPermGenGen.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -240,9 +240,6 @@
     if (_ro_space == NULL || _rw_space == NULL)
       vm_exit_during_initialization("Could not allocate a shared space");
 
-    // Cover both shared spaces entirely with cards.
-    _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end));
-
     if (UseSharedSpaces) {
 
       // Map in the regions in the shared file.
@@ -279,10 +276,14 @@
         delete _rw_space;
         _rw_space = NULL;
         shared_end = (HeapWord*)(rs.base() + rs.size());
-        _rs->resize_covered_region(MemRegion(shared_bottom, shared_bottom));
       }
     }
 
+    if (spec()->enable_shared_spaces()) {
+      // Cover both shared spaces entirely with cards.
+      _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end));
+    }
+
     // Reserved region includes shared spaces for oop.is_in_reserved().
     _reserved.set_end(shared_end);
 
--- a/src/share/vm/memory/defNewGeneration.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/memory/defNewGeneration.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -655,7 +655,12 @@
   from()->set_concurrent_iteration_safe_limit(from()->top());
   to()->set_concurrent_iteration_safe_limit(to()->top());
   SpecializationStats::print();
-  update_time_of_last_gc(os::javaTimeMillis());
+
+  // We need to use a monotonically non-decreasing time in ms
+  // or we will see time-warp warnings and os::javaTimeMillis()
+  // does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  update_time_of_last_gc(now);
 }
 
 class RemoveForwardPointerClosure: public ObjectClosure {
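
The replacement above (repeated below in genMarkSweep.cpp) avoids feeding update_time_of_last_gc() a wall-clock value: os::javaTimeMillis() can jump backwards when the system clock is adjusted, producing "time warp" warnings, while os::javaTimeNanos() is monotonic. A one-line sketch of the pattern, with an illustrative helper name:

    // Monotonic timestamp in milliseconds, derived from the nanosecond
    // clock; never decreases even if the wall clock is stepped backwards.
    static jlong monotonic_millis() {
      return os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
    }
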
--- a/src/share/vm/memory/dump.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/memory/dump.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -297,16 +297,14 @@
 
       if (obj->blueprint()->oop_is_instanceKlass()) {
         instanceKlass* ik = instanceKlass::cast((klassOop)obj);
-        typeArrayOop inner_classes = ik->inner_classes();
-        if (inner_classes != NULL) {
-          constantPoolOop constants = ik->constants();
-          int n = inner_classes->length();
-          for (int i = 0; i < n; i += instanceKlass::inner_class_next_offset) {
-            int ioff = i + instanceKlass::inner_class_inner_name_offset;
-            int index = inner_classes->ushort_at(ioff);
-            if (index != 0) {
-              _closure->do_symbol(constants->symbol_at_addr(index));
-            }
+        instanceKlassHandle ik_h((klassOop)obj);
+        InnerClassesIterator iter(ik_h);
+        constantPoolOop constants = ik->constants();
+        for (; !iter.done(); iter.next()) {
+          int index = iter.inner_name_index();
+
+          if (index != 0) {
+            _closure->do_symbol(constants->symbol_at_addr(index));
           }
         }
       }
--- a/src/share/vm/memory/gcLocker.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/memory/gcLocker.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,37 +32,90 @@
 volatile bool GC_locker::_needs_gc       = false;
 volatile bool GC_locker::_doing_gc       = false;
 
+#ifdef ASSERT
+volatile jint GC_locker::_debug_jni_lock_count = 0;
+#endif
+
+
+#ifdef ASSERT
+void GC_locker::verify_critical_count() {
+  if (SafepointSynchronize::is_at_safepoint()) {
+    assert(!needs_gc() || _debug_jni_lock_count == _jni_lock_count, "must agree");
+    int count = 0;
+    // Count the number of threads with critical operations in progress
+    for (JavaThread* thr = Threads::first(); thr; thr = thr->next()) {
+      if (thr->in_critical()) {
+        count++;
+      }
+    }
+    if (_jni_lock_count != count) {
+      tty->print_cr("critical counts don't match: %d != %d", _jni_lock_count, count);
+      for (JavaThread* thr = Threads::first(); thr; thr = thr->next()) {
+        if (thr->in_critical()) {
+          tty->print_cr(INTPTR_FORMAT " in_critical %d", thr, thr->in_critical());
+        }
+      }
+    }
+    assert(_jni_lock_count == count, "must be equal");
+  }
+}
+#endif
+
+bool GC_locker::check_active_before_gc() {
+  assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
+  if (is_active() && !_needs_gc) {
+    verify_critical_count();
+    _needs_gc = true;
+    if (PrintJNIGCStalls && PrintGCDetails) {
+      ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
+      gclog_or_tty->print_cr("%.3f: Setting _needs_gc. Thread \"%s\" %d locked.",
+                             gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
+    }
+
+  }
+  return is_active();
+}
+
 void GC_locker::stall_until_clear() {
   assert(!JavaThread::current()->in_critical(), "Would deadlock");
-  if (PrintJNIGCStalls && PrintGCDetails) {
-    ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
-    gclog_or_tty->print_cr(
-      "Allocation failed. Thread \"%s\" is stalled by JNI critical section.",
-      JavaThread::current()->name());
+  MutexLocker   ml(JNICritical_lock);
+
+  if (needs_gc()) {
+    if (PrintJNIGCStalls && PrintGCDetails) {
+      ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
+      gclog_or_tty->print_cr("%.3f: Allocation failed. Thread \"%s\" is stalled by JNI critical section, %d locked.",
+                             gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
+    }
   }
-  MutexLocker   ml(JNICritical_lock);
+
   // Wait for _needs_gc  to be cleared
-  while (GC_locker::needs_gc()) {
+  while (needs_gc()) {
     JNICritical_lock->wait();
   }
 }
 
-void GC_locker::jni_lock_slow() {
+void GC_locker::jni_lock(JavaThread* thread) {
+  assert(!thread->in_critical(), "shouldn't currently be in a critical region");
   MutexLocker mu(JNICritical_lock);
   // Block entering threads if we know at least one thread is in a
   // JNI critical region and we need a GC.
   // We check that at least one thread is in a critical region before
   // blocking because blocked threads are woken up by a thread exiting
   // a JNI critical region.
-  while ((is_jni_active() && needs_gc()) || _doing_gc) {
+  while ((needs_gc() && is_jni_active()) || _doing_gc) {
     JNICritical_lock->wait();
   }
-  jni_lock();
+  thread->enter_critical();
+  _jni_lock_count++;
+  increment_debug_jni_lock_count();
 }
 
-void GC_locker::jni_unlock_slow() {
+void GC_locker::jni_unlock(JavaThread* thread) {
+  assert(thread->in_last_critical(), "should be exiting critical region");
   MutexLocker mu(JNICritical_lock);
-  jni_unlock();
+  _jni_lock_count--;
+  decrement_debug_jni_lock_count();
+  thread->exit_critical();
   if (needs_gc() && !is_jni_active()) {
     // We're the last thread out. Cause a GC to occur.
     // GC will also check is_active, so this check is not
@@ -74,11 +127,17 @@
       {
         // Must give up the lock while at a safepoint
         MutexUnlocker munlock(JNICritical_lock);
+        if (PrintJNIGCStalls && PrintGCDetails) {
+          ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
+          gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
+                                 gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
+        }
         Universe::heap()->collect(GCCause::_gc_locker);
       }
       _doing_gc = false;
     }
-    clear_needs_gc();
+
+    _needs_gc = false;
     JNICritical_lock->notify_all();
   }
 }
--- a/src/share/vm/memory/gcLocker.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/memory/gcLocker.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,53 +51,80 @@
 
 class GC_locker: public AllStatic {
  private:
-  static volatile jint _jni_lock_count;  // number of jni active instances
+  // The _jni_lock_count keeps track of the number of threads that are
+  // currently in a critical region.  It's only kept up to date when
+  // _needs_gc is true.  The current value is computed during
+  // safepointing and decremented during the slow path of GC_locker
+  // unlocking.
+  static volatile jint _jni_lock_count;  // number of jni active instances.
+
   static volatile jint _lock_count;      // number of other active instances
   static volatile bool _needs_gc;        // heap is filling, we need a GC
                                          // note: bool is typedef'd as jint
   static volatile bool _doing_gc;        // unlock_critical() is doing a GC
 
+#ifdef ASSERT
+  // This lock count is updated for all operations and is used to
+  // validate the jni_lock_count that is computed during safepoints.
+  static volatile jint _debug_jni_lock_count;
+#endif
+
   // Accessors
   static bool is_jni_active() {
+    assert(_needs_gc, "only valid when _needs_gc is set");
     return _jni_lock_count > 0;
   }
 
-  static void set_needs_gc() {
-    assert(SafepointSynchronize::is_at_safepoint(),
-      "needs_gc is only set at a safepoint");
-    _needs_gc = true;
-  }
-
-  static void clear_needs_gc() {
-    assert_lock_strong(JNICritical_lock);
-    _needs_gc = false;
-  }
+  // At a safepoint, visit all threads and count the number of active
+  // critical sections.  This is used to ensure that all active
+  // critical sections are exited before a new one is started.
+  static void verify_critical_count() NOT_DEBUG_RETURN;
 
-  static void jni_lock() {
-    Atomic::inc(&_jni_lock_count);
-    CHECK_UNHANDLED_OOPS_ONLY(
-      if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count++; })
-    assert(Universe::heap() == NULL || !Universe::heap()->is_gc_active(),
-           "locking failed");
-  }
+  static void jni_lock(JavaThread* thread);
+  static void jni_unlock(JavaThread* thread);
 
-  static void jni_unlock() {
-    Atomic::dec(&_jni_lock_count);
-    CHECK_UNHANDLED_OOPS_ONLY(
-      if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count--; })
+  static bool is_active_internal() {
+    verify_critical_count();
+    return _lock_count > 0 || _jni_lock_count > 0;
   }
 
-  static void jni_lock_slow();
-  static void jni_unlock_slow();
-
  public:
   // Accessors
-  static bool is_active();
+  static bool is_active() {
+    assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
+    return is_active_internal();
+  }
   static bool needs_gc()       { return _needs_gc;                        }
+
   // Shorthand
-  static bool is_active_and_needs_gc() { return is_active() && needs_gc();}
+  static bool is_active_and_needs_gc() {
+    // Use is_active_internal since _needs_gc can change from true to
+    // false outside of a safepoint, triggering the assert in
+    // is_active.
+    return needs_gc() && is_active_internal();
+  }
 
-  // Calls set_needs_gc() if is_active() is true. Returns is_active().
+  // In debug mode track the locking state at all times
+  static void increment_debug_jni_lock_count() {
+#ifdef ASSERT
+    assert(_debug_jni_lock_count >= 0, "bad value");
+    Atomic::inc(&_debug_jni_lock_count);
+#endif
+  }
+  static void decrement_debug_jni_lock_count() {
+#ifdef ASSERT
+    assert(_debug_jni_lock_count > 0, "bad value");
+    Atomic::dec(&_debug_jni_lock_count);
+#endif
+  }
+
+  // Set the current lock count
+  static void set_jni_lock_count(int count) {
+    _jni_lock_count = count;
+    verify_critical_count();
+  }
+
+  // Sets _needs_gc if is_active() is true. Returns is_active().
   static bool check_active_before_gc();
 
   // Stalls the caller (who should not be in a jni critical section)
@@ -131,22 +158,24 @@
   // JNI critical regions are the only participants in this scheme
   // because they are, by spec, well bounded while in a critical region.
   //
-  // Each of the following two method is split into a fast path and a slow
-  // path. JNICritical_lock is only grabbed in the slow path.
+  // Each of the following two methods is split into a fast path and a
+  // slow path. JNICritical_lock is only grabbed in the slow path.
   // _needs_gc is initially false and every java thread will go
-  // through the fast path (which does the same thing as the slow path
-  // when _needs_gc is false). When GC happens at a safepoint,
-  // GC_locker::is_active() is checked. Since there is no safepoint in the
-  // fast path of lock_critical() and unlock_critical(), there is no race
-  // condition between the fast path and GC. After _needs_gc is set at a
-  // safepoint, every thread will go through the slow path after the safepoint.
-  // Since after a safepoint, each of the following two methods is either
-  // entered from the method entry and falls into the slow path, or is
-  // resumed from the safepoints in the method, which only exist in the slow
-  // path. So when _needs_gc is set, the slow path is always taken, till
-  // _needs_gc is cleared.
+  // through the fast path, which simply increments or decrements the
+  // current thread's critical count.  When GC happens at a safepoint,
+  // GC_locker::is_active() is checked. Since there is no safepoint in
+  // the fast path of lock_critical() and unlock_critical(), there is
+  // no race condition between the fast path and GC. After _needs_gc
+  // is set at a safepoint, every thread will go through the slow path
+  // after the safepoint.  Since after a safepoint, each of the
+  // following two methods is either entered from the method entry and
+  // falls into the slow path, or is resumed from the safepoints in
+  // the method, which only exist in the slow path. So when _needs_gc
+  // is set, the slow path is always taken, till _needs_gc is cleared.
   static void lock_critical(JavaThread* thread);
   static void unlock_critical(JavaThread* thread);
+
+  static address needs_gc_address() { return (address) &_needs_gc; }
 };
 
 
--- a/src/share/vm/memory/gcLocker.inline.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/memory/gcLocker.inline.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,17 +27,6 @@
 
 #include "memory/gcLocker.hpp"
 
-inline bool GC_locker::is_active() {
-  return _lock_count > 0 || _jni_lock_count > 0;
-}
-
-inline bool GC_locker::check_active_before_gc() {
-  if (is_active()) {
-    set_needs_gc();
-  }
-  return is_active();
-}
-
 inline void GC_locker::lock() {
   // cast away volatile
   Atomic::inc(&_lock_count);
@@ -56,24 +45,28 @@
 
 inline void GC_locker::lock_critical(JavaThread* thread) {
   if (!thread->in_critical()) {
-    if (!needs_gc()) {
-      jni_lock();
-    } else {
-      jni_lock_slow();
+    if (needs_gc()) {
+      // jni_lock calls enter_critical under the lock so that the
+      // global lock count and per thread count are in agreement.
+      jni_lock(thread);
+      return;
     }
+    increment_debug_jni_lock_count();
   }
   thread->enter_critical();
 }
 
 inline void GC_locker::unlock_critical(JavaThread* thread) {
+  if (thread->in_last_critical()) {
+    if (needs_gc()) {
+      // jni_unlock calls exit_critical under the lock so that
+      // the global lock count and per thread count are in agreement.
+      jni_unlock(thread);
+      return;
+    }
+    decrement_debug_jni_lock_count();
+  }
   thread->exit_critical();
-  if (!thread->in_critical()) {
-    if (!needs_gc()) {
-      jni_unlock();
-    } else {
-      jni_unlock_slow();
-    }
-  }
 }
 
 #endif // SHARE_VM_MEMORY_GCLOCKER_INLINE_HPP
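
The reworked GC_locker above keeps only a per-thread critical count on the fast path and takes JNICritical_lock solely when _needs_gc is set; the global _jni_lock_count is established at the safepoint and maintained on the slow path. From the application side, the critical regions being counted are the ones opened by JNI's GetPrimitiveArrayCritical and closed by ReleasePrimitiveArrayCritical, and a GC that becomes necessary while any such region is open is deferred until the last thread leaves it. A hedged JNI-side sketch (plain JNI API, not part of this changeset; class and method names are illustrative):

    #include <jni.h>

    extern "C" JNIEXPORT jlong JNICALL
    Java_Example_sum(JNIEnv* env, jclass, jintArray arr) {
      const jsize len = env->GetArrayLength(arr);
      // Entering the critical region runs GC_locker::lock_critical(thread).
      jint* data = (jint*)env->GetPrimitiveArrayCritical(arr, NULL);
      if (data == NULL) return 0;   // allocation failure
      jlong sum = 0;
      for (jsize i = 0; i < len; i++) sum += data[i];
      // Leaving the region runs GC_locker::unlock_critical(thread); if this
      // is the last critical thread and _needs_gc is set, it performs the GC.
      env->ReleasePrimitiveArrayCritical(arr, data, JNI_ABORT);
      return sum;
    }
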
--- a/src/share/vm/memory/genCollectedHeap.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -479,11 +479,9 @@
 
   const size_t perm_prev_used = perm_gen()->used();
 
-  if (PrintHeapAtGC) {
-    Universe::print_heap_before_gc();
-    if (Verbose) {
-      gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
-    }
+  print_heap_before_gc();
+  if (Verbose) {
+    gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
   }
 
   {
@@ -685,9 +683,7 @@
   AdaptiveSizePolicy* sp = gen_policy()->size_policy();
   AdaptiveSizePolicyOutput(sp, total_collections());
 
-  if (PrintHeapAtGC) {
-    Universe::print_heap_after_gc();
-  }
+  print_heap_after_gc();
 
 #ifdef TRACESPINNING
   ParallelTaskTerminator::print_termination_counts();
--- a/src/share/vm/memory/genMarkSweep.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/memory/genMarkSweep.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -176,7 +176,11 @@
 
   // Update time of last gc for all generations we collected
   // (which currently is all the generations in the heap).
-  gch->update_time_of_last_gc(os::javaTimeMillis());
+  // We need to use a monotonically non-decreasing time in ms
+  // or we will see time-warp warnings and os::javaTimeMillis()
+  // does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  gch->update_time_of_last_gc(now);
 }
 
 void GenMarkSweep::allocate_stacks() {
@@ -254,7 +258,6 @@
 void GenMarkSweep::mark_sweep_phase1(int level,
                                   bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  EventMark m("1 mark object");
   TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
   trace(" 1");
 
@@ -325,7 +328,6 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   Generation* pg = gch->perm_gen();
 
-  EventMark m("2 compute new addresses");
   TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
   trace("2");
 
@@ -350,7 +352,6 @@
   Generation* pg = gch->perm_gen();
 
   // Adjust the pointers to reflect the new locations
-  EventMark m("3 adjust pointers");
   TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
   trace("3");
 
@@ -411,7 +412,6 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   Generation* pg = gch->perm_gen();
 
-  EventMark m("4 compact heap");
   TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
   trace("4");
 
--- a/src/share/vm/memory/universe.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/memory/universe.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1303,22 +1303,22 @@
   }
 }
 
-void Universe::print_heap_before_gc(outputStream* st) {
+void Universe::print_heap_before_gc(outputStream* st, bool ignore_extended) {
   st->print_cr("{Heap before GC invocations=%u (full %u):",
                heap()->total_collections(),
                heap()->total_full_collections());
-  if (!PrintHeapAtGCExtended) {
+  if (!PrintHeapAtGCExtended || ignore_extended) {
     heap()->print_on(st);
   } else {
     heap()->print_extended_on(st);
   }
 }
 
-void Universe::print_heap_after_gc(outputStream* st) {
+void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
   st->print_cr("Heap after GC invocations=%u (full %u):",
                heap()->total_collections(),
                heap()->total_full_collections());
-  if (!PrintHeapAtGCExtended) {
+  if (!PrintHeapAtGCExtended || ignore_extended) {
     heap()->print_on(st);
   } else {
     heap()->print_extended_on(st);
--- a/src/share/vm/memory/universe.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/memory/universe.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -424,8 +424,8 @@
   static void print_heap_at_SIGBREAK();
   static void print_heap_before_gc() { print_heap_before_gc(gclog_or_tty); }
   static void print_heap_after_gc()  { print_heap_after_gc(gclog_or_tty); }
-  static void print_heap_before_gc(outputStream* st);
-  static void print_heap_after_gc(outputStream* st);
+  static void print_heap_before_gc(outputStream* st, bool ignore_extended = false);
+  static void print_heap_after_gc(outputStream* st, bool ignore_extended = false);
 
   // Change the number of dummy objects kept reachable by the full gc dummy
   // array; this should trigger relocation in a sliding compaction collector.
--- a/src/share/vm/oops/arrayKlass.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/oops/arrayKlass.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -153,6 +153,7 @@
   }
   if (length > arrayOopDesc::max_array_length(T_ARRAY)) {
     report_java_out_of_memory("Requested array size exceeds VM limit");
+    JvmtiExport::post_array_size_exhausted();
     THROW_OOP_0(Universe::out_of_memory_error_array_size());
   }
   int size = objArrayOopDesc::object_size(length);
--- a/src/share/vm/oops/arrayOop.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/oops/arrayOop.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #ifndef PRODUCT
 
 #include "oops/arrayOop.hpp"
+#include "oops/oop.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 bool arrayOopDesc::check_max_length_overflow(BasicType type) {
--- a/src/share/vm/oops/constantPoolOop.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/oops/constantPoolOop.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -269,7 +269,7 @@
 methodOop constantPoolOopDesc::method_at_if_loaded(constantPoolHandle cpool,
                                                    int which, Bytecodes::Code invoke_code) {
   assert(!constantPoolCacheOopDesc::is_secondary_index(which), "no indy instruction here");
-  if (cpool->cache() == NULL)  return false;  // nothing to load yet
+  if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
   int cache_index = which - CPCACHE_INDEX_TAG;
   if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) {
     if (PrintMiscellaneous && (Verbose||WizardMode)) {
--- a/src/share/vm/oops/cpCacheOop.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/oops/cpCacheOop.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -504,17 +504,17 @@
 
 void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
   // print separator
-  if (index == 0) tty->print_cr("                 -------------");
+  if (index == 0) st->print_cr("                 -------------");
   // print entry
-  tty->print("%3d  ("PTR_FORMAT")  ", index, (intptr_t)this);
+  st->print("%3d  ("PTR_FORMAT")  ", index, (intptr_t)this);
   if (is_secondary_entry())
-    tty->print_cr("[%5d|secondary]", main_entry_index());
+    st->print_cr("[%5d|secondary]", main_entry_index());
   else
-    tty->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
-  tty->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)(oop)_f1);
-  tty->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_f2);
-  tty->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_flags);
-  tty->print_cr("                 -------------");
+    st->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
+  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)(oop)_f1);
+  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_f2);
+  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_flags);
+  st->print_cr("                 -------------");
 }
 
 void ConstantPoolCacheEntry::verify(outputStream* st) const {
--- a/src/share/vm/oops/instanceKlass.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/oops/instanceKlass.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -669,6 +669,7 @@
   if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
   if (length > arrayOopDesc::max_array_length(T_OBJECT)) {
     report_java_out_of_memory("Requested array size exceeds VM limit");
+    JvmtiExport::post_array_size_exhausted();
     THROW_OOP_0(Universe::out_of_memory_error_array_size());
   }
   int size = objArrayOopDesc::object_size(length);
@@ -1132,6 +1133,36 @@
   return probe;
 }
 
+u2 instanceKlass::enclosing_method_data(int offset) {
+  typeArrayOop inner_class_list = inner_classes();
+  if (inner_class_list == NULL) {
+    return 0;
+  }
+  int length = inner_class_list->length();
+  if (length % inner_class_next_offset == 0) {
+    return 0;
+  } else {
+    int index = length - enclosing_method_attribute_size;
+    typeArrayHandle inner_class_list_h(inner_class_list);
+    assert(offset < enclosing_method_attribute_size, "invalid offset");
+    return inner_class_list_h->ushort_at(index + offset);
+  }
+}
+
+void instanceKlass::set_enclosing_method_indices(u2 class_index,
+                                                 u2 method_index) {
+  typeArrayOop inner_class_list = inner_classes();
+  assert (inner_class_list != NULL, "_inner_classes list is not set up");
+  int length = inner_class_list->length();
+  if (length % inner_class_next_offset == enclosing_method_attribute_size) {
+    int index = length - enclosing_method_attribute_size;
+    typeArrayHandle inner_class_list_h(inner_class_list);
+    inner_class_list_h->ushort_at_put(
+      index + enclosing_method_class_index_offset, class_index);
+    inner_class_list_h->ushort_at_put(
+      index + enclosing_method_method_index_offset, method_index);
+  }
+}
 
 // Lookup or create a jmethodID.
 // This code is called by the VMThread and JavaThreads so the
@@ -2106,28 +2137,21 @@
   jint access = access_flags().as_int();
 
   // But check if it happens to be member class.
-  typeArrayOop inner_class_list = inner_classes();
-  int length = (inner_class_list == NULL) ? 0 : inner_class_list->length();
-  assert (length % instanceKlass::inner_class_next_offset == 0, "just checking");
-  if (length > 0) {
-    typeArrayHandle inner_class_list_h(THREAD, inner_class_list);
-    instanceKlassHandle ik(THREAD, k);
-    for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) {
-      int ioff = inner_class_list_h->ushort_at(
-                      i + instanceKlass::inner_class_inner_class_info_offset);
-
-      // Inner class attribute can be zero, skip it.
-      // Strange but true:  JVM spec. allows null inner class refs.
-      if (ioff == 0) continue;
-
-      // only look at classes that are already loaded
-      // since we are looking for the flags for our self.
-      Symbol* inner_name = ik->constants()->klass_name_at(ioff);
-      if ((ik->name() == inner_name)) {
-        // This is really a member class.
-        access = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset);
-        break;
-      }
+  instanceKlassHandle ik(THREAD, k);
+  InnerClassesIterator iter(ik);
+  for (; !iter.done(); iter.next()) {
+    int ioff = iter.inner_class_info_index();
+    // Inner class attribute can be zero, skip it.
+    // Strange but true:  JVM spec. allows null inner class refs.
+    if (ioff == 0) continue;
+
+    // only look at classes that are already loaded
+    // since we are looking for the flags for ourselves.
+    Symbol* inner_name = ik->constants()->klass_name_at(ioff);
+    if ((ik->name() == inner_name)) {
+      // This is really a member class.
+      access = iter.inner_access_flags();
+      break;
     }
   }
   // Remember to strip ACC_SUPER bit
--- a/src/share/vm/oops/instanceKlass.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/oops/instanceKlass.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -188,7 +188,17 @@
   klassOop        _host_klass;
   // Class signers.
   objArrayOop     _signers;
-  // inner_classes attribute.
+  // The InnerClasses attribute and EnclosingMethod attribute. The
+  // _inner_classes is an array of shorts. If the class has InnerClasses
+  // attribute, then the _inner_classes array begins with 4-tuples of shorts
+  // [inner_class_info_index, outer_class_info_index,
+  // inner_name_index, inner_class_access_flags] for the InnerClasses
+  // attribute. If the EnclosingMethod attribute exists, it occupies the
+  // last two shorts [class_index, method_index] of the array. If only
+  // the InnerClasses attribute exists, the _inner_classes array length is
+  // number_of_inner_classes * 4. If the class has both InnerClasses
+  // and EnclosingMethod attributes the _inner_classes array length is
+  // number_of_inner_classes * 4 + enclosing_method_attribute_size.
   typeArrayOop    _inner_classes;
   // Implementors of this interface (not valid if it overflows)
   klassOop        _implementors[implementors_limit];
@@ -240,7 +250,6 @@
   Thread*         _init_thread;          // Pointer to current thread doing initialization (to handle recursive initialization)
   int             _vtable_len;           // length of Java vtable (in words)
   int             _itable_len;           // length of Java itable (in words)
-  ReferenceType   _reference_type;       // reference type
   OopMapCache*    volatile _oop_map_cache;   // OopMapCache for all methods in the klass (allocated lazily)
   JNIid*          _jni_ids;              // First JNI identifier for static fields in this class
   jmethodID*      _methods_jmethod_ids;  // jmethodIDs corresponding to method_idnum, or NULL if none
@@ -252,8 +261,6 @@
   // Array of interesting part(s) of the previous version(s) of this
   // instanceKlass. See PreviousVersionWalker below.
   GrowableArray<PreviousVersionNode *>* _previous_versions;
-  u2              _enclosing_method_class_index;  // Constant pool index for class of enclosing method, or 0 if none
-  u2              _enclosing_method_method_index; // Constant pool index for name and type of enclosing method, or 0 if none
   // JVMTI fields can be moved to their own structure - see 6315920
   unsigned char * _cached_class_file_bytes;       // JVMTI: cached class file, before retransformable agent modified it in CFLH
   jint            _cached_class_file_len;         // JVMTI: length of above
@@ -265,6 +272,8 @@
   // _idnum_allocated_count.
   u1              _init_state;                    // state of class
 
+  u1              _reference_type;                // reference type
+
   // embedded Java vtable follows here
   // embedded Java itables follows here
   // embedded static fields follows here
@@ -350,6 +359,12 @@
     inner_class_next_offset = 4
   };
 
+  enum EnclosingMethodAttributeOffset {
+    enclosing_method_class_index_offset = 0,
+    enclosing_method_method_index_offset = 1,
+    enclosing_method_attribute_size = 2
+  };
+
   // method override check
   bool is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS);
 
@@ -407,8 +422,11 @@
   void eager_initialize(Thread *thread);
 
   // reference type
-  ReferenceType reference_type() const     { return _reference_type; }
-  void set_reference_type(ReferenceType t) { _reference_type = t; }
+  ReferenceType reference_type() const     { return (ReferenceType)_reference_type; }
+  void set_reference_type(ReferenceType t) {
+    assert(t == (u1)t, "overflow");
+    _reference_type = (u1)t;
+  }
 
   static ByteSize reference_type_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _reference_type)); }
 
@@ -529,11 +547,15 @@
   Symbol* generic_signature() const                   { return _generic_signature; }
   void set_generic_signature(Symbol* sig)             { _generic_signature = sig; }
 
-  u2 enclosing_method_class_index() const             { return _enclosing_method_class_index; }
-  u2 enclosing_method_method_index() const            { return _enclosing_method_method_index; }
+  u2 enclosing_method_data(int offset);
+  u2 enclosing_method_class_index() {
+    return enclosing_method_data(enclosing_method_class_index_offset);
+  }
+  u2 enclosing_method_method_index() {
+    return enclosing_method_data(enclosing_method_method_index_offset);
+  }
   void set_enclosing_method_indices(u2 class_index,
-                                    u2 method_index)  { _enclosing_method_class_index  = class_index;
-                                                        _enclosing_method_method_index = method_index; }
+                                    u2 method_index);
 
   // jmethodID support
   static jmethodID get_jmethod_id(instanceKlassHandle ik_h,
@@ -570,9 +592,9 @@
   void set_method_annotations_of(int idnum, typeArrayOop anno)
                                                 { set_methods_annotations_of(idnum, anno, &_methods_annotations); }
   void set_method_parameter_annotations_of(int idnum, typeArrayOop anno)
-                                                { set_methods_annotations_of(idnum, anno, &_methods_annotations); }
+                                                { set_methods_annotations_of(idnum, anno, &_methods_parameter_annotations); }
   void set_method_default_annotations_of(int idnum, typeArrayOop anno)
-                                                { set_methods_annotations_of(idnum, anno, &_methods_annotations); }
+                                                { set_methods_annotations_of(idnum, anno, &_methods_default_annotations); }
 
   // allocation
   DEFINE_ALLOCATE_PERMANENT(instanceKlass);
@@ -1049,4 +1071,83 @@
   nmethod* get_nmethod()                  { return _nmethod; }
 };
 
+// An iterator that's used to access the inner classes indices in the
+// instanceKlass::_inner_classes array.
+class InnerClassesIterator : public StackObj {
+ private:
+  typeArrayHandle _inner_classes;
+  int _length;
+  int _idx;
+ public:
+
+  InnerClassesIterator(instanceKlassHandle k) {
+    _inner_classes = k->inner_classes();
+    if (k->inner_classes() != NULL) {
+      _length = _inner_classes->length();
+      // The inner class array's length should be a multiple of
+      // inner_class_next_offset if it only contains the InnerClasses
+      // attribute data, or it should be
+      // n*inner_class_next_offset+enclosing_method_attribute_size
+      // if it also contains the EnclosingMethod data.
+      assert((_length % instanceKlass::inner_class_next_offset == 0 ||
+              _length % instanceKlass::inner_class_next_offset == instanceKlass::enclosing_method_attribute_size),
+             "just checking");
+      // Remove the enclosing_method portion if it exists.
+      if (_length % instanceKlass::inner_class_next_offset == instanceKlass::enclosing_method_attribute_size) {
+        _length -= instanceKlass::enclosing_method_attribute_size;
+      }
+    } else {
+      _length = 0;
+    }
+    _idx = 0;
+  }
+
+  int length() const {
+    return _length;
+  }
+
+  void next() {
+    _idx += instanceKlass::inner_class_next_offset;
+  }
+
+  bool done() const {
+    return (_idx >= _length);
+  }
+
+  u2 inner_class_info_index() const {
+    return _inner_classes->ushort_at(
+               _idx + instanceKlass::inner_class_inner_class_info_offset);
+  }
+
+  void set_inner_class_info_index(u2 index) {
+    _inner_classes->ushort_at_put(
+               _idx + instanceKlass::inner_class_inner_class_info_offset, index);
+  }
+
+  u2 outer_class_info_index() const {
+    return _inner_classes->ushort_at(
+               _idx + instanceKlass::inner_class_outer_class_info_offset);
+  }
+
+  void set_outer_class_info_index(u2 index) {
+    _inner_classes->ushort_at_put(
+               _idx + instanceKlass::inner_class_outer_class_info_offset, index);
+  }
+
+  u2 inner_name_index() const {
+    return _inner_classes->ushort_at(
+               _idx + instanceKlass::inner_class_inner_name_offset);
+  }
+
+  void set_inner_name_index(u2 index) {
+    _inner_classes->ushort_at_put(
+               _idx + instanceKlass::inner_class_inner_name_offset, index);
+  }
+
+  u2 inner_access_flags() const {
+    return _inner_classes->ushort_at(
+               _idx + instanceKlass::inner_class_access_flags_offset);
+  }
+};
+
 #endif // SHARE_VM_OOPS_INSTANCEKLASS_HPP
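
The InnerClassesIterator added above hides the raw 4-short tuple layout of _inner_classes (and trims off the trailing EnclosingMethod pair in its constructor), which is what lets the dump.cpp and instanceKlass.cpp hunks earlier in this changeset drop their hand-rolled index arithmetic. A usage sketch modeled directly on the dump.cpp rewrite (the helper name and closure parameter are illustrative):

    // Walk the InnerClasses tuples of a loaded class and hand every
    // non-zero inner-name symbol to a closure, as dump.cpp now does.
    static void visit_inner_names(instanceKlassHandle ik, SymbolClosure* closure) {
      constantPoolOop constants = ik->constants();
      for (InnerClassesIterator iter(ik); !iter.done(); iter.next()) {
        int index = iter.inner_name_index();
        if (index != 0) {          // zero means the entry has no inner name
          closure->do_symbol(constants->symbol_at_addr(index));
        }
      }
    }
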
--- a/src/share/vm/oops/instanceKlassKlass.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/oops/instanceKlassKlass.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -416,7 +416,6 @@
     ik->set_methods_annotations(NULL);
     ik->set_methods_parameter_annotations(NULL);
     ik->set_methods_default_annotations(NULL);
-    ik->set_enclosing_method_indices(0, 0);
     ik->set_jvmti_cached_class_field_map(NULL);
     ik->set_initial_method_idnum(0);
     assert(k()->is_parsable(), "should be parsable here.");
--- a/src/share/vm/oops/klass.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/oops/klass.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -158,9 +158,7 @@
   kl->set_next_sibling(NULL);
   kl->set_alloc_count(0);
   kl->set_alloc_size(0);
-#ifdef TRACE_SET_KLASS_TRACE_ID
   TRACE_SET_KLASS_TRACE_ID(kl, 0);
-#endif
 
   kl->set_prototype_header(markOopDesc::prototype());
   kl->set_biased_lock_revocation_count(0);
@@ -176,10 +174,9 @@
 }
 
 void Klass_vtbl::post_new_init_klass(KlassHandle& klass,
-                                     klassOop new_klass,
-                                     int size) const {
+                                     klassOop new_klass) const {
   assert(!new_klass->klass_part()->null_vtbl(), "Not a complete klass");
-  CollectedHeap::post_allocation_install_obj_klass(klass, new_klass, size);
+  CollectedHeap::post_allocation_install_obj_klass(klass, new_klass);
 }
 
 void* Klass_vtbl::operator new(size_t ignored, KlassHandle& klass,
--- a/src/share/vm/oops/klass.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/oops/klass.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -149,7 +149,7 @@
   // by the shared "base_create" subroutines.
   //
   virtual void* allocate_permanent(KlassHandle& klass, int size, TRAPS) const = 0;
-  void post_new_init_klass(KlassHandle& klass, klassOop obj, int size) const;
+  void post_new_init_klass(KlassHandle& klass, klassOop obj) const;
 
   // Every subclass on which vtbl_value is called must include this macro.
   // Delay the installation of the klassKlass pointer until after the
@@ -160,7 +160,7 @@
     if (HAS_PENDING_EXCEPTION) return NULL;                                   \
     klassOop new_klass = ((Klass*) result)->as_klassOop();                    \
     OrderAccess::storestore();                                                \
-    post_new_init_klass(klass_klass, new_klass, size);                        \
+    post_new_init_klass(klass_klass, new_klass);                              \
     return result;                                                            \
   }
 
@@ -265,9 +265,7 @@
   markOop  _prototype_header;   // Used when biased locking is both enabled and disabled for this type
   jint     _biased_lock_revocation_count;
 
-#ifdef TRACE_DEFINE_KLASS_TRACE_ID
   TRACE_DEFINE_KLASS_TRACE_ID;
-#endif
  public:
 
   // returns the enclosing klassOop
@@ -688,9 +686,7 @@
   jlong last_biased_lock_bulk_revocation_time() { return _last_biased_lock_bulk_revocation_time; }
   void  set_last_biased_lock_bulk_revocation_time(jlong cur_time) { _last_biased_lock_bulk_revocation_time = cur_time; }
 
-#ifdef TRACE_DEFINE_KLASS_METHODS
   TRACE_DEFINE_KLASS_METHODS;
-#endif
 
   // garbage collection support
   virtual void follow_weak_klass_links(
--- a/src/share/vm/oops/methodOop.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/oops/methodOop.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -596,6 +596,11 @@
   clear_code();
 }
 
+address methodOopDesc::critical_native_function() {
+  methodHandle mh(this);
+  return NativeLookup::lookup_critical_entry(mh);
+}
+
 
 void methodOopDesc::set_signature_handler(address handler) {
   address* signature_handler =  signature_handler_addr();
--- a/src/share/vm/oops/methodOop.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/oops/methodOop.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -403,6 +403,8 @@
     native_bind_event_is_interesting = true
   };
   address native_function() const                { return *(native_function_addr()); }
+  address critical_native_function();
+
   // Must specify a real function (not NULL).
   // Use clear_native_function() to unregister.
   void set_native_function(address function, bool post_event_flag);
--- a/src/share/vm/oops/objArrayKlass.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/oops/objArrayKlass.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,6 +68,7 @@
       return a;
     } else {
       report_java_out_of_memory("Requested array size exceeds VM limit");
+      JvmtiExport::post_array_size_exhausted();
       THROW_OOP_0(Universe::out_of_memory_error_array_size());
     }
   } else {
--- a/src/share/vm/oops/typeArrayKlass.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/oops/typeArrayKlass.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,6 +93,7 @@
       return t;
     } else {
       report_java_out_of_memory("Requested array size exceeds VM limit");
+      JvmtiExport::post_array_size_exhausted();
       THROW_OOP_0(Universe::out_of_memory_error_array_size());
     }
   } else {
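
Each of the "Requested array size exceeds VM limit" sites touched in this changeset (arrayKlass.cpp, instanceKlass.cpp, objArrayKlass.cpp, typeArrayKlass.cpp) now calls JvmtiExport::post_array_size_exhausted() before throwing, so JVMTI agents receive a ResourceExhausted event for this kind of OutOfMemoryError. A hedged agent-side sketch using the standard JVMTI event API (agent code, not part of this changeset):

    #include <jvmti.h>
    #include <stdio.h>
    #include <string.h>

    static void JNICALL on_resource_exhausted(jvmtiEnv* jvmti, JNIEnv* jni,
                                              jint flags, const void* reserved,
                                              const char* description) {
      if (flags & JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR) {
        fprintf(stderr, "resource exhausted: %s\n", description);
      }
    }

    JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
      jvmtiEnv* jvmti = NULL;
      if (vm->GetEnv((void**)&jvmti, JVMTI_VERSION_1_1) != JNI_OK) return JNI_ERR;
      jvmtiEventCallbacks callbacks;
      memset(&callbacks, 0, sizeof(callbacks));
      callbacks.ResourceExhausted = &on_resource_exhausted;
      jvmti->SetEventCallbacks(&callbacks, sizeof(callbacks));
      jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_RESOURCE_EXHAUSTED, NULL);
      return JNI_OK;
    }
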
--- a/src/share/vm/opto/block.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/block.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -284,13 +284,13 @@
   // helper function that adds caller save registers to MachProjNode
   void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
   // Schedule a call next in the block
-  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call);
+  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
 
   // Perform basic-block local scheduling
-  Node *select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot);
+  Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
   void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
   void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
-  bool schedule_local(PhaseCFG *cfg, Matcher &m, int *ready_cnt, VectorSet &next_call);
+  bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
   // Cleanup if any code lands between a Call and his Catch
   void call_catch_cleanup(Block_Array &bbs);
   // Detect implicit-null-check opportunities.  Basically, find NULL checks
--- a/src/share/vm/opto/bytecodeInfo.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -257,6 +257,18 @@
       return "exception method";
   }
 
+  if (callee_method->should_not_inline()) {
+    return "disallowed by CompilerOracle";
+  }
+
+  if (UseStringCache) {
+    // Do not inline StringCache::profile() method used only at the beginning.
+    if (callee_method->name() == ciSymbol::profile_name() &&
+        callee_method->holder()->name() == ciSymbol::java_lang_StringCache()) {
+      return "profiling method";
+    }
+  }
+
   // use frequency-based objections only for non-trivial methods
   if (callee_method->code_size_for_inlining() <= MaxTrivialSize) return NULL;
 
@@ -278,18 +290,6 @@
     }
   }
 
-  if (callee_method->should_not_inline()) {
-    return "disallowed by CompilerOracle";
-  }
-
-  if (UseStringCache) {
-    // Do not inline StringCache::profile() method used only at the beginning.
-    if (callee_method->name() == ciSymbol::profile_name() &&
-        callee_method->holder()->name() == ciSymbol::java_lang_StringCache()) {
-      return "profiling method";
-    }
-  }
-
   return NULL;
 }
 
--- a/src/share/vm/opto/c2_globals.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/c2_globals.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -465,6 +465,9 @@
   notproduct(bool, PrintOptimizePtrCompare, false,                          \
           "Print information about optimized pointers compare")             \
                                                                             \
+  notproduct(bool, VerifyConnectionGraph , true,                            \
+          "Verify Connection Graph construction in Escape Analysis")        \
+                                                                            \
   product(bool, UseOptoBiasInlining, true,                                  \
           "Generate biased locking code in C2 ideal graph")                 \
                                                                             \
--- a/src/share/vm/opto/callnode.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/callnode.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1538,10 +1538,7 @@
     // If we are locking an unescaped object, the lock/unlock is unnecessary
     //
     ConnectionGraph *cgr = phase->C->congraph();
-    PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
-    if (cgr != NULL)
-      es = cgr->escape_state(obj_node());
-    if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
+    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
       assert(!is_eliminated() || is_coarsened(), "sanity");
       // The lock could be marked eliminated by lock coarsening
       // code during first IGVN before EA. Replace coarsened flag
@@ -1680,10 +1677,7 @@
     // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
     //
     ConnectionGraph *cgr = phase->C->congraph();
-    PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
-    if (cgr != NULL)
-      es = cgr->escape_state(obj_node());
-    if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
+    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
       assert(!is_eliminated() || is_coarsened(), "sanity");
       // The lock could be marked eliminated by lock coarsening
       // code during first IGVN before EA. Replace coarsened flag
--- a/src/share/vm/opto/callnode.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/callnode.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -546,6 +546,12 @@
   // or result projection is there are several CheckCastPP
   // or returns NULL if there is no one.
   Node *result_cast();
+  // Does this node return a pointer?
+  bool returns_pointer() const {
+    const TypeTuple *r = tf()->range();
+    return (r->cnt() > TypeFunc::Parms &&
+            r->field_at(TypeFunc::Parms)->isa_ptr());
+  }
 
   // Collect all the interesting edges from a call for use in
   // replacing the call by something else.  Used by macro expansion
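
The new CallNode::returns_pointer() helper checks whether the call's return tuple has
a pointer entry at TypeFunc::Parms. The rewritten escape analysis later in this
changeset uses it to decide which calls get a Connection Graph node, for example
(taken from the escape.cpp hunk below):

    // Check if a call returns an object.
    if (n->as_Call()->returns_pointer() &&
        n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
      add_call_node(n->as_Call());
    }
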
--- a/src/share/vm/opto/chaitin.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/chaitin.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1946,18 +1946,29 @@
     reg2offset_unchecked(OptoReg::add(_matcher._old_SP,-1)) - reg2offset_unchecked(_matcher._new_SP)+jintSize);
 
   // Preserve area dump
+  int fixed_slots = C->fixed_slots();
+  OptoReg::Name begin_in_preserve = OptoReg::add(_matcher._old_SP, -(int)C->in_preserve_stack_slots());
+  OptoReg::Name return_addr = _matcher.return_addr();
+
   reg = OptoReg::add(reg, -1);
-  while( OptoReg::is_stack(reg)) {
+  while (OptoReg::is_stack(reg)) {
     tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
-    if( _matcher.return_addr() == reg )
+    if (return_addr == reg) {
       tty->print_cr("return address");
-    else if( _matcher.return_addr() == OptoReg::add(reg,1) &&
-             VerifyStackAtCalls )
-      tty->print_cr("0xBADB100D   +VerifyStackAtCalls");
-    else if ((int)OptoReg::reg2stack(reg) < C->fixed_slots())
+    } else if (reg >= begin_in_preserve) {
+      // Preserved slots are present on x86
+      if (return_addr == OptoReg::add(reg, VMRegImpl::slots_per_word))
+        tty->print_cr("saved fp register");
+      else if (return_addr == OptoReg::add(reg, 2*VMRegImpl::slots_per_word) &&
+               VerifyStackAtCalls)
+        tty->print_cr("0xBADB100D   +VerifyStackAtCalls");
+      else
+        tty->print_cr("in_preserve");
+    } else if ((int)OptoReg::reg2stack(reg) < fixed_slots) {
       tty->print_cr("Fixed slot %d", OptoReg::reg2stack(reg));
-    else
-      tty->print_cr("pad2, in_preserve");
+    } else {
+      tty->print_cr("pad2, stack alignment");
+    }
     reg = OptoReg::add(reg, -1);
   }
 
--- a/src/share/vm/opto/compile.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/compile.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1707,7 +1707,6 @@
       if (major_progress()) print_method("PhaseIdealLoop before EA", 2);
       if (failing())  return;
     }
-    TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, true);
     ConnectionGraph::do_analysis(this, &igvn);
 
     if (failing())  return;
@@ -1719,6 +1718,7 @@
     if (failing())  return;
 
     if (congraph() != NULL && macro_count() > 0) {
+      NOT_PRODUCT( TracePhase t2("macroEliminate", &_t_macroEliminate, TimeCompiler); )
       PhaseMacroExpand mexp(igvn);
       mexp.eliminate_macro_nodes();
       igvn.set_delay_transform(false);
@@ -1875,10 +1875,10 @@
 
     cfg.Estimate_Block_Frequency();
     cfg.GlobalCodeMotion(m,unique(),proj_list);
+    if (failing())  return;
 
     print_method("Global code motion", 2);
 
-    if (failing())  return;
     NOT_PRODUCT( verify_graph_edges(); )
 
     debug_only( cfg.verify(); )
--- a/src/share/vm/opto/compile.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/compile.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -631,7 +631,7 @@
 
   // Decide how to build a call.
   // The profile factor is a discount to apply to this site's interp. profile.
-  CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float profile_factor);
+  CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true);
   bool should_delay_inlining(ciMethod* call_method, JVMState* jvms);
 
   // Report if there were too many traps at a current method and bci.
--- a/src/share/vm/opto/connode.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/connode.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1051,6 +1051,7 @@
 //------------------------------Value------------------------------------------
 const Type *CastX2PNode::Value( PhaseTransform *phase ) const {
   const Type* t = phase->type(in(1));
+  if (t == Type::TOP) return Type::TOP;
   if (t->base() == Type_X && t->singleton()) {
     uintptr_t bits = (uintptr_t) t->is_intptr_t()->get_con();
     if (bits == 0)   return TypePtr::NULL_PTR;
@@ -1121,6 +1122,7 @@
 //------------------------------Value------------------------------------------
 const Type *CastP2XNode::Value( PhaseTransform *phase ) const {
   const Type* t = phase->type(in(1));
+  if (t == Type::TOP) return Type::TOP;
   if (t->base() == Type::RawPtr && t->singleton()) {
     uintptr_t bits = (uintptr_t) t->is_rawptr()->get_con();
     return TypeX::make(bits);
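
Both Value() changes add the same guard: propagate TOP before inspecting singleton
constants, so a not-yet-typed input cannot reach is_intptr_t() or is_rawptr(). The
shape of the guard in isolation (SomeCastNode is a placeholder name, not a real node
class):

    const Type* SomeCastNode::Value(PhaseTransform* phase) const {
      const Type* t = phase->type(in(1));
      if (t == Type::TOP) return Type::TOP;  // input type not computed yet
      // ... constant-fold singleton inputs as in the hunks above ...
      return bottom_type();
    }
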
--- a/src/share/vm/opto/doCall.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/doCall.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -61,7 +61,7 @@
 
 CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual,
                                        JVMState* jvms, bool allow_inline,
-                                       float prof_factor) {
+                                       float prof_factor, bool allow_intrinsics) {
   ciMethod*       caller   = jvms->method();
   int             bci      = jvms->bci();
   Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
@@ -108,7 +108,7 @@
   // then we return it as the inlined version of the call.
   // We do this before the strict f.p. check below because the
   // intrinsics handle strict f.p. correctly.
-  if (allow_inline) {
+  if (allow_inline && allow_intrinsics) {
     CallGenerator* cg = find_intrinsic(call_method, call_is_virtual);
     if (cg != NULL)  return cg;
   }
@@ -455,21 +455,12 @@
     // cg->generate(), we are committed.  If it fails, the whole
     // compilation task is compromised.
     if (failing())  return;
-#ifndef PRODUCT
-    if (PrintOpto || PrintOptoInlining || PrintInlining) {
-      // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
-      if (cg->is_intrinsic() && call_method->code_size() > 0) {
-        tty->print("Bailed out of intrinsic, will not inline: ");
-        call_method->print_name(); tty->cr();
-      }
-    }
-#endif
+
     // This can happen if a library intrinsic is available, but refuses
     // the call site, perhaps because it did not match a pattern the
-    // intrinsic was expecting to optimize.  The fallback position is
-    // to call out-of-line.
-    try_inline = false;  // Inline tactic bailed out.
-    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
+    // intrinsic was expecting to optimize. It should always be possible to
+    // get a normal Java call that may inline in that case.
+    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false);
     if ((new_jvms = cg->generate(jvms)) == NULL) {
       guarantee(failing(), "call failed to generate:  calls should work");
       return;
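
Together with the compile.hpp change above, the new allow_intrinsics parameter
(default true) lets a failed intrinsic fall back to a regular CallGenerator instead
of disabling inlining for the site: the call_generator() request is simply repeated
with intrinsics turned off, as in the added line above:

    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms,
                           try_inline, prof_factor(), /* allow_intrinsics= */ false);

This also makes the removed PrintOpto/PrintInlining "Bailed out of intrinsic, will
not inline" diagnostic unnecessary, since inlining is no longer abandoned.
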
--- a/src/share/vm/opto/escape.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/escape.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "ci/bcEscapeAnalyzer.hpp"
+#include "compiler/compileLog.hpp"
 #include "libadt/vectset.hpp"
 #include "memory/allocation.hpp"
 #include "opto/c2compiler.hpp"
@@ -34,125 +35,1935 @@
 #include "opto/phaseX.hpp"
 #include "opto/rootnode.hpp"
 
-void PointsToNode::add_edge(uint targIdx, PointsToNode::EdgeType et) {
-  uint v = (targIdx << EdgeShift) + ((uint) et);
-  if (_edges == NULL) {
-     Arena *a = Compile::current()->comp_arena();
-    _edges = new(a) GrowableArray<uint>(a, INITIAL_EDGE_COUNT, 0, 0);
-  }
-  _edges->append_if_missing(v);
-}
-
-void PointsToNode::remove_edge(uint targIdx, PointsToNode::EdgeType et) {
-  uint v = (targIdx << EdgeShift) + ((uint) et);
-
-  _edges->remove(v);
-}
-
-#ifndef PRODUCT
-static const char *node_type_names[] = {
-  "UnknownType",
-  "JavaObject",
-  "LocalVar",
-  "Field"
-};
-
-static const char *esc_names[] = {
-  "UnknownEscape",
-  "NoEscape",
-  "ArgEscape",
-  "GlobalEscape"
-};
-
-static const char *edge_type_suffix[] = {
- "?", // UnknownEdge
- "P", // PointsToEdge
- "D", // DeferredEdge
- "F"  // FieldEdge
-};
-
-void PointsToNode::dump(bool print_state) const {
-  NodeType nt = node_type();
-  tty->print("%s ", node_type_names[(int) nt]);
-  if (print_state) {
-    EscapeState es = escape_state();
-    tty->print("%s %s ", esc_names[(int) es], _scalar_replaceable ? "":"NSR");
-  }
-  tty->print("[[");
-  for (uint i = 0; i < edge_count(); i++) {
-    tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]);
-  }
-  tty->print("]]  ");
-  if (_node == NULL)
-    tty->print_cr("<null>");
-  else
-    _node->dump();
-}
-#endif
-
 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
-  _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()),
-  _processed(C->comp_arena()),
-  pt_ptset(C->comp_arena()),
-  pt_visited(C->comp_arena()),
-  pt_worklist(C->comp_arena(), 4, 0, 0),
+  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
   _collecting(true),
-  _progress(false),
+  _verify(false),
   _compile(C),
   _igvn(igvn),
   _node_map(C->comp_arena()) {
-
-  _phantom_object = C->top()->_idx,
-  add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape,true);
-
+  // Add unknown java object.
+  add_java_object(C->top(), PointsToNode::GlobalEscape);
+  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
   // Add ConP(#NULL) and ConN(#NULL) nodes.
   Node* oop_null = igvn->zerocon(T_OBJECT);
-  _oop_null = oop_null->_idx;
-  assert(_oop_null < nodes_size(), "should be created already");
-  add_node(oop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
-
+  assert(oop_null->_idx < nodes_size(), "should be created already");
+  add_java_object(oop_null, PointsToNode::NoEscape);
+  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
   if (UseCompressedOops) {
     Node* noop_null = igvn->zerocon(T_NARROWOOP);
-    _noop_null = noop_null->_idx;
-    assert(_noop_null < nodes_size(), "should be created already");
-    add_node(noop_null, PointsToNode::JavaObject, PointsToNode::NoEscape, true);
-  } else {
-    _noop_null = _oop_null; // Should be initialized
+    assert(noop_null->_idx < nodes_size(), "should be created already");
+    map_ideal_node(noop_null, null_obj);
   }
   _pcmp_neq = NULL; // Should be initialized
   _pcmp_eq  = NULL;
 }
 
-void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) {
-  PointsToNode *f = ptnode_adr(from_i);
-  PointsToNode *t = ptnode_adr(to_i);
+bool ConnectionGraph::has_candidates(Compile *C) {
+  // EA brings benefits only when the code has allocations and/or locks which
+  // are represented by ideal Macro nodes.
+  int cnt = C->macro_count();
+  for (int i = 0; i < cnt; i++) {
+    Node *n = C->macro_node(i);
+    if (n->is_Allocate())
+      return true;
+    if (n->is_Lock()) {
+      Node* obj = n->as_Lock()->obj_node()->uncast();
+      if (!(obj->is_Parm() || obj->is_Con()))
+        return true;
+    }
+  }
+  return false;
+}
+
+void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
+  Compile::TracePhase t2("escapeAnalysis", &Phase::_t_escapeAnalysis, true);
+  ResourceMark rm;
+
+  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
+  // to create space for them in ConnectionGraph::_nodes[].
+  Node* oop_null = igvn->zerocon(T_OBJECT);
+  Node* noop_null = igvn->zerocon(T_NARROWOOP);
+  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
+  // Perform escape analysis
+  if (congraph->compute_escape()) {
+    // There are non escaping objects.
+    C->set_congraph(congraph);
+  }
+  // Cleanup.
+  if (oop_null->outcnt() == 0)
+    igvn->hash_delete(oop_null);
+  if (noop_null->outcnt() == 0)
+    igvn->hash_delete(noop_null);
+}
+
+bool ConnectionGraph::compute_escape() {
+  Compile* C = _compile;
+  PhaseGVN* igvn = _igvn;
+
+  // Worklists used by EA.
+  Unique_Node_List delayed_worklist;
+  GrowableArray<Node*> alloc_worklist;
+  GrowableArray<Node*> ptr_cmp_worklist;
+  GrowableArray<Node*> storestore_worklist;
+  GrowableArray<PointsToNode*>   ptnodes_worklist;
+  GrowableArray<JavaObjectNode*> java_objects_worklist;
+  GrowableArray<JavaObjectNode*> non_escaped_worklist;
+  GrowableArray<FieldNode*>      oop_fields_worklist;
+  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )
+
+  { Compile::TracePhase t3("connectionGraph", &Phase::_t_connectionGraph, true);
+
+  // 1. Populate Connection Graph (CG) with PointsTo nodes.
+  ideal_nodes.map(C->unique(), NULL);  // preallocate space
+  // Initialize worklist
+  if (C->root() != NULL) {
+    ideal_nodes.push(C->root());
+  }
+  for( uint next = 0; next < ideal_nodes.size(); ++next ) {
+    Node* n = ideal_nodes.at(next);
+    // Create PointsTo nodes and add them to Connection Graph. Called
+    // only once per ideal node since ideal_nodes is Unique_Node list.
+    add_node_to_connection_graph(n, &delayed_worklist);
+    PointsToNode* ptn = ptnode_adr(n->_idx);
+    if (ptn != NULL) {
+      ptnodes_worklist.append(ptn);
+      if (ptn->is_JavaObject()) {
+        java_objects_worklist.append(ptn->as_JavaObject());
+        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
+            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
+          // Only the results of allocations and Java static calls are interesting.
+          non_escaped_worklist.append(ptn->as_JavaObject());
+        }
+      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
+        oop_fields_worklist.append(ptn->as_Field());
+      }
+    }
+    if (n->is_MergeMem()) {
+      // Collect all MergeMem nodes to add memory slices for
+      // scalar replaceable objects in split_unique_types().
+      _mergemem_worklist.append(n->as_MergeMem());
+    } else if (OptimizePtrCompare && n->is_Cmp() &&
+               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
+      // Collect compare pointers nodes.
+      ptr_cmp_worklist.append(n);
+    } else if (n->is_MemBarStoreStore()) {
+      // Collect all MemBarStoreStore nodes so that depending on the
+      // escape status of the associated Allocate node some of them
+      // may be eliminated.
+      storestore_worklist.append(n);
+#ifdef ASSERT
+    } else if(n->is_AddP()) {
+      // Collect address nodes for graph verification.
+      addp_worklist.append(n);
+#endif
+    }
+    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+      Node* m = n->fast_out(i);   // Get user
+      ideal_nodes.push(m);
+    }
+  }
+  if (non_escaped_worklist.length() == 0) {
+    _collecting = false;
+    return false; // Nothing to do.
+  }
+  // Add final simple edges to graph.
+  while(delayed_worklist.size() > 0) {
+    Node* n = delayed_worklist.pop();
+    add_final_edges(n);
+  }
+  int ptnodes_length = ptnodes_worklist.length();
+
+#ifdef ASSERT
+  if (VerifyConnectionGraph) {
+    // Verify that no new simple edges could be created and all
+    // local vars have edges.
+    _verify = true;
+    for (int next = 0; next < ptnodes_length; ++next) {
+      PointsToNode* ptn = ptnodes_worklist.at(next);
+      add_final_edges(ptn->ideal_node());
+      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
+        ptn->dump();
+        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
+      }
+    }
+    _verify = false;
+  }
+#endif
+
+  // 2. Finish Graph construction by propagating references to all
+  //    java objects through graph.
+  if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
+                                 java_objects_worklist, oop_fields_worklist)) {
+    // All objects escaped, or the time/iteration limit was hit.
+    _collecting = false;
+    return false;
+  }
+
+  // 3. Adjust scalar_replaceable state of nonescaping objects and push
+  //    scalar replaceable allocations on alloc_worklist for processing
+  //    in split_unique_types().
+  int non_escaped_length = non_escaped_worklist.length();
+  for (int next = 0; next < non_escaped_length; next++) {
+    JavaObjectNode* ptn = non_escaped_worklist.at(next);
+    if (ptn->escape_state() == PointsToNode::NoEscape &&
+        ptn->scalar_replaceable()) {
+      adjust_scalar_replaceable_state(ptn);
+      if (ptn->scalar_replaceable()) {
+        alloc_worklist.append(ptn->ideal_node());
+      }
+    }
+  }
+
+#ifdef ASSERT
+  if (VerifyConnectionGraph) {
+    // Verify that graph is complete - no new edges could be added or needed.
+    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
+                            java_objects_worklist, addp_worklist);
+  }
+  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
+  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
+         null_obj->edge_count() == 0 &&
+         !null_obj->arraycopy_src() &&
+         !null_obj->arraycopy_dst(), "sanity");
+#endif
+
+  _collecting = false;
+
+  } // TracePhase t3("connectionGraph")
+
+  // 4. Optimize ideal graph based on EA information.
+  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
+  if (has_non_escaping_obj) {
+    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
+  }
+
+#ifndef PRODUCT
+  if (PrintEscapeAnalysis) {
+    dump(ptnodes_worklist); // Dump ConnectionGraph
+  }
+#endif
+
+  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
+#ifdef ASSERT
+  if (VerifyConnectionGraph) {
+    int alloc_length = alloc_worklist.length();
+    for (int next = 0; next < alloc_length; ++next) {
+      Node* n = alloc_worklist.at(next);
+      PointsToNode* ptn = ptnode_adr(n->_idx);
+      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
+    }
+  }
+#endif
+
+  // 5. Separate memory graph for scalar replaceable allocations.
+  if (has_scalar_replaceable_candidates &&
+      C->AliasLevel() >= 3 && EliminateAllocations) {
+    // Now use the escape information to create unique types for
+    // scalar replaceable objects.
+    split_unique_types(alloc_worklist);
+    if (C->failing())  return false;
+    C->print_method("After Escape Analysis", 2);
+
+#ifdef ASSERT
+  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
+    tty->print("=== No allocations eliminated for ");
+    C->method()->print_short_name();
+    if(!EliminateAllocations) {
+      tty->print(" since EliminateAllocations is off ===");
+    } else if(!has_scalar_replaceable_candidates) {
+      tty->print(" since there are no scalar replaceable candidates ===");
+    } else if(C->AliasLevel() < 3) {
+      tty->print(" since AliasLevel < 3 ===");
+    }
+    tty->cr();
+#endif
+  }
+  return has_non_escaping_obj;
+}
+
+// Populate Connection Graph with PointsTo nodes and create simple
+// connection graph edges.
+void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
+  assert(!_verify, "this method should not be called for verification");
+  PhaseGVN* igvn = _igvn;
+  uint n_idx = n->_idx;
+  PointsToNode* n_ptn = ptnode_adr(n_idx);
+  if (n_ptn != NULL)
+    return; // No need to redefine PointsTo node during first iteration.
+
+  if (n->is_Call()) {
+    // Arguments to allocation and locking don't escape.
+    if (n->is_AbstractLock()) {
+      // Put Lock and Unlock nodes on IGVN worklist to process them during
+      // first IGVN optimization when escape information is still available.
+      record_for_optimizer(n);
+    } else if (n->is_Allocate()) {
+      add_call_node(n->as_Call());
+      record_for_optimizer(n);
+    } else {
+      if (n->is_CallStaticJava()) {
+        const char* name = n->as_CallStaticJava()->_name;
+        if (name != NULL && strcmp(name, "uncommon_trap") == 0)
+          return; // Skip uncommon traps
+      }
+      // Don't mark as processed since call's arguments have to be processed.
+      delayed_worklist->push(n);
+      // Check if a call returns an object.
+      if (n->as_Call()->returns_pointer() &&
+          n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
+        add_call_node(n->as_Call());
+      }
+    }
+    return;
+  }
+  // Put this check here to process call arguments since some call nodes
+  // point to phantom_obj.
+  if (n_ptn == phantom_obj || n_ptn == null_obj)
+    return; // Skip predefined nodes.
 
-  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
-  assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of PointsTo edge");
-  assert(t->node_type() == PointsToNode::JavaObject, "invalid destination of PointsTo edge");
-  if (to_i == _phantom_object) { // Quick test for most common object
-    if (f->has_unknown_ptr()) {
-      return;
+  int opcode = n->Opcode();
+  switch (opcode) {
+    case Op_AddP: {
+      Node* base = get_addp_base(n);
+      PointsToNode* ptn_base = ptnode_adr(base->_idx);
+      // Field nodes are created for all field types. They are used in
+      // adjust_scalar_replaceable_state() and split_unique_types().
+      // Note, non-oop fields will have only base edges in Connection
+      // Graph because such fields are not used for oop loads and stores.
+      int offset = address_offset(n, igvn);
+      add_field(n, PointsToNode::NoEscape, offset);
+      if (ptn_base == NULL) {
+        delayed_worklist->push(n); // Process it later.
+      } else {
+        n_ptn = ptnode_adr(n_idx);
+        add_base(n_ptn->as_Field(), ptn_base);
+      }
+      break;
+    }
+    case Op_CastX2P: {
+      map_ideal_node(n, phantom_obj);
+      break;
+    }
+    case Op_CastPP:
+    case Op_CheckCastPP:
+    case Op_EncodeP:
+    case Op_DecodeN: {
+      add_local_var_and_edge(n, PointsToNode::NoEscape,
+                             n->in(1), delayed_worklist);
+      break;
+    }
+    case Op_CMoveP: {
+      add_local_var(n, PointsToNode::NoEscape);
+      // Do not add edges during the first iteration because some could
+      // not be defined yet.
+      delayed_worklist->push(n);
+      break;
+    }
+    case Op_ConP:
+    case Op_ConN: {
+      // assume all oop constants globally escape except for null
+      PointsToNode::EscapeState es;
+      if (igvn->type(n) == TypePtr::NULL_PTR ||
+          igvn->type(n) == TypeNarrowOop::NULL_PTR) {
+        es = PointsToNode::NoEscape;
+      } else {
+        es = PointsToNode::GlobalEscape;
+      }
+      add_java_object(n, es);
+      break;
+    }
+    case Op_CreateEx: {
+      // assume that all exception objects globally escape
+      add_java_object(n, PointsToNode::GlobalEscape);
+      break;
+    }
+    case Op_LoadKlass:
+    case Op_LoadNKlass: {
+      // Unknown class is loaded
+      map_ideal_node(n, phantom_obj);
+      break;
+    }
+    case Op_LoadP:
+    case Op_LoadN:
+    case Op_LoadPLocked: {
+      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
+      // ThreadLocal has RawPtr type.
+      const Type* t = igvn->type(n);
+      if (t->make_ptr() != NULL) {
+        Node* adr = n->in(MemNode::Address);
+#ifdef ASSERT
+        if (!adr->is_AddP()) {
+          assert(igvn->type(adr)->isa_rawptr(), "sanity");
+        } else {
+          assert((ptnode_adr(adr->_idx) == NULL ||
+                  ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
+        }
+#endif
+        add_local_var_and_edge(n, PointsToNode::NoEscape,
+                               adr, delayed_worklist);
+      }
+      break;
+    }
+    case Op_Parm: {
+      map_ideal_node(n, phantom_obj);
+      break;
+    }
+    case Op_PartialSubtypeCheck: {
+      // Produces Null or notNull and is used only in CmpP so
+      // phantom_obj could be used.
+      map_ideal_node(n, phantom_obj); // Result is unknown
+      break;
+    }
+    case Op_Phi: {
+      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
+      // ThreadLocal has RawPtr type.
+      const Type* t = n->as_Phi()->type();
+      if (t->make_ptr() != NULL) {
+        add_local_var(n, PointsToNode::NoEscape);
+        // Do not add edges during the first iteration because some could
+        // not be defined yet.
+        delayed_worklist->push(n);
+      }
+      break;
+    }
+    case Op_Proj: {
+      // we are only interested in the oop result projection from a call
+      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
+          n->in(0)->as_Call()->returns_pointer()) {
+        add_local_var_and_edge(n, PointsToNode::NoEscape,
+                               n->in(0), delayed_worklist);
+      }
+      break;
+    }
+    case Op_Rethrow: // Exception object escapes
+    case Op_Return: {
+      if (n->req() > TypeFunc::Parms &&
+          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
+        // Treat Return value as LocalVar with GlobalEscape escape state.
+        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
+                               n->in(TypeFunc::Parms), delayed_worklist);
+      }
+      break;
+    }
+    case Op_StoreP:
+    case Op_StoreN:
+    case Op_StorePConditional:
+    case Op_CompareAndSwapP:
+    case Op_CompareAndSwapN: {
+      Node* adr = n->in(MemNode::Address);
+      const Type *adr_type = igvn->type(adr);
+      adr_type = adr_type->make_ptr();
+      if (adr_type->isa_oopptr() ||
+          (opcode == Op_StoreP || opcode == Op_StoreN) &&
+                        (adr_type == TypeRawPtr::NOTNULL &&
+                         adr->in(AddPNode::Address)->is_Proj() &&
+                         adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
+        delayed_worklist->push(n); // Process it later.
+#ifdef ASSERT
+        assert(adr->is_AddP(), "expecting an AddP");
+        if (adr_type == TypeRawPtr::NOTNULL) {
+          // Verify a raw address for a store captured by Initialize node.
+          int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
+          assert(offs != Type::OffsetBot, "offset must be a constant");
+        }
+#endif
+      } else {
+        // Ignore copying the displaced header to the BoxNode (OSR compilation).
+        if (adr->is_BoxLock())
+          break;
+        // Stored value escapes in unsafe access.
+        if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) {
+          // Pointer stores in G1 barriers look like unsafe access.
+          // Ignore such stores to be able to scalar replace non-escaping
+          // allocations.
+          if (UseG1GC && adr->is_AddP()) {
+            Node* base = get_addp_base(adr);
+            if (base->Opcode() == Op_LoadP &&
+                base->in(MemNode::Address)->is_AddP()) {
+              adr = base->in(MemNode::Address);
+              Node* tls = get_addp_base(adr);
+              if (tls->Opcode() == Op_ThreadLocal) {
+                int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
+                if (offs == in_bytes(JavaThread::satb_mark_queue_offset() +
+                                     PtrQueue::byte_offset_of_buf())) {
+                  break; // G1 pre-barrier previous oop value store.
+                }
+                if (offs == in_bytes(JavaThread::dirty_card_queue_offset() +
+                                     PtrQueue::byte_offset_of_buf())) {
+                  break; // G1 post-barrier card address store.
+                }
+              }
+            }
+          }
+          delayed_worklist->push(n); // Process unsafe access later.
+          break;
+        }
+#ifdef ASSERT
+        n->dump(1);
+        assert(false, "not unsafe or G1 barrier raw StoreP");
+#endif
+      }
+      break;
+    }
+    case Op_AryEq:
+    case Op_StrComp:
+    case Op_StrEquals:
+    case Op_StrIndexOf: {
+      add_local_var(n, PointsToNode::ArgEscape);
+      delayed_worklist->push(n); // Process it later.
+      break;
+    }
+    case Op_ThreadLocal: {
+      add_java_object(n, PointsToNode::ArgEscape);
+      break;
+    }
+    default:
+      ; // Do nothing for nodes not related to EA.
+  }
+  return;
+}
+
+#ifdef ASSERT
+#define ELSE_FAIL(name)                               \
+      /* Should not be called for non-pointer types. */  \
+      n->dump(1);                                       \
+      assert(false, name);                              \
+      break;
+#else
+#define ELSE_FAIL(name) \
+      break;
+#endif
+
+// Add final simple edges to graph.
+void ConnectionGraph::add_final_edges(Node *n) {
+  PointsToNode* n_ptn = ptnode_adr(n->_idx);
+#ifdef ASSERT
+  if (_verify && n_ptn->is_JavaObject())
+    return; // This method does not change graph for JavaObject.
+#endif
+
+  if (n->is_Call()) {
+    process_call_arguments(n->as_Call());
+    return;
+  }
+  assert(n->is_Store() || n->is_LoadStore() ||
+         (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
+         "node should be registered already");
+  int opcode = n->Opcode();
+  switch (opcode) {
+    case Op_AddP: {
+      Node* base = get_addp_base(n);
+      PointsToNode* ptn_base = ptnode_adr(base->_idx);
+      assert(ptn_base != NULL, "field's base should be registered");
+      add_base(n_ptn->as_Field(), ptn_base);
+      break;
+    }
+    case Op_CastPP:
+    case Op_CheckCastPP:
+    case Op_EncodeP:
+    case Op_DecodeN: {
+      add_local_var_and_edge(n, PointsToNode::NoEscape,
+                             n->in(1), NULL);
+      break;
+    }
+    case Op_CMoveP: {
+      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
+        Node* in = n->in(i);
+        if (in == NULL)
+          continue;  // ignore NULL
+        Node* uncast_in = in->uncast();
+        if (uncast_in->is_top() || uncast_in == n)
+          continue;  // ignore top or inputs which go back this node
+        PointsToNode* ptn = ptnode_adr(in->_idx);
+        assert(ptn != NULL, "node should be registered");
+        add_edge(n_ptn, ptn);
+      }
+      break;
+    }
+    case Op_LoadP:
+    case Op_LoadN:
+    case Op_LoadPLocked: {
+      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
+      // ThreadLocal has RawPtr type.
+      const Type* t = _igvn->type(n);
+      if (t->make_ptr() != NULL) {
+        Node* adr = n->in(MemNode::Address);
+        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
+        break;
+      }
+      ELSE_FAIL("Op_LoadP");
+    }
+    case Op_Phi: {
+      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
+      // ThreadLocal has RawPtr type.
+      const Type* t = n->as_Phi()->type();
+      if (t->make_ptr() != NULL) {
+        for (uint i = 1; i < n->req(); i++) {
+          Node* in = n->in(i);
+          if (in == NULL)
+            continue;  // ignore NULL
+          Node* uncast_in = in->uncast();
+          if (uncast_in->is_top() || uncast_in == n)
+            continue;  // ignore top or inputs which go back this node
+          PointsToNode* ptn = ptnode_adr(in->_idx);
+          assert(ptn != NULL, "node should be registered");
+          add_edge(n_ptn, ptn);
+        }
+        break;
+      }
+      ELSE_FAIL("Op_Phi");
+    }
+    case Op_Proj: {
+      // we are only interested in the oop result projection from a call
+      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
+          n->in(0)->as_Call()->returns_pointer()) {
+        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
+        break;
+      }
+      ELSE_FAIL("Op_Proj");
+    }
+    case Op_Rethrow: // Exception object escapes
+    case Op_Return: {
+      if (n->req() > TypeFunc::Parms &&
+          _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
+        // Treat Return value as LocalVar with GlobalEscape escape state.
+        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
+                               n->in(TypeFunc::Parms), NULL);
+        break;
+      }
+      ELSE_FAIL("Op_Return");
+    }
+    case Op_StoreP:
+    case Op_StoreN:
+    case Op_StorePConditional:
+    case Op_CompareAndSwapP:
+    case Op_CompareAndSwapN: {
+      Node* adr = n->in(MemNode::Address);
+      const Type *adr_type = _igvn->type(adr);
+      adr_type = adr_type->make_ptr();
+      if (adr_type->isa_oopptr() ||
+          (opcode == Op_StoreP || opcode == Op_StoreN) &&
+                        (adr_type == TypeRawPtr::NOTNULL &&
+                         adr->in(AddPNode::Address)->is_Proj() &&
+                         adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
+        // Point Address to Value
+        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
+        assert(adr_ptn != NULL &&
+               adr_ptn->as_Field()->is_oop(), "node should be registered");
+        Node *val = n->in(MemNode::ValueIn);
+        PointsToNode* ptn = ptnode_adr(val->_idx);
+        assert(ptn != NULL, "node should be registered");
+        add_edge(adr_ptn, ptn);
+        break;
+      } else if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) {
+        // Stored value escapes in unsafe access.
+        Node *val = n->in(MemNode::ValueIn);
+        PointsToNode* ptn = ptnode_adr(val->_idx);
+        assert(ptn != NULL, "node should be registered");
+        ptn->set_escape_state(PointsToNode::GlobalEscape);
+        // Add edge to object for unsafe access with offset.
+        PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
+        assert(adr_ptn != NULL, "node should be registered");
+        if (adr_ptn->is_Field()) {
+          assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
+          add_edge(adr_ptn, ptn);
+        }
+        break;
+      }
+      ELSE_FAIL("Op_StoreP");
+    }
+    case Op_AryEq:
+    case Op_StrComp:
+    case Op_StrEquals:
+    case Op_StrIndexOf: {
+      // char[] arrays passed to string intrinsic do not escape but
+      // they are not scalar replaceable. Adjust escape state for them.
+      // Start from in(2) edge since in(1) is memory edge.
+      for (uint i = 2; i < n->req(); i++) {
+        Node* adr = n->in(i);
+        const Type* at = _igvn->type(adr);
+        if (!adr->is_top() && at->isa_ptr()) {
+          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
+                 at->isa_ptr() != NULL, "expecting a pointer");
+          if (adr->is_AddP()) {
+            adr = get_addp_base(adr);
+          }
+          PointsToNode* ptn = ptnode_adr(adr->_idx);
+          assert(ptn != NULL, "node should be registered");
+          add_edge(n_ptn, ptn);
+        }
+      }
+      break;
+    }
+    default: {
+      // This method should be called only for EA specific nodes which may
+      // have missed some edges when they were created.
+#ifdef ASSERT
+      n->dump(1);
+#endif
+      guarantee(false, "unknown node");
+    }
+  }
+  return;
+}
+
+void ConnectionGraph::add_call_node(CallNode* call) {
+  assert(call->returns_pointer(), "only for call which returns pointer");
+  uint call_idx = call->_idx;
+  if (call->is_Allocate()) {
+    Node* k = call->in(AllocateNode::KlassNode);
+    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
+    assert(kt != NULL, "TypeKlassPtr required.");
+    ciKlass* cik = kt->klass();
+    PointsToNode::EscapeState es = PointsToNode::NoEscape;
+    bool scalar_replaceable = true;
+    if (call->is_AllocateArray()) {
+      if (!cik->is_array_klass()) { // StressReflectiveCode
+        es = PointsToNode::GlobalEscape;
+      } else {
+        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
+        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
+          // Not scalar replaceable if the length is not constant or too big.
+          scalar_replaceable = false;
+        }
+      }
+    } else {  // Allocate instance
+      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
+         !cik->is_instance_klass() || // StressReflectiveCode
+          cik->as_instance_klass()->has_finalizer()) {
+        es = PointsToNode::GlobalEscape;
+      }
+    }
+    add_java_object(call, es);
+    PointsToNode* ptn = ptnode_adr(call_idx);
+    if (!scalar_replaceable && ptn->scalar_replaceable()) {
+      ptn->set_scalar_replaceable(false);
+    }
+  } else if (call->is_CallStaticJava()) {
+    // Call nodes could be of different types:
+    //
+    // 1. CallDynamicJavaNode (what happened during call is unknown):
+    //
+    //    - mapped to GlobalEscape JavaObject node if oop is returned;
+    //
+    //    - all oop arguments are escaping globally;
+    //
+    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
+    //
+    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
+    //
+    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
+    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
+    //      during call is returned;
+    //    - mapped to ArgEscape LocalVar node pointing to object arguments
+    //      which are returned and do not escape during the call;
+    //
+    //    - oop arguments escaping status is defined by bytecode analysis;
+    //
+    // For a static call, we know exactly what method is being called.
+    // Use bytecode estimator to record whether the call's return value escapes.
+    ciMethod* meth = call->as_CallJava()->method();
+    if (meth == NULL) {
+      const char* name = call->as_CallStaticJava()->_name;
+      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
+      // Returns a newly allocated unescaped object.
+      add_java_object(call, PointsToNode::NoEscape);
+      ptnode_adr(call_idx)->set_scalar_replaceable(false);
     } else {
-      f->set_has_unknown_ptr();
+      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
+      call_analyzer->copy_dependencies(_compile->dependencies());
+      if (call_analyzer->is_return_allocated()) {
+        // Returns a newly allocated unescaped object; simply
+        // update dependency information.
+        // Mark it as NoEscape so that objects referenced by
+        // its fields will be marked as NoEscape at least.
+        add_java_object(call, PointsToNode::NoEscape);
+        ptnode_adr(call_idx)->set_scalar_replaceable(false);
+      } else {
+        // Determine whether any arguments are returned.
+        const TypeTuple* d = call->tf()->domain();
+        bool ret_arg = false;
+        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
+          if (d->field_at(i)->isa_ptr() != NULL &&
+              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
+            ret_arg = true;
+            break;
+          }
+        }
+        if (ret_arg) {
+          add_local_var(call, PointsToNode::ArgEscape);
+        } else {
+          // Returns unknown object.
+          map_ideal_node(call, phantom_obj);
+        }
+      }
+    }
+  } else {
+    // Another type of call; assume the worst case:
+    // returned value is unknown and globally escapes.
+    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
+    map_ideal_node(call, phantom_obj);
+  }
+}
+
+void ConnectionGraph::process_call_arguments(CallNode *call) {
+    bool is_arraycopy = false;
+    switch (call->Opcode()) {
+#ifdef ASSERT
+    case Op_Allocate:
+    case Op_AllocateArray:
+    case Op_Lock:
+    case Op_Unlock:
+      assert(false, "should be done already");
+      break;
+#endif
+    case Op_CallLeafNoFP:
+      is_arraycopy = (call->as_CallLeaf()->_name != NULL &&
+                      strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);
+      // fall through
+    case Op_CallLeaf: {
+      // Stub calls: objects do not escape but they are not scalar replaceable.
+      // Adjust escape state for outgoing arguments.
+      const TypeTuple * d = call->tf()->domain();
+      bool src_has_oops = false;
+      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
+        const Type* at = d->field_at(i);
+        Node *arg = call->in(i);
+        const Type *aat = _igvn->type(arg);
+        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
+          continue;
+        if (arg->is_AddP()) {
+          //
+          // The inline_native_clone() case when the arraycopy stub is called
+          // after the allocation before Initialize and CheckCastPP nodes.
+          // Or normal arraycopy for object arrays case.
+          //
+          // Set AddP's base (Allocate) as not scalar replaceable since
+          // pointer to the base (with offset) is passed as argument.
+          //
+          arg = get_addp_base(arg);
+        }
+        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
+        assert(arg_ptn != NULL, "should be registered");
+        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
+        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
+          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
+                 aat->isa_ptr() != NULL, "expecting a Ptr");
+          bool arg_has_oops = aat->isa_oopptr() &&
+                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
+                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
+          if (i == TypeFunc::Parms) {
+            src_has_oops = arg_has_oops;
+          }
+          //
+          // src or dst could be j.l.Object when the other is a basic type array:
+          //
+          //   arraycopy(char[],0,Object*,0,size);
+          //   arraycopy(Object*,0,char[],0,size);
+          //
+          // Don't add edges in such cases.
+          //
+          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
+                                       arg_has_oops && (i > TypeFunc::Parms);
+#ifdef ASSERT
+          if (!(is_arraycopy ||
+                call->as_CallLeaf()->_name != NULL &&
+                (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
+                 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
+          ) {
+            call->dump();
+            assert(false, "EA: unexpected CallLeaf");
+          }
+#endif
+          // Always process arraycopy's destination object since
+          // we need to add all possible edges to references in
+          // source object.
+          if (arg_esc >= PointsToNode::ArgEscape &&
+              !arg_is_arraycopy_dest) {
+            continue;
+          }
+          set_escape_state(arg_ptn, PointsToNode::ArgEscape);
+          if (arg_is_arraycopy_dest) {
+            Node* src = call->in(TypeFunc::Parms);
+            if (src->is_AddP()) {
+              src = get_addp_base(src);
+            }
+            PointsToNode* src_ptn = ptnode_adr(src->_idx);
+            assert(src_ptn != NULL, "should be registered");
+            if (arg_ptn != src_ptn) {
+              // Special arraycopy edge:
+              // A destination object's field can't have the source object
+              // as base since the objects' escape states are not related.
+              // Only the escape state of the destination object's fields affects
+              // the escape state of fields in the source object.
+              add_arraycopy(call, PointsToNode::ArgEscape, src_ptn, arg_ptn);
+            }
+          }
+        }
+      }
+      break;
+    }
+    case Op_CallStaticJava: {
+      // For a static call, we know exactly what method is being called.
+      // Use bytecode estimator to record the call's escape effects
+#ifdef ASSERT
+      const char* name = call->as_CallStaticJava()->_name;
+      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
+#endif
+      ciMethod* meth = call->as_CallJava()->method();
+      BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
+      // fall-through if not a Java method or no analyzer information
+      if (call_analyzer != NULL) {
+        PointsToNode* call_ptn = ptnode_adr(call->_idx);
+        const TypeTuple* d = call->tf()->domain();
+        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
+          const Type* at = d->field_at(i);
+          int k = i - TypeFunc::Parms;
+          Node* arg = call->in(i);
+          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
+          if (at->isa_ptr() != NULL &&
+              call_analyzer->is_arg_returned(k)) {
+            // The call returns arguments.
+            if (call_ptn != NULL) { // Is call's result used?
+              assert(call_ptn->is_LocalVar(), "node should be registered");
+              assert(arg_ptn != NULL, "node should be registered");
+              add_edge(call_ptn, arg_ptn);
+            }
+          }
+          if (at->isa_oopptr() != NULL &&
+              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
+            if (!call_analyzer->is_arg_stack(k)) {
+              // The argument global escapes
+              set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
+            } else {
+              set_escape_state(arg_ptn, PointsToNode::ArgEscape);
+              if (!call_analyzer->is_arg_local(k)) {
+                // The argument itself doesn't escape, but any fields might
+                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
+              }
+            }
+          }
+        }
+        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
+          // The call returns arguments.
+          assert(call_ptn->edge_count() > 0, "sanity");
+          if (!call_analyzer->is_return_local()) {
+            // Also returns an unknown object.
+            add_edge(call_ptn, phantom_obj);
+          }
+        }
+        break;
+      }
+    }
+    default: {
+      // Fall through here if this is not a Java method, there is no analyzer
+      // information, or it is some other type of call; assume the worst case:
+      // all arguments globally escape.
+      const TypeTuple* d = call->tf()->domain();
+      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
+        const Type* at = d->field_at(i);
+        if (at->isa_oopptr() != NULL) {
+          Node* arg = call->in(i);
+          if (arg->is_AddP()) {
+            arg = get_addp_base(arg);
+          }
+          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
+          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
+        }
+      }
     }
   }
-  add_edge(f, to_i, PointsToNode::PointsToEdge);
+}
+
+
+// Finish Graph construction.
+bool ConnectionGraph::complete_connection_graph(
+                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
+                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
+                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
+                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
+  // Normally only 1-3 passes needed to build Connection Graph depending
+  // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
+  // Set the limit to 20 to catch the situation when something went wrong and
+  // bail out of Escape Analysis.
+  // Also limit build time to 30 sec (60 in debug VM).
+#define CG_BUILD_ITER_LIMIT 20
+#ifdef ASSERT
+#define CG_BUILD_TIME_LIMIT 60.0
+#else
+#define CG_BUILD_TIME_LIMIT 30.0
+#endif
+
+  // Propagate GlobalEscape and ArgEscape escape states and check that
+  // we still have non-escaping objects. The method pushes onto _worklist
+  // Field nodes which reference phantom_object.
+  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
+    return false; // Nothing to do.
+  }
+  // Now propagate references to all JavaObject nodes.
+  int java_objects_length = java_objects_worklist.length();
+  elapsedTimer time;
+  int new_edges = 1;
+  int iterations = 0;
+  do {
+    while ((new_edges > 0) &&
+          (iterations++   < CG_BUILD_ITER_LIMIT) &&
+          (time.seconds() < CG_BUILD_TIME_LIMIT)) {
+      time.start();
+      new_edges = 0;
+      // Propagate references to phantom_object for nodes pushed on _worklist
+      // by find_non_escaped_objects() and find_field_value().
+      new_edges += add_java_object_edges(phantom_obj, false);
+      for (int next = 0; next < java_objects_length; ++next) {
+        JavaObjectNode* ptn = java_objects_worklist.at(next);
+        new_edges += add_java_object_edges(ptn, true);
+      }
+      if (new_edges > 0) {
+        // Update escape states on each iteration if graph was updated.
+        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
+          return false; // Nothing to do.
+        }
+      }
+      time.stop();
+    }
+    if ((iterations     < CG_BUILD_ITER_LIMIT) &&
+        (time.seconds() < CG_BUILD_TIME_LIMIT)) {
+      time.start();
+      // Find fields which have unknown value.
+      int fields_length = oop_fields_worklist.length();
+      for (int next = 0; next < fields_length; next++) {
+        FieldNode* field = oop_fields_worklist.at(next);
+        if (field->edge_count() == 0) {
+          new_edges += find_field_value(field);
+          // This code may add new edges to phantom_object.
+          // Another cycle is needed to propagate references to phantom_object.
+        }
+      }
+      time.stop();
+    } else {
+      new_edges = 0; // Bailout
+    }
+  } while (new_edges > 0);
+
+  // Bailout if passed limits.
+  if ((iterations     >= CG_BUILD_ITER_LIMIT) ||
+      (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
+    Compile* C = _compile;
+    if (C->log() != NULL) {
+      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
+      C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time");
+      C->log()->end_elem(" limit'");
+    }
+    assert(false, err_msg("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
+           time.seconds(), iterations, nodes_size(), ptnodes_worklist.length()));
+    // Possible infinite build_connection_graph loop,
+    // bailout (no changes to ideal graph were made).
+    return false;
+  }
+#ifdef ASSERT
+  if (Verbose && PrintEscapeAnalysis) {
+    tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d",
+                  iterations, nodes_size(), ptnodes_worklist.length());
+  }
+#endif
+
+#undef CG_BUILD_ITER_LIMIT
+#undef CG_BUILD_TIME_LIMIT
+
+  // Find fields initialized by NULL for non-escaping Allocations.
+  int non_escaped_length = non_escaped_worklist.length();
+  for (int next = 0; next < non_escaped_length; next++) {
+    JavaObjectNode* ptn = non_escaped_worklist.at(next);
+    PointsToNode::EscapeState es = ptn->escape_state();
+    assert(es <= PointsToNode::ArgEscape, "sanity");
+    if (es == PointsToNode::NoEscape) {
+      if (find_init_values(ptn, null_obj, _igvn) > 0) {
+        // Adding references to NULL object does not change escape states
+        // since it does not escape. Also no fields are added to NULL object.
+        add_java_object_edges(null_obj, false);
+      }
+    }
+    Node* n = ptn->ideal_node();
+    if (n->is_Allocate()) {
+      // The object allocated by this Allocate node will never be
+      // seen by another thread. Mark it so that when it is
+      // expanded no MemBarStoreStore is added.
+      InitializeNode* ini = n->as_Allocate()->initialization();
+      if (ini != NULL)
+        ini->set_does_not_escape();
+    }
+  }
+  return true; // Finished graph construction.
+}
+
+// Propagate GlobalEscape and ArgEscape escape states to all nodes
+// and check that we still have non-escaping java objects.
+bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
+                                               GrowableArray<JavaObjectNode*>& non_escaped_worklist) {
+  GrowableArray<PointsToNode*> escape_worklist;
+  // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
+  int ptnodes_length = ptnodes_worklist.length();
+  for (int next = 0; next < ptnodes_length; ++next) {
+    PointsToNode* ptn = ptnodes_worklist.at(next);
+    if (ptn->escape_state() >= PointsToNode::ArgEscape ||
+        ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
+      escape_worklist.push(ptn);
+    }
+  }
+  // Set escape states to referenced nodes (edges list).
+  while (escape_worklist.length() > 0) {
+    PointsToNode* ptn = escape_worklist.pop();
+    PointsToNode::EscapeState es  = ptn->escape_state();
+    PointsToNode::EscapeState field_es = ptn->fields_escape_state();
+    if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
+        es >= PointsToNode::ArgEscape) {
+      // GlobalEscape or ArgEscape state of field means it has unknown value.
+      if (add_edge(ptn, phantom_obj)) {
+        // New edge was added
+        add_field_uses_to_worklist(ptn->as_Field());
+      }
+    }
+    for (EdgeIterator i(ptn); i.has_next(); i.next()) {
+      PointsToNode* e = i.get();
+      if (e->is_Arraycopy()) {
+        assert(ptn->arraycopy_dst(), "sanity");
+        // Propagate only fields escape state through arraycopy edge.
+        if (e->fields_escape_state() < field_es) {
+          set_fields_escape_state(e, field_es);
+          escape_worklist.push(e);
+        }
+      } else if (es >= field_es) {
+        // fields_escape_state is also set to 'es' if it is less than 'es'.
+        if (e->escape_state() < es) {
+          set_escape_state(e, es);
+          escape_worklist.push(e);
+        }
+      } else {
+        // Propagate field escape state.
+        bool es_changed = false;
+        if (e->fields_escape_state() < field_es) {
+          set_fields_escape_state(e, field_es);
+          es_changed = true;
+        }
+        if ((e->escape_state() < field_es) &&
+            e->is_Field() && ptn->is_JavaObject() &&
+            e->as_Field()->is_oop()) {
+          // Change escape state of referenced fields.
+          set_escape_state(e, field_es);
+          es_changed = true;
+        } else if (e->escape_state() < es) {
+          set_escape_state(e, es);
+          es_changed = true;
+        }
+        if (es_changed) {
+          escape_worklist.push(e);
+        }
+      }
+    }
+  }
+  // Remove escaped objects from non_escaped list.
+  for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {
+    JavaObjectNode* ptn = non_escaped_worklist.at(next);
+    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
+      non_escaped_worklist.delete_at(next);
+    }
+    if (ptn->escape_state() == PointsToNode::NoEscape) {
+      // Find fields in non-escaped allocations which have unknown value.
+      find_init_values(ptn, phantom_obj, NULL);
+    }
+  }
+  return (non_escaped_worklist.length() > 0);
+}
+
+// Add all references to JavaObject node by walking over all uses.
+int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
+  int new_edges = 0;
+  if (populate_worklist) {
+    // Populate _worklist by uses of jobj's uses.
+    for (UseIterator i(jobj); i.has_next(); i.next()) {
+      PointsToNode* use = i.get();
+      if (use->is_Arraycopy())
+        continue;
+      add_uses_to_worklist(use);
+      if (use->is_Field() && use->as_Field()->is_oop()) {
+        // Put on worklist all field's uses (loads) and
+        // related field nodes (same base and offset).
+        add_field_uses_to_worklist(use->as_Field());
+      }
+    }
+  }
+  while(_worklist.length() > 0) {
+    PointsToNode* use = _worklist.pop();
+    if (PointsToNode::is_base_use(use)) {
+      // Add reference from jobj to field and from field to jobj (field's base).
+      use = PointsToNode::get_use_node(use)->as_Field();
+      if (add_base(use->as_Field(), jobj)) {
+        new_edges++;
+      }
+      continue;
+    }
+    assert(!use->is_JavaObject(), "sanity");
+    if (use->is_Arraycopy()) {
+      if (jobj == null_obj) // NULL object does not have field edges
+        continue;
+      // Added edge from Arraycopy node to arraycopy's source java object
+      if (add_edge(use, jobj)) {
+        jobj->set_arraycopy_src();
+        new_edges++;
+      }
+      // and stop here.
+      continue;
+    }
+    if (!add_edge(use, jobj))
+      continue; // No new edge added, there was such edge already.
+    new_edges++;
+    if (use->is_LocalVar()) {
+      add_uses_to_worklist(use);
+      if (use->arraycopy_dst()) {
+        for (EdgeIterator i(use); i.has_next(); i.next()) {
+          PointsToNode* e = i.get();
+          if (e->is_Arraycopy()) {
+            if (jobj == null_obj) // NULL object does not have field edges
+              continue;
+            // Add edge from arraycopy's destination java object to Arraycopy node.
+            if (add_edge(jobj, e)) {
+              new_edges++;
+              jobj->set_arraycopy_dst();
+            }
+          }
+        }
+      }
+    } else {
+      // Added a new edge to a Field node: jobj is now among the values stored in that field.
+      // Put on worklist all field's uses (loads) and
+      // related field nodes (same base and offset).
+      add_field_uses_to_worklist(use->as_Field());
+    }
+  }
+  return new_edges;
+}
+
+// Put on worklist all related field nodes.
+void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
+  assert(field->is_oop(), "sanity");
+  int offset = field->offset();
+  add_uses_to_worklist(field);
+  // Loop over all bases of this field and push on worklist Field nodes
+  // with the same offset and base (since they may reference the same field).
+  for (BaseIterator i(field); i.has_next(); i.next()) {
+    PointsToNode* base = i.get();
+    add_fields_to_worklist(field, base);
+    // Check if the base was the source object of an arraycopy and go over
+    // the arraycopy's destination objects since values stored to a field of
+    // the source object are accessible by uses (loads) of fields of the
+    // destination objects.
+    if (base->arraycopy_src()) {
+      for (UseIterator j(base); j.has_next(); j.next()) {
+        PointsToNode* arycp = j.get();
+        if (arycp->is_Arraycopy()) {
+          for (UseIterator k(arycp); k.has_next(); k.next()) {
+            PointsToNode* abase = k.get();
+            if (abase->arraycopy_dst() && abase != base) {
+              // Look for the same arraycopy reference.
+              add_fields_to_worklist(field, abase);
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+// Put on worklist all related field nodes.
+void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
+  int offset = field->offset();
+  if (base->is_LocalVar()) {
+    for (UseIterator j(base); j.has_next(); j.next()) {
+      PointsToNode* f = j.get();
+      if (PointsToNode::is_base_use(f)) { // Field
+        f = PointsToNode::get_use_node(f);
+        if (f == field || !f->as_Field()->is_oop())
+          continue;
+        int offs = f->as_Field()->offset();
+        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
+          add_to_worklist(f);
+        }
+      }
+    }
+  } else {
+    assert(base->is_JavaObject(), "sanity");
+    if (// Skip phantom_object since it is only used to indicate that
+        // this field's content globally escapes.
+        (base != phantom_obj) &&
+        // NULL object node does not have fields.
+        (base != null_obj)) {
+      for (EdgeIterator i(base); i.has_next(); i.next()) {
+        PointsToNode* f = i.get();
+        // Skip arraycopy edge since store to destination object field
+        // does not update value in source object field.
+        if (f->is_Arraycopy()) {
+          assert(base->arraycopy_dst(), "sanity");
+          continue;
+        }
+        if (f == field || !f->as_Field()->is_oop())
+          continue;
+        int offs = f->as_Field()->offset();
+        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
+          add_to_worklist(f);
+        }
+      }
+    }
+  }
+}
+
+// Find fields which have unknown value.
+int ConnectionGraph::find_field_value(FieldNode* field) {
+  // Escaped fields should have init value already.
+  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
+  int new_edges = 0;
+  for (BaseIterator i(field); i.has_next(); i.next()) {
+    PointsToNode* base = i.get();
+    if (base->is_JavaObject()) {
+      // Skip Allocate's fields which will be processed later.
+      if (base->ideal_node()->is_Allocate())
+        return 0;
+      assert(base == null_obj, "only NULL ptr base expected here");
+    }
+  }
+  if (add_edge(field, phantom_obj)) {
+    // New edge was added
+    new_edges++;
+    add_field_uses_to_worklist(field);
+  }
+  return new_edges;
+}
+
+// Find fields initializing values for allocations.
+int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
+  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
+  int new_edges = 0;
+  Node* alloc = pta->ideal_node();
+  if (init_val == phantom_obj) {
+    // Do nothing for Allocate nodes since their fields' values are "known".
+    if (alloc->is_Allocate())
+      return 0;
+    assert(alloc->as_CallStaticJava(), "sanity");
+#ifdef ASSERT
+    if (alloc->as_CallStaticJava()->method() == NULL) {
+      const char* name = alloc->as_CallStaticJava()->_name;
+      assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
+    }
+#endif
+    // A non-escaped allocation returned from a Java or runtime call has
+    // unknown values in its fields.
+    for (EdgeIterator i(pta); i.has_next(); i.next()) {
+      PointsToNode* ptn = i.get();
+      if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
+        if (add_edge(ptn, phantom_obj)) {
+          // New edge was added
+          new_edges++;
+          add_field_uses_to_worklist(ptn->as_Field());
+        }
+      }
+    }
+    return new_edges;
+  }
+  assert(init_val == null_obj, "sanity");
+  // Do nothing for Call nodes since their fields' values are unknown.
+  if (!alloc->is_Allocate())
+    return 0;
+
+  InitializeNode* ini = alloc->as_Allocate()->initialization();
+  Compile* C = _compile;
+  bool visited_bottom_offset = false;
+  GrowableArray<int> offsets_worklist;
+
+  // Check if an oop field's initializing value is recorded and add
+  // a corresponding NULL as the field's value if it is not recorded.
+  // The Connection Graph does not record a default initialization by NULL
+  // which is captured by the Initialize node.
+  //
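+  // Illustrative example (hypothetical field/variable names):
+  //    Point p = new Point();   // p.next is never stored into
+  //    if (p.next == q) ...     // relies on the recorded p.next -> NULL edge
+  //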
+  for (EdgeIterator i(pta); i.has_next(); i.next()) {
+    PointsToNode* ptn = i.get(); // Field (AddP)
+    if (!ptn->is_Field() || !ptn->as_Field()->is_oop())
+      continue; // Not oop field
+    int offset = ptn->as_Field()->offset();
+    if (offset == Type::OffsetBot) {
+      if (!visited_bottom_offset) {
+        // OffsetBot is used to reference an array's element;
+        // always add a reference to NULL to all Field nodes since we don't
+        // know which element is referenced.
+        if (add_edge(ptn, null_obj)) {
+          // New edge was added
+          new_edges++;
+          add_field_uses_to_worklist(ptn->as_Field());
+          visited_bottom_offset = true;
+        }
+      }
+    } else {
+      // Check only oop fields.
+      const Type* adr_type = ptn->ideal_node()->as_AddP()->bottom_type();
+      if (adr_type->isa_rawptr()) {
+#ifdef ASSERT
+        // Raw pointers are used for initializing stores, so skip them
+        // since they should be recorded already.
+        Node* base = get_addp_base(ptn->ideal_node());
+        assert(adr_type->isa_rawptr() && base->is_Proj() &&
+               (base->in(0) == alloc),"unexpected pointer type");
+#endif
+        continue;
+      }
+      if (!offsets_worklist.contains(offset)) {
+        offsets_worklist.append(offset);
+        Node* value = NULL;
+        if (ini != NULL) {
+          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
+          Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
+          if (store != NULL && store->is_Store()) {
+            value = store->in(MemNode::ValueIn);
+          } else {
+            // There could be initializing stores which follow allocation.
+            // For example, a volatile field store is not collected
+            // by Initialize node.
+            //
+            // Need to check for dependent loads to separate such stores from
+            // stores which follow loads. For now, add the initial value NULL
+            // so that the pointer-compare optimization works correctly.
+          }
+        }
+        if (value == NULL) {
+          // A field's initializing value was not recorded. Add NULL.
+          if (add_edge(ptn, null_obj)) {
+            // New edge was added
+            new_edges++;
+            add_field_uses_to_worklist(ptn->as_Field());
+          }
+        }
+      }
+    }
+  }
+  return new_edges;
 }
 
-void ConnectionGraph::add_deferred_edge(uint from_i, uint to_i) {
-  PointsToNode *f = ptnode_adr(from_i);
-  PointsToNode *t = ptnode_adr(to_i);
+// Adjust scalar_replaceable state after Connection Graph is built.
+void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
+  // Search for non-escaping objects which are not scalar replaceable
+  // and mark them to propagate the state to referenced objects.
+
+  // 1. An object is not scalar replaceable if the field into which it is
+  // stored has unknown offset (stored into unknown element of an array).
+  //
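+  //    e.g. (illustrative):  a[i] = obj;   // stored into an unknown element i
+  //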
+  for (UseIterator i(jobj); i.has_next(); i.next()) {
+    PointsToNode* use = i.get();
+    assert(!use->is_Arraycopy(), "sanity");
+    if (use->is_Field()) {
+      FieldNode* field = use->as_Field();
+      assert(field->is_oop() && field->scalar_replaceable() &&
+             field->fields_escape_state() == PointsToNode::NoEscape, "sanity");
+      if (field->offset() == Type::OffsetBot) {
+        jobj->set_scalar_replaceable(false);
+        return;
+      }
+    }
+    assert(use->is_Field() || use->is_LocalVar(), "sanity");
+    // 2. An object is not scalar replaceable if it is merged with other objects.
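+    //    e.g. (illustrative):  p = cond ? obj1 : obj2;  // obj1 and obj2 merge at a Phi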
+    for (EdgeIterator j(use); j.has_next(); j.next()) {
+      PointsToNode* ptn = j.get();
+      if (ptn->is_JavaObject() && ptn != jobj) {
+        // Mark all objects.
+        jobj->set_scalar_replaceable(false);
+        ptn->set_scalar_replaceable(false);
+      }
+    }
+    if (!jobj->scalar_replaceable()) {
+      return;
+    }
+  }
+
+  for (EdgeIterator j(jobj); j.has_next(); j.next()) {
+    // Non-escaping object node should point only to field nodes.
+    FieldNode* field = j.get()->as_Field();
+    int offset = field->as_Field()->offset();
+
+    // 3. An object is not scalar replaceable if it has a field with unknown
+    // offset (an array's element is accessed in a loop).
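+    //    e.g. (illustrative):  for (...) { v = p[i]; }  // element offset unknown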
+    if (offset == Type::OffsetBot) {
+      jobj->set_scalar_replaceable(false);
+      return;
+    }
+    // 4. Currently an object is not scalar replaceable if a LoadStore node
+    // accesses its field since the field's value is unknown after it.
+    //
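+    // (LoadStore nodes come from atomic operations such as compareAndSwap.)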
+    Node* n = field->ideal_node();
+    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+      if (n->fast_out(i)->is_LoadStore()) {
+        jobj->set_scalar_replaceable(false);
+        return;
+      }
+    }
+
+    // 5. Or the address may point to more than one object. This may produce
+    // a false positive result (marking the object not scalar replaceable)
+    // since the flow-insensitive escape analysis can't separate
+    // the case where stores overwrite the field's value from the case
+    // where stores happen on different control branches.
+    //
+    // Note: it will disable scalar replacement in some cases:
+    //
+    //    Point p[] = new Point[1];
+    //    p[0] = new Point(); // Will not be scalar replaced
+    //
+    // but it will save us from incorrect optimizations in cases like the following:
+    //
+    //    Point p[] = new Point[1];
+    //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
+    //
+    if (field->base_count() > 1) {
+      for (BaseIterator i(field); i.has_next(); i.next()) {
+        PointsToNode* base = i.get();
+        // Don't take into account LocalVar nodes which
+        // may point to only one object, which should also
+        // be this field's base by now.
+        if (base->is_JavaObject() && base != jobj) {
+          // Mark all bases.
+          jobj->set_scalar_replaceable(false);
+          base->set_scalar_replaceable(false);
+        }
+      }
+    }
+  }
+}
+
+#ifdef ASSERT
+void ConnectionGraph::verify_connection_graph(
+                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
+                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
+                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
+                         GrowableArray<Node*>& addp_worklist) {
+  // Verify that graph is complete - no new edges could be added.
+  int java_objects_length = java_objects_worklist.length();
+  int non_escaped_length  = non_escaped_worklist.length();
+  int new_edges = 0;
+  for (int next = 0; next < java_objects_length; ++next) {
+    JavaObjectNode* ptn = java_objects_worklist.at(next);
+    new_edges += add_java_object_edges(ptn, true);
+  }
+  assert(new_edges == 0, "graph was not complete");
+  // Verify that escape state is final.
+  int length = non_escaped_worklist.length();
+  find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist);
+  assert((non_escaped_length == non_escaped_worklist.length()) &&
+         (non_escaped_length == length) &&
+         (_worklist.length() == 0), "escape state was not final");
+
+  // Verify fields information.
+  int addp_length = addp_worklist.length();
+  for (int next = 0; next < addp_length; ++next ) {
+    Node* n = addp_worklist.at(next);
+    FieldNode* field = ptnode_adr(n->_idx)->as_Field();
+    if (field->is_oop()) {
+      // Verify that field has all bases
+      Node* base = get_addp_base(n);
+      PointsToNode* ptn = ptnode_adr(base->_idx);
+      if (ptn->is_JavaObject()) {
+        assert(field->has_base(ptn->as_JavaObject()), "sanity");
+      } else {
+        assert(ptn->is_LocalVar(), "sanity");
+        for (EdgeIterator i(ptn); i.has_next(); i.next()) {
+          PointsToNode* e = i.get();
+          if (e->is_JavaObject()) {
+            assert(field->has_base(e->as_JavaObject()), "sanity");
+          }
+        }
+      }
+      // Verify that all fields have initializing values.
+      if (field->edge_count() == 0) {
+        field->dump();
+        assert(field->edge_count() > 0, "sanity");
+      }
+    }
+  }
+}
+#endif
+
+// Optimize ideal graph.
+void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
+                                           GrowableArray<Node*>& storestore_worklist) {
+  Compile* C = _compile;
+  PhaseIterGVN* igvn = _igvn;
+  if (EliminateLocks) {
+    // Mark locks before changing ideal graph.
+    int cnt = C->macro_count();
+    for (int i = 0; i < cnt; i++) {
+      Node *n = C->macro_node(i);
+      if (n->is_AbstractLock()) { // Lock and Unlock nodes
+        AbstractLockNode* alock = n->as_AbstractLock();
+        if (!alock->is_non_esc_obj()) {
+          if (not_global_escape(alock->obj_node())) {
+            assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
+            // The lock could be marked eliminated by the lock coarsening
+            // code during the first IGVN pass before EA. Replace the coarsened
+            // flag to eliminate all associated locks/unlocks.
+            alock->set_non_esc_obj();
+          }
+        }
+      }
+    }
+  }
+
+  if (OptimizePtrCompare) {
+    // Add ConI(#CC_GT) and ConI(#CC_EQ).
+    _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
+    _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
+    // Optimize object comparisons.
+    while (ptr_cmp_worklist.length() != 0) {
+      Node *n = ptr_cmp_worklist.pop();
+      Node *res = optimize_ptr_compare(n);
+      if (res != NULL) {
+#ifndef PRODUCT
+        if (PrintOptimizePtrCompare) {
+          tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
+          if (Verbose) {
+            n->dump(1);
+          }
+        }
+#endif
+        igvn->replace_node(n, res);
+      }
+    }
+    // cleanup
+    if (_pcmp_neq->outcnt() == 0)
+      igvn->hash_delete(_pcmp_neq);
+    if (_pcmp_eq->outcnt()  == 0)
+      igvn->hash_delete(_pcmp_eq);
+  }
+
+  // For MemBarStoreStore nodes added in library_call.cpp, check
+  // escape status of associated AllocateNode and optimize out
+  // MemBarStoreStore node if the allocated object never escapes.
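+  // (If the allocated object never escapes, no other thread can observe the
+  // initialization of its fields, so the StoreStore barrier is not needed and
+  // only a MemBarCPUOrder is kept to preserve memory graph ordering.)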
+  while (storestore_worklist.length() != 0) {
+    Node *n = storestore_worklist.pop();
+    MemBarStoreStoreNode *storestore = n->as_MemBarStoreStore();
+    Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
+    assert (alloc->is_Allocate(), "storestore should point to AllocateNode");
+    if (not_global_escape(alloc)) {
+      MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
+      mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
+      mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
+      igvn->register_new_node_with_optimizer(mb);
+      igvn->replace_node(storestore, mb);
+    }
+  }
+}
+
+// Optimize object comparisons.
+Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
+  assert(OptimizePtrCompare, "sanity");
+  PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
+  PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
+  JavaObjectNode* jobj1 = unique_java_object(n->in(1));
+  JavaObjectNode* jobj2 = unique_java_object(n->in(2));
+  assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
+  assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
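+  // The compare can be folded only when escape analysis proves the inputs
+  // always reference the same object or can never reference the same object.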
 
-  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
-  assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of Deferred edge");
-  assert(t->node_type() == PointsToNode::LocalVar || t->node_type() == PointsToNode::Field, "invalid destination of Deferred edge");
-  // don't add a self-referential edge, this can occur during removal of
-  // deferred edges
-  if (from_i != to_i)
-    add_edge(f, to_i, PointsToNode::DeferredEdge);
+  // Check simple cases first.
+  if (jobj1 != NULL) {
+    if (jobj1->escape_state() == PointsToNode::NoEscape) {
+      if (jobj1 == jobj2) {
+        // Comparing the same non-escaping object.
+        return _pcmp_eq;
+      }
+      Node* obj = jobj1->ideal_node();
+      // Comparing a non-escaping allocation.
+      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
+          !ptn2->points_to(jobj1)) {
+        return _pcmp_neq; // This includes nullness check.
+      }
+    }
+  }
+  if (jobj2 != NULL) {
+    if (jobj2->escape_state() == PointsToNode::NoEscape) {
+      Node* obj = jobj2->ideal_node();
+      // Comparing a non-escaping allocation.
+      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
+          !ptn1->points_to(jobj2)) {
+        return _pcmp_neq; // This includes nullness check.
+      }
+    }
+  }
+  if (jobj1 != NULL && jobj1 != phantom_obj &&
+      jobj2 != NULL && jobj2 != phantom_obj &&
+      jobj1->ideal_node()->is_Con() &&
+      jobj2->ideal_node()->is_Con()) {
+    // Comparing Klass or String constants. Need to be careful with
+    // compressed pointers - compare types of ConN and ConP instead of nodes.
+    const Type* t1 = jobj1->ideal_node()->bottom_type()->make_ptr();
+    const Type* t2 = jobj2->ideal_node()->bottom_type()->make_ptr();
+    assert(t1 != NULL && t2 != NULL, "sanity");
+    if (t1->make_ptr() == t2->make_ptr()) {
+      return _pcmp_eq;
+    } else {
+      return _pcmp_neq;
+    }
+  }
+  if (ptn1->meet(ptn2)) {
+    return NULL; // Sets are not disjoint
+  }
+
+  // Sets are disjoint.
+  bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
+  bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
+  bool set1_has_null_ptr    = ptn1->points_to(null_obj);
+  bool set2_has_null_ptr    = ptn2->points_to(null_obj);
+  if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
+      (set2_has_unknown_ptr && set1_has_null_ptr)) {
+    // The unknown object on one side may be NULL, so nothing can be concluded.
+    return NULL;
+  }
+
+  // Disjointness by itself is not sufficient since
+  // alias analysis is not complete for escaped objects.
+  // Disjoint sets are definitely unrelated only when
+  // at least one set contains only non-escaping allocations.
+  if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
+    if (ptn1->non_escaping_allocation()) {
+      return _pcmp_neq;
+    }
+  }
+  if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
+    if (ptn2->non_escaping_allocation()) {
+      return _pcmp_neq;
+    }
+  }
+  return NULL;
+}
+
+// Connection Graph construction functions.
+
+void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
+  PointsToNode* ptadr = _nodes.at(n->_idx);
+  if (ptadr != NULL) {
+    assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
+    return;
+  }
+  Compile* C = _compile;
+  ptadr = new (C->comp_arena()) LocalVarNode(C, n, es);
+  _nodes.at_put(n->_idx, ptadr);
+}
+
+void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
+  PointsToNode* ptadr = _nodes.at(n->_idx);
+  if (ptadr != NULL) {
+    assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
+    return;
+  }
+  Compile* C = _compile;
+  ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es);
+  _nodes.at_put(n->_idx, ptadr);
+}
+
+void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
+  PointsToNode* ptadr = _nodes.at(n->_idx);
+  if (ptadr != NULL) {
+    assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
+    return;
+  }
+  Compile* C = _compile;
+  bool is_oop = is_oop_field(n, offset);
+  FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop);
+  _nodes.at_put(n->_idx, field);
+}
+
+void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
+                                    PointsToNode* src, PointsToNode* dst) {
+  assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
+  assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
+  PointsToNode* ptadr = _nodes.at(n->_idx);
+  if (ptadr != NULL) {
+    assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
+    return;
+  }
+  Compile* C = _compile;
+  ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es);
+  _nodes.at_put(n->_idx, ptadr);
+  // Add edge from arraycopy node to source object.
+  (void)add_edge(ptadr, src);
+  src->set_arraycopy_src();
+  // Add edge from destination object to arraycopy node.
+  (void)add_edge(dst, ptadr);
+  dst->set_arraycopy_dst();
 }
 
+bool ConnectionGraph::is_oop_field(Node* n, int offset) {
+  const Type* adr_type = n->as_AddP()->bottom_type();
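+  // Start with a non-oop type and refine it below only when the address is
+  // known to reference an object field or an object-array element.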
+  BasicType bt = T_INT;
+  if (offset == Type::OffsetBot) {
+    // Check only oop fields.
+    if (!adr_type->isa_aryptr() ||
+        (adr_type->isa_aryptr()->klass() == NULL) ||
+         adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
+      // OffsetBot is used to reference array's element. Ignore first AddP.
+      if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
+        bt = T_OBJECT;
+      }
+    }
+  } else if (offset != oopDesc::klass_offset_in_bytes()) {
+    if (adr_type->isa_instptr()) {
+      ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
+      if (field != NULL) {
+        bt = field->layout_type();
+      } else {
+        // Ignore non field load (for example, klass load)
+      }
+    } else if (adr_type->isa_aryptr()) {
+      if (offset == arrayOopDesc::length_offset_in_bytes()) {
+        // Ignore array length load.
+      } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
+        // Ignore first AddP.
+      } else {
+        const Type* elemtype = adr_type->isa_aryptr()->elem();
+        bt = elemtype->array_element_basic_type();
+      }
+    } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
+      // Allocation initialization, ThreadLocal field access, unsafe access
+      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+        int opcode = n->fast_out(i)->Opcode();
+        if (opcode == Op_StoreP || opcode == Op_LoadP ||
+            opcode == Op_StoreN || opcode == Op_LoadN) {
+          bt = T_OBJECT;
+        }
+      }
+    }
+  }
+  return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY);
+}
+
+// Returns the unique java object pointed to by this node, or NULL.
+JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
+  assert(!_collecting, "should not call during graph construction");
+  // If the node was created after the escape computation we can't answer.
+  uint idx = n->_idx;
+  if (idx >= nodes_size()) {
+    return NULL;
+  }
+  PointsToNode* ptn = ptnode_adr(idx);
+  if (ptn->is_JavaObject()) {
+    return ptn->as_JavaObject();
+  }
+  assert(ptn->is_LocalVar(), "sanity");
+  // Check all java objects it points to.
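+  // The answer is unique only if all of them are the same object;
+  // otherwise return NULL.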
+  JavaObjectNode* jobj = NULL;
+  for (EdgeIterator i(ptn); i.has_next(); i.next()) {
+    PointsToNode* e = i.get();
+    if (e->is_JavaObject()) {
+      if (jobj == NULL) {
+        jobj = e->as_JavaObject();
+      } else if (jobj != e) {
+        return NULL;
+      }
+    }
+  }
+  return jobj;
+}
+
+// Return true if this node points only to non-escaping allocations.
+bool PointsToNode::non_escaping_allocation() {
+  if (is_JavaObject()) {
+    Node* n = ideal_node();
+    if (n->is_Allocate() || n->is_CallStaticJava()) {
+      return (escape_state() == PointsToNode::NoEscape);
+    } else {
+      return false;
+    }
+  }
+  assert(is_LocalVar(), "sanity");
+  // Check all java objects it points to.
+  for (EdgeIterator i(this); i.has_next(); i.next()) {
+    PointsToNode* e = i.get();
+    if (e->is_JavaObject()) {
+      Node* n = e->ideal_node();
+      if ((e->escape_state() != PointsToNode::NoEscape) ||
+          !(n->is_Allocate() || n->is_CallStaticJava())) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+// Return true if we know the node does not escape globally.
+bool ConnectionGraph::not_global_escape(Node *n) {
+  assert(!_collecting, "should not call during graph construction");
+  // If the node was created after the escape computation we can't answer.
+  uint idx = n->_idx;
+  if (idx >= nodes_size()) {
+    return false;
+  }
+  PointsToNode* ptn = ptnode_adr(idx);
+  PointsToNode::EscapeState es = ptn->escape_state();
+  // If we have already computed a value, return it.
+  if (es >= PointsToNode::GlobalEscape)
+    return false;
+  if (ptn->is_JavaObject()) {
+    return true; // (es < PointsToNode::GlobalEscape);
+  }
+  assert(ptn->is_LocalVar(), "sanity");
+  // Check all java objects it points to.
+  for (EdgeIterator i(ptn); i.has_next(); i.next()) {
+    if (i.get()->escape_state() >= PointsToNode::GlobalEscape)
+      return false;
+  }
+  return true;
+}
+
+
+// Helper functions
+
+// Return true if this node is the specified java object or points to it.
+bool PointsToNode::points_to(JavaObjectNode* ptn) const {
+  if (is_JavaObject()) {
+    return (this == ptn);
+  }
+  assert(is_LocalVar(), "sanity");
+  for (EdgeIterator i(this); i.has_next(); i.next()) {
+    if (i.get() == ptn)
+      return true;
+  }
+  return false;
+}
+
+// Return true if this node and the specified node point to a common object
+// (their points-to sets intersect).
+bool PointsToNode::meet(PointsToNode* ptn) {
+  if (this == ptn) {
+    return true;
+  } else if (ptn->is_JavaObject()) {
+    return this->points_to(ptn->as_JavaObject());
+  } else if (this->is_JavaObject()) {
+    return ptn->points_to(this->as_JavaObject());
+  }
+  assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
+  int ptn_count =  ptn->edge_count();
+  for (EdgeIterator i(this); i.has_next(); i.next()) {
+    PointsToNode* this_e = i.get();
+    for (int j = 0; j < ptn_count; j++) {
+      if (this_e == ptn->edge(j))
+        return true;
+    }
+  }
+  return false;
+}
+
+#ifdef ASSERT
+// Return true if the given java object is one of this field's bases.
+bool FieldNode::has_base(JavaObjectNode* jobj) const {
+  for (BaseIterator i(this); i.has_next(); i.next()) {
+    if (i.get() == jobj)
+      return true;
+  }
+  return false;
+}
+#endif
+
 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
   const Type *adr_type = phase->type(adr);
   if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
@@ -171,286 +1982,7 @@
   return t_ptr->offset();
 }
 
-void ConnectionGraph::add_field_edge(uint from_i, uint to_i, int offset) {
-  // Don't add fields to NULL pointer.
-  if (is_null_ptr(from_i))
-    return;
-  PointsToNode *f = ptnode_adr(from_i);
-  PointsToNode *t = ptnode_adr(to_i);
-
-  assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
-  assert(f->node_type() == PointsToNode::JavaObject, "invalid destination of Field edge");
-  assert(t->node_type() == PointsToNode::Field, "invalid destination of Field edge");
-  assert (t->offset() == -1 || t->offset() == offset, "conflicting field offsets");
-  t->set_offset(offset);
-
-  add_edge(f, to_i, PointsToNode::FieldEdge);
-}
-
-void ConnectionGraph::set_escape_state(uint ni, PointsToNode::EscapeState es) {
-  // Don't change non-escaping state of NULL pointer.
-  if (is_null_ptr(ni))
-    return;
-  PointsToNode *npt = ptnode_adr(ni);
-  PointsToNode::EscapeState old_es = npt->escape_state();
-  if (es > old_es)
-    npt->set_escape_state(es);
-}
-
-void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt,
-                               PointsToNode::EscapeState es, bool done) {
-  PointsToNode* ptadr = ptnode_adr(n->_idx);
-  ptadr->_node = n;
-  ptadr->set_node_type(nt);
-
-  // inline set_escape_state(idx, es);
-  PointsToNode::EscapeState old_es = ptadr->escape_state();
-  if (es > old_es)
-    ptadr->set_escape_state(es);
-
-  if (done)
-    _processed.set(n->_idx);
-}
-
-PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n) {
-  uint idx = n->_idx;
-  PointsToNode::EscapeState es;
-
-  // If we are still collecting or there were no non-escaping allocations
-  // we don't know the answer yet
-  if (_collecting)
-    return PointsToNode::UnknownEscape;
-
-  // if the node was created after the escape computation, return
-  // UnknownEscape
-  if (idx >= nodes_size())
-    return PointsToNode::UnknownEscape;
-
-  es = ptnode_adr(idx)->escape_state();
-
-  // if we have already computed a value, return it
-  if (es != PointsToNode::UnknownEscape &&
-      ptnode_adr(idx)->node_type() == PointsToNode::JavaObject)
-    return es;
-
-  // PointsTo() calls n->uncast() which can return a new ideal node.
-  if (n->uncast()->_idx >= nodes_size())
-    return PointsToNode::UnknownEscape;
-
-  PointsToNode::EscapeState orig_es = es;
-
-  // compute max escape state of anything this node could point to
-  for(VectorSetI i(PointsTo(n)); i.test() && es != PointsToNode::GlobalEscape; ++i) {
-    uint pt = i.elem;
-    PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state();
-    if (pes > es)
-      es = pes;
-  }
-  if (orig_es != es) {
-    // cache the computed escape state
-    assert(es > orig_es, "should have computed an escape state");
-    set_escape_state(idx, es);
-  } // orig_es could be PointsToNode::UnknownEscape
-  return es;
-}
-
-VectorSet* ConnectionGraph::PointsTo(Node * n) {
-  pt_ptset.Reset();
-  pt_visited.Reset();
-  pt_worklist.clear();
-
-#ifdef ASSERT
-  Node *orig_n = n;
-#endif
-
-  n = n->uncast();
-  PointsToNode* npt = ptnode_adr(n->_idx);
-
-  // If we have a JavaObject, return just that object
-  if (npt->node_type() == PointsToNode::JavaObject) {
-    pt_ptset.set(n->_idx);
-    return &pt_ptset;
-  }
-#ifdef ASSERT
-  if (npt->_node == NULL) {
-    if (orig_n != n)
-      orig_n->dump();
-    n->dump();
-    assert(npt->_node != NULL, "unregistered node");
-  }
-#endif
-  pt_worklist.push(n->_idx);
-  while(pt_worklist.length() > 0) {
-    int ni = pt_worklist.pop();
-    if (pt_visited.test_set(ni))
-      continue;
-
-    PointsToNode* pn = ptnode_adr(ni);
-    // ensure that all inputs of a Phi have been processed
-    assert(!_collecting || !pn->_node->is_Phi() || _processed.test(ni),"");
-
-    int edges_processed = 0;
-    uint e_cnt = pn->edge_count();
-    for (uint e = 0; e < e_cnt; e++) {
-      uint etgt = pn->edge_target(e);
-      PointsToNode::EdgeType et = pn->edge_type(e);
-      if (et == PointsToNode::PointsToEdge) {
-        pt_ptset.set(etgt);
-        edges_processed++;
-      } else if (et == PointsToNode::DeferredEdge) {
-        pt_worklist.push(etgt);
-        edges_processed++;
-      } else {
-        assert(false,"neither PointsToEdge or DeferredEdge");
-      }
-    }
-    if (edges_processed == 0) {
-      // no deferred or pointsto edges found.  Assume the value was set
-      // outside this method.  Add the phantom object to the pointsto set.
-      pt_ptset.set(_phantom_object);
-    }
-  }
-  return &pt_ptset;
-}
-
-void ConnectionGraph::remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited) {
-  // This method is most expensive during ConnectionGraph construction.
-  // Reuse vectorSet and an additional growable array for deferred edges.
-  deferred_edges->clear();
-  visited->Reset();
-
-  visited->set(ni);
-  PointsToNode *ptn = ptnode_adr(ni);
-  assert(ptn->node_type() == PointsToNode::LocalVar ||
-         ptn->node_type() == PointsToNode::Field, "sanity");
-  assert(ptn->edge_count() != 0, "should have at least phantom_object");
-
-  // Mark current edges as visited and move deferred edges to separate array.
-  for (uint i = 0; i < ptn->edge_count(); ) {
-    uint t = ptn->edge_target(i);
-#ifdef ASSERT
-    assert(!visited->test_set(t), "expecting no duplications");
-#else
-    visited->set(t);
-#endif
-    if (ptn->edge_type(i) == PointsToNode::DeferredEdge) {
-      ptn->remove_edge(t, PointsToNode::DeferredEdge);
-      deferred_edges->append(t);
-    } else {
-      i++;
-    }
-  }
-  for (int next = 0; next < deferred_edges->length(); ++next) {
-    uint t = deferred_edges->at(next);
-    PointsToNode *ptt = ptnode_adr(t);
-    uint e_cnt = ptt->edge_count();
-    assert(e_cnt != 0, "should have at least phantom_object");
-    for (uint e = 0; e < e_cnt; e++) {
-      uint etgt = ptt->edge_target(e);
-      if (visited->test_set(etgt))
-        continue;
-
-      PointsToNode::EdgeType et = ptt->edge_type(e);
-      if (et == PointsToNode::PointsToEdge) {
-        add_pointsto_edge(ni, etgt);
-      } else if (et == PointsToNode::DeferredEdge) {
-        deferred_edges->append(etgt);
-      } else {
-        assert(false,"invalid connection graph");
-      }
-    }
-  }
-  if (ptn->edge_count() == 0) {
-    // No pointsto edges found after deferred edges are removed.
-    // For example, in the next case where call is replaced
-    // with uncommon trap and as result array's load references
-    // itself through deferred edges:
-    //
-    // A a = b[i];
-    // if (c!=null) a = c.foo();
-    // b[i] = a;
-    //
-    // Assume the value was set outside this method and
-    // add edge to phantom object.
-    add_pointsto_edge(ni, _phantom_object);
-  }
-}
-
-
-//  Add an edge to node given by "to_i" from any field of adr_i whose offset
-//  matches "offset"  A deferred edge is added if to_i is a LocalVar, and
-//  a pointsto edge is added if it is a JavaObject
-
-void ConnectionGraph::add_edge_from_fields(uint adr_i, uint to_i, int offs) {
-  // No fields for NULL pointer.
-  if (is_null_ptr(adr_i)) {
-    return;
-  }
-  PointsToNode* an = ptnode_adr(adr_i);
-  PointsToNode* to = ptnode_adr(to_i);
-  bool deferred = (to->node_type() == PointsToNode::LocalVar);
-  bool escaped  = (to_i == _phantom_object) && (offs == Type::OffsetTop);
-  if (escaped) {
-    // Values in fields escaped during call.
-    assert(an->escape_state() >= PointsToNode::ArgEscape, "sanity");
-    offs = Type::OffsetBot;
-  }
-  for (uint fe = 0; fe < an->edge_count(); fe++) {
-    assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
-    int fi = an->edge_target(fe);
-    if (escaped) {
-      set_escape_state(fi, PointsToNode::GlobalEscape);
-    }
-    PointsToNode* pf = ptnode_adr(fi);
-    int po = pf->offset();
-    if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
-      if (deferred)
-        add_deferred_edge(fi, to_i);
-      else
-        add_pointsto_edge(fi, to_i);
-    }
-  }
-}
-
-// Add a deferred  edge from node given by "from_i" to any field of adr_i
-// whose offset matches "offset".
-void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) {
-  // No fields for NULL pointer.
-  if (is_null_ptr(adr_i)) {
-    return;
-  }
-  if (adr_i == _phantom_object) {
-    // Add only one edge for unknown object.
-    add_pointsto_edge(from_i, _phantom_object);
-    return;
-  }
-  PointsToNode* an = ptnode_adr(adr_i);
-  bool is_alloc = an->_node->is_Allocate();
-  for (uint fe = 0; fe < an->edge_count(); fe++) {
-    assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
-    int fi = an->edge_target(fe);
-    PointsToNode* pf = ptnode_adr(fi);
-    int offset = pf->offset();
-    if (!is_alloc) {
-      // Assume the field was set outside this method if it is not Allocation
-      add_pointsto_edge(fi, _phantom_object);
-    }
-    if (offset == offs || offset == Type::OffsetBot || offs == Type::OffsetBot) {
-      add_deferred_edge(from_i, fi);
-    }
-  }
-  // Some fields references (AddP) may still be missing
-  // until Connection Graph construction is complete.
-  // For example, loads from RAW pointers with offset 0
-  // which don't have AddP.
-  // A reference to phantom_object will be added if
-  // a field reference is still missing after completing
-  // Connection Graph (see remove_deferred()).
-}
-
-// Helper functions
-
-static Node* get_addp_base(Node *addp) {
+Node* ConnectionGraph::get_addp_base(Node *addp) {
   assert(addp->is_AddP(), "must be AddP");
   //
   // AddP cases for Base and Address inputs:
@@ -513,30 +2045,30 @@
   //       | |
   //       AddP  ( base == address )
   //
-  Node *base = addp->in(AddPNode::Base)->uncast();
-  if (base->is_top()) { // The AddP case #3 and #6.
-    base = addp->in(AddPNode::Address)->uncast();
+  Node *base = addp->in(AddPNode::Base);
+  if (base->uncast()->is_top()) { // The AddP case #3 and #6.
+    base = addp->in(AddPNode::Address);
     while (base->is_AddP()) {
       // Case #6 (unsafe access) may have several chained AddP nodes.
-      assert(base->in(AddPNode::Base)->is_top(), "expected unsafe access address only");
-      base = base->in(AddPNode::Address)->uncast();
+      assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
+      base = base->in(AddPNode::Address);
     }
-    assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal ||
-           base->Opcode() == Op_CastX2P || base->is_DecodeN() ||
-           (base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL) ||
-           (base->is_Proj() && base->in(0)->is_Allocate()), "sanity");
+    Node* uncast_base = base->uncast();
+    int opcode = uncast_base->Opcode();
+    assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
+           opcode == Op_CastX2P || uncast_base->is_DecodeN() ||
+           (uncast_base->is_Mem() && uncast_base->bottom_type() == TypeRawPtr::NOTNULL) ||
+           (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
   }
   return base;
 }
 
-static Node* find_second_addp(Node* addp, Node* n) {
+Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
-
   Node* addp2 = addp->raw_out(0);
   if (addp->outcnt() == 1 && addp2->is_AddP() &&
       addp2->in(AddPNode::Base) == n &&
       addp2->in(AddPNode::Address) == addp) {
-
     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
     //
     // Find array's offset to push it on worklist first and
@@ -575,7 +2107,8 @@
 // Adjust the type and inputs of an AddP which computes the
 // address of a field of an instance
 //
-bool ConnectionGraph::split_AddP(Node *addp, Node *base,  PhaseGVN  *igvn) {
+bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
+  PhaseGVN* igvn = _igvn;
   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
@@ -612,7 +2145,6 @@
       !base_t->klass()->is_subtype_of(t->klass())) {
      return false; // bail out
   }
-
   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
   // Do NOT remove the next line: ensure a new alias index is allocated
   // for the instance type. Note: C++ will not remove it since the call
@@ -620,9 +2152,7 @@
   int alias_idx = _compile->get_alias_index(tinst);
   igvn->set_type(addp, tinst);
   // record the allocation in the node map
-  assert(ptnode_adr(addp->_idx)->_node != NULL, "should be registered");
-  set_map(addp->_idx, get_map(base->_idx));
-
+  set_map(addp, get_map(base->_idx));
   // Set addp's Base and Address to 'base'.
   Node *abase = addp->in(AddPNode::Base);
   Node *adr   = addp->in(AddPNode::Address);
@@ -657,8 +2187,9 @@
 // created phi or an existing phi.  Sets create_new to indicate whether a new
 // phi was created.  Cache the last newly created phi in the node map.
 //
-PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn, bool &new_created) {
+PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, bool &new_created) {
   Compile *C = _compile;
+  PhaseGVN* igvn = _igvn;
   new_created = false;
   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
   // nothing to do if orig_phi is bottom memory or matches alias_idx
@@ -698,12 +2229,7 @@
   C->copy_node_notes_to(result, orig_phi);
   igvn->set_type(result, result->bottom_type());
   record_for_optimizer(result);
-
-  debug_only(Node* pn = ptnode_adr(orig_phi->_idx)->_node;)
-  assert(pn == NULL || pn == orig_phi, "wrong node");
-  set_map(orig_phi->_idx, result);
-  ptnode_adr(orig_phi->_idx)->_node = orig_phi;
-
+  set_map(orig_phi, result);
   new_created = true;
   return result;
 }
@@ -712,27 +2238,25 @@
 // Return a new version of Memory Phi "orig_phi" with the inputs having the
 // specified alias index.
 //
-PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn) {
-
+PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist) {
   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
   Compile *C = _compile;
+  PhaseGVN* igvn = _igvn;
   bool new_phi_created;
-  PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created);
+  PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
   if (!new_phi_created) {
     return result;
   }
-
   GrowableArray<PhiNode *>  phi_list;
   GrowableArray<uint>  cur_input;
-
   PhiNode *phi = orig_phi;
   uint idx = 1;
   bool finished = false;
   while(!finished) {
     while (idx < phi->req()) {
-      Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, igvn);
+      Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
       if (mem != NULL && mem->is_Phi()) {
-        PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created);
+        PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
         if (new_phi_created) {
           // found an phi for which we created a new split, push current one on worklist and begin
           // processing new one
@@ -775,19 +2299,18 @@
   return result;
 }
 
-
 //
 // The next methods are derived from methods in MemNode.
 //
-static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
+Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
   Node *mem = mmem;
   // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
   // means an array I have not precisely typed yet.  Do not do any
   // alias stuff with it any time soon.
-  if( toop->base() != Type::AnyPtr &&
+  if (toop->base() != Type::AnyPtr &&
       !(toop->klass() != NULL &&
         toop->klass()->is_java_lang_Object() &&
-        toop->offset() == Type::OffsetBot) ) {
+        toop->offset() == Type::OffsetBot)) {
     mem = mmem->memory_at(alias_idx);
     // Update input if it is progress over what we have now
   }
@@ -797,9 +2320,9 @@
 //
 // Move memory users to their memory slices.
 //
-void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *igvn) {
+void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis) {
   Compile* C = _compile;
-
+  PhaseGVN* igvn = _igvn;
   const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
   assert(tp != NULL, "ptr type");
   int alias_idx = C->get_alias_index(tp);
@@ -816,7 +2339,7 @@
       }
       // Replace previous general reference to mem node.
       uint orig_uniq = C->unique();
-      Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
+      Node* m = find_inst_mem(n, general_idx, orig_phis);
       assert(orig_uniq == C->unique(), "no new nodes");
       mmem->set_memory_at(general_idx, m);
       --imax;
@@ -836,7 +2359,7 @@
       }
       // Move to general memory slice.
       uint orig_uniq = C->unique();
-      Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
+      Node* m = find_inst_mem(n, general_idx, orig_phis);
       assert(orig_uniq == C->unique(), "no new nodes");
       igvn->hash_delete(use);
       imax -= use->replace_edge(n, m);
@@ -873,10 +2396,11 @@
 // Search memory chain of "mem" to find a MemNode whose address
 // is the specified alias index.
 //
-Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *phase) {
+Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis) {
   if (orig_mem == NULL)
     return orig_mem;
-  Compile* C = phase->C;
+  Compile* C = _compile;
+  PhaseGVN* igvn = _igvn;
   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
   bool is_instance = (toop != NULL) && toop->is_known_instance();
   Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
@@ -887,7 +2411,7 @@
     if (result == start_mem)
       break;  // hit one of our sentinels
     if (result->is_Mem()) {
-      const Type *at = phase->type(result->in(MemNode::Address));
+      const Type *at = igvn->type(result->in(MemNode::Address));
       if (at == Type::TOP)
         break; // Dead
       assert (at->isa_ptr() != NULL, "pointer type required.");
@@ -909,7 +2433,7 @@
         break;  // hit one of our sentinels
       } else if (proj_in->is_Call()) {
         CallNode *call = proj_in->as_Call();
-        if (!call->may_modify(toop, phase)) {
+        if (!call->may_modify(toop, igvn)) {
           result = call->in(TypeFunc::Memory);
         }
       } else if (proj_in->is_Initialize()) {
@@ -928,7 +2452,7 @@
       if (result == mmem->base_memory()) {
         // Didn't find instance memory, search through general slice recursively.
         result = mmem->memory_at(C->get_general_index(alias_idx));
-        result = find_inst_mem(result, alias_idx, orig_phis, phase);
+        result = find_inst_mem(result, alias_idx, orig_phis);
         if (C->failing()) {
           return NULL;
         }
@@ -936,7 +2460,7 @@
       }
     } else if (result->is_Phi() &&
                C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
-      Node *un = result->as_Phi()->unique_input(phase);
+      Node *un = result->as_Phi()->unique_input(igvn);
       if (un != NULL) {
         orig_phis.append_if_missing(result->as_Phi());
         result = un;
@@ -944,7 +2468,7 @@
         break;
       }
     } else if (result->is_ClearArray()) {
-      if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), phase)) {
+      if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
         // Can not bypass initialization of the instance
         // we are looking for.
         break;
@@ -952,7 +2476,7 @@
       // Otherwise skip it (the call updated 'result' value).
     } else if (result->Opcode() == Op_SCMemProj) {
       assert(result->in(0)->is_LoadStore(), "sanity");
-      const Type *at = phase->type(result->in(0)->in(MemNode::Address));
+      const Type *at = igvn->type(result->in(0)->in(MemNode::Address));
       if (at != Type::TOP) {
         assert (at->isa_ptr() != NULL, "pointer type required.");
         int idx = C->get_alias_index(at->is_ptr());
@@ -972,7 +2496,7 @@
       orig_phis.append_if_missing(mphi);
     } else if (C->get_alias_index(t) != alias_idx) {
       // Create a new Phi with the specified alias index type.
-      result = split_memory_phi(mphi, alias_idx, orig_phis, phase);
+      result = split_memory_phi(mphi, alias_idx, orig_phis);
     }
   }
   // the result is either MemNode, PhiNode, InitializeNode.
@@ -1071,12 +2595,12 @@
 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist) {
   GrowableArray<Node *>  memnode_worklist;
   GrowableArray<PhiNode *>  orig_phis;
-
   PhaseIterGVN  *igvn = _igvn;
   uint new_index_start = (uint) _compile->num_alias_types();
   Arena* arena = Thread::current()->resource_area();
   VectorSet visited(arena);
-
+  ideal_nodes.clear(); // Reset for use with set_map/get_map.
+  uint unique_old = _compile->unique();
 
   //  Phase 1:  Process possible allocations from alloc_worklist.
   //  Create instance types for the CheckCastPP for allocations where possible.
@@ -1088,17 +2612,15 @@
   while (alloc_worklist.length() != 0) {
     Node *n = alloc_worklist.pop();
     uint ni = n->_idx;
-    const TypeOopPtr* tinst = NULL;
     if (n->is_Call()) {
       CallNode *alloc = n->as_Call();
       // copy escape information to call node
       PointsToNode* ptn = ptnode_adr(alloc->_idx);
-      PointsToNode::EscapeState es = escape_state(alloc);
+      PointsToNode::EscapeState es = ptn->escape_state();
       // We have an allocation or call which returns a Java object,
       // see if it is unescaped.
       if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable())
         continue;
-
       // Find CheckCastPP for the allocate or for the return value of a call
       n = alloc->result_cast();
       if (n == NULL) {            // No uses except Initialize node
@@ -1145,20 +2667,18 @@
         // so it could be eliminated.
         alloc->as_Allocate()->_is_scalar_replaceable = true;
       }
-      set_escape_state(n->_idx, es); // CheckCastPP escape state
+      set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state
       // in order for an object to be scalar-replaceable, it must be:
       //   - a direct allocation (not a call returning an object)
       //   - non-escaping
       //   - eligible to be a unique type
       //   - not determined to be ineligible by escape analysis
-      assert(ptnode_adr(alloc->_idx)->_node != NULL &&
-             ptnode_adr(n->_idx)->_node != NULL, "should be registered");
-      set_map(alloc->_idx, n);
-      set_map(n->_idx, alloc);
+      set_map(alloc, n);
+      set_map(n, alloc);
       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
       if (t == NULL)
         continue;  // not a TypeOopPtr
-      tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
+      const TypeOopPtr* tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
       igvn->hash_delete(n);
       igvn->set_type(n,  tinst);
       n->raise_bottom_type(tinst);
@@ -1168,9 +2688,10 @@
 
         // First, put on the worklist all Field edges from Connection Graph
         // which is more accurate then putting immediate users from Ideal Graph.
-        for (uint e = 0; e < ptn->edge_count(); e++) {
-          Node *use = ptnode_adr(ptn->edge_target(e))->_node;
-          assert(ptn->edge_type(e) == PointsToNode::FieldEdge && use->is_AddP(),
+        for (EdgeIterator e(ptn); e.has_next(); e.next()) {
+          PointsToNode* tgt = e.get();
+          Node* use = tgt->ideal_node();
+          assert(tgt->is_Field() && use->is_AddP(),
                  "only AddP nodes are Field edges in CG");
           if (use->outcnt() > 0) { // Don't process dead nodes
             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
@@ -1202,16 +2723,18 @@
         }
       }
     } else if (n->is_AddP()) {
-      VectorSet* ptset = PointsTo(get_addp_base(n));
-      assert(ptset->Size() == 1, "AddP address is unique");
-      uint elem = ptset->getelem(); // Allocation node's index
-      if (elem == _phantom_object) {
-        assert(false, "escaped allocation");
-        continue; // Assume the value was set outside this method.
+      JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
+      if (jobj == NULL || jobj == phantom_obj) {
+#ifdef ASSERT
+        ptnode_adr(get_addp_base(n)->_idx)->dump();
+        ptnode_adr(n->_idx)->dump();
+        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
+#endif
+        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
+        return;
       }
-      Node *base = get_map(elem);  // CheckCastPP node
-      if (!split_AddP(n, base, igvn)) continue; // wrong type from dead path
-      tinst = igvn->type(base)->isa_oopptr();
+      Node *base = get_map(jobj->idx());  // CheckCastPP node
+      if (!split_AddP(n, base)) continue; // wrong type from dead path
     } else if (n->is_Phi() ||
                n->is_CheckCastPP() ||
                n->is_EncodeP() ||
@@ -1221,18 +2744,20 @@
         assert(n->is_Phi(), "loops only through Phi's");
         continue;  // already processed
       }
-      VectorSet* ptset = PointsTo(n);
-      if (ptset->Size() == 1) {
-        uint elem = ptset->getelem(); // Allocation node's index
-        if (elem == _phantom_object) {
-          assert(false, "escaped allocation");
-          continue; // Assume the value was set outside this method.
-        }
-        Node *val = get_map(elem);   // CheckCastPP node
+      JavaObjectNode* jobj = unique_java_object(n);
+      if (jobj == NULL || jobj == phantom_obj) {
+#ifdef ASSERT
+        ptnode_adr(n->_idx)->dump();
+        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
+#endif
+        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
+        return;
+      } else {
+        Node *val = get_map(jobj->idx());   // CheckCastPP node
         TypeNode *tn = n->as_Type();
-        tinst = igvn->type(val)->isa_oopptr();
+        const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
         assert(tinst != NULL && tinst->is_known_instance() &&
-               (uint)tinst->instance_id() == elem , "instance type expected.");
+               tinst->instance_id() == jobj->idx() , "instance type expected.");
 
         const Type *tn_type = igvn->type(tn);
         const TypeOopPtr *tn_t;
@@ -1241,7 +2766,6 @@
         } else {
           tn_t = tn_type->isa_oopptr();
         }
-
         if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
           if (tn_type->isa_narrowoop()) {
             tn_type = tinst->make_narrowoop();
@@ -1314,13 +2838,13 @@
   }
   // New alias types were created in split_AddP().
   uint new_index_end = (uint) _compile->num_alias_types();
+  assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");
 
   //  Phase 2:  Process MemNode's from memnode_worklist. compute new address type and
   //            compute new values for Memory inputs  (the Memory inputs are not
   //            actually updated until phase 4.)
   if (memnode_worklist.length() == 0)
     return;  // nothing to do
-
   while (memnode_worklist.length() != 0) {
     Node *n = memnode_worklist.pop();
     if (visited.test_set(n->_idx))
@@ -1341,17 +2865,14 @@
       assert (addr_t->isa_ptr() != NULL, "pointer type required.");
       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
       assert ((uint)alias_idx < new_index_end, "wrong alias index");
-      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn);
+      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
       if (_compile->failing()) {
         return;
       }
       if (mem != n->in(MemNode::Memory)) {
         // We delay the memory edge update since we need old one in
         // MergeMem code below when instances memory slices are separated.
-        debug_only(Node* pn = ptnode_adr(n->_idx)->_node;)
-        assert(pn == NULL || pn == n, "wrong node");
-        set_map(n->_idx, mem);
-        ptnode_adr(n->_idx)->_node = n;
+        set_map(n, mem);
       }
       if (n->is_Load()) {
         continue;  // don't push users
@@ -1442,7 +2963,7 @@
         if((uint)_compile->get_general_index(ni) == i) {
           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
           if (nmm->is_empty_memory(m)) {
-            Node* result = find_inst_mem(mem, ni, orig_phis, igvn);
+            Node* result = find_inst_mem(mem, ni, orig_phis);
             if (_compile->failing()) {
               return;
             }
@@ -1458,7 +2979,7 @@
       if (result == nmm->base_memory()) {
         // Didn't find instance memory, search through general slice recursively.
         result = nmm->memory_at(_compile->get_general_index(ni));
-        result = find_inst_mem(result, ni, orig_phis, igvn);
+        result = find_inst_mem(result, ni, orig_phis);
         if (_compile->failing()) {
           return;
         }
@@ -1482,7 +3003,7 @@
     igvn->hash_delete(phi);
     for (uint i = 1; i < phi->req(); i++) {
       Node *mem = phi->in(i);
-      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis, igvn);
+      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
       if (_compile->failing()) {
         return;
       }
@@ -1496,39 +3017,36 @@
 
   // Update the memory inputs of MemNodes with the value we computed
   // in Phase 2 and move stores memory users to corresponding memory slices.
-
   // Disable memory split verification code until the fix for 6984348.
   // Currently it produces false negative results since it does not cover all cases.
 #if 0 // ifdef ASSERT
   visited.Reset();
   Node_Stack old_mems(arena, _compile->unique() >> 2);
 #endif
-  for (uint i = 0; i < nodes_size(); i++) {
-    Node *nmem = get_map(i);
-    if (nmem != NULL) {
-      Node *n = ptnode_adr(i)->_node;
-      assert(n != NULL, "sanity");
-      if (n->is_Mem()) {
+  for (uint i = 0; i < ideal_nodes.size(); i++) {
+    Node*    n = ideal_nodes.at(i);
+    Node* nmem = get_map(n->_idx);
+    assert(nmem != NULL, "sanity");
+    if (n->is_Mem()) {
 #if 0 // ifdef ASSERT
-        Node* old_mem = n->in(MemNode::Memory);
-        if (!visited.test_set(old_mem->_idx)) {
-          old_mems.push(old_mem, old_mem->outcnt());
-        }
+      Node* old_mem = n->in(MemNode::Memory);
+      if (!visited.test_set(old_mem->_idx)) {
+        old_mems.push(old_mem, old_mem->outcnt());
+      }
 #endif
-        assert(n->in(MemNode::Memory) != nmem, "sanity");
-        if (!n->is_Load()) {
-          // Move memory users of a store first.
-          move_inst_mem(n, orig_phis, igvn);
-        }
-        // Now update memory input
-        igvn->hash_delete(n);
-        n->set_req(MemNode::Memory, nmem);
-        igvn->hash_insert(n);
-        record_for_optimizer(n);
-      } else {
-        assert(n->is_Allocate() || n->is_CheckCastPP() ||
-               n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
+      assert(n->in(MemNode::Memory) != nmem, "sanity");
+      if (!n->is_Load()) {
+        // Move memory users of a store first.
+        move_inst_mem(n, orig_phis);
       }
+      // Now update memory input
+      igvn->hash_delete(n);
+      n->set_req(MemNode::Memory, nmem);
+      igvn->hash_insert(n);
+      record_for_optimizer(n);
+    } else {
+      assert(n->is_Allocate() || n->is_CheckCastPP() ||
+             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
     }
   }
 #if 0 // ifdef ASSERT
@@ -1542,1611 +3060,72 @@
 #endif
 }
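For illustration only (a hypothetical Java example, not part of this change): after the phases above, each non-escaping allocation gets its own instance type and memory slice, so code of the following shape can have its two allocations disambiguated and scalar replaced independently.

    // Hypothetical example: neither array escapes twoLocalArrays(), so after
    // split_unique_types() their memory slices are disjoint and each
    // AllocateArray is a scalar-replacement candidate.
    class SplitTypesExample {
        static int twoLocalArrays() {
            int[] a = new int[1];   // NoEscape allocation #1
            int[] b = new int[1];   // NoEscape allocation #2
            a[0] = 1;
            b[0] = 2;
            return a[0] + b[0];     // loads no longer alias across the two instances
        }
    }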
 
-bool ConnectionGraph::has_candidates(Compile *C) {
-  // EA brings benefits only when the code has allocations and/or locks which
-  // are represented by ideal Macro nodes.
-  int cnt = C->macro_count();
-  for( int i=0; i < cnt; i++ ) {
-    Node *n = C->macro_node(i);
-    if ( n->is_Allocate() )
-      return true;
-    if( n->is_Lock() ) {
-      Node* obj = n->as_Lock()->obj_node()->uncast();
-      if( !(obj->is_Parm() || obj->is_Con()) )
-        return true;
-    }
-  }
-  return false;
-}
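The removed has_candidates() only reports true for Allocate macro nodes and for Lock nodes whose object is neither a parameter nor a constant. A hypothetical Java method that would qualify (illustration only, not part of this change):

    // Hypothetical example: the method both allocates and locks on a locally
    // created object, so has_candidates() would consider it worth analyzing.
    class CandidateExample {
        static int lockedSum(int x, int y) {
            Object guard = new Object();   // Allocate macro node
            synchronized (guard) {         // Lock whose obj is neither Parm nor Con
                return x + y;
            }
        }
    }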
-
-void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
-  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
-  // to create space for them in ConnectionGraph::_nodes[].
-  Node* oop_null = igvn->zerocon(T_OBJECT);
-  Node* noop_null = igvn->zerocon(T_NARROWOOP);
-
-  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
-  // Perform escape analysis
-  if (congraph->compute_escape()) {
-    // There are non escaping objects.
-    C->set_congraph(congraph);
-  }
-
-  // Cleanup.
-  if (oop_null->outcnt() == 0)
-    igvn->hash_delete(oop_null);
-  if (noop_null->outcnt() == 0)
-    igvn->hash_delete(noop_null);
-}
-
-bool ConnectionGraph::compute_escape() {
-  Compile* C = _compile;
-
-  // 1. Populate Connection Graph (CG) with Ideal nodes.
-
-  Unique_Node_List worklist_init;
-  worklist_init.map(C->unique(), NULL);  // preallocate space
-
-  // Initialize worklist
-  if (C->root() != NULL) {
-    worklist_init.push(C->root());
-  }
-
-  GrowableArray<Node*> alloc_worklist;
-  GrowableArray<Node*> addp_worklist;
-  GrowableArray<Node*> ptr_cmp_worklist;
-  GrowableArray<Node*> storestore_worklist;
-  PhaseGVN* igvn = _igvn;
-
-  // Push all useful nodes onto CG list and set their type.
-  for( uint next = 0; next < worklist_init.size(); ++next ) {
-    Node* n = worklist_init.at(next);
-    record_for_escape_analysis(n, igvn);
-    // Only the results of allocations and Java static calls are checked
-    // for an escape status. See process_call_result() below.
-    if (n->is_Allocate() || n->is_CallStaticJava() &&
-        ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject) {
-      alloc_worklist.append(n);
-    } else if(n->is_AddP()) {
-      // Collect address nodes. Use them during stage 3 below
-      // to build initial connection graph field edges.
-      addp_worklist.append(n);
-    } else if (n->is_MergeMem()) {
-      // Collect all MergeMem nodes to add memory slices for
-      // scalar replaceable objects in split_unique_types().
-      _mergemem_worklist.append(n->as_MergeMem());
-    } else if (OptimizePtrCompare && n->is_Cmp() &&
-               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
-      // Compare pointers nodes
-      ptr_cmp_worklist.append(n);
-    } else if (n->is_MemBarStoreStore()) {
-      // Collect all MemBarStoreStore nodes so that depending on the
-      // escape status of the associated Allocate node some of them
-      // may be eliminated.
-      storestore_worklist.append(n);
-    }
-    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-      Node* m = n->fast_out(i);   // Get user
-      worklist_init.push(m);
-    }
-  }
-
-  if (alloc_worklist.length() == 0) {
-    _collecting = false;
-    return false; // Nothing to do.
-  }
-
-  // 2. First pass to create simple CG edges (doesn't require walking the CG).
-  uint delayed_size = _delayed_worklist.size();
-  for( uint next = 0; next < delayed_size; ++next ) {
-    Node* n = _delayed_worklist.at(next);
-    build_connection_graph(n, igvn);
-  }
-
-  // 3. Pass to create initial fields edges (JavaObject -F-> AddP)
-  //    to reduce number of iterations during stage 4 below.
-  uint addp_length = addp_worklist.length();
-  for( uint next = 0; next < addp_length; ++next ) {
-    Node* n = addp_worklist.at(next);
-    Node* base = get_addp_base(n);
-    if (base->is_Proj() && base->in(0)->is_Call())
-      base = base->in(0);
-    PointsToNode::NodeType nt = ptnode_adr(base->_idx)->node_type();
-    if (nt == PointsToNode::JavaObject) {
-      build_connection_graph(n, igvn);
-    }
-  }
-
-  GrowableArray<int> cg_worklist;
-  cg_worklist.append(_phantom_object);
-  GrowableArray<uint>  worklist;
-
-  // 4. Build Connection Graph edges which require
-  //    walking the connection graph.
-  _progress = false;
-  for (uint ni = 0; ni < nodes_size(); ni++) {
-    PointsToNode* ptn = ptnode_adr(ni);
-    Node *n = ptn->_node;
-    if (n != NULL) { // Call, AddP, LoadP, StoreP
-      build_connection_graph(n, igvn);
-      if (ptn->node_type() != PointsToNode::UnknownType)
-        cg_worklist.append(n->_idx); // Collect CG nodes
-      if (!_processed.test(n->_idx))
-        worklist.append(n->_idx); // Collect C/A/L/S nodes
-    }
-  }
-
-  // After IGVN, user nodes may have a smaller _idx than
-  // their inputs, so they are processed first in the
-  // previous loop. Because of that, not all graph
-  // edges will be created. Walk over the interesting
-  // nodes again until no new edges are created.
-  //
-  // Normally only 1-3 passes needed to build
-  // Connection Graph depending on graph complexity.
-  // Observed 8 passes in jvm2008 compiler.compiler.
-  // Set the limit to 20 to catch the situation when something
-  // goes wrong and recompile the method without EA.
-
-#define CG_BUILD_ITER_LIMIT 20
-
-  uint length = worklist.length();
-  int iterations = 0;
-  while(_progress && (iterations++ < CG_BUILD_ITER_LIMIT)) {
-    _progress = false;
-    for( uint next = 0; next < length; ++next ) {
-      int ni = worklist.at(next);
-      PointsToNode* ptn = ptnode_adr(ni);
-      Node* n = ptn->_node;
-      assert(n != NULL, "should be known node");
-      build_connection_graph(n, igvn);
-    }
-  }
-  if (iterations >= CG_BUILD_ITER_LIMIT) {
-    assert(iterations < CG_BUILD_ITER_LIMIT,
-           err_msg("infinite EA connection graph build with %d nodes and worklist size %d",
-           nodes_size(), length));
-    // Possible infinite build_connection_graph loop,
-    // retry compilation without escape analysis.
-    C->record_failure(C2Compiler::retry_no_escape_analysis());
-    _collecting = false;
-    return false;
-  }
-#undef CG_BUILD_ITER_LIMIT
-
-  // 5. Propagate escaped states.
-  worklist.clear();
-
-  // mark all nodes reachable from GlobalEscape nodes
-  (void)propagate_escape_state(&cg_worklist, &worklist, PointsToNode::GlobalEscape);
+#ifndef PRODUCT
+static const char *node_type_names[] = {
+  "UnknownType",
+  "JavaObject",
+  "LocalVar",
+  "Field",
+  "Arraycopy"
+};
 
-  // mark all nodes reachable from ArgEscape nodes
-  bool has_non_escaping_obj = propagate_escape_state(&cg_worklist, &worklist, PointsToNode::ArgEscape);
-
-  Arena* arena = Thread::current()->resource_area();
-  VectorSet visited(arena);
-
-  // 6. Find fields initializing values for non-escaping allocations
-  uint alloc_length = alloc_worklist.length();
-  for (uint next = 0; next < alloc_length; ++next) {
-    Node* n = alloc_worklist.at(next);
-    PointsToNode::EscapeState es = ptnode_adr(n->_idx)->escape_state();
-    if (es == PointsToNode::NoEscape) {
-      has_non_escaping_obj = true;
-      if (n->is_Allocate()) {
-        find_init_values(n, &visited, igvn);
-        // The object allocated by this Allocate node will never be
-        // seen by another thread. Mark it so that when it is
-        // expanded no MemBarStoreStore is added.
-        n->as_Allocate()->initialization()->set_does_not_escape();
-      }
-    } else if ((es == PointsToNode::ArgEscape) && n->is_Allocate()) {
-      // Same as above. Mark this Allocate node so that when it is
-      // expanded no MemBarStoreStore is added.
-      n->as_Allocate()->initialization()->set_does_not_escape();
-    }
-  }
-
-  uint cg_length = cg_worklist.length();
-
-  // Skip the rest of code if all objects escaped.
-  if (!has_non_escaping_obj) {
-    cg_length = 0;
-    addp_length = 0;
-  }
-
-  for (uint next = 0; next < cg_length; ++next) {
-    int ni = cg_worklist.at(next);
-    PointsToNode* ptn = ptnode_adr(ni);
-    PointsToNode::NodeType nt = ptn->node_type();
-    if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
-      if (ptn->edge_count() == 0) {
-        // No values were found. Assume the value was set
-        // outside this method - add edge to phantom object.
-        add_pointsto_edge(ni, _phantom_object);
-      }
-    }
-  }
-
-  // 7. Remove deferred edges from the graph.
-  for (uint next = 0; next < cg_length; ++next) {
-    int ni = cg_worklist.at(next);
-    PointsToNode* ptn = ptnode_adr(ni);
-    PointsToNode::NodeType nt = ptn->node_type();
-    if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
-      remove_deferred(ni, &worklist, &visited);
-    }
-  }
-
-  // 8. Adjust escape state of nonescaping objects.
-  for (uint next = 0; next < addp_length; ++next) {
-    Node* n = addp_worklist.at(next);
-    adjust_escape_state(n);
-  }
-
-  // push all NoEscape nodes on the worklist
-  worklist.clear();
-  for( uint next = 0; next < cg_length; ++next ) {
-    int nk = cg_worklist.at(next);
-    if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape &&
-        !is_null_ptr(nk))
-      worklist.push(nk);
-  }
+static const char *esc_names[] = {
+  "UnknownEscape",
+  "NoEscape",
+  "ArgEscape",
+  "GlobalEscape"
+};
 
-  alloc_worklist.clear();
-  // Propagate scalar_replaceable value.
-  while(worklist.length() > 0) {
-    uint nk = worklist.pop();
-    PointsToNode* ptn = ptnode_adr(nk);
-    Node* n = ptn->_node;
-    bool scalar_replaceable = ptn->scalar_replaceable();
-    if (n->is_Allocate() && scalar_replaceable) {
-      // Push scalar replaceable allocations on alloc_worklist
-      // for processing in split_unique_types(). Note,
-      // following code may change scalar_replaceable value.
-      alloc_worklist.append(n);
-    }
-    uint e_cnt = ptn->edge_count();
-    for (uint ei = 0; ei < e_cnt; ei++) {
-      uint npi = ptn->edge_target(ei);
-      if (is_null_ptr(npi))
-        continue;
-      PointsToNode *np = ptnode_adr(npi);
-      if (np->escape_state() < PointsToNode::NoEscape) {
-        set_escape_state(npi, PointsToNode::NoEscape);
-        if (!scalar_replaceable) {
-          np->set_scalar_replaceable(false);
-        }
-        worklist.push(npi);
-      } else if (np->scalar_replaceable() && !scalar_replaceable) {
-        np->set_scalar_replaceable(false);
-        worklist.push(npi);
-      }
-    }
-  }
-
-  _collecting = false;
-  assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
-
-  assert(ptnode_adr(_oop_null)->escape_state() == PointsToNode::NoEscape &&
-         ptnode_adr(_oop_null)->edge_count() == 0, "sanity");
-  if (UseCompressedOops) {
-    assert(ptnode_adr(_noop_null)->escape_state() == PointsToNode::NoEscape &&
-           ptnode_adr(_noop_null)->edge_count() == 0, "sanity");
-  }
-
-  if (EliminateLocks && has_non_escaping_obj) {
-    // Mark locks before changing ideal graph.
-    int cnt = C->macro_count();
-    for( int i=0; i < cnt; i++ ) {
-      Node *n = C->macro_node(i);
-      if (n->is_AbstractLock()) { // Lock and Unlock nodes
-        AbstractLockNode* alock = n->as_AbstractLock();
-        if (!alock->is_non_esc_obj()) {
-          PointsToNode::EscapeState es = escape_state(alock->obj_node());
-          assert(es != PointsToNode::UnknownEscape, "should know");
-          if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
-            assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
-            // The lock could be marked eliminated by lock coarsening
-            // code during first IGVN before EA. Replace coarsened flag
-            // to eliminate all associated locks/unlocks.
-            alock->set_non_esc_obj();
-          }
-        }
-      }
-    }
-  }
-
-  if (OptimizePtrCompare && has_non_escaping_obj) {
-    // Add ConI(#CC_GT) and ConI(#CC_EQ).
-    _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
-    _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
-    // Optimize objects compare.
-    while (ptr_cmp_worklist.length() != 0) {
-      Node *n = ptr_cmp_worklist.pop();
-      Node *res = optimize_ptr_compare(n);
-      if (res != NULL) {
-#ifndef PRODUCT
-        if (PrintOptimizePtrCompare) {
-          tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
-          if (Verbose) {
-            n->dump(1);
-          }
-        }
-#endif
-        _igvn->replace_node(n, res);
-      }
-    }
-    // cleanup
-    if (_pcmp_neq->outcnt() == 0)
-      igvn->hash_delete(_pcmp_neq);
-    if (_pcmp_eq->outcnt()  == 0)
-      igvn->hash_delete(_pcmp_eq);
-  }
-
-  // For MemBarStoreStore nodes added in library_call.cpp, check
-  // escape status of associated AllocateNode and optimize out
-  // MemBarStoreStore node if the allocated object never escapes.
-  while (storestore_worklist.length() != 0) {
-    Node *n = storestore_worklist.pop();
-    MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore();
-    Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
-    assert (alloc->is_Allocate(), "storestore should point to AllocateNode");
-    PointsToNode::EscapeState es = ptnode_adr(alloc->_idx)->escape_state();
-    if (es == PointsToNode::NoEscape || es == PointsToNode::ArgEscape) {
-      MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
-      mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
-      mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
-
-      _igvn->register_new_node_with_optimizer(mb);
-      _igvn->replace_node(storestore, mb);
-    }
+void PointsToNode::dump(bool print_state) const {
+  NodeType nt = node_type();
+  tty->print("%s ", node_type_names[(int) nt]);
+  if (print_state) {
+    EscapeState es = escape_state();
+    EscapeState fields_es = fields_escape_state();
+    tty->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
+    if (nt == PointsToNode::JavaObject && !this->scalar_replaceable())
+      tty->print("NSR");
   }
-
-#ifndef PRODUCT
-  if (PrintEscapeAnalysis) {
-    dump(); // Dump ConnectionGraph
-  }
-#endif
-
-  bool has_scalar_replaceable_candidates = false;
-  alloc_length = alloc_worklist.length();
-  for (uint next = 0; next < alloc_length; ++next) {
-    Node* n = alloc_worklist.at(next);
-    PointsToNode* ptn = ptnode_adr(n->_idx);
-    assert(ptn->escape_state() == PointsToNode::NoEscape, "sanity");
-    if (ptn->scalar_replaceable()) {
-      has_scalar_replaceable_candidates = true;
-      break;
+  if (is_Field()) {
+    FieldNode* f = (FieldNode*)this;
+    tty->print("(");
+    for (BaseIterator i(f); i.has_next(); i.next()) {
+      PointsToNode* b = i.get();
+      tty->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));
     }
-  }
-
-  if ( has_scalar_replaceable_candidates &&
-       C->AliasLevel() >= 3 && EliminateAllocations ) {
-
-    // Now use the escape information to create unique types for
-    // scalar replaceable objects.
-    split_unique_types(alloc_worklist);
-
-    if (C->failing())  return false;
-
-    C->print_method("After Escape Analysis", 2);
-
-#ifdef ASSERT
-  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
-    tty->print("=== No allocations eliminated for ");
-    C->method()->print_short_name();
-    if(!EliminateAllocations) {
-      tty->print(" since EliminateAllocations is off ===");
-    } else if(!has_scalar_replaceable_candidates) {
-      tty->print(" since there are no scalar replaceable candidates ===");
-    } else if(C->AliasLevel() < 3) {
-      tty->print(" since AliasLevel < 3 ===");
-    }
-    tty->cr();
-#endif
+    tty->print(" )");
   }
-  return has_non_escaping_obj;
-}
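The removed compute_escape() both marks locks on non-escaping objects for elimination and hands scalar-replaceable allocations to split_unique_types(). A rough sketch of the kind of Java code that benefits (hypothetical, not part of this change):

    // Hypothetical example: 'box' never escapes, so its monitor can be elided
    // (EliminateLocks) and the allocation is a scalar-replacement candidate
    // (EliminateAllocations with AliasLevel >= 3).
    class ComputeEscapeExample {
        static int boxedIncrement(int v) {
            int[] box = new int[1];   // NoEscape, constant-length allocation
            synchronized (box) {      // lock on a non-escaping object
                box[0] = v + 1;
            }
            return box[0];
        }
    }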
-
-// Find fields initializing values for allocations.
-void ConnectionGraph::find_init_values(Node* alloc, VectorSet* visited, PhaseTransform* phase) {
-  assert(alloc->is_Allocate(), "Should be called for Allocate nodes only");
-  PointsToNode* pta = ptnode_adr(alloc->_idx);
-  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
-  InitializeNode* ini = alloc->as_Allocate()->initialization();
-
-  Compile* C = _compile;
-  visited->Reset();
-  // Check if an oop field's initializing value is recorded and add
-  // a corresponding NULL field's value if it is not recorded.
-  // Connection Graph does not record a default initialization by NULL
-  // captured by Initialize node.
-  //
-  uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
-  uint ae_cnt = pta->edge_count();
-  bool visited_bottom_offset = false;
-  for (uint ei = 0; ei < ae_cnt; ei++) {
-    uint nidx = pta->edge_target(ei); // Field (AddP)
-    PointsToNode* ptn = ptnode_adr(nidx);
-    assert(ptn->_node->is_AddP(), "Should be AddP nodes only");
-    int offset = ptn->offset();
-    if (offset == Type::OffsetBot) {
-      if (!visited_bottom_offset) {
-        visited_bottom_offset = true;
-        // Check only oop fields.
-        const Type* adr_type = ptn->_node->as_AddP()->bottom_type();
-        if (!adr_type->isa_aryptr() ||
-            (adr_type->isa_aryptr()->klass() == NULL) ||
-             adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
-          // OffsetBot is used to reference array's element,
-          // always add a reference to NULL since we don't
-          // know which element is referenced.
-          add_edge_from_fields(alloc->_idx, null_idx, offset);
-        }
-      }
-    } else if (offset != oopDesc::klass_offset_in_bytes() &&
-               !visited->test_set(offset)) {
-
-      // Check only oop fields.
-      const Type* adr_type = ptn->_node->as_AddP()->bottom_type();
-      BasicType basic_field_type = T_INT;
-      if (adr_type->isa_instptr()) {
-        ciField* field = C->alias_type(adr_type->isa_instptr())->field();
-        if (field != NULL) {
-          basic_field_type = field->layout_type();
-        } else {
-          // Ignore non field load (for example, klass load)
-        }
-      } else if (adr_type->isa_aryptr()) {
-        if (offset != arrayOopDesc::length_offset_in_bytes()) {
-          const Type* elemtype = adr_type->isa_aryptr()->elem();
-          basic_field_type = elemtype->array_element_basic_type();
-        } else {
-          // Ignore array length load
-        }
-#ifdef ASSERT
-      } else {
-        // Raw pointers are used for initializing stores so skip it
-        // since it should be recorded already
-        Node* base = get_addp_base(ptn->_node);
-        assert(adr_type->isa_rawptr() && base->is_Proj() &&
-               (base->in(0) == alloc),"unexpected pointer type");
-#endif
-      }
-      if (basic_field_type == T_OBJECT ||
-          basic_field_type == T_NARROWOOP ||
-          basic_field_type == T_ARRAY) {
-        Node* value = NULL;
-        if (ini != NULL) {
-          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
-          Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
-          if (store != NULL && store->is_Store()) {
-            value = store->in(MemNode::ValueIn);
-          } else if (ptn->edge_count() > 0) { // Are there oop stores?
-            // Check for a store which follows allocation without branches.
-            // For example, a volatile field store is not collected
-            // by Initialize node. TODO: it would be nice to use idom() here.
-            //
-            // Search all references to the same field which use different
-            // AddP nodes, for example, in the next case:
-            //
-            //    Point p[] = new Point[1];
-            //    if ( x ) { p[0] = new Point(); p[0].x = x; }
-            //    if ( p[0] != null ) { y = p[0].x; } // has CastPP
-            //
-            for (uint next = ei; (next < ae_cnt) && (value == NULL); next++) {
-              uint fpi = pta->edge_target(next); // Field (AddP)
-              PointsToNode *ptf = ptnode_adr(fpi);
-              if (ptf->offset() == offset) {
-                Node* nf = ptf->_node;
-                for (DUIterator_Fast imax, i = nf->fast_outs(imax); i < imax; i++) {
-                  store = nf->fast_out(i);
-                  if (store->is_Store() && store->in(0) != NULL) {
-                    Node* ctrl = store->in(0);
-                    while(!(ctrl == ini || ctrl == alloc || ctrl == NULL ||
-                            ctrl == C->root() || ctrl == C->top() || ctrl->is_Region() ||
-                            ctrl->is_IfTrue() || ctrl->is_IfFalse())) {
-                       ctrl = ctrl->in(0);
-                    }
-                    if (ctrl == ini || ctrl == alloc) {
-                      value = store->in(MemNode::ValueIn);
-                      break;
-                    }
-                  }
-                }
-              }
-            }
-          }
-        }
-        if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
-          // A field's initializing value was not recorded. Add NULL.
-          add_edge_from_fields(alloc->_idx, null_idx, offset);
-        }
-      }
-    }
+  tty->print("[");
+  for (EdgeIterator i(this); i.has_next(); i.next()) {
+    PointsToNode* e = i.get();
+    tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
   }
-}
-
-// Adjust escape state after Connection Graph is built.
-void ConnectionGraph::adjust_escape_state(Node* n) {
-  PointsToNode* ptn = ptnode_adr(n->_idx);
-  assert(n->is_AddP(), "Should be called for AddP nodes only");
-  // Search for objects which are not scalar replaceable
-  // and mark them to propagate the state to referenced objects.
-  //
-
-  int offset = ptn->offset();
-  Node* base = get_addp_base(n);
-  VectorSet* ptset = PointsTo(base);
-  int ptset_size = ptset->Size();
-
-  // An object is not scalar replaceable if the field which may point
-  // to it has unknown offset (unknown element of an array of objects).
-  //
-
-  if (offset == Type::OffsetBot) {
-    uint e_cnt = ptn->edge_count();
-    for (uint ei = 0; ei < e_cnt; ei++) {
-      uint npi = ptn->edge_target(ei);
-      ptnode_adr(npi)->set_scalar_replaceable(false);
-    }
-  }
-
-  // Currently an object is not scalar replaceable if a LoadStore node
-  // access its field since the field value is unknown after it.
-  //
-  bool has_LoadStore = false;
-  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-    Node *use = n->fast_out(i);
-    if (use->is_LoadStore()) {
-      has_LoadStore = true;
-      break;
-    }
-  }
-  // An object is not scalar replaceable if the address points
-  // to unknown field (unknown element for arrays, offset is OffsetBot).
-  //
-  // Or the address may point to more than one object. This may produce
-  // a false positive result (set not scalar replaceable)
-  // since the flow-insensitive escape analysis can't separate
-  // the case when stores overwrite the field's value from the case
-  // when stores happen on different control branches.
-  //
-  // Note: it will disable scalar replacement in some cases:
-  //
-  //    Point p[] = new Point[1];
-  //    p[0] = new Point(); // Will not be scalar replaced
-  //
-  // but it will save us from incorrect optimizations in the next cases:
-  //
-  //    Point p[] = new Point[1];
-  //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
-  //
-  if (ptset_size > 1 || ptset_size != 0 &&
-      (has_LoadStore || offset == Type::OffsetBot)) {
-    for( VectorSetI j(ptset); j.test(); ++j ) {
-      ptnode_adr(j.elem)->set_scalar_replaceable(false);
-    }
-  }
-}
-
-// Propagate escape states to referenced nodes.
-bool ConnectionGraph::propagate_escape_state(GrowableArray<int>* cg_worklist,
-                                             GrowableArray<uint>* worklist,
-                                             PointsToNode::EscapeState esc_state) {
-  bool has_java_obj = false;
-
-  // push all nodes with the same escape state on the worklist
-  uint cg_length = cg_worklist->length();
-  for (uint next = 0; next < cg_length; ++next) {
-    int nk = cg_worklist->at(next);
-    if (ptnode_adr(nk)->escape_state() == esc_state)
-      worklist->push(nk);
-  }
-  // mark all reachable nodes
-  while (worklist->length() > 0) {
-    int pt = worklist->pop();
-    PointsToNode* ptn = ptnode_adr(pt);
-    if (ptn->node_type() == PointsToNode::JavaObject &&
-        !is_null_ptr(pt)) {
-      has_java_obj = true;
-      if (esc_state > PointsToNode::NoEscape) {
-        // fields values are unknown if object escapes
-        add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
-      }
+  tty->print(" [");
+  for (UseIterator i(this); i.has_next(); i.next()) {
+    PointsToNode* u = i.get();
+    bool is_base = false;
+    if (PointsToNode::is_base_use(u)) {
+      is_base = true;
+      u = PointsToNode::get_use_node(u)->as_Field();
     }
-    uint e_cnt = ptn->edge_count();
-    for (uint ei = 0; ei < e_cnt; ei++) {
-      uint npi = ptn->edge_target(ei);
-      if (is_null_ptr(npi))
-        continue;
-      PointsToNode *np = ptnode_adr(npi);
-      if (np->escape_state() < esc_state) {
-        set_escape_state(npi, esc_state);
-        worklist->push(npi);
-      }
-    }
-  }
-  // Has non-escaping Java objects
-  return has_java_obj && (esc_state < PointsToNode::GlobalEscape);
-}
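The removed propagate_escape_state() pushes an escape state from each marked node to everything reachable from it in the connection graph. A hypothetical Java illustration of why the transitive step is needed (not part of this change):

    // Hypothetical example: storing 'box' into a static field makes it
    // GlobalEscape; propagation then marks the object referenced by its element
    // as GlobalEscape as well, even though that object is never stored globally.
    class PropagateExample {
        static Object SINK;
        static void publish() {
            Object[] box = new Object[1];
            box[0] = new Object();   // escapes only transitively, via 'box'
            SINK = box;              // 'box' itself is GlobalEscape
        }
    }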
-
-// Optimize objects compare.
-Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
-  assert(OptimizePtrCompare, "sanity");
-  // Clone returned Set since PointsTo() returns pointer
-  // to the same structure ConnectionGraph.pt_ptset.
-  VectorSet ptset1 = *PointsTo(n->in(1));
-  VectorSet ptset2 = *PointsTo(n->in(2));
-
-  // Check simple cases first.
-  if (ptset1.Size() == 1) {
-    uint pt1 = ptset1.getelem();
-    PointsToNode* ptn1 = ptnode_adr(pt1);
-    if (ptn1->escape_state() == PointsToNode::NoEscape) {
-      if (ptset2.Size() == 1 && ptset2.getelem() == pt1) {
-        // Comparing the same non-escaping object.
-        return _pcmp_eq;
-      }
-      Node* obj = ptn1->_node;
-      // Comparing a non-escaping allocation.
-      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
-          !ptset2.test(pt1)) {
-        return _pcmp_neq; // This includes nullness check.
-      }
-    }
-  } else if (ptset2.Size() == 1) {
-    uint pt2 = ptset2.getelem();
-    PointsToNode* ptn2 = ptnode_adr(pt2);
-    if (ptn2->escape_state() == PointsToNode::NoEscape) {
-      Node* obj = ptn2->_node;
-      // Comparing a non-escaping allocation.
-      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
-          !ptset1.test(pt2)) {
-        return _pcmp_neq; // This includes nullness check.
-      }
-    }
+    tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
   }
-
-  if (!ptset1.disjoint(ptset2)) {
-    return NULL; // Sets are not disjoint
-  }
-
-  // Sets are disjoint.
-  bool set1_has_unknown_ptr = ptset1.test(_phantom_object) != 0;
-  bool set2_has_unknown_ptr = ptset2.test(_phantom_object) != 0;
-  bool set1_has_null_ptr   = (ptset1.test(_oop_null) | ptset1.test(_noop_null)) != 0;
-  bool set2_has_null_ptr   = (ptset2.test(_oop_null) | ptset2.test(_noop_null)) != 0;
-
-  if (set1_has_unknown_ptr && set2_has_null_ptr ||
-      set2_has_unknown_ptr && set1_has_null_ptr) {
-    // Check nullness of unknown object.
-    return NULL;
-  }
-
-  // Disjointness by itself is not sufficient since
-  // alias analysis is not complete for escaped objects.
-  // Disjoint sets are definitely unrelated only when
-  // at least one set has only non-escaping objects.
-  if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
-    bool has_only_non_escaping_alloc = true;
-    for (VectorSetI i(&ptset1); i.test(); ++i) {
-      uint pt = i.elem;
-      PointsToNode* ptn = ptnode_adr(pt);
-      Node* obj = ptn->_node;
-      if (ptn->escape_state() != PointsToNode::NoEscape ||
-          !(obj->is_Allocate() || obj->is_CallStaticJava())) {
-        has_only_non_escaping_alloc = false;
-        break;
-      }
-    }
-    if (has_only_non_escaping_alloc) {
-      return _pcmp_neq;
-    }
-  }
-  if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
-    bool has_only_non_escaping_alloc = true;
-    for (VectorSetI i(&ptset2); i.test(); ++i) {
-      uint pt = i.elem;
-      PointsToNode* ptn = ptnode_adr(pt);
-      Node* obj = ptn->_node;
-      if (ptn->escape_state() != PointsToNode::NoEscape ||
-          !(obj->is_Allocate() || obj->is_CallStaticJava())) {
-        has_only_non_escaping_alloc = false;
-        break;
-      }
-    }
-    if (has_only_non_escaping_alloc) {
-      return _pcmp_neq;
-    }
-  }
-  return NULL;
+  tty->print(" ]]  ");
+  if (_node == NULL)
+    tty->print_cr("<null>");
+  else
+    _node->dump();
 }
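The removed optimize_ptr_compare() folds a CmpP/CmpN when the two points-to sets are disjoint and at least one side contains only non-escaping allocations. A hypothetical Java case it can constant-fold (illustration only, not part of this change):

    // Hypothetical example: both operands are distinct NoEscape allocations with
    // disjoint points-to sets, so the pointer compare folds to "not equal".
    class PtrCompareExample {
        static boolean alwaysFalse() {
            Object a = new Object();
            Object b = new Object();
            return a == b;
        }
    }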
 
-void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
-    bool is_arraycopy = false;
-    switch (call->Opcode()) {
-#ifdef ASSERT
-    case Op_Allocate:
-    case Op_AllocateArray:
-    case Op_Lock:
-    case Op_Unlock:
-      assert(false, "should be done already");
-      break;
-#endif
-    case Op_CallLeafNoFP:
-      is_arraycopy = (call->as_CallLeaf()->_name != NULL &&
-                      strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);
-      // fall through
-    case Op_CallLeaf:
-    {
-      // Stub calls: objects do not escape but they are not scalar replaceable.
-      // Adjust escape state for outgoing arguments.
-      const TypeTuple * d = call->tf()->domain();
-      bool src_has_oops = false;
-      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
-        const Type* at = d->field_at(i);
-        Node *arg = call->in(i)->uncast();
-        const Type *aat = phase->type(arg);
-        PointsToNode::EscapeState arg_esc = ptnode_adr(arg->_idx)->escape_state();
-        if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
-            (is_arraycopy || arg_esc < PointsToNode::ArgEscape)) {
-
-          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
-                 aat->isa_ptr() != NULL, "expecting an Ptr");
-          bool arg_has_oops = aat->isa_oopptr() &&
-                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
-                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
-          if (i == TypeFunc::Parms) {
-            src_has_oops = arg_has_oops;
-          }
-          //
-          // src or dst could be j.l.Object when other is basic type array:
-          //
-          //   arraycopy(char[],0,Object*,0,size);
-          //   arraycopy(Object*,0,char[],0,size);
-          //
-          // Don't add edges from dst's fields in such cases.
-          //
-          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
-                                       arg_has_oops && (i > TypeFunc::Parms);
-#ifdef ASSERT
-          if (!(is_arraycopy ||
-                call->as_CallLeaf()->_name != NULL &&
-                (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
-                 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
-          ) {
-            call->dump();
-            assert(false, "EA: unexpected CallLeaf");
-          }
-#endif
-          // Always process arraycopy's destination object since
-          // we need to add all possible edges to references in
-          // source object.
-          if (arg_esc >= PointsToNode::ArgEscape &&
-              !arg_is_arraycopy_dest) {
-            continue;
-          }
-          set_escape_state(arg->_idx, PointsToNode::ArgEscape);
-          Node* arg_base = arg;
-          if (arg->is_AddP()) {
-            //
-            // The inline_native_clone() case when the arraycopy stub is called
-            // after the allocation before Initialize and CheckCastPP nodes.
-            // Or normal arraycopy for object arrays case.
-            //
-            // Set AddP's base (Allocate) as not scalar replaceable since
-            // pointer to the base (with offset) is passed as argument.
-            //
-            arg_base = get_addp_base(arg);
-          }
-          VectorSet argset = *PointsTo(arg_base); // Clone set
-          for( VectorSetI j(&argset); j.test(); ++j ) {
-            uint pd = j.elem; // Destination object
-            set_escape_state(pd, PointsToNode::ArgEscape);
-
-            if (arg_is_arraycopy_dest) {
-              PointsToNode* ptd = ptnode_adr(pd);
-              // Conservatively reference an unknown object since
-              // not all source's fields/elements may be known.
-              add_edge_from_fields(pd, _phantom_object, Type::OffsetBot);
-
-              Node *src = call->in(TypeFunc::Parms)->uncast();
-              Node* src_base = src;
-              if (src->is_AddP()) {
-                src_base  = get_addp_base(src);
-              }
-              // Create edges from destination's fields to
-              // everything known source's fields could point to.
-              for( VectorSetI s(PointsTo(src_base)); s.test(); ++s ) {
-                uint ps = s.elem;
-                bool has_bottom_offset = false;
-                for (uint fd = 0; fd < ptd->edge_count(); fd++) {
-                  assert(ptd->edge_type(fd) == PointsToNode::FieldEdge, "expecting a field edge");
-                  int fdi = ptd->edge_target(fd);
-                  PointsToNode* pfd = ptnode_adr(fdi);
-                  int offset = pfd->offset();
-                  if (offset == Type::OffsetBot)
-                    has_bottom_offset = true;
-                  assert(offset != -1, "offset should be set");
-                  add_deferred_edge_to_fields(fdi, ps, offset);
-                }
-                // Destination object may not have access (no field edge)
-                // to fields which are accessed in source object.
-                // As a result, no edges will be created to those source
-                // fields and the escape state of the destination object will
-                // not be propagated to those fields.
-                //
-                // Mark source object as global escape except in
-                // the case with Type::OffsetBot field (which is
-                // common case for array elements access) when
-                // edges are created to all source's fields.
-                if (!has_bottom_offset) {
-                  set_escape_state(ps, PointsToNode::GlobalEscape);
-                }
-              }
-            }
-          }
-        }
-      }
-      break;
-    }
-
-    case Op_CallStaticJava:
-    // For a static call, we know exactly what method is being called.
-    // Use bytecode estimator to record the call's escape effects
-    {
-      ciMethod *meth = call->as_CallJava()->method();
-      BCEscapeAnalyzer *call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
-      // fall-through if not a Java method or no analyzer information
-      if (call_analyzer != NULL) {
-        const TypeTuple * d = call->tf()->domain();
-        bool copy_dependencies = false;
-        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
-          const Type* at = d->field_at(i);
-          int k = i - TypeFunc::Parms;
-          Node *arg = call->in(i)->uncast();
-
-          if (at->isa_oopptr() != NULL &&
-              ptnode_adr(arg->_idx)->escape_state() < PointsToNode::GlobalEscape) {
-
-            bool global_escapes = false;
-            bool fields_escapes = false;
-            if (!call_analyzer->is_arg_stack(k)) {
-              // The argument global escapes, mark everything it could point to
-              set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
-              global_escapes = true;
-            } else {
-              if (!call_analyzer->is_arg_local(k)) {
-                // The argument itself doesn't escape, but any fields might
-                fields_escapes = true;
-              }
-              set_escape_state(arg->_idx, PointsToNode::ArgEscape);
-              copy_dependencies = true;
-            }
-
-            for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
-              uint pt = j.elem;
-              if (global_escapes) {
-                // The argument global escapes, mark everything it could point to
-                set_escape_state(pt, PointsToNode::GlobalEscape);
-                add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
-              } else {
-                set_escape_state(pt, PointsToNode::ArgEscape);
-                if (fields_escapes) {
-                  // The argument itself doesn't escape, but any fields might.
-                  // Use OffsetTop to indicate such case.
-                  add_edge_from_fields(pt, _phantom_object, Type::OffsetTop);
-                }
-              }
-            }
-          }
-        }
-        if (copy_dependencies)
-          call_analyzer->copy_dependencies(_compile->dependencies());
-        break;
-      }
-    }
-
-    default:
-    // Fall-through here if not a Java method or no analyzer information
-    // or some other type of call, assume the worst case: all arguments
-    // globally escape.
-    {
-      // adjust escape state for  outgoing arguments
-      const TypeTuple * d = call->tf()->domain();
-      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
-        const Type* at = d->field_at(i);
-        if (at->isa_oopptr() != NULL) {
-          Node *arg = call->in(i)->uncast();
-          set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
-          for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
-            uint pt = j.elem;
-            set_escape_state(pt, PointsToNode::GlobalEscape);
-            add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
-          }
-        }
-      }
-    }
-  }
-}
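For arraycopy-style leaf calls, the removed process_call_arguments() marks both arguments ArgEscape and wires the destination's fields to everything the source's elements may reference. A hypothetical Java shape that reaches this path (illustration only, not part of this change; System.arraycopy on object arrays is typically lowered to an arraycopy stub handled by the Op_CallLeafNoFP case):

    // Hypothetical example: both arrays become ArgEscape; the elements of 'dst'
    // conservatively get edges to whatever the elements of 'src' may point to.
    class ArraycopyExample {
        static Object[] copyRefs(Object[] src) {
            Object[] dst = new Object[src.length];
            System.arraycopy(src, 0, dst, 0, src.length);
            return dst;
        }
    }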
-void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) {
-  CallNode   *call = resproj->in(0)->as_Call();
-  uint    call_idx = call->_idx;
-  uint resproj_idx = resproj->_idx;
-
-  switch (call->Opcode()) {
-    case Op_Allocate:
-    {
-      Node *k = call->in(AllocateNode::KlassNode);
-      const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
-      assert(kt != NULL, "TypeKlassPtr  required.");
-      ciKlass* cik = kt->klass();
-
-      PointsToNode::EscapeState es;
-      uint edge_to;
-      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
-         !cik->is_instance_klass() || // StressReflectiveCode
-          cik->as_instance_klass()->has_finalizer()) {
-        es = PointsToNode::GlobalEscape;
-        edge_to = _phantom_object; // Could not be worse
-      } else {
-        es = PointsToNode::NoEscape;
-        edge_to = call_idx;
-        assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity");
-      }
-      set_escape_state(call_idx, es);
-      add_pointsto_edge(resproj_idx, edge_to);
-      _processed.set(resproj_idx);
-      break;
-    }
-
-    case Op_AllocateArray:
-    {
-
-      Node *k = call->in(AllocateNode::KlassNode);
-      const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
-      assert(kt != NULL, "TypeKlassPtr  required.");
-      ciKlass* cik = kt->klass();
-
-      PointsToNode::EscapeState es;
-      uint edge_to;
-      if (!cik->is_array_klass()) { // StressReflectiveCode
-        es = PointsToNode::GlobalEscape;
-        edge_to = _phantom_object;
-      } else {
-        es = PointsToNode::NoEscape;
-        edge_to = call_idx;
-        assert(ptnode_adr(call_idx)->scalar_replaceable(), "sanity");
-        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
-        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
-          // Not scalar replaceable if the length is not constant or too big.
-          ptnode_adr(call_idx)->set_scalar_replaceable(false);
-        }
-      }
-      set_escape_state(call_idx, es);
-      add_pointsto_edge(resproj_idx, edge_to);
-      _processed.set(resproj_idx);
-      break;
-    }
-
-    case Op_CallStaticJava:
-    // For a static call, we know exactly what method is being called.
-    // Use bytecode estimator to record whether the call's return value escapes
-    {
-      bool done = true;
-      const TypeTuple *r = call->tf()->range();
-      const Type* ret_type = NULL;
-
-      if (r->cnt() > TypeFunc::Parms)
-        ret_type = r->field_at(TypeFunc::Parms);
-
-      // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
-      //        _multianewarray functions return a TypeRawPtr.
-      if (ret_type == NULL || ret_type->isa_ptr() == NULL) {
-        _processed.set(resproj_idx);
-        break;  // doesn't return a pointer type
-      }
-      ciMethod *meth = call->as_CallJava()->method();
-      const TypeTuple * d = call->tf()->domain();
-      if (meth == NULL) {
-        // not a Java method, assume global escape
-        set_escape_state(call_idx, PointsToNode::GlobalEscape);
-        add_pointsto_edge(resproj_idx, _phantom_object);
-      } else {
-        BCEscapeAnalyzer *call_analyzer = meth->get_bcea();
-        bool copy_dependencies = false;
-
-        if (call_analyzer->is_return_allocated()) {
-          // Returns a newly allocated unescaped object, simply
-          // update dependency information.
-          // Mark it as NoEscape so that objects referenced by
-          // its fields will be marked as NoEscape at least.
-          set_escape_state(call_idx, PointsToNode::NoEscape);
-          ptnode_adr(call_idx)->set_scalar_replaceable(false);
-          // Fields values are unknown
-          add_edge_from_fields(call_idx, _phantom_object, Type::OffsetBot);
-          add_pointsto_edge(resproj_idx, call_idx);
-          copy_dependencies = true;
-        } else {
-          // determine whether any arguments are returned
-          set_escape_state(call_idx, PointsToNode::ArgEscape);
-          bool ret_arg = false;
-          for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
-            const Type* at = d->field_at(i);
-            if (at->isa_oopptr() != NULL) {
-              Node *arg = call->in(i)->uncast();
-
-              if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
-                ret_arg = true;
-                PointsToNode *arg_esp = ptnode_adr(arg->_idx);
-                if (arg_esp->node_type() == PointsToNode::UnknownType)
-                  done = false;
-                else if (arg_esp->node_type() == PointsToNode::JavaObject)
-                  add_pointsto_edge(resproj_idx, arg->_idx);
-                else
-                  add_deferred_edge(resproj_idx, arg->_idx);
-              }
-            }
-          }
-          if (done) {
-            copy_dependencies = true;
-            // is_return_local() is true when only arguments are returned.
-            if (!ret_arg || !call_analyzer->is_return_local()) {
-              // Returns unknown object.
-              add_pointsto_edge(resproj_idx, _phantom_object);
-            }
-          }
-        }
-        if (copy_dependencies)
-          call_analyzer->copy_dependencies(_compile->dependencies());
-      }
-      if (done)
-        _processed.set(resproj_idx);
-      break;
-    }
-
-    default:
-    // Some other type of call, assume the worst case that the
-    // returned value, if any, globally escapes.
-    {
-      const TypeTuple *r = call->tf()->range();
-      if (r->cnt() > TypeFunc::Parms) {
-        const Type* ret_type = r->field_at(TypeFunc::Parms);
-
-        // Note:  we use isa_ptr() instead of isa_oopptr()  here because the
-        //        _multianewarray functions return a TypeRawPtr.
-        if (ret_type->isa_ptr() != NULL) {
-          set_escape_state(call_idx, PointsToNode::GlobalEscape);
-          add_pointsto_edge(resproj_idx, _phantom_object);
-        }
-      }
-      _processed.set(resproj_idx);
-    }
-  }
-}
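In the removed Op_AllocateArray case above, the result stays scalar replaceable only when the array length is a non-negative compile-time constant no larger than EliminateAllocationArraySizeLimit. A hypothetical Java contrast (illustration only, not part of this change):

    // Hypothetical example: 'small' has a constant length and remains a
    // scalar-replacement candidate; 'big' has a non-constant length, so its
    // allocation is marked not scalar replaceable even though it never escapes.
    class ArrayLengthExample {
        static int constantVsVariable(int n) {
            int[] small = new int[2];
            int[] big   = new int[n];
            small[0] = 1;
            big[0]   = 2;
            return small[0] + big[0];
        }
    }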
-
-// Populate Connection Graph with Ideal nodes and create simple
-// connection graph edges (do not need to check the node_type of inputs
-// or to call PointsTo() to walk the connection graph).
-void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase) {
-  if (_processed.test(n->_idx))
-    return; // No need to redefine node's state.
-
-  if (n->is_Call()) {
-    // Arguments to allocation and locking don't escape.
-    if (n->is_Allocate()) {
-      add_node(n, PointsToNode::JavaObject, PointsToNode::UnknownEscape, true);
-      record_for_optimizer(n);
-    } else if (n->is_Lock() || n->is_Unlock()) {
-      // Put Lock and Unlock nodes on IGVN worklist to process them during
-      // the first IGVN optimization when escape information is still available.
-      record_for_optimizer(n);
-      _processed.set(n->_idx);
-    } else {
-      // Don't mark as processed since call's arguments have to be processed.
-      PointsToNode::NodeType nt = PointsToNode::UnknownType;
-      PointsToNode::EscapeState es = PointsToNode::UnknownEscape;
-
-      // Check if a call returns an object.
-      const TypeTuple *r = n->as_Call()->tf()->range();
-      if (r->cnt() > TypeFunc::Parms &&
-          r->field_at(TypeFunc::Parms)->isa_ptr() &&
-          n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
-        nt = PointsToNode::JavaObject;
-        if (!n->is_CallStaticJava()) {
-          // Since the called method is statically unknown, assume
-          // the worst case that the returned value globally escapes.
-          es = PointsToNode::GlobalEscape;
-        }
-      }
-      add_node(n, nt, es, false);
-    }
-    return;
-  }
-
-  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
-  // ThreadLocal has RawPtr type.
-  switch (n->Opcode()) {
-    case Op_AddP:
-    {
-      add_node(n, PointsToNode::Field, PointsToNode::UnknownEscape, false);
-      break;
-    }
-    case Op_CastX2P:
-    { // "Unsafe" memory access.
-      add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
-      break;
-    }
-    case Op_CastPP:
-    case Op_CheckCastPP:
-    case Op_EncodeP:
-    case Op_DecodeN:
-    {
-      add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
-      int ti = n->in(1)->_idx;
-      PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
-      if (nt == PointsToNode::UnknownType) {
-        _delayed_worklist.push(n); // Process it later.
-        break;
-      } else if (nt == PointsToNode::JavaObject) {
-        add_pointsto_edge(n->_idx, ti);
-      } else {
-        add_deferred_edge(n->_idx, ti);
-      }
-      _processed.set(n->_idx);
-      break;
-    }
-    case Op_ConP:
-    {
-      // assume all pointer constants globally escape except for null
-      PointsToNode::EscapeState es;
-      if (phase->type(n) == TypePtr::NULL_PTR)
-        es = PointsToNode::NoEscape;
-      else
-        es = PointsToNode::GlobalEscape;
-
-      add_node(n, PointsToNode::JavaObject, es, true);
-      break;
-    }
-    case Op_ConN:
-    {
-      // assume all narrow oop constants globally escape except for null
-      PointsToNode::EscapeState es;
-      if (phase->type(n) == TypeNarrowOop::NULL_PTR)
-        es = PointsToNode::NoEscape;
-      else
-        es = PointsToNode::GlobalEscape;
-
-      add_node(n, PointsToNode::JavaObject, es, true);
-      break;
-    }
-    case Op_CreateEx:
-    {
-      // assume that all exception objects globally escape
-      add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
-      break;
-    }
-    case Op_LoadKlass:
-    case Op_LoadNKlass:
-    {
-      add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
-      break;
-    }
-    case Op_LoadP:
-    case Op_LoadN:
-    {
-      const Type *t = phase->type(n);
-      if (t->make_ptr() == NULL) {
-        _processed.set(n->_idx);
-        return;
-      }
-      add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
-      break;
-    }
-    case Op_Parm:
-    {
-      _processed.set(n->_idx); // No need to redefine its state.
-      uint con = n->as_Proj()->_con;
-      if (con < TypeFunc::Parms)
-        return;
-      const Type *t = n->in(0)->as_Start()->_domain->field_at(con);
-      if (t->isa_ptr() == NULL)
-        return;
-      // We have to assume all input parameters globally escape
-      // (Note: passing 'false' since _processed is already set).
-      add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false);
-      break;
-    }
-    case Op_PartialSubtypeCheck:
-    { // Produces Null or notNull and is used in CmpP.
-      add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
-      break;
-    }
-    case Op_Phi:
-    {
-      const Type *t = n->as_Phi()->type();
-      if (t->make_ptr() == NULL) {
-        // nothing to do if not an oop or narrow oop
-        _processed.set(n->_idx);
-        return;
-      }
-      add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
-      uint i;
-      for (i = 1; i < n->req() ; i++) {
-        Node* in = n->in(i);
-        if (in == NULL)
-          continue;  // ignore NULL
-        in = in->uncast();
-        if (in->is_top() || in == n)
-          continue;  // ignore top or inputs which go back this node
-        int ti = in->_idx;
-        PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
-        if (nt == PointsToNode::UnknownType) {
-          break;
-        } else if (nt == PointsToNode::JavaObject) {
-          add_pointsto_edge(n->_idx, ti);
-        } else {
-          add_deferred_edge(n->_idx, ti);
-        }
-      }
-      if (i >= n->req())
-        _processed.set(n->_idx);
-      else
-        _delayed_worklist.push(n);
-      break;
-    }
-    case Op_Proj:
-    {
-      // we are only interested in the oop result projection from a call
-      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
-        const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
-        assert(r->cnt() > TypeFunc::Parms, "sanity");
-        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
-          add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
-          int ti = n->in(0)->_idx;
-          // The call may not be registered yet (since not all its inputs are registered)
-          // if this is the projection from backbranch edge of Phi.
-          if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) {
-            process_call_result(n->as_Proj(), phase);
-          }
-          if (!_processed.test(n->_idx)) {
-            // The call's result may need to be processed later if the call
-            // returns its argument and the argument is not processed yet.
-            _delayed_worklist.push(n);
-          }
-          break;
-        }
-      }
-      _processed.set(n->_idx);
-      break;
-    }
-    case Op_Return:
-    {
-      if( n->req() > TypeFunc::Parms &&
-          phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
-        // Treat Return value as LocalVar with GlobalEscape escape state.
-        add_node(n, PointsToNode::LocalVar, PointsToNode::GlobalEscape, false);
-        int ti = n->in(TypeFunc::Parms)->_idx;
-        PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
-        if (nt == PointsToNode::UnknownType) {
-          _delayed_worklist.push(n); // Process it later.
-          break;
-        } else if (nt == PointsToNode::JavaObject) {
-          add_pointsto_edge(n->_idx, ti);
-        } else {
-          add_deferred_edge(n->_idx, ti);
-        }
-      }
-      _processed.set(n->_idx);
-      break;
-    }
-    case Op_StoreP:
-    case Op_StoreN:
-    {
-      const Type *adr_type = phase->type(n->in(MemNode::Address));
-      adr_type = adr_type->make_ptr();
-      if (adr_type->isa_oopptr()) {
-        add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
-      } else {
-        Node* adr = n->in(MemNode::Address);
-        if (adr->is_AddP() && phase->type(adr) == TypeRawPtr::NOTNULL &&
-            adr->in(AddPNode::Address)->is_Proj() &&
-            adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
-          add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
-          // We are computing a raw address for a store captured
-          // by an Initialize compute an appropriate address type.
-          int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
-          assert(offs != Type::OffsetBot, "offset must be a constant");
-        } else {
-          _processed.set(n->_idx);
-          return;
-        }
-      }
-      break;
-    }
-    case Op_StorePConditional:
-    case Op_CompareAndSwapP:
-    case Op_CompareAndSwapN:
-    {
-      const Type *adr_type = phase->type(n->in(MemNode::Address));
-      adr_type = adr_type->make_ptr();
-      if (adr_type->isa_oopptr()) {
-        add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
-      } else {
-        _processed.set(n->_idx);
-        return;
-      }
-      break;
-    }
-    case Op_AryEq:
-    case Op_StrComp:
-    case Op_StrEquals:
-    case Op_StrIndexOf:
-    {
-      // char[] arrays passed to string intrinsics are not scalar replaceable.
-      add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
-      break;
-    }
-    case Op_ThreadLocal:
-    {
-      add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
-      break;
-    }
-    default:
-      ;
-      // nothing to do
-  }
-  return;
-}
-
-void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
-  uint n_idx = n->_idx;
-  assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered");
-
-  // Don't set processed bit for AddP, LoadP, StoreP since
-  // they may need more then one pass to process.
-  // Also don't mark as processed Call nodes since their
-  // arguments may need more then one pass to process.
-  if (_processed.test(n_idx))
-    return; // No need to redefine node's state.
-
-  if (n->is_Call()) {
-    CallNode *call = n->as_Call();
-    process_call_arguments(call, phase);
-    return;
-  }
-
-  switch (n->Opcode()) {
-    case Op_AddP:
-    {
-      Node *base = get_addp_base(n);
-      int offset = address_offset(n, phase);
-      // Create a field edge to this node from everything base could point to.
-      for( VectorSetI i(PointsTo(base)); i.test(); ++i ) {
-        uint pt = i.elem;
-        add_field_edge(pt, n_idx, offset);
-      }
-      break;
-    }
-    case Op_CastX2P:
-    {
-      assert(false, "Op_CastX2P");
-      break;
-    }
-    case Op_CastPP:
-    case Op_CheckCastPP:
-    case Op_EncodeP:
-    case Op_DecodeN:
-    {
-      int ti = n->in(1)->_idx;
-      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered");
-      if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
-        add_pointsto_edge(n_idx, ti);
-      } else {
-        add_deferred_edge(n_idx, ti);
-      }
-      _processed.set(n_idx);
-      break;
-    }
-    case Op_ConP:
-    {
-      assert(false, "Op_ConP");
-      break;
-    }
-    case Op_ConN:
-    {
-      assert(false, "Op_ConN");
-      break;
-    }
-    case Op_CreateEx:
-    {
-      assert(false, "Op_CreateEx");
-      break;
-    }
-    case Op_LoadKlass:
-    case Op_LoadNKlass:
-    {
-      assert(false, "Op_LoadKlass");
-      break;
-    }
-    case Op_LoadP:
-    case Op_LoadN:
-    {
-      const Type *t = phase->type(n);
-#ifdef ASSERT
-      if (t->make_ptr() == NULL)
-        assert(false, "Op_LoadP");
-#endif
-
-      Node* adr = n->in(MemNode::Address)->uncast();
-      Node* adr_base;
-      if (adr->is_AddP()) {
-        adr_base = get_addp_base(adr);
-      } else {
-        adr_base = adr;
-      }
-
-      // For everything "adr_base" could point to, create a deferred edge from
-      // this node to each field with the same offset.
-      int offset = address_offset(adr, phase);
-      for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) {
-        uint pt = i.elem;
-        if (adr->is_AddP()) {
-          // Add field edge if it is missing.
-          add_field_edge(pt, adr->_idx, offset);
-        }
-        add_deferred_edge_to_fields(n_idx, pt, offset);
-      }
-      break;
-    }
-    case Op_Parm:
-    {
-      assert(false, "Op_Parm");
-      break;
-    }
-    case Op_PartialSubtypeCheck:
-    {
-      assert(false, "Op_PartialSubtypeCheck");
-      break;
-    }
-    case Op_Phi:
-    {
-#ifdef ASSERT
-      const Type *t = n->as_Phi()->type();
-      if (t->make_ptr() == NULL)
-        assert(false, "Op_Phi");
-#endif
-      for (uint i = 1; i < n->req() ; i++) {
-        Node* in = n->in(i);
-        if (in == NULL)
-          continue;  // ignore NULL
-        in = in->uncast();
-        if (in->is_top() || in == n)
-          continue;  // ignore top or inputs which go back this node
-        int ti = in->_idx;
-        PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
-        assert(nt != PointsToNode::UnknownType, "all nodes should be known");
-        if (nt == PointsToNode::JavaObject) {
-          add_pointsto_edge(n_idx, ti);
-        } else {
-          add_deferred_edge(n_idx, ti);
-        }
-      }
-      _processed.set(n_idx);
-      break;
-    }
-    case Op_Proj:
-    {
-      // we are only interested in the oop result projection from a call
-      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
-        assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType,
-               "all nodes should be registered");
-        const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
-        assert(r->cnt() > TypeFunc::Parms, "sanity");
-        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
-          process_call_result(n->as_Proj(), phase);
-          assert(_processed.test(n_idx), "all call results should be processed");
-          break;
-        }
-      }
-      assert(false, "Op_Proj");
-      break;
-    }
-    case Op_Return:
-    {
-#ifdef ASSERT
-      if( n->req() <= TypeFunc::Parms ||
-          !phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
-        assert(false, "Op_Return");
-      }
-#endif
-      int ti = n->in(TypeFunc::Parms)->_idx;
-      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered");
-      if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
-        add_pointsto_edge(n_idx, ti);
-      } else {
-        add_deferred_edge(n_idx, ti);
-      }
-      _processed.set(n_idx);
-      break;
-    }
-    case Op_StoreP:
-    case Op_StoreN:
-    case Op_StorePConditional:
-    case Op_CompareAndSwapP:
-    case Op_CompareAndSwapN:
-    {
-      Node *adr = n->in(MemNode::Address);
-      const Type *adr_type = phase->type(adr)->make_ptr();
-#ifdef ASSERT
-      if (!adr_type->isa_oopptr())
-        assert(phase->type(adr) == TypeRawPtr::NOTNULL, "Op_StoreP");
-#endif
-
-      assert(adr->is_AddP(), "expecting an AddP");
-      Node *adr_base = get_addp_base(adr);
-      Node *val = n->in(MemNode::ValueIn)->uncast();
-      int offset = address_offset(adr, phase);
-      // For everything "adr_base" could point to, create a deferred edge
-      // to "val" from each field with the same offset.
-      for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) {
-        uint pt = i.elem;
-        // Add field edge if it is missing.
-        add_field_edge(pt, adr->_idx, offset);
-        add_edge_from_fields(pt, val->_idx, offset);
-      }
-      break;
-    }
-    case Op_AryEq:
-    case Op_StrComp:
-    case Op_StrEquals:
-    case Op_StrIndexOf:
-    {
-      // char[] arrays passed to string intrinsic do not escape but
-      // they are not scalar replaceable. Adjust escape state for them.
-      // Start from in(2) edge since in(1) is memory edge.
-      for (uint i = 2; i < n->req(); i++) {
-        Node* adr = n->in(i)->uncast();
-        const Type *at = phase->type(adr);
-        if (!adr->is_top() && at->isa_ptr()) {
-          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
-                 at->isa_ptr() != NULL, "expecting an Ptr");
-          if (adr->is_AddP()) {
-            adr = get_addp_base(adr);
-          }
-          // Mark as ArgEscape everything "adr" could point to.
-          set_escape_state(adr->_idx, PointsToNode::ArgEscape);
-        }
-      }
-      _processed.set(n_idx);
-      break;
-    }
-    case Op_ThreadLocal:
-    {
-      assert(false, "Op_ThreadLocal");
-      break;
-    }
-    default:
-      // This method should be called only for EA specific nodes.
-      ShouldNotReachHere();
-  }
-}
-
-#ifndef PRODUCT
-void ConnectionGraph::dump() {
+void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
   bool first = true;
-
-  uint size = nodes_size();
-  for (uint ni = 0; ni < size; ni++) {
-    PointsToNode *ptn = ptnode_adr(ni);
-    PointsToNode::NodeType ptn_type = ptn->node_type();
-
-    if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL)
+  int ptnodes_length = ptnodes_worklist.length();
+  for (int i = 0; i < ptnodes_length; i++) {
+    PointsToNode *ptn = ptnodes_worklist.at(i);
+    if (ptn == NULL || !ptn->is_JavaObject())
       continue;
-    PointsToNode::EscapeState es = escape_state(ptn->_node);
-    if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
+    PointsToNode::EscapeState es = ptn->escape_state();
+    if (ptn->ideal_node()->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
       if (first) {
         tty->cr();
         tty->print("======== Connection graph for ");
@@ -3154,22 +3133,14 @@
         tty->cr();
         first = false;
       }
-      tty->print("%6d ", ni);
       ptn->dump();
-      // Print all locals which reference this allocation
-      for (uint li = ni; li < size; li++) {
-        PointsToNode *ptn_loc = ptnode_adr(li);
-        PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type();
-        if ( ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL &&
-             ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni ) {
-          ptnode_adr(li)->dump(false);
-        }
-      }
-      if (Verbose) {
-        // Print all fields which reference this allocation
-        for (uint i = 0; i < ptn->edge_count(); i++) {
-          uint ei = ptn->edge_target(i);
-          ptnode_adr(ei)->dump(false);
+      // Print all locals and fields which reference this allocation
+      for (UseIterator j(ptn); j.has_next(); j.next()) {
+        PointsToNode* use = j.get();
+        if (use->is_LocalVar()) {
+          use->dump(Verbose);
+        } else if (Verbose) {
+          use->dump();
         }
       }
       tty->cr();
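
The reworked dump() above walks an allocation's referring nodes through its use list instead of rescanning the whole node table. A minimal sketch of that traversal pattern, using the UseIterator and type predicates declared in the new escape.hpp below (print_referring_locals is a hypothetical helper added only for illustration, not part of the patch):

#ifndef PRODUCT
// Sketch: print every LocalVar that refers to the allocation 'ptn'.
static void print_referring_locals(PointsToNode* ptn) {
  for (UseIterator i(ptn); i.has_next(); i.next()) {
    PointsToNode* use = i.get();
    if (use->is_LocalVar()) {
      use->dump();   // a local variable referencing this allocation
    }
  }
}
#endif
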
--- a/src/share/vm/opto/escape.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/escape.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -115,18 +115,36 @@
 class  CallNode;
 class  PhiNode;
 class  PhaseTransform;
+class  PointsToNode;
 class  Type;
 class  TypePtr;
 class  VectorSet;
 
-class PointsToNode {
-friend class ConnectionGraph;
+class JavaObjectNode;
+class LocalVarNode;
+class FieldNode;
+class ArraycopyNode;
+
+// ConnectionGraph nodes
+class PointsToNode : public ResourceObj {
+  GrowableArray<PointsToNode*> _edges; // List of nodes this node points to
+  GrowableArray<PointsToNode*> _uses;  // List of nodes which point to this node
+
+  const u1           _type;  // NodeType
+  u1                _flags;  // NodeFlags
+  u1               _escape;  // EscapeState of object
+  u1        _fields_escape;  // EscapeState of object's fields
+
+  Node* const        _node;  // Ideal node corresponding to this PointsTo node.
+  const int           _idx;  // Cached ideal node's _idx
+
 public:
   typedef enum {
     UnknownType = 0,
     JavaObject  = 1,
     LocalVar    = 2,
-    Field       = 3
+    Field       = 3,
+    Arraycopy   = 4
   } NodeType;
 
   typedef enum {
@@ -140,178 +158,387 @@
   } EscapeState;
 
   typedef enum {
-    UnknownEdge   = 0,
-    PointsToEdge  = 1,
-    DeferredEdge  = 2,
-    FieldEdge     = 3
-  } EdgeType;
-
-private:
-  enum {
-    EdgeMask = 3,
-    EdgeShift = 2,
-
-    INITIAL_EDGE_COUNT = 4
-  };
-
-  NodeType             _type;
-  EscapeState          _escape;
-  GrowableArray<uint>* _edges; // outgoing edges
-  Node* _node;                 // Ideal node corresponding to this PointsTo node.
-  int   _offset;               // Object fields offsets.
-  bool  _scalar_replaceable;   // Not escaped object could be replaced with scalar
-  bool  _has_unknown_ptr;      // Has edge to phantom_object
-
-public:
-  PointsToNode():
-    _type(UnknownType),
-    _escape(UnknownEscape),
-    _edges(NULL),
-    _node(NULL),
-    _offset(-1),
-    _has_unknown_ptr(false),
-    _scalar_replaceable(true) {}
+    ScalarReplaceable = 1,  // Non-escaped object can be replaced with scalars
+    PointsToUnknown   = 2,  // Has edge to phantom_object
+    ArraycopySrc      = 4,  // Has edge from Arraycopy node
+    ArraycopyDst      = 8   // Has edge to Arraycopy node
+  } NodeFlags;
 
 
-  EscapeState escape_state() const { return _escape; }
-  NodeType node_type() const { return _type;}
-  int offset() { return _offset;}
-  bool scalar_replaceable() { return _scalar_replaceable;}
-  bool has_unknown_ptr()    { return _has_unknown_ptr;}
-
-  void set_offset(int offs) { _offset = offs;}
-  void set_escape_state(EscapeState state) { _escape = state; }
-  void set_node_type(NodeType ntype) {
-    assert(_type == UnknownType || _type == ntype, "Can't change node type");
-    _type = ntype;
-  }
-  void set_scalar_replaceable(bool v) { _scalar_replaceable = v; }
-  void set_has_unknown_ptr()          { _has_unknown_ptr = true; }
-
-  // count of outgoing edges
-  uint edge_count() const { return (_edges == NULL) ? 0 : _edges->length(); }
-
-  // node index of target of outgoing edge "e"
-  uint edge_target(uint e) const {
-    assert(_edges != NULL, "valid edge index");
-    return (_edges->at(e) >> EdgeShift);
-  }
-  // type of outgoing edge "e"
-  EdgeType edge_type(uint e) const {
-    assert(_edges != NULL, "valid edge index");
-    return (EdgeType) (_edges->at(e) & EdgeMask);
+  PointsToNode(Compile *C, Node* n, EscapeState es, NodeType type):
+    _edges(C->comp_arena(), 2, 0, NULL),
+    _uses (C->comp_arena(), 2, 0, NULL),
+    _node(n),
+    _idx(n->_idx),
+    _type((u1)type),
+    _escape((u1)es),
+    _fields_escape((u1)es),
+    _flags(ScalarReplaceable) {
+    assert(n != NULL && es != UnknownEscape, "sanity");
   }
 
-  // add a edge of the specified type pointing to the specified target
-  void add_edge(uint targIdx, EdgeType et);
+  Node* ideal_node()   const { return _node; }
+  int          idx()   const { return _idx; }
+
+  bool is_JavaObject() const { return _type == (u1)JavaObject; }
+  bool is_LocalVar()   const { return _type == (u1)LocalVar; }
+  bool is_Field()      const { return _type == (u1)Field; }
+  bool is_Arraycopy()  const { return _type == (u1)Arraycopy; }
+
+  JavaObjectNode* as_JavaObject() { assert(is_JavaObject(),""); return (JavaObjectNode*)this; }
+  LocalVarNode*   as_LocalVar()   { assert(is_LocalVar(),"");   return (LocalVarNode*)this; }
+  FieldNode*      as_Field()      { assert(is_Field(),"");      return (FieldNode*)this; }
+  ArraycopyNode*  as_Arraycopy()  { assert(is_Arraycopy(),"");  return (ArraycopyNode*)this; }
+
+  EscapeState escape_state() const { return (EscapeState)_escape; }
+  void    set_escape_state(EscapeState state) { _escape = (u1)state; }
+
+  EscapeState fields_escape_state() const { return (EscapeState)_fields_escape; }
+  void    set_fields_escape_state(EscapeState state) { _fields_escape = (u1)state; }
+
+  bool     has_unknown_ptr() const { return (_flags & PointsToUnknown) != 0; }
+  void set_has_unknown_ptr()       { _flags |= PointsToUnknown; }
+
+  bool     arraycopy_src() const { return (_flags & ArraycopySrc) != 0; }
+  void set_arraycopy_src()       { _flags |= ArraycopySrc; }
+  bool     arraycopy_dst() const { return (_flags & ArraycopyDst) != 0; }
+  void set_arraycopy_dst()       { _flags |= ArraycopyDst; }
 
-  // remove an edge of the specified type pointing to the specified target
-  void remove_edge(uint targIdx, EdgeType et);
+  bool     scalar_replaceable() const { return (_flags & ScalarReplaceable) != 0;}
+  void set_scalar_replaceable(bool v) {
+    if (v)
+      _flags |= ScalarReplaceable;
+    else
+      _flags &= ~ScalarReplaceable;
+  }
+
+  int edge_count()              const { return _edges.length(); }
+  PointsToNode* edge(int e)     const { return _edges.at(e); }
+  bool add_edge(PointsToNode* edge)    { return _edges.append_if_missing(edge); }
+
+  int use_count()             const { return _uses.length(); }
+  PointsToNode* use(int e)    const { return _uses.at(e); }
+  bool add_use(PointsToNode* use)    { return _uses.append_if_missing(use); }
+
+  // Mark base edge use to distinguish from stored value edge.
+  bool add_base_use(FieldNode* use) { return _uses.append_if_missing((PointsToNode*)((intptr_t)use + 1)); }
+  static bool is_base_use(PointsToNode* use) { return (((intptr_t)use) & 1); }
+  static PointsToNode* get_use_node(PointsToNode* use) { return (PointsToNode*)(((intptr_t)use) & ~1); }
+
+  // Return true if this node points to specified node or nodes it points to.
+  bool points_to(JavaObjectNode* ptn) const;
+
+  // Return true if this node points only to non-escaping allocations.
+  bool non_escaping_allocation();
+
+  // Return true if one node points to the other.
+  bool meet(PointsToNode* ptn);
 
 #ifndef PRODUCT
+  NodeType node_type() const { return (NodeType)_type;}
   void dump(bool print_state=true) const;
 #endif
 
 };
 
+class LocalVarNode: public PointsToNode {
+public:
+  LocalVarNode(Compile *C, Node* n, EscapeState es):
+    PointsToNode(C, n, es, LocalVar) {}
+};
+
+class JavaObjectNode: public PointsToNode {
+public:
+  JavaObjectNode(Compile *C, Node* n, EscapeState es):
+    PointsToNode(C, n, es, JavaObject) {
+      if (es > NoEscape)
+        set_scalar_replaceable(false);
+    }
+};
+
+class FieldNode: public PointsToNode {
+  GrowableArray<PointsToNode*> _bases; // List of JavaObject nodes which point to this node
+  const int   _offset; // Field's offset.
+  const bool  _is_oop; // Field points to object
+        bool  _has_unknown_base; // Has phantom_object base
+public:
+  FieldNode(Compile *C, Node* n, EscapeState es, int offs, bool is_oop):
+    PointsToNode(C, n, es, Field),
+    _offset(offs), _is_oop(is_oop),
+    _has_unknown_base(false) {}
+
+  int      offset()              const { return _offset;}
+  bool     is_oop()              const { return _is_oop;}
+  bool     has_unknown_base()    const { return _has_unknown_base; }
+  void set_has_unknown_base()          { _has_unknown_base = true; }
+
+  int base_count()              const { return _bases.length(); }
+  PointsToNode* base(int e)     const { return _bases.at(e); }
+  bool add_base(PointsToNode* base)    { return _bases.append_if_missing(base); }
+#ifdef ASSERT
+  // Return true if the given java object is one of this field's bases.
+  bool has_base(JavaObjectNode* ptn) const;
+#endif
+
+};
+
+class ArraycopyNode: public PointsToNode {
+public:
+  ArraycopyNode(Compile *C, Node* n, EscapeState es):
+    PointsToNode(C, n, es, Arraycopy) {}
+};
+
+// Iterators for PointsTo node's edges:
+//   for (EdgeIterator i(n); i.has_next(); i.next()) {
+//     PointsToNode* u = i.get();
+//   }
+class PointsToIterator: public StackObj {
+protected:
+  const PointsToNode* node;
+  const int cnt;
+  int i;
+public:
+  inline PointsToIterator(const PointsToNode* n, int cnt) : node(n), cnt(cnt), i(0) { }
+  inline bool has_next() const { return i < cnt; }
+  inline void next() { i++; }
+  PointsToNode* get() const { ShouldNotCallThis(); return NULL; }
+};
+
+class EdgeIterator: public PointsToIterator {
+public:
+  inline EdgeIterator(const PointsToNode* n) : PointsToIterator(n, n->edge_count()) { }
+  inline PointsToNode* get() const { return node->edge(i); }
+};
+
+class UseIterator: public PointsToIterator {
+public:
+  inline UseIterator(const PointsToNode* n) : PointsToIterator(n, n->use_count()) { }
+  inline PointsToNode* get() const { return node->use(i); }
+};
+
+class BaseIterator: public PointsToIterator {
+public:
+  inline BaseIterator(const FieldNode* n) : PointsToIterator(n, n->base_count()) { }
+  inline PointsToNode* get() const { return ((PointsToNode*)node)->as_Field()->base(i); }
+};
+
+
 class ConnectionGraph: public ResourceObj {
 private:
-  GrowableArray<PointsToNode>  _nodes; // Connection graph nodes indexed
-                                       // by ideal node index.
-
-  Unique_Node_List  _delayed_worklist; // Nodes to be processed before
-                                       // the call build_connection_graph().
+  GrowableArray<PointsToNode*>  _nodes; // Map from ideal nodes to
+                                        // ConnectionGraph nodes.
 
-  GrowableArray<MergeMemNode *>  _mergemem_worklist; // List of all MergeMem nodes
+  GrowableArray<PointsToNode*>  _worklist; // Nodes to be processed
 
-  VectorSet                _processed; // Records which nodes have been
-                                       // processed.
-
-  bool                    _collecting; // Indicates whether escape information
-                                       // is still being collected. If false,
-                                       // no new nodes will be processed.
+  bool            _collecting; // Indicates whether escape information
+                               // is still being collected. If false,
+                               // no new nodes will be processed.
 
-  bool                    _progress;   // Indicates whether new Graph's edges
-                                       // were created.
+  bool               _verify;  // verify graph
 
-  uint                _phantom_object; // Index of globally escaping object
-                                       // that pointer values loaded from
-                                       // a field which has not been set
-                                       // are assumed to point to.
-  uint                      _oop_null; // ConP(#NULL)->_idx
-  uint                     _noop_null; // ConN(#NULL)->_idx
-  Node*                     _pcmp_neq; // ConI(#CC_GT)
-  Node*                      _pcmp_eq; // ConI(#CC_EQ)
+  JavaObjectNode* phantom_obj; // Unknown object
+  JavaObjectNode*    null_obj;
+  Node*             _pcmp_neq; // ConI(#CC_GT)
+  Node*              _pcmp_eq; // ConI(#CC_EQ)
 
-  Compile *                  _compile; // Compile object for current compilation
-  PhaseIterGVN *                _igvn; // Value numbering
+  Compile*           _compile; // Compile object for current compilation
+  PhaseIterGVN*         _igvn; // Value numbering
+
+  Unique_Node_List ideal_nodes; // Used by CG construction and type splitting.
 
   // Address of an element in _nodes.  Used when the element is to be modified
-  PointsToNode *ptnode_adr(uint idx) const {
+  PointsToNode* ptnode_adr(int idx) const {
     // There should be no new ideal nodes during ConnectionGraph build,
-    // growableArray::adr_at() will throw assert otherwise.
-    return _nodes.adr_at(idx);
+    // growableArray::at() will assert otherwise.
+    return _nodes.at(idx);
   }
   uint nodes_size() const { return _nodes.length(); }
 
-  bool is_null_ptr(uint idx) const { return (idx == _noop_null || idx == _oop_null); }
+  // Add nodes to ConnectionGraph.
+  void add_local_var(Node* n, PointsToNode::EscapeState es);
+  void add_java_object(Node* n, PointsToNode::EscapeState es);
+  void add_field(Node* n, PointsToNode::EscapeState es, int offset);
+  void add_arraycopy(Node* n, PointsToNode::EscapeState es, PointsToNode* src, PointsToNode* dst);
+
+  // Compute the escape state for arguments to a call.
+  void process_call_arguments(CallNode *call);
+
+  // Add PointsToNode node corresponding to a call
+  void add_call_node(CallNode* call);
+
+  // Map ideal node to existing PointsTo node (usually phantom_object).
+  void map_ideal_node(Node *n, PointsToNode* ptn) {
+    assert(ptn != NULL, "only existing PointsTo node");
+    _nodes.at_put(n->_idx, ptn);
+  }
+
+  // Create PointsToNode node and add it to Connection Graph.
+  void add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist);
+
+  // Add final simple edges to graph.
+  void add_final_edges(Node *n);
+
+  // Finish Graph construction.
+  bool complete_connection_graph(GrowableArray<PointsToNode*>&   ptnodes_worklist,
+                                 GrowableArray<JavaObjectNode*>& non_escaped_worklist,
+                                 GrowableArray<JavaObjectNode*>& java_objects_worklist,
+                                 GrowableArray<FieldNode*>&      oop_fields_worklist);
+
+#ifdef ASSERT
+  void verify_connection_graph(GrowableArray<PointsToNode*>&   ptnodes_worklist,
+                               GrowableArray<JavaObjectNode*>& non_escaped_worklist,
+                               GrowableArray<JavaObjectNode*>& java_objects_worklist,
+                               GrowableArray<Node*>& addp_worklist);
+#endif
+
+  // Add all references to this JavaObject node.
+  int add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist);
+
+  // Put node on worklist if it is (or was) not there.
+  void add_to_worklist(PointsToNode* pt) {
+    _worklist.push(pt);
+    return;
+  }
+
+  // Put on worklist all uses of this node.
+  void add_uses_to_worklist(PointsToNode* pt) {
+    for (UseIterator i(pt); i.has_next(); i.next())
+      _worklist.push(i.get());
+  }
+
+  // Put on worklist all field's uses and related field nodes.
+  void add_field_uses_to_worklist(FieldNode* field);
+
+  // Put on worklist all related field nodes.
+  void add_fields_to_worklist(FieldNode* field, PointsToNode* base);
+
+  // Find fields which have unknown value.
+  int find_field_value(FieldNode* field);
+
+  // Find fields initializing values for allocations.
+  int find_init_values(JavaObjectNode* ptn, PointsToNode* init_val, PhaseTransform* phase);
+
+  // Set the escape state of an object and its fields.
+  void set_escape_state(PointsToNode* ptn, PointsToNode::EscapeState esc) {
+    // Don't change non-escaping state of NULL pointer.
+    if (ptn != null_obj) {
+      if (ptn->escape_state() < esc)
+        ptn->set_escape_state(esc);
+      if (ptn->fields_escape_state() < esc)
+        ptn->set_fields_escape_state(esc);
+    }
+  }
+  void set_fields_escape_state(PointsToNode* ptn, PointsToNode::EscapeState esc) {
+    // Don't change non-escaping state of NULL pointer.
+    if (ptn != null_obj) {
+      if (ptn->fields_escape_state() < esc)
+        ptn->set_fields_escape_state(esc);
+    }
+  }
 
-  // Add node to ConnectionGraph.
-  void add_node(Node *n, PointsToNode::NodeType nt, PointsToNode::EscapeState es, bool done);
+  // Propagate GlobalEscape and ArgEscape escape states to all nodes
+  // and check that we still have non-escaping java objects.
+  bool find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
+                                GrowableArray<JavaObjectNode*>& non_escaped_worklist);
+
+  // Adjust scalar_replaceable state after Connection Graph is built.
+  void adjust_scalar_replaceable_state(JavaObjectNode* jobj);
+
+  // Optimize ideal graph.
+  void optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
+                            GrowableArray<Node*>& storestore_worklist);
+  // Optimize objects compare.
+  Node* optimize_ptr_compare(Node* n);
+
+  // Returns unique corresponding java object or NULL.
+  JavaObjectNode* unique_java_object(Node *n);
+
+  // Add an edge of the specified type pointing to the specified target.
+  bool add_edge(PointsToNode* from, PointsToNode* to) {
+    assert(!from->is_Field() || from->as_Field()->is_oop(), "sanity");
+
+    if (to == phantom_obj) {
+      if (from->has_unknown_ptr()) {
+        return false; // already points to phantom_obj
+      }
+      from->set_has_unknown_ptr();
+    }
+
+    bool is_new = from->add_edge(to);
+    assert(to != phantom_obj || is_new, "sanity");
+    if (is_new) { // New edge?
+      assert(!_verify, "graph is incomplete");
+      is_new = to->add_use(from);
+      assert(is_new, "use should be also new");
+    }
+    return is_new;
+  }
+
+  // Add an edge from Field node to its base and back.
+  bool add_base(FieldNode* from, PointsToNode* to) {
+    assert(!to->is_Arraycopy(), "sanity");
+    if (to == phantom_obj) {
+      if (from->has_unknown_base()) {
+        return false; // already has phantom_obj base
+      }
+      from->set_has_unknown_base();
+    }
+    bool is_new = from->add_base(to);
+    assert(to != phantom_obj || is_new, "sanity");
+    if (is_new) {      // New edge?
+      assert(!_verify, "graph is incomplete");
+      if (to == null_obj)
+        return is_new; // Don't add fields to NULL pointer.
+      if (to->is_JavaObject()) {
+        is_new = to->add_edge(from);
+      } else {
+        is_new = to->add_base_use(from);
+      }
+      assert(is_new, "use should be also new");
+    }
+    return is_new;
+  }
+
+  // Add LocalVar node and edge if possible
+  void add_local_var_and_edge(Node* n, PointsToNode::EscapeState es, Node* to,
+                              Unique_Node_List *delayed_worklist) {
+    PointsToNode* ptn = ptnode_adr(to->_idx);
+    if (delayed_worklist != NULL) { // First iteration of CG construction
+      add_local_var(n, es);
+      if (ptn == NULL) {
+        delayed_worklist->push(n);
+        return; // Process it later.
+      }
+    } else {
+      assert(ptn != NULL, "node should be registered");
+    }
+    add_edge(ptnode_adr(n->_idx), ptn);
+  }
+
+  // Helper functions
+  bool   is_oop_field(Node* n, int offset);
+  static Node* get_addp_base(Node *addp);
+  static Node* find_second_addp(Node* addp, Node* n);
 
   // offset of a field reference
   int address_offset(Node* adr, PhaseTransform *phase);
 
-  // compute the escape state for arguments to a call
-  void process_call_arguments(CallNode *call, PhaseTransform *phase);
 
-  // compute the escape state for the return value of a call
-  void process_call_result(ProjNode *resproj, PhaseTransform *phase);
-
-  // Populate Connection Graph with Ideal nodes.
-  void record_for_escape_analysis(Node *n, PhaseTransform *phase);
-
-  // Build Connection Graph and set nodes escape state.
-  void build_connection_graph(Node *n, PhaseTransform *phase);
-
-  // walk the connection graph starting at the node corresponding to "n" and
-  // add the index of everything it could point to, to "ptset".  This may cause
-  // Phi's encountered to get (re)processed  (which requires "phase".)
-  VectorSet* PointsTo(Node * n);
-
-  // Reused structures for PointsTo().
-  VectorSet            pt_ptset;
-  VectorSet            pt_visited;
-  GrowableArray<uint>  pt_worklist;
+  // Propagate unique types created for unescaped allocated objects
+  // through the graph
+  void split_unique_types(GrowableArray<Node *>  &alloc_worklist);
 
-  //  Edge manipulation.  The "from_i" and "to_i" arguments are the
-  //  node indices of the source and destination of the edge
-  void add_pointsto_edge(uint from_i, uint to_i);
-  void add_deferred_edge(uint from_i, uint to_i);
-  void add_field_edge(uint from_i, uint to_i, int offs);
+  // Helper methods for unique types split.
+  bool split_AddP(Node *addp, Node *base);
 
-  // Add an edge of the specified type pointing to the specified target.
-  // Set _progress if new edge is added.
-  void add_edge(PointsToNode *f, uint to_i, PointsToNode::EdgeType et) {
-    uint e_cnt = f->edge_count();
-    f->add_edge(to_i, et);
-    _progress |= (f->edge_count() != e_cnt);
-  }
+  PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, bool &new_created);
+  PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist);
 
-  // Add an edge to node given by "to_i" from any field of adr_i whose offset
-  // matches "offset"  A deferred edge is added if to_i is a LocalVar, and
-  // a pointsto edge is added if it is a JavaObject
-  void add_edge_from_fields(uint adr, uint to_i, int offs);
-
-  // Add a deferred  edge from node given by "from_i" to any field
-  // of adr_i whose offset matches "offset"
-  void add_deferred_edge_to_fields(uint from_i, uint adr, int offs);
+  void  move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis);
+  Node* find_inst_mem(Node* mem, int alias_idx,GrowableArray<PhiNode *>  &orig_phi_worklist);
+  Node* step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop);
 
 
-  // Remove outgoing deferred edges from the node referenced by "ni".
-  // Any outgoing edges from the target of the deferred edge are copied
-  // to "ni".
-  void remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited);
+  GrowableArray<MergeMemNode*>  _mergemem_worklist; // List of all MergeMem nodes
 
   Node_Array _node_map; // used for bookkeeping during type splitting
                         // Used for the following purposes:
@@ -320,21 +547,18 @@
                         // MemNode       - new memory input for this node
                         // CheckCastPP   - allocation that this is a cast of
                         // allocation    - CheckCastPP of the allocation
-  bool split_AddP(Node *addp, Node *base,  PhaseGVN  *igvn);
-  PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn, bool &new_created);
-  PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, PhaseGVN  *igvn);
-  void  move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *igvn);
-  Node *find_inst_mem(Node *mem, int alias_idx,GrowableArray<PhiNode *>  &orig_phi_worklist,  PhaseGVN  *igvn);
-
-  // Propagate unique types created for unescaped allocated objects
-  // through the graph
-  void split_unique_types(GrowableArray<Node *>  &alloc_worklist);
 
   // manage entries in _node_map
-  void  set_map(int idx, Node *n)        { _node_map.map(idx, n); }
-  Node *get_map(int idx)                 { return _node_map[idx]; }
-  PhiNode *get_map_phi(int idx) {
-    Node *phi = _node_map[idx];
+
+  void  set_map(Node* from, Node* to)  {
+    ideal_nodes.push(from);
+    _node_map.map(from->_idx, to);
+  }
+
+  Node* get_map(int idx) { return _node_map[idx]; }
+
+  PhiNode* get_map_phi(int idx) {
+    Node* phi = _node_map[idx];
     return (phi == NULL) ? NULL : phi->as_Phi();
   }
 
@@ -344,23 +568,6 @@
     _igvn->add_users_to_worklist(n);
   }
 
-  // Set the escape state of a node
-  void set_escape_state(uint ni, PointsToNode::EscapeState es);
-
-  // Find fields initializing values for allocations.
-  void find_init_values(Node* n, VectorSet* visited, PhaseTransform* phase);
-
-  // Adjust escape state after Connection Graph is built.
-  void adjust_escape_state(Node* n);
-
-  // Propagate escape states to referenced nodes.
-  bool propagate_escape_state(GrowableArray<int>* cg_worklist,
-                              GrowableArray<uint>* worklist,
-                              PointsToNode::EscapeState esc_state);
-
-  // Optimize objects compare.
-  Node* optimize_ptr_compare(Node* n);
-
   // Compute the escape information
   bool compute_escape();
 
@@ -373,11 +580,10 @@
   // Perform escape analysis
   static void do_analysis(Compile *C, PhaseIterGVN *igvn);
 
-  // escape state of a node
-  PointsToNode::EscapeState escape_state(Node *n);
+  bool not_global_escape(Node *n);
 
 #ifndef PRODUCT
-  void dump();
+  void dump(GrowableArray<PointsToNode*>& ptnodes_worklist);
 #endif
 };
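
The private add_edge()/add_base() helpers above keep the forward points-to edges, the reverse use lists, and a field's base list mutually consistent, and fold the phantom_obj case into a node flag so a repeated edge to the unknown object is rejected without touching the lists. A minimal sketch of how construction code is expected to wire nodes together (example_wiring is a hypothetical member shown only for illustration; the real wiring happens in add_final_edges() and the other construction methods declared above):

// Sketch: inside a ConnectionGraph member, after the PointsTo nodes exist.
void ConnectionGraph::example_wiring(Node* var, Node* obj, Node* fld) {
  PointsToNode*   v  = ptnode_adr(var->_idx);                  // LocalVar
  JavaObjectNode* jo = ptnode_adr(obj->_idx)->as_JavaObject(); // allocation
  FieldNode*      f  = ptnode_adr(fld->_idx)->as_Field();      // its field

  add_edge(v, jo);          // v may point to jo; jo records v as a use
  add_base(f, jo);          // f is a field of jo; jo gets an edge back to f
  add_edge(v, phantom_obj); // unknown target: also sets v->has_unknown_ptr()
}
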
 
--- a/src/share/vm/opto/gcm.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/gcm.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1344,8 +1344,8 @@
 
   // Schedule locally.  Right now a simple topological sort.
   // Later, do a real latency aware scheduler.
-  int *ready_cnt = NEW_RESOURCE_ARRAY(int,C->unique());
-  memset( ready_cnt, -1, C->unique() * sizeof(int) );
+  uint max_idx = C->unique();
+  GrowableArray<int> ready_cnt(max_idx, max_idx, -1);
   visited.Clear();
   for (i = 0; i < _num_blocks; i++) {
     if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
--- a/src/share/vm/opto/graphKit.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/graphKit.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1522,6 +1522,11 @@
                           const TypeOopPtr* val_type,
                           BasicType bt,
                           bool use_precise) {
+  // Transformation of a value which could be a NULL pointer (CastPP #NULL)
+  // could be delayed during Parse (for example, in adjust_map_after_if()).
+  // Execute the transformation here to avoid barrier generation in such a case.
+  if (_gvn.type(val) == TypePtr::NULL_PTR)
+    val = _gvn.makecon(TypePtr::NULL_PTR);
 
   set_control(ctl);
   if (stopped()) return top(); // Dead path ?
--- a/src/share/vm/opto/lcm.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/lcm.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -404,7 +404,7 @@
 // remaining cases (most), choose the instruction with the greatest latency
 // (that is, the greatest number of pseudo-cycles required to reach the end of the
 // routine). If there is a tie, choose the instruction with the most inputs.
-Node *Block::select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot) {
+Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
 
   // If only a single entry on the stack, use it
   uint cnt = worklist.size();
@@ -465,7 +465,7 @@
 
         // More than this instruction pending for successor to be ready,
         // don't choose this if other opportunities are ready
-        if (ready_cnt[use->_idx] > 1)
+        if (ready_cnt.at(use->_idx) > 1)
           n_choice = 1;
       }
 
@@ -565,7 +565,7 @@
 
 
 //------------------------------sched_call-------------------------------------
-uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
+uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
   RegMask regs;
 
   // Schedule all the users of the call right now.  All the users are
@@ -574,8 +574,9 @@
   for (DUIterator_Fast imax, i = mcall->fast_outs(imax); i < imax; i++) {
     Node* n = mcall->fast_out(i);
     assert( n->is_MachProj(), "" );
-    --ready_cnt[n->_idx];
-    assert( !ready_cnt[n->_idx], "" );
+    int n_cnt = ready_cnt.at(n->_idx)-1;
+    ready_cnt.at_put(n->_idx, n_cnt);
+    assert( n_cnt == 0, "" );
     // Schedule next to call
     _nodes.map(node_cnt++, n);
     // Collect defined registers
@@ -590,7 +591,9 @@
       Node* m = n->fast_out(j); // Get user
       if( bbs[m->_idx] != this ) continue;
       if( m->is_Phi() ) continue;
-      if( !--ready_cnt[m->_idx] )
+      int m_cnt = ready_cnt.at(m->_idx)-1;
+      ready_cnt.at_put(m->_idx, m_cnt);
+      if( m_cnt == 0 )
         worklist.push(m);
     }
 
@@ -655,7 +658,7 @@
 
 //------------------------------schedule_local---------------------------------
 // Topological sort within a block.  Someday become a real scheduler.
-bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, VectorSet &next_call) {
+bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &ready_cnt, VectorSet &next_call) {
   // Already "sorted" are the block start Node (as the first entry), and
   // the block-ending Node and any trailing control projections.  We leave
   // these alone.  PhiNodes and ParmNodes are made to follow the block start
@@ -695,7 +698,7 @@
         if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
           local++;              // One more block-local input
       }
-      ready_cnt[n->_idx] = local; // Count em up
+      ready_cnt.at_put(n->_idx, local); // Count em up
 
 #ifdef ASSERT
       if( UseConcMarkSweepGC || UseG1GC ) {
@@ -729,7 +732,7 @@
     }
   }
   for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
-    ready_cnt[_nodes[i2]->_idx] = 0;
+    ready_cnt.at_put(_nodes[i2]->_idx, 0);
 
   // All the prescheduled guys do not hold back internal nodes
   uint i3;
@@ -737,8 +740,10 @@
     Node *n = _nodes[i3];       // Get pre-scheduled
     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
       Node* m = n->fast_out(j);
-      if( cfg->_bbs[m->_idx] ==this ) // Local-block user
-        ready_cnt[m->_idx]--;   // Fix ready count
+      if( cfg->_bbs[m->_idx] ==this ) { // Local-block user
+        int m_cnt = ready_cnt.at(m->_idx)-1;
+        ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
+      }
     }
   }
 
@@ -747,7 +752,7 @@
   Node_List worklist;
   for(uint i4=i3; i4<node_cnt; i4++ ) {    // Put ready guys on worklist
     Node *m = _nodes[i4];
-    if( !ready_cnt[m->_idx] ) {   // Zero ready count?
+    if( !ready_cnt.at(m->_idx) ) {   // Zero ready count?
       if (m->is_iteratively_computed()) {
         // Push induction variable increments last to allow other uses
         // of the phi to be scheduled first. The select() method breaks
@@ -775,14 +780,14 @@
       for (uint j=0; j<_nodes.size(); j++) {
         Node     *n = _nodes[j];
         int     idx = n->_idx;
-        tty->print("#   ready cnt:%3d  ", ready_cnt[idx]);
+        tty->print("#   ready cnt:%3d  ", ready_cnt.at(idx));
         tty->print("latency:%3d  ", cfg->_node_latency->at_grow(idx));
         tty->print("%4d: %s\n", idx, n->Name());
       }
     }
 #endif
 
-  uint max_idx = matcher.C->unique();
+  uint max_idx = (uint)ready_cnt.length();
   // Pull from worklist and schedule
   while( worklist.size() ) {    // Worklist is not ready
 
@@ -840,11 +845,13 @@
       Node* m = n->fast_out(i5); // Get user
       if( cfg->_bbs[m->_idx] != this ) continue;
       if( m->is_Phi() ) continue;
-      if (m->_idx > max_idx) { // new node, skip it
+      if (m->_idx >= max_idx) { // new node, skip it
         assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
         continue;
       }
-      if( !--ready_cnt[m->_idx] )
+      int m_cnt = ready_cnt.at(m->_idx)-1;
+      ready_cnt.at_put(m->_idx, m_cnt);
+      if( m_cnt == 0 )
         worklist.push(m);
     }
   }
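
The gcm.cpp and lcm.cpp hunks above replace the raw int* ready_cnt buffer with a GrowableArray<int>, so an out-of-range index asserts instead of writing past the allocation. A minimal sketch of the idiom the new code relies on, assuming C is the current Compile object and n an ideal node as in the surrounding hunks (the three-argument constructor allocates the requested number of slots pre-filled with the given value; at()/at_put() are the bounds-checked accessors):

// Sketch of the construction/access pattern the patch switches to.
uint max_idx = C->unique();                          // number of ideal nodes
GrowableArray<int> ready_cnt(max_idx, max_idx, -1);  // max_idx slots, all -1

ready_cnt.at_put(n->_idx, 2);            // node n still waits on two inputs
int cnt = ready_cnt.at(n->_idx) - 1;     // one input was just scheduled
ready_cnt.at_put(n->_idx, cnt);
if (cnt == 0) { /* n is ready: push it on the scheduling worklist */ }
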
--- a/src/share/vm/opto/library_call.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/library_call.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -338,8 +338,27 @@
     break;
 
   case vmIntrinsics::_bitCount_i:
+    if (!Matcher::match_rule_supported(Op_PopCountI)) return NULL;
+    break;
+
   case vmIntrinsics::_bitCount_l:
-    if (!UsePopCountInstruction)  return NULL;
+    if (!Matcher::match_rule_supported(Op_PopCountL)) return NULL;
+    break;
+
+  case vmIntrinsics::_numberOfLeadingZeros_i:
+    if (!Matcher::match_rule_supported(Op_CountLeadingZerosI)) return NULL;
+    break;
+
+  case vmIntrinsics::_numberOfLeadingZeros_l:
+    if (!Matcher::match_rule_supported(Op_CountLeadingZerosL)) return NULL;
+    break;
+
+  case vmIntrinsics::_numberOfTrailingZeros_i:
+    if (!Matcher::match_rule_supported(Op_CountTrailingZerosI)) return NULL;
+    break;
+
+  case vmIntrinsics::_numberOfTrailingZeros_l:
+    if (!Matcher::match_rule_supported(Op_CountTrailingZerosL)) return NULL;
     break;
 
   case vmIntrinsics::_Reference_get:
@@ -416,14 +435,12 @@
     return kit.transfer_exceptions_into_jvms();
   }
 
-  if (PrintIntrinsics) {
+  // The intrinsic bailed out
+  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
     if (jvms->has_method()) {
       // Not a root compile.
-      tty->print("Did not inline intrinsic %s%s at bci:%d in",
-                 vmIntrinsics::name_at(intrinsic_id()),
-                 (is_virtual() ? " (virtual)" : ""), kit.bci());
-      kit.caller()->print_short_name(tty);
-      tty->print_cr(" (%d bytes)", kit.caller()->code_size());
+      const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
+      CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, kit.bci(), msg);
     } else {
       // Root compile
       tty->print("Did not generate intrinsic %s%s at bci:%d in",
@@ -2153,7 +2170,7 @@
   //
   // if (offset == java_lang_ref_Reference::_reference_offset) {
   //   if (base != null) {
-  //     if (klass(base)->reference_type() != REF_NONE)) {
+  //     if (instance_of(base, java.lang.ref.Reference)) {
   //       pre_barrier(_, pre_val, ...);
   //     }
   //   }
@@ -2165,8 +2182,6 @@
   IdealKit ideal(this);
 #define __ ideal.
 
-  const int reference_type_offset = in_bytes(instanceKlass::reference_type_offset());
-
   Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);
 
   __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
@@ -2678,7 +2693,13 @@
     cas = _gvn.transform(new (C, 5) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
     break;
   case T_OBJECT:
-     // reference stores need a store barrier.
+    // Transformation of a value which could be a NULL pointer (CastPP #NULL)
+    // could be delayed during Parse (for example, in adjust_map_after_if()).
+    // Execute the transformation here to avoid barrier generation in such a case.
+    if (_gvn.type(newval) == TypePtr::NULL_PTR)
+      newval = _gvn.makecon(TypePtr::NULL_PTR);
+
+    // Reference stores need a store barrier.
     // (They don't if CAS fails, but it isn't worth checking.)
     pre_barrier(true /* do_load*/,
                 control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
@@ -5449,4 +5470,3 @@
   push(result);
   return true;
 }
-
--- a/src/share/vm/opto/loopnode.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/loopnode.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -898,7 +898,7 @@
 Node* CountedLoopNode::match_incr_with_optional_truncation(
                       Node* expr, Node** trunc1, Node** trunc2, const TypeInt** trunc_type) {
   // Quick cutouts:
-  if (expr == NULL || expr->req() != 3)  return false;
+  if (expr == NULL || expr->req() != 3)  return NULL;
 
   Node *t1 = NULL;
   Node *t2 = NULL;
--- a/src/share/vm/opto/macro.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/macro.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -234,11 +234,20 @@
     }
   } else {
     // G1 pre/post barriers
-    assert(p2x->outcnt() == 2, "expects 2 users: Xor and URShift nodes");
+    assert(p2x->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes");
     // It could be only one user, the URShift node, in the Object.clone() intrinsic
     // but the new allocation is passed to arraycopy stub and it could not
     // be scalar replaced. So we don't check the case.
 
+    // Another case of only one user (Xor) is when the value check for NULL
+    // in G1 post barrier is folded after CCP so the code which used URShift
+    // is removed.
+
+    // Take Region node before eliminating post barrier since it also
+    // eliminates CastP2X node when it has only one user.
+    Node* this_region = p2x->in(0);
+    assert(this_region != NULL, "");
+
     // Remove G1 post barrier.
 
     // Search for CastP2X->Xor->URShift->Cmp path which
@@ -263,8 +272,6 @@
     // Remove G1 pre barrier.
 
     // Search "if (marking != 0)" check and set it to "false".
-    Node* this_region = p2x->in(0);
-    assert(this_region != NULL, "");
     // There is no G1 pre barrier if previous stored value is NULL
     // (for example, after initialization).
     if (this_region->is_Region() && this_region->req() == 3) {
@@ -292,7 +299,7 @@
     }
     // Now CastP2X can be removed since it is used only on a dead path
     // which is currently still alive until igvn optimizes it.
-    assert(p2x->unique_out()->Opcode() == Op_URShiftX, "");
+    assert(p2x->outcnt() == 0 || p2x->unique_out()->Opcode() == Op_URShiftX, "");
     _igvn.replace_node(p2x, top());
   }
 }
--- a/src/share/vm/opto/memnode.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/memnode.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1718,8 +1718,10 @@
   bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
   if (ReduceFieldZeroing || is_instance) {
     Node* value = can_see_stored_value(mem,phase);
-    if (value != NULL && value->is_Con())
+    if (value != NULL && value->is_Con()) {
+      assert(value->bottom_type()->higher_equal(_type),"sanity");
       return value->bottom_type();
+    }
   }
 
   if (is_instance) {
@@ -1759,6 +1761,20 @@
   return LoadNode::Ideal(phase, can_reshape);
 }
 
+const Type* LoadBNode::Value(PhaseTransform *phase) const {
+  Node* mem = in(MemNode::Memory);
+  Node* value = can_see_stored_value(mem,phase);
+  if (value != NULL && value->is_Con() &&
+      !value->bottom_type()->higher_equal(_type)) {
+    // If the input to the store does not fit with the load's result type,
+    // it must be truncated. We can't delay until Ideal call since
+    // a singleton Value is needed for split_thru_phi optimization.
+    int con = value->get_int();
+    return TypeInt::make((con << 24) >> 24);
+  }
+  return LoadNode::Value(phase);
+}
+
 //--------------------------LoadUBNode::Ideal-------------------------------------
 //
 //  If the previous store is to the same address as this load,
@@ -1775,6 +1791,20 @@
   return LoadNode::Ideal(phase, can_reshape);
 }
 
+const Type* LoadUBNode::Value(PhaseTransform *phase) const {
+  Node* mem = in(MemNode::Memory);
+  Node* value = can_see_stored_value(mem,phase);
+  if (value != NULL && value->is_Con() &&
+      !value->bottom_type()->higher_equal(_type)) {
+    // If the input to the store does not fit with the load's result type,
+    // it must be truncated. We can't delay until Ideal call since
+    // a singleton Value is needed for split_thru_phi optimization.
+    int con = value->get_int();
+    return TypeInt::make(con & 0xFF);
+  }
+  return LoadNode::Value(phase);
+}
+
 //--------------------------LoadUSNode::Ideal-------------------------------------
 //
 //  If the previous store is to the same address as this load,
@@ -1791,6 +1821,20 @@
   return LoadNode::Ideal(phase, can_reshape);
 }
 
+const Type* LoadUSNode::Value(PhaseTransform *phase) const {
+  Node* mem = in(MemNode::Memory);
+  Node* value = can_see_stored_value(mem,phase);
+  if (value != NULL && value->is_Con() &&
+      !value->bottom_type()->higher_equal(_type)) {
+    // If the input to the store does not fit with the load's result type,
+    // it must be truncated. We can't delay until Ideal call since
+    // a singleton Value is needed for split_thru_phi optimization.
+    int con = value->get_int();
+    return TypeInt::make(con & 0xFFFF);
+  }
+  return LoadNode::Value(phase);
+}
+
 //--------------------------LoadSNode::Ideal--------------------------------------
 //
 //  If the previous store is to the same address as this load,
@@ -1809,6 +1853,20 @@
   return LoadNode::Ideal(phase, can_reshape);
 }
 
+const Type* LoadSNode::Value(PhaseTransform *phase) const {
+  Node* mem = in(MemNode::Memory);
+  Node* value = can_see_stored_value(mem,phase);
+  if (value != NULL && value->is_Con() &&
+      !value->bottom_type()->higher_equal(_type)) {
+    // If the input to the store does not fit with the load's result type,
+    // it must be truncated. We can't delay until Ideal call since
+    // a singleton Value is needed for split_thru_phi optimization.
+    int con = value->get_int();
+    return TypeInt::make((con << 16) >> 16);
+  }
+  return LoadNode::Value(phase);
+}
+
 //=============================================================================
 //----------------------------LoadKlassNode::make------------------------------
 // Polymorphic factory method:
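
The LoadB/LoadUB/LoadUS/LoadS Value() methods added above re-truncate a constant that was stored through a wider type than the narrow load reads back, so a singleton type is already available for the split_thru_phi optimization mentioned in the comments. A worked example of the arithmetic, assuming the two's-complement, arithmetic-right-shift behavior HotSpot relies on:

// Constant visible through the store, wider than the narrow load:
int con = 0x1FF;                 // 511
int b  = (con << 24) >> 24;      // LoadB : sign-extend low 8 bits  -> -1
int ub =  con & 0xFF;            // LoadUB: zero-extend low 8 bits  -> 255
int us =  con & 0xFFFF;          // LoadUS: zero-extend low 16 bits -> 511
int s  = (con << 16) >> 16;      // LoadS : sign-extend low 16 bits -> 511
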
--- a/src/share/vm/opto/memnode.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/memnode.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -215,6 +215,7 @@
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual const Type *Value(PhaseTransform *phase) const;
   virtual int store_Opcode() const { return Op_StoreB; }
   virtual BasicType memory_type() const { return T_BYTE; }
 };
@@ -228,6 +229,7 @@
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual const Type *Value(PhaseTransform *phase) const;
   virtual int store_Opcode() const { return Op_StoreB; }
   virtual BasicType memory_type() const { return T_BYTE; }
 };
@@ -241,10 +243,25 @@
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual const Type *Value(PhaseTransform *phase) const;
   virtual int store_Opcode() const { return Op_StoreC; }
   virtual BasicType memory_type() const { return T_CHAR; }
 };
 
+//------------------------------LoadSNode--------------------------------------
+// Load a short (16bits signed) from memory
+class LoadSNode : public LoadNode {
+public:
+  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
+    : LoadNode(c,mem,adr,at,ti) {}
+  virtual int Opcode() const;
+  virtual uint ideal_reg() const { return Op_RegI; }
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual const Type *Value(PhaseTransform *phase) const;
+  virtual int store_Opcode() const { return Op_StoreC; }
+  virtual BasicType memory_type() const { return T_SHORT; }
+};
+
 //------------------------------LoadINode--------------------------------------
 // Load an integer from memory
 class LoadINode : public LoadNode {
@@ -433,19 +450,6 @@
 };
 
 
-//------------------------------LoadSNode--------------------------------------
-// Load a short (16bits signed) from memory
-class LoadSNode : public LoadNode {
-public:
-  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
-    : LoadNode(c,mem,adr,at,ti) {}
-  virtual int Opcode() const;
-  virtual uint ideal_reg() const { return Op_RegI; }
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-  virtual int store_Opcode() const { return Op_StoreC; }
-  virtual BasicType memory_type() const { return T_SHORT; }
-};
-
 //------------------------------StoreNode--------------------------------------
 // Store value; requires Store, Address and Value
 class StoreNode : public MemNode {
--- a/src/share/vm/opto/output.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/output.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -167,7 +167,7 @@
   // Determine if we need to generate a stack overflow check.
   // Do it if the method is not a stub function and
   // has java calls or has frame size > vm_page_size/8.
-  return (stub_function() == NULL &&
+  return (UseStackBanging && stub_function() == NULL &&
           (has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3));
 }
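
In other words, with banging enabled a non-stub method gets the check whenever it makes Java calls or its frame exceeds one eighth of a page (512 bytes for a 4 KiB page). A toy model of the predicate follows; the flag and size values are illustrative, not read from a running VM.

    #include <cstdio>

    // Illustrative stand-in for need_stack_bang(); 4096 is an assumed page size.
    static bool need_stack_bang(bool use_stack_banging, bool is_stub,
                                bool has_java_calls, int frame_size_in_bytes,
                                int vm_page_size = 4096) {
      return use_stack_banging && !is_stub &&
             (has_java_calls || frame_size_in_bytes > (vm_page_size >> 3));
    }

    int main() {
      printf("%d %d\n",
             need_stack_bang(true,  false, false, 600),   // 1: 600 > 512
             need_stack_bang(false, false, true,  600));  // 0: banging disabled
      return 0;
    }
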
 
--- a/src/share/vm/opto/parseHelper.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/parseHelper.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -71,14 +71,14 @@
   // Throw uncommon trap if class is not loaded or the value we are casting
   // _from_ is not loaded, and value is not null.  If the value _is_ NULL,
   // then the checkcast does nothing.
-  const TypeInstPtr *tp = _gvn.type(obj)->isa_instptr();
-  if (!will_link || (tp && !tp->is_loaded())) {
+  const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr();
+  if (!will_link || (tp && tp->klass() && !tp->klass()->is_loaded())) {
     if (C->log() != NULL) {
       if (!will_link) {
         C->log()->elem("assert_null reason='checkcast' klass='%d'",
                        C->log()->identify(klass));
       }
-      if (tp && !tp->is_loaded()) {
+      if (tp && tp->klass() && !tp->klass()->is_loaded()) {
         // %%% Cannot happen?
         C->log()->elem("assert_null reason='checkcast source' klass='%d'",
                        C->log()->identify(tp->klass()));
--- a/src/share/vm/opto/phase.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/phase.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,8 +39,9 @@
 
 // The next timers used for LogCompilation
 elapsedTimer Phase::_t_parser;
-elapsedTimer Phase::_t_escapeAnalysis;
 elapsedTimer Phase::_t_optimizer;
+elapsedTimer   Phase::_t_escapeAnalysis;
+elapsedTimer     Phase::_t_connectionGraph;
 elapsedTimer   Phase::_t_idealLoop;
 elapsedTimer   Phase::_t_ccp;
 elapsedTimer Phase::_t_matcher;
@@ -51,6 +52,7 @@
 elapsedTimer Phase::_t_graphReshaping;
 elapsedTimer Phase::_t_scheduler;
 elapsedTimer Phase::_t_blockOrdering;
+elapsedTimer Phase::_t_macroEliminate;
 elapsedTimer Phase::_t_macroExpand;
 elapsedTimer Phase::_t_peephole;
 elapsedTimer Phase::_t_codeGeneration;
@@ -104,6 +106,8 @@
     if (DoEscapeAnalysis) {
       // EA is part of Optimizer.
       tty->print_cr ("      escape analysis: %3.3f sec", Phase::_t_escapeAnalysis.seconds());
+      tty->print_cr ("        connection graph: %3.3f sec", Phase::_t_connectionGraph.seconds());
+      tty->print_cr ("      macroEliminate : %3.3f sec", Phase::_t_macroEliminate.seconds());
     }
     tty->print_cr ("      iterGVN        : %3.3f sec", Phase::_t_iterGVN.seconds());
     tty->print_cr ("      idealLoop      : %3.3f sec", Phase::_t_idealLoop.seconds());
@@ -112,9 +116,10 @@
     tty->print_cr ("      iterGVN2       : %3.3f sec", Phase::_t_iterGVN2.seconds());
     tty->print_cr ("      macroExpand    : %3.3f sec", Phase::_t_macroExpand.seconds());
     tty->print_cr ("      graphReshape   : %3.3f sec", Phase::_t_graphReshaping.seconds());
-    double optimizer_subtotal = Phase::_t_iterGVN.seconds() +
+    double optimizer_subtotal = Phase::_t_iterGVN.seconds() + Phase::_t_iterGVN2.seconds() +
+      Phase::_t_escapeAnalysis.seconds() + Phase::_t_macroEliminate.seconds() +
       Phase::_t_idealLoop.seconds() + Phase::_t_ccp.seconds() +
-      Phase::_t_graphReshaping.seconds();
+      Phase::_t_macroExpand.seconds() + Phase::_t_graphReshaping.seconds();
     double percent_of_optimizer = ((optimizer_subtotal == 0.0) ? 0.0 : (optimizer_subtotal / Phase::_t_optimizer.seconds() * 100.0));
     tty->print_cr ("      subtotal       : %3.3f sec,  %3.2f %%", optimizer_subtotal, percent_of_optimizer);
   }
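
The subtotal now simply sums all of the optimizer's component timers before computing the percentage. A toy computation with made-up timings shows the arithmetic:

    #include <cstdio>

    int main() {
      // Made-up timings (seconds) for the components summed in the patch.
      double iterGVN = 1.2, iterGVN2 = 0.3, escapeAnalysis = 0.4,
             macroEliminate = 0.1, idealLoop = 2.0, ccp = 0.5,
             macroExpand = 0.6, graphReshaping = 0.2, optimizer = 5.6;

      double subtotal = iterGVN + iterGVN2 + escapeAnalysis + macroEliminate +
                        idealLoop + ccp + macroExpand + graphReshaping;
      double percent = (subtotal == 0.0) ? 0.0 : subtotal / optimizer * 100.0;
      printf("subtotal: %3.3f sec, %3.2f %%\n", subtotal, percent);  // 5.300 sec, 94.64 %
      return 0;
    }
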
--- a/src/share/vm/opto/phase.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/opto/phase.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,8 +72,12 @@
 
 // The next timers used for LogCompilation
   static elapsedTimer _t_parser;
-  static elapsedTimer _t_escapeAnalysis;
   static elapsedTimer _t_optimizer;
+public:
+  // ConnectionGraph can't be a Phase since it is used after EA is done.
+  static elapsedTimer   _t_escapeAnalysis;
+  static elapsedTimer     _t_connectionGraph;
+protected:
   static elapsedTimer   _t_idealLoop;
   static elapsedTimer   _t_ccp;
   static elapsedTimer _t_matcher;
@@ -84,6 +88,7 @@
   static elapsedTimer _t_graphReshaping;
   static elapsedTimer _t_scheduler;
   static elapsedTimer _t_blockOrdering;
+  static elapsedTimer _t_macroEliminate;
   static elapsedTimer _t_macroExpand;
   static elapsedTimer _t_peephole;
   static elapsedTimer _t_codeGeneration;
--- a/src/share/vm/prims/jvm.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/prims/jvm.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1301,9 +1301,6 @@
 // Inner class reflection ///////////////////////////////////////////////////////////////////////////////
 
 JVM_ENTRY(jobjectArray, JVM_GetDeclaredClasses(JNIEnv *env, jclass ofClass))
-  const int inner_class_info_index = 0;
-  const int outer_class_info_index = 1;
-
   JvmtiVMObjectAllocEventCollector oam;
   // ofClass is a reference to a java_lang_Class object. The mirror object
   // of an instanceKlass
@@ -1315,26 +1312,26 @@
   }
 
   instanceKlassHandle k(thread, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)));
-
-  if (k->inner_classes()->length() == 0) {
+  InnerClassesIterator iter(k);
+
+  if (iter.length() == 0) {
     // Neither an inner nor outer class
     oop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_NULL);
     return (jobjectArray)JNIHandles::make_local(env, result);
   }
 
   // find inner class info
-  typeArrayHandle    icls(thread, k->inner_classes());
   constantPoolHandle cp(thread, k->constants());
-  int length = icls->length();
+  int length = iter.length();
 
   // Allocate temp. result array
   objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), length/4, CHECK_NULL);
   objArrayHandle result (THREAD, r);
   int members = 0;
 
-  for(int i = 0; i < length; i += 4) {
-    int ioff = icls->ushort_at(i + inner_class_info_index);
-    int ooff = icls->ushort_at(i + outer_class_info_index);
+  for (; !iter.done(); iter.next()) {
+    int ioff = iter.inner_class_info_index();
+    int ooff = iter.outer_class_info_index();
 
     if (ioff != 0 && ooff != 0) {
       // Check to see if the name matches the class we're looking for
@@ -1392,17 +1389,13 @@
                                                      bool* inner_is_member,
                                                      TRAPS) {
   Thread* thread = THREAD;
-  const int inner_class_info_index = inner_class_inner_class_info_offset;
-  const int outer_class_info_index = inner_class_outer_class_info_offset;
-
-  if (k->inner_classes()->length() == 0) {
+  InnerClassesIterator iter(k);
+  if (iter.length() == 0) {
     // No inner class info => no declaring class
     return NULL;
   }
 
-  typeArrayHandle i_icls(thread, k->inner_classes());
   constantPoolHandle i_cp(thread, k->constants());
-  int i_length = i_icls->length();
 
   bool found = false;
   klassOop ok;
@@ -1410,10 +1403,10 @@
   *inner_is_member = false;
 
   // Find inner_klass attribute
-  for (int i = 0; i < i_length && !found; i += inner_class_next_offset) {
-    int ioff = i_icls->ushort_at(i + inner_class_info_index);
-    int ooff = i_icls->ushort_at(i + outer_class_info_index);
-    int noff = i_icls->ushort_at(i + inner_class_inner_name_offset);
+  for (; !iter.done() && !found; iter.next()) {
+    int ioff = iter.inner_class_info_index();
+    int ooff = iter.outer_class_info_index();
+    int noff = iter.inner_name_index();
     if (ioff != 0) {
       // Check to see if the name matches the class we're looking for
       // before attempting to find the class.
@@ -2716,7 +2709,9 @@
   }
   oop java_thread = JNIHandles::resolve_non_null(jthread);
   JavaThread* receiver = java_lang_Thread::thread(java_thread);
-  Events::log("JVM_StopThread thread JavaThread " INTPTR_FORMAT " as oop " INTPTR_FORMAT " [exception " INTPTR_FORMAT "]", receiver, (address)java_thread, throwable);
+  Events::log_exception(JavaThread::current(),
+                        "JVM_StopThread thread JavaThread " INTPTR_FORMAT " as oop " INTPTR_FORMAT " [exception " INTPTR_FORMAT "]",
+                        receiver, (address)java_thread, throwable);
   // First check if thread is alive
   if (receiver != NULL) {
     // Check if exception is getting thrown at self (use oop equality, since the
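
The InnerClassesIterator used throughout this change walks the flat u2 array of (inner_info, outer_info, inner_name, access_flags) quadruples. The stand-alone sketch below mirrors its accessor names but is an illustration, not the HotSpot implementation.

    #include <cstdio>

    typedef unsigned short u2;

    // Minimal stand-in for InnerClassesIterator: walks 4-tuple entries
    // (inner_info, outer_info, inner_name, access_flags) in a flat u2 array.
    class ToyInnerClassesIterator {
      const u2* _list; int _len; int _idx;
     public:
      ToyInnerClassesIterator(const u2* list, int len) : _list(list), _len(len), _idx(0) {}
      int  length() const { return _len; }
      bool done()   const { return _idx >= _len; }
      void next()         { _idx += 4; }
      u2 inner_class_info_index() const { return _list[_idx + 0]; }
      u2 outer_class_info_index() const { return _list[_idx + 1]; }
      u2 inner_name_index()       const { return _list[_idx + 2]; }
      u2 inner_access_flags()     const { return _list[_idx + 3]; }
    };

    int main() {
      u2 raw[] = { 5, 2, 9, 0x0008,   // one entry: a static member class
                   7, 0, 0, 0x0001 }; // an anonymous class: no outer/name
      for (ToyInnerClassesIterator it(raw, 8); !it.done(); it.next())
        printf("inner=%d outer=%d name=%d flags=0x%04x\n",
               it.inner_class_info_index(), it.outer_class_info_index(),
               it.inner_name_index(), (unsigned)it.inner_access_flags());
      return 0;
    }
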
--- a/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -292,8 +292,8 @@
 
 // Compute the number of entries in the InnerClasses attribute
 u2 JvmtiClassFileReconstituter::inner_classes_attribute_length() {
-  typeArrayOop inner_class_list = ikh()->inner_classes();
-  return (inner_class_list == NULL) ? 0 : inner_class_list->length();
+  InnerClassesIterator iter(ikh());
+  return iter.length();
 }
 
 // Write an annotation attribute.  The VM stores them in raw form, so all we need
@@ -324,26 +324,20 @@
 // JVMSpec|     } classes[number_of_classes];
 // JVMSpec|   }
 void JvmtiClassFileReconstituter::write_inner_classes_attribute(int length) {
-  typeArrayOop inner_class_list = ikh()->inner_classes();
-  guarantee(inner_class_list != NULL && inner_class_list->length() == length,
+  InnerClassesIterator iter(ikh());
+  guarantee(iter.length() != 0 && iter.length() == length,
             "caller must check");
-  typeArrayHandle inner_class_list_h(thread(), inner_class_list);
-  assert (length % instanceKlass::inner_class_next_offset == 0, "just checking");
   u2 entry_count = length / instanceKlass::inner_class_next_offset;
   u4 size = 2 + entry_count * (2+2+2+2);
 
   write_attribute_name_index("InnerClasses");
   write_u4(size);
   write_u2(entry_count);
-  for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) {
-    write_u2(inner_class_list_h->ushort_at(
-                      i + instanceKlass::inner_class_inner_class_info_offset));
-    write_u2(inner_class_list_h->ushort_at(
-                      i + instanceKlass::inner_class_outer_class_info_offset));
-    write_u2(inner_class_list_h->ushort_at(
-                      i + instanceKlass::inner_class_inner_name_offset));
-    write_u2(inner_class_list_h->ushort_at(
-                      i + instanceKlass::inner_class_access_flags_offset));
+  for (; !iter.done(); iter.next()) {
+    write_u2(iter.inner_class_info_index());
+    write_u2(iter.outer_class_info_index());
+    write_u2(iter.inner_name_index());
+    write_u2(iter.inner_access_flags());
   }
 }
 
@@ -727,8 +721,11 @@
       case Bytecodes::_invokestatic    :  // fall through
       case Bytecodes::_invokedynamic   :  // fall through
       case Bytecodes::_invokeinterface :
-        assert(len == 3 || (code == Bytecodes::_invokeinterface && len ==5),
+        assert(len == 3 ||
+               (code == Bytecodes::_invokeinterface && len == 5) ||
+               (code == Bytecodes::_invokedynamic   && len == 5),
                "sanity check");
+
         int cpci = Bytes::get_native_u2(bcp+1);
         bool is_invokedynamic = (EnableInvokeDynamic && code == Bytecodes::_invokedynamic);
         if (is_invokedynamic)
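
The attribute size written above follows directly from the class-file layout: two bytes for number_of_classes plus four u2 fields (eight bytes) per entry. A worked example with a made-up count of three entries:

    #include <cstdio>

    int main() {
      // 3 inner-class entries, 4 u2 slots each, as in the write path above.
      int length = 3 * 4;            // raw u2 slots in the attribute data
      int entry_count = length / 4;  // instanceKlass::inner_class_next_offset == 4
      unsigned size = 2 + entry_count * (2+2+2+2);
      printf("entry_count=%d attribute size=%u bytes\n", entry_count, size); // 3, 26
      return 0;
    }
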
--- a/src/share/vm/prims/jvmtiExport.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/prims/jvmtiExport.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -324,6 +324,12 @@
       record_vm_internal_object_allocation(object);
     }
   }
+  inline static void post_array_size_exhausted() {
+    if (should_post_resource_exhausted()) {
+      post_resource_exhausted(JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
+                              "Requested array size exceeds VM limit");
+    }
+  }
 
   static void cleanup_thread             (JavaThread* thread) KERNEL_RETURN;
 
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -2400,44 +2400,33 @@
   // new constant indices as needed. The inner classes info is a
   // quadruple:
   // (inner_class_info, outer_class_info, inner_name, inner_access_flags)
-  typeArrayOop inner_class_list = scratch_class->inner_classes();
-  int icl_length = (inner_class_list == NULL) ? 0 : inner_class_list->length();
-  if (icl_length > 0) {
-    typeArrayHandle inner_class_list_h(THREAD, inner_class_list);
-    for (int i = 0; i < icl_length;
-         i += instanceKlass::inner_class_next_offset) {
-      int cur_index = inner_class_list_h->ushort_at(i
-                        + instanceKlass::inner_class_inner_class_info_offset);
-      if (cur_index == 0) {
-        continue;  // JVM spec. allows null inner class refs so skip it
-      }
-      int new_index = find_new_index(cur_index);
-      if (new_index != 0) {
-        RC_TRACE_WITH_THREAD(0x00080000, THREAD,
-          ("inner_class_info change: %d to %d", cur_index, new_index));
-        inner_class_list_h->ushort_at_put(i
-          + instanceKlass::inner_class_inner_class_info_offset, new_index);
-      }
-      cur_index = inner_class_list_h->ushort_at(i
-                    + instanceKlass::inner_class_outer_class_info_offset);
-      new_index = find_new_index(cur_index);
-      if (new_index != 0) {
-        RC_TRACE_WITH_THREAD(0x00080000, THREAD,
-          ("outer_class_info change: %d to %d", cur_index, new_index));
-        inner_class_list_h->ushort_at_put(i
-          + instanceKlass::inner_class_outer_class_info_offset, new_index);
-      }
-      cur_index = inner_class_list_h->ushort_at(i
-                    + instanceKlass::inner_class_inner_name_offset);
-      new_index = find_new_index(cur_index);
-      if (new_index != 0) {
-        RC_TRACE_WITH_THREAD(0x00080000, THREAD,
-          ("inner_name change: %d to %d", cur_index, new_index));
-        inner_class_list_h->ushort_at_put(i
-          + instanceKlass::inner_class_inner_name_offset, new_index);
-      }
-    } // end for each inner class
-  } // end if we have inner classes
+  InnerClassesIterator iter(scratch_class);
+  for (; !iter.done(); iter.next()) {
+    int cur_index = iter.inner_class_info_index();
+    if (cur_index == 0) {
+      continue;  // JVM spec. allows null inner class refs so skip it
+    }
+    int new_index = find_new_index(cur_index);
+    if (new_index != 0) {
+      RC_TRACE_WITH_THREAD(0x00080000, THREAD,
+        ("inner_class_info change: %d to %d", cur_index, new_index));
+      iter.set_inner_class_info_index(new_index);
+    }
+    cur_index = iter.outer_class_info_index();
+    new_index = find_new_index(cur_index);
+    if (new_index != 0) {
+      RC_TRACE_WITH_THREAD(0x00080000, THREAD,
+        ("outer_class_info change: %d to %d", cur_index, new_index));
+      iter.set_outer_class_info_index(new_index);
+    }
+    cur_index = iter.inner_name_index();
+    new_index = find_new_index(cur_index);
+    if (new_index != 0) {
+      RC_TRACE_WITH_THREAD(0x00080000, THREAD,
+        ("inner_name change: %d to %d", cur_index, new_index));
+      iter.set_inner_name_index(new_index);
+    }
+  } // end for each inner class
 
   // Attach each method in klass to the new constant pool and update
   // to use new constant pool indices as needed:
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2999,7 +2999,8 @@
     char type = field->field_type();
     if (!is_primitive_field_type(type)) {
       oop fld_o = o->obj_field(field->field_offset());
-      if (fld_o != NULL) {
+      // ignore any objects that aren't visible to profiler
+      if (fld_o != NULL && ServiceUtil::visible_oop(fld_o)) {
         // reflection code may have a reference to a klassOop.
         // - see sun.reflect.UnsafeStaticFieldAccessorImpl and sun.misc.Unsafe
         if (fld_o->is_klass()) {
--- a/src/share/vm/prims/jvmtiThreadState.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/prims/jvmtiThreadState.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -319,6 +319,15 @@
   // clearing the flag indicates we are done with the PopFrame() dance
   clr_pending_step_for_popframe();
 
+  // If an exception was thrown in this frame, the jvmti thread state needs
+  // to be reset. Single stepping may not get enabled correctly by the agent,
+  // since the exception state is passed in the MethodExit event, which may be
+  // sent at some time in the future. The JDWP agent ignores MethodExit events
+  // caused by an exception.
+  //
+  if (is_exception_detected()) {
+    clear_exception_detected();
+  }
   // If step is pending for popframe then it may not be
   // a repeat step. The new_bci and method_id are the same as current_bci
   // and current method_id after pop and step for recursive calls.
@@ -385,6 +394,15 @@
   // the ForceEarlyReturn() dance
   clr_pending_step_for_earlyret();
 
+  // If an exception was thrown in this frame, the jvmti thread state needs
+  // to be reset. Single stepping may not get enabled correctly by the agent,
+  // since the exception state is passed in the MethodExit event, which may be
+  // sent at some time in the future. The JDWP agent ignores MethodExit events
+  // caused by an exception.
+  //
+  if (is_exception_detected()) {
+    clear_exception_detected();
+  }
   // If step is pending for earlyret then it may not be a repeat step.
   // The new_bci and method_id are the same as current_bci and current
   // method_id after earlyret and step for recursive calls.
--- a/src/share/vm/prims/jvmtiThreadState.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/prims/jvmtiThreadState.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -165,6 +165,10 @@
   inline bool is_exception_caught()    { return _exception_caught;  }
   inline void set_exception_detected() { _exception_detected = true;
                                          _exception_caught = false; }
+  inline void clear_exception_detected() {
+    _exception_detected = false;
+    assert(_exception_caught == false, "_exception_caught is out of phase");
+  }
   inline void set_exception_caught()   { _exception_caught = true;
                                          _exception_detected = false; }
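
The new clear_exception_detected() completes a small two-flag state machine (detected/caught). A stand-alone sketch of those transitions follows; the class name and driver are illustrative.

    #include <cassert>

    // Stand-alone sketch of the two-flag exception state used above.
    struct ToyJvmtiState {
      bool _exception_detected = false;
      bool _exception_caught   = false;
      void set_exception_detected()   { _exception_detected = true;  _exception_caught = false; }
      void set_exception_caught()     { _exception_caught   = true;  _exception_detected = false; }
      void clear_exception_detected() {
        _exception_detected = false;
        assert(_exception_caught == false && "_exception_caught is out of phase");
      }
    };

    int main() {
      ToyJvmtiState s;
      s.set_exception_detected();   // exception thrown in the popped frame
      s.clear_exception_detected(); // PopFrame/ForceEarlyReturn resets it so
                                    // single stepping re-arms correctly
      return 0;
    }
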
 
--- a/src/share/vm/prims/nativeLookup.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/prims/nativeLookup.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -91,6 +91,19 @@
 }
 
 
+char* NativeLookup::critical_jni_name(methodHandle method) {
+  stringStream st;
+  // Prefix
+  st.print("JavaCritical_");
+  // Klass name
+  mangle_name_on(&st, method->klass_name());
+  st.print("_");
+  // Method name
+  mangle_name_on(&st, method->name());
+  return st.as_string();
+}
+
+
 char* NativeLookup::long_jni_name(methodHandle method) {
   // Signature ignores the wrapping parentheses and the trailing return type
   stringStream st;
@@ -108,6 +121,7 @@
   void JNICALL JVM_RegisterUnsafeMethods(JNIEnv *env, jclass unsafecls);
   void JNICALL JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass unsafecls);
   void JNICALL JVM_RegisterPerfMethods(JNIEnv *env, jclass perfclass);
+  void JNICALL JVM_RegisterWhiteBoxMethods(JNIEnv *env, jclass wbclass);
 }
 
 #define CC (char*)  /* cast a literal from (const char*) */
@@ -120,7 +134,8 @@
 
   { CC"Java_sun_misc_Unsafe_registerNatives",                      NULL, FN_PTR(JVM_RegisterUnsafeMethods)       },
   { CC"Java_java_lang_invoke_MethodHandleNatives_registerNatives", NULL, FN_PTR(JVM_RegisterMethodHandleMethods) },
-  { CC"Java_sun_misc_Perf_registerNatives",                        NULL, FN_PTR(JVM_RegisterPerfMethods)         }
+  { CC"Java_sun_misc_Perf_registerNatives",                        NULL, FN_PTR(JVM_RegisterPerfMethods)         },
+  { CC"Java_sun_hotspot_WhiteBox_registerNatives",                 NULL, FN_PTR(JVM_RegisterWhiteBoxMethods)     },
 };
 
 static address lookup_special_native(char* jni_name) {
@@ -193,6 +208,34 @@
 }
 
 
+address NativeLookup::lookup_critical_style(methodHandle method, char* pure_name, const char* long_name, int args_size, bool os_style) {
+  if (!method->has_native_function()) {
+    return NULL;
+  }
+
+  address current_entry = method->native_function();
+
+  char dll_name[JVM_MAXPATHLEN];
+  int offset;
+  if (os::dll_address_to_library_name(current_entry, dll_name, sizeof(dll_name), &offset)) {
+    char ebuf[32];
+    void* dll = os::dll_load(dll_name, ebuf, sizeof(ebuf));
+    if (dll != NULL) {
+      // Compute complete JNI name for style
+      stringStream st;
+      if (os_style) os::print_jni_name_prefix_on(&st, args_size);
+      st.print_raw(pure_name);
+      st.print_raw(long_name);
+      if (os_style) os::print_jni_name_suffix_on(&st, args_size);
+      char* jni_name = st.as_string();
+      return (address)os::dll_lookup(dll, jni_name);
+    }
+  }
+
+  return NULL;
+}
+
+
 // Check all the formats of native implementation name to see if there is one
 // for the specified method.
 address NativeLookup::lookup_entry(methodHandle method, bool& in_base_library, TRAPS) {
@@ -228,6 +271,58 @@
   return entry; // NULL indicates not found
 }
 
+// Check all the formats of native implementation name to see if there is one
+// for the specified method.
+address NativeLookup::lookup_critical_entry(methodHandle method) {
+  if (!CriticalJNINatives) return NULL;
+
+  if (method->is_synchronized() ||
+      !method->is_static()) {
+    // Only static non-synchronized methods are allowed
+    return NULL;
+  }
+
+  ResourceMark rm;
+  address entry = NULL;
+
+  Symbol* signature = method->signature();
+  for (int end = 0; end < signature->utf8_length(); end++) {
+    if (signature->byte_at(end) == 'L') {
+      // Don't allow object types
+      return NULL;
+    }
+  }
+
+  // Compute critical name
+  char* critical_name = critical_jni_name(method);
+
+  // Compute argument size
+  int args_size = 1                             // JNIEnv
+                + (method->is_static() ? 1 : 0) // class for static methods
+                + method->size_of_parameters(); // actual parameters
+
+
+  // 1) Try JNI short style
+  entry = lookup_critical_style(method, critical_name, "",        args_size, true);
+  if (entry != NULL) return entry;
+
+  // Compute long name
+  char* long_name = long_jni_name(method);
+
+  // 2) Try JNI long style
+  entry = lookup_critical_style(method, critical_name, long_name, args_size, true);
+  if (entry != NULL) return entry;
+
+  // 3) Try JNI short style without os prefix/suffix
+  entry = lookup_critical_style(method, critical_name, "",        args_size, false);
+  if (entry != NULL) return entry;
+
+  // 4) Try JNI long style without os prefix/suffix
+  entry = lookup_critical_style(method, critical_name, long_name, args_size, false);
+
+  return entry; // NULL indicates not found
+}
+
 // Check if there are any JVM TI prefixes which have been applied to the native method name.
 // If any are found, remove them before attempting the lookup of the
 // native implementation again.
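
The critical variant keeps the ordinary JNI name mangling but swaps the Java_ prefix for JavaCritical_ and, by the -XX:+CriticalJNINatives convention, drops the JNIEnv/jclass parameters and passes primitive arrays as a (length, pointer) pair. The sketch below shows both entry points for a hypothetical static native int sum(int[]) on a class org.example.Fast; the class, method, and parameter-passing details are illustrative of that convention, not part of this patch.

    #include <jni.h>

    extern "C" {

    // Ordinary JNI entry point: Java_ prefix, JNIEnv* and jclass parameters.
    JNIEXPORT jint JNICALL
    Java_org_example_Fast_sum(JNIEnv* env, jclass clazz, jintArray data) {
      jint n = env->GetArrayLength(data);
      jint* p = env->GetIntArrayElements(data, NULL);
      jint s = 0;
      for (jint i = 0; i < n; i++) s += p[i];
      env->ReleaseIntArrayElements(data, p, JNI_ABORT);
      return s;
    }

    // Critical variant looked up by lookup_critical_entry(): JavaCritical_
    // prefix, no JNIEnv/jclass, primitive array passed as (length, pointer).
    JNIEXPORT jint JNICALL
    JavaCritical_org_example_Fast_sum(jint n, jint* p) {
      jint s = 0;
      for (jint i = 0; i < n; i++) s += p[i];
      return s;
    }

    }  // extern "C"
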
--- a/src/share/vm/prims/nativeLookup.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/prims/nativeLookup.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,15 +36,18 @@
   // JNI name computation
   static char* pure_jni_name(methodHandle method);
   static char* long_jni_name(methodHandle method);
+  static char* critical_jni_name(methodHandle method);
 
   // Style specific lookup
   static address lookup_style(methodHandle method, char* pure_name, const char* long_name, int args_size, bool os_style, bool& in_base_library, TRAPS);
+  static address lookup_critical_style(methodHandle method, char* pure_name, const char* long_name, int args_size, bool os_style);
   static address lookup_base (methodHandle method, bool& in_base_library, TRAPS);
   static address lookup_entry(methodHandle method, bool& in_base_library, TRAPS);
   static address lookup_entry_prefixed(methodHandle method, bool& in_base_library, TRAPS);
  public:
   // Lookup native function. May throw UnsatisfiedLinkError.
   static address lookup(methodHandle method, bool& in_base_library, TRAPS);
+  static address lookup_critical_entry(methodHandle method);
 
   // Lookup native functions in base library.
   static address base_library_lookup(const char* class_name, const char* method_name, const char* signature);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/prims/whitebox.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "jni.h"
+
+#include "memory/universe.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/whitebox.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/os.hpp"
+#include "utilities/debug.hpp"
+
+#ifndef SERIALGC
+#include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/heapRegionRemSet.hpp"
+#endif // !SERIALGC
+
+bool WhiteBox::_used = false;
+
+// Entry macro to transition from JNI to VM state.
+
+#define WB_ENTRY(result_type, header) JNI_ENTRY(result_type, header)
+#define WB_END JNI_END
+
+// Definitions of functions exposed via Whitebox API
+
+WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj))
+  return (jlong)(void*)JNIHandles::resolve(obj);
+WB_END
+
+WB_ENTRY(jint, WB_GetHeapOopSize(JNIEnv* env, jobject o))
+  return heapOopSize;
+WB_END
+
+#ifndef SERIALGC
+WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
+  G1CollectedHeap* g1 = G1CollectedHeap::heap();
+  oop result = JNIHandles::resolve(obj);
+  const HeapRegion* hr = g1->heap_region_containing(result);
+  return hr->isHumongous();
+WB_END
+
+WB_ENTRY(jlong, WB_G1NumFreeRegions(JNIEnv* env, jobject o))
+  G1CollectedHeap* g1 = G1CollectedHeap::heap();
+  size_t nr = g1->free_regions();
+  return (jlong)nr;
+WB_END
+
+WB_ENTRY(jboolean, WB_G1InConcurrentMark(JNIEnv* env, jobject o))
+  G1CollectedHeap* g1 = G1CollectedHeap::heap();
+  ConcurrentMark* cm = g1->concurrent_mark();
+  return cm->concurrent_marking_in_progress();
+WB_END
+
+WB_ENTRY(jint, WB_G1RegionSize(JNIEnv* env, jobject o))
+  return (jint)HeapRegion::GrainBytes;
+WB_END
+#endif // !SERIALGC
+
+#define CC (char*)
+
+static JNINativeMethod methods[] = {
+  {CC"getObjectAddress",   CC"(Ljava/lang/Object;)J", (void*)&WB_GetObjectAddress  },
+  {CC"getHeapOopSize",     CC"()I",                   (void*)&WB_GetHeapOopSize    },
+#ifndef SERIALGC
+  {CC"g1InConcurrentMark", CC"()Z",                   (void*)&WB_G1InConcurrentMark},
+  {CC"g1IsHumongous",      CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous     },
+  {CC"g1NumFreeRegions",   CC"()J",                   (void*)&WB_G1NumFreeRegions  },
+  {CC"g1RegionSize",       CC"()I",                   (void*)&WB_G1RegionSize      },
+#endif // !SERIALGC
+};
+
+#undef CC
+
+JVM_ENTRY(void, JVM_RegisterWhiteBoxMethods(JNIEnv* env, jclass wbclass))
+  {
+    if (WhiteBoxAPI) {
+      // Make sure that wbclass is loaded by the null classloader
+      instanceKlassHandle ikh = instanceKlassHandle(JNIHandles::resolve(wbclass)->klass());
+      Handle loader(ikh->class_loader());
+      if (loader.is_null()) {
+        ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
+        jint result = env->RegisterNatives(wbclass, methods, sizeof(methods)/sizeof(methods[0]));
+        if (result == 0) {
+          WhiteBox::set_used();
+        }
+      }
+    }
+  }
+JVM_END
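
Registration only happens when -XX:+WhiteBoxAPI is set and the sun.hotspot.WhiteBox class was loaded by the boot (null) class loader; the table is sized with the usual sizeof idiom. A toy, self-contained version of that table, just to show the idiom; the entries and dummy function are placeholders.

    #include <cstdio>

    // Toy version of the registration table above, showing the
    // sizeof(methods)/sizeof(methods[0]) idiom used to size it.
    struct JNINativeMethodToy { const char* name; const char* sig; void* fn; };

    static int wb_dummy() { return 0; }

    static JNINativeMethodToy methods[] = {
      {"getObjectAddress", "(Ljava/lang/Object;)J", (void*)&wb_dummy},
      {"getHeapOopSize",   "()I",                   (void*)&wb_dummy},
    };

    int main() {
      printf("registering %zu methods\n", sizeof(methods)/sizeof(methods[0])); // 2
      return 0;
    }
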
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/prims/whitebox.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_PRIMS_WHITEBOX_HPP
+#define SHARE_VM_PRIMS_WHITEBOX_HPP
+
+class WhiteBox : public AllStatic {
+ private:
+  static bool _used;
+ public:
+  static bool used()     { return _used; }
+  static void set_used() { _used = true; }
+};
+
+#endif // SHARE_VM_PRIMS_WHITEBOX_HPP
--- a/src/share/vm/runtime/advancedThresholdPolicy.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -271,13 +271,10 @@
 }
 
 // Create MDO if necessary.
-void AdvancedThresholdPolicy::create_mdo(methodHandle mh, TRAPS) {
+void AdvancedThresholdPolicy::create_mdo(methodHandle mh, JavaThread* THREAD) {
   if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return;
   if (mh->method_data() == NULL) {
-    methodOopDesc::build_interpreter_method_data(mh, THREAD);
-    if (HAS_PENDING_EXCEPTION) {
-      CLEAR_PENDING_EXCEPTION;
-    }
+    methodOopDesc::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
   }
 }
 
@@ -426,22 +423,22 @@
 }
 
 // Update the rate and submit compile
-void AdvancedThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS) {
+void AdvancedThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
   int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
   update_rate(os::javaTimeMillis(), mh());
-  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", THREAD);
+  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
 }
 
 // Handle the invocation event.
 void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
-                                                      CompLevel level, nmethod* nm, TRAPS) {
+                                                      CompLevel level, nmethod* nm, JavaThread* thread) {
   if (should_create_mdo(mh(), level)) {
-    create_mdo(mh, THREAD);
+    create_mdo(mh, thread);
   }
   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
     CompLevel next_level = call_event(mh(), level);
     if (next_level != level) {
-      compile(mh, InvocationEntryBci, next_level, THREAD);
+      compile(mh, InvocationEntryBci, next_level, thread);
     }
   }
 }
@@ -449,13 +446,13 @@
 // Handle the back branch event. Notice that we can compile the method
 // with a regular entry from here.
 void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
-                                                       int bci, CompLevel level, nmethod* nm, TRAPS) {
+                                                       int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
   if (should_create_mdo(mh(), level)) {
-    create_mdo(mh, THREAD);
+    create_mdo(mh, thread);
   }
   // Check if MDO should be created for the inlined method
   if (should_create_mdo(imh(), level)) {
-    create_mdo(imh, THREAD);
+    create_mdo(imh, thread);
   }
 
   if (is_compilation_enabled()) {
@@ -463,7 +460,7 @@
     CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
     // At the very least compile the OSR version
     if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_osr_level != level) {
-      compile(imh, bci, next_osr_level, THREAD);
+      compile(imh, bci, next_osr_level, thread);
     }
 
     // Use loop event as an opportunity to also check if there's been
@@ -502,14 +499,14 @@
           next_level = CompLevel_full_profile;
         }
         if (cur_level != next_level) {
-          compile(mh, InvocationEntryBci, next_level, THREAD);
+          compile(mh, InvocationEntryBci, next_level, thread);
         }
       }
     } else {
       cur_level = comp_level(imh());
       next_level = call_event(imh(), cur_level);
       if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) {
-        compile(imh, InvocationEntryBci, next_level, THREAD);
+        compile(imh, InvocationEntryBci, next_level, thread);
       }
     }
   }
--- a/src/share/vm/runtime/advancedThresholdPolicy.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/advancedThresholdPolicy.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -197,7 +197,7 @@
   // determines whether we should do that.
   inline bool should_create_mdo(methodOop method, CompLevel cur_level);
   // Create MDO if necessary.
-  void create_mdo(methodHandle mh, TRAPS);
+  void create_mdo(methodHandle mh, JavaThread* thread);
   // Is method profiled enough?
   bool is_method_profiled(methodOop method);
 
@@ -208,12 +208,12 @@
   jlong start_time() const     { return _start_time; }
 
   // Submit a given method for compilation (and update the rate).
-  virtual void submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS);
+  virtual void submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread);
   // event() from SimpleThresholdPolicy would call these.
   virtual void method_invocation_event(methodHandle method, methodHandle inlinee,
-                                       CompLevel level, nmethod* nm, TRAPS);
+                                       CompLevel level, nmethod* nm, JavaThread* thread);
   virtual void method_back_branch_event(methodHandle method, methodHandle inlinee,
-                                        int bci, CompLevel level, nmethod* nm, TRAPS);
+                                        int bci, CompLevel level, nmethod* nm, JavaThread* thread);
 public:
   AdvancedThresholdPolicy() : _start_time(0) { }
   // Select task is called by CompileBroker. We should return a task or NULL.
--- a/src/share/vm/runtime/arguments.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -102,8 +102,6 @@
 char* Arguments::_meta_index_path = NULL;
 char* Arguments::_meta_index_dir = NULL;
 
-static bool force_client_mode = false;
-
 // Check if head of 'option' matches 'name', and sets 'tail' remaining part of option string
 
 static bool match_option(const JavaVMOption *option, const char* name,
@@ -818,8 +816,21 @@
     return true;
   }
 
-  jio_fprintf(defaultStream::error_stream(),
-              "Unrecognized VM option '%s'\n", argname);
+  // For locked flags, report a custom error message if available.
+  // Otherwise, report the standard unrecognized VM option.
+
+  Flag* locked_flag = Flag::find_flag((char*)argname, strlen(argname), true);
+  if (locked_flag != NULL) {
+    char locked_message_buf[BUFLEN];
+    locked_flag->get_locked_message(locked_message_buf, BUFLEN);
+    if (strlen(locked_message_buf) == 0) {
+      jio_fprintf(defaultStream::error_stream(),
+        "Unrecognized VM option '%s'\n", argname);
+    } else {
+      jio_fprintf(defaultStream::error_stream(), "%s", locked_message_buf);
+    }
+  }
+
   // allow for commandline "commenting out" options like -XX:#+Verbose
   return arg[0] == '#';
 }
@@ -1040,6 +1051,16 @@
 }
 
 #ifndef KERNEL
+static void disable_adaptive_size_policy(const char* collector_name) {
+  if (UseAdaptiveSizePolicy) {
+    if (FLAG_IS_CMDLINE(UseAdaptiveSizePolicy)) {
+      warning("disabling UseAdaptiveSizePolicy; it is incompatible with %s.",
+              collector_name);
+    }
+    FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
+  }
+}
+
 // If the user has chosen ParallelGCThreads > 0, we set UseParNewGC
 // if it's not explictly set or unset. If the user has chosen
 // UseParNewGC and not explicitly set ParallelGCThreads we
@@ -1049,11 +1070,8 @@
          "control point invariant");
   assert(UseParNewGC, "Error");
 
-  // Turn off AdaptiveSizePolicy by default for parnew until it is
-  // complete.
-  if (FLAG_IS_DEFAULT(UseAdaptiveSizePolicy)) {
-    FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
-  }
+  // Turn off AdaptiveSizePolicy for parnew until it is complete.
+  disable_adaptive_size_policy("UseParNewGC");
 
   if (ParallelGCThreads == 0) {
     FLAG_SET_DEFAULT(ParallelGCThreads,
@@ -1110,11 +1128,8 @@
     FLAG_SET_ERGO(bool, UseParNewGC, true);
   }
 
-  // Turn off AdaptiveSizePolicy by default for cms until it is
-  // complete.
-  if (FLAG_IS_DEFAULT(UseAdaptiveSizePolicy)) {
-    FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
-  }
+  // Turn off AdaptiveSizePolicy for CMS until it is complete.
+  disable_adaptive_size_policy("UseConcMarkSweepGC");
 
   // In either case, adjust ParallelGCThreads and/or UseParNewGC
   // as needed.
@@ -1341,7 +1356,7 @@
     return;
   }
 
-  if (os::is_server_class_machine() && !force_client_mode ) {
+  if (os::is_server_class_machine()) {
     // If no other collector is requested explicitly,
     // let the VM select the collector based on
     // machine class and automatic selection policy.
@@ -1366,12 +1381,9 @@
   // by ergonomics.
   if (MaxHeapSize <= max_heap_for_compressed_oops()) {
 #if !defined(COMPILER1) || defined(TIERED)
-// disable UseCompressedOops by default on MacOS X until 7118647 is fixed
-#ifndef __APPLE__
     if (FLAG_IS_DEFAULT(UseCompressedOops)) {
       FLAG_SET_ERGO(bool, UseCompressedOops, true);
     }
-#endif // !__APPLE__
 #endif
 #ifdef _WIN64
     if (UseLargePages && UseCompressedOops) {
@@ -1396,10 +1408,11 @@
 
 void Arguments::set_parallel_gc_flags() {
   assert(UseParallelGC || UseParallelOldGC, "Error");
-  // If parallel old was requested, automatically enable parallel scavenge.
-  if (UseParallelOldGC && !UseParallelGC && FLAG_IS_DEFAULT(UseParallelGC)) {
-    FLAG_SET_DEFAULT(UseParallelGC, true);
+  // Enable ParallelOld unless it was explicitly disabled (cmd line or rc file).
+  if (FLAG_IS_DEFAULT(UseParallelOldGC)) {
+    FLAG_SET_DEFAULT(UseParallelOldGC, true);
   }
+  FLAG_SET_DEFAULT(UseParallelGC, true);
 
   // If no heap maximum was requested explicitly, use some reasonable fraction
   // of the physical memory, up to a maximum of 1GB.
@@ -2050,6 +2063,19 @@
     FREE_C_HEAP_ARRAY(char, altclasses_path);
   }
 
+  if (WhiteBoxAPI) {
+    // Append wb.jar to bootclasspath if enabled
+    const char* wb_jar = "wb.jar";
+    size_t wb_path_len = strlen(get_meta_index_dir()) + 1 +
+                         strlen(wb_jar);
+    char* wb_path = NEW_C_HEAP_ARRAY(char, wb_path_len);
+    strcpy(wb_path, get_meta_index_dir());
+    strcat(wb_path, wb_jar);
+    scp.add_suffix(wb_path);
+    scp_assembly_required = true;
+    FREE_C_HEAP_ARRAY(char, wb_path);
+  }
+
   // Parse _JAVA_OPTIONS environment variable (if present) (mimics classic VM)
   result = parse_java_options_environment_variable(&scp, &scp_assembly_required);
   if (result != JNI_OK) {
@@ -2510,15 +2536,6 @@
       // was arrived at by experimenting with specjbb.
       FLAG_SET_CMDLINE(uintx, OldPLABSize, 8*K);  // Note: this is in words
 
-      // CompilationPolicyChoice=0 causes the server compiler to adopt
-      // a more conservative which-method-do-I-compile policy when one
-      // of the counters maintained by the interpreter trips.  The
-      // result is reduced startup time and improved specjbb and
-      // alacrity performance.  Zero is the default, but we set it
-      // explicitly here in case the default changes.
-      // See runtime/compilationPolicy.*.
-      FLAG_SET_CMDLINE(intx, CompilationPolicyChoice, 0);
-
       // Enable parallel GC and adaptive generation sizing
       FLAG_SET_CMDLINE(bool, UseParallelGC, true);
       FLAG_SET_DEFAULT(ParallelGCThreads,
@@ -2935,11 +2952,6 @@
   // Construct the path to the archive
   char jvm_path[JVM_MAXPATHLEN];
   os::jvm_path(jvm_path, sizeof(jvm_path));
-#ifdef TIERED
-  if (strstr(jvm_path, "client") != NULL) {
-    force_client_mode = true;
-  }
-#endif // TIERED
   char *end = strrchr(jvm_path, *os::file_separator());
   if (end != NULL) *end = '\0';
   char *shared_archive_path = NEW_C_HEAP_ARRAY(char, strlen(jvm_path) +
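
One visible effect of the new disable_adaptive_size_policy() helper is that an explicit -XX:+UseAdaptiveSizePolicy now produces a warning before the flag is forced off for ParNew and CMS. A simplified model of that precedence follows; two booleans stand in for the real flag bookkeeping.

    #include <cstdio>

    // Simplified model of the FLAG_IS_CMDLINE / FLAG_SET_DEFAULT interplay
    // in disable_adaptive_size_policy(); real flag bookkeeping is richer.
    struct ToyFlag { bool value; bool set_on_cmdline; };

    static void disable_adaptive_size_policy(ToyFlag& f, const char* collector) {
      if (f.value) {
        if (f.set_on_cmdline)
          printf("warning: disabling UseAdaptiveSizePolicy; it is incompatible with %s.\n",
                 collector);
        f.value = false;  // forced back to the default
      }
    }

    int main() {
      ToyFlag flag = { true, true };  // user passed -XX:+UseAdaptiveSizePolicy
      disable_adaptive_size_policy(flag, "UseConcMarkSweepGC");
      printf("UseAdaptiveSizePolicy=%d\n", flag.value);  // 0
      return 0;
    }
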
--- a/src/share/vm/runtime/compilationPolicy.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/compilationPolicy.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -306,29 +306,27 @@
   return (current >= initial + target);
 }
 
-nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
+nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci,
+                                    int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) {
   assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
   NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
-  if (JvmtiExport::can_post_interpreter_events()) {
-    assert(THREAD->is_Java_thread(), "Wrong type of thread");
-    if (((JavaThread*)THREAD)->is_interp_only_mode()) {
-      // If certain JVMTI events (e.g. frame pop event) are requested then the
-      // thread is forced to remain in interpreted code. This is
-      // implemented partly by a check in the run_compiled_code
-      // section of the interpreter whether we should skip running
-      // compiled code, and partly by skipping OSR compiles for
-      // interpreted-only threads.
-      if (bci != InvocationEntryBci) {
-        reset_counter_for_back_branch_event(method);
-        return NULL;
-      }
+  if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) {
+    // If certain JVMTI events (e.g. frame pop event) are requested then the
+    // thread is forced to remain in interpreted code. This is
+    // implemented partly by a check in the run_compiled_code
+    // section of the interpreter whether we should skip running
+    // compiled code, and partly by skipping OSR compiles for
+    // interpreted-only threads.
+    if (bci != InvocationEntryBci) {
+      reset_counter_for_back_branch_event(method);
+      return NULL;
     }
   }
   if (bci == InvocationEntryBci) {
     // when code cache is full, compilation gets switched off, UseCompiler
     // is set to false
     if (!method->has_compiled_code() && UseCompiler) {
-      method_invocation_event(method, CHECK_NULL);
+      method_invocation_event(method, thread);
     } else {
       // Force counter overflow on method entry, even if no compilation
       // happened.  (The method_invocation_event call does this also.)
@@ -344,7 +342,7 @@
     NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
     // when code cache is full, we should not compile any more...
     if (osr_nm == NULL && UseCompiler) {
-      method_back_branch_event(method, bci, CHECK_NULL);
+      method_back_branch_event(method, bci, thread);
       osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
     }
     if (osr_nm == NULL) {
@@ -395,7 +393,7 @@
 
 // SimpleCompPolicy - compile current method
 
-void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) {
+void SimpleCompPolicy::method_invocation_event(methodHandle m, JavaThread* thread) {
   int hot_count = m->invocation_count();
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
@@ -405,18 +403,18 @@
     if (nm == NULL ) {
       const char* comment = "count";
       CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_highest_tier,
-                                    m, hot_count, comment, CHECK);
+                                    m, hot_count, comment, thread);
     }
   }
 }
 
-void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) {
+void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, JavaThread* thread) {
   int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
   if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) {
     CompileBroker::compile_method(m, bci, CompLevel_highest_tier,
-                                  m, hot_count, comment, CHECK);
+                                  m, hot_count, comment, thread);
     NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));)
   }
 }
@@ -427,14 +425,13 @@
 
 
 // Consider m for compilation
-void StackWalkCompPolicy::method_invocation_event(methodHandle m, TRAPS) {
+void StackWalkCompPolicy::method_invocation_event(methodHandle m, JavaThread* thread) {
   int hot_count = m->invocation_count();
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
 
   if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m)) {
-    ResourceMark rm(THREAD);
-    JavaThread *thread = (JavaThread*)THREAD;
+    ResourceMark rm(thread);
     frame       fr     = thread->last_frame();
     assert(fr.is_interpreted_frame(), "must be interpreted");
     assert(fr.interpreter_frame_method() == m(), "bad method");
@@ -461,17 +458,17 @@
       assert(top != NULL, "findTopInlinableFrame returned null");
       if (TraceCompilationPolicy) top->print();
       CompileBroker::compile_method(top->top_method(), InvocationEntryBci, CompLevel_highest_tier,
-                                    m, hot_count, comment, CHECK);
+                                    m, hot_count, comment, thread);
     }
   }
 }
 
-void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) {
+void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, JavaThread* thread) {
   int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
   if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) {
-    CompileBroker::compile_method(m, bci, CompLevel_highest_tier, m, hot_count, comment, CHECK);
+    CompileBroker::compile_method(m, bci, CompLevel_highest_tier, m, hot_count, comment, thread);
 
     NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));)
   }
--- a/src/share/vm/runtime/compilationPolicy.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/compilationPolicy.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -64,7 +64,7 @@
   virtual int compiler_count(CompLevel comp_level) = 0;
   // main notification entry, return a pointer to an nmethod if the OSR is required,
   // returns NULL otherwise.
-  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) = 0;
+  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) = 0;
   // safepoint() is called at the end of the safepoint
   virtual void do_safepoint_work() = 0;
   // reprofile request
@@ -105,15 +105,15 @@
   virtual bool is_mature(methodOop method);
   virtual void initialize();
   virtual CompileTask* select_task(CompileQueue* compile_queue);
-  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS);
-  virtual void method_invocation_event(methodHandle m, TRAPS) = 0;
-  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS) = 0;
+  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread);
+  virtual void method_invocation_event(methodHandle m, JavaThread* thread) = 0;
+  virtual void method_back_branch_event(methodHandle m, int bci, JavaThread* thread) = 0;
 };
 
 class SimpleCompPolicy : public NonTieredCompPolicy {
  public:
-  virtual void method_invocation_event(methodHandle m, TRAPS);
-  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS);
+  virtual void method_invocation_event(methodHandle m, JavaThread* thread);
+  virtual void method_back_branch_event(methodHandle m, int bci, JavaThread* thread);
 };
 
 // StackWalkCompPolicy - existing C2 policy
@@ -121,8 +121,8 @@
 #ifdef COMPILER2
 class StackWalkCompPolicy : public NonTieredCompPolicy {
  public:
-  virtual void method_invocation_event(methodHandle m, TRAPS);
-  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS);
+  virtual void method_invocation_event(methodHandle m, JavaThread* thread);
+  virtual void method_back_branch_event(methodHandle m, int bci, JavaThread* thread);
 
  private:
   RFrame* findTopInlinableFrame(GrowableArray<RFrame*>* stack);
--- a/src/share/vm/runtime/deoptimization.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/deoptimization.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -339,7 +339,6 @@
 
 #ifdef ASSERT
   assert(cb->is_deoptimization_stub() || cb->is_uncommon_trap_stub(), "just checking");
-  Events::log("fetch unroll sp " INTPTR_FORMAT, unpack_sp);
 #endif
 #else
   intptr_t* unpack_sp = stub_frame.sender(&dummy_map).unextended_sp();
@@ -577,6 +576,8 @@
     tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d", thread, array, exec_mode);
   }
 #endif
+  Events::log(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
+              stub_frame.pc(), stub_frame.sp(), exec_mode);
 
   UnrollBlock* info = array->unroll_block();
 
@@ -981,6 +982,7 @@
 #endif // COMPILER2
 
 vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) {
+  Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, fr.pc(), fr.sp());
 
 #ifndef PRODUCT
   if (TraceDeoptimization) {
@@ -1026,7 +1028,6 @@
 
   // Compare the vframeArray to the collected vframes
   assert(array->structural_compare(thread, chunk), "just checking");
-  Events::log("# vframes = %d", (intptr_t)chunk->length());
 
 #ifndef PRODUCT
   if (TraceDeoptimization) {
@@ -1124,8 +1125,6 @@
 
   gather_statistics(Reason_constraint, Action_none, Bytecodes::_illegal);
 
-  EventMark m("Deoptimization (pc=" INTPTR_FORMAT ", sp=" INTPTR_FORMAT ")", fr.pc(), fr.id());
-
   // Patch the nmethod so that when execution returns to it we will
   // deopt the execution state and return to the interpreter.
   fr.deoptimize(thread);
@@ -1239,6 +1238,10 @@
   // before we are done with it.
   nmethodLocker nl(fr.pc());
 
+  // Log a message
+  Events::log_deopt_message(thread, "Uncommon trap %d fr.pc " INTPTR_FORMAT,
+                            trap_request, fr.pc());
+
   {
     ResourceMark rm;
 
@@ -1249,7 +1252,6 @@
     DeoptAction action = trap_request_action(trap_request);
     jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
 
-    Events::log("Uncommon trap occurred @" INTPTR_FORMAT " unloaded_class_index = %d", fr.pc(), (int) trap_request);
     vframe*  vf  = vframe::new_vframe(&fr, &reg_map, thread);
     compiledVFrame* cvf = compiledVFrame::cast(vf);
 
--- a/src/share/vm/runtime/dtraceJSDT.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/dtraceJSDT.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -82,7 +82,7 @@
 
   int handle = pd_activate((void*)probes,
     module_name, providers_count, providers);
-  if (handle <= 0) {
+  if (handle < 0) {
     delete probes;
     THROW_MSG_0(vmSymbols::java_lang_RuntimeException(),
       "Unable to register DTrace probes (internal error).");
--- a/src/share/vm/runtime/frame.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/frame.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -570,7 +570,7 @@
     InterpreterCodelet* desc = Interpreter::codelet_containing(pc());
     if (desc != NULL) {
       st->print("~");
-      desc->print();
+      desc->print_on(st);
       NOT_PRODUCT(begin = desc->code_begin(); end = desc->code_end();)
     } else {
       st->print("~interpreter");
@@ -1315,7 +1315,6 @@
 }
 #endif
 
-
 #ifdef ASSERT
 void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const {
   assert(is_interpreted_frame(), "Not an interpreted frame");
@@ -1331,24 +1330,35 @@
   guarantee((current - low_mark) % monitor_size  ==  0         , "Misaligned bottom of BasicObjectLock*");
   guarantee( current >= low_mark                               , "Current BasicObjectLock* below than low_mark");
 }
+#endif
 
+#ifndef PRODUCT
+void frame::describe(FrameValues& values, int frame_no) {
+  // boundaries: sp and the 'real' frame pointer
+  values.describe(-1, sp(), err_msg("sp for #%d", frame_no), 1);
+  intptr_t* frame_pointer = real_fp(); // Note: may differ from fp()
 
-void frame::describe(FrameValues& values, int frame_no) {
-  intptr_t* frame_pointer = real_fp();
+  // print frame info at the highest boundary
+  intptr_t* info_address = MAX2(sp(), frame_pointer);
+
+  if (info_address != frame_pointer) {
+    // print frame_pointer explicitly if not marked by the frame info
+    values.describe(-1, frame_pointer, err_msg("frame pointer for #%d", frame_no), 1);
+  }
+
   if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) {
     // Label values common to most frames
     values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no));
-    values.describe(-1, sp(), err_msg("sp for #%d", frame_no));
-    values.describe(-1, frame_pointer, err_msg("frame pointer for #%d", frame_no));
   }
+
   if (is_interpreted_frame()) {
     methodOop m = interpreter_frame_method();
     int bci = interpreter_frame_bci();
 
     // Label the method and current bci
-    values.describe(-1, MAX2(sp(), frame_pointer),
+    values.describe(-1, info_address,
                     FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 2);
-    values.describe(-1, MAX2(sp(), frame_pointer),
+    values.describe(-1, info_address,
                     err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 1);
     if (m->max_locals() > 0) {
       intptr_t* l0 = interpreter_frame_local_at(0);
@@ -1380,21 +1390,36 @@
     }
   } else if (is_entry_frame()) {
     // For now just label the frame
-    values.describe(-1, MAX2(sp(), frame_pointer), err_msg("#%d entry frame", frame_no), 2);
+    values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
   } else if (is_compiled_frame()) {
     // For now just label the frame
     nmethod* nm = cb()->as_nmethod_or_null();
-    values.describe(-1, MAX2(sp(), frame_pointer),
+    values.describe(-1, info_address,
                     FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s", frame_no,
                                        nm, nm->method()->name_and_sig_as_C_string(),
-                                       is_deoptimized_frame() ? " (deoptimized" : ""), 2);
+                                       (_deopt_state == is_deoptimized) ?
+                                       " (deoptimized)" :
+                                       ((_deopt_state == unknown) ? " (state unknown)" : "")),
+                    2);
   } else if (is_native_frame()) {
     // For now just label the frame
     nmethod* nm = cb()->as_nmethod_or_null();
-    values.describe(-1, MAX2(sp(), frame_pointer),
+    values.describe(-1, info_address,
                     FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no,
                                        nm, nm->method()->name_and_sig_as_C_string()), 2);
+  } else if (is_ricochet_frame()) {
+    values.describe(-1, info_address, err_msg("#%d ricochet frame", frame_no), 2);
+  } else {
+    // provide default info if not handled before
+    char *info = (char *) "special frame";
+    if ((_cb != NULL) &&
+        (_cb->name() != NULL)) {
+      info = (char *)_cb->name();
+    }
+    values.describe(-1, info_address, err_msg("#%d <%s>", frame_no, info), 2);
   }
+
+  // platform dependent additional data
   describe_pd(values, frame_no);
 }
 
@@ -1411,7 +1436,7 @@
 }
 
 
-#ifdef ASSERT
+#ifndef PRODUCT
 
 void FrameValues::describe(int owner, intptr_t* location, const char* description, int priority) {
   FrameValue fv;
@@ -1424,6 +1449,7 @@
 }
 
 
+#ifdef ASSERT
 void FrameValues::validate() {
   _values.sort(compare);
   bool error = false;
@@ -1449,7 +1475,7 @@
   }
   assert(!error, "invalid layout");
 }
-
+#endif // ASSERT
 
 void FrameValues::print(JavaThread* thread) {
   _values.sort(compare);
@@ -1498,4 +1524,4 @@
   }
 }
 
-#endif
+#endif // ndef PRODUCT
--- a/src/share/vm/runtime/frame.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/frame.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -494,7 +494,7 @@
 
 };
 
-#ifdef ASSERT
+#ifndef PRODUCT
 // A simple class to describe a location on the stack
 class FrameValue VALUE_OBJ_CLASS_SPEC {
  public:
@@ -524,7 +524,9 @@
   // Used by frame functions to describe locations.
   void describe(int owner, intptr_t* location, const char* description, int priority = 0);
 
+#ifdef ASSERT
   void validate();
+#endif
   void print(JavaThread* thread);
 };
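
A minimal debug-only sketch (not part of this changeset; the helper name and frame limit are made up) of how the reworked frame::describe()/FrameValues pair above can be driven to annotate a stopped thread's stack:

    #include "runtime/frame.hpp"
    #include "runtime/thread.hpp"

    #ifndef PRODUCT
    // Collect annotations for the first few frames, then print the sorted stack layout.
    static void describe_top_frames(JavaThread* thread, int max_frames) {
      FrameValues values;
      int frame_no = 0;
      for (StackFrameStream fst(thread); !fst.is_done() && frame_no < max_frames; fst.next()) {
        fst.current()->describe(values, frame_no++);  // labels sp, fp, unextended_sp, locals, ...
      }
      values.print(thread);                           // validate() remains available under ASSERT
    }
    #endif // ndef PRODUCT

Because print() sorts the recorded values by address, the describe() calls can be issued in any frame order.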
 
--- a/src/share/vm/runtime/globals.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/globals.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,6 +81,12 @@
   }
 }
 
+// Get the custom message for this locked flag, or leave the buffer
+// empty if none is available.
+void Flag::get_locked_message(char* buf, int buflen) const {
+  get_locked_message_ext(buf, buflen);
+}
+
 bool Flag::is_writeable() const {
   return strcmp(kind, "{manageable}") == 0 ||
          strcmp(kind, "{product rw}") == 0 ||
@@ -260,17 +266,22 @@
   return strncmp(s, q, len) == 0;
 }
 
-Flag* Flag::find_flag(char* name, size_t length) {
-  for (Flag* current = &flagTable[0]; current->name; current++) {
+// Search the flag table for a named flag
+Flag* Flag::find_flag(char* name, size_t length, bool allow_locked) {
+  for (Flag* current = &flagTable[0]; current->name != NULL; current++) {
     if (str_equal(current->name, name, length)) {
+      // Found a matching entry.  Report locked flags only if allowed.
       if (!(current->is_unlocked() || current->is_unlocker())) {
-        // disable use of diagnostic or experimental flags until they
-        // are explicitly unlocked
-        return NULL;
+        if (!allow_locked) {
+          // disable use of locked flags, e.g. diagnostic, experimental,
+          // commercial... until they are explicitly unlocked
+          return NULL;
+        }
       }
       return current;
     }
   }
+  // Flag name is not in the flag table
   return NULL;
 }
 
--- a/src/share/vm/runtime/globals.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/globals.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,17 @@
 #define SHARE_VM_RUNTIME_GLOBALS_HPP
 
 #include "utilities/debug.hpp"
+
+// use this for flags that are true by default in the tiered build
+// but false in non-tiered builds, and vice versa
+#ifdef TIERED
+#define  trueInTiered true
+#define falseInTiered false
+#else
+#define  trueInTiered false
+#define falseInTiered true
+#endif
+
 #ifdef TARGET_ARCH_x86
 # include "globals_x86.hpp"
 #endif
@@ -211,7 +222,7 @@
   // number of flags
   static size_t numFlags;
 
-  static Flag* find_flag(char* name, size_t length);
+  static Flag* find_flag(char* name, size_t length, bool allow_locked = false);
 
   bool is_bool() const        { return strcmp(type, "bool") == 0; }
   bool get_bool() const       { return *((bool*) addr); }
@@ -248,6 +259,9 @@
   bool is_writeable_ext() const;
   bool is_external_ext() const;
 
+  void get_locked_message(char*, int) const;
+  void get_locked_message_ext(char*, int) const;
+
   void print_on(outputStream* st, bool withComments = false );
   void print_as_flag(outputStream* st);
 };
@@ -353,16 +367,6 @@
 #define falseInProduct true
 #endif
 
-// use this for flags that are true per default in the tiered build
-// but false in non-tiered builds, and vice versa
-#ifdef TIERED
-#define  trueInTiered true
-#define falseInTiered false
-#else
-#define  trueInTiered false
-#define falseInTiered true
-#endif
-
 #ifdef JAVASE_EMBEDDED
 #define falseInEmbedded false
 #else
@@ -658,6 +662,12 @@
   develop(bool, SpecialArraysEquals, true,                                  \
           "special version of Arrays.equals(char[],char[])")                \
                                                                             \
+  product(bool, CriticalJNINatives, true,                                   \
+          "check for critical JNI entry points")                            \
+                                                                            \
+  notproduct(bool, StressCriticalJNINatives, false,                         \
+            "Exercise register saving code in critical natives")            \
+                                                                            \
   product(bool, UseSSE42Intrinsics, false,                                  \
           "SSE4.2 versions of intrinsics")                                  \
                                                                             \
@@ -735,8 +745,11 @@
   product(bool, MaxFDLimit, true,                                           \
           "Bump the number of file descriptors to max in solaris.")         \
                                                                             \
-  notproduct(bool, LogEvents, trueInDebug,                                  \
-          "Enable Event log")                                               \
+  diagnostic(bool, LogEvents, true,                                         \
+             "Enable the various ring buffer event logs")                   \
+                                                                            \
+  diagnostic(intx, LogEventsBufferEntries, 10,                              \
+             "Enable the various ring buffer event logs")                   \
                                                                             \
   product(bool, BytecodeVerificationRemote, true,                           \
           "Enables the Java bytecode verifier for remote classes")          \
@@ -1042,6 +1055,9 @@
   notproduct(bool, PrintSystemDictionaryAtExit, false,                      \
           "Prints the system dictionary at exit")                           \
                                                                             \
+  experimental(intx, PredictedLoadedClassCount, 0,                          \
+          "Experimental: Tune loaded class cache starting size.")           \
+                                                                            \
   diagnostic(bool, UnsyncloadClass, false,                                  \
           "Unstable: VM calls loadClass unsynchronized. Custom "            \
           "class loader  must call VM synchronized for findClass "          \
@@ -3000,7 +3016,7 @@
   product(intx, SafepointTimeoutDelay, 10000,                               \
           "Delay in milliseconds for option SafepointTimeout")              \
                                                                             \
-  product(intx, NmethodSweepFraction, 4,                                    \
+  product(intx, NmethodSweepFraction, 16,                                   \
           "Number of invocations of sweeper to cover all nmethods")         \
                                                                             \
   product(intx, NmethodSweepCheckInterval, 5,                               \
@@ -3477,16 +3493,19 @@
           "    Linux this policy requires root privilege.")                 \
                                                                             \
   product(bool, ThreadPriorityVerbose, false,                               \
-          "print priority changes")                                         \
+          "Print priority changes")                                         \
                                                                             \
   product(intx, DefaultThreadPriority, -1,                                  \
-          "what native priority threads run at if not specified elsewhere (-1 means no change)") \
+          "The native priority at which threads run if not elsewhere "      \
+          "specified (-1 means no change)")                                 \
                                                                             \
   product(intx, CompilerThreadPriority, -1,                                 \
-          "what priority should compiler threads run at (-1 means no change)") \
+          "The native priority at which compiler threads should run "       \
+          "(-1 means no change)")                                           \
                                                                             \
   product(intx, VMThreadPriority, -1,                                       \
-          "what priority should VM threads run at (-1 means no change)")    \
+          "The native priority at which the VM thread should run "          \
+          "(-1 means no change)")                                           \
                                                                             \
   product(bool, CompilerThreadHintNoPreempt, true,                          \
           "(Solaris only) Give compiler threads an extra quanta")           \
@@ -3505,6 +3524,15 @@
   product(intx, JavaPriority9_To_OSPriority, -1, "Map Java priorities to OS priorities") \
   product(intx, JavaPriority10_To_OSPriority,-1, "Map Java priorities to OS priorities") \
                                                                             \
+  experimental(bool, UseCriticalJavaThreadPriority, false,                  \
+          "Java thread priority 10 maps to critical scheduling priority")   \
+                                                                            \
+  experimental(bool, UseCriticalCompilerThreadPriority, false,              \
+          "Compiler thread(s) run at critical scheduling priority")         \
+                                                                            \
+  experimental(bool, UseCriticalCMSThreadPriority, false,                   \
+          "ConcurrentMarkSweep thread runs at critical scheduling priority")\
+                                                                            \
   /* compiler debugging */                                                  \
   notproduct(intx, CompileTheWorldStartAt,     1,                           \
           "First class to consider when using +CompileTheWorld")            \
@@ -3574,7 +3602,7 @@
           "Threshold at which tier 3 compilation is invoked (invocation "   \
           "minimum must be satisfied.")                                     \
                                                                             \
-  product(intx, Tier3BackEdgeThreshold,  7000,                              \
+  product(intx, Tier3BackEdgeThreshold,  60000,                             \
           "Back edge threshold at which tier 3 OSR compilation is invoked") \
                                                                             \
   product(intx, Tier4InvocationThreshold, 5000,                             \
@@ -3871,7 +3899,10 @@
   product(bool, UseVMInterruptibleIO, false,                                \
           "(Unstable, Solaris-specific) Thread interrupt before or with "   \
           "EINTR for I/O operations results in OS_INTRPT. The default value"\
-          " of this flag is true for JDK 6 and earliers")
+          " of this flag is true for JDK 6 and earlier")                    \
+                                                                            \
+  diagnostic(bool, WhiteBoxAPI, false,                                      \
+          "Enable internal testing APIs")
 
 /*
  *  Macros for factoring of globals
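
For reference, a flag that uses the relocated tiered defaults is declared in the table above like this (the flag name here is hypothetical, not one added by this change):

    product(bool, SomeTieredOnlyFeature, trueInTiered,                        \
            "Hypothetical flag that is on by default only in TIERED builds")  \

Note also that LogEvents is now a diagnostic flag, so turning the ring-buffer logs on from the command line additionally requires -XX:+UnlockDiagnosticVMOptions.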
--- a/src/share/vm/runtime/globals_ext.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/globals_ext.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,4 +61,9 @@
   return false;
 }
 
+inline void Flag::get_locked_message_ext(char* buf, int buflen) const {
+  assert(buf != NULL, "Buffer cannot be NULL");
+  buf[0] = '\0';
+}
+
 #endif // SHARE_VM_RUNTIME_GLOBALS_EXT_HPP
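
A small sketch (the flag name and buffer size are hypothetical) showing how the allow_locked lookup and the locked-message hook added above can be combined when reporting a rejected flag:

    #include <string.h>
    #include "runtime/globals.hpp"
    #include "utilities/ostream.hpp"

    // Explain why a locked (diagnostic, experimental, commercial...) flag was not accepted.
    static void report_locked_flag(const char* name) {
      Flag* flag = Flag::find_flag((char*)name, strlen(name), true /* allow_locked */);
      if (flag != NULL && !(flag->is_unlocked() || flag->is_unlocker())) {
        char msg[256];
        flag->get_locked_message(msg, sizeof(msg));  // base implementation just clears the buffer
        if (msg[0] != '\0') {
          tty->print_raw(msg);
        }
      }
    }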
--- a/src/share/vm/runtime/init.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/init.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,7 @@
 void InlineCacheBuffer_init();
 void compilerOracle_init();
 void compilationPolicy_init();
-
+void compileBroker_init();
 
 // Initialization after compiler initialization
 bool universe_post_init();  // must happen after compiler_init
@@ -120,6 +120,7 @@
   InlineCacheBuffer_init();
   compilerOracle_init();
   compilationPolicy_init();
+  compileBroker_init();
   VMRegImpl::set_regName();
 
   if (!universe_post_init()) {
--- a/src/share/vm/runtime/interfaceSupport.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/interfaceSupport.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -436,6 +436,7 @@
 #define VM_LEAF_BASE(result_type, header)                            \
   TRACE_CALL(result_type, header)                                    \
   debug_only(NoHandleMark __hm;)                                     \
+  os::verify_stack_alignment();                                      \
   /* begin of body */
 
 
@@ -445,6 +446,7 @@
   TRACE_CALL(result_type, header)                                    \
   HandleMarkCleaner __hm(thread);                                    \
   Thread* THREAD = thread;                                           \
+  os::verify_stack_alignment();                                      \
   /* begin of body */
 
 
@@ -454,6 +456,7 @@
   TRACE_CALL(result_type, header)                                    \
   debug_only(NoHandleMark __hm;)                                     \
   Thread* THREAD = thread;                                           \
+  os::verify_stack_alignment();                                      \
   /* begin of body */
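
The os::verify_stack_alignment() call added to these entry macros is declared PRODUCT_RETURN (see the os.hpp hunk below), so it only has a real body in non-product builds. A representative, platform-style definition might look like the following sketch (the exact constant and message differ per os_cpu file):

    #ifndef PRODUCT
    void os::verify_stack_alignment() {
      assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes - 1)) == 0,
             "incorrect stack alignment");
    }
    #endif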
 
 
--- a/src/share/vm/runtime/mutex.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/mutex.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,6 +1,6 @@
 
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1296,10 +1296,6 @@
 
       assert(this->rank() >= 0, "bad lock rank");
 
-      if (LogMultipleMutexLocking && locks != NULL) {
-        Events::log("thread " INTPTR_FORMAT " locks %s, already owns %s", new_owner, name(), locks->name());
-      }
-
       // Deadlock avoidance rules require us to acquire Mutexes only in
       // a global total order. For example m1 is the lowest ranked mutex
       // that the thread holds and m2 is the mutex the thread is trying
@@ -1343,10 +1339,6 @@
     #ifdef ASSERT
       Monitor *locks = old_owner->owned_locks();
 
-      if (LogMultipleMutexLocking && locks != this) {
-        Events::log("thread " INTPTR_FORMAT " unlocks %s, still owns %s", old_owner, this->name(), locks->name());
-      }
-
       // remove "this" from the owned locks list
 
       Monitor *prev = NULL;
--- a/src/share/vm/runtime/mutexLocker.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/mutexLocker.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -126,6 +126,7 @@
 Mutex*   FreeList_lock                = NULL;
 Monitor* SecondaryFreeList_lock       = NULL;
 Mutex*   OldSets_lock                 = NULL;
+Monitor* RootRegionScan_lock          = NULL;
 Mutex*   MMUTracker_lock              = NULL;
 Mutex*   HotCardCache_lock            = NULL;
 
@@ -199,6 +200,7 @@
     def(FreeList_lock              , Mutex,   leaf     ,   true );
     def(SecondaryFreeList_lock     , Monitor, leaf     ,   true );
     def(OldSets_lock               , Mutex  , leaf     ,   true );
+    def(RootRegionScan_lock        , Monitor, leaf     ,   true );
     def(MMUTracker_lock            , Mutex  , leaf     ,   true );
     def(HotCardCache_lock          , Mutex  , special  ,   true );
     def(EvacFailureStack_lock      , Mutex  , nonleaf  ,   true );
--- a/src/share/vm/runtime/mutexLocker.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/mutexLocker.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -115,7 +115,7 @@
 
 #ifndef PRODUCT
 extern Mutex*   FullGCALot_lock;                 // a lock to make FullGCALot MT safe
-#endif
+#endif // PRODUCT
 extern Mutex*   Debug1_lock;                     // A bunch of pre-allocated locks that can be used for tracing
 extern Mutex*   Debug2_lock;                     // down synchronization related bugs!
 extern Mutex*   Debug3_lock;
@@ -129,6 +129,7 @@
 extern Mutex*   FreeList_lock;                   // protects the free region list during safepoints
 extern Monitor* SecondaryFreeList_lock;          // protects the secondary free region list
 extern Mutex*   OldSets_lock;                    // protects the old region sets
+extern Monitor* RootRegionScan_lock;             // used to notify that the CM threads have finished scanning the IM snapshot regions
 extern Mutex*   MMUTracker_lock;                 // protects the MMU
                                                  // tracker data structures
 extern Mutex*   HotCardCache_lock;               // protects the hot card cache
--- a/src/share/vm/runtime/os.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/os.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -73,8 +73,9 @@
   MinPriority      =  1,     // Minimum priority
   NormPriority     =  5,     // Normal (non-daemon) priority
   NearMaxPriority  =  9,     // High priority, used for VMThread
-  MaxPriority      = 10      // Highest priority, used for WatcherThread
+  MaxPriority      = 10,     // Highest priority, used for WatcherThread
                              // ensures that VMThread doesn't starve profiler
+  CriticalPriority = 11      // Critical thread priority
 };
 
 // Typedef for structured exception handling support
@@ -403,6 +404,8 @@
   static address current_stack_base();
   static size_t current_stack_size();
 
+  static void verify_stack_alignment() PRODUCT_RETURN;
+
   static int message_box(const char* title, const char* message);
   static char* do_you_want_to_debug(const char* message);
 
@@ -733,7 +736,7 @@
   // Thread priority helpers (implemented in OS-specific part)
   static OSReturn set_native_priority(Thread* thread, int native_prio);
   static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr);
-  static int java_to_os_priority[MaxPriority + 1];
+  static int java_to_os_priority[CriticalPriority + 1];
   // Hint to the underlying OS that a task switch would not be good.
   // Void return because it's a hint and can fail.
   static void hint_no_preempt();
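
With CriticalPriority appended to the enum, each platform's java_to_os_priority table gains one slot. The values below are purely illustrative (real tables live in the os_<platform>.cpp files):

    int os::java_to_os_priority[CriticalPriority + 1] = {
      19,              //  0 entry should never be used
       4,  3,  2,  1,  //  1-4
       0, -1, -2, -3,  //  5-8
      -4, -5,          //  9-10 (NearMaxPriority, MaxPriority)
      -5               // 11 CriticalPriority, used only when a UseCritical*Priority flag is set
    };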
--- a/src/share/vm/runtime/reflection.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/reflection.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -591,14 +591,11 @@
 // Caller is responsible for figuring out in advance which case must be true.
 void Reflection::check_for_inner_class(instanceKlassHandle outer, instanceKlassHandle inner,
                                        bool inner_is_member, TRAPS) {
-  const int inner_class_info_index = 0;
-  const int outer_class_info_index = 1;
-
-  typeArrayHandle    icls (THREAD, outer->inner_classes());
+  InnerClassesIterator iter(outer);
   constantPoolHandle cp   (THREAD, outer->constants());
-  for(int i = 0; i < icls->length(); i += 4) {
-     int ioff = icls->ushort_at(i + inner_class_info_index);
-     int ooff = icls->ushort_at(i + outer_class_info_index);
+  for (; !iter.done(); iter.next()) {
+     int ioff = iter.inner_class_info_index();
+     int ooff = iter.outer_class_info_index();
 
      if (inner_is_member && ioff != 0 && ooff != 0) {
         klassOop o = cp->klass_at(ooff, CHECK);
--- a/src/share/vm/runtime/safepoint.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/safepoint.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -95,6 +95,7 @@
 SafepointSynchronize::SynchronizeState volatile SafepointSynchronize::_state = SafepointSynchronize::_not_synchronized;
 volatile int  SafepointSynchronize::_waiting_to_block = 0;
 volatile int SafepointSynchronize::_safepoint_counter = 0;
+int SafepointSynchronize::_current_jni_active_count = 0;
 long  SafepointSynchronize::_end_of_last_safepoint = 0;
 static volatile int PageArmed = 0 ;        // safepoint polling page is RO|RW vs PROT_NONE
 static volatile int TryingToBlock = 0 ;    // proximate value -- for advisory use only
@@ -135,9 +136,11 @@
 
   RuntimeService::record_safepoint_begin();
 
-  {
   MutexLocker mu(Safepoint_lock);
 
+  // Reset the count of active JNI critical threads
+  _current_jni_active_count = 0;
+
   // Set number of threads to wait for, before we initiate the callbacks
   _waiting_to_block = nof_threads;
   TryingToBlock     = 0 ;
@@ -216,6 +219,8 @@
 #ifdef ASSERT
   for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
     assert(cur->safepoint_state()->is_running(), "Illegal initial state");
+    // Clear the visited flag to ensure that the critical counts are collected properly.
+    cur->set_visited_for_critical_count(false);
   }
 #endif // ASSERT
 
@@ -375,6 +380,16 @@
 
   OrderAccess::fence();
 
+#ifdef ASSERT
+  for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
+    // make sure all the threads were visited
+    assert(cur->was_visited_for_critical_count(), "missed a thread");
+  }
+#endif // ASSERT
+
+  // Update the count of active JNI critical regions
+  GC_locker::set_jni_lock_count(_current_jni_active_count);
+
   if (TraceSafepoint) {
     VM_Operation *op = VMThread::vm_operation();
     tty->print_cr("Entering safepoint region: %s", (op != NULL) ? op->name() : "no vm operation");
@@ -392,7 +407,6 @@
     // Record how much time spend on the above cleanup tasks
     update_statistics_on_cleanup_end(os::javaTimeNanos());
   }
-  }
 }
 
 // Wake up all threads, so they are ready to resume execution after the safepoint
@@ -539,6 +553,42 @@
 }
 
 
+// See if the thread is running inside a lazy critical native and
+// update the thread critical count if so.  Also set a suspend flag to
+// cause the native wrapper to return into the JVM to do the unlock
+// once the native finishes.
+void SafepointSynchronize::check_for_lazy_critical_native(JavaThread *thread, JavaThreadState state) {
+  if (state == _thread_in_native &&
+      thread->has_last_Java_frame() &&
+      thread->frame_anchor()->walkable()) {
+    // This thread might be in a critical native nmethod so look at
+    // the top of the stack and increment the critical count if it
+    // is.
+    frame wrapper_frame = thread->last_frame();
+    CodeBlob* stub_cb = wrapper_frame.cb();
+    if (stub_cb != NULL &&
+        stub_cb->is_nmethod() &&
+        stub_cb->as_nmethod_or_null()->is_lazy_critical_native()) {
+      // A thread could potentially be in a critical native across
+      // more than one safepoint, so only update the critical state on
+      // the first one.  When it returns it will perform the unlock.
+      if (!thread->do_critical_native_unlock()) {
+#ifdef ASSERT
+        if (!thread->in_critical()) {
+          GC_locker::increment_debug_jni_lock_count();
+        }
+#endif
+        thread->enter_critical();
+        // Make sure the native wrapper calls back on return to
+        // perform the needed critical unlock.
+        thread->set_critical_native_unlock();
+      }
+    }
+  }
+}
+
+
+
 // -------------------------------------------------------------------------------------------------------
 // Implementation of Safepoint callback point
 
@@ -585,6 +635,12 @@
         _waiting_to_block--;
         thread->safepoint_state()->set_has_called_back(true);
 
+        DEBUG_ONLY(thread->set_visited_for_critical_count(true));
+        if (thread->in_critical()) {
+          // Notice that this thread is in a critical section
+          increment_jni_active_count();
+        }
+
         // Consider (_waiting_to_block < 2) to pipeline the wakeup of the VM thread
         if (_waiting_to_block == 0) {
           Safepoint_lock->notify_all();
@@ -861,8 +917,9 @@
   // running, but are actually at a safepoint. We will happily
   // agree and update the safepoint state here.
   if (SafepointSynchronize::safepoint_safe(_thread, state)) {
-      roll_forward(_at_safepoint);
-      return;
+    SafepointSynchronize::check_for_lazy_critical_native(_thread, state);
+    roll_forward(_at_safepoint);
+    return;
   }
 
   if (state == _thread_in_vm) {
@@ -886,6 +943,11 @@
   switch(_type) {
     case _at_safepoint:
       SafepointSynchronize::signal_thread_at_safepoint();
+      DEBUG_ONLY(_thread->set_visited_for_critical_count(true));
+      if (_thread->in_critical()) {
+        // Notice that this thread is in a critical section
+        SafepointSynchronize::increment_jni_active_count();
+      }
       break;
 
     case _call_back:
--- a/src/share/vm/runtime/safepoint.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/safepoint.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "code/nmethod.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/extendedPC.hpp"
+#include "runtime/mutexLocker.hpp"
 #include "runtime/os.hpp"
 #include "utilities/ostream.hpp"
 
@@ -92,6 +93,7 @@
  private:
   static volatile SynchronizeState _state;     // Threads might read this flag directly, without acquiring the Threads_lock
   static volatile int _waiting_to_block;       // number of threads we are waiting for to block
+  static int _current_jni_active_count;        // Counts the number of active critical natives during the safepoint
 
   // This counter is used for fast versions of jni_Get<Primitive>Field.
   // An even value means there is no ongoing safepoint operations.
@@ -138,6 +140,8 @@
 
   static bool safepoint_safe(JavaThread *thread, JavaThreadState state);
 
+  static void check_for_lazy_critical_native(JavaThread *thread, JavaThreadState state);
+
   // Query
   inline static bool is_at_safepoint()   { return _state == _synchronized;  }
   inline static bool is_synchronizing()  { return _state == _synchronizing;  }
@@ -146,6 +150,11 @@
     return (_state != _not_synchronized);
   }
 
+  inline static void increment_jni_active_count() {
+    assert_locked_or_safepoint(Safepoint_lock);
+    _current_jni_active_count++;
+  }
+
   // Called when a thread voluntarily blocks
   static void   block(JavaThread *thread);
   static void   signal_thread_at_safepoint()              { _waiting_to_block--; }
--- a/src/share/vm/runtime/sharedRuntime.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -804,6 +804,7 @@
         if (thread->deopt_mark() != NULL) {
           Deoptimization::cleanup_deopt_info(thread, NULL);
         }
+        Events::log_exception(thread, "StackOverflowError at " INTPTR_FORMAT, pc);
         return StubRoutines::throw_StackOverflowError_entry();
       }
 
@@ -820,8 +821,10 @@
 
           if (vt_stub->is_abstract_method_error(pc)) {
             assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
+            Events::log_exception(thread, "AbstractMethodError at " INTPTR_FORMAT, pc);
             return StubRoutines::throw_AbstractMethodError_entry();
           } else {
+            Events::log_exception(thread, "NullPointerException at vtable entry " INTPTR_FORMAT, pc);
             return StubRoutines::throw_NullPointerException_at_call_entry();
           }
         } else {
@@ -838,6 +841,7 @@
           if (!cb->is_nmethod()) {
             guarantee(cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(),
                       "exception happened outside interpreter, nmethods and vtable stubs (1)");
+            Events::log_exception(thread, "NullPointerException in code blob at " INTPTR_FORMAT, pc);
             // There is no handler here, so we will simply unwind.
             return StubRoutines::throw_NullPointerException_at_call_entry();
           }
@@ -849,6 +853,7 @@
             // => the nmethod is not yet active (i.e., the frame
             // is not set up yet) => use return address pushed by
             // caller => don't push another return address
+            Events::log_exception(thread, "NullPointerException in IC check " INTPTR_FORMAT, pc);
             return StubRoutines::throw_NullPointerException_at_call_entry();
           }
 
@@ -886,9 +891,9 @@
     // for AbortVMOnException flag
     NOT_PRODUCT(Exceptions::debug_check_abort("java.lang.NullPointerException"));
     if (exception_kind == IMPLICIT_NULL) {
-      Events::log("Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
+      Events::log_exception(thread, "Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
     } else {
-      Events::log("Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
+      Events::log_exception(thread, "Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
     }
     return target_pc;
   }
@@ -1541,7 +1546,6 @@
   if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
 
     address pc = caller.pc();
-    Events::log("update call-site at pc " INTPTR_FORMAT, pc);
 
     // Default call_addr is the location of the "basic" call.
     // Determine the address of the call we a reresolving. With
@@ -2679,6 +2683,20 @@
   return nm;
 }
 
+JRT_ENTRY_NO_ASYNC(void, SharedRuntime::block_for_jni_critical(JavaThread* thread))
+  assert(thread == JavaThread::current(), "must be");
+  // The code is about to enter a JNI lazy critical native method and
+  // _needs_gc is true, so if this thread is already in a critical
+  // section then just return, otherwise this thread should block
+  // until needs_gc has been cleared.
+  if (thread->in_critical()) {
+    return;
+  }
+  // Lock and unlock a critical section to give the system a chance to block
+  GC_locker::lock_critical(thread);
+  GC_locker::unlock_critical(thread);
+JRT_END
+
 #ifdef HAVE_DTRACE_H
 // Create a dtrace nmethod for this method.  The wrapper converts the
 // java compiled calling convention to the native convention, makes a dummy call
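
For context, the "critical" natives that block_for_jni_critical() supports are static natives for which the wrapper can, when no GC is pending, call an alternate entry that takes raw length/pointer pairs instead of array handles. A hedged illustration follows; the class/method names and the JavaCritical_ naming convention are assumptions for this sketch, not something defined by this hunk:

    #include <jni.h>

    // Regular JNI entry: always available; used when the critical form cannot be entered.
    extern "C" JNIEXPORT jint JNICALL
    Java_Example_sum(JNIEnv* env, jclass, jintArray arr) {
      jint len = env->GetArrayLength(arr);
      jint* data = env->GetIntArrayElements(arr, NULL);
      jint s = 0;
      for (jint i = 0; i < len; i++) s += data[i];
      env->ReleaseIntArrayElements(arr, data, JNI_ABORT);
      return s;
    }

    // Critical entry: no JNIEnv or jclass, arrays passed as (length, pointer);
    // must not block, allocate, or call back into the JVM.
    extern "C" JNIEXPORT jint JNICALL
    JavaCritical_Example_sum(jint len, jint* data) {
      jint s = 0;
      for (jint i = 0; i < len; i++) s += data[i];
      return s;
    }

When GC_locker::needs_gc() is set on entry to such a method, the generated wrapper calls block_for_jni_critical() above so the thread parks until the pending GC can proceed.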
--- a/src/share/vm/runtime/sharedRuntime.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/sharedRuntime.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -462,6 +462,9 @@
                                           VMRegPair *regs,
                                           BasicType ret_type );
 
+  // Block before entering a JNI critical method
+  static void block_for_jni_critical(JavaThread* thread);
+
 #ifdef HAVE_DTRACE_H
   // Generate a dtrace wrapper for a given method.  The method takes arguments
   // in the Java compiled code convention, marshals them to the native
--- a/src/share/vm/runtime/simpleThresholdPolicy.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/simpleThresholdPolicy.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -177,13 +177,11 @@
 }
 
 nmethod* SimpleThresholdPolicy::event(methodHandle method, methodHandle inlinee,
-                                      int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
+                                      int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) {
   if (comp_level == CompLevel_none &&
-      JvmtiExport::can_post_interpreter_events()) {
-    assert(THREAD->is_Java_thread(), "Should be java thread");
-    if (((JavaThread*)THREAD)->is_interp_only_mode()) {
-      return NULL;
-    }
+      JvmtiExport::can_post_interpreter_events() &&
+      thread->is_interp_only_mode()) {
+    return NULL;
   }
   nmethod *osr_nm = NULL;
 
@@ -197,9 +195,9 @@
   }
 
   if (bci == InvocationEntryBci) {
-    method_invocation_event(method, inlinee, comp_level, nm, THREAD);
+    method_invocation_event(method, inlinee, comp_level, nm, thread);
   } else {
-    method_back_branch_event(method, inlinee, bci, comp_level, nm, THREAD);
+    method_back_branch_event(method, inlinee, bci, comp_level, nm, thread);
     // method == inlinee if the event originated in the main method
     int highest_level = inlinee->highest_osr_comp_level();
     if (highest_level > comp_level) {
@@ -210,7 +208,7 @@
 }
 
 // Check if the method can be compiled, change level if necessary
-void SimpleThresholdPolicy::compile(methodHandle mh, int bci, CompLevel level, TRAPS) {
+void SimpleThresholdPolicy::compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
   assert(level <= TieredStopAtLevel, "Invalid compilation level");
   if (level == CompLevel_none) {
     return;
@@ -221,7 +219,7 @@
   // pure C1.
   if (!can_be_compiled(mh, level)) {
     if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
-        compile(mh, bci, CompLevel_simple, THREAD);
+        compile(mh, bci, CompLevel_simple, thread);
     }
     return;
   }
@@ -232,14 +230,14 @@
     if (PrintTieredEvents) {
       print_event(COMPILE, mh, mh, bci, level);
     }
-    submit_compile(mh, bci, level, THREAD);
+    submit_compile(mh, bci, level, thread);
   }
 }
 
 // Tell the broker to compile the method
-void SimpleThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS) {
+void SimpleThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
   int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
-  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", THREAD);
+  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
 }
 
 // Call and loop predicates determine whether a transition to a higher
@@ -366,11 +364,11 @@
 
 // Handle the invocation event.
 void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
-                                              CompLevel level, nmethod* nm, TRAPS) {
+                                              CompLevel level, nmethod* nm, JavaThread* thread) {
   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
     CompLevel next_level = call_event(mh(), level);
     if (next_level != level) {
-      compile(mh, InvocationEntryBci, next_level, THREAD);
+      compile(mh, InvocationEntryBci, next_level, thread);
     }
   }
 }
@@ -378,7 +376,7 @@
 // Handle the back branch event. Notice that we can compile the method
 // with a regular entry from here.
 void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
-                                                     int bci, CompLevel level, nmethod* nm, TRAPS) {
+                                                     int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
   // If the method is already compiling, quickly bail out.
   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
     // Use loop event as an opportunity to also check there's been
@@ -391,13 +389,13 @@
                       next_osr_level < CompLevel_full_optimization ? next_osr_level : cur_level);
     bool is_compiling = false;
     if (next_level != cur_level) {
-      compile(mh, InvocationEntryBci, next_level, THREAD);
+      compile(mh, InvocationEntryBci, next_level, thread);
       is_compiling = true;
     }
 
     // Do the OSR version
     if (!is_compiling && next_osr_level != level) {
-      compile(mh, bci, next_osr_level, THREAD);
+      compile(mh, bci, next_osr_level, thread);
     }
   }
 }
--- a/src/share/vm/runtime/simpleThresholdPolicy.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/simpleThresholdPolicy.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -67,9 +67,9 @@
   // Print policy-specific information if necessary
   virtual void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level) { }
   // Check if the method can be compiled, change level if necessary
-  void compile(methodHandle mh, int bci, CompLevel level, TRAPS);
+  void compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread);
   // Submit a given method for compilation
-  virtual void submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS);
+  virtual void submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread);
   // Simple methods are as good being compiled with C1 as C2.
   // This function tells if it's such a function.
   inline bool is_trivial(methodOop method);
@@ -88,9 +88,9 @@
     return CompLevel_none;
   }
   virtual void method_invocation_event(methodHandle method, methodHandle inlinee,
-                                       CompLevel level, nmethod* nm, TRAPS);
+                                       CompLevel level, nmethod* nm, JavaThread* thread);
   virtual void method_back_branch_event(methodHandle method, methodHandle inlinee,
-                                        int bci, CompLevel level, nmethod* nm, TRAPS);
+                                        int bci, CompLevel level, nmethod* nm, JavaThread* thread);
 public:
   SimpleThresholdPolicy() : _c1_count(0), _c2_count(0) { }
   virtual int compiler_count(CompLevel comp_level) {
@@ -104,7 +104,7 @@
   virtual void disable_compilation(methodOop method) { }
   virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
   virtual nmethod* event(methodHandle method, methodHandle inlinee,
-                         int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS);
+                         int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread);
   // Select task is called by CompileBroker. We should return a task or NULL.
   virtual CompileTask* select_task(CompileQueue* compile_queue);
   // Tell the runtime if we think a given method is adequately profiled.
--- a/src/share/vm/runtime/sweeper.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/sweeper.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -266,7 +266,17 @@
 
     // The last invocation iterates until there are no more nmethods
     for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
+      if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
+        if (PrintMethodFlushing && Verbose) {
+          tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
+        }
+        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 
+        assert(Thread::current()->is_Java_thread(), "should be java thread");
+        JavaThread* thread = (JavaThread*)Thread::current();
+        ThreadBlockInVM tbivm(thread);
+        thread->java_suspend_self();
+      }
       // Since we will give up the CodeCache_lock, always skip ahead
       // to the next nmethod.  Other blobs can be deleted by other
       // threads but nmethods are only reclaimed by the sweeper.
--- a/src/share/vm/runtime/thread.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/thread.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
 #include "interpreter/linkResolver.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
+#include "memory/gcLocker.inline.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/instanceKlass.hpp"
@@ -246,6 +247,10 @@
   omInUseList = NULL ;
   omInUseCount = 0 ;
 
+#ifdef ASSERT
+  _visited_for_critical_count = false;
+#endif
+
   _SR_lock = new Monitor(Mutex::suspend_resume, "SR_lock", true);
   _suspend_flags = 0;
 
@@ -1600,8 +1605,6 @@
     // java.lang.Thread.dispatchUncaughtException
     if (uncaught_exception.not_null()) {
       Handle group(this, java_lang_Thread::threadGroup(threadObj()));
-      Events::log("uncaught exception INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT",
-        (address)uncaught_exception(), (address)threadObj(), (address)group());
       {
         EXCEPTION_MARK;
         // Check if the method Thread.dispatchUncaughtException() exists. If so
@@ -2280,6 +2283,26 @@
   }
 }
 
+// This is a variant of the normal
+// check_special_condition_for_native_trans with slightly different
+// semantics for use by critical native wrappers.  It does all the
+// normal checks but also performs the transition back into
+// thread_in_Java state.  This is required so that critical natives
+// can potentially block and perform a GC if they are the last thread
+// exiting the GC_locker.
+void JavaThread::check_special_condition_for_native_trans_and_transition(JavaThread *thread) {
+  check_special_condition_for_native_trans(thread);
+
+  // Finish the transition
+  thread->set_thread_state(_thread_in_Java);
+
+  if (thread->do_critical_native_unlock()) {
+    ThreadInVMfromJavaNoAsyncException tiv(thread);
+    GC_locker::unlock_critical(thread);
+    thread->clear_critical_native_unlock();
+  }
+}
+
 // We need to guarantee the Threads_lock here, since resumes are not
 // allowed during safepoint synchronization
 // Can only resume from an external suspension
@@ -3201,11 +3224,6 @@
     return status;
   }
 
-  // Must be run after init_ft which initializes ft_enabled
-  if (TRACE_INITIALIZE() != JNI_OK) {
-    vm_exit_during_initialization("Failed to initialize tracing backend");
-  }
-
   // Should be done after the heap is fully created
   main_thread->cache_global_variables();
 
@@ -3347,6 +3365,7 @@
       initialize_class(vmSymbols::java_lang_ArithmeticException(), CHECK_0);
       initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK_0);
       initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK_0);
+      initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK_0);
     } else {
       warning("java.lang.OutOfMemoryError has not been initialized");
       warning("java.lang.NullPointerException has not been initialized");
@@ -3354,6 +3373,7 @@
       warning("java.lang.ArrayStoreException has not been initialized");
       warning("java.lang.ArithmeticException has not been initialized");
       warning("java.lang.StackOverflowError has not been initialized");
+      warning("java.lang.IllegalArgumentException has not been initialized");
     }
   }
 
@@ -3383,6 +3403,11 @@
 
   quicken_jni_functions();
 
+  // Must be run after init_ft which initializes ft_enabled
+  if (TRACE_INITIALIZE() != JNI_OK) {
+    vm_exit_during_initialization("Failed to initialize tracing backend");
+  }
+
   // Set flag that basic initialization has completed. Used by exceptions and various
   // debug stuff, that does not work until all basic classes have been initialized.
   set_init_completed();
@@ -3885,7 +3910,7 @@
   ThreadService::add_thread(p, daemon);
 
   // Possible GC point.
-  Events::log("Thread added: " INTPTR_FORMAT, p);
+  Events::log(p, "Thread added: " INTPTR_FORMAT, p);
 }
 
 void Threads::remove(JavaThread* p) {
@@ -3930,7 +3955,7 @@
   } // unlock Threads_lock
 
   // Since Events::log uses a lock, we grab it outside the Threads_lock
-  Events::log("Thread exited: " INTPTR_FORMAT, p);
+  Events::log(p, "Thread exited: " INTPTR_FORMAT, p);
 }
 
 // Threads_lock must be held when this is called (or must be called during a safepoint)
--- a/src/share/vm/runtime/thread.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/thread.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -182,7 +182,8 @@
     _ext_suspended          = 0x40000000U, // thread has self-suspended
     _deopt_suspend          = 0x10000000U, // thread needs to self suspend for deopt
 
-    _has_async_exception    = 0x00000001U  // there is a pending async exception
+    _has_async_exception    = 0x00000001U, // there is a pending async exception
+    _critical_native_unlock = 0x00000002U  // Must call back to unlock JNI critical lock
   };
 
   // various suspension related flags - atomically updated
@@ -267,6 +268,15 @@
   ObjectMonitor* omInUseList;                   // SLL to track monitors in circulation
   int omInUseCount;                             // length of omInUseList
 
+#ifdef ASSERT
+ private:
+  bool _visited_for_critical_count;
+
+ public:
+  void set_visited_for_critical_count(bool z) { _visited_for_critical_count = z; }
+  bool was_visited_for_critical_count() const   { return _visited_for_critical_count; }
+#endif
+
  public:
   enum {
     is_definitely_current_thread = true
@@ -350,6 +360,15 @@
     clear_suspend_flag(_has_async_exception);
   }
 
+  bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }
+
+  void set_critical_native_unlock() {
+    set_suspend_flag(_critical_native_unlock);
+  }
+  void clear_critical_native_unlock() {
+    clear_suspend_flag(_critical_native_unlock);
+  }
+
   // Support for Unhandled Oop detection
 #ifdef CHECK_UNHANDLED_OOPS
  private:
@@ -1038,6 +1057,11 @@
   // Check for async exception in addition to safepoint and suspend request.
   static void check_special_condition_for_native_trans(JavaThread *thread);
 
+  // Same as check_special_condition_for_native_trans but finishes the
+  // transition into thread_in_Java mode so that it can potentially
+  // block.
+  static void check_special_condition_for_native_trans_and_transition(JavaThread *thread);
+
   bool is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits);
   bool is_ext_suspend_completed_with_lock(uint32_t *bits) {
     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
@@ -1310,8 +1334,10 @@
 
   // JNI critical regions. These can nest.
   bool in_critical()    { return _jni_active_critical > 0; }
-  void enter_critical() { assert(Thread::current() == this,
-                                 "this must be current thread");
+  bool in_last_critical()  { return _jni_active_critical == 1; }
+  void enter_critical() { assert(Thread::current() == this ||
+                                 Thread::current()->is_VM_thread() && SafepointSynchronize::is_synchronizing(),
+                                 "this must be current thread or synchronizing");
                           _jni_active_critical++; }
   void exit_critical()  { assert(Thread::current() == this,
                                  "this must be current thread");
--- a/src/share/vm/runtime/virtualspace.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/virtualspace.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -455,7 +455,7 @@
 
 void ReservedSpace::protect_noaccess_prefix(const size_t size) {
   assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
-                                      (size_t(_base + _size) > OopEncodingHeapMax) &&
+                                      (Universe::narrow_oop_base() != NULL) &&
                                       Universe::narrow_oop_use_implicit_null_checks()),
          "noaccess_prefix should be used only with non zero based compressed oops");
 
--- a/src/share/vm/runtime/vmStructs.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/vmStructs.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -314,7 +314,7 @@
   nonstatic_field(instanceKlass,               _init_thread,                                  Thread*)                               \
   nonstatic_field(instanceKlass,               _vtable_len,                                   int)                                   \
   nonstatic_field(instanceKlass,               _itable_len,                                   int)                                   \
-  nonstatic_field(instanceKlass,               _reference_type,                               ReferenceType)                         \
+  nonstatic_field(instanceKlass,               _reference_type,                               u1)                                    \
   volatile_nonstatic_field(instanceKlass,      _oop_map_cache,                                OopMapCache*)                          \
   nonstatic_field(instanceKlass,               _jni_ids,                                      JNIid*)                                \
   nonstatic_field(instanceKlass,               _osr_nmethods_head,                            nmethod*)                              \
@@ -2261,13 +2261,6 @@
                                                                           \
   declare_constant(SymbolTable::symbol_table_size)                        \
                                                                           \
-  /********************/                                                  \
-  /* SystemDictionary */                                                  \
-  /********************/                                                  \
-                                                                          \
-  declare_constant(SystemDictionary::_loader_constraint_size)             \
-  declare_constant(SystemDictionary::_nof_buckets)                        \
-                                                                          \
   /***********************************/                                   \
   /* LoaderConstraintTable constants */                                   \
   /***********************************/                                   \
--- a/src/share/vm/runtime/vm_version.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/vm_version.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -165,6 +165,13 @@
   return VM_RELEASE;
 }
 
+// NOTE: do *not* use stringStream. This function is called by
+//       fatal error handlers. If the crash is in a native thread,
+//       stringStream cannot get resources allocated and will SEGV.
+const char* Abstract_VM_Version::jre_release_version() {
+  return JRE_RELEASE_VERSION;
+}
+
 #define OS       LINUX_ONLY("linux")             \
                  WINDOWS_ONLY("windows")         \
                  SOLARIS_ONLY("solaris")         \
--- a/src/share/vm/runtime/vm_version.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/runtime/vm_version.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -71,6 +71,7 @@
 
   // Internal version providing additional build information
   static const char* internal_vm_info_string();
+  static const char* jre_release_version();
 
   // does HW support an 8-byte compare-exchange operation?
   static bool supports_cx8()  {return _supports_cx8;}
--- a/src/share/vm/services/diagnosticArgument.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/services/diagnosticArgument.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,9 +28,16 @@
 #include "services/diagnosticArgument.hpp"
 
 void GenDCmdArgument::read_value(const char* str, size_t len, TRAPS) {
-  if (is_set()) {
+  /* NOTE: Some argument types don't require a value;
+   * for instance the boolean argument "enableFeatureX" is
+   * equivalent to "enableFeatureX=true". In these cases,
+   * str will be null. This is perfectly valid.
+   * All argument types must perform null checks on str.
+   */
+
+  if (is_set() && !allow_multiple()) {
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-            "Duplicates in diagnostic command arguments");
+            "Duplicates in diagnostic command arguments\n");
   }
   parse_value(str, len, CHECK);
   set_is_set(true);
@@ -38,9 +45,9 @@
 
 template <> void DCmdArgument<jlong>::parse_value(const char* str,
                                                   size_t len, TRAPS) {
-  if (sscanf(str, INT64_FORMAT, &_value) != 1) {
+  if (str == NULL || sscanf(str, INT64_FORMAT, &_value) != 1) {
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-      "Integer parsing error in diagnostic command arguments");
+      "Integer parsing error in diagnostic command arguments\n");
   }
 }
 
@@ -59,12 +66,13 @@
 
 template <> void DCmdArgument<bool>::parse_value(const char* str,
                                                  size_t len, TRAPS) {
+  // len is the length of the current token starting at str
   if (len == 0) {
     set_value(true);
   } else {
-    if (strcasecmp(str, "true") == 0) {
+    if (len == strlen("true") && strncasecmp(str, "true", len) == 0) {
        set_value(true);
-    } else if (strcasecmp(str, "false") == 0) {
+    } else if (len == strlen("false") && strncasecmp(str, "false", len) == 0) {
        set_value(false);
     } else {
       THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
@@ -88,16 +96,20 @@
 
 template <> void DCmdArgument<char*>::parse_value(const char* str,
                                                   size_t len, TRAPS) {
-  _value = NEW_C_HEAP_ARRAY(char, len+1);
-  strncpy(_value, str, len);
-  _value[len] = 0;
+  if (str == NULL) {
+    _value = NULL;
+  } else {
+    _value = NEW_C_HEAP_ARRAY(char, len+1);
+    strncpy(_value, str, len);
+    _value[len] = 0;
+  }
 }
 
 template <> void DCmdArgument<char*>::init_value(TRAPS) {
-  if (has_default()) {
+  if (has_default() && _default_string != NULL) {
     this->parse_value(_default_string, strlen(_default_string), THREAD);
     if (HAS_PENDING_EXCEPTION) {
-      fatal("Default string must be parsable");
+     fatal("Default string must be parsable");
     }
   } else {
     set_value(NULL);
@@ -110,3 +122,153 @@
     set_value(NULL);
   }
 }
+
+template <> void DCmdArgument<NanoTimeArgument>::parse_value(const char* str,
+                                                 size_t len, TRAPS) {
+  if (str == NULL) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Integer parsing error in nanotime value: syntax error");
+  }
+
+  int argc = sscanf(str, INT64_FORMAT , &_value._time);
+  if (argc != 1) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Integer parsing error in nanotime value: syntax error");
+  }
+  size_t idx = 0;
+  while(idx < len && isdigit(str[idx])) {
+    idx++;
+  }
+  if (idx == len) {
+    // only accept missing unit if the value is 0
+    if (_value._time != 0) {
+      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+                "Integer parsing error in nanotime value: unit required");
+    } else {
+      _value._nanotime = 0;
+      strcpy(_value._unit, "ns");
+      return;
+    }
+  } else if(len - idx > 2) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Integer parsing error in nanotime value: illegal unit");
+  } else {
+    strncpy(_value._unit, &str[idx], len - idx);
+    /* Write an extra null termination. This is safe because _value._unit
+     * is declared as char[3] and the length is checked above to be no
+     * larger than two. It is also necessary, since the length might be 1
+     * while the unit already in the buffer may be "ns", which is two chars.
+     */
+    _value._unit[len-idx] = '\0';
+  }
+
+  if (strcmp(_value._unit, "ns") == 0) {
+    _value._nanotime = _value._time;
+  } else if (strcmp(_value._unit, "us") == 0) {
+    _value._nanotime = _value._time * 1000;
+  } else if (strcmp(_value._unit, "ms") == 0) {
+    _value._nanotime = _value._time * 1000 * 1000;
+  } else if (strcmp(_value._unit, "s") == 0) {
+    _value._nanotime = _value._time * 1000 * 1000 * 1000;
+  } else if (strcmp(_value._unit, "m") == 0) {
+    _value._nanotime = _value._time * 60 * 1000 * 1000 * 1000;
+  } else if (strcmp(_value._unit, "h") == 0) {
+    _value._nanotime = _value._time * 60 * 60 * 1000 * 1000 * 1000;
+  } else if (strcmp(_value._unit, "d") == 0) {
+    _value._nanotime = _value._time * 24 * 60 * 60 * 1000 * 1000 * 1000;
+  } else {
+     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+               "Integer parsing error in nanotime value: illegal unit");
+  }
+}
+
+template <> void DCmdArgument<NanoTimeArgument>::init_value(TRAPS) {
+  if (has_default()) {
+    this->parse_value(_default_string, strlen(_default_string), THREAD);
+    if (HAS_PENDING_EXCEPTION) {
+      fatal("Default string must be parsable");
+    }
+  } else {
+    _value._time = 0;
+    _value._nanotime = 0;
+    strcpy(_value._unit, "ns");
+  }
+}
+
+template <> void DCmdArgument<NanoTimeArgument>::destroy_value() { }
+
+// WARNING StringArrayArgument can only be used as an option, it cannot be
+// used as an argument with the DCmdParser
+
+template <> void DCmdArgument<StringArrayArgument*>::parse_value(const char* str,
+                                                  size_t len, TRAPS) {
+  _value->add(str,len);
+}
+
+template <> void DCmdArgument<StringArrayArgument*>::init_value(TRAPS) {
+  _value = new StringArrayArgument();
+  _allow_multiple = true;
+  if (has_default()) {
+    fatal("StringArrayArgument cannot have default value");
+  }
+}
+
+template <> void DCmdArgument<StringArrayArgument*>::destroy_value() {
+  if (_value != NULL) {
+    delete _value;
+    set_value(NULL);
+  }
+}
+
+template <> void DCmdArgument<MemorySizeArgument>::parse_value(const char* str,
+                                                  size_t len, TRAPS) {
+  if (str == NULL) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Parsing error memory size value: syntax error");
+  }
+
+  if (*str == '-') {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+               "Parsing error memory size value: negative values not allowed");
+  }
+  int res = sscanf(str, UINT64_FORMAT "%c", &_value._val, &_value._multiplier);
+  if (res == 2) {
+    switch (_value._multiplier) {
+      case 'k': case 'K':
+        _value._size = _value._val * 1024;
+        break;
+      case 'm': case 'M':
+        _value._size = _value._val * 1024 * 1024;
+        break;
+      case 'g': case 'G':
+        _value._size = _value._val * 1024 * 1024 * 1024;
+        break;
+      default:
+        _value._size = _value._val;
+        _value._multiplier = ' ';
+        // The default case breaks with no error, since the user can write
+        // the size in bytes, or there may be a delimiter and a next argument.
+        break;
+    }
+  } else if (res == 1) {
+    _value._size = _value._val;
+  } else {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Parsing error memory size value: invalid value");
+  }
+}
+
+template <> void DCmdArgument<MemorySizeArgument>::init_value(TRAPS) {
+  if (has_default()) {
+    this->parse_value(_default_string, strlen(_default_string), THREAD);
+    if (HAS_PENDING_EXCEPTION) {
+      fatal("Default string must be parsable");
+    }
+  } else {
+    _value._size = 0;
+    _value._val = 0;
+    _value._multiplier = ' ';
+  }
+}
+
+template <> void DCmdArgument<MemorySizeArgument>::destroy_value() { }
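For reference, a minimal standalone sketch (editor's illustration, not HotSpot code) of the unit-suffix convention the nanotime parser above implements: a number followed by an optional suffix is normalized to a base unit, and a missing suffix is only accepted for zero.

// Editor's sketch: the sscanf-based approach and the to_nanos name are
// illustrative only; the real parser reports IllegalArgumentException instead
// of returning a bool.
#include <cstdio>
#include <cstring>

// Convert "<number><unit>" (ns/us/ms/s/m/h/d) to nanoseconds.
static bool to_nanos(const char* str, long long* out) {
  long long value = 0;
  char unit[3] = {0, 0, 0};
  int n = sscanf(str, "%lld%2s", &value, unit);
  if (n < 1) return false;
  if (n == 1) {                       // a missing unit is only accepted for 0
    if (value != 0) return false;
    *out = 0;
    return true;
  }
  if      (strcmp(unit, "ns") == 0) *out = value;
  else if (strcmp(unit, "us") == 0) *out = value * 1000;
  else if (strcmp(unit, "ms") == 0) *out = value * 1000 * 1000;
  else if (strcmp(unit, "s")  == 0) *out = value * 1000 * 1000 * 1000;
  else if (strcmp(unit, "m")  == 0) *out = value * 60 * 1000 * 1000 * 1000;
  else if (strcmp(unit, "h")  == 0) *out = value * 60 * 60 * 1000 * 1000 * 1000;
  else if (strcmp(unit, "d")  == 0) *out = value * 24 * 60 * 60 * 1000 * 1000 * 1000;
  else return false;
  return true;
}

int main() {
  long long ns;
  if (to_nanos("500ms", &ns)) printf("%lld ns\n", ns);   // prints 500000000 ns
  return 0;
}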
--- a/src/share/vm/services/diagnosticArgument.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/services/diagnosticArgument.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,49 @@
 #include "runtime/thread.hpp"
 #include "utilities/exceptions.hpp"
 
+class StringArrayArgument : public CHeapObj {
+private:
+  GrowableArray<char*>* _array;
+public:
+  StringArrayArgument() {
+    _array = new(ResourceObj::C_HEAP) GrowableArray<char*>(32, true);
+    assert(_array != NULL, "Sanity check");
+  }
+  void add(const char* str, size_t len) {
+    if (str != NULL) {
+      char* ptr = NEW_C_HEAP_ARRAY(char, len+1);
+      strncpy(ptr, str, len);
+      ptr[len] = 0;
+      _array->append(ptr);
+    }
+  }
+  GrowableArray<char*>* array() {
+    return _array;
+  }
+  ~StringArrayArgument() {
+    for (int i = 0; i < _array->length(); i++) {
+      if (_array->at(i) != NULL) { // Safety check
+        FREE_C_HEAP_ARRAY(char, _array->at(i));
+      }
+    }
+    delete _array;
+  }
+};
+
+class NanoTimeArgument {
+public:
+  jlong _nanotime;
+  jlong _time;
+  char _unit[3];
+};
+
+class MemorySizeArgument {
+public:
+  u8 _size;
+  u8 _val;
+  char _multiplier;
+};
+
 class GenDCmdArgument : public ResourceObj {
 protected:
   GenDCmdArgument* _next;
@@ -40,6 +83,7 @@
   const char*      _default_string;
   bool             _is_set;
   bool             _is_mandatory;
+  bool             _allow_multiple;
   GenDCmdArgument(const char* name, const char* description, const char* type,
                   const char* default_string, bool mandatory) {
     _name = name;
@@ -48,6 +92,7 @@
     _default_string = default_string;
     _is_mandatory = mandatory;
     _is_set = false;
+    _allow_multiple = false;
   };
 public:
   const char* name() { return _name; }
@@ -56,6 +101,7 @@
   const char* default_string() { return _default_string; }
   bool is_set() { return _is_set; }
   void set_is_set(bool b) { _is_set = b; }
+  bool allow_multiple() { return _allow_multiple; }
   bool is_mandatory() { return _is_mandatory; }
   bool has_value() { return _is_set || _default_string != NULL; }
   bool has_default() { return _default_string != NULL; }
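A hedged, standalone sketch of the accumulation behaviour that the new allow_multiple()/StringArrayArgument combination above enables; std::vector and std::string stand in for GrowableArray and the C-heap arrays, and the class name is hypothetical.

// Editor's sketch, not HotSpot code.
#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

class StringListOption {
  std::vector<std::string> _values;   // one entry per occurrence of the option
 public:
  // Repeated values accumulate instead of triggering the
  // "Duplicates in diagnostic command arguments" error.
  void add(const char* str, std::size_t len) {
    if (str != nullptr) _values.push_back(std::string(str, len));
  }
  const std::vector<std::string>& values() const { return _values; }
};

int main() {
  StringListOption opt;
  opt.add("java.lang.String", 16);    // the same option given twice
  opt.add("java.util.List", 14);
  for (std::size_t i = 0; i < opt.values().size(); i++) {
    printf("%s\n", opt.values()[i].c_str());
  }
  return 0;
}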
--- a/src/share/vm/services/diagnosticCommand.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/services/diagnosticCommand.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,38 @@
 #include "services/heapDumper.hpp"
 #include "services/management.hpp"
 
+void DCmdRegistrant::register_dcmds() {
+  // Registration of the diagnostic commands
+  // First boolean argument specifies if the command is enabled
+  // Second boolean argument specifies if the command is hidden
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HelpDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VersionDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CommandLineDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintSystemPropertiesDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintVMFlagsDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VMUptimeDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<SystemGCDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<RunFinalizationDCmd>(true, false));
+#ifndef SERVICES_KERNEL   // Heap dumping not supported
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HeapDumpDCmd>(true, false));
+#endif // SERVICES_KERNEL
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassHistogramDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ThreadDumpDCmd>(true, false));
+
+  // Enhanced JMX Agent Support
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JMXStartRemoteDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JMXStartLocalDCmd>(true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JMXStopRemoteDCmd>(true, false));
+
+}
+
+#ifndef HAVE_EXTRA_DCMD
+void DCmdRegistrant::register_dcmds_ext() {
+  // Do nothing here
+}
+#endif
+
+
 HelpDCmd::HelpDCmd(outputStream* output, bool heap) : DCmdWithParser(output, heap),
   _all("-all", "Show help for all commands", "BOOLEAN", false, "false"),
   _cmd("command name", "The name of the command for which we want help",
@@ -317,3 +349,185 @@
     return 0;
   }
 }
+
+// Enhanced JMX Agent support
+
+JMXStartRemoteDCmd::JMXStartRemoteDCmd(outputStream *output, bool heap_allocated) :
+
+  DCmdWithParser(output, heap_allocated),
+
+  _config_file
+  ("config.file",
+   "set com.sun.management.config.file", "STRING", false),
+
+  _jmxremote_port
+  ("jmxremote.port",
+   "set com.sun.management.jmxremote.port", "STRING", false),
+
+  _jmxremote_rmi_port
+  ("jmxremote.rmi.port",
+   "set com.sun.management.jmxremote.rmi.port", "STRING", false),
+
+  _jmxremote_ssl
+  ("jmxremote.ssl",
+   "set com.sun.management.jmxremote.ssl", "STRING", false),
+
+  _jmxremote_registry_ssl
+  ("jmxremote.registry.ssl",
+   "set com.sun.management.jmxremote.registry.ssl", "STRING", false),
+
+  _jmxremote_authenticate
+  ("jmxremote.authenticate",
+   "set com.sun.management.jmxremote.authenticate", "STRING", false),
+
+  _jmxremote_password_file
+  ("jmxremote.password.file",
+   "set com.sun.management.jmxremote.password.file", "STRING", false),
+
+  _jmxremote_access_file
+  ("jmxremote.access.file",
+   "set com.sun.management.jmxremote.access.file", "STRING", false),
+
+  _jmxremote_login_config
+  ("jmxremote.login.config",
+   "set com.sun.management.jmxremote.login.config", "STRING", false),
+
+  _jmxremote_ssl_enabled_cipher_suites
+  ("jmxremote.ssl.enabled.cipher.suites",
+   "set com.sun.management.jmxremote.ssl.enabled.cipher.suite", "STRING", false),
+
+  _jmxremote_ssl_enabled_protocols
+  ("jmxremote.ssl.enabled.protocols",
+   "set com.sun.management.jmxremote.ssl.enabled.protocols", "STRING", false),
+
+  _jmxremote_ssl_need_client_auth
+  ("jmxremote.ssl.need.client.auth",
+   "set com.sun.management.jmxremote.need.client.auth", "STRING", false),
+
+  _jmxremote_ssl_config_file
+  ("jmxremote.ssl.config.file",
+   "set com.sun.management.jmxremote.ssl_config_file", "STRING", false)
+
+  {
+    _dcmdparser.add_dcmd_option(&_config_file);
+    _dcmdparser.add_dcmd_option(&_jmxremote_port);
+    _dcmdparser.add_dcmd_option(&_jmxremote_rmi_port);
+    _dcmdparser.add_dcmd_option(&_jmxremote_ssl);
+    _dcmdparser.add_dcmd_option(&_jmxremote_registry_ssl);
+    _dcmdparser.add_dcmd_option(&_jmxremote_authenticate);
+    _dcmdparser.add_dcmd_option(&_jmxremote_password_file);
+    _dcmdparser.add_dcmd_option(&_jmxremote_access_file);
+    _dcmdparser.add_dcmd_option(&_jmxremote_login_config);
+    _dcmdparser.add_dcmd_option(&_jmxremote_ssl_enabled_cipher_suites);
+    _dcmdparser.add_dcmd_option(&_jmxremote_ssl_enabled_protocols);
+    _dcmdparser.add_dcmd_option(&_jmxremote_ssl_need_client_auth);
+    _dcmdparser.add_dcmd_option(&_jmxremote_ssl_config_file);
+}
+
+
+int JMXStartRemoteDCmd::num_arguments() {
+  ResourceMark rm;
+  JMXStartRemoteDCmd* dcmd = new JMXStartRemoteDCmd(NULL, false);
+  if (dcmd != NULL) {
+    DCmdMark mark(dcmd);
+    return dcmd->_dcmdparser.num_arguments();
+  } else {
+    return 0;
+  }
+}
+
+
+void JMXStartRemoteDCmd::execute(TRAPS) {
+    ResourceMark rm(THREAD);
+    HandleMark hm(THREAD);
+
+    // Load and initialize the sun.management.Agent class
+    // invoke startRemoteManagementAgent(string) method to start
+    // the remote management server.
+    // throw java.lang.NoSuchMethodError if the method doesn't exist
+
+    Handle loader = Handle(THREAD, SystemDictionary::java_system_loader());
+    klassOop k = SystemDictionary::resolve_or_fail(vmSymbols::sun_management_Agent(), loader, Handle(), true, CHECK);
+    instanceKlassHandle ik (THREAD, k);
+
+    JavaValue result(T_VOID);
+
+    // Pass all command line arguments to java as key=value,...
+    // All checks are done on java side
+
+    int len = 0;
+    stringStream options;
+    char comma[2] = {0,0};
+
+    // Leave default values on the Agent.class side and pass only the
+    // arguments explicitly set by the user. All arguments passed
+    // to jcmd override properties with the same name set on the
+    // command line with -D or in the management.properties
+    // file.
+#define PUT_OPTION(a) \
+    if ((a).is_set()) { \
+        options.print("%scom.sun.management.%s=%s", comma, (a).name(), (a).value()); \
+        comma[0] = ','; \
+    }
+
+    PUT_OPTION(_config_file);
+    PUT_OPTION(_jmxremote_port);
+    PUT_OPTION(_jmxremote_rmi_port);
+    PUT_OPTION(_jmxremote_ssl);
+    PUT_OPTION(_jmxremote_registry_ssl);
+    PUT_OPTION(_jmxremote_authenticate);
+    PUT_OPTION(_jmxremote_password_file);
+    PUT_OPTION(_jmxremote_access_file);
+    PUT_OPTION(_jmxremote_login_config);
+    PUT_OPTION(_jmxremote_ssl_enabled_cipher_suites);
+    PUT_OPTION(_jmxremote_ssl_enabled_protocols);
+    PUT_OPTION(_jmxremote_ssl_need_client_auth);
+    PUT_OPTION(_jmxremote_ssl_config_file);
+
+#undef PUT_OPTION
+
+    Handle str = java_lang_String::create_from_str(options.as_string(), CHECK);
+    JavaCalls::call_static(&result, ik, vmSymbols::startRemoteAgent_name(), vmSymbols::string_void_signature(), str, CHECK);
+}
+
+JMXStartLocalDCmd::JMXStartLocalDCmd(outputStream *output, bool heap_allocated) :
+  DCmd(output, heap_allocated)
+{
+  // do nothing
+}
+
+void JMXStartLocalDCmd::execute(TRAPS) {
+    ResourceMark rm(THREAD);
+    HandleMark hm(THREAD);
+
+    // Load and initialize the sun.management.Agent class
+    // invoke startLocalManagementAgent(void) method to start
+    // the local management server
+    // throw java.lang.NoSuchMethodError if method doesn't exist
+
+    Handle loader = Handle(THREAD, SystemDictionary::java_system_loader());
+    klassOop k = SystemDictionary::resolve_or_fail(vmSymbols::sun_management_Agent(), loader, Handle(), true, CHECK);
+    instanceKlassHandle ik (THREAD, k);
+
+    JavaValue result(T_VOID);
+    JavaCalls::call_static(&result, ik, vmSymbols::startLocalAgent_name(), vmSymbols::void_method_signature(), CHECK);
+}
+
+
+void JMXStopRemoteDCmd::execute(TRAPS) {
+    ResourceMark rm(THREAD);
+    HandleMark hm(THREAD);
+
+    // Load and initialize the sun.management.Agent class
+    // invoke stopRemoteManagementAgent method to stop the
+    // management server
+    // throw java.lang.NoSuchMethodError if method doesn't exist
+
+    Handle loader = Handle(THREAD, SystemDictionary::java_system_loader());
+    klassOop k = SystemDictionary::resolve_or_fail(vmSymbols::sun_management_Agent(), loader, Handle(), true, CHECK);
+    instanceKlassHandle ik (THREAD, k);
+
+    JavaValue result(T_VOID);
+    JavaCalls::call_static(&result, ik, vmSymbols::stopRemoteAgent_name(), vmSymbols::void_method_signature(), CHECK);
+}
+
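As a rough standalone illustration (editor's sketch, not HotSpot code) of the "emit a comma only once something has been printed" pattern the PUT_OPTION macro above relies on: only user-set options are forwarded, joined into a single key=value,... string. The Option struct and build_options function are hypothetical; std::string stands in for stringStream.

#include <cstdio>
#include <string>

struct Option { const char* name; const char* value; bool set; };

static std::string build_options(const Option* opts, int count) {
  std::string out;
  const char* comma = "";                 // becomes "," after the first option
  for (int i = 0; i < count; i++) {
    if (!opts[i].set) continue;           // only user-set options are forwarded
    char buf[256];
    snprintf(buf, sizeof(buf), "%scom.sun.management.%s=%s",
             comma, opts[i].name, opts[i].value);
    out += buf;
    comma = ",";
  }
  return out;
}

int main() {
  Option opts[] = {
    { "jmxremote.port", "7091",  true  },
    { "jmxremote.ssl",  "false", true  },
    { "config.file",    "",      false },  // not set, so skipped
  };
  printf("%s\n", build_options(opts, 3).c_str());
  // com.sun.management.jmxremote.port=7091,com.sun.management.jmxremote.ssl=false
  return 0;
}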
--- a/src/share/vm/services/diagnosticCommand.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/services/diagnosticCommand.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
 #include "services/diagnosticArgument.hpp"
 #include "services/diagnosticCommand.hpp"
 #include "services/diagnosticFramework.hpp"
+#include "services/diagnosticCommand_ext.hpp"
 
 class HelpDCmd : public DCmdWithParser {
 protected:
@@ -213,4 +214,82 @@
   virtual void execute(TRAPS);
 };
 
+// Enhanced JMX Agent support
+
+class JMXStartRemoteDCmd : public DCmdWithParser {
+
+  // Explicitly list all properties that could be
+  // passed to Agent.startRemoteManagementAgent()
+  // the com.sun.management prefix is omitted
+
+  DCmdArgument<char *> _config_file;
+  DCmdArgument<char *> _jmxremote_port;
+  DCmdArgument<char *> _jmxremote_rmi_port;
+  DCmdArgument<char *> _jmxremote_ssl;
+  DCmdArgument<char *> _jmxremote_registry_ssl;
+  DCmdArgument<char *> _jmxremote_authenticate;
+  DCmdArgument<char *> _jmxremote_password_file;
+  DCmdArgument<char *> _jmxremote_access_file;
+  DCmdArgument<char *> _jmxremote_login_config;
+  DCmdArgument<char *> _jmxremote_ssl_enabled_cipher_suites;
+  DCmdArgument<char *> _jmxremote_ssl_enabled_protocols;
+  DCmdArgument<char *> _jmxremote_ssl_need_client_auth;
+  DCmdArgument<char *> _jmxremote_ssl_config_file;
+
+public:
+  JMXStartRemoteDCmd(outputStream *output, bool heap_allocated);
+
+  static const char *name() {
+    return "ManagementAgent.start";
+  }
+
+  static const char *description() {
+    return "Start remote management agent.";
+  }
+
+  static int num_arguments();
+
+  virtual void execute(TRAPS);
+
+};
+
+class JMXStartLocalDCmd : public DCmd {
+
+  // Explicitly requests the start of the local agent;
+  // it will not be started by the "start" dcmd
+
+
+public:
+  JMXStartLocalDCmd(outputStream *output, bool heap_allocated);
+
+  static const char *name() {
+    return "ManagementAgent.start_local";
+  }
+
+  static const char *description() {
+    return "Start local management agent.";
+  }
+
+  virtual void execute(TRAPS);
+
+};
+
+class JMXStopRemoteDCmd : public DCmd {
+public:
+  JMXStopRemoteDCmd(outputStream *output, bool heap_allocated) :
+  DCmd(output, heap_allocated) {
+    // Do Nothing
+  }
+
+  static const char *name() {
+    return "ManagementAgent.stop";
+  }
+
+  static const char *description() {
+    return "Stop remote management agent.";
+  }
+
+  virtual void execute(TRAPS);
+};
+
 #endif // SHARE_VM_SERVICES_DIAGNOSTICCOMMAND_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/diagnosticCommand_ext.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_DIAGNOSTICCOMMAND_EXT_HPP
+#define SHARE_VM_SERVICES_DIAGNOSTICCOMMAND_EXT_HPP
+
+#undef HAVE_EXTRA_DCMD
+
+#endif // SHARE_VM_SERVICES_DIAGNOSTICCOMMAND_EXT_HPP
--- a/src/share/vm/services/diagnosticFramework.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/services/diagnosticFramework.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,7 @@
 bool DCmdArgIter::next(TRAPS) {
   if (_len == 0) return false;
   // skipping spaces
-  while (_cursor < _len - 1 && isspace(_buffer[_cursor])) {
+  while (_cursor < _len - 1 && _buffer[_cursor] == _delim) {
     _cursor++;
   }
   // handling end of command line
--- a/src/share/vm/services/diagnosticFramework.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/services/diagnosticFramework.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -195,6 +195,7 @@
   DCmdParser() {
     _options = NULL;
     _arguments_list = NULL;
+    _delim = ' ';
   }
   void add_dcmd_option(GenDCmdArgument* arg);
   void add_dcmd_argument(GenDCmdArgument* arg);
@@ -387,4 +388,17 @@
   }
 };
 
+// This class provides a convenient way to register DCmds without needing to change
+// management.cpp every time. The bodies of these two methods reside in
+// diagnosticCommand.cpp.
+
+class DCmdRegistrant : public AllStatic {
+
+private:
+    static void register_dcmds();
+    static void register_dcmds_ext();
+
+    friend class Management;
+};
+
 #endif // SHARE_VM_SERVICES_DIAGNOSTICFRAMEWORK_HPP
--- a/src/share/vm/services/g1MemoryPool.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/services/g1MemoryPool.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -78,7 +78,7 @@
   G1MemoryPoolSuper(g1h,
                     "G1 Old Gen",
                     g1h->g1mm()->old_space_committed(), /* init_size */
-                    _undefined_max,
+                    g1h->g1mm()->old_gen_max(),
                     true /* support_usage_threshold */) { }
 
 MemoryUsage G1OldGenPool::get_memory_usage() {
--- a/src/share/vm/services/g1MemoryPool.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/services/g1MemoryPool.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -101,7 +101,7 @@
     return _g1mm->old_space_used();
   }
   size_t max_size() const {
-    return _undefined_max;
+    return _g1mm->old_gen_max();
   }
   MemoryUsage get_memory_usage();
 };
--- a/src/share/vm/services/gcNotifier.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/services/gcNotifier.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,8 @@
   // Make a copy of the last GC statistics
   // GC may occur between now and the creation of the notification
   int num_pools = MemoryService::num_memory_pools();
-  GCStatInfo* stat = new GCStatInfo(num_pools);
+  // stat is deallocated inside GCNotificationRequest
+  GCStatInfo* stat = new(ResourceObj::C_HEAP) GCStatInfo(num_pools);
   mgr->get_last_gc_stat(stat);
   GCNotificationRequest *request = new GCNotificationRequest(os::javaTimeMillis(),mgr,action,cause,stat);
   addRequest(request);
@@ -179,17 +180,43 @@
 }
 
 void GCNotifier::sendNotification(TRAPS) {
+  GCNotifier::sendNotificationInternal(THREAD);
+  // Clearing pending exception to avoid premature termination of
+  // the service thread
+  if (HAS_PENDING_EXCEPTION) {
+    CLEAR_PENDING_EXCEPTION;
+  }
+}
+
+class NotificationMark : public StackObj {
+  // This class is used in GCNotifier::sendNotificationInternal to ensure that
+  // the GCNotificationRequest object is properly cleaned up, whatever path
+  // is used to exit the method.
+  GCNotificationRequest* _request;
+public:
+  NotificationMark(GCNotificationRequest* r) {
+    _request = r;
+  }
+  ~NotificationMark() {
+    assert(_request != NULL, "Sanity check");
+    delete _request;
+  }
+};
+
+void GCNotifier::sendNotificationInternal(TRAPS) {
   ResourceMark rm(THREAD);
+  HandleMark hm(THREAD);
   GCNotificationRequest *request = getRequest();
-  if(request != NULL) {
-    Handle objGcInfo = createGcInfo(request->gcManager,request->gcStatInfo,THREAD);
+  if (request != NULL) {
+    NotificationMark nm(request);
+    Handle objGcInfo = createGcInfo(request->gcManager, request->gcStatInfo, THREAD);
 
     Handle objName = java_lang_String::create_from_platform_dependent_str(request->gcManager->name(), CHECK);
     Handle objAction = java_lang_String::create_from_platform_dependent_str(request->gcAction, CHECK);
     Handle objCause = java_lang_String::create_from_platform_dependent_str(request->gcCause, CHECK);
 
     klassOop k = Management::sun_management_GarbageCollectorImpl_klass(CHECK);
-    instanceKlassHandle gc_mbean_klass (THREAD, k);
+    instanceKlassHandle gc_mbean_klass(THREAD, k);
 
     instanceOop gc_mbean = request->gcManager->get_memory_manager_instance(THREAD);
     instanceHandle gc_mbean_h(THREAD, gc_mbean);
@@ -212,11 +239,6 @@
                             vmSymbols::createGCNotification_signature(),
                             &args,
                             CHECK);
-    if (HAS_PENDING_EXCEPTION) {
-      CLEAR_PENDING_EXCEPTION;
-    }
-
-    delete request;
   }
 }
 
--- a/src/share/vm/services/gcNotifier.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/services/gcNotifier.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,6 +60,7 @@
   static GCNotificationRequest *last_request;
   static void addRequest(GCNotificationRequest *request);
   static GCNotificationRequest *getRequest();
+  static void sendNotificationInternal(TRAPS);
 public:
   static void pushNotification(GCMemoryManager *manager, const char *action, const char *cause);
   static bool has_event();
--- a/src/share/vm/services/management.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/services/management.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -119,21 +119,8 @@
   _optional_support.isThreadAllocatedMemorySupported = 1;
 
   // Registration of the diagnostic commands
-  // First boolean argument specifies if the command is enabled
-  // Second boolean argument specifies if the command is hidden
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HelpDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VersionDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CommandLineDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintSystemPropertiesDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintVMFlagsDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VMUptimeDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<SystemGCDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<RunFinalizationDCmd>(true, false));
-#ifndef SERVICES_KERNEL   // Heap dumping not supported
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HeapDumpDCmd>(true, false));
-#endif // SERVICES_KERNEL
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassHistogramDCmd>(true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ThreadDumpDCmd>(true, false));
+  DCmdRegistrant::register_dcmds();
+  DCmdRegistrant::register_dcmds_ext();
 }
 
 void Management::initialize(TRAPS) {
@@ -2047,15 +2034,15 @@
   // Make a copy of the last GC statistics
   // GC may occur while constructing the last GC information
   int num_pools = MemoryService::num_memory_pools();
-  GCStatInfo* stat = new GCStatInfo(num_pools);
-  if (mgr->get_last_gc_stat(stat) == 0) {
+  GCStatInfo stat(num_pools);
+  if (mgr->get_last_gc_stat(&stat) == 0) {
     gc_stat->gc_index = 0;
     return;
   }
 
-  gc_stat->gc_index = stat->gc_index();
-  gc_stat->start_time = Management::ticks_to_ms(stat->start_time());
-  gc_stat->end_time = Management::ticks_to_ms(stat->end_time());
+  gc_stat->gc_index = stat.gc_index();
+  gc_stat->start_time = Management::ticks_to_ms(stat.start_time());
+  gc_stat->end_time = Management::ticks_to_ms(stat.end_time());
 
   // Current implementation does not have GC extension attributes
   gc_stat->num_gc_ext_attributes = 0;
@@ -2073,17 +2060,17 @@
   objArrayHandle usage_after_gc_ah(THREAD, au);
 
   for (int i = 0; i < num_pools; i++) {
-    Handle before_usage = MemoryService::create_MemoryUsage_obj(stat->before_gc_usage_for_pool(i), CHECK);
+    Handle before_usage = MemoryService::create_MemoryUsage_obj(stat.before_gc_usage_for_pool(i), CHECK);
     Handle after_usage;
 
-    MemoryUsage u = stat->after_gc_usage_for_pool(i);
+    MemoryUsage u = stat.after_gc_usage_for_pool(i);
     if (u.max_size() == 0 && u.used() > 0) {
       // If max size == 0, this pool is a survivor space.
       // Set max size = -1 since the pools will be swapped after GC.
       MemoryUsage usage(u.init_size(), u.used(), u.committed(), (size_t)-1);
       after_usage = MemoryService::create_MemoryUsage_obj(usage, CHECK);
     } else {
-      after_usage = MemoryService::create_MemoryUsage_obj(stat->after_gc_usage_for_pool(i), CHECK);
+      after_usage = MemoryService::create_MemoryUsage_obj(stat.after_gc_usage_for_pool(i), CHECK);
     }
     usage_before_gc_ah->obj_at_put(i, before_usage());
     usage_after_gc_ah->obj_at_put(i, after_usage());
--- a/src/share/vm/services/memoryManager.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/services/memoryManager.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -214,8 +214,8 @@
 
 void GCMemoryManager::initialize_gc_stat_info() {
   assert(MemoryService::num_memory_pools() > 0, "should have one or more memory pools");
-  _last_gc_stat = new GCStatInfo(MemoryService::num_memory_pools());
-  _current_gc_stat = new GCStatInfo(MemoryService::num_memory_pools());
+  _last_gc_stat = new(ResourceObj::C_HEAP) GCStatInfo(MemoryService::num_memory_pools());
+  _current_gc_stat = new(ResourceObj::C_HEAP) GCStatInfo(MemoryService::num_memory_pools());
   // tracking concurrent collections we need two objects: one to update, and one to
   // hold the publicly available "last (completed) gc" information.
 }
--- a/src/share/vm/services/memoryManager.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/services/memoryManager.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -108,7 +108,7 @@
   const char* name()         { return "CodeCacheManager"; }
 };
 
-class GCStatInfo : public CHeapObj {
+class GCStatInfo : public ResourceObj {
 private:
   size_t _index;
   jlong  _start_time;
--- a/src/share/vm/trace/traceMacros.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/trace/traceMacros.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -40,4 +40,8 @@
 #define TRACE_START() true
 #define TRACE_INITIALIZE() 0
 
+#define TRACE_SET_KLASS_TRACE_ID(x1, x2) do { } while (0)
+#define TRACE_DEFINE_KLASS_METHODS typedef int ___IGNORED_hs_trace_type1
+#define TRACE_DEFINE_KLASS_TRACE_ID typedef int ___IGNORED_hs_trace_type2
+
 #endif
--- a/src/share/vm/utilities/bitMap.inline.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/bitMap.inline.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -178,8 +178,30 @@
     for (; !(res & 1); res_offset++) {
       res = res >> 1;
     }
-    assert(res_offset >= l_offset &&
-           res_offset < r_offset, "just checking");
+
+#ifdef ASSERT
+    // In the following assert, if r_offset is not bitmap word aligned,
+    // checking that res_offset is strictly less than r_offset is too
+    // strong and will trip the assert.
+    //
+    // Consider the case where l_offset is bit 15 and r_offset is bit 17
+    // of the same map word, and where bits [15:16:17:18] == [00:00:00:01].
+    // All the bits in the range [l_offset:r_offset) are 0.
+    // The loop that calculates res_offset, above, would yield the offset
+    // of bit 18 because it's in the same map word as l_offset and there
+    // is a set bit in that map word above l_offset (i.e. res != NoBits).
+    //
+    // In this case, however, what we can assert is that res_offset is strictly
+    // less than size() since we know that there is at least one set bit
+    // at an offset above, but in the same map word as, r_offset.
+    // Otherwise, if r_offset is word aligned then it will not be in the
+    // same map word as l_offset (unless it equals l_offset). So either
+    // there won't be a set bit between l_offset and the end of its map
+    // word (i.e. res == NoBits), or res_offset will be less than r_offset.
+
+    idx_t limit = is_word_aligned(r_offset) ? r_offset : size();
+    assert(res_offset >= l_offset && res_offset < limit, "just checking");
+#endif // ASSERT
     return MIN2(res_offset, r_offset);
   }
   // skip over all word length 0-bit runs
--- a/src/share/vm/utilities/debug.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/debug.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -600,19 +600,11 @@
   tty->flush();
 }
 
-
 extern "C" void events() {
   Command c("events");
-  Events::print_last(tty, 50);
+  Events::print();
 }
 
-
-extern "C" void nevents(int n) {
-  Command c("events");
-  Events::print_last(tty, n);
-}
-
-
 // Given a heap address that was valid before the most recent GC, if
 // the oop that used to contain it is still live, prints the new
 // location of the oop and the address. Useful for tracking down
@@ -771,7 +763,7 @@
 
   tty->print_cr("misc.");
   tty->print_cr("  flush()       - flushes the log file");
-  tty->print_cr("  events()      - dump last 50 events");
+  tty->print_cr("  events()      - dump events from ring buffers");
 
 
   tty->print_cr("compiler debugging");
--- a/src/share/vm/utilities/debug.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/debug.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,16 +33,23 @@
 // Simple class to format the ctor arguments into a fixed-sized buffer.
 template <size_t bufsz = 256>
 class FormatBuffer {
-public:
+ public:
   inline FormatBuffer(const char * format, ...);
   inline void append(const char* format, ...);
+  inline void print(const char* format, ...);
+  inline void printv(const char* format, va_list ap);
   operator const char *() const { return _buf; }
 
-private:
+  char* buffer() { return _buf; }
+  int size() { return bufsz; }
+
+ private:
   FormatBuffer(const FormatBuffer &); // prevent copies
 
-private:
+ protected:
   char _buf[bufsz];
+
+  inline FormatBuffer();
 };
 
 template <size_t bufsz>
@@ -54,6 +61,24 @@
 }
 
 template <size_t bufsz>
+FormatBuffer<bufsz>::FormatBuffer() {
+  _buf[0] = '\0';
+}
+
+template <size_t bufsz>
+void FormatBuffer<bufsz>::print(const char * format, ...) {
+  va_list argp;
+  va_start(argp, format);
+  jio_vsnprintf(_buf, bufsz, format, argp);
+  va_end(argp);
+}
+
+template <size_t bufsz>
+void FormatBuffer<bufsz>::printv(const char * format, va_list argp) {
+  jio_vsnprintf(_buf, bufsz, format, argp);
+}
+
+template <size_t bufsz>
 void FormatBuffer<bufsz>::append(const char* format, ...) {
   // Given that the constructor does a vsnprintf we can assume that
   // _buf is already initialized.
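A rough standalone sketch (editor's illustration, not HotSpot code) of the fixed-size formatting idea that print()/printv() extend above: everything is formatted into a stack-allocated array, so no heap or resource-area allocation is needed on error-reporting paths. FixedBuffer is a hypothetical stand-in for FormatBuffer.

#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <cstring>

template <std::size_t bufsz = 256>
class FixedBuffer {
  char _buf[bufsz];
 public:
  FixedBuffer() { _buf[0] = '\0'; }
  void print(const char* format, ...) {          // overwrite the buffer
    va_list ap;
    va_start(ap, format);
    vsnprintf(_buf, bufsz, format, ap);
    va_end(ap);
  }
  void append(const char* format, ...) {         // append to the current contents
    std::size_t len = strlen(_buf);
    va_list ap;
    va_start(ap, format);
    vsnprintf(_buf + len, bufsz - len, format, ap);
    va_end(ap);
  }
  const char* buffer() const { return _buf; }
};

int main() {
  FixedBuffer<128> buf;
  buf.print("Executing VM operation: %s", "safepoint");
  buf.append(" done");
  printf("%s\n", buf.buffer());   // Executing VM operation: safepoint done
  return 0;
}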
--- a/src/share/vm/utilities/decoder.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/decoder.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -25,7 +25,9 @@
 #include "precompiled.hpp"
 #include "prims/jvm.h"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/os.hpp"
 #include "utilities/decoder.hpp"
+#include "utilities/vmError.hpp"
 
 #if defined(_WINDOWS)
   #include "decoder_windows.hpp"
@@ -35,74 +37,94 @@
   #include "decoder_elf.hpp"
 #endif
 
-NullDecoder*  Decoder::_decoder = NULL;
-NullDecoder   Decoder::_do_nothing_decoder;
-Mutex*           Decoder::_decoder_lock = new Mutex(Mutex::safepoint,
-                                "DecoderLock");
+AbstractDecoder*  Decoder::_shared_decoder = NULL;
+AbstractDecoder*  Decoder::_error_handler_decoder = NULL;
+NullDecoder       Decoder::_do_nothing_decoder;
+Mutex*            Decoder::_shared_decoder_lock = new Mutex(Mutex::native,
+                                "SharedDecoderLock");
 
-// _decoder_lock should already acquired before enter this method
-NullDecoder* Decoder::get_decoder() {
-  assert(_decoder_lock != NULL && _decoder_lock->owned_by_self(),
+AbstractDecoder* Decoder::get_shared_instance() {
+  assert(_shared_decoder_lock != NULL && _shared_decoder_lock->owned_by_self(),
     "Require DecoderLock to enter");
 
-  if (_decoder != NULL) {
-    return _decoder;
+  if (_shared_decoder == NULL) {
+    _shared_decoder = create_decoder();
   }
+  return _shared_decoder;
+}
 
-  // Decoder is a secondary service. Although, it is good to have,
-  // but we can live without it.
+AbstractDecoder* Decoder::get_error_handler_instance() {
+  if (_error_handler_decoder == NULL) {
+    _error_handler_decoder = create_decoder();
+  }
+  return _error_handler_decoder;
+}
+
+
+AbstractDecoder* Decoder::create_decoder() {
+  AbstractDecoder* decoder;
 #if defined(_WINDOWS)
-  _decoder = new (std::nothrow) WindowsDecoder();
+  decoder = new (std::nothrow) WindowsDecoder();
 #elif defined (__APPLE__)
-    _decoder = new (std::nothrow)MachODecoder();
+  decoder = new (std::nothrow) MachODecoder();
 #else
-    _decoder = new (std::nothrow)ElfDecoder();
+  decoder = new (std::nothrow) ElfDecoder();
 #endif
 
-  if (_decoder == NULL || _decoder->has_error()) {
-    if (_decoder != NULL) {
-      delete _decoder;
+  if (decoder == NULL || decoder->has_error()) {
+    if (decoder != NULL) {
+      delete decoder;
     }
-    _decoder = &_do_nothing_decoder;
+    decoder = &_do_nothing_decoder;
   }
-  return _decoder;
+  return decoder;
 }
 
 bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const char* modulepath) {
-  assert(_decoder_lock != NULL, "Just check");
-  MutexLockerEx locker(_decoder_lock, true);
-  NullDecoder* decoder = get_decoder();
+  assert(_shared_decoder_lock != NULL, "Just check");
+  bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
+  MutexLockerEx locker(error_handling_thread ? NULL : _shared_decoder_lock, true);
+  AbstractDecoder* decoder = error_handling_thread ?
+    get_error_handler_instance() : get_shared_instance();
   assert(decoder != NULL, "null decoder");
 
   return decoder->decode(addr, buf, buflen, offset, modulepath);
 }
 
 bool Decoder::demangle(const char* symbol, char* buf, int buflen) {
-  assert(_decoder_lock != NULL, "Just check");
-  MutexLockerEx locker(_decoder_lock, true);
-  NullDecoder* decoder = get_decoder();
+  assert(_shared_decoder_lock != NULL, "Just check");
+  bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
+  MutexLockerEx locker(error_handling_thread ? NULL : _shared_decoder_lock, true);
+  AbstractDecoder* decoder = error_handling_thread ?
+    get_error_handler_instance() : get_shared_instance();
   assert(decoder != NULL, "null decoder");
   return decoder->demangle(symbol, buf, buflen);
 }
 
 bool Decoder::can_decode_C_frame_in_vm() {
-  assert(_decoder_lock != NULL, "Just check");
-  MutexLockerEx locker(_decoder_lock, true);
-  NullDecoder* decoder = get_decoder();
+  assert(_shared_decoder_lock != NULL, "Just check");
+  bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
+  MutexLockerEx locker(error_handling_thread ? NULL : _shared_decoder_lock, true);
+  AbstractDecoder* decoder = error_handling_thread ?
+    get_error_handler_instance() : get_shared_instance();
   assert(decoder != NULL, "null decoder");
   return decoder->can_decode_C_frame_in_vm();
 }
 
-// shutdown real decoder and replace it with
-// _do_nothing_decoder
+/*
+ * Shut down the shared decoder and replace it with
+ * _do_nothing_decoder. Leave the error handler
+ * instance alone, since the JVM is going down.
+ */
 void Decoder::shutdown() {
-  assert(_decoder_lock != NULL, "Just check");
-  MutexLockerEx locker(_decoder_lock, true);
+  assert(_shared_decoder_lock != NULL, "Just check");
+  MutexLockerEx locker(_shared_decoder_lock, true);
 
-  if (_decoder != NULL && _decoder != &_do_nothing_decoder) {
-    delete _decoder;
+  if (_shared_decoder != NULL &&
+    _shared_decoder != &_do_nothing_decoder) {
+    delete _shared_decoder;
   }
 
-  _decoder = &_do_nothing_decoder;
+  _shared_decoder = &_do_nothing_decoder;
 }
 
--- a/src/share/vm/utilities/decoder.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/decoder.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
 #include "memory/allocation.hpp"
 #include "runtime/mutex.hpp"
 
-class NullDecoder: public CHeapObj {
+class AbstractDecoder : public CHeapObj {
 public:
   // status code for decoding native C frame
   enum decoder_status {
@@ -43,6 +43,34 @@
          helper_init_error     // SymInitialize failed (Windows only)
   };
 
+  // decode a pc address to the corresponding function name and an offset from the beginning of
+  // the function
+  virtual bool decode(address pc, char* buf, int buflen, int* offset,
+    const char* modulepath = NULL) = 0;
+  // demangle a C++ symbol
+  virtual bool demangle(const char* symbol, char* buf, int buflen) = 0;
+  // whether the decoder can decode symbols in the VM
+  virtual bool can_decode_C_frame_in_vm() const = 0;
+
+  virtual decoder_status status() const {
+    return _decoder_status;
+  }
+
+  virtual bool has_error() const {
+    return is_error(_decoder_status);
+  }
+
+  static bool is_error(decoder_status status) {
+    return (status > 0);
+  }
+
+protected:
+  decoder_status  _decoder_status;
+};
+
+// Do nothing decoder
+class NullDecoder : public AbstractDecoder {
+public:
   NullDecoder() {
     _decoder_status = not_available;
   }
@@ -61,40 +89,34 @@
   virtual bool can_decode_C_frame_in_vm() const {
     return false;
   }
-
-  virtual decoder_status status() const {
-    return _decoder_status;
-  }
-
-  virtual bool has_error() const {
-    return is_error(_decoder_status);
-  }
-
-  static bool is_error(decoder_status status) {
-    return (status > 0);
-  }
-
-protected:
-  decoder_status  _decoder_status;
 };
 
 
-class Decoder: AllStatic {
+class Decoder : AllStatic {
 public:
   static bool decode(address pc, char* buf, int buflen, int* offset, const char* modulepath = NULL);
   static bool demangle(const char* symbol, char* buf, int buflen);
   static bool can_decode_C_frame_in_vm();
 
+  // shutdown shared instance
   static void shutdown();
 protected:
-  static NullDecoder* get_decoder();
+  // shared decoder instance; _shared_decoder_lock must be held to use it
+  static AbstractDecoder* get_shared_instance();
+  // a private instance for the error handler. The error handler can be
+  // triggered almost anywhere, including from a signal handler, where
+  // no lock can be taken, so the shared decoder cannot be used
+  // in that scenario.
+  static AbstractDecoder* get_error_handler_instance();
 
+  static AbstractDecoder* create_decoder();
 private:
-  static NullDecoder*     _decoder;
-  static NullDecoder      _do_nothing_decoder;
+  static AbstractDecoder*     _shared_decoder;
+  static AbstractDecoder*     _error_handler_decoder;
+  static NullDecoder          _do_nothing_decoder;
 
 protected:
-  static Mutex*       _decoder_lock;
+  static Mutex*               _shared_decoder_lock;
 };
 
 #endif // SHARE_VM_UTILITIES_DECODER_HPP
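A hedged standalone sketch (editor's illustration, not HotSpot code) of the instance-selection idea described above: the error-reporting thread gets a private, lazily created decoder and never takes the shared lock, while every other thread uses the shared instance under a mutex. All names here are illustrative.

#include <cstdio>
#include <mutex>
#include <thread>

struct Decoder { const char* tag; };

static Decoder* shared_decoder = nullptr;
static Decoder* error_handler_decoder = nullptr;
static std::mutex shared_decoder_lock;
static std::thread::id first_error_tid;          // set when error reporting starts

static Decoder* decoder_for_current_thread() {
  if (std::this_thread::get_id() == first_error_tid) {
    if (error_handler_decoder == nullptr) {
      error_handler_decoder = new Decoder{"error-handler"};
    }
    return error_handler_decoder;                // lock-free path for the crash reporter
  }
  std::lock_guard<std::mutex> guard(shared_decoder_lock);
  if (shared_decoder == nullptr) {
    shared_decoder = new Decoder{"shared"};
  }
  return shared_decoder;
}

int main() {
  first_error_tid = std::this_thread::get_id();  // pretend this thread is crashing
  printf("%s\n", decoder_for_current_thread()->tag);   // prints error-handler
  return 0;
}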
--- a/src/share/vm/utilities/decoder_elf.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/decoder_elf.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
 #include "utilities/decoder.hpp"
 #include "utilities/elfFile.hpp"
 
-class ElfDecoder: public NullDecoder {
+class ElfDecoder : public AbstractDecoder {
 
 public:
   ElfDecoder() {
--- a/src/share/vm/utilities/events.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/events.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #include "memory/allocation.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/osThread.hpp"
+#include "runtime/threadCritical.hpp"
 #include "runtime/threadLocalStorage.hpp"
 #include "runtime/timer.hpp"
 #include "utilities/events.hpp"
@@ -43,184 +44,44 @@
 #endif
 
 
-#ifndef PRODUCT
-
-////////////////////////////////////////////////////////////////////////////
-// Event
-
-typedef u4 EventID;
-
-class Event VALUE_OBJ_CLASS_SPEC  {
- private:
-  jlong       _time_tick;
-  intx        _thread_id;
-  const char* _format;
-  int         _indent;
-  intptr_t    _arg_1;
-  intptr_t    _arg_2;
-  intptr_t    _arg_3;
-
-  // only EventBuffer::add_event() can assign event id
-  friend class EventBuffer;
-  EventID     _id;
-
- public:
-
-  void clear() { _format = NULL; }
-
-  EventID id() const { return _id; }
-
-  void fill(int indent, const char* format, intptr_t arg_1, intptr_t arg_2, intptr_t arg_3) {
-    _format = format;
-    _arg_1  = arg_1;
-    _arg_2  = arg_2;
-    _arg_3  = arg_3;
-
-    _indent = indent;
-
-    _thread_id = os::current_thread_id();
-    _time_tick = os::elapsed_counter();
-  }
-
-  void print_on(outputStream *st) {
-    if (_format == NULL) return;
-    st->print("  %d", _thread_id);
-    st->print("  %3.2g   ", (double)_time_tick / os::elapsed_frequency());
-    st->fill_to(20);
-    for (int index = 0; index < _indent; index++) {
-      st->print("| ");
-    }
-    st->print_cr(_format, _arg_1, _arg_2, _arg_3);
-  }
-};
-
-////////////////////////////////////////////////////////////////////////////
-// EventBuffer
-//
-// Simple lock-free event queue. Every event has a unique 32-bit id.
-// It's fine if two threads add events at the same time, because they
-// will get different event id, and then write to different buffer location.
-// However, it is assumed that add_event() is quick enough (or buffer size
-// is big enough), so when one thread is adding event, there can't be more
-// than "size" events created by other threads; otherwise we'll end up having
-// two threads writing to the same location.
-
-class EventBuffer : AllStatic {
- private:
-  static Event* buffer;
-  static int    size;
-  static jint   indent;
-  static volatile EventID _current_event_id;
-
-  static EventID get_next_event_id() {
-    return (EventID)Atomic::add(1, (jint*)&_current_event_id);
-  }
-
- public:
-  static void inc_indent() { Atomic::inc(&indent); }
-  static void dec_indent() { Atomic::dec(&indent); }
+EventLog* Events::_logs = NULL;
+StringEventLog* Events::_messages = NULL;
+StringEventLog* Events::_exceptions = NULL;
+StringEventLog* Events::_deopt_messages = NULL;
 
-  static bool get_event(EventID id, Event* event) {
-    int index = (int)(id % size);
-    if (buffer[index].id() == id) {
-      memcpy(event, &buffer[index], sizeof(Event));
-      // check id again; if buffer[index] is being updated by another thread,
-      // event->id() will contain different value.
-      return (event->id() == id);
-    } else {
-      // id does not match - id is invalid, or event is overwritten
-      return false;
-    }
-  }
-
-  // add a new event to the queue; if EventBuffer is full, this call will
-  // overwrite the oldest event in the queue
-  static EventID add_event(const char* format,
-                           intptr_t arg_1, intptr_t arg_2, intptr_t arg_3) {
-    // assign a unique id
-    EventID id = get_next_event_id();
-
-    // event will be copied to buffer[index]
-    int index = (int)(id % size);
-
-    // first, invalidate id, buffer[index] can't have event with id = index + 2
-    buffer[index]._id = index + 2;
-
-    // make sure everyone has seen that buffer[index] is invalid
-    OrderAccess::fence();
-
-    // ... before updating its value
-    buffer[index].fill(indent, format, arg_1, arg_2, arg_3);
-
-    // finally, set up real event id, now buffer[index] contains valid event
-    OrderAccess::release_store(&(buffer[index]._id), id);
-
-    return id;
-  }
-
-  static void print_last(outputStream *st, int number) {
-    st->print_cr("[Last %d events in the event buffer]", number);
-    st->print_cr("-<thd>-<elapsed sec>-<description>---------------------");
+EventLog::EventLog() {
+  // This is normally done during bootstrap when we're only single
+  // threaded, but use a ThreadCritical to ensure inclusion in case
+  // some logs are created slightly late.
+  ThreadCritical tc;
+  _next = Events::_logs;
+  Events::_logs = this;
+}
 
-    int count = 0;
-    EventID id = _current_event_id;
-    while (count < number) {
-      Event event;
-      if (get_event(id, &event)) {
-         event.print_on(st);
-      }
-      id--;
-      count++;
-    }
-  }
-
-  static void print_all(outputStream* st) {
-    print_last(st, size);
-  }
-
-  static void init() {
-    // Allocate the event buffer
-    size   = EventLogLength;
-    buffer = NEW_C_HEAP_ARRAY(Event, size);
-
-    _current_event_id = 0;
-
-    // Clear the event buffer
-    for (int index = 0; index < size; index++) {
-      buffer[index]._id = index + 1;       // index + 1 is invalid id
-      buffer[index].clear();
-    }
-  }
-};
-
-Event*           EventBuffer::buffer;
-int              EventBuffer::size;
-volatile EventID EventBuffer::_current_event_id;
-int              EventBuffer::indent;
-
-////////////////////////////////////////////////////////////////////////////
-// Events
-
-// Events::log() is safe for signal handlers
-void Events::log(const char* format, ...) {
-  if (LogEvents) {
-    va_list ap;
-    va_start(ap, format);
-    intptr_t arg_1 = va_arg(ap, intptr_t);
-    intptr_t arg_2 = va_arg(ap, intptr_t);
-    intptr_t arg_3 = va_arg(ap, intptr_t);
-    va_end(ap);
-
-    EventBuffer::add_event(format, arg_1, arg_2, arg_3);
+// For each registered event logger, print out the current contents of
+// the buffer.  This is normally called when the JVM is crashing.
+void Events::print_all(outputStream* out) {
+  EventLog* log = _logs;
+  while (log != NULL) {
+    log->print_log_on(out);
+    log = log->next();
   }
 }
 
-void Events::print_all(outputStream *st) {
-  EventBuffer::print_all(st);
+void Events::print() {
+  print_all(tty);
 }
 
-void Events::print_last(outputStream *st, int number) {
-  EventBuffer::print_last(st, number);
+void Events::init() {
+  if (LogEvents) {
+    _messages = new StringEventLog("Events");
+    _exceptions = new StringEventLog("Internal exceptions");
+    _deopt_messages = new StringEventLog("Deoptimization events");
+  }
+}
+
+void eventlog_init() {
+  Events::init();
 }
 
 ///////////////////////////////////////////////////////////////////////////
@@ -230,37 +91,17 @@
   if (LogEvents) {
     va_list ap;
     va_start(ap, format);
-    intptr_t arg_1 = va_arg(ap, intptr_t);
-    intptr_t arg_2 = va_arg(ap, intptr_t);
-    intptr_t arg_3 = va_arg(ap, intptr_t);
+    // Save a copy of the begin message and log it.
+    _buffer.printv(format, ap);
+    Events::log(NULL, _buffer);
     va_end(ap);
-
-    EventBuffer::add_event(format, arg_1, arg_2, arg_3);
-    EventBuffer::inc_indent();
   }
 }
 
 EventMark::~EventMark() {
   if (LogEvents) {
-    EventBuffer::dec_indent();
-    EventBuffer::add_event("done", 0, 0, 0);
+    // Append " done" to the begin message and log it
+    _buffer.append(" done");
+    Events::log(NULL, _buffer);
   }
 }
-
-///////////////////////////////////////////////////////////////////////////
-
-void eventlog_init() {
-  EventBuffer::init();
-}
-
-int print_all_events(outputStream *st) {
-  EventBuffer::print_all(st);
-  return 1;
-}
-
-#else
-
-void eventlog_init() {}
-int print_all_events(outputStream *st) { return 0; }
-
-#endif // PRODUCT
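For orientation, a minimal sketch of how the reworked API is called after this change, based only on the signatures visible in this diff; the wrapping function and its arguments are made up, and the sketch is not part of the changeset:

// Illustrative only, not part of this changeset; the function and its
// arguments are hypothetical.
void example_install_code(Thread* thread, address entry, int id) {
  // Begin/end markers in the default log; the destructor logs the same
  // message again with " done" appended.
  EventMark em("installing code blob %d", id);

  // A one-off message, now tagged with the current thread.
  Events::log(thread, "code blob entry at " INTPTR_FORMAT, entry);
}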
--- a/src/share/vm/utilities/events.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/events.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,47 +26,266 @@
 #define SHARE_VM_UTILITIES_EVENTS_HPP
 
 #include "memory/allocation.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/thread.hpp"
 #include "utilities/top.hpp"
+#include "utilities/vmError.hpp"
 
 // Events and EventMark provide interfaces to log events taking place in the vm.
 // This facility is extremely useful for post-mortem debugging. The eventlog
 // often provides crucial information about events leading up to the crash.
 //
-// All arguments past the format string must be passed as an intptr_t.
-//
-// To log a single event use:
-//    Events::log("New nmethod has been created " INTPTR_FORMAT, nm);
-//
-// To log a block of events use:
-//    EventMark m("GarbageCollecting %d", (intptr_t)gc_number);
-//
-// The constructor to eventlog indents the eventlog until the
-// destructor has been executed.
-//
-// IMPLEMENTATION RESTRICTION:
-//   Max 3 arguments are saved for each logged event.
-//
+// Abstractly, the logs can record whatever they want, but normally
+// they would record at least a timestamp and the current Thread,
+// along with whatever data they need, in a ring buffer.  Commonly,
+// fixed-length text messages are recorded for simplicity, but other
+// strategies could be used.  Several logs are provided by default,
+// but new instances can be created as needed.
+
+// The base event log dumping class that is registered for dumping at
+// crash time.  This is a very generic interface that is mainly here
+// for completeness.  Normally the templated EventLogBase would be
+// subclassed to provide different log types.
+class EventLog : public CHeapObj {
+  friend class Events;
+
+ private:
+  EventLog* _next;
+
+  EventLog* next() const { return _next; }
+
+ public:
+  // Automatically registers the log so that it will be printed during
+  // crashes.
+  EventLog();
+
+  virtual void print_log_on(outputStream* out) = 0;
+};
+
+
+// A templated subclass of EventLog that provides basic ring buffer
+// functionality.  Most event loggers should subclass this, possibly
+// providing a more featureful log function if the existing copy
+// semantics aren't appropriate.  The name is used as the label of the
+// log when it is dumped during a crash.
+template <class T> class EventLogBase : public EventLog {
+  template <class X> class EventRecord {
+   public:
+    double  timestamp;
+    Thread* thread;
+    X       data;
+  };
+
+ protected:
+  Mutex           _mutex;
+  const char*     _name;
+  int             _length;
+  int             _index;
+  int             _count;
+  EventRecord<T>* _records;
+
+ public:
+  EventLogBase<T>(const char* name, int length = LogEventsBufferEntries):
+    _name(name),
+    _length(length),
+    _count(0),
+    _index(0),
+    _mutex(Mutex::event, name) {
+    _records = new EventRecord<T>[length];
+  }
+
+  double fetch_timestamp() {
+    return os::elapsedTime();
+  }
+
+  // Move the ring buffer to the next open slot and return the index of
+  // the slot to use for the current message.  Should only be called
+  // while the mutex is held.
+  int compute_log_index() {
+    int index = _index;
+    if (_count < _length) _count++;
+    _index++;
+    if (_index >= _length) _index = 0;
+    return index;
+  }
+
+  bool should_log() {
+    // Don't bother adding new entries when we're crashing.  This also
+    // avoids mutating the ring buffer when printing the log.
+    return !VMError::fatal_error_in_progress();
+  }
+
+  // Print the contents of the log
+  void print_log_on(outputStream* out);
+
+ private:
+  void print_log_impl(outputStream* out);
+
+  // Print a single element.  A templated implementation might need to
+  // be declared by subclasses.
+  void print(outputStream* out, T& e);
+
+  void print(outputStream* out, EventRecord<T>& e) {
+    out->print("Event: %.3f ", e.timestamp);
+    if (e.thread != NULL) {
+      out->print("Thread " INTPTR_FORMAT " ", e.thread);
+    }
+    print(out, e.data);
+  }
+};
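The comments above say that most event loggers should subclass EventLogBase, possibly with a different record type than plain text. A hedged sketch of what such a subclass could look like; the record type, class name, and log() signature are all made up, not part of this changeset, and assume this header's declarations are in scope:

// Illustrative only, not part of this changeset.
struct GCPauseRecord {
  double duration_ms;
  size_t bytes_freed;
};

class GCPauseEventLog : public EventLogBase<GCPauseRecord> {
 public:
  GCPauseEventLog(const char* name) : EventLogBase<GCPauseRecord>(name) {}

  void log(Thread* thread, double duration_ms, size_t bytes_freed) {
    if (!should_log()) return;

    // Same locking and slot handling as StringEventLog::logv() below.
    double timestamp = fetch_timestamp();
    MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
    int index = compute_log_index();
    _records[index].thread = thread;
    _records[index].timestamp = timestamp;
    _records[index].data.duration_ms = duration_ms;
    _records[index].data.bytes_freed = bytes_freed;
  }
};
// A matching specialization of EventLogBase<GCPauseRecord>::print(outputStream*, GCPauseRecord&)
// would also be needed, mirroring the StringLogMessage one further below.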
+
+// A simple wrapper class for fixed size text messages.
+class StringLogMessage : public FormatBuffer<132> {
+ public:
+  // Wrap this buffer in a stringStream.
+  stringStream stream() {
+    return stringStream(_buf, sizeof(_buf));
+  }
+};
+
+// A simple ring buffer of fixed size text messages.
+class StringEventLog : public EventLogBase<StringLogMessage> {
+ public:
+  StringEventLog(const char* name, int count = LogEventsBufferEntries) : EventLogBase<StringLogMessage>(name, count) {}
+
+  void logv(Thread* thread, const char* format, va_list ap) {
+    if (!should_log()) return;
+
+    double timestamp = fetch_timestamp();
+    MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
+    int index = compute_log_index();
+    _records[index].thread = thread;
+    _records[index].timestamp = timestamp;
+    _records[index].data.printv(format, ap);
+  }
+
+  void log(Thread* thread, const char* format, ...) {
+    va_list ap;
+    va_start(ap, format);
+    logv(thread, format, ap);
+    va_end(ap);
+  }
+
+};
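Since new log instances can be created as needed, a one-off StringEventLog only has to be constructed to appear in the crash dump. A hedged sketch; the log name and helper functions are invented for illustration and are not part of this changeset:

// Illustrative only, not part of this changeset.  The EventLog constructor
// registers the new log, so it is automatically included when
// Events::print_all() runs at crash time.
static StringEventLog* _compile_queue_events = NULL;

void example_init_logs() {
  if (LogEvents) {
    _compile_queue_events = new StringEventLog("Compile queue events");
  }
}

void example_note_enqueue(Thread* thread, int task_id) {
  if (_compile_queue_events != NULL) {
    _compile_queue_events->log(thread, "enqueued compile task %d", task_id);
  }
}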
+
+
 
 class Events : AllStatic {
- public:
-  // Logs an event, format as printf
-  static void log(const char* format, ...) PRODUCT_RETURN;
+  friend class EventLog;
+
+ private:
+  static EventLog* _logs;
+
+  // A log for generic messages that aren't well categorized.
+  static StringEventLog* _messages;
+
+  // A log for internal exception related messages, like internal
+  // throws and implicit exceptions.
+  static StringEventLog* _exceptions;
+
+  // Deoptimization-related messages
+  static StringEventLog* _deopt_messages;
 
-  // Prints all events in the buffer
-  static void print_all(outputStream* st) PRODUCT_RETURN;
+ public:
+  static void print_all(outputStream* out);
+
+  // Dump all events to the tty
+  static void print();
 
-  // Prints last number events from the event buffer
-  static void print_last(outputStream *st, int number) PRODUCT_RETURN;
+  // Logs a generic message with timestamp and format as printf.
+  static void log(Thread* thread, const char* format, ...);
+
+  // Log exception related message
+  static void log_exception(Thread* thread, const char* format, ...);
+
+  static void log_deopt_message(Thread* thread, const char* format, ...);
+
+  // Register default loggers
+  static void init();
 };
 
+
+inline void Events::log(Thread* thread, const char* format, ...) {
+  if (LogEvents) {
+    va_list ap;
+    va_start(ap, format);
+    _messages->logv(thread, format, ap);
+    va_end(ap);
+  }
+}
+
+inline void Events::log_exception(Thread* thread, const char* format, ...) {
+  if (LogEvents) {
+    va_list ap;
+    va_start(ap, format);
+    _exceptions->logv(thread, format, ap);
+    va_end(ap);
+  }
+}
+
+inline void Events::log_deopt_message(Thread* thread, const char* format, ...) {
+  if (LogEvents) {
+    va_list ap;
+    va_start(ap, format);
+    _deopt_messages->logv(thread, format, ap);
+    va_end(ap);
+  }
+}
+
+
+template <class T>
+inline void EventLogBase<T>::print_log_on(outputStream* out) {
+  if (ThreadLocalStorage::get_thread_slow() == NULL) {
+    // Not a regular Java thread so don't bother locking
+    print_log_impl(out);
+  } else {
+    MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
+    print_log_impl(out);
+  }
+}
+
+// Dump the ring buffer entries that currently hold events, oldest first.
+template <class T>
+inline void EventLogBase<T>::print_log_impl(outputStream* out) {
+  out->print_cr("%s (%d events):", _name, _count);
+  if (_count == 0) {
+    out->print_cr("No events");
+    out->cr();
+    return;
+  }
+
+  if (_count < _length) {
+    for (int i = 0; i < _count; i++) {
+      print(out, _records[i]);
+    }
+  } else {
+    for (int i = _index; i < _length; i++) {
+      print(out, _records[i]);
+    }
+    for (int i = 0; i < _index; i++) {
+      print(out, _records[i]);
+    }
+  }
+  out->cr();
+}
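A quick worked example of the wrap-around ordering above; the numbers are hypothetical:

// Illustrative only, not part of this changeset.
// With _length == 4, after six log() calls we have _count == 4 and _index == 2:
// slots 2 and 3 hold events #3 and #4, slots 0 and 1 hold events #5 and #6.
// print_log_impl() therefore prints slots 2, 3, 0, 1, i.e. the surviving
// events in oldest-to-newest order.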
+
+// Implement a printing routine for the StringLogMessage
+template <>
+inline void EventLogBase<StringLogMessage>::print(outputStream* out, StringLogMessage& lm) {
+  out->print_raw(lm);
+  out->cr();
+}
+
+// Place markers for the beginning and end of a set of events.
+// These end up in the default log.
 class EventMark : public StackObj {
+  StringLogMessage _buffer;
+
  public:
   // log a begin event, format as printf
-  EventMark(const char* format, ...) PRODUCT_RETURN;
+  EventMark(const char* format, ...);
   // log an end event
-  ~EventMark() PRODUCT_RETURN;
+  ~EventMark();
 };
 
-int print_all_events(outputStream *st);
-
 #endif // SHARE_VM_UTILITIES_EVENTS_HPP
--- a/src/share/vm/utilities/exceptions.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/exceptions.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -160,7 +160,7 @@
   thread->set_pending_exception(h_exception(), file, line);
 
   // vm log
-  Events::log("throw_exception " INTPTR_FORMAT, (address)h_exception());
+  Events::log_exception(thread, "Threw " INTPTR_FORMAT " at %s:%d", (address)h_exception(), file, line);
 }
 
 
--- a/src/share/vm/utilities/exceptions.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/exceptions.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -189,6 +189,13 @@
 #define CHECK_NULL                               CHECK_(NULL)
 #define CHECK_false                              CHECK_(false)
 
+#define CHECK_AND_CLEAR                         THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return;        } (0
+#define CHECK_AND_CLEAR_(result)                THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return result; } (0
+#define CHECK_AND_CLEAR_0                       CHECK_AND_CLEAR_(0)
+#define CHECK_AND_CLEAR_NH                      CHECK_AND_CLEAR_(Handle())
+#define CHECK_AND_CLEAR_NULL                    CHECK_AND_CLEAR_(NULL)
+#define CHECK_AND_CLEAR_false                   CHECK_AND_CLEAR_(false)
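Like the existing CHECK_* macros, these splice into a call's trailing THREAD argument, but they clear any pending exception instead of propagating it. A minimal hedged sketch of a call site; the caller and callee names are made up and not part of this changeset:

// Illustrative only, not part of this changeset.
void notify_listener(Handle listener, TRAPS) {
  // The macro call below expands to roughly:
  //   do_notify(listener, THREAD);
  //   if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return; } (0);
  // so an exception raised inside do_notify() is cleared and we simply return.
  do_notify(listener, CHECK_AND_CLEAR);
}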
+
 // The THROW... macros should be used to throw an exception. They require a THREAD variable to be
 // visible within the scope containing the THROW. Usually this is achieved by declaring the function
 // with a TRAPS argument.
@@ -258,7 +265,6 @@
     ShouldNotReachHere();                  \
   } (0
 
-
 // ExceptionMark is a stack-allocated helper class for local exception handling.
 // It is used with the EXCEPTION_MARK macro.
 
--- a/src/share/vm/utilities/globalDefinitions_gcc.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/globalDefinitions_gcc.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -87,14 +87,7 @@
 #endif
 #ifdef __APPLE__
   #include <AvailabilityMacros.h>
-  #if (MAC_OS_X_VERSION_MAX_ALLOWED <= MAC_OS_X_VERSION_10_4)
-    // Mac OS X 10.4 defines EFL_AC and EFL_ID,
-    // which conflict with hotspot variable names.
-    //
-    // This has been fixed in Mac OS X 10.5.
-    #undef EFL_AC
-    #undef EFL_ID
-  #endif
+  #include <mach/mach.h>
 #endif
 #include <sys/time.h>
 #endif // LINUX || _ALLBSD_SOURCE
--- a/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -130,6 +130,9 @@
 //----------------------------------------------------------------------------------------------------
 // Non-standard stdlib-like stuff:
 inline int strcasecmp(const char *s1, const char *s2) { return _stricmp(s1,s2); }
+inline int strncasecmp(const char *s1, const char *s2, size_t n) {
+  return _strnicmp(s1,s2,n);
+}
 
 
 //----------------------------------------------------------------------------------------------------
--- a/src/share/vm/utilities/growableArray.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/growableArray.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -198,8 +198,11 @@
     return idx;
   }
 
-  void append_if_missing(const E& elem) {
-    if (!contains(elem)) append(elem);
+  bool append_if_missing(const E& elem) {
+    // Returns TRUE if elem is added.
+    bool missed = !contains(elem);
+    if (missed) append(elem);
+    return missed;
   }
 
   E at(int i) const {
@@ -292,12 +295,22 @@
     ShouldNotReachHere();
   }
 
+  // The order is preserved.
   void remove_at(int index) {
     assert(0 <= index && index < _len, "illegal index");
     for (int j = index + 1; j < _len; j++) _data[j-1] = _data[j];
     _len--;
   }
 
+  // The order is changed.
+  void delete_at(int index) {
+    assert(0 <= index && index < _len, "illegal index");
+    if (index < --_len) {
+      // Replace removed element with last one.
+      _data[index] = _data[_len];
+    }
+  }
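To make the difference between the two removal flavors concrete, a small hedged example; the values are arbitrary and, as usual for GrowableArray, a ResourceMark is assumed to be active:

// Illustrative only, not part of this changeset.
GrowableArray<int>* a = new GrowableArray<int>(5);
for (int v = 10; v <= 50; v += 10) a->append(v);  // [10, 20, 30, 40, 50]
a->remove_at(1);  // shifts the tail down, order preserved:   [10, 30, 40, 50]
a->delete_at(1);  // swaps in the last element, order changed: [10, 50, 40]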
+
   // inserts the given element before the element at index i
   void insert_before(const int idx, const E& elem) {
     check_nesting();
--- a/src/share/vm/utilities/hashtable.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/hashtable.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -183,7 +183,6 @@
 
   // Accessor
   int entry_size() const { return _entry_size; }
-  int table_size() { return _table_size; }
 
   // The following method is MT-safe and may be used with caution.
   BasicHashtableEntry* bucket(int i);
@@ -195,6 +194,7 @@
   BasicHashtableEntry* new_entry(unsigned int hashValue);
 
 public:
+  int table_size() { return _table_size; }
   void set_entry(int index, BasicHashtableEntry* entry);
 
   void add_entry(int index, BasicHashtableEntry* entry);
--- a/src/share/vm/utilities/numberSeq.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/numberSeq.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -156,6 +156,10 @@
     _sequence[i] = 0.0;
 }
 
+TruncatedSeq::~TruncatedSeq() {
+  FREE_C_HEAP_ARRAY(double, _sequence);
+}
+
 void TruncatedSeq::add(double val) {
   AbsSeq::add(val);
 
--- a/src/share/vm/utilities/numberSeq.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/numberSeq.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -118,6 +118,7 @@
   // accepts a value for L
   TruncatedSeq(int length = DefaultSeqLength,
                double alpha = DEFAULT_ALPHA_VALUE);
+  ~TruncatedSeq();
   virtual void add(double val);
   virtual double maximum() const;
   virtual double last() const; // the last value added to the sequence
--- a/src/share/vm/utilities/preserveException.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/preserveException.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -32,9 +32,9 @@
   thread     = Thread::current();
   _thread    = thread;
   _preserved_exception_oop = Handle(thread, _thread->pending_exception());
-  _thread->clear_pending_exception(); // Needed to avoid infinite recursion
   _preserved_exception_line = _thread->exception_line();
   _preserved_exception_file = _thread->exception_file();
+  _thread->clear_pending_exception(); // Needed to avoid infinite recursion
 }
 
 
--- a/src/share/vm/utilities/vmError.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/vmError.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "compiler/compileBroker.hpp"
 #include "gc_interface/collectedHeap.hpp"
+#include "prims/whitebox.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/init.hpp"
@@ -36,6 +37,7 @@
 #include "utilities/decoder.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/errorReporter.hpp"
+#include "utilities/events.hpp"
 #include "utilities/top.hpp"
 #include "utilities/vmError.hpp"
 
@@ -693,7 +695,14 @@
        st->cr();
      }
 
-  STEP(200, "(printing dynamic libraries)" )
+  STEP(200, "(printing ring buffers)" )
+
+     if (_verbose) {
+       Events::print_all(st);
+       st->cr();
+     }
+
+  STEP(205, "(printing dynamic libraries)" )
 
      if (_verbose) {
        // dynamic libraries, or memory map
@@ -709,6 +718,13 @@
        st->cr();
      }
 
+  STEP(215, "(printing warning if internal testing API used)" )
+
+     if (WhiteBox::used()) {
+       st->print_cr("Unsupported internal testing APIs have been used.");
+       st->cr();
+     }
+
   STEP(220, "(printing environment variables)" )
 
      if (_verbose) {
--- a/src/share/vm/utilities/vmError.hpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/vmError.hpp	Tue Apr 10 10:42:34 2012 -0700
@@ -27,11 +27,12 @@
 
 #include "utilities/globalDefinitions.hpp"
 
-
+class Decoder;
 class VM_ReportJavaOutOfMemory;
 
 class VMError : public StackObj {
   friend class VM_ReportJavaOutOfMemory;
+  friend class Decoder;
 
   enum ErrorType {
     internal_error = 0xe0000000,
--- a/src/share/vm/utilities/xmlstream.cpp	Wed Apr 04 20:44:38 2012 -0700
+++ b/src/share/vm/utilities/xmlstream.cpp	Tue Apr 10 10:42:34 2012 -0700
@@ -192,8 +192,11 @@
     _element_close_stack_ptr = cur_tag + strlen(cur_tag) + 1;
     _element_depth -= 1;
   }
-  if (bad_tag && !VMThread::should_terminate() && !is_error_reported())
+  if (bad_tag && !VMThread::should_terminate() && !VM_Exit::vm_exited() &&
+      !is_error_reported())
+  {
     assert(false, "bad tag in log");
+  }
 }
 #endif
 
--- a/test/Makefile	Wed Apr 04 20:44:38 2012 -0700
+++ b/test/Makefile	Tue Apr 10 10:42:34 2012 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1995, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1995, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,8 @@
 # Makefile to run various jdk tests
 #
 
+GETMIXEDPATH=echo
+
 # Get OS/ARCH specifics
 OSNAME = $(shell uname -s)
 ifeq ($(OSNAME), SunOS)
@@ -60,7 +62,14 @@
     ARCH = i586
   endif
 endif
-ifeq ($(OSNAME), Windows_NT)
+ifeq ($(PLATFORM),)
+  # detect whether we're running in MKS or Cygwin
+  ifeq ($(OSNAME), Windows_NT) # MKS
+    GETMIXEDPATH=dosname -s
+  endif
+  ifeq ($(findstring CYGWIN,$(OSNAME)), CYGWIN)
+    GETMIXEDPATH=cygpath -m -s
+  endif
   PLATFORM = windows
   SLASH_JAVA = J:
   ifeq ($(word 1, $(PROCESSOR_IDENTIFIER)),ia64)
@@ -228,6 +237,24 @@
 
 ################################################################
 
+# wbapitest (make sure the whitebox testing API classes work)
+
+wbapitest: prep $(JT_HOME) $(PRODUCT_HOME) $(JTREG)
+	$(JTREG) -a -v:fail,error               \
+          $(JTREG_KEY_OPTION)                   \
+          $(EXTRA_JTREG_OPTIONS)                \
+          -r:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTreport    \
+          -w:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTwork      \
+          -jdk:$(shell $(GETMIXEDPATH) "$(PRODUCT_HOME)")                  \
+          $(JAVA_OPTIONS:%=-vmoption:%)         \
+          $(shell $(GETMIXEDPATH) "$(TEST_ROOT)")/sanity                   \
+	  || $(BUNDLE_UP_FAILED)
+	$(BUNDLE_UP)
+
+PHONY_LIST += wbapitest
+
+################################################################
+
 # packtest
 
 # Expect JPRT to set JPRT_PACKTEST_HOME.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/7090976/Test7090976.java	Tue Apr 10 10:42:34 2012 -0700
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7090976
+ * @summary Eclipse/CDT causes a JVM crash while indexing C++ code
+ *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement Test7090976
+ */
+
+public class Test7090976 {
+
+    static interface I1 {
+        public void m1();
+    };
+
+    static interface I2 {
+        public void m2();
+    };
+
+    static interface I extends I1,I2 {
+    }
+
+    static class A implements I1 {
+        int v = 0;
+        int v2;
+
+        public void m1() {
+            v2 = v;
+        }
+    }
+
+    static class B implements I2 {
+        Object v = new Object();
+        Object v2;
+
+        public void m2() {
+            v2 = v;
+        }
+    }
+
+    private void test(A a)
+    {
+        if (a instanceof I) {
+            I i = (I)a;
+            i.m1();
+            i.m2();
+        }
+    }
+
+    public static void main(String[] args)
+    {
+        Test7090976 t = new Test7090976();
+        A a = new A();
+        B b = new B();
+        for (int i = 0; i < 10000; i++) {
+            t.test(a);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/7141637/SpreadNullArg.java	Tue Apr 10 10:42:34 2012 -0700
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2011 SAP AG.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test SpreadNullArg
+ * @bug 7141637
+ * @summary  verifies that the MethodHandle spread adapter can gracefully handle null arguments.
+ * @run main SpreadNullArg
+ * @author volker.simonis@gmail.com
+ */
+
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+
+public class SpreadNullArg {
+
+  public static void main(String args[]) {
+
+    MethodType mt_ref_arg = MethodType.methodType(int.class, Integer.class);
+    MethodHandle mh_spreadInvoker = MethodHandles.spreadInvoker(mt_ref_arg, 0);
+    MethodHandle mh_spread_target;
+    int result = 42;
+
+    try {
+      mh_spread_target =
+        MethodHandles.lookup().findStatic(SpreadNullArg.class, "target_spread_arg", mt_ref_arg);
+      result = (int) mh_spreadInvoker.invokeExact(mh_spread_target, (Object[]) null);
+    } catch(NullPointerException e) {
+      // Expected exception - do nothing!
+    } catch(Throwable e) {
+      throw new Error(e);
+    }
+
+    if (result != 42) throw new Error("Expected NullPointerException was not thrown");
+  }
+
+  public static int target_spread_arg(Integer i1) {
+    return i1.intValue();
+  }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/sanity/WBApi.java	Tue Apr 10 10:42:34 2012 -0700
@@ -0,0 +1,13 @@
+/*
+ * @test WBApi
+ * @summary verify that whitebox functions can be linked and executed
+ * @run compile -J-XX:+UnlockDiagnosticVMOptions -J-XX:+WhiteBoxAPI WBApi.java
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI WBApi
+ */
+
+import sun.hotspot.WhiteBox;
+public class WBApi {
+    public static void main(String... args) {
+        System.out.printf("args at: %x\n",WhiteBox.getWhiteBox().getObjectAddress(args));
+    }
+}