changeset 9801:80f8be586fae

Merge
author twisti
date Fri, 18 Dec 2015 12:39:02 -0800
parents ffa4ddf10551 (current diff) 55c0514c0a07 (diff)
children 77f51e2b4cd8
files agent/src/share/classes/sun/jvm/hotspot/gc/g1/HeapRegionSetCount.java src/cpu/aarch64/vm/interpreter_aarch64.cpp src/cpu/aarch64/vm/macroAssembler_aarch64.cpp src/cpu/aarch64/vm/stubGenerator_aarch64.cpp src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp src/cpu/ppc/vm/globalDefinitions_ppc.hpp src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp src/cpu/ppc/vm/templateInterpreter_ppc.cpp src/cpu/ppc/vm/templateInterpreter_ppc.hpp src/cpu/ppc/vm/vm_version_ppc.cpp src/cpu/sparc/vm/globalDefinitions_sparc.hpp src/cpu/sparc/vm/macroAssembler_sparc.cpp src/cpu/sparc/vm/macroAssembler_sparc.hpp src/cpu/sparc/vm/nativeInst_sparc.cpp src/cpu/sparc/vm/sparc.ad src/cpu/sparc/vm/vm_version_sparc.cpp src/cpu/sparc/vm/vm_version_sparc.hpp src/cpu/x86/vm/globalDefinitions_x86.hpp src/cpu/x86/vm/globals_x86.hpp src/cpu/x86/vm/macroAssembler_x86.cpp src/cpu/x86/vm/macroAssembler_x86.hpp src/cpu/x86/vm/stubGenerator_x86_32.cpp src/cpu/x86/vm/stubGenerator_x86_64.cpp src/cpu/x86/vm/templateInterpreter_x86_32.cpp src/cpu/x86/vm/templateInterpreter_x86_64.cpp src/cpu/x86/vm/x86_32.ad src/cpu/x86/vm/x86_64.ad src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java src/os/aix/vm/thread_aix.inline.hpp src/os/bsd/vm/thread_bsd.inline.hpp src/os/linux/vm/thread_linux.inline.hpp src/os/solaris/vm/thread_solaris.inline.hpp src/os/windows/vm/thread_windows.inline.hpp src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.cpp src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.hpp src/os_cpu/bsd_x86/vm/threadLS_bsd_x86.cpp src/os_cpu/bsd_x86/vm/threadLS_bsd_x86.hpp src/os_cpu/bsd_zero/vm/threadLS_bsd_zero.cpp src/os_cpu/bsd_zero/vm/threadLS_bsd_zero.hpp src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.cpp src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.hpp src/os_cpu/linux_ppc/vm/threadLS_linux_ppc.cpp src/os_cpu/linux_ppc/vm/threadLS_linux_ppc.hpp src/os_cpu/linux_sparc/vm/threadLS_linux_sparc.cpp src/os_cpu/linux_sparc/vm/threadLS_linux_sparc.hpp src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp src/os_cpu/linux_x86/vm/threadLS_linux_x86.hpp src/os_cpu/linux_zero/vm/threadLS_linux_zero.cpp src/os_cpu/linux_zero/vm/threadLS_linux_zero.hpp src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.cpp src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.hpp src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.cpp src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.hpp src/os_cpu/windows_x86/vm/threadLS_windows_x86.cpp src/os_cpu/windows_x86/vm/threadLS_windows_x86.hpp src/share/vm/ci/ciMethod.hpp src/share/vm/classfile/classFileParser.cpp src/share/vm/classfile/vmSymbols.cpp src/share/vm/classfile/vmSymbols.hpp src/share/vm/code/nmethod.cpp src/share/vm/compiler/compileBroker.cpp src/share/vm/compiler/compilerDirectives.hpp src/share/vm/gc/g1/g1ErgoVerbose.cpp src/share/vm/gc/g1/g1ErgoVerbose.hpp src/share/vm/gc/g1/g1HRPrinter.cpp src/share/vm/gc/g1/g1Log.cpp src/share/vm/gc/g1/g1Log.hpp src/share/vm/gc/g1/g1RootClosures.inline.hpp src/share/vm/gc/g1/vmStructs_g1.hpp src/share/vm/gc/shared/collectedHeap.hpp src/share/vm/interpreter/linkResolver.cpp src/share/vm/jvmci/jvmciRuntime.cpp src/share/vm/oops/constantPool.hpp src/share/vm/oops/instanceKlass.hpp src/share/vm/oops/klass.hpp src/share/vm/oops/klassVtable.hpp src/share/vm/oops/method.hpp src/share/vm/oops/objArrayKlass.hpp src/share/vm/prims/unsafe.cpp src/share/vm/prims/whitebox.cpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/os.hpp src/share/vm/runtime/sharedRuntime.cpp 
src/share/vm/runtime/sharedRuntime.hpp src/share/vm/runtime/stubRoutines.cpp src/share/vm/runtime/stubRoutines.hpp src/share/vm/runtime/thread.cpp src/share/vm/runtime/thread.hpp src/share/vm/runtime/threadLocalStorage.cpp src/share/vm/runtime/vmStructs.cpp src/share/vm/runtime/vm_operations.hpp test/gc/6941923/Test6941923.java test/gc/TestGCLogRotationViaJcmd.java test/gc/g1/TestPrintGCDetails.java test/gc/g1/TestSummarizeRSetStats.java test/gc/g1/TestSummarizeRSetStatsPerRegion.java test/gc/g1/TestSummarizeRSetStatsThreads.java test/gc/g1/TestSummarizeRSetStatsTools.java test/runtime/logging/SafepointTestMain.java test/runtime/logging/VMOperationTestMain.java
diffstat 536 files changed, 21903 insertions(+), 23791 deletions(-)
--- a/.hgtags	Thu Dec 17 23:36:28 2015 +0000
+++ b/.hgtags	Fri Dec 18 12:39:02 2015 -0800
@@ -497,3 +497,5 @@
 53cb98d68a1aeb08d29c89d6da748de60c448e37 jdk9-b92
 d8b24776484cc4dfd19f50b23eaa18a80a161371 jdk9-b93
 a22b7c80529f5f05c847e932e017456e83c46233 jdk9-b94
+0c79cf3cdf0904fc4a630b91b32904491e1ae430 jdk-9+95
+a94bb7203596dd632486f1e3655fa5f70541dc08 jdk-9+96
--- a/agent/src/os/linux/LinuxDebuggerLocal.c	Thu Dec 17 23:36:28 2015 +0000
+++ b/agent/src/os/linux/LinuxDebuggerLocal.c	Fri Dec 18 12:39:02 2015 -0800
@@ -49,7 +49,7 @@
 #include "sun_jvm_hotspot_debugger_sparc_SPARCThreadContext.h"
 #endif
 
-#ifdef ppc64
+#if defined(ppc64) || defined(ppc64le)
 #include "sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext.h"
 #endif
 
@@ -223,9 +223,12 @@
   verifyBitness(env, (char *) &buf);
   CHECK_EXCEPTION;
 
+  char err_buf[200];
   struct ps_prochandle* ph;
-  if ( (ph = Pgrab(jpid)) == NULL) {
-    THROW_NEW_DEBUGGER_EXCEPTION("Can't attach to the process");
+  if ( (ph = Pgrab(jpid, err_buf, sizeof(err_buf))) == NULL) {
+    char msg[230];
+    snprintf(msg, sizeof(msg), "Can't attach to the process: %s", err_buf);
+    THROW_NEW_DEBUGGER_EXCEPTION(msg);
   }
   (*env)->SetLongField(env, this_obj, p_ps_prochandle_ID, (jlong)(intptr_t)ph);
   fillThreadsAndLoadObjects(env, this_obj, ph);
@@ -349,7 +352,7 @@
   return (err == PS_OK)? array : 0;
 }
 
-#if defined(i386) || defined(amd64) || defined(sparc) || defined(sparcv9) | defined(ppc64) || defined(aarch64)
+#if defined(i386) || defined(amd64) || defined(sparc) || defined(sparcv9) | defined(ppc64) || defined(ppc64le) || defined(aarch64)
 JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_getThreadIntegerRegisterSet0
   (JNIEnv *env, jobject this_obj, jint lwp_id) {
 
@@ -377,7 +380,7 @@
 #if defined(sparc) || defined(sparcv9)
 #define NPRGREG sun_jvm_hotspot_debugger_sparc_SPARCThreadContext_NPRGREG
 #endif
-#ifdef ppc64
+#if defined(ppc64) || defined(ppc64le)
 #define NPRGREG sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext_NPRGREG
 #endif
 
@@ -486,7 +489,7 @@
   }
 #endif /* aarch64 */
 
-#ifdef ppc64
+#if defined(ppc64) || defined(ppc64le)
 #define REG_INDEX(reg) sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext_##reg
 
   regs[REG_INDEX(LR)] = gregs.link;
--- a/agent/src/os/linux/libproc.h	Thu Dec 17 23:36:28 2015 +0000
+++ b/agent/src/os/linux/libproc.h	Fri Dec 18 12:39:02 2015 -0800
@@ -68,7 +68,8 @@
 *************************************************************************************/
 
 
-#if defined(sparc) || defined(sparcv9) || defined(ppc64)
+#if defined(sparc) || defined(sparcv9) || defined(ppc64) || defined(ppc64le)
+#include <asm/ptrace.h>
 #define user_regs_struct  pt_regs
 #endif
 #if defined(aarch64)
@@ -86,7 +87,7 @@
 struct ps_prochandle;
 
 // attach to a process
-struct ps_prochandle* Pgrab(pid_t pid);
+struct ps_prochandle* Pgrab(pid_t pid, char* err_buf, size_t err_buf_len);
 
 // attach to a core dump
 struct ps_prochandle* Pgrab_core(const char* execfile, const char* corefile);
--- a/agent/src/os/linux/ps_proc.c	Thu Dec 17 23:36:28 2015 +0000
+++ b/agent/src/os/linux/ps_proc.c	Fri Dec 18 12:39:02 2015 -0800
@@ -215,9 +215,12 @@
 }
 
 // attach to a process/thread specified by "pid"
-static bool ptrace_attach(pid_t pid) {
+static bool ptrace_attach(pid_t pid, char* err_buf, size_t err_buf_len) {
   if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0) {
-    print_debug("ptrace(PTRACE_ATTACH, ..) failed for %d\n", pid);
+    char buf[200];
+    char* msg = strerror_r(errno, buf, sizeof(buf));
+    snprintf(err_buf, err_buf_len, "ptrace(PTRACE_ATTACH, ..) failed for %d: %s", pid, msg);
+    print_debug("%s\n", err_buf);
     return false;
   } else {
     return ptrace_waitpid(pid);
@@ -370,16 +373,17 @@
 };
 
 // attach to the process. One and only one exposed stuff
-struct ps_prochandle* Pgrab(pid_t pid) {
+struct ps_prochandle* Pgrab(pid_t pid, char* err_buf, size_t err_buf_len) {
   struct ps_prochandle* ph = NULL;
   thread_info* thr = NULL;
 
   if ( (ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle))) == NULL) {
-     print_debug("can't allocate memory for ps_prochandle\n");
+     snprintf(err_buf, err_buf_len, "can't allocate memory for ps_prochandle");
+     print_debug("%s\n", err_buf);
      return NULL;
   }
 
-  if (ptrace_attach(pid) != true) {
+  if (ptrace_attach(pid, err_buf, err_buf_len) != true) {
      free(ph);
      return NULL;
   }
@@ -402,7 +406,7 @@
   thr = ph->threads;
   while (thr) {
      // don't attach to the main thread again
-     if (ph->pid != thr->lwp_id && ptrace_attach(thr->lwp_id) != true) {
+    if (ph->pid != thr->lwp_id && ptrace_attach(thr->lwp_id, err_buf, err_buf_len) != true) {
         // even if one attach fails, we get return NULL
         Prelease(ph);
         return NULL;
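
The three files above change together: Pgrab() grows an error buffer, ptrace_attach() fills it with the strerror_r() text for the failing ptrace(2) call, and the JNI layer forwards that text into the Java-level debugger exception instead of the bare "Can't attach to the process". A minimal sketch of the new caller-side contract, using only the Pgrab signature declared in libproc.h above; the wrapper function and its buffer size are illustrative, not part of the patch:

    #include <stdio.h>
    #include <sys/types.h>

    struct ps_prochandle;  /* opaque handle, as declared in libproc.h */
    struct ps_prochandle* Pgrab(pid_t pid, char* err_buf, size_t err_buf_len);

    /* Illustrative caller: on failure, err_buf now carries the reason
     * (e.g. EPERM from a restrictive ptrace scope, ESRCH for a dead pid)
     * instead of a generic message with the cause discarded. */
    static struct ps_prochandle* attach_or_report(pid_t pid) {
      char err_buf[200];  /* same size the JNI layer above uses */
      struct ps_prochandle* ph = Pgrab(pid, err_buf, sizeof(err_buf));
      if (ph == NULL) {
        fprintf(stderr, "Can't attach to the process: %s\n", err_buf);
      }
      return ph;
    }
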
--- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Fri Dec 18 12:39:02 2015 -0800
@@ -1446,7 +1446,7 @@
                 if (type.equals("threads")) {
                     Threads threads = VM.getVM().getThreads();
                     for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
-                        Address base = thread.getBaseOfStackPointer();
+                        Address base = thread.getStackBase();
                         Address end = thread.getLastJavaSP();
                         if (end == null) continue;
                         if (end.lessThan(base)) {
@@ -1454,11 +1454,13 @@
                             base = end;
                             end = tmp;
                         }
-                        out.println("Searching " + base + " " + end);
+                        //out.println("Searching " + base + " " + end);
                         while (base != null && base.lessThan(end)) {
                             Address val = base.getAddressAt(0);
                             if (AddressOps.equal(val, value)) {
-                                out.println(base);
+                                ByteArrayOutputStream bos = new ByteArrayOutputStream();
+                                thread.printThreadIDOn(new PrintStream(bos));
+                                out.println("found on the stack of thread " + bos.toString() + " at " + base);
                             }
                             base = base.addOffsetTo(stride);
                         }
@@ -1601,6 +1603,8 @@
                         thread.printThreadIDOn(new PrintStream(bos));
                         if (all || bos.toString().equals(name)) {
                             out.println("Thread " + bos.toString() + " Address " + thread.getAddress());
+                            thread.printInfoOn(out);
+                            out.println(" ");
                             if (!all) return;
                         }
                     }
@@ -1618,6 +1622,8 @@
                     for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
                         thread.printThreadIDOn(out);
                         out.println(" " + thread.getThreadName());
+                        thread.printInfoOn(out);
+                        out.println("\n...");
                     }
                 }
             }
--- a/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Fri Dec 18 12:39:02 2015 -0800
@@ -125,10 +125,14 @@
     }
   }
 
-  // close this tool without calling System.exit
-  protected void closeUI() {
-      workerThread.shutdown();
-      frame.dispose();
+  private class CloseUI extends WindowAdapter {
+
+      @Override
+      public void windowClosing(WindowEvent e) {
+          workerThread.shutdown();
+          frame.dispose();
+      }
+
   }
 
   public void run() {
@@ -144,7 +148,8 @@
 
     frame = new JFrame("HSDB - HotSpot Debugger");
     frame.setSize(800, 600);
-    frame.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE);
+    frame.setDefaultCloseOperation(WindowConstants.DO_NOTHING_ON_CLOSE);
+    frame.addWindowListener(new CloseUI());
 
     JMenuBar menuBar = new JMenuBar();
 
@@ -207,7 +212,8 @@
     item = createMenuItem("Exit",
                             new ActionListener() {
                                 public void actionPerformed(ActionEvent e) {
-                                  closeUI();
+                                  workerThread.shutdown();
+                                  frame.dispose();
                                 }
                               });
     item.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_X, ActionEvent.ALT_MASK));
--- a/agent/src/share/classes/sun/jvm/hotspot/SAGetopt.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/SAGetopt.java	Fri Dec 18 12:39:02 2015 -0800
@@ -37,7 +37,7 @@
     private boolean _optreset; // special handling of first call
 
     public SAGetopt(String[] args) {
-        _argv  = args;
+        _argv  = args.clone();
         _optind   = 0;
         _optopt   = 1;
         _optarg   = null;
--- a/agent/src/share/classes/sun/jvm/hotspot/gc/g1/HeapRegionSetBase.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc/g1/HeapRegionSetBase.java	Fri Dec 18 12:39:02 2015 -0800
@@ -41,7 +41,8 @@
 
 public class HeapRegionSetBase extends VMObject {
 
-    static private long countField;
+    // uint _length
+    static private CIntegerField lengthField;
 
     static {
         VM.registerVMInitializedObserver(new Observer() {
@@ -54,13 +55,11 @@
     static private synchronized void initialize(TypeDataBase db) {
         Type type = db.lookupType("HeapRegionSetBase");
 
-        countField = type.getField("_count").getOffset();
+        lengthField = type.getCIntegerField("_length");
     }
 
-
-    public HeapRegionSetCount count() {
-        Address countFieldAddr = addr.addOffsetTo(countField);
-        return (HeapRegionSetCount) VMObjectFactory.newObject(HeapRegionSetCount.class, countFieldAddr);
+    public long length() {
+        return lengthField.getValue(addr);
     }
 
     public HeapRegionSetBase(Address addr) {
--- a/agent/src/share/classes/sun/jvm/hotspot/gc/g1/HeapRegionSetCount.java	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.g1;
-
-import java.util.Iterator;
-import java.util.Observable;
-import java.util.Observer;
-
-import sun.jvm.hotspot.debugger.Address;
-import sun.jvm.hotspot.runtime.VM;
-import sun.jvm.hotspot.runtime.VMObject;
-import sun.jvm.hotspot.runtime.VMObjectFactory;
-import sun.jvm.hotspot.types.AddressField;
-import sun.jvm.hotspot.types.CIntegerField;
-import sun.jvm.hotspot.types.Type;
-import sun.jvm.hotspot.types.TypeDataBase;
-
-// Mirror class for HeapRegionSetCount. Represents a group of regions.
-
-public class HeapRegionSetCount extends VMObject {
-
-    static private CIntegerField lengthField;
-    static private CIntegerField capacityField;
-
-    static {
-        VM.registerVMInitializedObserver(new Observer() {
-                public void update(Observable o, Object data) {
-                    initialize(VM.getVM().getTypeDataBase());
-                }
-            });
-    }
-
-    static private synchronized void initialize(TypeDataBase db) {
-        Type type = db.lookupType("HeapRegionSetCount");
-
-        lengthField   = type.getCIntegerField("_length");
-        capacityField = type.getCIntegerField("_capacity");
-    }
-
-    public long length() {
-        return lengthField.getValue(addr);
-    }
-
-    public long capacity() {
-        return capacityField.getValue(addr);
-    }
-
-    public HeapRegionSetCount(Address addr) {
-        super(addr);
-    }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/JavaThread.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/JavaThread.java	Fri Dec 18 12:39:02 2015 -0800
@@ -416,7 +416,7 @@
     } else {
       tty.println("No Java frames present");
     }
-    tty.println("Base of Stack: " + getBaseOfStackPointer());
+    tty.println("Base of Stack: " + getStackBase());
     tty.println("Last_Java_SP: " + getLastJavaSP());
     tty.println("Last_Java_FP: " + getLastJavaFP());
     tty.println("Last_Java_PC: " + getLastJavaPC());
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Fri Dec 18 12:39:02 2015 -0800
@@ -229,17 +229,17 @@
 
      public String getValue() {
         if (isBool()) {
-           return new Boolean(getBool()).toString();
+           return Boolean.toString(getBool());
         } else if (isInt()) {
-           return new Long(getInt()).toString();
+           return Long.toString(getInt());
         } else if (isUInt()) {
-           return new Long(getUInt()).toString();
+           return Long.toString(getUInt());
         } else if (isIntx()) {
-           return new Long(getIntx()).toString();
+           return Long.toString(getIntx());
         } else if (isUIntx()) {
-           return new Long(getUIntx()).toString();
+           return Long.toString(getUIntx());
         } else if (isSizet()) {
-            return new Long(getSizet()).toString();
+            return Long.toString(getSizet());
         } else {
            return null;
         }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Fri Dec 18 12:39:02 2015 -0800
@@ -112,8 +112,7 @@
           long survivorRegionNum = g1mm.survivorRegionNum();
           HeapRegionSetBase oldSet = g1h.oldSet();
           HeapRegionSetBase humongousSet = g1h.humongousSet();
-          long oldRegionNum = oldSet.count().length()
-                       + humongousSet.count().capacity() / HeapRegion.grainBytes();
+          long oldRegionNum = oldSet.length() + humongousSet.length();
           printG1Space("G1 Heap:", g1h.n_regions(),
                        g1h.used(), g1h.capacity());
           System.out.println("G1 Young Generation:");
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Fri Dec 18 12:39:02 2015 -0800
@@ -1921,6 +1921,15 @@
             buf.link(genPCHref(addressToLong(pc)), pc.toString());
          }
 
+         if (!method.isStatic() && !method.isNative()) {
+            OopHandle oopHandle = vf.getLocals().oopHandleAt(0);
+
+            if (oopHandle != null) {
+               buf.append(", oop = ");
+               buf.append(oopHandle.toString());
+            }
+         }
+
          if (vf.isCompiledFrame()) {
             buf.append(" (Compiled");
          }
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java	Fri Dec 18 12:39:02 2015 -0800
@@ -54,7 +54,7 @@
 
   public static boolean knownCPU(String cpu) {
     final String[] KNOWN =
-        new String[] {"i386", "x86", "x86_64", "amd64", "sparc", "sparcv9", "ppc64", "aarch64"};
+        new String[] {"i386", "x86", "x86_64", "amd64", "sparc", "sparcv9", "ppc64", "ppc64le", "aarch64"};
 
     for(String s : KNOWN) {
       if(s.equals(cpu))
@@ -98,6 +98,9 @@
     if (cpu.equals("x86_64"))
       return "amd64";
 
+    if (cpu.equals("ppc64le"))
+      return "ppc64";
+
     return cpu;
 
   }
--- a/make/aix/makefiles/xlc.make	Thu Dec 17 23:36:28 2015 +0000
+++ b/make/aix/makefiles/xlc.make	Fri Dec 18 12:39:02 2015 -0800
@@ -74,6 +74,9 @@
 CFLAGS += -qnortti
 CFLAGS += -qnoeh
 
+# for compiler-level tls
+CFLAGS += -qtls=default
+
 CFLAGS += -D_REENTRANT
 # no xlc counterpart for -fcheck-new
 # CFLAGS += -fcheck-new
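
-qtls=default turns on xlC's compiler-level thread-local storage (the __thread keyword). It pairs with the long list of deleted threadLS_* files in this merge's file list: the hand-rolled per-OS/CPU ThreadLocalStorage implementations give way to a compiler-based thread-local pointer for the current thread. A minimal sketch of that pattern, assuming a GCC/xlC-style toolchain; the names are illustrative, not HotSpot's:

    // With compiler-level TLS, fetching the current thread is a single
    // TLS-relative load instead of a pthread_getspecific() library call.
    class Thread;

    static __thread Thread* g_current_thread = 0;  // one slot per OS thread

    inline Thread* current_thread()              { return g_current_thread; }
    inline void    set_current_thread(Thread* t) { g_current_thread = t; }
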
--- a/make/defs.make	Thu Dec 17 23:36:28 2015 +0000
+++ b/make/defs.make	Fri Dec 18 12:39:02 2015 -0800
@@ -277,7 +277,7 @@
 
   # Use uname output for SRCARCH, but deal with platform differences. If ARCH
   # is not explicitly listed below, it is treated as x86.
-  SRCARCH    ?= $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 ppc ppc64 aarch64 zero,$(ARCH)))
+  SRCARCH    ?= $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 ppc ppc64 ppc64le aarch64 zero,$(ARCH)))
   ARCH/       = x86
   ARCH/sparc  = sparc
   ARCH/sparc64= sparc
@@ -285,6 +285,7 @@
   ARCH/amd64  = x86
   ARCH/x86_64 = x86
   ARCH/ppc64  = ppc
+  ARCH/ppc64le= ppc
   ARCH/ppc    = ppc
   ARCH/aarch64= aarch64
   ARCH/zero   = zero
@@ -309,8 +310,13 @@
     endif
   endif
 
-  # LIBARCH is 1:1 mapping from BUILDARCH
-  LIBARCH        ?= $(LIBARCH/$(BUILDARCH))
+  # LIBARCH is 1:1 mapping from BUILDARCH, except for ARCH=ppc64le
+  ifeq ($(ARCH),ppc64le)
+    LIBARCH      ?= ppc64le
+  else
+    LIBARCH      ?= $(LIBARCH/$(BUILDARCH))
+  endif
+
   LIBARCH/i486    = i386
   LIBARCH/amd64   = amd64
   LIBARCH/sparc   = sparc
--- a/make/linux/makefiles/gcc.make	Thu Dec 17 23:36:28 2015 +0000
+++ b/make/linux/makefiles/gcc.make	Fri Dec 18 12:39:02 2015 -0800
@@ -260,6 +260,13 @@
 
 OPT_CFLAGS = $(OPT_CFLAGS/$(OPT_CFLAGS_DEFAULT)) $(OPT_EXTRAS)
 
+# Variable tracking size limit exceeded for VMStructs::init() 
+ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "1"
+  # GCC >= 4.3
+  # Gcc 4.1.2 does not support this flag, nor does it have problems compiling the file.
+  OPT_CFLAGS/vmStructs.o += -fno-var-tracking-assignments
+endif
+
 # The gcc compiler segv's on ia64 when compiling bytecodeInterpreter.cpp
 # if we use expensive-optimizations
 ifeq ($(BUILDARCH), ia64)
--- a/src/cpu/aarch64/vm/globals_aarch64.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/aarch64/vm/globals_aarch64.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -58,14 +58,17 @@
 #define DEFAULT_STACK_YELLOW_PAGES (2)
 #define DEFAULT_STACK_RED_PAGES (1)
 #define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5))
+#define DEFAULT_STACK_RESERVED_PAGES (0)
 
 #define MIN_STACK_YELLOW_PAGES 1
 #define MIN_STACK_RED_PAGES    1
 #define MIN_STACK_SHADOW_PAGES 1
+#define MIN_STACK_RESERVED_PAGES (0)
 
 define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
 define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
 define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
+define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
--- a/src/cpu/aarch64/vm/interpreter_aarch64.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/aarch64/vm/interpreter_aarch64.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -39,7 +39,6 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -260,20 +259,3 @@
 
   return entry_point;
 }
-
-
-void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
-
-  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
-  // the days we had adapter frames. When we deoptimize a situation where a
-  // compiled caller calls a compiled caller will have registers it expects
-  // to survive the call to the callee. If we deoptimize the callee the only
-  // way we can restore these registers is to have the oldest interpreter
-  // frame that we create restore these values. That is what this routine
-  // will accomplish.
-
-  // At the moment we have modified c2 to not have any callee save registers
-  // so this problem does not exist and this routine is just a place holder.
-
-  assert(f->is_interpreted_frame(), "must be interpreted");
-}
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -41,6 +41,7 @@
 #include "runtime/icache.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "runtime/thread.hpp"
 
 #if INCLUDE_ALL_GCS
 #include "gc/g1/g1CollectedHeap.inline.hpp"
@@ -4653,3 +4654,23 @@
     BIND(DONE);
       sub(result, result, len); // Return index where we stopped
 }
+
+// get_thread() can be called anywhere inside generated code so we
+// need to save whatever non-callee save context might get clobbered
+// by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
+// the call setup code.
+//
+// aarch64_get_thread_helper() clobbers only r0, r1, and flags.
+//
+void MacroAssembler::get_thread(Register dst) {
+  RegSet saved_regs = RegSet::range(r0, r1) + lr - dst;
+  push(saved_regs, sp);
+
+  mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
+  blrt(lr, 1, 0, 1);
+  if (dst != c_rarg0) {
+    mov(dst, c_rarg0);
+  }
+
+  pop(saved_regs, sp);
+}
--- a/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -2319,7 +2319,7 @@
   //   c_rarg4   - input length
   //
   // Output:
-  //   r0       - input length
+  //   r0        - input length
   //
   address generate_cipherBlockChaining_decryptAESCrypt() {
     assert(UseAES, "need AES instructions and misaligned SSE support");
@@ -2381,7 +2381,7 @@
       __ br(Assembler::EQ, L_rounds_52);
 
       __ aesd(v0, v17); __ aesimc(v0, v0);
-      __ aesd(v0, v17); __ aesimc(v0, v0);
+      __ aesd(v0, v18); __ aesimc(v0, v0);
     __ BIND(L_rounds_52);
       __ aesd(v0, v19); __ aesimc(v0, v0);
       __ aesd(v0, v20); __ aesimc(v0, v0);
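
The one-register fix above (second operand v17 changed to v18) sits in the decrypt stub's key-length prefix: the two rounds that fall through to L_rounds_52 (i.e. that run only when the branch to the label is not taken) must consume consecutive round keys, but the copy-pasted line applied v17 twice and never touched v18, so that path ran with a wrong key schedule. A toy illustration of the invariant the fix restores; aes_dec_round stands in for the aesd+aesimc instruction pair and is not a real AES round:

    #include <array>
    #include <cstdint>

    static uint64_t aes_dec_round(uint64_t state, uint64_t key) {
      return state ^ key;  // placeholder for the aesd+aesimc pair
    }

    uint64_t run_rounds(uint64_t state, const std::array<uint64_t, 4>& keys) {
      for (uint64_t k : keys) {            // v17, v18, v19, v20 in the stub
        state = aes_dec_round(state, k);   // each round key used exactly once,
      }                                    // in order -- the unpatched stub
      return state;                        // applied keys[0] twice, skipped keys[1]
    }
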
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,1925 @@
+/*
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
+#include "interpreter/templateTable.hpp"
+#include "interpreter/bytecodeTracer.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/timer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+#include <sys/types.h>
+
+#ifndef PRODUCT
+#include "oops/method.hpp"
+#endif // !PRODUCT
+
+#ifdef BUILTIN_SIM
+#include "../../../../../../simulator/simulator.hpp"
+#endif
+
+#define __ _masm->
+
+#ifndef CC_INTERP
+
+//-----------------------------------------------------------------------------
+
+extern "C" void entry(CodeBuffer*);
+
+//-----------------------------------------------------------------------------
+
+address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
+  address entry = __ pc();
+
+#ifdef ASSERT
+  {
+    Label L;
+    __ ldr(rscratch1, Address(rfp,
+                       frame::interpreter_frame_monitor_block_top_offset *
+                       wordSize));
+    __ mov(rscratch2, sp);
+    __ cmp(rscratch1, rscratch2); // maximal rsp for current rfp (stack
+                           // grows negative)
+    __ br(Assembler::HS, L); // check if frame is complete
+    __ stop ("interpreter frame not set up");
+    __ bind(L);
+  }
+#endif // ASSERT
+  // Restore bcp under the assumption that the current frame is still
+  // interpreted
+  __ restore_bcp();
+
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+  // throw exception
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::throw_StackOverflowError));
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
+        const char* name) {
+  address entry = __ pc();
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+  // setup parameters
+  // ??? convention: expect aberrant index in register r1
+  __ movw(c_rarg2, r1);
+  __ mov(c_rarg1, (address)name);
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::
+                              throw_ArrayIndexOutOfBoundsException),
+             c_rarg1, c_rarg2);
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
+  address entry = __ pc();
+
+  // object is at TOS
+  __ pop(c_rarg1);
+
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::
+                              throw_ClassCastException),
+             c_rarg1);
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_exception_handler_common(
+        const char* name, const char* message, bool pass_oop) {
+  assert(!pass_oop || message == NULL, "either oop or message but not both");
+  address entry = __ pc();
+  if (pass_oop) {
+    // object is at TOS
+    __ pop(c_rarg2);
+  }
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+  // setup parameters
+  __ lea(c_rarg1, Address((address)name));
+  if (pass_oop) {
+    __ call_VM(r0, CAST_FROM_FN_PTR(address,
+                                    InterpreterRuntime::
+                                    create_klass_exception),
+               c_rarg1, c_rarg2);
+  } else {
+    // kind of lame ExternalAddress can't take NULL because
+    // external_word_Relocation will assert.
+    if (message != NULL) {
+      __ lea(c_rarg2, Address((address)message));
+    } else {
+      __ mov(c_rarg2, NULL_WORD);
+    }
+    __ call_VM(r0,
+               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
+               c_rarg1, c_rarg2);
+  }
+  // throw exception
+  __ b(address(Interpreter::throw_exception_entry()));
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
+  address entry = __ pc();
+  // NULL last_sp until next java call
+  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+  __ dispatch_next(state);
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
+  address entry = __ pc();
+
+  // Restore stack bottom in case i2c adjusted stack
+  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+  // and NULL it as marker that esp is now tos until next java call
+  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+  __ restore_bcp();
+  __ restore_locals();
+  __ restore_constant_pool_cache();
+  __ get_method(rmethod);
+
+  // Pop N words from the stack
+  __ get_cache_and_index_at_bcp(r1, r2, 1, index_size);
+  __ ldr(r1, Address(r1, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+  __ andr(r1, r1, ConstantPoolCacheEntry::parameter_size_mask);
+
+  __ add(esp, esp, r1, Assembler::LSL, 3);
+
+  // Restore machine SP
+  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
+  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
+  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
+  __ ldr(rscratch2,
+         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
+  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
+  __ andr(sp, rscratch1, -16);
+
+#ifndef PRODUCT
+  // tell the simulator that the method has been reentered
+  if (NotifySimulator) {
+    __ notify(Assembler::method_reentry);
+  }
+#endif
+  __ get_dispatch();
+  __ dispatch_next(state, step);
+
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
+                                                               int step) {
+  address entry = __ pc();
+  __ restore_bcp();
+  __ restore_locals();
+  __ restore_constant_pool_cache();
+  __ get_method(rmethod);
+
+  // handle exceptions
+  {
+    Label L;
+    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
+    __ cbz(rscratch1, L);
+    __ call_VM(noreg,
+               CAST_FROM_FN_PTR(address,
+                                InterpreterRuntime::throw_pending_exception));
+    __ should_not_reach_here();
+    __ bind(L);
+  }
+
+  __ get_dispatch();
+
+  // Calculate stack limit
+  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
+  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
+  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
+  __ ldr(rscratch2,
+         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
+  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
+  __ andr(sp, rscratch1, -16);
+
+  // Restore expression stack pointer
+  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+  // NULL last_sp until next java call
+  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+
+  __ dispatch_next(state, step);
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_result_handler_for(
+        BasicType type) {
+    address entry = __ pc();
+  switch (type) {
+  case T_BOOLEAN: __ uxtb(r0, r0);        break;
+  case T_CHAR   : __ uxth(r0, r0);       break;
+  case T_BYTE   : __ sxtb(r0, r0);        break;
+  case T_SHORT  : __ sxth(r0, r0);        break;
+  case T_INT    : __ uxtw(r0, r0);        break;  // FIXME: We almost certainly don't need this
+  case T_LONG   : /* nothing to do */        break;
+  case T_VOID   : /* nothing to do */        break;
+  case T_FLOAT  : /* nothing to do */        break;
+  case T_DOUBLE : /* nothing to do */        break;
+  case T_OBJECT :
+    // retrieve result from frame
+    __ ldr(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
+    // and verify it
+    __ verify_oop(r0);
+    break;
+  default       : ShouldNotReachHere();
+  }
+  __ ret(lr);                                  // return from result handler
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_safept_entry_for(
+        TosState state,
+        address runtime_entry) {
+  address entry = __ pc();
+  __ push(state);
+  __ call_VM(noreg, runtime_entry);
+  __ membar(Assembler::AnyAny);
+  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
+  return entry;
+}
+
+// Helpers for commoning out cases in the various type of method entries.
+//
+
+
+// increment invocation count & check for overflow
+//
+// Note: checking for negative value instead of overflow
+//       so we have a 'sticky' overflow test
+//
+// rmethod: method
+//
+void InterpreterGenerator::generate_counter_incr(
+        Label* overflow,
+        Label* profile_method,
+        Label* profile_method_continue) {
+  Label done;
+  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
+  if (TieredCompilation) {
+    int increment = InvocationCounter::count_increment;
+    Label no_mdo;
+    if (ProfileInterpreter) {
+      // Are we profiling?
+      __ ldr(r0, Address(rmethod, Method::method_data_offset()));
+      __ cbz(r0, no_mdo);
+      // Increment counter in the MDO
+      const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
+                                                in_bytes(InvocationCounter::counter_offset()));
+      const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
+      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
+      __ b(done);
+    }
+    __ bind(no_mdo);
+    // Increment counter in MethodCounters
+    const Address invocation_counter(rscratch2,
+                  MethodCounters::invocation_counter_offset() +
+                  InvocationCounter::counter_offset());
+    __ get_method_counters(rmethod, rscratch2, done);
+    const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
+    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
+    __ bind(done);
+  } else { // not TieredCompilation
+    const Address backedge_counter(rscratch2,
+                  MethodCounters::backedge_counter_offset() +
+                  InvocationCounter::counter_offset());
+    const Address invocation_counter(rscratch2,
+                  MethodCounters::invocation_counter_offset() +
+                  InvocationCounter::counter_offset());
+
+    __ get_method_counters(rmethod, rscratch2, done);
+
+    if (ProfileInterpreter) { // %%% Merge this into MethodData*
+      __ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
+      __ addw(r1, r1, 1);
+      __ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
+    }
+    // Update standard invocation counters
+    __ ldrw(r1, invocation_counter);
+    __ ldrw(r0, backedge_counter);
+
+    __ addw(r1, r1, InvocationCounter::count_increment);
+    __ andw(r0, r0, InvocationCounter::count_mask_value);
+
+    __ strw(r1, invocation_counter);
+    __ addw(r0, r0, r1);                // add both counters
+
+    // profile_method is non-null only for interpreted method so
+    // profile_method != NULL == !native_call
+
+    if (ProfileInterpreter && profile_method != NULL) {
+      // Test to see if we should create a method data oop
+      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
+      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
+      __ cmpw(r0, rscratch2);
+      __ br(Assembler::LT, *profile_method_continue);
+
+      // if no method data exists, go to profile_method
+      __ test_method_data_pointer(rscratch2, *profile_method);
+    }
+
+    {
+      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
+      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
+      __ cmpw(r0, rscratch2);
+      __ br(Assembler::HS, *overflow);
+    }
+    __ bind(done);
+  }
+}
+
+void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
+
+  // Asm interpreter on entry
+  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
+  // Everything as it was on entry
+
+  // InterpreterRuntime::frequency_counter_overflow takes two
+  // arguments, the first (thread) is passed by call_VM, the second
+  // indicates if the counter overflow occurs at a backwards branch
+  // (NULL bcp).  We pass zero for it.  The call returns the address
+  // of the verified entry point for the method or NULL if the
+  // compilation did not complete (either went background or bailed
+  // out).
+  __ mov(c_rarg1, 0);
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::frequency_counter_overflow),
+             c_rarg1);
+
+  __ b(*do_continue);
+}
+
+// See if we've got enough room on the stack for locals plus overhead.
+// The expression stack grows down incrementally, so the normal guard
+// page mechanism will work for that.
+//
+// NOTE: Since the additional locals are also always pushed (wasn't
+// obvious in generate_method_entry) so the guard should work for them
+// too.
+//
+// Args:
+//      r3: number of additional locals this frame needs (what we must check)
+//      rmethod: Method*
+//
+// Kills:
+//      r0
+void InterpreterGenerator::generate_stack_overflow_check(void) {
+
+  // monitor entry size: see picture of stack set
+  // (generate_method_entry) and frame_amd64.hpp
+  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+
+  // total overhead size: entry_size + (saved rbp through expr stack
+  // bottom).  be sure to change this if you add/subtract anything
+  // to/from the overhead area
+  const int overhead_size =
+    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
+
+  const int page_size = os::vm_page_size();
+
+  Label after_frame_check;
+
+  // see if the frame is greater than one page in size. If so,
+  // then we need to verify there is enough stack space remaining
+  // for the additional locals.
+  //
+  // Note that we use SUBS rather than CMP here because the immediate
+  // field of this instruction may overflow.  SUBS can cope with this
+  // because it is a macro that will expand to some number of MOV
+  // instructions and a register operation.
+  __ subs(rscratch1, r3, (page_size - overhead_size) / Interpreter::stackElementSize);
+  __ br(Assembler::LS, after_frame_check);
+
+  // compute rsp as if this were going to be the last frame on
+  // the stack before the red zone
+
+  const Address stack_base(rthread, Thread::stack_base_offset());
+  const Address stack_size(rthread, Thread::stack_size_offset());
+
+  // locals + overhead, in bytes
+  __ mov(r0, overhead_size);
+  __ add(r0, r0, r3, Assembler::LSL, Interpreter::logStackElementSize);  // 2 slots per parameter.
+
+  __ ldr(rscratch1, stack_base);
+  __ ldr(rscratch2, stack_size);
+
+#ifdef ASSERT
+  Label stack_base_okay, stack_size_okay;
+  // verify that thread stack base is non-zero
+  __ cbnz(rscratch1, stack_base_okay);
+  __ stop("stack base is zero");
+  __ bind(stack_base_okay);
+  // verify that thread stack size is non-zero
+  __ cbnz(rscratch2, stack_size_okay);
+  __ stop("stack size is zero");
+  __ bind(stack_size_okay);
+#endif
+
+  // Add stack base to locals and subtract stack size
+  __ sub(rscratch1, rscratch1, rscratch2); // Stack limit
+  __ add(r0, r0, rscratch1);
+
+  // Use the maximum number of pages we might bang.
+  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
+                                                                              (StackRedPages+StackYellowPages);
+
+  // add in the red and yellow zone sizes
+  __ add(r0, r0, max_pages * page_size * 2);
+
+  // check against the current stack bottom
+  __ cmp(sp, r0);
+  __ br(Assembler::HI, after_frame_check);
+
+  // Remove the incoming args, peeling the machine SP back to where it
+  // was in the caller.  This is not strictly necessary, but unless we
+  // do so the stack frame may have a garbage FP; this ensures a
+  // correct call stack that we can always unwind.  The ANDR should be
+  // unnecessary because the sender SP in r13 is always aligned, but
+  // it doesn't hurt.
+  __ andr(sp, r13, -16);
+
+  // Note: the restored frame is not necessarily interpreted.
+  // Use the shared runtime version of the StackOverflowError.
+  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
+  __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
+
+  // all done with frame size check
+  __ bind(after_frame_check);
+}
+
+// Allocate monitor and lock method (asm interpreter)
+//
+// Args:
+//      rmethod: Method*
+//      rlocals: locals
+//
+// Kills:
+//      r0
+//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
+//      rscratch1, rscratch2 (scratch regs)
+void TemplateInterpreterGenerator::lock_method() {
+  // synchronize method
+  const Address access_flags(rmethod, Method::access_flags_offset());
+  const Address monitor_block_top(
+        rfp,
+        frame::interpreter_frame_monitor_block_top_offset * wordSize);
+  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+
+#ifdef ASSERT
+  {
+    Label L;
+    __ ldrw(r0, access_flags);
+    __ tst(r0, JVM_ACC_SYNCHRONIZED);
+    __ br(Assembler::NE, L);
+    __ stop("method doesn't need synchronization");
+    __ bind(L);
+  }
+#endif // ASSERT
+
+  // get synchronization object
+  {
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+    Label done;
+    __ ldrw(r0, access_flags);
+    __ tst(r0, JVM_ACC_STATIC);
+    // get receiver (assume this is frequent case)
+    __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
+    __ br(Assembler::EQ, done);
+    __ ldr(r0, Address(rmethod, Method::const_offset()));
+    __ ldr(r0, Address(r0, ConstMethod::constants_offset()));
+    __ ldr(r0, Address(r0,
+                           ConstantPool::pool_holder_offset_in_bytes()));
+    __ ldr(r0, Address(r0, mirror_offset));
+
+#ifdef ASSERT
+    {
+      Label L;
+      __ cbnz(r0, L);
+      __ stop("synchronization object is NULL");
+      __ bind(L);
+    }
+#endif // ASSERT
+
+    __ bind(done);
+  }
+
+  // add space for monitor & lock
+  __ sub(sp, sp, entry_size); // add space for a monitor entry
+  __ sub(esp, esp, entry_size);
+  __ mov(rscratch1, esp);
+  __ str(rscratch1, monitor_block_top);  // set new monitor block top
+  // store object
+  __ str(r0, Address(esp, BasicObjectLock::obj_offset_in_bytes()));
+  __ mov(c_rarg1, esp); // object address
+  __ lock_object(c_rarg1);
+}
+
+// Generate a fixed interpreter frame. This is identical setup for
+// interpreted methods and for native methods hence the shared code.
+//
+// Args:
+//      lr: return address
+//      rmethod: Method*
+//      rlocals: pointer to locals
+//      rcpool: cp cache
+//      stack_pointer: previous sp
+void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
+  // initialize fixed part of activation frame
+  if (native_call) {
+    __ sub(esp, sp, 12 *  wordSize);
+    __ mov(rbcp, zr);
+    __ stp(esp, zr, Address(__ pre(sp, -12 * wordSize)));
+    // add 2 zero-initialized slots for native calls
+    __ stp(zr, zr, Address(sp, 10 * wordSize));
+  } else {
+    __ sub(esp, sp, 10 *  wordSize);
+    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));      // get ConstMethod
+    __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset())); // get codebase
+    __ stp(esp, rbcp, Address(__ pre(sp, -10 * wordSize)));
+  }
+
+  if (ProfileInterpreter) {
+    Label method_data_continue;
+    __ ldr(rscratch1, Address(rmethod, Method::method_data_offset()));
+    __ cbz(rscratch1, method_data_continue);
+    __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
+    __ bind(method_data_continue);
+    __ stp(rscratch1, rmethod, Address(sp, 4 * wordSize));  // save Method* and mdp (method data pointer)
+  } else {
+    __ stp(zr, rmethod, Address(sp, 4 * wordSize));        // save Method* (no mdp)
+  }
+
+  __ ldr(rcpool, Address(rmethod, Method::const_offset()));
+  __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
+  __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
+  __ stp(rlocals, rcpool, Address(sp, 2 * wordSize));
+
+  __ stp(rfp, lr, Address(sp, 8 * wordSize));
+  __ lea(rfp, Address(sp, 8 * wordSize));
+
+  // set sender sp
+  // leave last_sp as null
+  __ stp(zr, r13, Address(sp, 6 * wordSize));
+
+  // Move SP out of the way
+  if (! native_call) {
+    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
+    __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
+    __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
+    __ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
+    __ andr(sp, rscratch1, -16);
+  }
+}
+
+// End of helpers
+
+// Various method entries
+//------------------------------------------------------------------------------------------------------------------------
+//
+//
+
+// Method entry for java.lang.ref.Reference.get.
+address InterpreterGenerator::generate_Reference_get_entry(void) {
+#if INCLUDE_ALL_GCS
+  // Code: _aload_0, _getfield, _areturn
+  // parameter size = 1
+  //
+  // The code that gets generated by this routine is split into 2 parts:
+  //    1. The "intrinsified" code for G1 (or any SATB based GC),
+  //    2. The slow path - which is an expansion of the regular method entry.
+  //
+  // Notes:-
+  // * In the G1 code we do not check whether we need to block for
+  //   a safepoint. If G1 is enabled then we must execute the specialized
+  //   code for Reference.get (except when the Reference object is null)
+  //   so that we can log the value in the referent field with an SATB
+  //   update buffer.
+  //   If the code for the getfield template is modified so that the
+  //   G1 pre-barrier code is executed when the current method is
+  //   Reference.get() then going through the normal method entry
+  //   will be fine.
+  // * The G1 code can, however, check the receiver object (the instance
+  //   of java.lang.Reference) and jump to the slow path if null. If the
+  //   Reference object is null then we obviously cannot fetch the referent
+  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
+  //   regular method entry code to generate the NPE.
+  //
+  // This code is based on generate_accessor_enty.
+  //
+  // rmethod: Method*
+  // r13: senderSP must preserve for slow path, set SP to it on fast path
+
+  address entry = __ pc();
+
+  const int referent_offset = java_lang_ref_Reference::referent_offset;
+  guarantee(referent_offset > 0, "referent offset not initialized");
+
+  if (UseG1GC) {
+    Label slow_path;
+    const Register local_0 = c_rarg0;
+    // Check if local 0 != NULL
+    // If the receiver is null then it is OK to jump to the slow path.
+    __ ldr(local_0, Address(esp, 0));
+    __ cbz(local_0, slow_path);
+
+
+    // Load the value of the referent field.
+    const Address field_address(local_0, referent_offset);
+    __ load_heap_oop(local_0, field_address);
+
+    // Generate the G1 pre-barrier code to log the value of
+    // the referent field in an SATB buffer.
+    __ enter(); // g1_write may call runtime
+    __ g1_write_barrier_pre(noreg /* obj */,
+                            local_0 /* pre_val */,
+                            rthread /* thread */,
+                            rscratch2 /* tmp */,
+                            true /* tosca_live */,
+                            true /* expand_call */);
+    __ leave();
+    // areturn
+    __ andr(sp, r13, -16);  // done with stack
+    __ ret(lr);
+
+    // generate a vanilla interpreter entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
+    return entry;
+  }
+#endif // INCLUDE_ALL_GCS
+
+  // If G1 is not enabled then attempt to go through the accessor entry point
+  // Reference.get is an accessor
+  return generate_accessor_entry();
+}
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.update(int crc, int b)
+ */
+address InterpreterGenerator::generate_CRC32_update_entry() {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rmethod: Method*
+    // r13: senderSP must preserved for slow path
+    // esp: args
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    unsigned long offset;
+    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
+    __ ldrw(rscratch1, Address(rscratch1, offset));
+    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
+    __ cbnz(rscratch1, slow_path);
+
+    // We don't generate local frame and don't align stack because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = c_rarg0;  // crc
+    const Register val = c_rarg1;  // source java byte value
+    const Register tbl = c_rarg2;  // scratch
+
+    // Arguments are reversed on java expression stack
+    __ ldrw(val, Address(esp, 0));              // byte value
+    __ ldrw(crc, Address(esp, wordSize));       // Initial CRC
+
+    __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
+    __ add(tbl, tbl, offset);
+
+    __ ornw(crc, zr, crc); // ~crc
+    __ update_byte_crc32(crc, val, tbl);
+    __ ornw(crc, zr, crc); // ~crc
+
+    // result in c_rarg0
+
+    __ andr(sp, r13, -16);
+    __ ret(lr);
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
+ *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
+ */
+address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rmethod,: Method*
+    // r13: senderSP must preserved for slow path
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    unsigned long offset;
+    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
+    __ ldrw(rscratch1, Address(rscratch1, offset));
+    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
+    __ cbnz(rscratch1, slow_path);
+
+    // We don't generate a local frame and don't align the stack because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = c_rarg0;  // crc
+    const Register buf = c_rarg1;  // source java byte array address
+    const Register len = c_rarg2;  // length
+    const Register off = len;      // offset (its live range never overlaps 'len')
+
+    // Arguments are reversed on the Java expression stack
+    // Calculate address of start element
+    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
+      __ ldr(buf, Address(esp, 2*wordSize)); // long buf
+      __ ldrw(off, Address(esp, wordSize)); // offset
+      __ add(buf, buf, off); // + offset
+      __ ldrw(crc,   Address(esp, 4*wordSize)); // Initial CRC
+    } else {
+      __ ldr(buf, Address(esp, 2*wordSize)); // byte[] array
+      __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
+      __ ldrw(off, Address(esp, wordSize)); // offset
+      __ add(buf, buf, off); // + offset
+      __ ldrw(crc,   Address(esp, 3*wordSize)); // Initial CRC
+    }
+    // Can now load 'len' since we're finished with 'off'
+    __ ldrw(len, Address(esp, 0x0)); // Length
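+
+    // Expression-stack layout assumed by the loads above (a sketch; one
+    // word per slot, a long takes two slots, top of stack at esp):
+    //
+    //   updateBytes:      [esp+3w]=crc  [esp+2w]=byte[]   [esp+1w]=off  [esp+0]=len
+    //   updateByteBuffer: [esp+4w]=crc  [esp+2w]=long buf [esp+1w]=off  [esp+0]=len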
+
+    __ andr(sp, r13, -16); // Restore the caller's SP
+
+    // We are frameless so we can just jump to the stub.
+    __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
+
+void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
+  // Bang each page in the shadow zone. We can't assume it's been done for
+  // an interpreter frame with greater than a page of locals, so each page
+  // needs to be checked. For native calls a single bang of the last shadow
+  // page is sufficient.
+  if (UseStackBanging) {
+    const int start_page = native_call ? StackShadowPages : 1;
+    const int page_size = os::vm_page_size();
+    for (int pages = start_page; pages <= StackShadowPages; pages++) {
+      __ sub(rscratch2, sp, pages*page_size);
+      __ str(zr, Address(rscratch2));
+    }
+  }
+}
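+
+// For example, with a 4K page and StackShadowPages == 20 the non-native
+// case above emits twenty probes of the form (a sketch, not literal
+// disassembly):
+//
+//   sub rscratch2, sp, #1*4096  ;  str zr, [rscratch2]
+//   ...
+//   sub rscratch2, sp, #20*4096 ;  str zr, [rscratch2]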
+
+
+// Interpreter stub for calling a native method. (asm interpreter)
+// This sets up a somewhat different looking stack for calling the
+// native method than the typical interpreter frame setup.
+address InterpreterGenerator::generate_native_entry(bool synchronized) {
+  // determine code generation flags
+  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+
+  // r1: Method*
+  // rscratch1: sender sp
+
+  address entry_point = __ pc();
+
+  const Address constMethod       (rmethod, Method::const_offset());
+  const Address access_flags      (rmethod, Method::access_flags_offset());
+  const Address size_of_parameters(r2, ConstMethod::
+                                       size_of_parameters_offset());
+
+  // get parameter size (always needed)
+  __ ldr(r2, constMethod);
+  __ load_unsigned_short(r2, size_of_parameters);
+
+  // native calls don't need the stack size check since they have no
+  // expression stack and the arguments are already on the stack and
+  // we only add a handful of words to the stack
+
+  // rmethod: Method*
+  // r2: size of parameters
+  // rscratch1: sender sp
+
+  // for natives the size of locals is zero
+
+  // compute beginning of parameters (rlocals)
+  __ add(rlocals, esp, r2, ext::uxtx, 3);
+  __ add(rlocals, rlocals, -wordSize);
+
+  // Pull SP back to minimum size: this avoids holes in the stack
+  __ andr(sp, esp, -16);
+
+  // initialize fixed part of activation frame
+  generate_fixed_frame(true);
+#ifndef PRODUCT
+  // tell the simulator that a method has been entered
+  if (NotifySimulator) {
+    __ notify(Assembler::method_entry);
+  }
+#endif
+
+  // make sure method is native & not abstract
+#ifdef ASSERT
+  __ ldrw(r0, access_flags);
+  {
+    Label L;
+    __ tst(r0, JVM_ACC_NATIVE);
+    __ br(Assembler::NE, L);
+    __ stop("tried to execute non-native method as native");
+    __ bind(L);
+  }
+  {
+    Label L;
+    __ tst(r0, JVM_ACC_ABSTRACT);
+    __ br(Assembler::EQ, L);
+    __ stop("tried to execute abstract method in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // Since at this point in the method invocation the exception
+  // handler would try to exit the monitor of synchronized methods
+  // which hasn't been entered yet, we set the thread local variable
+  // _do_not_unlock_if_synchronized to true. The remove_activation
+  // will check this flag.
+
+  const Address do_not_unlock_if_synchronized(rthread,
+        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
+  __ mov(rscratch2, true);
+  __ strb(rscratch2, do_not_unlock_if_synchronized);
+
+  // increment invocation count & check for overflow
+  Label invocation_counter_overflow;
+  if (inc_counter) {
+    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
+  }
+
+  Label continue_after_compile;
+  __ bind(continue_after_compile);
+
+  bang_stack_shadow_pages(true);
+
+  // reset the _do_not_unlock_if_synchronized flag
+  __ strb(zr, do_not_unlock_if_synchronized);
+
+  // check for synchronized methods
+  // Must happen AFTER invocation_counter check and stack overflow check,
+  // so the method is not locked if the counter overflows.
+  if (synchronized) {
+    lock_method();
+  } else {
+    // no synchronization necessary
+#ifdef ASSERT
+    {
+      Label L;
+      __ ldrw(r0, access_flags);
+      __ tst(r0, JVM_ACC_SYNCHRONIZED);
+      __ br(Assembler::EQ, L);
+      __ stop("method needs synchronization");
+      __ bind(L);
+    }
+#endif
+  }
+
+  // start execution
+#ifdef ASSERT
+  {
+    Label L;
+    const Address monitor_block_top(rfp,
+                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
+    __ ldr(rscratch1, monitor_block_top);
+    __ cmp(esp, rscratch1);
+    __ br(Assembler::EQ, L);
+    __ stop("broken stack frame setup in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // jvmti support
+  __ notify_method_entry();
+
+  // work registers
+  const Register t = r17;
+  const Register result_handler = r19;
+
+  // allocate space for parameters
+  __ ldr(t, Address(rmethod, Method::const_offset()));
+  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
+
+  __ sub(rscratch1, esp, t, ext::uxtx, Interpreter::logStackElementSize);
+  __ andr(sp, rscratch1, -16);
+  __ mov(esp, rscratch1);
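+
+  // The three instructions above amount to (a sketch):
+  //
+  //   esp = esp - nparams * wordSize;  // room for the outgoing arguments
+  //   sp  = align_down(esp, 16);       // AArch64 ABI: sp stays 16-aligned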
+
+  // get signature handler
+  {
+    Label L;
+    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
+    __ cbnz(t, L);
+    __ call_VM(noreg,
+               CAST_FROM_FN_PTR(address,
+                                InterpreterRuntime::prepare_native_call),
+               rmethod);
+    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
+    __ bind(L);
+  }
+
+  // call signature handler
+  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
+         "adjust this code");
+  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
+         "adjust this code");
+  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
+          "adjust this code");
+
+  // The generated handlers do not touch rmethod (the method).
+  // However, large signatures cannot be cached and are generated
+  // each time here.  The slow-path generator can do a GC on return,
+  // so we must reload it after the call.
+  __ blr(t);
+  __ get_method(rmethod);        // slow path can do a GC, reload rmethod
+
+  // result handler is in r0
+  // set result handler
+  __ mov(result_handler, r0);
+  // pass mirror handle if static call
+  {
+    Label L;
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
+    __ tst(t, JVM_ACC_STATIC);
+    __ br(Assembler::EQ, L);
+    // get mirror
+    __ ldr(t, Address(rmethod, Method::const_offset()));
+    __ ldr(t, Address(t, ConstMethod::constants_offset()));
+    __ ldr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
+    __ ldr(t, Address(t, mirror_offset));
+    // copy mirror into activation frame
+    __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
+    // pass handle to mirror
+    __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
+    __ bind(L);
+  }
+
+  // get native function entry point in r10
+  {
+    Label L;
+    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
+    address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
+    __ mov(rscratch2, unsatisfied);
+    __ ldr(rscratch2, rscratch2);
+    __ cmp(r10, rscratch2);
+    __ br(Assembler::NE, L);
+    __ call_VM(noreg,
+               CAST_FROM_FN_PTR(address,
+                                InterpreterRuntime::prepare_native_call),
+               rmethod);
+    __ get_method(rmethod);
+    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
+    __ bind(L);
+  }
+
+  // pass JNIEnv
+  __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));
+
+  // It is enough that the pc() points into the right code
+  // segment. It does not have to be the correct return pc.
+  __ set_last_Java_frame(esp, rfp, (address)NULL, rscratch1);
+
+  // change thread state
+#ifdef ASSERT
+  {
+    Label L;
+    __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
+    __ cmp(t, _thread_in_Java);
+    __ br(Assembler::EQ, L);
+    __ stop("Wrong thread state in native stub");
+    __ bind(L);
+  }
+#endif
+
+  // Change state to native
+  __ mov(rscratch1, _thread_in_native);
+  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
+  __ stlrw(rscratch1, rscratch2);
+
+  // Call the native method.
+  __ blrt(r10, rscratch1);
+  __ maybe_isb();
+  __ get_method(rmethod);
+  // result potentially in r0 or v0
+
+  // make room for the pushes we're about to do
+  __ sub(rscratch1, esp, 4 * wordSize);
+  __ andr(sp, rscratch1, -16);
+
+  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
+  // in order to extract the result of a method call. If the order of these
+  // pushes changes or anything else is added to the stack then the code in
+  // interpreter_frame_result must also change.
+  __ push(dtos);
+  __ push(ltos);
+
+  // change thread state
+  __ mov(rscratch1, _thread_in_native_trans);
+  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
+  __ stlrw(rscratch1, rscratch2);
+
+  if (os::is_MP()) {
+    if (UseMembar) {
+      // Force this write out before the read below
+      __ dsb(Assembler::SY);
+    } else {
+      // Write serialization page so VM thread can do a pseudo remote membar.
+      // We use the current thread pointer to calculate a thread specific
+      // offset to write to within the page. This minimizes bus traffic
+      // due to cache line collision.
+      __ serialize_memory(rthread, rscratch2);
+    }
+  }
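+
+  // The serialization-page alternative, in pseudo-code (a sketch of the
+  // usual HotSpot scheme; the names are illustrative):
+  //
+  //   serialization_page[offset_for(rthread)] = 0;  // plain store
+  //
+  // The VM thread can make this page momentarily unwritable, so every
+  // in-flight store traps and thereby serializes with the VM thread,
+  // acting as the pseudo remote membar mentioned above.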
+
+  // check for safepoint operation in progress and/or pending suspend requests
+  {
+    Label Continue;
+    {
+      unsigned long offset;
+      __ adrp(rscratch2, SafepointSynchronize::address_of_state(), offset);
+      __ ldrw(rscratch2, Address(rscratch2, offset));
+    }
+    assert(SafepointSynchronize::_not_synchronized == 0,
+           "SafepointSynchronize::_not_synchronized");
+    Label L;
+    __ cbnz(rscratch2, L);
+    __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
+    __ cbz(rscratch2, Continue);
+    __ bind(L);
+
+    // Don't use call_VM as it will see a possible pending exception
+    // and forward it and never return here, preventing us from
+    // clearing _last_native_pc down below. So we do a runtime call by
+    // hand.
+    //
+    __ mov(c_rarg0, rthread);
+    __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
+    __ blrt(rscratch2, 1, 0, 0);
+    __ maybe_isb();
+    __ get_method(rmethod);
+    __ reinit_heapbase();
+    __ bind(Continue);
+  }
+
+  // change thread state
+  __ mov(rscratch1, _thread_in_Java);
+  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
+  __ stlrw(rscratch1, rscratch2);
+
+  // reset_last_Java_frame
+  __ reset_last_Java_frame(true, true);
+
+  // reset handle block
+  __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
+  __ str(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));
+
+  // If result is an oop unbox and store it in frame where gc will see it
+  // and result handler will pick it up
+
+  {
+    Label no_oop, store_result;
+    __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
+    __ cmp(t, result_handler);
+    __ br(Assembler::NE, no_oop);
+    // retrieve result
+    __ pop(ltos);
+    __ cbz(r0, store_result);
+    __ ldr(r0, Address(r0, 0));
+    __ bind(store_result);
+    __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
+    // keep the stack depth as expected by pushing an oop which will eventually be discarded
+    __ push(ltos);
+    __ bind(no_oop);
+  }
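+
+  // In pseudo-code the block above performs (a sketch; a native method
+  // returns a JNI handle, i.e. an oop*, rather than a raw oop):
+  //
+  //   oop result = (handle == NULL) ? NULL : *handle;  // unbox the handle
+  //   fp[oop_temp_offset] = result;                    // now visible to GC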
+
+  {
+    Label no_reguard;
+    __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
+    __ ldrb(rscratch1, Address(rscratch1));
+    __ cmp(rscratch1, JavaThread::stack_guard_yellow_disabled);
+    __ br(Assembler::NE, no_reguard);
+
+    __ pusha(); // XXX only save smashed registers
+    __ mov(c_rarg0, rthread);
+    __ mov(rscratch2, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
+    __ blrt(rscratch2, 0, 0, 0);
+    __ popa(); // XXX only restore smashed registers
+    __ bind(no_reguard);
+  }
+
+  // The method register is junk from after the thread_in_native transition
+  // until here.  Also we can't call_VM until the bcp has been
+  // restored.  We need bcp for throwing the exception below, so get it now.
+  __ get_method(rmethod);
+
+  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
+  // rbcp == code_base()
+  __ ldr(rbcp, Address(rmethod, Method::const_offset()));   // get ConstMethod*
+  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));          // get codebase
+  // handle exceptions (exception handling will handle unlocking!)
+  {
+    Label L;
+    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
+    __ cbz(rscratch1, L);
+    // Note: At some point we may want to unify this with the code
+    // used in call_VM_base(); i.e., we should use the
+    // StubRoutines::forward_exception code. For now this doesn't work
+    // here because the rsp is not correctly set at this point.
+    __ MacroAssembler::call_VM(noreg,
+                               CAST_FROM_FN_PTR(address,
+                               InterpreterRuntime::throw_pending_exception));
+    __ should_not_reach_here();
+    __ bind(L);
+  }
+
+  // do unlocking if necessary
+  {
+    Label L;
+    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
+    __ tst(t, JVM_ACC_SYNCHRONIZED);
+    __ br(Assembler::EQ, L);
+    // the code below should be shared with interpreter macro
+    // assembler implementation
+    {
+      Label unlock;
+      // BasicObjectLock will be first in list, since this is a
+      // synchronized method. However, need to check that the object
+      // has not been unlocked by an explicit monitorexit bytecode.
+
+      // the monitor is expected in c_rarg1 for the slow unlock path
+      __ lea (c_rarg1, Address(rfp,   // address of first monitor
+                               (intptr_t)(frame::interpreter_frame_initial_sp_offset *
+                                          wordSize - sizeof(BasicObjectLock))));
+
+      __ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
+      __ cbnz(t, unlock);
+
+      // Entry already unlocked, need to throw exception
+      __ MacroAssembler::call_VM(noreg,
+                                 CAST_FROM_FN_PTR(address,
+                   InterpreterRuntime::throw_illegal_monitor_state_exception));
+      __ should_not_reach_here();
+
+      __ bind(unlock);
+      __ unlock_object(c_rarg1);
+    }
+    __ bind(L);
+  }
+
+  // jvmti support
+  // Note: This must happen _after_ handling/throwing any exceptions since
+  //       the exception handler code notifies the runtime of method exits
+  //       too. If this happens before, method entry/exit notifications are
+  //       not properly paired (was bug - gri 11/22/99).
+  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
+
+  // restore potential result in r0 (and d0 for floating-point results),
+  // then call the result handler to convert and handle the result
+
+  __ pop(ltos);
+  __ pop(dtos);
+
+  __ blr(result_handler);
+
+  // remove activation
+  __ ldr(esp, Address(rfp,
+                    frame::interpreter_frame_sender_sp_offset *
+                    wordSize)); // get sender sp
+  // remove frame anchor
+  __ leave();
+
+  // restore sender sp
+  __ mov(sp, esp);
+
+  __ ret(lr);
+
+  if (inc_counter) {
+    // Handle overflow of counter and compile method
+    __ bind(invocation_counter_overflow);
+    generate_counter_overflow(&continue_after_compile);
+  }
+
+  return entry_point;
+}
+
+//
+// Generic interpreted method entry to (asm) interpreter
+//
+address InterpreterGenerator::generate_normal_entry(bool synchronized) {
+  // determine code generation flags
+  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+
+  // rscratch1: sender sp
+  address entry_point = __ pc();
+
+  const Address constMethod(rmethod, Method::const_offset());
+  const Address access_flags(rmethod, Method::access_flags_offset());
+  const Address size_of_parameters(r3,
+                                   ConstMethod::size_of_parameters_offset());
+  const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());
+
+  // get parameter size (always needed)
+  // need to load the const method first
+  __ ldr(r3, constMethod);
+  __ load_unsigned_short(r2, size_of_parameters);
+
+  // r2: size of parameters
+
+  __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
+  __ sub(r3, r3, r2); // r3 = no. of additional locals
+
+  // see if we've got enough room on the stack for locals plus overhead.
+  generate_stack_overflow_check();
+
+  // compute beginning of parameters (rlocals)
+  __ add(rlocals, esp, r2, ext::uxtx, 3);
+  __ sub(rlocals, rlocals, wordSize);
+
+  // Make room for locals
+  __ sub(rscratch1, esp, r3, ext::uxtx, 3);
+  __ andr(sp, rscratch1, -16);
+
+  // r3 - # of additional locals
+  // allocate space for locals
+  // explicitly initialize locals
+  {
+    Label exit, loop;
+    __ ands(zr, r3, r3);
+    __ br(Assembler::LE, exit); // do nothing if r3 <= 0
+    __ bind(loop);
+    __ str(zr, Address(__ post(rscratch1, wordSize)));
+    __ sub(r3, r3, 1); // until everything initialized
+    __ cbnz(r3, loop);
+    __ bind(exit);
+  }
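+
+  // Equivalent C sketch of the zeroing loop above (n = r3 additional
+  // locals, p = rscratch1, the new stack bottom):
+  //
+  //   while (n-- > 0) *p++ = 0;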
+
+  // And the base dispatch table
+  __ get_dispatch();
+
+  // initialize fixed part of activation frame
+  generate_fixed_frame(false);
+#ifndef PRODUCT
+  // tell the simulator that a method has been entered
+  if (NotifySimulator) {
+    __ notify(Assembler::method_entry);
+  }
+#endif
+  // make sure method is not native & not abstract
+#ifdef ASSERT
+  __ ldrw(r0, access_flags);
+  {
+    Label L;
+    __ tst(r0, JVM_ACC_NATIVE);
+    __ br(Assembler::EQ, L);
+    __ stop("tried to execute native method as non-native");
+    __ bind(L);
+  }
+  {
+    Label L;
+    __ tst(r0, JVM_ACC_ABSTRACT);
+    __ br(Assembler::EQ, L);
+    __ stop("tried to execute abstract method in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // Since at this point in the method invocation the exception
+  // handler would try to exit the monitor of synchronized methods
+  // which hasn't been entered yet, we set the thread local variable
+  // _do_not_unlock_if_synchronized to true. The remove_activation
+  // will check this flag.
+
+   const Address do_not_unlock_if_synchronized(rthread,
+        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
+  __ mov(rscratch2, true);
+  __ strb(rscratch2, do_not_unlock_if_synchronized);
+
+  // increment invocation count & check for overflow
+  Label invocation_counter_overflow;
+  Label profile_method;
+  Label profile_method_continue;
+  if (inc_counter) {
+    generate_counter_incr(&invocation_counter_overflow,
+                          &profile_method,
+                          &profile_method_continue);
+    if (ProfileInterpreter) {
+      __ bind(profile_method_continue);
+    }
+  }
+
+  Label continue_after_compile;
+  __ bind(continue_after_compile);
+
+  bang_stack_shadow_pages(false);
+
+  // reset the _do_not_unlock_if_synchronized flag
+  __ strb(zr, do_not_unlock_if_synchronized);
+
+  // check for synchronized methods
+  // Must happen AFTER invocation_counter check and stack overflow check,
+  // so the method is not locked if the counter overflows.
+  if (synchronized) {
+    // Allocate monitor and lock method
+    lock_method();
+  } else {
+    // no synchronization necessary
+#ifdef ASSERT
+    {
+      Label L;
+      __ ldrw(r0, access_flags);
+      __ tst(r0, JVM_ACC_SYNCHRONIZED);
+      __ br(Assembler::EQ, L);
+      __ stop("method needs synchronization");
+      __ bind(L);
+    }
+#endif
+  }
+
+  // start execution
+#ifdef ASSERT
+  {
+    Label L;
+    const Address monitor_block_top(rfp,
+                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
+    __ ldr(rscratch1, monitor_block_top);
+    __ cmp(esp, rscratch1);
+    __ br(Assembler::EQ, L);
+    __ stop("broken stack frame setup in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // jvmti support
+  __ notify_method_entry();
+
+  __ dispatch_next(vtos);
+
+  // invocation counter overflow
+  if (inc_counter) {
+    if (ProfileInterpreter) {
+      // We have decided to profile this method in the interpreter
+      __ bind(profile_method);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+      __ set_method_data_pointer_for_bcp();
+      // reload the method; this is probably unnecessary here, but harmless
+      __ get_method(r1);
+      __ b(profile_method_continue);
+    }
+    // Handle overflow of counter and compile method
+    __ bind(invocation_counter_overflow);
+    generate_counter_overflow(&continue_after_compile);
+  }
+
+  return entry_point;
+}
+
+//-----------------------------------------------------------------------------
+// Exceptions
+
+void TemplateInterpreterGenerator::generate_throw_exception() {
+  // Entry point in previous activation (i.e., if the caller was
+  // interpreted)
+  Interpreter::_rethrow_exception_entry = __ pc();
+  // Restore sp to interpreter_frame_last_sp even though we are going
+  // to empty the expression stack for the exception processing.
+  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+  // r0: exception
+  // r3: return address/pc that threw exception
+  __ restore_bcp();    // rbcp points to call/send
+  __ restore_locals();
+  __ restore_constant_pool_cache();
+  __ reinit_heapbase();  // restore rheapbase as heapbase.
+  __ get_dispatch();
+
+#ifndef PRODUCT
+  // tell the simulator that the caller method has been reentered
+  if (NotifySimulator) {
+    __ get_method(rmethod);
+    __ notify(Assembler::method_reentry);
+  }
+#endif
+  // Entry point for exceptions thrown within interpreter code
+  Interpreter::_throw_exception_entry = __ pc();
+  // If we came here via a NullPointerException on the receiver of a
+  // method, rmethod may be corrupt.
+  __ get_method(rmethod);
+  // expression stack is undefined here
+  // r0: exception
+  // rbcp: exception bcp
+  __ verify_oop(r0);
+  __ mov(c_rarg1, r0);
+
+  // expression stack must be empty before entering the VM in case of
+  // an exception
+  __ empty_expression_stack();
+  // find exception handler address and preserve exception oop
+  __ call_VM(r3,
+             CAST_FROM_FN_PTR(address,
+                          InterpreterRuntime::exception_handler_for_exception),
+             c_rarg1);
+
+  // Calculate stack limit
+  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
+  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
+  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
+  __ ldr(rscratch2,
+         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
+  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
+  __ andr(sp, rscratch1, -16);
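+
+  // I.e. (a sketch of the computation above, with wordSize == 8):
+  //
+  //   sp = align_down(initial_sp - (max_stack + monitor_size + 4) * 8, 16);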
+
+  // r0: exception handler entry point
+  // r3: preserved exception oop
+  // rbcp: bcp for exception handler
+  __ push_ptr(r3); // push exception which is now the only value on the stack
+  __ br(r0); // jump to exception handler (may be _remove_activation_entry!)
+
+  // If the exception is not handled in the current frame the frame is
+  // removed and the exception is rethrown (i.e. exception
+  // continuation is _rethrow_exception).
+  //
+  // Note: At this point the bci is still the bci for the instruction
+  // which caused the exception and the expression stack is
+  // empty. Thus, for any VM calls at this point, GC will find a legal
+  // oop map (with empty expression stack).
+
+  //
+  // JVMTI PopFrame support
+  //
+
+  Interpreter::_remove_activation_preserving_args_entry = __ pc();
+  __ empty_expression_stack();
+  // Set the popframe_processing bit in pending_popframe_condition
+  // indicating that we are currently handling popframe, so that
+  // call_VMs that may happen later do not trigger new popframe
+  // handling cycles.
+  __ ldrw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
+  __ orr(r3, r3, JavaThread::popframe_processing_bit);
+  __ strw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
+
+  {
+    // Check to see whether we are returning to a deoptimized frame.
+    // (The PopFrame call ensures that the caller of the popped frame is
+    // either interpreted or compiled and deoptimizes it if compiled.)
+    // In this case, we can't call dispatch_next() after the frame is
+    // popped, but instead must save the incoming arguments and restore
+    // them after deoptimization has occurred.
+    //
+    // Note that we don't compare the return PC against the
+    // deoptimization blob's unpack entry because of the presence of
+    // adapter frames in C2.
+    Label caller_not_deoptimized;
+    __ ldr(c_rarg1, Address(rfp, frame::return_addr_offset * wordSize));
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
+                               InterpreterRuntime::interpreter_contains), c_rarg1);
+    __ cbnz(r0, caller_not_deoptimized);
+
+    // Compute size of arguments for saving when returning to
+    // deoptimized caller
+    __ get_method(r0);
+    __ ldr(r0, Address(r0, Method::const_offset()));
+    __ load_unsigned_short(r0, Address(r0, in_bytes(ConstMethod::
+                                                    size_of_parameters_offset())));
+    __ lsl(r0, r0, Interpreter::logStackElementSize);
+    __ restore_locals(); // XXX do we need this?
+    __ sub(rlocals, rlocals, r0);
+    __ add(rlocals, rlocals, wordSize);
+    // Save these arguments
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
+                                           Deoptimization::
+                                           popframe_preserve_args),
+                          rthread, r0, rlocals);
+
+    __ remove_activation(vtos,
+                         /* throw_monitor_exception */ false,
+                         /* install_monitor_exception */ false,
+                         /* notify_jvmdi */ false);
+
+    // Inform deoptimization that it is responsible for restoring
+    // these arguments
+    __ mov(rscratch1, JavaThread::popframe_force_deopt_reexecution_bit);
+    __ strw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
+
+    // Continue in deoptimization handler
+    __ ret(lr);
+
+    __ bind(caller_not_deoptimized);
+  }
+
+  __ remove_activation(vtos,
+                       /* throw_monitor_exception */ false,
+                       /* install_monitor_exception */ false,
+                       /* notify_jvmdi */ false);
+
+  // Restore the last_sp and null it out
+  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+
+  __ restore_bcp();
+  __ restore_locals();
+  __ restore_constant_pool_cache();
+  __ get_method(rmethod);
+
+  // The method data pointer was incremented already during
+  // call profiling. We have to restore the mdp for the current bcp.
+  if (ProfileInterpreter) {
+    __ set_method_data_pointer_for_bcp();
+  }
+
+  // Clear the popframe condition flag
+  __ strw(zr, Address(rthread, JavaThread::popframe_condition_offset()));
+  assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");
+
+#if INCLUDE_JVMTI
+  {
+    Label L_done;
+
+    __ ldrb(rscratch1, Address(rbcp, 0));
+    __ cmpw(rscratch1, Bytecodes::_invokestatic);
+    __ br(Assembler::NE, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+
+    __ ldr(c_rarg0, Address(rlocals, 0));
+    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp);
+
+    __ cbz(r0, L_done);
+
+    __ str(r0, Address(esp, 0));
+    __ bind(L_done);
+  }
+#endif // INCLUDE_JVMTI
+
+  // Restore machine SP
+  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
+  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
+  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
+  __ ldr(rscratch2,
+         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
+  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
+  __ andr(sp, rscratch1, -16);
+
+  __ dispatch_next(vtos);
+  // end of PopFrame support
+
+  Interpreter::_remove_activation_entry = __ pc();
+
+  // preserve exception over this code sequence
+  __ pop_ptr(r0);
+  __ str(r0, Address(rthread, JavaThread::vm_result_offset()));
+  // remove the activation (without doing throws on illegalMonitorExceptions)
+  __ remove_activation(vtos, false, true, false);
+  // restore exception
+  __ get_vm_result(r0, rthread);
+
+  // In between activations - previous activation type unknown yet
+  // compute continuation point - the continuation point expects the
+  // following registers set up:
+  //
+  // r0: exception
+  // lr: return address/pc that threw exception
+  // rsp: expression stack of caller
+  // rfp: fp of caller
+  // FIXME: There's no point saving LR here because VM calls don't trash it
+  __ stp(r0, lr, Address(__ pre(sp, -2 * wordSize)));  // save exception & return address
+  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
+                          SharedRuntime::exception_handler_for_return_address),
+                        rthread, lr);
+  __ mov(r1, r0);                               // save exception handler
+  __ ldp(r0, lr, Address(__ post(sp, 2 * wordSize)));  // restore exception & return address
+  // We might be returning to a deopt handler that expects r3 to
+  // contain the exception pc
+  __ mov(r3, lr);
+  // Note that an "issuing PC" is actually the next PC after the call
+  __ br(r1);                                    // jump to exception
+                                                // handler of caller
+}
+
+
+//
+// JVMTI ForceEarlyReturn support
+//
+address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
+  address entry = __ pc();
+
+  __ restore_bcp();
+  __ restore_locals();
+  __ empty_expression_stack();
+  __ load_earlyret_value(state);
+
+  __ ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
+  Address cond_addr(rscratch1, JvmtiThreadState::earlyret_state_offset());
+
+  // Clear the earlyret state
+  assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
+  __ str(zr, cond_addr);
+
+  __ remove_activation(state,
+                       false, /* throw_monitor_exception */
+                       false, /* install_monitor_exception */
+                       true); /* notify_jvmdi */
+  __ ret(lr);
+
+  return entry;
+} // end of ForceEarlyReturn support
+
+
+
+//-----------------------------------------------------------------------------
+// Helper for vtos entry point generation
+
+void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
+                                                         address& bep,
+                                                         address& cep,
+                                                         address& sep,
+                                                         address& aep,
+                                                         address& iep,
+                                                         address& lep,
+                                                         address& fep,
+                                                         address& dep,
+                                                         address& vep) {
+  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
+  Label L;
+  aep = __ pc();  __ push_ptr();  __ b(L);
+  fep = __ pc();  __ push_f();    __ b(L);
+  dep = __ pc();  __ push_d();    __ b(L);
+  lep = __ pc();  __ push_l();    __ b(L);
+  bep = cep = sep =
+  iep = __ pc();  __ push_i();
+  vep = __ pc();
+  __ bind(L);
+  generate_and_dispatch(t);
+}
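+
+// In effect each typed entry spills the top-of-stack register and falls
+// into the vtos path, e.g. (a sketch):
+//
+//   aep:  push_ptr(r0); goto L;   // atos
+//   iep:  push_i(r0);             // btos/ctos/stos share this: all are ints
+//   vep:  L: generate_and_dispatch(t);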
+
+//-----------------------------------------------------------------------------
+// Generation of individual instructions
+
+// helpers for generate_and_dispatch
+
+
+InterpreterGenerator::InterpreterGenerator(StubQueue* code)
+  : TemplateInterpreterGenerator(code) {
+  generate_all(); // down here so it can be "virtual"
+}
+
+//-----------------------------------------------------------------------------
+
+// Non-product code
+#ifndef PRODUCT
+address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
+  address entry = __ pc();
+
+  __ push(lr);
+  __ push(state);
+  __ push(RegSet::range(r0, r15), sp);
+  __ mov(c_rarg2, r0);  // Pass itos
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
+             c_rarg1, c_rarg2, c_rarg3);
+  __ pop(RegSet::range(r0, r15), sp);
+  __ pop(state);
+  __ pop(lr);
+  __ ret(lr);                                   // return from result handler
+
+  return entry;
+}
+
+void TemplateInterpreterGenerator::count_bytecode() {
+  Register rscratch3 = r0;
+  __ push(rscratch1);
+  __ push(rscratch2);
+  __ push(rscratch3);
+  Label L;
+  __ mov(rscratch2, (address) &BytecodeCounter::_counter_value);
+  __ bind(L);
+  __ ldxr(rscratch1, rscratch2);
+  __ add(rscratch1, rscratch1, 1);
+  __ stxr(rscratch3, rscratch1, rscratch2);
+  __ cbnzw(rscratch3, L);
+  __ pop(rscratch3);
+  __ pop(rscratch2);
+  __ pop(rscratch1);
+}
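+
+// The ldxr/stxr pair above is the load-linked/store-conditional form of an
+// atomic increment, roughly (a sketch; a nonzero status from the store
+// means the exclusive reservation was lost and we must retry):
+//
+//   do {
+//     old = load_exclusive(&BytecodeCounter::_counter_value);
+//   } while (store_exclusive(&BytecodeCounter::_counter_value, old + 1) != 0);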
+
+void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ; }
+
+void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ; }
+
+
+void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
+  // Call a little run-time stub to avoid blow-up for each bytecode.
+  // The run-time stub saves the right registers, depending on
+  // the tosca in-state for the given template.
+
+  assert(Interpreter::trace_code(t->tos_in()) != NULL,
+         "entry must have been generated");
+  __ bl(Interpreter::trace_code(t->tos_in()));
+  __ reinit_heapbase();
+}
+
+
+void TemplateInterpreterGenerator::stop_interpreter_at() {
+  Label L;
+  __ push(rscratch1);
+  __ mov(rscratch1, (address) &BytecodeCounter::_counter_value);
+  __ ldr(rscratch1, Address(rscratch1));
+  __ mov(rscratch2, StopInterpreterAt);
+  __ cmpw(rscratch1, rscratch2);
+  __ br(Assembler::NE, L);
+  __ brk(0);
+  __ bind(L);
+  __ pop(rscratch1);
+}
+
+#ifdef BUILTIN_SIM
+
+#include <sys/mman.h>
+#include <unistd.h>
+
+extern "C" {
+  static int PAGESIZE = getpagesize();
+  int is_mapped_address(u_int64_t address)
+  {
+    address = (address & ~((u_int64_t)PAGESIZE - 1));
+    if (msync((void *)address, PAGESIZE, MS_ASYNC) == 0) {
+      return true;
+    }
+    if (errno != ENOMEM) {
+      return true;
+    }
+    return false;
+  }
+
+  void bccheck1(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
+  {
+    if (method != 0) {
+      method[0] = '\0';
+    }
+    if (bcidx != 0) {
+      *bcidx = -2;
+    }
+    if (decode != 0) {
+      decode[0] = 0;
+    }
+
+    if (framesize != 0) {
+      *framesize = -1;
+    }
+
+    if (Interpreter::contains((address)pc)) {
+      AArch64Simulator *sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
+      Method* meth;
+      address bcp;
+      if (fp) {
+#define FRAME_SLOT_METHOD 3
+#define FRAME_SLOT_BCP 7
+        meth = (Method*)sim->getMemory()->loadU64(fp - (FRAME_SLOT_METHOD << 3));
+        bcp = (address)sim->getMemory()->loadU64(fp - (FRAME_SLOT_BCP << 3));
+#undef FRAME_SLOT_METHOD
+#undef FRAME_SLOT_BCP
+      } else {
+        meth = (Method*)sim->getCPUState().xreg(RMETHOD, 0);
+        bcp = (address)sim->getCPUState().xreg(RBCP, 0);
+      }
+      if (meth->is_native()) {
+        return;
+      }
+      if (method && meth->is_method()) {
+        ResourceMark rm;
+        method[0] = 'I';
+        method[1] = ' ';
+        meth->name_and_sig_as_C_string(method + 2, 398);
+      }
+      if (bcidx) {
+        if (meth->contains(bcp)) {
+          *bcidx = meth->bci_from(bcp);
+        } else {
+          *bcidx = -2;
+        }
+      }
+      if (decode) {
+        if (!BytecodeTracer::closure()) {
+          BytecodeTracer::set_closure(BytecodeTracer::std_closure());
+        }
+        stringStream str(decode, 400);
+        BytecodeTracer::trace(meth, bcp, &str);
+      }
+    } else {
+      if (method) {
+        CodeBlob *cb = CodeCache::find_blob((address)pc);
+        if (cb != NULL) {
+          if (cb->is_nmethod()) {
+            ResourceMark rm;
+            nmethod* nm = (nmethod*)cb;
+            method[0] = 'C';
+            method[1] = ' ';
+            nm->method()->name_and_sig_as_C_string(method + 2, 398);
+          } else if (cb->is_adapter_blob()) {
+            strcpy(method, "B adapter blob");
+          } else if (cb->is_runtime_stub()) {
+            strcpy(method, "B runtime stub");
+          } else if (cb->is_exception_stub()) {
+            strcpy(method, "B exception stub");
+          } else if (cb->is_deoptimization_stub()) {
+            strcpy(method, "B deoptimization stub");
+          } else if (cb->is_safepoint_stub()) {
+            strcpy(method, "B safepoint stub");
+          } else if (cb->is_uncommon_trap_stub()) {
+            strcpy(method, "B uncommon trap stub");
+          } else if (cb->contains((address)StubRoutines::call_stub())) {
+            strcpy(method, "B call stub");
+          } else {
+            strcpy(method, "B unknown blob : ");
+            strcat(method, cb->name());
+          }
+          if (framesize != NULL) {
+            *framesize = cb->frame_size();
+          }
+        }
+      }
+    }
+  }
+
+
+  JNIEXPORT void bccheck(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
+  {
+    bccheck1(pc, fp, method, bcidx, framesize, decode);
+  }
+}
+
+#endif // BUILTIN_SIM
+#endif // !PRODUCT
+#endif // ! CC_INTERP
--- a/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -24,238 +24,12 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateTable.hpp"
-#include "interpreter/bytecodeTracer.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#include <sys/types.h>
-
-#ifndef PRODUCT
+#include "oops/constMethod.hpp"
 #include "oops/method.hpp"
-#endif // !PRODUCT
-
-#ifdef BUILTIN_SIM
-#include "../../../../../../simulator/simulator.hpp"
-#endif
-
-#define __ _masm->
-
-#ifndef CC_INTERP
-
-//-----------------------------------------------------------------------------
-
-extern "C" void entry(CodeBuffer*);
-
-//-----------------------------------------------------------------------------
-
-address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
-  address entry = __ pc();
-
-#ifdef ASSERT
-  {
-    Label L;
-    __ ldr(rscratch1, Address(rfp,
-                       frame::interpreter_frame_monitor_block_top_offset *
-                       wordSize));
-    __ mov(rscratch2, sp);
-    __ cmp(rscratch1, rscratch2); // maximal rsp for current rfp (stack
-                           // grows negative)
-    __ br(Assembler::HS, L); // check if frame is complete
-    __ stop ("interpreter frame not set up");
-    __ bind(L);
-  }
-#endif // ASSERT
-  // Restore bcp under the assumption that the current frame is still
-  // interpreted
-  __ restore_bcp();
-
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-  // throw exception
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::throw_StackOverflowError));
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
-        const char* name) {
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-  // setup parameters
-  // ??? convention: expect aberrant index in register r1
-  __ movw(c_rarg2, r1);
-  __ mov(c_rarg1, (address)name);
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::
-                              throw_ArrayIndexOutOfBoundsException),
-             c_rarg1, c_rarg2);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
-  address entry = __ pc();
-
-  // object is at TOS
-  __ pop(c_rarg1);
-
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::
-                              throw_ClassCastException),
-             c_rarg1);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_exception_handler_common(
-        const char* name, const char* message, bool pass_oop) {
-  assert(!pass_oop || message == NULL, "either oop or message but not both");
-  address entry = __ pc();
-  if (pass_oop) {
-    // object is at TOS
-    __ pop(c_rarg2);
-  }
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-  // setup parameters
-  __ lea(c_rarg1, Address((address)name));
-  if (pass_oop) {
-    __ call_VM(r0, CAST_FROM_FN_PTR(address,
-                                    InterpreterRuntime::
-                                    create_klass_exception),
-               c_rarg1, c_rarg2);
-  } else {
-    // kind of lame ExternalAddress can't take NULL because
-    // external_word_Relocation will assert.
-    if (message != NULL) {
-      __ lea(c_rarg2, Address((address)message));
-    } else {
-      __ mov(c_rarg2, NULL_WORD);
-    }
-    __ call_VM(r0,
-               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
-               c_rarg1, c_rarg2);
-  }
-  // throw exception
-  __ b(address(Interpreter::throw_exception_entry()));
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
-  address entry = __ pc();
-  // NULL last_sp until next java call
-  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ dispatch_next(state);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
-  address entry = __ pc();
-
-  // Restore stack bottom in case i2c adjusted stack
-  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-  // and NULL it as marker that esp is now tos until next java call
-  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ restore_bcp();
-  __ restore_locals();
-  __ restore_constant_pool_cache();
-  __ get_method(rmethod);
-
-  // Pop N words from the stack
-  __ get_cache_and_index_at_bcp(r1, r2, 1, index_size);
-  __ ldr(r1, Address(r1, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-  __ andr(r1, r1, ConstantPoolCacheEntry::parameter_size_mask);
-
-  __ add(esp, esp, r1, Assembler::LSL, 3);
-
-  // Restore machine SP
-  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
-  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
-  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
-  __ ldr(rscratch2,
-         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
-  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
-  __ andr(sp, rscratch1, -16);
-
-#ifndef PRODUCT
-  // tell the simulator that the method has been reentered
-  if (NotifySimulator) {
-    __ notify(Assembler::method_reentry);
-  }
-#endif
-  __ get_dispatch();
-  __ dispatch_next(state, step);
-
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
-                                                               int step) {
-  address entry = __ pc();
-  __ restore_bcp();
-  __ restore_locals();
-  __ restore_constant_pool_cache();
-  __ get_method(rmethod);
-
-  // handle exceptions
-  {
-    Label L;
-    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
-    __ cbz(rscratch1, L);
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-
-  __ get_dispatch();
-
-  // Calculate stack limit
-  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
-  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
-  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
-  __ ldr(rscratch2,
-         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
-  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
-  __ andr(sp, rscratch1, -16);
-
-  // Restore expression stack pointer
-  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-  // NULL last_sp until next java call
-  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-
-  __ dispatch_next(state, step);
-  return entry;
-}
+#include "runtime/frame.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
 
 
 int AbstractInterpreter::BasicType_as_index(BasicType type) {
@@ -279,1195 +53,6 @@
   return i;
 }
 
-
-address TemplateInterpreterGenerator::generate_result_handler_for(
-        BasicType type) {
-    address entry = __ pc();
-  switch (type) {
-  case T_BOOLEAN: __ uxtb(r0, r0);        break;
-  case T_CHAR   : __ uxth(r0, r0);       break;
-  case T_BYTE   : __ sxtb(r0, r0);        break;
-  case T_SHORT  : __ sxth(r0, r0);        break;
-  case T_INT    : __ uxtw(r0, r0);        break;  // FIXME: We almost certainly don't need this
-  case T_LONG   : /* nothing to do */        break;
-  case T_VOID   : /* nothing to do */        break;
-  case T_FLOAT  : /* nothing to do */        break;
-  case T_DOUBLE : /* nothing to do */        break;
-  case T_OBJECT :
-    // retrieve result from frame
-    __ ldr(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
-    // and verify it
-    __ verify_oop(r0);
-    break;
-  default       : ShouldNotReachHere();
-  }
-  __ ret(lr);                                  // return from result handler
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_safept_entry_for(
-        TosState state,
-        address runtime_entry) {
-  address entry = __ pc();
-  __ push(state);
-  __ call_VM(noreg, runtime_entry);
-  __ membar(Assembler::AnyAny);
-  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
-  return entry;
-}
-
-// Helpers for commoning out cases in the various type of method entries.
-//
-
-
-// increment invocation count & check for overflow
-//
-// Note: checking for negative value instead of overflow
-//       so we have a 'sticky' overflow test
-//
-// rmethod: method
-//
-void InterpreterGenerator::generate_counter_incr(
-        Label* overflow,
-        Label* profile_method,
-        Label* profile_method_continue) {
-  Label done;
-  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
-  if (TieredCompilation) {
-    int increment = InvocationCounter::count_increment;
-    Label no_mdo;
-    if (ProfileInterpreter) {
-      // Are we profiling?
-      __ ldr(r0, Address(rmethod, Method::method_data_offset()));
-      __ cbz(r0, no_mdo);
-      // Increment counter in the MDO
-      const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
-                                                in_bytes(InvocationCounter::counter_offset()));
-      const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
-      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
-      __ b(done);
-    }
-    __ bind(no_mdo);
-    // Increment counter in MethodCounters
-    const Address invocation_counter(rscratch2,
-                  MethodCounters::invocation_counter_offset() +
-                  InvocationCounter::counter_offset());
-    __ get_method_counters(rmethod, rscratch2, done);
-    const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
-    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
-    __ bind(done);
-  } else { // not TieredCompilation
-    const Address backedge_counter(rscratch2,
-                  MethodCounters::backedge_counter_offset() +
-                  InvocationCounter::counter_offset());
-    const Address invocation_counter(rscratch2,
-                  MethodCounters::invocation_counter_offset() +
-                  InvocationCounter::counter_offset());
-
-    __ get_method_counters(rmethod, rscratch2, done);
-
-    if (ProfileInterpreter) { // %%% Merge this into MethodData*
-      __ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
-      __ addw(r1, r1, 1);
-      __ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
-    }
-    // Update standard invocation counters
-    __ ldrw(r1, invocation_counter);
-    __ ldrw(r0, backedge_counter);
-
-    __ addw(r1, r1, InvocationCounter::count_increment);
-    __ andw(r0, r0, InvocationCounter::count_mask_value);
-
-    __ strw(r1, invocation_counter);
-    __ addw(r0, r0, r1);                // add both counters
-
-    // profile_method is non-null only for interpreted method so
-    // profile_method != NULL == !native_call
-
-    if (ProfileInterpreter && profile_method != NULL) {
-      // Test to see if we should create a method data oop
-      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
-      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
-      __ cmpw(r0, rscratch2);
-      __ br(Assembler::LT, *profile_method_continue);
-
-      // if no method data exists, go to profile_method
-      __ test_method_data_pointer(rscratch2, *profile_method);
-    }
-
-    {
-      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
-      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
-      __ cmpw(r0, rscratch2);
-      __ br(Assembler::HS, *overflow);
-    }
-    __ bind(done);
-  }
-}
-
-void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
-
-  // Asm interpreter on entry
-  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
-  // Everything as it was on entry
-
-  // InterpreterRuntime::frequency_counter_overflow takes two
-  // arguments, the first (thread) is passed by call_VM, the second
-  // indicates if the counter overflow occurs at a backwards branch
-  // (NULL bcp).  We pass zero for it.  The call returns the address
-  // of the verified entry point for the method or NULL if the
-  // compilation did not complete (either went background or bailed
-  // out).
-  __ mov(c_rarg1, 0);
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::frequency_counter_overflow),
-             c_rarg1);
-
-  __ b(*do_continue);
-}
-
-// See if we've got enough room on the stack for locals plus overhead.
-// The expression stack grows down incrementally, so the normal guard
-// page mechanism will work for that.
-//
-// NOTE: Since the additional locals are also always pushed (wasn't
-// obvious in generate_method_entry) so the guard should work for them
-// too.
-//
-// Args:
-//      r3: number of additional locals this frame needs (what we must check)
-//      rmethod: Method*
-//
-// Kills:
-//      r0
-void InterpreterGenerator::generate_stack_overflow_check(void) {
-
-  // monitor entry size: see picture of stack set
-  // (generate_method_entry) and frame_amd64.hpp
-  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
-
-  // total overhead size: entry_size + (saved rbp through expr stack
-  // bottom).  be sure to change this if you add/subtract anything
-  // to/from the overhead area
-  const int overhead_size =
-    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
-
-  const int page_size = os::vm_page_size();
-
-  Label after_frame_check;
-
-  // see if the frame is greater than one page in size. If so,
-  // then we need to verify there is enough stack space remaining
-  // for the additional locals.
-  //
-  // Note that we use SUBS rather than CMP here because the immediate
-  // field of this instruction may overflow.  SUBS can cope with this
-  // because it is a macro that will expand to some number of MOV
-  // instructions and a register operation.
-  __ subs(rscratch1, r3, (page_size - overhead_size) / Interpreter::stackElementSize);
-  __ br(Assembler::LS, after_frame_check);
-
-  // compute rsp as if this were going to be the last frame on
-  // the stack before the red zone
-
-  const Address stack_base(rthread, Thread::stack_base_offset());
-  const Address stack_size(rthread, Thread::stack_size_offset());
-
-  // locals + overhead, in bytes
-  __ mov(r0, overhead_size);
-  __ add(r0, r0, r3, Assembler::LSL, Interpreter::logStackElementSize);  // 2 slots per parameter.
-
-  __ ldr(rscratch1, stack_base);
-  __ ldr(rscratch2, stack_size);
-
-#ifdef ASSERT
-  Label stack_base_okay, stack_size_okay;
-  // verify that thread stack base is non-zero
-  __ cbnz(rscratch1, stack_base_okay);
-  __ stop("stack base is zero");
-  __ bind(stack_base_okay);
-  // verify that thread stack size is non-zero
-  __ cbnz(rscratch2, stack_size_okay);
-  __ stop("stack size is zero");
-  __ bind(stack_size_okay);
-#endif
-
-  // Add stack base to locals and subtract stack size
-  __ sub(rscratch1, rscratch1, rscratch2); // Stack limit
-  __ add(r0, r0, rscratch1);
-
-  // Use the maximum number of pages we might bang.
-  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
-                                                                              (StackRedPages+StackYellowPages);
-
-  // add in the red and yellow zone sizes
-  __ add(r0, r0, max_pages * page_size * 2);
-
-  // check against the current stack bottom
-  __ cmp(sp, r0);
-  __ br(Assembler::HI, after_frame_check);
-
-  // Remove the incoming args, peeling the machine SP back to where it
-  // was in the caller.  This is not strictly necessary, but unless we
-  // do so the stack frame may have a garbage FP; this ensures a
-  // correct call stack that we can always unwind.  The ANDR should be
-  // unnecessary because the sender SP in r13 is always aligned, but
-  // it doesn't hurt.
-  __ andr(sp, r13, -16);
-
-  // Note: the restored frame is not necessarily interpreted.
-  // Use the shared runtime version of the StackOverflowError.
-  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
-  __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
-
-  // all done with frame size check
-  __ bind(after_frame_check);
-}
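
// A sketch of the arithmetic the check above performs, in host-side C++;
// page_size and max_pages are stand-ins for os::vm_page_size() and the
// StackShadow/Red/YellowPages flags, not real VM values.

#include <cstddef>
#include <cstdint>

const size_t page_size = 4096;  // assumed; the VM queries the OS
const size_t max_pages = 22;    // max(StackShadowPages, StackRedPages + StackYellowPages)

bool frame_fits(uintptr_t sp, uintptr_t stack_base, size_t stack_size,
                size_t locals_plus_overhead_bytes) {
  uintptr_t limit = (stack_base - stack_size)      // lowest address of the stack
                  + locals_plus_overhead_bytes     // what the new frame needs
                  + 2 * max_pages * page_size;     // red/yellow/shadow headroom
  return sp > limit;                               // the HI branch above
}
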
-
-// Allocate monitor and lock method (asm interpreter)
-//
-// Args:
-//      rmethod: Method*
-//      rlocals: locals
-//
-// Kills:
-//      r0
-//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
-//      rscratch1, rscratch2 (scratch regs)
-void TemplateInterpreterGenerator::lock_method() {
-  // synchronize method
-  const Address access_flags(rmethod, Method::access_flags_offset());
-  const Address monitor_block_top(
-        rfp,
-        frame::interpreter_frame_monitor_block_top_offset * wordSize);
-  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
-
-#ifdef ASSERT
-  {
-    Label L;
-    __ ldrw(r0, access_flags);
-    __ tst(r0, JVM_ACC_SYNCHRONIZED);
-    __ br(Assembler::NE, L);
-    __ stop("method doesn't need synchronization");
-    __ bind(L);
-  }
-#endif // ASSERT
-
-  // get synchronization object
-  {
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    Label done;
-    __ ldrw(r0, access_flags);
-    __ tst(r0, JVM_ACC_STATIC);
-    // get receiver (assume this is the frequent case)
-    __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
-    __ br(Assembler::EQ, done);
-    __ ldr(r0, Address(rmethod, Method::const_offset()));
-    __ ldr(r0, Address(r0, ConstMethod::constants_offset()));
-    __ ldr(r0, Address(r0,
-                       ConstantPool::pool_holder_offset_in_bytes()));
-    __ ldr(r0, Address(r0, mirror_offset));
-
-#ifdef ASSERT
-    {
-      Label L;
-      __ cbnz(r0, L);
-      __ stop("synchronization object is NULL");
-      __ bind(L);
-    }
-#endif // ASSERT
-
-    __ bind(done);
-  }
-
-  // add space for monitor & lock
-  __ sub(sp, sp, entry_size); // add space for a monitor entry
-  __ sub(esp, esp, entry_size);
-  __ mov(rscratch1, esp);
-  __ str(rscratch1, monitor_block_top);  // set new monitor block top
-  // store object
-  __ str(r0, Address(esp, BasicObjectLock::obj_offset_in_bytes()));
-  __ mov(c_rarg1, esp); // object address
-  __ lock_object(c_rarg1);
-}
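
// A sketch (not the VM headers) of the monitor slot carved out above: one
// BasicObjectLock, i.e. a displaced-header word followed by the object
// pointer that the str above fills in.

#include <cstdint>

struct BasicLock       { intptr_t displaced_header; };
struct BasicObjectLock { BasicLock lock; void* obj; };
// entry_size corresponds to sizeof(BasicObjectLock), and
// BasicObjectLock::obj_offset_in_bytes() to offsetof(BasicObjectLock, obj).
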
-
-// Generate a fixed interpreter frame. This is identical setup for
-// interpreted methods and for native methods hence the shared code.
-//
-// Args:
-//      lr: return address
-//      rmethod: Method*
-//      rlocals: pointer to locals
-//      rcpool: cp cache
-//      stack_pointer: previous sp
-void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
-  // initialize fixed part of activation frame
-  if (native_call) {
-    __ sub(esp, sp, 12 * wordSize);
-    __ mov(rbcp, zr);
-    __ stp(esp, zr, Address(__ pre(sp, -12 * wordSize)));
-    // add 2 zero-initialized slots for native calls
-    __ stp(zr, zr, Address(sp, 10 * wordSize));
-  } else {
-    __ sub(esp, sp, 10 * wordSize);
-    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));      // get ConstMethod
-    __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset())); // get codebase
-    __ stp(esp, rbcp, Address(__ pre(sp, -10 * wordSize)));
-  }
-
-  if (ProfileInterpreter) {
-    Label method_data_continue;
-    __ ldr(rscratch1, Address(rmethod, Method::method_data_offset()));
-    __ cbz(rscratch1, method_data_continue);
-    __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
-    __ bind(method_data_continue);
-    __ stp(rscratch1, rmethod, Address(sp, 4 * wordSize));  // save Method* and mdp (method data pointer)
-  } else {
-    __ stp(zr, rmethod, Address(sp, 4 * wordSize));        // save Method* (no mdp)
-  }
-
-  __ ldr(rcpool, Address(rmethod, Method::const_offset()));
-  __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
-  __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
-  __ stp(rlocals, rcpool, Address(sp, 2 * wordSize));
-
-  __ stp(rfp, lr, Address(sp, 8 * wordSize));
-  __ lea(rfp, Address(sp, 8 * wordSize));
-
-  // set sender sp
-  // leave last_sp as null
-  __ stp(zr, r13, Address(sp, 6 * wordSize));
-
-  // Move SP out of the way
-  if (! native_call) {
-    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
-    __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
-    __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
-    __ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
-    __ andr(sp, rscratch1, -16);
-  }
-}
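
// For orientation, the slot layout built above, read off the stp offsets
// (word indices relative to the final sp; a sketch, not a VM header):

enum fixed_frame_slot {
  slot_esp       = 0,  // expression stack bottom
  slot_bcp       = 1,  // zr for native calls
  slot_locals    = 2,
  slot_cpool     = 3,  // ConstantPoolCache*
  slot_mdp       = 4,  // zr if no MethodData
  slot_method    = 5,  // Method*
  slot_last_sp   = 6,  // zr
  slot_sender_sp = 7,  // r13
  slot_saved_fp  = 8,  // the new rfp points here
  slot_saved_lr  = 9   // native calls zero two extra slots at 10 and 11
};
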
-
-// End of helpers
-
-// Various method entries
-//------------------------------------------------------------------------------------------------------------------------
-//
-//
-
-// Method entry for java.lang.ref.Reference.get.
-address InterpreterGenerator::generate_Reference_get_entry(void) {
-#if INCLUDE_ALL_GCS
-  // Code: _aload_0, _getfield, _areturn
-  // parameter size = 1
-  //
-  // The code that gets generated by this routine is split into 2 parts:
-  //    1. The "intrinsified" code for G1 (or any SATB based GC),
-  //    2. The slow path - which is an expansion of the regular method entry.
-  //
-  // Notes:
-  // * In the G1 code we do not check whether we need to block for
-  //   a safepoint. If G1 is enabled then we must execute the specialized
-  //   code for Reference.get (except when the Reference object is null)
-  //   so that we can log the value in the referent field with an SATB
-  //   update buffer.
-  //   If the code for the getfield template is modified so that the
-  //   G1 pre-barrier code is executed when the current method is
-  //   Reference.get() then going through the normal method entry
-  //   will be fine.
-  // * The G1 code can, however, check the receiver object (the instance
-  //   of java.lang.Reference) and jump to the slow path if null. If the
-  //   Reference object is null then we obviously cannot fetch the referent
-  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
-  //   regular method entry code to generate the NPE.
-  //
-  // This code is based on generate_accessor_entry.
-  //
-  // rmethod: Method*
-  // r13: senderSP must be preserved for slow path, set SP to it on fast path
-
-  address entry = __ pc();
-
-  const int referent_offset = java_lang_ref_Reference::referent_offset;
-  guarantee(referent_offset > 0, "referent offset not initialized");
-
-  if (UseG1GC) {
-    Label slow_path;
-    const Register local_0 = c_rarg0;
-    // Check if local 0 != NULL
-    // If the receiver is null then it is OK to jump to the slow path.
-    __ ldr(local_0, Address(esp, 0));
-    __ cbz(local_0, slow_path);
-
-    // Load the value of the referent field.
-    const Address field_address(local_0, referent_offset);
-    __ load_heap_oop(local_0, field_address);
-
-    // Generate the G1 pre-barrier code to log the value of
-    // the referent field in an SATB buffer.
-    __ enter(); // g1_write may call runtime
-    __ g1_write_barrier_pre(noreg /* obj */,
-                            local_0 /* pre_val */,
-                            rthread /* thread */,
-                            rscratch2 /* tmp */,
-                            true /* tosca_live */,
-                            true /* expand_call */);
-    __ leave();
-    // areturn
-    __ andr(sp, r13, -16);  // done with stack
-    __ ret(lr);
-
-    // generate a vanilla interpreter entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
-    return entry;
-  }
-#endif // INCLUDE_ALL_GCS
-
-  // If G1 is not enabled then attempt to go through the accessor entry point
-  // Reference.get is an accessor
-  return generate_accessor_entry();
-}
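
// The G1 fast path generated above, as a C++-flavoured sketch; oop,
// referent_offset and g1_satb_enqueue are stand-ins, not VM declarations.

#include <cstddef>

typedef void* oop;
extern size_t referent_offset;      // java_lang_ref_Reference::referent_offset
void g1_satb_enqueue(oop pre_val);  // stand-in for the SATB pre-barrier

oop reference_get_fast_path(oop receiver) {
  // The null check (cbz local_0, slow_path) has already diverted to the
  // vanilla entry, so receiver is known non-null here.
  oop referent = *(oop*)((char*)receiver + referent_offset);  // load_heap_oop
  g1_satb_enqueue(referent);  // log the referent in an SATB buffer
  return referent;            // areturn
}
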
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.update(int crc, int b)
- */
-address InterpreterGenerator::generate_CRC32_update_entry() {
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    // rmethod: Method*
-    // r13: senderSP must be preserved for slow path
-    // esp: args
-
-    Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    unsigned long offset;
-    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
-    __ ldrw(rscratch1, Address(rscratch1, offset));
-    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
-    __ cbnz(rscratch1, slow_path);
-
-    // We don't generate a local frame and don't align the stack because
-    // we call stub code and there is no safepoint on this path.
-
-    // Load parameters
-    const Register crc = c_rarg0;  // crc
-    const Register val = c_rarg1;  // source java byte value
-    const Register tbl = c_rarg2;  // scratch
-
-    // Arguments are reversed on java expression stack
-    __ ldrw(val, Address(esp, 0));              // byte value
-    __ ldrw(crc, Address(esp, wordSize));       // Initial CRC
-
-    __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
-    __ add(tbl, tbl, offset);
-
-    __ ornw(crc, zr, crc); // ~crc
-    __ update_byte_crc32(crc, val, tbl);
-    __ ornw(crc, zr, crc); // ~crc
-
-    // result in c_rarg0
-
-    __ andr(sp, r13, -16);
-    __ ret(lr);
-
-    // generate a vanilla native entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
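
// The ~crc / table step / ~crc sequence above is the standard reflected
// CRC-32 byte update; a plain C++ sketch (table definition omitted; the
// stub reads it via StubRoutines::crc_table_addr()):

#include <cstdint>

extern const uint32_t crc_table[256];  // assumed standard CRC-32 table

uint32_t crc32_update(uint32_t crc, uint8_t b) {
  crc = ~crc;                                      // ornw(crc, zr, crc)
  crc = crc_table[(crc ^ b) & 0xff] ^ (crc >> 8);  // update_byte_crc32
  return ~crc;                                     // final ornw
}
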
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
- *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
- */
-address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    // rmethod: Method*
-    // r13: senderSP must be preserved for slow path
-
-    Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    unsigned long offset;
-    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
-    __ ldrw(rscratch1, Address(rscratch1, offset));
-    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
-    __ cbnz(rscratch1, slow_path);
-
-    // We don't generate a local frame and don't align the stack because
-    // we call stub code and there is no safepoint on this path.
-
-    // Load parameters
-    const Register crc = c_rarg0;  // crc
-    const Register buf = c_rarg1;  // source java byte array address
-    const Register len = c_rarg2;  // length
-    const Register off = len;     // offset (its live range never overlaps 'len')
-
-    // Arguments are reversed on java expression stack
-    // Calculate address of start element
-    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
-      __ ldr(buf, Address(esp, 2*wordSize)); // long buf
-      __ ldrw(off, Address(esp, wordSize)); // offset
-      __ add(buf, buf, off); // + offset
-      __ ldrw(crc,   Address(esp, 4*wordSize)); // Initial CRC
-    } else {
-      __ ldr(buf, Address(esp, 2*wordSize)); // byte[] array
-      __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
-      __ ldrw(off, Address(esp, wordSize)); // offset
-      __ add(buf, buf, off); // + offset
-      __ ldrw(crc,   Address(esp, 3*wordSize)); // Initial CRC
-    }
-    // Can now load 'len' since we're finished with 'off'
-    __ ldrw(len, Address(esp, 0x0)); // Length
-
-    __ andr(sp, r13, -16); // Restore the caller's SP
-
-    // We are frameless so we can just jump to the stub.
-    __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));
-
-    // generate a vanilla native entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
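
// Expression-stack slots the loads above assume (top of stack at esp + 0,
// one word per slot, longs taking two); written out for orientation:

const int slot_len            = 0;  // both variants
const int slot_off            = 1;
const int slot_buf            = 2;  // byte[] oop, or the long buffer address
const int slot_crc_bytes      = 3;  // updateBytes(int, byte[], int, int)
const int slot_crc_byteBuffer = 4;  // updateByteBuffer: crc sits past the 2-slot long
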
-
-void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
-  // Bang each page in the shadow zone. We can't assume it's been done for
-  // an interpreter frame with greater than a page of locals, so each page
-  // needs to be checked (only non-native methods start from page 1;
-  // native calls bang just the farthest shadow page).
-  if (UseStackBanging) {
-    const int start_page = native_call ? StackShadowPages : 1;
-    const int page_size = os::vm_page_size();
-    for (int pages = start_page; pages <= StackShadowPages ; pages++) {
-      __ sub(rscratch2, sp, pages*page_size);
-      __ str(zr, Address(rscratch2));
-    }
-  }
-}
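
// What the banging loop amounts to in C++ (a sketch; page_size and
// shadow_pages stand in for os::vm_page_size() and StackShadowPages):

#include <cstdint>

void bang_shadow_pages(char* sp, int start_page, int shadow_pages, int page_size) {
  // Touch one word per page below sp; native entries pass
  // start_page == shadow_pages and so bang only the farthest page.
  for (int pages = start_page; pages <= shadow_pages; pages++) {
    *(volatile intptr_t*)(sp - (intptr_t)pages * page_size) = 0;
  }
}
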
-
-
-// Interpreter stub for calling a native method. (asm interpreter)
-// This sets up a somewhat different looking stack for calling the
-// native method than the typical interpreter frame setup.
-address InterpreterGenerator::generate_native_entry(bool synchronized) {
-  // determine code generation flags
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // r1: Method*
-  // rscratch1: sender sp
-
-  address entry_point = __ pc();
-
-  const Address constMethod       (rmethod, Method::const_offset());
-  const Address access_flags      (rmethod, Method::access_flags_offset());
-  const Address size_of_parameters(r2, ConstMethod::
-                                       size_of_parameters_offset());
-
-  // get parameter size (always needed)
-  __ ldr(r2, constMethod);
-  __ load_unsigned_short(r2, size_of_parameters);
-
-  // native calls don't need the stack size check since they have no
-  // expression stack, the arguments are already on the stack, and we
-  // only add a handful of words to the stack
-
-  // rmethod: Method*
-  // r2: size of parameters
-  // rscratch1: sender sp
-
-  // for natives the size of locals is zero
-
-  // compute beginning of parameters (rlocals)
-  __ add(rlocals, esp, r2, ext::uxtx, 3);
-  __ add(rlocals, rlocals, -wordSize);
-
-  // Pull SP back to minimum size: this avoids holes in the stack
-  __ andr(sp, esp, -16);
-
-  // initialize fixed part of activation frame
-  generate_fixed_frame(true);
-#ifndef PRODUCT
-  // tell the simulator that a method has been entered
-  if (NotifySimulator) {
-    __ notify(Assembler::method_entry);
-  }
-#endif
-
-  // make sure method is native & not abstract
-#ifdef ASSERT
-  __ ldrw(r0, access_flags);
-  {
-    Label L;
-    __ tst(r0, JVM_ACC_NATIVE);
-    __ br(Assembler::NE, L);
-    __ stop("tried to execute non-native method as native");
-    __ bind(L);
-  }
-  {
-    Label L;
-    __ tst(r0, JVM_ACC_ABSTRACT);
-    __ br(Assembler::EQ, L);
-    __ stop("tried to execute abstract method in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // Since at this point in the method invocation the exception
-  // handler would try to exit the monitor of synchronized methods
-  // which hasn't been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. The remove_activation
-  // will check this flag.
-
-  const Address do_not_unlock_if_synchronized(rthread,
-      in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
-  __ mov(rscratch2, true);
-  __ strb(rscratch2, do_not_unlock_if_synchronized);
-
-  // increment invocation count & check for overflow
-  Label invocation_counter_overflow;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
-  }
-
-  Label continue_after_compile;
-  __ bind(continue_after_compile);
-
-  bang_stack_shadow_pages(true);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ strb(zr, do_not_unlock_if_synchronized);
-
-  // check for synchronized methods
-  // Must happen AFTER the invocation_counter check and stack overflow check,
-  // so the method is not locked if the counter overflows.
-  if (synchronized) {
-    lock_method();
-  } else {
-    // no synchronization necessary
-#ifdef ASSERT
-    {
-      Label L;
-      __ ldrw(r0, access_flags);
-      __ tst(r0, JVM_ACC_SYNCHRONIZED);
-      __ br(Assembler::EQ, L);
-      __ stop("method needs synchronization");
-      __ bind(L);
-    }
-#endif
-  }
-
-  // start execution
-#ifdef ASSERT
-  {
-    Label L;
-    const Address monitor_block_top(rfp,
-                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
-    __ ldr(rscratch1, monitor_block_top);
-    __ cmp(esp, rscratch1);
-    __ br(Assembler::EQ, L);
-    __ stop("broken stack frame setup in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // jvmti support
-  __ notify_method_entry();
-
-  // work registers
-  const Register t = r17;
-  const Register result_handler = r19;
-
-  // allocate space for parameters
-  __ ldr(t, Address(rmethod, Method::const_offset()));
-  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
-
-  __ sub(rscratch1, esp, t, ext::uxtx, Interpreter::logStackElementSize);
-  __ andr(sp, rscratch1, -16);
-  __ mov(esp, rscratch1);
-
-  // get signature handler
-  {
-    Label L;
-    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
-    __ cbnz(t, L);
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::prepare_native_call),
-               rmethod);
-    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
-    __ bind(L);
-  }
-
-  // call signature handler
-  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
-         "adjust this code");
-  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
-         "adjust this code");
-  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
-          "adjust this code");
-
-  // The generated handlers do not touch rmethod (the method).
-  // However, large signatures cannot be cached and are generated
-  // each time here.  The slow-path generator can do a GC on return,
-  // so we must reload it after the call.
-  __ blr(t);
-  __ get_method(rmethod);        // slow path can do a GC, reload rmethod
-
-
-  // result handler is in r0
-  // set result handler
-  __ mov(result_handler, r0);
-  // pass mirror handle if static call
-  {
-    Label L;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
-    __ tst(t, JVM_ACC_STATIC);
-    __ br(Assembler::EQ, L);
-    // get mirror
-    __ ldr(t, Address(rmethod, Method::const_offset()));
-    __ ldr(t, Address(t, ConstMethod::constants_offset()));
-    __ ldr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
-    __ ldr(t, Address(t, mirror_offset));
-    // copy mirror into activation frame
-    __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
-    // pass handle to mirror
-    __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
-    __ bind(L);
-  }
-
-  // get native function entry point in r10
-  {
-    Label L;
-    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
-    address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
-    __ mov(rscratch2, unsatisfied);
-    __ ldr(rscratch2, rscratch2);
-    __ cmp(r10, rscratch2);
-    __ br(Assembler::NE, L);
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::prepare_native_call),
-               rmethod);
-    __ get_method(rmethod);
-    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
-    __ bind(L);
-  }
-
-  // pass JNIEnv
-  __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));
-
-  // It is enough that the pc() points into the right code
-  // segment. It does not have to be the correct return pc.
-  __ set_last_Java_frame(esp, rfp, (address)NULL, rscratch1);
-
-  // change thread state
-#ifdef ASSERT
-  {
-    Label L;
-    __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
-    __ cmp(t, _thread_in_Java);
-    __ br(Assembler::EQ, L);
-    __ stop("Wrong thread state in native stub");
-    __ bind(L);
-  }
-#endif
-
-  // Change state to native
-  __ mov(rscratch1, _thread_in_native);
-  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
-  __ stlrw(rscratch1, rscratch2);
-
-  // Call the native method.
-  __ blrt(r10, rscratch1);
-  __ maybe_isb();
-  __ get_method(rmethod);
-  // result potentially in r0 or v0
-
-  // make room for the pushes we're about to do
-  __ sub(rscratch1, esp, 4 * wordSize);
-  __ andr(sp, rscratch1, -16);
-
-  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
-  // in order to extract the result of a method call. If the order of these
-  // pushes changes or anything else is added to the stack then the code in
-  // interpreter_frame_result must also change.
-  __ push(dtos);
-  __ push(ltos);
-
-  // change thread state
-  __ mov(rscratch1, _thread_in_native_trans);
-  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
-  __ stlrw(rscratch1, rscratch2);
-
-  if (os::is_MP()) {
-    if (UseMembar) {
-      // Force this write out before the read below
-      __ dsb(Assembler::SY);
-    } else {
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(rthread, rscratch2);
-    }
-  }
-
-  // check for safepoint operation in progress and/or pending suspend requests
-  {
-    Label Continue;
-    {
-      unsigned long offset;
-      __ adrp(rscratch2, SafepointSynchronize::address_of_state(), offset);
-      __ ldrw(rscratch2, Address(rscratch2, offset));
-    }
-    assert(SafepointSynchronize::_not_synchronized == 0,
-           "SafepointSynchronize::_not_synchronized");
-    Label L;
-    __ cbnz(rscratch2, L);
-    __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
-    __ cbz(rscratch2, Continue);
-    __ bind(L);
-
-    // Don't use call_VM here: it would see a possible pending
-    // exception, forward it, and never return, preventing us from
-    // clearing _last_native_pc down below. So we do the runtime call
-    // by hand.
-    //
-    __ mov(c_rarg0, rthread);
-    __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
-    __ blrt(rscratch2, 1, 0, 0);
-    __ maybe_isb();
-    __ get_method(rmethod);
-    __ reinit_heapbase();
-    __ bind(Continue);
-  }
-
-  // change thread state
-  __ mov(rscratch1, _thread_in_Java);
-  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
-  __ stlrw(rscratch1, rscratch2);
-
-  // reset_last_Java_frame
-  __ reset_last_Java_frame(true, true);
-
-  // reset handle block
-  __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
-  __ str(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));
-
-  // If result is an oop unbox and store it in frame where gc will see it
-  // and result handler will pick it up
-
-  {
-    Label no_oop, store_result;
-    __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
-    __ cmp(t, result_handler);
-    __ br(Assembler::NE, no_oop);
-    // retrieve result
-    __ pop(ltos);
-    __ cbz(r0, store_result);
-    __ ldr(r0, Address(r0, 0));
-    __ bind(store_result);
-    __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
-    // keep stack depth as expected by pushing oop which will eventually be discarded
-    __ push(ltos);
-    __ bind(no_oop);
-  }
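
// JNI returns object results as handles; the block above unboxes them.
// As a sketch (oop is a stand-in type): a null handle stays null,
// otherwise the handle is dereferenced, and the resulting oop is parked
// in the frame's oop_temp slot so a GC triggered below can find it.

typedef void* oop;

oop unbox_jni_handle(oop* handle) {
  return (handle == 0) ? (oop)0 : *handle;
}
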
-
-  {
-    Label no_reguard;
-    __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
-    __ ldrb(rscratch1, Address(rscratch1));
-    __ cmp(rscratch1, JavaThread::stack_guard_yellow_disabled);
-    __ br(Assembler::NE, no_reguard);
-
-    __ pusha(); // XXX only save smashed registers
-    __ mov(c_rarg0, rthread);
-    __ mov(rscratch2, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
-    __ blrt(rscratch2, 0, 0, 0);
-    __ popa(); // XXX only restore smashed registers
-    __ bind(no_reguard);
-  }
-
-  // The method register is junk from after the thread_in_native transition
-  // until here.  Also we can't call_VM until the bcp has been restored.
-  // We need the bcp for throwing the exception below, so get it now.
-  __ get_method(rmethod);
-
-  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
-  // rbcp == code_base()
-  __ ldr(rbcp, Address(rmethod, Method::const_offset()));   // get ConstMethod*
-  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));          // get codebase
-  // handle exceptions (exception handling will handle unlocking!)
-  {
-    Label L;
-    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
-    __ cbz(rscratch1, L);
-    // Note: At some point we may want to unify this with the code
-    // used in call_VM_base(); i.e., we should use the
-    // StubRoutines::forward_exception code. For now this doesn't work
-    // here because sp is not correctly set at this point.
-    __ MacroAssembler::call_VM(noreg,
-                               CAST_FROM_FN_PTR(address,
-                               InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-
-  // do unlocking if necessary
-  {
-    Label L;
-    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
-    __ tst(t, JVM_ACC_SYNCHRONIZED);
-    __ br(Assembler::EQ, L);
-    // the code below should be shared with the interpreter macro
-    // assembler implementation
-    {
-      Label unlock;
-      // BasicObjectLock will be first in list, since this is a
-      // synchronized method. However, need to check that the object
-      // has not been unlocked by an explicit monitorexit bytecode.
-
-      // the slow unlock path expects the monitor in c_rarg1
-      __ lea (c_rarg1, Address(rfp,   // address of first monitor
-                               (intptr_t)(frame::interpreter_frame_initial_sp_offset *
-                                          wordSize - sizeof(BasicObjectLock))));
-
-      __ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
-      __ cbnz(t, unlock);
-
-      // Entry already unlocked, need to throw exception
-      __ MacroAssembler::call_VM(noreg,
-                                 CAST_FROM_FN_PTR(address,
-                   InterpreterRuntime::throw_illegal_monitor_state_exception));
-      __ should_not_reach_here();
-
-      __ bind(unlock);
-      __ unlock_object(c_rarg1);
-    }
-    __ bind(L);
-  }
-
-  // jvmti support
-  // Note: This must happen _after_ handling/throwing any exceptions since
-  //       the exception handler code notifies the runtime of method exits
-  //       too. If this happens before, method entry/exit notifications are
-  //       not properly paired (was bug - gri 11/22/99).
-  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
-
-  // restore potential result in r0/d0 and call the result handler
-  // to handle it
-
-  __ pop(ltos);
-  __ pop(dtos);
-
-  __ blr(result_handler);
-
-  // remove activation
-  __ ldr(esp, Address(rfp,
-                    frame::interpreter_frame_sender_sp_offset *
-                    wordSize)); // get sender sp
-  // remove frame anchor
-  __ leave();
-
-  // restore sender sp
-  __ mov(sp, esp);
-
-  __ ret(lr);
-
-  if (inc_counter) {
-    // Handle overflow of counter and compile method
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
-  }
-
-  return entry_point;
-}
-
-//
-// Generic interpreted method entry to (asm) interpreter
-//
-address InterpreterGenerator::generate_normal_entry(bool synchronized) {
-  // determine code generation flags
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // rscratch1: sender sp
-  address entry_point = __ pc();
-
-  const Address constMethod(rmethod, Method::const_offset());
-  const Address access_flags(rmethod, Method::access_flags_offset());
-  const Address size_of_parameters(r3,
-                                   ConstMethod::size_of_parameters_offset());
-  const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());
-
-  // get parameter size (always needed)
-  // need to load the const method first
-  __ ldr(r3, constMethod);
-  __ load_unsigned_short(r2, size_of_parameters);
-
-  // r2: size of parameters
-
-  __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
-  __ sub(r3, r3, r2); // r3 = no. of additional locals
-
-  // see if we've got enough room on the stack for locals plus overhead.
-  generate_stack_overflow_check();
-
-  // compute beginning of parameters (rlocals)
-  __ add(rlocals, esp, r2, ext::uxtx, 3);
-  __ sub(rlocals, rlocals, wordSize);
-
-  // Make room for locals
-  __ sub(rscratch1, esp, r3, ext::uxtx, 3);
-  __ andr(sp, rscratch1, -16);
-
-  // r3 - # of additional locals
-  // allocate space for locals
-  // explicitly initialize locals
-  {
-    Label exit, loop;
-    __ ands(zr, r3, r3);
-    __ br(Assembler::LE, exit); // do nothing if r3 <= 0
-    __ bind(loop);
-    __ str(zr, Address(__ post(rscratch1, wordSize)));
-    __ sub(r3, r3, 1); // until everything initialized
-    __ cbnz(r3, loop);
-    __ bind(exit);
-  }
-
-  // And the base dispatch table
-  __ get_dispatch();
-
-  // initialize fixed part of activation frame
-  generate_fixed_frame(false);
-#ifndef PRODUCT
-  // tell the simulator that a method has been entered
-  if (NotifySimulator) {
-    __ notify(Assembler::method_entry);
-  }
-#endif
-  // make sure method is not native & not abstract
-#ifdef ASSERT
-  __ ldrw(r0, access_flags);
-  {
-    Label L;
-    __ tst(r0, JVM_ACC_NATIVE);
-    __ br(Assembler::EQ, L);
-    __ stop("tried to execute native method as non-native");
-    __ bind(L);
-  }
-  {
-    Label L;
-    __ tst(r0, JVM_ACC_ABSTRACT);
-    __ br(Assembler::EQ, L);
-    __ stop("tried to execute abstract method in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // Since at this point in the method invocation the exception
-  // handler would try to exit the monitor of synchronized methods
-  // which hasn't been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. The remove_activation
-  // will check this flag.
-
-   const Address do_not_unlock_if_synchronized(rthread,
-        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
-  __ mov(rscratch2, true);
-  __ strb(rscratch2, do_not_unlock_if_synchronized);
-
-  // increment invocation count & check for overflow
-  Label invocation_counter_overflow;
-  Label profile_method;
-  Label profile_method_continue;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow,
-                          &profile_method,
-                          &profile_method_continue);
-    if (ProfileInterpreter) {
-      __ bind(profile_method_continue);
-    }
-  }
-
-  Label continue_after_compile;
-  __ bind(continue_after_compile);
-
-  bang_stack_shadow_pages(false);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ strb(zr, do_not_unlock_if_synchronized);
-
-  // check for synchronized methods
-  // Must happen AFTER the invocation_counter check and stack overflow check,
-  // so the method is not locked if the counter overflows.
-  if (synchronized) {
-    // Allocate monitor and lock method
-    lock_method();
-  } else {
-    // no synchronization necessary
-#ifdef ASSERT
-    {
-      Label L;
-      __ ldrw(r0, access_flags);
-      __ tst(r0, JVM_ACC_SYNCHRONIZED);
-      __ br(Assembler::EQ, L);
-      __ stop("method needs synchronization");
-      __ bind(L);
-    }
-#endif
-  }
-
-  // start execution
-#ifdef ASSERT
-  {
-    Label L;
-    const Address monitor_block_top(rfp,
-                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
-    __ ldr(rscratch1, monitor_block_top);
-    __ cmp(esp, rscratch1);
-    __ br(Assembler::EQ, L);
-    __ stop("broken stack frame setup in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // jvmti support
-  __ notify_method_entry();
-
-  __ dispatch_next(vtos);
-
-  // invocation counter overflow
-  if (inc_counter) {
-    if (ProfileInterpreter) {
-      // We have decided to profile this method in the interpreter
-      __ bind(profile_method);
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
-      __ set_method_data_pointer_for_bcp();
-      // don't think we need this
-      __ get_method(r1);
-      __ b(profile_method_continue);
-    }
-    // Handle overflow of counter and compile method
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
-  }
-
-  return entry_point;
-}
-
 // These should never be compiled since the interpreter will prefer
 // the compiled version to the intrinsic version.
 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
@@ -1593,483 +178,3 @@
   *interpreter_frame->interpreter_frame_cache_addr() =
     method->constants()->cache();
 }
-
-
-//-----------------------------------------------------------------------------
-// Exceptions
-
-void TemplateInterpreterGenerator::generate_throw_exception() {
-  // Entry point in previous activation (i.e., if the caller was
-  // interpreted)
-  Interpreter::_rethrow_exception_entry = __ pc();
-  // Restore sp to interpreter_frame_last_sp even though we are going
-  // to empty the expression stack for the exception processing.
-  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-  // r0: exception
-  // r3: return address/pc that threw exception
-  __ restore_bcp();    // rbcp points to call/send
-  __ restore_locals();
-  __ restore_constant_pool_cache();
-  __ reinit_heapbase();  // restore rheapbase as heapbase.
-  __ get_dispatch();
-
-#ifndef PRODUCT
-  // tell the simulator that the caller method has been reentered
-  if (NotifySimulator) {
-    __ get_method(rmethod);
-    __ notify(Assembler::method_reentry);
-  }
-#endif
-  // Entry point for exceptions thrown within interpreter code
-  Interpreter::_throw_exception_entry = __ pc();
-  // If we came here via a NullPointerException on the receiver of a
-  // method, rmethod may be corrupt.
-  __ get_method(rmethod);
-  // expression stack is undefined here
-  // r0: exception
-  // rbcp: exception bcp
-  __ verify_oop(r0);
-  __ mov(c_rarg1, r0);
-
-  // expression stack must be empty before entering the VM in case of
-  // an exception
-  __ empty_expression_stack();
-  // find exception handler address and preserve exception oop
-  __ call_VM(r3,
-             CAST_FROM_FN_PTR(address,
-                          InterpreterRuntime::exception_handler_for_exception),
-             c_rarg1);
-
-  // Calculate stack limit
-  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
-  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
-  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
-  __ ldr(rscratch2,
-         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
-  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
-  __ andr(sp, rscratch1, -16);
-
-  // r0: exception handler entry point
-  // r3: preserved exception oop
-  // rbcp: bcp for exception handler
-  __ push_ptr(r3); // push exception which is now the only value on the stack
-  __ br(r0); // jump to exception handler (may be _remove_activation_entry!)
-
-  // If the exception is not handled in the current frame the frame is
-  // removed and the exception is rethrown (i.e. exception
-  // continuation is _rethrow_exception).
-  //
-  // Note: At this point the bci still refers to the instruction which
-  // caused the exception and the expression stack is empty. Thus, for
-  // any VM calls at this point, GC will find a legal oop map (with
-  // empty expression stack).
-
-  //
-  // JVMTI PopFrame support
-  //
-
-  Interpreter::_remove_activation_preserving_args_entry = __ pc();
-  __ empty_expression_stack();
-  // Set the popframe_processing bit in pending_popframe_condition
-  // indicating that we are currently handling popframe, so that
-  // call_VMs that may happen later do not trigger new popframe
-  // handling cycles.
-  __ ldrw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
-  __ orr(r3, r3, JavaThread::popframe_processing_bit);
-  __ strw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
-
-  {
-    // Check to see whether we are returning to a deoptimized frame.
-    // (The PopFrame call ensures that the caller of the popped frame is
-    // either interpreted or compiled and deoptimizes it if compiled.)
-    // In this case, we can't call dispatch_next() after the frame is
-    // popped, but instead must save the incoming arguments and restore
-    // them after deoptimization has occurred.
-    //
-    // Note that we don't compare the return PC against the
-    // deoptimization blob's unpack entry because of the presence of
-    // adapter frames in C2.
-    Label caller_not_deoptimized;
-    __ ldr(c_rarg1, Address(rfp, frame::return_addr_offset * wordSize));
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
-                               InterpreterRuntime::interpreter_contains), c_rarg1);
-    __ cbnz(r0, caller_not_deoptimized);
-
-    // Compute size of arguments for saving when returning to
-    // deoptimized caller
-    __ get_method(r0);
-    __ ldr(r0, Address(r0, Method::const_offset()));
-    __ load_unsigned_short(r0, Address(r0, in_bytes(ConstMethod::
-                                                    size_of_parameters_offset())));
-    __ lsl(r0, r0, Interpreter::logStackElementSize);
-    __ restore_locals(); // XXX do we need this?
-    __ sub(rlocals, rlocals, r0);
-    __ add(rlocals, rlocals, wordSize);
-    // Save these arguments
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
-                                           Deoptimization::
-                                           popframe_preserve_args),
-                          rthread, r0, rlocals);
-
-    __ remove_activation(vtos,
-                         /* throw_monitor_exception */ false,
-                         /* install_monitor_exception */ false,
-                         /* notify_jvmdi */ false);
-
-    // Inform deoptimization that it is responsible for restoring
-    // these arguments
-    __ mov(rscratch1, JavaThread::popframe_force_deopt_reexecution_bit);
-    __ strw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
-
-    // Continue in deoptimization handler
-    __ ret(lr);
-
-    __ bind(caller_not_deoptimized);
-  }
-
-  __ remove_activation(vtos,
-                       /* throw_monitor_exception */ false,
-                       /* install_monitor_exception */ false,
-                       /* notify_jvmdi */ false);
-
-  // Restore the last_sp and null it out
-  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-
-  __ restore_bcp();
-  __ restore_locals();
-  __ restore_constant_pool_cache();
-  __ get_method(rmethod);
-
-  // The method data pointer was incremented already during
-  // call profiling. We have to restore the mdp for the current bcp.
-  if (ProfileInterpreter) {
-    __ set_method_data_pointer_for_bcp();
-  }
-
-  // Clear the popframe condition flag
-  __ strw(zr, Address(rthread, JavaThread::popframe_condition_offset()));
-  assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");
-
-#if INCLUDE_JVMTI
-  {
-    Label L_done;
-
-    __ ldrb(rscratch1, Address(rbcp, 0));
-    __ cmpw(rscratch1, Bytecodes::_invokestatic);
-    __ br(Assembler::NE, L_done);
-
-    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
-    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
-
-    __ ldr(c_rarg0, Address(rlocals, 0));
-    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp);
-
-    __ cbz(r0, L_done);
-
-    __ str(r0, Address(esp, 0));
-    __ bind(L_done);
-  }
-#endif // INCLUDE_JVMTI
-
-  // Restore machine SP
-  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
-  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
-  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
-  __ ldr(rscratch2,
-         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
-  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
-  __ andr(sp, rscratch1, -16);
-
-  __ dispatch_next(vtos);
-  // end of PopFrame support
-
-  Interpreter::_remove_activation_entry = __ pc();
-
-  // preserve exception over this code sequence
-  __ pop_ptr(r0);
-  __ str(r0, Address(rthread, JavaThread::vm_result_offset()));
-  // remove the activation (without doing throws on illegalMonitorExceptions)
-  __ remove_activation(vtos, false, true, false);
-  // restore exception
-  __ get_vm_result(r0, rthread);
-
-  // In between activations - previous activation type unknown yet
-  // compute continuation point - the continuation point expects the
-  // following registers set up:
-  //
-  // r0: exception
-  // lr: return address/pc that threw exception
-  // sp: expression stack of caller
-  // rfp: fp of caller
-  // FIXME: There's no point saving LR here because VM calls don't trash it
-  __ stp(r0, lr, Address(__ pre(sp, -2 * wordSize)));  // save exception & return address
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
-                          SharedRuntime::exception_handler_for_return_address),
-                        rthread, lr);
-  __ mov(r1, r0);                               // save exception handler
-  __ ldp(r0, lr, Address(__ post(sp, 2 * wordSize)));  // restore exception & return address
-  // We might be returning to a deopt handler that expects r3 to
-  // contain the exception pc
-  __ mov(r3, lr);
-  // Note that an "issuing PC" is actually the next PC after the call
-  __ br(r1);                                    // jump to exception
-                                                // handler of caller
-}
-
-
-//
-// JVMTI ForceEarlyReturn support
-//
-address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
-  address entry = __ pc();
-
-  __ restore_bcp();
-  __ restore_locals();
-  __ empty_expression_stack();
-  __ load_earlyret_value(state);
-
-  __ ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
-  Address cond_addr(rscratch1, JvmtiThreadState::earlyret_state_offset());
-
-  // Clear the earlyret state
-  assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
-  __ str(zr, cond_addr);
-
-  __ remove_activation(state,
-                       false, /* throw_monitor_exception */
-                       false, /* install_monitor_exception */
-                       true); /* notify_jvmdi */
-  __ ret(lr);
-
-  return entry;
-} // end of ForceEarlyReturn support
-
-
-
-//-----------------------------------------------------------------------------
-// Helper for vtos entry point generation
-
-void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
-                                                         address& bep,
-                                                         address& cep,
-                                                         address& sep,
-                                                         address& aep,
-                                                         address& iep,
-                                                         address& lep,
-                                                         address& fep,
-                                                         address& dep,
-                                                         address& vep) {
-  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
-  Label L;
-  aep = __ pc();  __ push_ptr();  __ b(L);
-  fep = __ pc();  __ push_f();    __ b(L);
-  dep = __ pc();  __ push_d();    __ b(L);
-  lep = __ pc();  __ push_l();    __ b(L);
-  bep = cep = sep =
-  iep = __ pc();  __ push_i();
-  vep = __ pc();
-  __ bind(L);
-  generate_and_dispatch(t);
-}
-
-//-----------------------------------------------------------------------------
-// Generation of individual instructions
-
-// helpers for generate_and_dispatch
-
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
-  : TemplateInterpreterGenerator(code) {
-   generate_all(); // down here so it can be "virtual"
-}
-
-//-----------------------------------------------------------------------------
-
-// Non-product code
-#ifndef PRODUCT
-address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
-  address entry = __ pc();
-
-  __ push(lr);
-  __ push(state);
-  __ push(RegSet::range(r0, r15), sp);
-  __ mov(c_rarg2, r0);  // Pass itos
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
-             c_rarg1, c_rarg2, c_rarg3);
-  __ pop(RegSet::range(r0, r15), sp);
-  __ pop(state);
-  __ pop(lr);
-  __ ret(lr);                                   // return from result handler
-
-  return entry;
-}
-
-void TemplateInterpreterGenerator::count_bytecode() {
-  Register rscratch3 = r0;
-  __ push(rscratch1);
-  __ push(rscratch2);
-  __ push(rscratch3);
-  Label L;
-  __ mov(rscratch2, (address) &BytecodeCounter::_counter_value);
-  __ bind(L);
-  __ ldxr(rscratch1, rscratch2);
-  __ add(rscratch1, rscratch1, 1);
-  __ stxr(rscratch3, rscratch1, rscratch2);
-  __ cbnzw(rscratch3, L);
-  __ pop(rscratch3);
-  __ pop(rscratch2);
-  __ pop(rscratch1);
-}
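
// The ldxr/stxr pair above is AArch64's load-exclusive/store-exclusive
// idiom; the portable equivalent of this counter bump is a relaxed atomic
// increment (sketch; counter_value stands in for BytecodeCounter::_counter_value):

#include <atomic>

std::atomic<long> counter_value{0};

void count_bytecode_sketch() {
  // The hardware retries the exclusive pair on contention, just like the
  // cbnzw back-branch above.
  counter_value.fetch_add(1, std::memory_order_relaxed);
}
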
-
-void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ; }
-
-void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ; }
-
-
-void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
-  // Call a little run-time stub to avoid blow-up for each bytecode.
-  // The run-time stub saves the right registers, depending on
-  // the tosca in-state for the given template.
-
-  assert(Interpreter::trace_code(t->tos_in()) != NULL,
-         "entry must have been generated");
-  __ bl(Interpreter::trace_code(t->tos_in()));
-  __ reinit_heapbase();
-}
-
-
-void TemplateInterpreterGenerator::stop_interpreter_at() {
-  Label L;
-  __ push(rscratch1);
-  __ mov(rscratch1, (address) &BytecodeCounter::_counter_value);
-  __ ldr(rscratch1, Address(rscratch1));
-  __ mov(rscratch2, StopInterpreterAt);
-  __ cmpw(rscratch1, rscratch2);
-  __ br(Assembler::NE, L);
-  __ brk(0);
-  __ bind(L);
-  __ pop(rscratch1);
-}
-
-#ifdef BUILTIN_SIM
-
-#include <sys/mman.h>
-#include <unistd.h>
-
-extern "C" {
-  static int PAGESIZE = getpagesize();
-  int is_mapped_address(u_int64_t address)
-  {
-    address = (address & ~((u_int64_t)PAGESIZE - 1));
-    if (msync((void *)address, PAGESIZE, MS_ASYNC) == 0) {
-      return true;
-    }
-    if (errno != ENOMEM) {
-      return true;
-    }
-    return false;
-  }
-
-  void bccheck1(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
-  {
-    if (method != 0) {
-      method[0] = '\0';
-    }
-    if (bcidx != 0) {
-      *bcidx = -2;
-    }
-    if (decode != 0) {
-      decode[0] = 0;
-    }
-
-    if (framesize != 0) {
-      *framesize = -1;
-    }
-
-    if (Interpreter::contains((address)pc)) {
-      AArch64Simulator *sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
-      Method* meth;
-      address bcp;
-      if (fp) {
-#define FRAME_SLOT_METHOD 3
-#define FRAME_SLOT_BCP 7
-        meth = (Method*)sim->getMemory()->loadU64(fp - (FRAME_SLOT_METHOD << 3));
-        bcp = (address)sim->getMemory()->loadU64(fp - (FRAME_SLOT_BCP << 3));
-#undef FRAME_SLOT_METHOD
-#undef FRAME_SLOT_BCP
-      } else {
-        meth = (Method*)sim->getCPUState().xreg(RMETHOD, 0);
-        bcp = (address)sim->getCPUState().xreg(RBCP, 0);
-      }
-      if (meth->is_native()) {
-        return;
-      }
-      if (method && meth->is_method()) {
-        ResourceMark rm;
-        method[0] = 'I';
-        method[1] = ' ';
-        meth->name_and_sig_as_C_string(method + 2, 398);
-      }
-      if (bcidx) {
-        if (meth->contains(bcp)) {
-          *bcidx = meth->bci_from(bcp);
-        } else {
-          *bcidx = -2;
-        }
-      }
-      if (decode) {
-        if (!BytecodeTracer::closure()) {
-          BytecodeTracer::set_closure(BytecodeTracer::std_closure());
-        }
-        stringStream str(decode, 400);
-        BytecodeTracer::trace(meth, bcp, &str);
-      }
-    } else {
-      if (method) {
-        CodeBlob *cb = CodeCache::find_blob((address)pc);
-        if (cb != NULL) {
-          if (cb->is_nmethod()) {
-            ResourceMark rm;
-            nmethod* nm = (nmethod*)cb;
-            method[0] = 'C';
-            method[1] = ' ';
-            nm->method()->name_and_sig_as_C_string(method + 2, 398);
-          } else if (cb->is_adapter_blob()) {
-            strcpy(method, "B adapter blob");
-          } else if (cb->is_runtime_stub()) {
-            strcpy(method, "B runtime stub");
-          } else if (cb->is_exception_stub()) {
-            strcpy(method, "B exception stub");
-          } else if (cb->is_deoptimization_stub()) {
-            strcpy(method, "B deoptimization stub");
-          } else if (cb->is_safepoint_stub()) {
-            strcpy(method, "B safepoint stub");
-          } else if (cb->is_uncommon_trap_stub()) {
-            strcpy(method, "B uncommon trap stub");
-          } else if (cb->contains((address)StubRoutines::call_stub())) {
-            strcpy(method, "B call stub");
-          } else {
-            strcpy(method, "B unknown blob : ");
-            strcat(method, cb->name());
-          }
-          if (framesize != NULL) {
-            *framesize = cb->frame_size();
-          }
-        }
-      }
-    }
-  }
-
-
-  JNIEXPORT void bccheck(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
-  {
-    bccheck1(pc, fp, method, bcidx, framesize, decode);
-  }
-}
-
-#endif // BUILTIN_SIM
-#endif // !PRODUCT
-#endif // ! CC_INTERP
--- a/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -43,4 +43,9 @@
 // The expected size in bytes of a cache line, used to pad data structures.
 #define DEFAULT_CACHE_LINE_SIZE 128
 
+#if defined(COMPILER2) && defined(AIX)
+// Include Transactional Memory lock eliding optimization
+#define INCLUDE_RTM_OPT 1
+#endif
+
 #endif // CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
--- a/src/cpu/ppc/vm/globals_ppc.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/ppc/vm/globals_ppc.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -44,14 +44,17 @@
 #define DEFAULT_STACK_YELLOW_PAGES (6)
 #define DEFAULT_STACK_RED_PAGES (1)
 #define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
+#define DEFAULT_STACK_RESERVED_PAGES (0)
 
 #define MIN_STACK_YELLOW_PAGES (1)
 #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
 #define MIN_STACK_SHADOW_PAGES (1)
+#define MIN_STACK_RESERVED_PAGES (0)
 
 define_pd_global(intx, StackYellowPages,      DEFAULT_STACK_YELLOW_PAGES);
 define_pd_global(intx, StackRedPages,         DEFAULT_STACK_RED_PAGES);
 define_pd_global(intx, StackShadowPages,      DEFAULT_STACK_SHADOW_PAGES);
+define_pd_global(intx, StackReservedPages,    DEFAULT_STACK_RESERVED_PAGES);
 
 // Use large code-entry alignment.
 define_pd_global(intx, CodeEntryAlignment,    128);
--- a/src/cpu/ppc/vm/interpreter_ppc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/ppc/vm/interpreter_ppc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -39,7 +39,6 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -61,26 +60,6 @@
 
 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
 
-int AbstractInterpreter::BasicType_as_index(BasicType type) {
-  int i = 0;
-  switch (type) {
-    case T_BOOLEAN: i = 0; break;
-    case T_CHAR   : i = 1; break;
-    case T_BYTE   : i = 2; break;
-    case T_SHORT  : i = 3; break;
-    case T_INT    : i = 4; break;
-    case T_LONG   : i = 5; break;
-    case T_VOID   : i = 6; break;
-    case T_FLOAT  : i = 7; break;
-    case T_DOUBLE : i = 8; break;
-    case T_OBJECT : i = 9; break;
-    case T_ARRAY  : i = 9; break;
-    default       : ShouldNotReachHere();
-  }
-  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
-  return i;
-}
-
 address AbstractInterpreterGenerator::generate_slow_signature_handler() {
   // Slow_signature handler that respects the PPC C calling conventions.
   //
@@ -579,18 +558,3 @@
 
   return NULL;
 }
-
-void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
-  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
-  // the days we had adapter frames. When we deoptimize a situation where a
-  // compiled caller calls a compiled caller will have registers it expects
-  // to survive the call to the callee. If we deoptimize the callee the only
-  // way we can restore these registers is to have the oldest interpreter
-  // frame that we create restore these values. That is what this routine
-  // will accomplish.
-
-  // At the moment we have modified c2 to not have any callee save registers
-  // so this problem does not exist and this routine is just a place holder.
-
-  assert(f->is_interpreted_frame(), "must be interpreted");
-}
--- a/src/cpu/ppc/vm/metaspaceShared_ppc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/ppc/vm/metaspaceShared_ppc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -50,12 +50,29 @@
 // to be 'vtbl_list_size' instances of the vtable in order to
 // differentiate between the 'vtable_list_size' original Klass objects.
 
+#define __ masm->
+
 void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
                                               void** vtable,
                                               char** md_top,
                                               char* md_end,
                                               char** mc_top,
                                               char* mc_end) {
-  Unimplemented();
+  intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*);
+  *(intptr_t *)(*md_top) = vtable_bytes;
+  *md_top += sizeof(intptr_t);
+  void** dummy_vtable = (void**)*md_top;
+  *vtable = dummy_vtable;
+  *md_top += vtable_bytes;
+
+  // Get ready to generate dummy methods.
+
+  CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top);
+  MacroAssembler* masm = new MacroAssembler(&cb);
+
+  // There are more general problems with CDS on ppc, so I cannot
+  // really test this. But having this instead of Unimplemented() allows
+  // us to pass TestOptionsWithRanges.java.
+  __ unimplemented();
 }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,1798 @@
+/*
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#ifndef CC_INTERP
+#include "asm/macroAssembler.inline.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
+#include "interpreter/templateTable.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/timer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
+
+#undef __
+#define __ _masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label)        __ bind(label); BLOCK_COMMENT(#label ":")
+
+//-----------------------------------------------------------------------------
+
+// Actually we should never reach here since we do stack overflow checks before pushing any frame.
+address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
+  address entry = __ pc();
+  __ unimplemented("generate_StackOverflowError_handler");
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
+  address entry = __ pc();
+  __ empty_expression_stack();
+  __ load_const_optimized(R4_ARG2, (address) name);
+  // Index is in R17_tos.
+  __ mr(R5_ARG3, R17_tos);
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException));
+  return entry;
+}
+
+#if 0
+// Call special ClassCastException constructor taking object to cast
+// and target class as arguments.
+address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler() {
+  address entry = __ pc();
+
+  // Expression stack must be empty before entering the VM if an
+  // exception happened.
+  __ empty_expression_stack();
+
+  // Thread will be loaded to R3_ARG1.
+  // Target class oop is in register R5_ARG3 by convention!
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose), R17_tos, R5_ARG3);
+  // Above call must not return here since exception pending.
+  DEBUG_ONLY(__ should_not_reach_here();)
+  return entry;
+}
+#endif
+
+address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
+  address entry = __ pc();
+  // Expression stack must be empty before entering the VM if an
+  // exception happened.
+  __ empty_expression_stack();
+
+  // Load exception object.
+  // Thread will be loaded to R3_ARG1.
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException), R17_tos);
+#ifdef ASSERT
+  // Above call must not return here since exception pending.
+  __ should_not_reach_here();
+#endif
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
+  address entry = __ pc();
+  //__ untested("generate_exception_handler_common");
+  Register Rexception = R17_tos;
+
+  // Expression stack must be empty before entering the VM if an exception happened.
+  __ empty_expression_stack();
+
+  __ load_const_optimized(R4_ARG2, (address) name, R11_scratch1);
+  if (pass_oop) {
+    __ mr(R5_ARG3, Rexception);
+    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), false);
+  } else {
+    __ load_const_optimized(R5_ARG3, (address) message, R11_scratch1);
+    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), false);
+  }
+
+  // Throw exception.
+  __ mr(R3_ARG1, Rexception);
+  __ load_const_optimized(R11_scratch1, Interpreter::throw_exception_entry(), R12_scratch2);
+  __ mtctr(R11_scratch1);
+  __ bctr();
+
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
+  address entry = __ pc();
+  __ unimplemented("generate_continuation_for");
+  return entry;
+}
+
+// This entry is returned to when a call returns to the interpreter.
+// When we arrive here, we expect that the callee stack frame is already popped.
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
+  address entry = __ pc();
+
+  // Move the value out of the return register back to the TOS cache of the current frame.
+  switch (state) {
+    case ltos:
+    case btos:
+    case ctos:
+    case stos:
+    case atos:
+    case itos: __ mr(R17_tos, R3_RET); break;   // RET -> TOS cache
+    case ftos:
+    case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
+    case vtos: break;                           // Nothing to do, this was a void return.
+    default  : ShouldNotReachHere();
+  }
+
+  __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
+  __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
+  __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
+
+  // Compiled code destroys templateTableBase, reload.
+  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);
+
+  if (state == atos) {
+    __ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
+  }
+
+  const Register cache = R11_scratch1;
+  const Register size  = R12_scratch2;
+  __ get_cache_and_index_at_bcp(cache, 1, index_size);
+
+  // Get least significant byte of 64 bit value:
+#if defined(VM_LITTLE_ENDIAN)
+  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()), cache);
+#else
+  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
+#endif
+  __ sldi(size, size, Interpreter::logStackElementSize);
+  __ add(R15_esp, R15_esp, size);
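+  // Illustrative example (hypothetical numbers): with a parameter size of 3
+  // stack slots and 8-byte stack elements (logStackElementSize == 3), size
+  // becomes 3 << 3 == 24, so the three argument slots are popped off the
+  // caller's expression stack before dispatching to the next bytecode.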
+  __ dispatch_next(state, step);
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
+  address entry = __ pc();
+  // If state != vtos, we're returning from a native method, which put its result
+  // into the result register. So move the value out of the return register back
+  // to the TOS cache of the current frame.
+
+  switch (state) {
+    case ltos:
+    case btos:
+    case ctos:
+    case stos:
+    case atos:
+    case itos: __ mr(R17_tos, R3_RET); break;   // RET -> TOS cache
+    case ftos:
+    case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
+    case vtos: break;                           // Nothing to do, this was a void return.
+    default  : ShouldNotReachHere();
+  }
+
+  // Load LcpoolCache @@@ should be already set!
+  __ get_constant_pool_cache(R27_constPoolCache);
+
+  // Handle a pending exception, fall through if none.
+  __ check_and_forward_exception(R11_scratch1, R12_scratch2);
+
+  // Start executing bytecodes.
+  __ dispatch_next(state, step);
+
+  return entry;
+}
+
+// A result handler converts the native result into java format.
+// Use the shared code between c++ and template interpreter.
+address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
+  return AbstractInterpreterGenerator::generate_result_handler_for(type);
+}
+
+address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
+  address entry = __ pc();
+
+  __ push(state);
+  __ call_VM(noreg, runtime_entry);
+  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
+
+  return entry;
+}
+
+// Helpers for commoning out cases in the various type of method entries.
+
+// Increment invocation count & check for overflow.
+//
+// Note: checking for negative value instead of overflow
+//       so we have a 'sticky' overflow test.
+//
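+// In the TieredCompilation path below, overflow is detected with a mask test:
+// we branch to the overflow label once ((counter + increment) & invoke_mask) == 0,
+// i.e. when the masked counter bits wrap (see the and_/beq and and_/bne sequences).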
+void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
+  // Note: In tiered we increment either counters in method or in MDO depending if we're profiling or not.
+  Register Rscratch1   = R11_scratch1;
+  Register Rscratch2   = R12_scratch2;
+  Register R3_counters = R3_ARG1;
+  Label done;
+
+  if (TieredCompilation) {
+    const int increment = InvocationCounter::count_increment;
+    Label no_mdo;
+    if (ProfileInterpreter) {
+      const Register Rmdo = R3_counters;
+      // If no method data exists, go to profile_continue.
+      __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
+      __ cmpdi(CCR0, Rmdo, 0);
+      __ beq(CCR0, no_mdo);
+
+      // Increment invocation counter in the MDO.
+      const int mdo_ic_offs = in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
+      __ lwz(Rscratch2, mdo_ic_offs, Rmdo);
+      __ lwz(Rscratch1, in_bytes(MethodData::invoke_mask_offset()), Rmdo);
+      __ addi(Rscratch2, Rscratch2, increment);
+      __ stw(Rscratch2, mdo_ic_offs, Rmdo);
+      __ and_(Rscratch1, Rscratch2, Rscratch1);
+      __ bne(CCR0, done);
+      __ b(*overflow);
+    }
+
+    // Increment counter in MethodCounters*.
+    const int mo_bc_offs = in_bytes(MethodCounters::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
+    __ bind(no_mdo);
+    __ get_method_counters(R19_method, R3_counters, done);
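+    // (get_method_counters branches to 'done' if no MethodCounters could be allocated.)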
+    __ lwz(Rscratch2, mo_bc_offs, R3_counters);
+    __ lwz(Rscratch1, in_bytes(MethodCounters::invoke_mask_offset()), R3_counters);
+    __ addi(Rscratch2, Rscratch2, increment);
+    __ stw(Rscratch2, mo_bc_offs, R3_counters);
+    __ and_(Rscratch1, Rscratch2, Rscratch1);
+    __ beq(CCR0, *overflow);
+
+    __ bind(done);
+
+  } else {
+
+    // Update standard invocation counters.
+    Register Rsum_ivc_bec = R4_ARG2;
+    __ get_method_counters(R19_method, R3_counters, done);
+    __ increment_invocation_counter(R3_counters, Rsum_ivc_bec, R12_scratch2);
+    // Increment interpreter invocation counter.
+    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop.
+      __ lwz(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
+      __ addi(R12_scratch2, R12_scratch2, 1);
+      __ stw(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
+    }
+    // Check if we must create a method data obj.
+    if (ProfileInterpreter && profile_method != NULL) {
+      const Register profile_limit = Rscratch1;
+      __ lwz(profile_limit, in_bytes(MethodCounters::interpreter_profile_limit_offset()), R3_counters);
+      // Test to see if we should create a method data oop.
+      __ cmpw(CCR0, Rsum_ivc_bec, profile_limit);
+      __ blt(CCR0, *profile_method_continue);
+      // If no method data exists, go to profile_method.
+      __ test_method_data_pointer(*profile_method);
+    }
+    // Finally check for counter overflow.
+    if (overflow) {
+      const Register invocation_limit = Rscratch1;
+      __ lwz(invocation_limit, in_bytes(MethodCounters::interpreter_invocation_limit_offset()), R3_counters);
+      __ cmpw(CCR0, Rsum_ivc_bec, invocation_limit);
+      __ bge(CCR0, *overflow);
+    }
+
+    __ bind(done);
+  }
+}
+
+// Generate code to initiate compilation on invocation counter overflow.
+void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
+  // Generate code to initiate compilation on the counter overflow.
+
+  // InterpreterRuntime::frequency_counter_overflow takes one argument,
+  // which indicates whether the counter overflow occurs at a backwards branch (NULL bcp).
+  // We pass zero in.
+  // The call returns the address of the verified entry point for the method or NULL
+  // if the compilation did not complete (either went background or bailed out).
+  //
+  // Unlike the C++ interpreter above: Check exceptions!
+  // Assumption: Caller must set the flag "do_not_unlock_if_synchronized" if the monitor of a sync'ed
+  // method has not yet been created. Thus, no unlocking of a non-existing monitor can occur.
+
+  __ li(R4_ARG2, 0);
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
+
+  // Returns verified_entry_point or NULL.
+  // We ignore it in any case.
+  __ b(continue_entry);
+}
+
+void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1) {
+  assert_different_registers(Rmem_frame_size, Rscratch1);
+  __ generate_stack_overflow_check_with_compare_and_throw(Rmem_frame_size, Rscratch1);
+}
+
+void TemplateInterpreterGenerator::unlock_method(bool check_exceptions) {
+  __ unlock_object(R26_monitor, check_exceptions);
+}
+
+// Lock the current method, interpreter register window must be set up!
+void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
+  const Register Robj_to_lock = Rscratch2;
+
+  {
+    if (!flags_preloaded) {
+      __ lwz(Rflags, method_(access_flags));
+    }
+
+#ifdef ASSERT
+    // Check if the method needs synchronization.
+    {
+      Label Lok;
+      __ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
+      __ btrue(CCR0, Lok);
+      __ stop("method doesn't need synchronization");
+      __ bind(Lok);
+    }
+#endif // ASSERT
+  }
+
+  // Get synchronization object to Rscratch2.
+  {
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+    Label Lstatic;
+    Label Ldone;
+
+    __ testbitdi(CCR0, R0, Rflags, JVM_ACC_STATIC_BIT);
+    __ btrue(CCR0, Lstatic);
+
+    // Non-static case: load receiver obj from stack and we're done.
+    __ ld(Robj_to_lock, R18_locals);
+    __ b(Ldone);
+
+    __ bind(Lstatic); // Static case: Lock the java mirror
+    __ ld(Robj_to_lock, in_bytes(Method::const_offset()), R19_method);
+    __ ld(Robj_to_lock, in_bytes(ConstMethod::constants_offset()), Robj_to_lock);
+    __ ld(Robj_to_lock, ConstantPool::pool_holder_offset_in_bytes(), Robj_to_lock);
+    __ ld(Robj_to_lock, mirror_offset, Robj_to_lock);
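+    // I.e., Robj_to_lock = method->constants()->pool_holder()->java_mirror().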
+
+    __ bind(Ldone);
+    __ verify_oop(Robj_to_lock);
+  }
+
+  // Got the oop to lock => execute!
+  __ add_monitor_to_stack(true, Rscratch1, R0);
+
+  __ std(Robj_to_lock, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
+  __ lock_object(R26_monitor, Robj_to_lock);
+}
+
+// Generate a fixed interpreter frame for pure interpreter
+// and I2N native transition frames.
+//
+// Before (stack grows downwards):
+//
+//         |  ...         |
+//         |------------- |
+//         |  java arg0   |
+//         |  ...         |
+//         |  java argn   |
+//         |              |   <-   R15_esp
+//         |              |
+//         |--------------|
+//         | abi_112      |
+//         |              |   <-   R1_SP
+//         |==============|
+//
+//
+// After:
+//
+//         |  ...         |
+//         |  java arg0   |<-   R18_locals
+//         |  ...         |
+//         |  java argn   |
+//         |--------------|
+//         |              |
+//         |  java locals |
+//         |              |
+//         |--------------|
+//         |  abi_48      |
+//         |==============|
+//         |              |
+//         |   istate     |
+//         |              |
+//         |--------------|
+//         |   monitor    |<-   R26_monitor
+//         |--------------|
+//         |              |<-   R15_esp
+//         | expression   |
+//         | stack        |
+//         |              |
+//         |--------------|
+//         |              |
+//         | abi_112      |<-   R1_SP
+//         |==============|
+//
+// The topmost frame needs an ABI space of 112 bytes. This space is needed
+// because we call into C, and the C function may spill its arguments to the
+// caller's frame. When we call into Java, we don't need these spill slots. In
+// order to save space on the stack, we resize the caller's frame. However, the
+// Java locals reside in the caller's frame, so the frame may have to grow. The
+// frame_size for the current frame was calculated based on max_stack as the
+// size of the expression stack. At the call, only a part of the expression
+// stack might be in use. We don't want to waste this space and cut the frame
+// back accordingly.
+// The resulting amount for resizing is calculated as follows:
+// resize =   (number_of_locals - number_of_arguments) * slot_size
+//          + (R1_SP - R15_esp) + 48
+//
+// The size for the callee frame is calculated:
+// framesize = 112 + max_stack + monitor + state_size
+//
+// max_stack:  Max number of slots on the expression stack, loaded from the method.
+// monitor:    We statically reserve room for one monitor object.
+// state_size: We save the current state of the interpreter to this area.
+//
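+// Worked example (hypothetical numbers): a method with 4 parameter slots and
+// 10 local slots on 8-byte stack elements needs (10 - 4) * 8 = 48 extra bytes
+// in the caller's frame for the non-parameter locals; the (R1_SP - R15_esp) + 48
+// term then cuts the frame back by the currently unused part of the expression
+// stack, keeping only the minimal ABI area.
+//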
+void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals) {
+  Register parent_frame_resize = R6_ARG4, // Frame will grow by this number of bytes.
+           top_frame_size      = R7_ARG5,
+           Rconst_method       = R8_ARG6;
+
+  assert_different_registers(Rsize_of_parameters, Rsize_of_locals, parent_frame_resize, top_frame_size);
+
+  __ ld(Rconst_method, method_(const));
+  __ lhz(Rsize_of_parameters /* number of params */,
+         in_bytes(ConstMethod::size_of_parameters_offset()), Rconst_method);
+  if (native_call) {
+    // If we're calling a native method, we reserve space for the worst-case signature
+    // handler varargs vector, which is max(Argument::n_register_parameters, parameter_count+2).
+    // We add two slots to the parameter_count, one for the jni
+    // environment and one for a possible native mirror.
+    Label skip_native_calculate_max_stack;
+    __ addi(top_frame_size, Rsize_of_parameters, 2);
+    __ cmpwi(CCR0, top_frame_size, Argument::n_register_parameters);
+    __ bge(CCR0, skip_native_calculate_max_stack);
+    __ li(top_frame_size, Argument::n_register_parameters);
+    __ bind(skip_native_calculate_max_stack);
+    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
+    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
+    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
+    assert(Rsize_of_locals == noreg, "Rsize_of_locals not initialized"); // Only relevant value is Rsize_of_parameters.
+  } else {
+    __ lhz(Rsize_of_locals /* number of locals */, in_bytes(ConstMethod::size_of_locals_offset()), Rconst_method);
+    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
+    __ sldi(Rsize_of_locals, Rsize_of_locals, Interpreter::logStackElementSize);
+    __ lhz(top_frame_size, in_bytes(ConstMethod::max_stack_offset()), Rconst_method);
+    __ sub(R11_scratch1, Rsize_of_locals, Rsize_of_parameters); // >=0
+    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
+    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
+    __ add(parent_frame_resize, parent_frame_resize, R11_scratch1);
+  }
+
+  // Compute top frame size.
+  __ addi(top_frame_size, top_frame_size, frame::abi_reg_args_size + frame::ijava_state_size);
+
+  // Cut back area between esp and max_stack.
+  __ addi(parent_frame_resize, parent_frame_resize, frame::abi_minframe_size - Interpreter::stackElementSize);
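+  // (The -Interpreter::stackElementSize term corrects the off-by-one of
+  // parent_frame_resize noted above.)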
+
+  __ round_to(top_frame_size, frame::alignment_in_bytes);
+  __ round_to(parent_frame_resize, frame::alignment_in_bytes);
+  // parent_frame_resize = (locals-parameters) - (ESP-SP-ABI48), rounded to frame alignment size.
+  // Enlarge by locals-parameters (not in case of native_call), shrink by ESP-SP-ABI48.
+
+  {
+    // --------------------------------------------------------------------------
+    // Stack overflow check
+
+    Label cont;
+    __ add(R11_scratch1, parent_frame_resize, top_frame_size);
+    generate_stack_overflow_check(R11_scratch1, R12_scratch2);
+  }
+
+  // Set up interpreter state registers.
+
+  __ add(R18_locals, R15_esp, Rsize_of_parameters);
+  __ ld(R27_constPoolCache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
+  __ ld(R27_constPoolCache, ConstantPool::cache_offset_in_bytes(), R27_constPoolCache);
+
+  // Set method data pointer.
+  if (ProfileInterpreter) {
+    Label zero_continue;
+    __ ld(R28_mdx, method_(method_data));
+    __ cmpdi(CCR0, R28_mdx, 0);
+    __ beq(CCR0, zero_continue);
+    __ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
+    __ bind(zero_continue);
+  }
+
+  if (native_call) {
+    __ li(R14_bcp, 0); // Must initialize.
+  } else {
+    __ add(R14_bcp, in_bytes(ConstMethod::codes_offset()), Rconst_method);
+  }
+
+  // Resize parent frame.
+  __ mflr(R12_scratch2);
+  __ neg(parent_frame_resize, parent_frame_resize);
+  __ resize_frame(parent_frame_resize, R11_scratch1);
+  __ std(R12_scratch2, _abi(lr), R1_SP);
+
+  __ addi(R26_monitor, R1_SP, - frame::ijava_state_size);
+  __ addi(R15_esp, R26_monitor, - Interpreter::stackElementSize);
+
+  // Store values.
+  // R15_esp, R14_bcp, R26_monitor, R28_mdx are saved at java calls
+  // in InterpreterMacroAssembler::call_from_interpreter.
+  __ std(R19_method, _ijava_state_neg(method), R1_SP);
+  __ std(R21_sender_SP, _ijava_state_neg(sender_sp), R1_SP);
+  __ std(R27_constPoolCache, _ijava_state_neg(cpoolCache), R1_SP);
+  __ std(R18_locals, _ijava_state_neg(locals), R1_SP);
+
+  // Note: esp, bcp, monitor, mdx live in registers. Hence, the correct version can only
+  // be found in the frame after save_interpreter_state is done. This is always true
+  // for non-top frames. But when a signal occurs, dumping the top frame can go wrong,
+  // because e.g. frame::interpreter_frame_bcp() will not access the correct value
+  // (Enhanced Stack Trace).
+  // The signal handler does not save the interpreter state into the frame.
+  __ li(R0, 0);
+#ifdef ASSERT
+  // Fill remaining slots with constants.
+  __ load_const_optimized(R11_scratch1, 0x5afe);
+  __ load_const_optimized(R12_scratch2, 0xdead);
+#endif
+  // We have to initialize some frame slots for native calls (accessed by GC).
+  if (native_call) {
+    __ std(R26_monitor, _ijava_state_neg(monitors), R1_SP);
+    __ std(R14_bcp, _ijava_state_neg(bcp), R1_SP);
+    if (ProfileInterpreter) { __ std(R28_mdx, _ijava_state_neg(mdx), R1_SP); }
+  }
+#ifdef ASSERT
+  else {
+    __ std(R12_scratch2, _ijava_state_neg(monitors), R1_SP);
+    __ std(R12_scratch2, _ijava_state_neg(bcp), R1_SP);
+    __ std(R12_scratch2, _ijava_state_neg(mdx), R1_SP);
+  }
+  __ std(R11_scratch1, _ijava_state_neg(ijava_reserved), R1_SP);
+  __ std(R12_scratch2, _ijava_state_neg(esp), R1_SP);
+  __ std(R12_scratch2, _ijava_state_neg(lresult), R1_SP);
+  __ std(R12_scratch2, _ijava_state_neg(fresult), R1_SP);
+#endif
+  __ subf(R12_scratch2, top_frame_size, R1_SP);
+  __ std(R0, _ijava_state_neg(oop_tmp), R1_SP);
+  __ std(R12_scratch2, _ijava_state_neg(top_frame_sp), R1_SP);
+
+  // Push top frame.
+  __ push_frame(top_frame_size, R11_scratch1);
+}
+
+// End of helpers
+
+address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
+  if (!TemplateInterpreter::math_entry_available(kind)) {
+    NOT_PRODUCT(__ should_not_reach_here();)
+    return NULL;
+  }
+
+  address entry = __ pc();
+
+  __ lfd(F1_RET, Interpreter::stackElementSize, R15_esp);
+
+  // Pop c2i arguments (if any) off when we return.
+#ifdef ASSERT
+  __ ld(R9_ARG7, 0, R1_SP);
+  __ ld(R10_ARG8, 0, R21_sender_SP);
+  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
+  __ asm_assert_eq("backlink", 0x545);
+#endif // ASSERT
+  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+
+  if (kind == Interpreter::java_lang_math_sqrt) {
+    __ fsqrt(F1_RET, F1_RET);
+  } else if (kind == Interpreter::java_lang_math_abs) {
+    __ fabs(F1_RET, F1_RET);
+  } else {
+    ShouldNotReachHere();
+  }
+
+  // And we're done.
+  __ blr();
+
+  __ flush();
+
+  return entry;
+}
+
+// Interpreter stub for calling a native method. (asm interpreter)
+// This sets up a somewhat different looking stack for calling the
+// native method than the typical interpreter frame setup.
+//
+// On entry:
+//   R19_method    - method
+//   R16_thread    - JavaThread*
+//   R15_esp       - intptr_t* sender tos
+//
+//   abstract stack (grows up)
+//     [  IJava (caller of JNI callee)  ]  <-- ASP
+//        ...
+address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
+
+  address entry = __ pc();
+
+  const bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+
+  // -----------------------------------------------------------------------------
+  // Allocate a new frame that represents the native callee (i2n frame).
+  // This is not a full-blown interpreter frame, but in particular, the
+  // following registers are valid after this:
+  // - R19_method
+  // - R18_locals (points to the start of the arguments to the native function)
+  //
+  //   abstract stack (grows up)
+  //     [  IJava (caller of JNI callee)  ]  <-- ASP
+  //        ...
+
+  const Register signature_handler_fd = R11_scratch1;
+  const Register pending_exception    = R0;
+  const Register result_handler_addr  = R31;
+  const Register native_method_fd     = R11_scratch1;
+  const Register access_flags         = R22_tmp2;
+  const Register active_handles       = R11_scratch1; // R26_monitor saved to state.
+  const Register sync_state           = R12_scratch2;
+  const Register sync_state_addr      = sync_state;   // Address is dead after use.
+  const Register suspend_flags        = R11_scratch1;
+
+  //=============================================================================
+  // Allocate new frame and initialize interpreter state.
+
+  Label exception_return;
+  Label exception_return_sync_check;
+  Label stack_overflow_return;
+
+  // Generate new interpreter state and jump to stack_overflow_return in case of
+  // a stack overflow.
+  //generate_compute_interpreter_state(stack_overflow_return);
+
+  Register size_of_parameters = R22_tmp2;
+
+  generate_fixed_frame(true, size_of_parameters, noreg /* unused */);
+
+  //=============================================================================
+  // Increment invocation counter. On overflow, entry to JNI method
+  // will be compiled.
+  Label invocation_counter_overflow, continue_after_compile;
+  if (inc_counter) {
+    if (synchronized) {
+      // Since at this point in the method invocation the exception handler
+      // would try to exit the monitor of a synchronized method which has not
+      // been entered yet, we set the thread-local variable
+      // _do_not_unlock_if_synchronized to true. If any exception is thrown by
+      // the runtime, the exception handling (i.e. unlock_if_synchronized_method)
+      // will check this thread-local flag: it forces an unwind in the topmost
+      // interpreter frame without performing an unlock while doing so.
+      __ li(R0, 1);
+      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
+    }
+    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
+
+    BIND(continue_after_compile);
+    // Reset the _do_not_unlock_if_synchronized flag.
+    if (synchronized) {
+      __ li(R0, 0);
+      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
+    }
+  }
+
+  // access_flags = method->access_flags();
+  // Load access flags.
+  assert(access_flags->is_nonvolatile(),
+         "access_flags must be in a non-volatile register");
+  // Type check.
+  assert(4 == sizeof(AccessFlags), "unexpected field size");
+  __ lwz(access_flags, method_(access_flags));
+
+  // We don't want to reload R19_method and access_flags after calls
+  // to some helper functions.
+  assert(R19_method->is_nonvolatile(),
+         "R19_method must be a non-volatile register");
+
+  // Check for synchronized methods. Must happen AFTER invocation counter
+  // check, so the method is not locked if the counter overflows.
+
+  if (synchronized) {
+    lock_method(access_flags, R11_scratch1, R12_scratch2, true);
+
+    // Update monitor in state.
+    __ ld(R11_scratch1, 0, R1_SP);
+    __ std(R26_monitor, _ijava_state_neg(monitors), R11_scratch1);
+  }
+
+  // jvmti/jvmpi support
+  __ notify_method_entry();
+
+  //=============================================================================
+  // Get and call the signature handler.
+
+  __ ld(signature_handler_fd, method_(signature_handler));
+  Label call_signature_handler;
+
+  __ cmpdi(CCR0, signature_handler_fd, 0);
+  __ bne(CCR0, call_signature_handler);
+
+  // Method has never been called. Either generate a specialized
+  // handler or point to the slow one.
+  //
+  // Pass parameter 'false' to avoid exception check in call_VM.
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);
+
+  // Check for an exception while looking up the target method. If we
+  // incurred one, bail.
+  __ ld(pending_exception, thread_(pending_exception));
+  __ cmpdi(CCR0, pending_exception, 0);
+  __ bne(CCR0, exception_return_sync_check); // Has pending exception.
+
+  // Reload signature handler, it may have been created/assigned in the meantime.
+  __ ld(signature_handler_fd, method_(signature_handler));
+  __ twi_0(signature_handler_fd); // Order wrt. load of klass mirror and entry point (isync is below).
+
+  BIND(call_signature_handler);
+
+  // Before we call the signature handler we push a new frame to
+  // protect the interpreter frame volatile registers when we return
+  // from jni but before we can get back to Java.
+
+  // First set the frame anchor while the SP/FP registers are
+  // convenient and the slow signature handler can use this same frame
+  // anchor.
+
+  // We have a TOP_IJAVA_FRAME here, which belongs to us.
+  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
+
+  // Now the interpreter frame (and its call chain) have been
+  // invalidated and flushed. We are now protected against eager
+  // being enabled in native code. Even if it goes eager the
+  // registers will be reloaded as clean and we will invalidate after
+  // the call so no spurious flush should be possible.
+
+  // Call signature handler and pass locals address.
+  //
+  // Our signature handlers copy required arguments to the C stack
+  // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
+  __ mr(R3_ARG1, R18_locals);
+#if !defined(ABI_ELFv2)
+  __ ld(signature_handler_fd, 0, signature_handler_fd);
+#endif
+
+  __ call_stub(signature_handler_fd);
+
+  // Remove the register parameter varargs slots we allocated in
+  // compute_interpreter_state. SP+16 ends up pointing to the ABI
+  // outgoing argument area.
+  //
+  // Not needed on PPC64.
+  //__ add(SP, SP, Argument::n_register_parameters*BytesPerWord);
+
+  assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
+  // Save across call to native method.
+  __ mr(result_handler_addr, R3_RET);
+
+  __ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror.
+
+  // Set up fixed parameters and call the native method.
+  // If the method is static, get mirror into R4_ARG2.
+  {
+    Label method_is_not_static;
+    // access_flags is non-volatile and still valid, no need to reload it.
+
+    // Check whether the method is static.
+    __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
+    __ bfalse(CCR0, method_is_not_static);
+
+    // constants = method->constants();
+    __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
+    __ ld(R11_scratch1, in_bytes(ConstMethod::constants_offset()), R11_scratch1);
+    // pool_holder = method->constants()->pool_holder();
+    __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(),
+          R11_scratch1/*constants*/);
+
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+
+    // mirror = pool_holder->klass_part()->java_mirror();
+    __ ld(R0/*mirror*/, mirror_offset, R11_scratch1/*pool_holder*/);
+    // state->_native_mirror = mirror;
+
+    __ ld(R11_scratch1, 0, R1_SP);
+    __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
+    // R4_ARG2 = &state->_oop_temp;
+    __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
+    BIND(method_is_not_static);
+  }
+
+  // At this point, arguments have been copied off the stack into
+  // their JNI positions. Oops are boxed in-place on the stack, with
+  // handles copied to arguments. The result handler address is in a
+  // register.
+
+  // Pass JNIEnv address as first parameter.
+  __ addir(R3_ARG1, thread_(jni_environment));
+
+  // Load the native_method entry before we change the thread state.
+  __ ld(native_method_fd, method_(native_function));
+
+  //=============================================================================
+  // Transition from _thread_in_Java to _thread_in_native. As soon as
+  // we make this change the safepoint code needs to be certain that
+  // the last Java frame we established is good. The pc in that frame
+  // just needs to be near here, not an actual return address.
+
+  // We use release_store_fence to update values like the thread state, where
+  // we don't want the current thread to continue until all our prior memory
+  // accesses (including the new thread state) are visible to other threads.
+  __ li(R0, _thread_in_native);
+  __ release();
+
+  // TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
+  __ stw(R0, thread_(thread_state));
+
+  if (UseMembar) {
+    __ fence();
+  }
+
+  //=============================================================================
+  // Call the native method. Argument registers must not have been
+  // overwritten since "__ call_stub(signature_handler);" (except for
+  // ARG1 and ARG2 for static methods).
+  __ call_c(native_method_fd);
+
+  __ li(R0, 0);
+  __ ld(R11_scratch1, 0, R1_SP);
+  __ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
+  __ stfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
+  __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1); // reset
+
+  // Note: C++ interpreter needs the following here:
+  // The frame_manager_lr field, which we use for setting the last
+  // java frame, gets overwritten by the signature handler. Restore
+  // it now.
+  //__ get_PC_trash_LR(R11_scratch1);
+  //__ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
+
+  // Because of GC R19_method may no longer be valid.
+
+  // Block, if necessary, before resuming in _thread_in_Java state.
+  // In order for GC to work, don't clear the last_Java_sp until after
+  // blocking.
+
+  //=============================================================================
+  // Switch thread to "native transition" state before reading the
+  // synchronization state. This additional state is necessary
+  // because reading and testing the synchronization state is not
+  // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
+  // in _thread_in_native state, loads _not_synchronized and is
+  // preempted. VM thread changes sync state to synchronizing and
+  // suspends threads for GC. Thread A is resumed to finish this
+  // native method, but doesn't block here since it didn't see any
+  // synchronization in progress, and escapes.
+
+  // We use release_store_fence to update values like the thread state, where
+  // we don't want the current thread to continue until all our prior memory
+  // accesses (including the new thread state) are visible to other threads.
+  __ li(R0/*thread_state*/, _thread_in_native_trans);
+  __ release();
+  __ stw(R0/*thread_state*/, thread_(thread_state));
+  if (UseMembar) {
+    __ fence();
+  }
+  // Write serialization page so that the VM thread can do a pseudo remote
+  // membar. We use the current thread pointer to calculate a thread
+  // specific offset to write to within the page. This minimizes bus
+  // traffic due to cache line collision.
+  else {
+    __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2);
+  }
+
+  // Now before we return to java we must look for a current safepoint
+  // (a new safepoint can not start since we entered native_trans).
+  // We must check here because a current safepoint could be modifying
+  // the caller's registers right this moment.
+
+  // Acquire isn't strictly necessary here because of the fence, but
+  // sync_state is declared to be volatile, so we do it anyway
+  // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
+  int sync_state_offs = __ load_const_optimized(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
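+  // Note: with return_simm16_rest == true, load_const_optimized materializes only
+  // the upper part of the address and returns the low 16-bit rest, which is then
+  // folded into the following lwz as its displacement.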
+
+  // TODO PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
+  __ lwz(sync_state, sync_state_offs, sync_state_addr);
+
+  // TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
+  __ lwz(suspend_flags, thread_(suspend_flags));
+
+  Label sync_check_done;
+  Label do_safepoint;
+  // No synchronization in progress nor yet synchronized.
+  __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
+  // Not suspended.
+  __ cmpwi(CCR1, suspend_flags, 0);
+
+  __ bne(CCR0, do_safepoint);
+  __ beq(CCR1, sync_check_done);
+  __ bind(do_safepoint);
+  __ isync();
+  // Block. We do the call directly and leave the current
+  // last_Java_frame setup undisturbed. We must save any possible
+  // native result across the call. No oop is present.
+
+  __ mr(R3_ARG1, R16_thread);
+#if defined(ABI_ELFv2)
+  __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
+            relocInfo::none);
+#else
+  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
+            relocInfo::none);
+#endif
+
+  __ bind(sync_check_done);
+
+  //=============================================================================
+  // <<<<<< Back in Interpreter Frame >>>>>
+
+  // We are in thread_in_native_trans here and back in the normal
+  // interpreter frame. We don't have to do anything special about
+  // safepoints and we can switch to Java mode anytime we are ready.
+
+  // Note: frame::interpreter_frame_result has a dependency on how the
+  // method result is saved across the call to post_method_exit. For
+  // native methods it assumes that the non-FPU/non-void result is
+  // saved in _native_lresult and a FPU result in _native_fresult. If
+  // this changes then the interpreter_frame_result implementation
+  // will need to be updated too.
+
+  // On PPC64, we have stored the result directly after the native call.
+
+  //=============================================================================
+  // Back in Java
+
+  // We use release_store_fence to update values like the thread state, where
+  // we don't want the current thread to continue until all our prior memory
+  // accesses (including the new thread state) are visible to other threads.
+  __ li(R0/*thread_state*/, _thread_in_Java);
+  __ release();
+  __ stw(R0/*thread_state*/, thread_(thread_state));
+  if (UseMembar) {
+    __ fence();
+  }
+
+  __ reset_last_Java_frame();
+
+  // Jvmdi/jvmpi support. Whether we've got an exception pending or
+  // not, and whether unlocking throws an exception or not, we notify
+  // on native method exit. If we do have an exception, we'll end up
+  // in the caller's context to handle it, so if we don't do the
+  // notify here, we'll drop it on the floor.
+  __ notify_method_exit(true/*native method*/,
+                        ilgl /*illegal state (not used for native methods)*/,
+                        InterpreterMacroAssembler::NotifyJVMTI,
+                        false /*check_exceptions*/);
+
+  //=============================================================================
+  // Handle exceptions
+
+  if (synchronized) {
+    // Don't check for exceptions since we're still in the i2n frame. Do that
+    // manually afterwards.
+    unlock_method(false);
+  }
+
+  // Reset active handles after returning from native.
+  // thread->active_handles()->clear();
+  __ ld(active_handles, thread_(active_handles));
+  // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
+  __ li(R0, 0);
+  __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);
+
+  Label exception_return_sync_check_already_unlocked;
+  __ ld(R0/*pending_exception*/, thread_(pending_exception));
+  __ cmpdi(CCR0, R0/*pending_exception*/, 0);
+  __ bne(CCR0, exception_return_sync_check_already_unlocked);
+
+  //-----------------------------------------------------------------------------
+  // No exception pending.
+
+  // Move native method result back into proper registers and return.
+  // Invoke result handler (may unbox/promote).
+  __ ld(R11_scratch1, 0, R1_SP);
+  __ ld(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
+  __ lfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
+  __ call_stub(result_handler_addr);
+
+  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
+
+  // Must use the return pc which was loaded from the caller's frame
+  // as the VM uses return-pc-patching for deoptimization.
+  __ mtlr(R0);
+  __ blr();
+
+  //-----------------------------------------------------------------------------
+  // An exception is pending. We call into the runtime only if the
+  // caller was not interpreted. If it was interpreted the
+  // interpreter will do the correct thing. If it isn't interpreted
+  // (call stub/compiled code) we will change our return and continue.
+
+  BIND(exception_return_sync_check);
+
+  if (synchronized) {
+    // Don't check for exceptions since we're still in the i2n frame. Do that
+    // manually afterwards.
+    unlock_method(false);
+  }
+  BIND(exception_return_sync_check_already_unlocked);
+
+  const Register return_pc = R31;
+
+  __ ld(return_pc, 0, R1_SP);
+  __ ld(return_pc, _abi(lr), return_pc);
+
+  // Get the address of the exception handler.
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
+                  R16_thread,
+                  return_pc /* return pc */);
+  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2);
+
+  // Load the PC of the exception handler into LR.
+  __ mtlr(R3_RET);
+
+  // Load exception into R3_ARG1 and clear pending exception in thread.
+  __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
+  __ li(R4_ARG2, 0);
+  __ std(R4_ARG2, thread_(pending_exception));
+
+  // Load the original return pc into R4_ARG2.
+  __ mr(R4_ARG2/*issuing_pc*/, return_pc);
+
+  // Return to exception handler.
+  __ blr();
+
+  //=============================================================================
+  // Counter overflow.
+
+  if (inc_counter) {
+    // Handle invocation counter overflow.
+    __ bind(invocation_counter_overflow);
+
+    generate_counter_overflow(continue_after_compile);
+  }
+
+  return entry;
+}
+
+// Generic interpreted method entry to (asm) interpreter.
+//
+address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
+  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+  address entry = __ pc();
+  // Generate the code to allocate the interpreter stack frame.
+  Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame.
+           Rsize_of_locals     = R5_ARG3; // Written by generate_fixed_frame.
+
+  generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);
+
+  // --------------------------------------------------------------------------
+  // Zero out non-parameter locals.
+  // Note: *Always* zero out non-parameter locals as Sparc does. It's not
+  // worth checking the flag; just do it.
+  Register Rslot_addr = R6_ARG4,
+           Rnum       = R7_ARG5;
+  Label Lno_locals, Lzero_loop;
+
+  // Set up the zeroing loop.
+  __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
+  __ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
+  __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);
+  __ beq(CCR0, Lno_locals);
+  __ li(R0, 0);
+  __ mtctr(Rnum);
+
+  // The zero locals loop.
+  __ bind(Lzero_loop);
+  __ std(R0, 0, Rslot_addr);
+  __ addi(Rslot_addr, Rslot_addr, -Interpreter::stackElementSize);
+  __ bdnz(Lzero_loop);
+
+  __ bind(Lno_locals);
+
+  // --------------------------------------------------------------------------
+  // Counter increment and overflow check.
+  Label invocation_counter_overflow,
+        profile_method,
+        profile_method_continue;
+  if (inc_counter || ProfileInterpreter) {
+
+    Register Rdo_not_unlock_if_synchronized_addr = R11_scratch1;
+    if (synchronized) {
+      // Since at this point in the method invocation the exception handler
+      // would try to exit the monitor of a synchronized method which has not
+      // been entered yet, we set the thread-local variable
+      // _do_not_unlock_if_synchronized to true. If any exception is thrown by
+      // the runtime, the exception handling (i.e. unlock_if_synchronized_method)
+      // will check this thread-local flag: it forces an unwind in the topmost
+      // interpreter frame without performing an unlock while doing so.
+      __ li(R0, 1);
+      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
+    }
+
+    // Argument and return type profiling.
+    __ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4);
+
+    // Increment invocation counter and check for overflow.
+    if (inc_counter) {
+      generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
+    }
+
+    __ bind(profile_method_continue);
+
+    // Reset the _do_not_unlock_if_synchronized flag.
+    if (synchronized) {
+      __ li(R0, 0);
+      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
+    }
+  }
+
+  // --------------------------------------------------------------------------
+  // Locking of synchronized methods. Must happen AFTER invocation_counter
+  // check and stack overflow check, so the method is not locked if the counter overflows.
+  if (synchronized) {
+    lock_method(R3_ARG1, R4_ARG2, R5_ARG3);
+  }
+#ifdef ASSERT
+  else {
+    Label Lok;
+    __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
+    __ andi_(R0, R0, JVM_ACC_SYNCHRONIZED);
+    __ asm_assert_eq("method needs synchronization", 0x8521);
+    __ bind(Lok);
+  }
+#endif // ASSERT
+
+  __ verify_thread();
+
+  // --------------------------------------------------------------------------
+  // JVMTI support
+  __ notify_method_entry();
+
+  // --------------------------------------------------------------------------
+  // Start executing instructions.
+  __ dispatch_next(vtos);
+
+  // --------------------------------------------------------------------------
+  // Out of line counter overflow and MDO creation code.
+  if (ProfileInterpreter) {
+    // We have decided to profile this method in the interpreter.
+    __ bind(profile_method);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+    __ set_method_data_pointer_for_bcp();
+    __ b(profile_method_continue);
+  }
+
+  if (inc_counter) {
+    // Handle invocation counter overflow.
+    __ bind(invocation_counter_overflow);
+    generate_counter_overflow(profile_method_continue);
+  }
+  return entry;
+}
+
+// CRC32 Intrinsics.
+//
+// Contract on scratch and work registers.
+// =======================================
+//
+// On ppc, the register set {R2..R12} is available in the interpreter as scratch/work registers.
+// You should, however, keep in mind that {R3_ARG1..R10_ARG8} is the C-ABI argument register set.
+// You can't rely on these registers across calls.
+//
+// The generators for CRC32_update and for CRC32_updateBytes use the
+// scratch/work register set internally, passing the work registers
+// as arguments to the MacroAssembler emitters as required.
+//
+// R3_ARG1..R6_ARG4 are preset to hold the incoming java arguments.
+// Their contents are not constant but may change according to the requirements
+// of the emitted code.
+//
+// All other registers from the scratch/work register set are used "internally"
+// and contain garbage (i.e. unpredictable values) once blr() is reached.
+// Basically, only R3_RET contains a defined value which is the function result.
+//
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.update(int crc, int b)
+ */
+address InterpreterGenerator::generate_CRC32_update_entry() {
+  if (UseCRC32Intrinsics) {
+    address start = __ pc();  // Remember stub start address (is rtn value).
+    Label slow_path;
+
+    // Safepoint check
+    const Register sync_state = R11_scratch1;
+    int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
+    __ lwz(sync_state, sync_state_offs, sync_state);
+    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
+    __ bne(CCR0, slow_path);
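+    // If a safepoint is in progress we bail out to the vanilla native entry
+    // (slow_path below); the inline intrinsic path itself performs no safepoint check.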
+
+    // We don't generate a local frame and don't align the stack, because
+    // we don't even call stub code (we generate the code inline)
+    // and there is no safepoint on this path.
+
+    // Load java parameters.
+    // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
+    const Register argP    = R15_esp;
+    const Register crc     = R3_ARG1;  // crc value
+    const Register data    = R4_ARG2;  // address of java byte value (kernel_crc32 needs address)
+    const Register dataLen = R5_ARG3;  // source data len (1 byte). Not used because calling the single-byte emitter.
+    const Register table   = R6_ARG4;  // address of crc32 table
+    const Register tmp     = dataLen;  // Reuse unused len register to show we don't actually need a separate tmp here.
+
+    BLOCK_COMMENT("CRC32_update {");
+
+    // Arguments are reversed on java expression stack
+#ifdef VM_LITTLE_ENDIAN
+    __ addi(data, argP, 0+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
+                                       // Being passed as an int, the single byte is at offset +0.
+#else
+    __ addi(data, argP, 3+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
+                                       // Being passed from java as an int, the single byte is at offset +3.
+#endif
+    __ lwz(crc,  2*wordSize, argP);    // Current crc state, zero extend to 64 bit to have a clean register.
+
+    StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
+    __ kernel_crc32_singleByte(crc, data, dataLen, table, tmp);
+
+    // Restore caller sp for c2i case and return.
+    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+    __ blr();
+
+    // Generate a vanilla native entry as the slow path.
+    BLOCK_COMMENT("} CRC32_update");
+    BIND(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
+    return start;
+  }
+
+  return NULL;
+}
+
+// CRC32 Intrinsics.
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.updateBytes(     int crc, byte[] b,  int off, int len)
+ *   int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
+ */
+address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  if (UseCRC32Intrinsics) {
+    address start = __ pc();  // Remember stub start address (is rtn value).
+    Label slow_path;
+
+    // Safepoint check
+    const Register sync_state = R11_scratch1;
+    int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
+    __ lwz(sync_state, sync_state_offs, sync_state);
+    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
+    __ bne(CCR0, slow_path);
+
+    // We don't generate a local frame and don't align the stack, because
+    // we don't even call stub code (we generate the code inline)
+    // and there is no safepoint on this path.
+
+    // Load parameters.
+    // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
+    const Register argP    = R15_esp;
+    const Register crc     = R3_ARG1;  // crc value
+    const Register data    = R4_ARG2;  // address of java byte array
+    const Register dataLen = R5_ARG3;  // source data len
+    const Register table   = R6_ARG4;  // address of crc32 table
+
+    const Register t0      = R9;       // scratch registers for crc calculation
+    const Register t1      = R10;
+    const Register t2      = R11;
+    const Register t3      = R12;
+
+    const Register tc0     = R2;       // registers to hold pre-calculated column addresses
+    const Register tc1     = R7;
+    const Register tc2     = R8;
+    const Register tc3     = table;    // table address is reconstructed at the end of kernel_crc32_* emitters
+
+    const Register tmp     = t0;       // Only used very locally to calculate byte buffer address.
+
+    // Arguments are reversed on java expression stack.
+    // Calculate address of start element.
+    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { // Used for "updateByteBuffer direct".
+      BLOCK_COMMENT("CRC32_updateByteBuffer {");
+      // crc     @ (SP + 5W) (32bit)
+      // buf     @ (SP + 3W) (64bit ptr to long array)
+      // off     @ (SP + 2W) (32bit)
+      // dataLen @ (SP + 1W) (32bit)
+      // data = buf + off
+      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
+      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
+      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
+      __ lwz( crc,     5*wordSize, argP);  // current crc state
+      __ add( data, data, tmp);            // Add byte buffer offset.
+    } else {                                                         // Used for "updateBytes update".
+      BLOCK_COMMENT("CRC32_updateBytes {");
+      // crc     @ (SP + 4W) (32bit)
+      // buf     @ (SP + 3W) (64bit ptr to byte array)
+      // off     @ (SP + 2W) (32bit)
+      // dataLen @ (SP + 1W) (32bit)
+      // data = buf + off + base_offset
+      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
+      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
+      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
+      __ add( data, data, tmp);            // add byte buffer offset
+      __ lwz( crc,     4*wordSize, argP);  // current crc state
+      __ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
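+      // (arrayOopDesc::base_offset_in_bytes(T_BYTE) skips the header of the byte[].)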
+    }
+
+    StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
+
+    // Performance measurements show the 1word and 2word variants to be almost
+    // equivalent, with a slight advantage for the 1word variant, which we
+    // chose for its code compactness.
+    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3);
+
+    // Restore caller sp for c2i case and return.
+    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+    __ blr();
+
+    // Generate a vanilla native entry as the slow path.
+    BLOCK_COMMENT("} CRC32_updateBytes(Buffer)");
+    BIND(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
+    return start;
+  }
+
+  return NULL;
+}
+
+// =============================================================================
+// Exceptions
+
+void TemplateInterpreterGenerator::generate_throw_exception() {
+  Register Rexception    = R17_tos,
+           Rcontinuation = R3_RET;
+
+  // --------------------------------------------------------------------------
+  // Entry point if a method returns with a pending exception (rethrow).
+  Interpreter::_rethrow_exception_entry = __ pc();
+  {
+    __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
+    __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
+    __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
+
+    // Compiled code destroys templateTableBase, reload.
+    __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
+  }
+
+  // Entry point if an interpreted method throws an exception (throw).
+  Interpreter::_throw_exception_entry = __ pc();
+  {
+    __ mr(Rexception, R3_RET);
+
+    __ verify_thread();
+    __ verify_oop(Rexception);
+
+    // Expression stack must be empty before entering the VM in case of an exception.
+    __ empty_expression_stack();
+    // Find exception handler address and preserve exception oop.
+    // Call C routine to find handler and jump to it.
+    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Rexception);
+    __ mtctr(Rcontinuation);
+    // Push exception for exception handler bytecodes.
+    __ push_ptr(Rexception);
+
+    // Jump to the exception handler (this may be the remove-activation entry!).
+    __ bctr();
+  }
+
+  // If the exception is not handled in the current frame the frame is
+  // removed and the exception is rethrown (i.e. exception
+  // continuation is _rethrow_exception).
+  //
+  // Note: At this point the bci is still the bci for the instruction
+  // which caused the exception and the expression stack is
+  // empty. Thus, for any VM calls at this point, GC will find a legal
+  // oop map (with empty expression stack).
+
+  // In current activation
+  // tos: exception
+  // bcp: exception bcp
+
+  // --------------------------------------------------------------------------
+  // JVMTI PopFrame support
+
+  Interpreter::_remove_activation_preserving_args_entry = __ pc();
+  {
+    // Set the popframe_processing bit in popframe_condition indicating that we are
+    // currently handling popframe, so that call_VMs that may happen later do not
+    // trigger new popframe handling cycles.
+    __ lwz(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
+    __ ori(R11_scratch1, R11_scratch1, JavaThread::popframe_processing_bit);
+    __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
+
+    // Empty the expression stack, as in normal exception handling.
+    __ empty_expression_stack();
+    __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
+
+    // Check to see whether we are returning to a deoptimized frame.
+    // (The PopFrame call ensures that the caller of the popped frame is
+    // either interpreted or compiled and deoptimizes it if compiled.)
+    // Note that we don't compare the return PC against the
+    // deoptimization blob's unpack entry because of the presence of
+    // adapter frames in C2.
+    Label Lcaller_not_deoptimized;
+    Register return_pc = R3_ARG1;
+    __ ld(return_pc, 0, R1_SP);
+    __ ld(return_pc, _abi(lr), return_pc);
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), return_pc);
+    __ cmpdi(CCR0, R3_RET, 0);
+    __ bne(CCR0, Lcaller_not_deoptimized);
+
+    // The deoptimized case.
+    // In this case, we can't call dispatch_next() after the frame is
+    // popped, but instead must save the incoming arguments and restore
+    // them after deoptimization has occurred.
+    __ ld(R4_ARG2, in_bytes(Method::const_offset()), R19_method);
+    __ lhz(R4_ARG2 /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), R4_ARG2);
+    __ slwi(R4_ARG2, R4_ARG2, Interpreter::logStackElementSize);
+    __ addi(R5_ARG3, R18_locals, Interpreter::stackElementSize);
+    __ subf(R5_ARG3, R4_ARG2, R5_ARG3);
+    // Save these arguments.
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R16_thread, R4_ARG2, R5_ARG3);
+
+    // Inform deoptimization that it is responsible for restoring these arguments.
+    __ load_const_optimized(R11_scratch1, JavaThread::popframe_force_deopt_reexecution_bit);
+    __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
+
+    // Return from the current method into the deoptimization blob. We will
+    // eventually end up in the deopt interpreter entry; deoptimization has
+    // prepared everything so that we re-execute the call that called us.
+    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*reload return_pc*/ return_pc, R11_scratch1, R12_scratch2);
+    __ mtlr(return_pc);
+    __ blr();
+
+    // The non-deoptimized case.
+    __ bind(Lcaller_not_deoptimized);
+
+    // Clear the popframe condition flag.
+    __ li(R0, 0);
+    __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
+
+    // Get out of the current method and re-execute the call that called us.
+    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
+    __ restore_interpreter_state(R11_scratch1);
+    __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
+    __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
+    if (ProfileInterpreter) {
+      __ set_method_data_pointer_for_bcp();
+      __ ld(R11_scratch1, 0, R1_SP);
+      __ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1);
+    }
+#if INCLUDE_JVMTI
+    Label L_done;
+
+    __ lbz(R11_scratch1, 0, R14_bcp);
+    __ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
+    __ bne(CCR0, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+    __ ld(R4_ARG2, 0, R18_locals);
+    __ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
+    __ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
+    __ cmpdi(CCR0, R4_ARG2, 0);
+    __ beq(CCR0, L_done);
+    __ std(R4_ARG2, wordSize, R15_esp);
+    __ bind(L_done);
+#endif // INCLUDE_JVMTI
+    __ dispatch_next(vtos);
+  }
+  // end of JVMTI PopFrame support
+
+  // --------------------------------------------------------------------------
+  // Remove activation exception entry.
+  // This is jumped to if an interpreted method can't handle an exception itself
+  // (we come from the throw/rethrow exception entry above). We're going to call
+  // into the VM to find the exception handler in the caller, pop the current
+  // frame and return the handler we calculated.
+  Interpreter::_remove_activation_entry = __ pc();
+  {
+    __ pop_ptr(Rexception);
+    __ verify_thread();
+    __ verify_oop(Rexception);
+    __ std(Rexception, in_bytes(JavaThread::vm_result_offset()), R16_thread);
+
+    __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, true);
+    __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI, false);
+
+    __ get_vm_result(Rexception);
+
+    // We are done with this activation frame; find out where to go next.
+    // The continuation point will be an exception handler, which expects
+    // the following registers set up:
+    //
+    // RET:  exception oop
+    // ARG2: Issuing PC (see generate_exception_blob()), only used if the caller is compiled.
+
+    Register return_pc = R31; // Needs to survive the runtime call.
+    __ ld(return_pc, 0, R1_SP);
+    __ ld(return_pc, _abi(lr), return_pc);
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, return_pc);
+
+    // Remove the current activation.
+    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
+
+    __ mr(R4_ARG2, return_pc);
+    __ mtlr(R3_RET);
+    __ mr(R3_RET, Rexception);
+    __ blr();
+  }
+}
+
+// JVMTI ForceEarlyReturn support.
+// Returns "in the middle" of a method with a "fake" return value.
+address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
+
+  Register Rscratch1 = R11_scratch1,
+           Rscratch2 = R12_scratch2;
+
+  address entry = __ pc();
+  __ empty_expression_stack();
+
+  __ load_earlyret_value(state, Rscratch1);
+
+  __ ld(Rscratch1, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
+  // Clear the earlyret state.
+  __ li(R0, 0);
+  __ stw(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rscratch1);
+
+  __ remove_activation(state, false, false);
+  // Copied from TemplateTable::_return.
+  // Restoration of lr done by remove_activation.
+  switch (state) {
+    case ltos:
+    case btos:
+    case ctos:
+    case stos:
+    case atos:
+    case itos: __ mr(R3_RET, R17_tos); break;
+    case ftos:
+    case dtos: __ fmr(F1_RET, F15_ftos); break;
+    case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
+               // to become visible before the reference to the object is stored anywhere.
+               __ membar(Assembler::StoreStore); break;
+    default  : ShouldNotReachHere();
+  }
+  __ blr();
+
+  return entry;
+} // end of ForceEarlyReturn support
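+
+// Usage note (an orientation aid, not from this change): this path is reached
+// when a JVMTI agent calls ForceEarlyReturn<Type>() on the topmost frame;
+// load_earlyret_value fetches the value the agent stored in the
+// JvmtiThreadState before the activation is removed.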
+
+//-----------------------------------------------------------------------------
+// Helper for vtos entry point generation
+
+void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
+                                                         address& bep,
+                                                         address& cep,
+                                                         address& sep,
+                                                         address& aep,
+                                                         address& iep,
+                                                         address& lep,
+                                                         address& fep,
+                                                         address& dep,
+                                                         address& vep) {
+  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
+  Label L;
+
+  aep = __ pc();  __ push_ptr();  __ b(L);
+  fep = __ pc();  __ push_f();    __ b(L);
+  dep = __ pc();  __ push_d();    __ b(L);
+  lep = __ pc();  __ push_l();    __ b(L);
+  __ align(32, 12, 24); // align L
+  bep = cep = sep =
+  iep = __ pc();  __ push_i();
+  vep = __ pc();
+  __ bind(L);
+  generate_and_dispatch(t);
+}
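+
+// Example (for orientation, not generated code): a vtos template entered with
+// an int on top-of-stack starts at iep, which spills R17_tos onto the
+// expression stack (push_i) and falls through to the common vtos entry point
+// at L; generate_and_dispatch() then emits the template body exactly once.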
+
+//-----------------------------------------------------------------------------
+// Generation of individual instructions
+
+// helpers for generate_and_dispatch
+
+InterpreterGenerator::InterpreterGenerator(StubQueue* code)
+  : TemplateInterpreterGenerator(code) {
+  generate_all(); // Down here so it can be "virtual".
+}
+
+//-----------------------------------------------------------------------------
+
+// Non-product code
+#ifndef PRODUCT
+address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
+  //__ flush_bundle();
+  address entry = __ pc();
+
+  const char *bname = NULL;
+  uint tsize = 0;
+  switch(state) {
+  case ftos:
+    bname = "trace_code_ftos {";
+    tsize = 2;
+    break;
+  case btos:
+    bname = "trace_code_btos {";
+    tsize = 2;
+    break;
+  case ctos:
+    bname = "trace_code_ctos {";
+    tsize = 2;
+    break;
+  case stos:
+    bname = "trace_code_stos {";
+    tsize = 2;
+    break;
+  case itos:
+    bname = "trace_code_itos {";
+    tsize = 2;
+    break;
+  case ltos:
+    bname = "trace_code_ltos {";
+    tsize = 3;
+    break;
+  case atos:
+    bname = "trace_code_atos {";
+    tsize = 2;
+    break;
+  case vtos:
+    // Note: In case of vtos, the topmost stack value could be an int or a double.
+    // In case of a double (2 slots) we won't see the 2nd stack value.
+    // Maybe we should simply print the topmost 3 stack slots to cope with this.
+    bname = "trace_code_vtos {";
+    tsize = 2;
+    break;
+  case dtos:
+    bname = "trace_code_dtos {";
+    tsize = 3;
+    break;
+  default:
+    ShouldNotReachHere();
+  }
+  BLOCK_COMMENT(bname);
+
+  // Support short-cut for TraceBytecodesAt.
+  // To speed things up, don't call into the VM before tracing is due to start.
+  Label Lskip_vm_call;
+  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
+    int offs1 = __ load_const_optimized(R11_scratch1, (address) &TraceBytecodesAt, R0, true);
+    int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
+    __ ld(R11_scratch1, offs1, R11_scratch1);
+    __ lwa(R12_scratch2, offs2, R12_scratch2);
+    __ cmpd(CCR0, R12_scratch2, R11_scratch1);
+    __ blt(CCR0, Lskip_vm_call);
+  }
+
+  __ push(state);
+  // Load 2 topmost expression stack values.
+  __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
+  __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
+  __ mflr(R31);
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
+  __ mtlr(R31);
+  __ pop(state);
+
+  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
+    __ bind(Lskip_vm_call);
+  }
+  __ blr();
+  BLOCK_COMMENT("} trace_code");
+  return entry;
+}
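+
+// Usage note (debug builds): -XX:+TraceBytecodes routes every bytecode through
+// this stub and SharedRuntime::trace_bytecode; -XX:TraceBytecodesAt=<n> uses
+// the short-cut above to skip the VM call until the global bytecode counter
+// reaches <n>.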
+
+void TemplateInterpreterGenerator::count_bytecode() {
+  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true);
+  __ lwz(R12_scratch2, offs, R11_scratch1);
+  __ addi(R12_scratch2, R12_scratch2, 1);
+  __ stw(R12_scratch2, offs, R11_scratch1);
+}
+
+void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
+  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true);
+  __ lwz(R12_scratch2, offs, R11_scratch1);
+  __ addi(R12_scratch2, R12_scratch2, 1);
+  __ stw(R12_scratch2, offs, R11_scratch1);
+}
+
+void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
+  const Register addr = R11_scratch1,
+                 tmp  = R12_scratch2;
+  // Get index, shift out old bytecode, bring in new bytecode, and store it.
+  // _index = (_index >> log2_number_of_codes) |
+  //          (bytecode << log2_number_of_codes);
+  int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true);
+  __ lwz(tmp, offs1, addr);
+  __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes);
+  __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
+  __ stw(tmp, offs1, addr);
+
+  // Bump bucket contents.
+  // _counters[_index] ++;
+  int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true);
+  __ sldi(tmp, tmp, LogBytesPerInt);
+  __ add(addr, tmp, addr);
+  __ lwz(tmp, offs2, addr);
+  __ addi(tmp, tmp, 1);
+  __ stw(tmp, offs2, addr);
+}
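+
+// The rolling pair index above, restated as a plain C++ sketch (names are
+// assumptions for illustration, not compiled): the low log2_number_of_codes
+// bits hold the previous bytecode and the high bits the current one, so
+// _counters has one slot per (previous, current) bytecode pair.
+#if 0
+static void histogram_pair_sketch(unsigned& index, unsigned bytecode,
+                                  unsigned* counters, int log2_codes) {
+  index = (index >> log2_codes) | (bytecode << log2_codes);
+  counters[index]++;
+}
+#endif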
+
+void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
+  // Call a little run-time stub to avoid code blow-up for each bytecode.
+  // The run-time stub saves the right registers, depending on
+  // the tosca in-state for the given template.
+
+  assert(Interpreter::trace_code(t->tos_in()) != NULL,
+         "entry must have been generated");
+
+  // Note: we destroy LR here.
+  __ bl(Interpreter::trace_code(t->tos_in()));
+}
+
+void TemplateInterpreterGenerator::stop_interpreter_at() {
+  Label L;
+  int offs1 = __ load_const_optimized(R11_scratch1, (address) &StopInterpreterAt, R0, true);
+  int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
+  __ ld(R11_scratch1, offs1, R11_scratch1);
+  __ lwa(R12_scratch2, offs2, R12_scratch2);
+  __ cmpd(CCR0, R12_scratch2, R11_scratch1);
+  __ bne(CCR0, L);
+  __ illtrap();
+  __ bind(L);
+}
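+
+// Usage note (debug builds): -XX:StopInterpreterAt=<n> makes the interpreter
+// execute an illtrap once the global bytecode counter reaches <n>, giving a
+// reproducible point at which to attach a debugger.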
+
+#endif // !PRODUCT
+#endif // !CC_INTERP
--- a/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2013, 2015 SAP AG. All rights reserved.
+ * Copyright (c) 2015 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,1385 +24,38 @@
  */
 
 #include "precompiled.hpp"
-#ifndef CC_INTERP
-#include "asm/macroAssembler.inline.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateTable.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
+#include "oops/constMethod.hpp"
 #include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 
-#undef __
-#define __ _masm->
 
-#ifdef PRODUCT
-#define BLOCK_COMMENT(str) /* nothing */
-#else
-#define BLOCK_COMMENT(str) __ block_comment(str)
-#endif
-
-#define BIND(label)        __ bind(label); BLOCK_COMMENT(#label ":")
-
-//-----------------------------------------------------------------------------
-
-// Actually we should never reach here since we do stack overflow checks before pushing any frame.
-address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
-  address entry = __ pc();
-  __ unimplemented("generate_StackOverflowError_handler");
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
-  address entry = __ pc();
-  __ empty_expression_stack();
-  __ load_const_optimized(R4_ARG2, (address) name);
-  // Index is in R17_tos.
-  __ mr(R5_ARG3, R17_tos);
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException));
-  return entry;
-}
-
-#if 0
-// Call special ClassCastException constructor taking object to cast
-// and target class as arguments.
-address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler() {
-  address entry = __ pc();
-
-  // Expression stack must be empty before entering the VM if an
-  // exception happened.
-  __ empty_expression_stack();
-
-  // Thread will be loaded to R3_ARG1.
-  // Target class oop is in register R5_ARG3 by convention!
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose), R17_tos, R5_ARG3);
-  // Above call must not return here since exception pending.
-  DEBUG_ONLY(__ should_not_reach_here();)
-  return entry;
-}
-#endif
-
-address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
-  address entry = __ pc();
-  // Expression stack must be empty before entering the VM if an
-  // exception happened.
-  __ empty_expression_stack();
-
-  // Load exception object.
-  // Thread will be loaded to R3_ARG1.
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException), R17_tos);
-#ifdef ASSERT
-  // Above call must not return here since exception pending.
-  __ should_not_reach_here();
-#endif
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
-  address entry = __ pc();
-  //__ untested("generate_exception_handler_common");
-  Register Rexception = R17_tos;
-
-  // Expression stack must be empty before entering the VM if an exception happened.
-  __ empty_expression_stack();
-
-  __ load_const_optimized(R4_ARG2, (address) name, R11_scratch1);
-  if (pass_oop) {
-    __ mr(R5_ARG3, Rexception);
-    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), false);
-  } else {
-    __ load_const_optimized(R5_ARG3, (address) message, R11_scratch1);
-    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), false);
-  }
-
-  // Throw exception.
-  __ mr(R3_ARG1, Rexception);
-  __ load_const_optimized(R11_scratch1, Interpreter::throw_exception_entry(), R12_scratch2);
-  __ mtctr(R11_scratch1);
-  __ bctr();
-
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
-  address entry = __ pc();
-  __ unimplemented("generate_continuation_for");
-  return entry;
-}
-
-// This entry is returned to when a call returns to the interpreter.
-// When we arrive here, we expect that the callee stack frame is already popped.
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
-  address entry = __ pc();
-
-  // Move the value out of the return register back to the TOS cache of current frame.
-  switch (state) {
-    case ltos:
-    case btos:
-    case ctos:
-    case stos:
-    case atos:
-    case itos: __ mr(R17_tos, R3_RET); break;   // RET -> TOS cache
-    case ftos:
-    case dtos: __ fmr(F15_ftos, F1_RET); break; // TOS cache -> GR_FRET
-    case vtos: break;                           // Nothing to do, this was a void return.
-    default  : ShouldNotReachHere();
-  }
-
-  __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
-  __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
-  __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
-
-  // Compiled code destroys templateTableBase, reload.
-  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);
-
-  if (state == atos) {
-    __ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
-  }
-
-  const Register cache = R11_scratch1;
-  const Register size  = R12_scratch2;
-  __ get_cache_and_index_at_bcp(cache, 1, index_size);
-
-  // Get least significant byte of 64 bit value:
-#if defined(VM_LITTLE_ENDIAN)
-  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()), cache);
-#else
-  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
-#endif
-  __ sldi(size, size, Interpreter::logStackElementSize);
-  __ add(R15_esp, R15_esp, size);
-  __ dispatch_next(state, step);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
-  address entry = __ pc();
-  // If state != vtos, we're returning from a native method, which put it's result
-  // into the result register. So move the value out of the return register back
-  // to the TOS cache of current frame.
-
-  switch (state) {
-    case ltos:
-    case btos:
-    case ctos:
-    case stos:
-    case atos:
-    case itos: __ mr(R17_tos, R3_RET); break;   // GR_RET -> TOS cache
-    case ftos:
-    case dtos: __ fmr(F15_ftos, F1_RET); break; // TOS cache -> GR_FRET
-    case vtos: break;                           // Nothing to do, this was a void return.
-    default  : ShouldNotReachHere();
-  }
-
-  // Load LcpoolCache @@@ should be already set!
-  __ get_constant_pool_cache(R27_constPoolCache);
-
-  // Handle a pending exception, fall through if none.
-  __ check_and_forward_exception(R11_scratch1, R12_scratch2);
-
-  // Start executing bytecodes.
-  __ dispatch_next(state, step);
-
-  return entry;
-}
-
-// A result handler converts the native result into java format.
-// Use the shared code between c++ and template interpreter.
-address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
-  return AbstractInterpreterGenerator::generate_result_handler_for(type);
-}
-
-address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
-  address entry = __ pc();
-
-  __ push(state);
-  __ call_VM(noreg, runtime_entry);
-  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
-
-  return entry;
-}
-
-// Helpers for commoning out cases in the various type of method entries.
-
-// Increment invocation count & check for overflow.
-//
-// Note: checking for negative value instead of overflow
-//       so we have a 'sticky' overflow test.
-//
-void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
-  // Note: In tiered we increment either counters in method or in MDO depending if we're profiling or not.
-  Register Rscratch1   = R11_scratch1;
-  Register Rscratch2   = R12_scratch2;
-  Register R3_counters = R3_ARG1;
-  Label done;
-
-  if (TieredCompilation) {
-    const int increment = InvocationCounter::count_increment;
-    Label no_mdo;
-    if (ProfileInterpreter) {
-      const Register Rmdo = R3_counters;
-      // If no method data exists, go to profile_continue.
-      __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
-      __ cmpdi(CCR0, Rmdo, 0);
-      __ beq(CCR0, no_mdo);
-
-      // Increment backedge counter in the MDO.
-      const int mdo_ic_offs = in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
-      __ lwz(Rscratch2, mdo_ic_offs, Rmdo);
-      __ lwz(Rscratch1, in_bytes(MethodData::invoke_mask_offset()), Rmdo);
-      __ addi(Rscratch2, Rscratch2, increment);
-      __ stw(Rscratch2, mdo_ic_offs, Rmdo);
-      __ and_(Rscratch1, Rscratch2, Rscratch1);
-      __ bne(CCR0, done);
-      __ b(*overflow);
-    }
-
-    // Increment counter in MethodCounters*.
-    const int mo_bc_offs = in_bytes(MethodCounters::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
-    __ bind(no_mdo);
-    __ get_method_counters(R19_method, R3_counters, done);
-    __ lwz(Rscratch2, mo_bc_offs, R3_counters);
-    __ lwz(Rscratch1, in_bytes(MethodCounters::invoke_mask_offset()), R3_counters);
-    __ addi(Rscratch2, Rscratch2, increment);
-    __ stw(Rscratch2, mo_bc_offs, R3_counters);
-    __ and_(Rscratch1, Rscratch2, Rscratch1);
-    __ beq(CCR0, *overflow);
-
-    __ bind(done);
-
-  } else {
-
-    // Update standard invocation counters.
-    Register Rsum_ivc_bec = R4_ARG2;
-    __ get_method_counters(R19_method, R3_counters, done);
-    __ increment_invocation_counter(R3_counters, Rsum_ivc_bec, R12_scratch2);
-    // Increment interpreter invocation counter.
-    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop.
-      __ lwz(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
-      __ addi(R12_scratch2, R12_scratch2, 1);
-      __ stw(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
-    }
-    // Check if we must create a method data obj.
-    if (ProfileInterpreter && profile_method != NULL) {
-      const Register profile_limit = Rscratch1;
-      __ lwz(profile_limit, in_bytes(MethodCounters::interpreter_profile_limit_offset()), R3_counters);
-      // Test to see if we should create a method data oop.
-      __ cmpw(CCR0, Rsum_ivc_bec, profile_limit);
-      __ blt(CCR0, *profile_method_continue);
-      // If no method data exists, go to profile_method.
-      __ test_method_data_pointer(*profile_method);
-    }
-    // Finally check for counter overflow.
-    if (overflow) {
-      const Register invocation_limit = Rscratch1;
-      __ lwz(invocation_limit, in_bytes(MethodCounters::interpreter_invocation_limit_offset()), R3_counters);
-      __ cmpw(CCR0, Rsum_ivc_bec, invocation_limit);
-      __ bge(CCR0, *overflow);
-    }
-
-    __ bind(done);
-  }
-}
-
-// Generate code to initiate compilation on invocation counter overflow.
-void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
-  // Generate code to initiate compilation on the counter overflow.
-
-  // InterpreterRuntime::frequency_counter_overflow takes one arguments,
-  // which indicates if the counter overflow occurs at a backwards branch (NULL bcp)
-  // We pass zero in.
-  // The call returns the address of the verified entry point for the method or NULL
-  // if the compilation did not complete (either went background or bailed out).
-  //
-  // Unlike the C++ interpreter above: Check exceptions!
-  // Assumption: Caller must set the flag "do_not_unlock_if_sychronized" if the monitor of a sync'ed
-  // method has not yet been created. Thus, no unlocking of a non-existing monitor can occur.
-
-  __ li(R4_ARG2, 0);
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
-
-  // Returns verified_entry_point or NULL.
-  // We ignore it in any case.
-  __ b(continue_entry);
-}
-
-void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1) {
-  assert_different_registers(Rmem_frame_size, Rscratch1);
-  __ generate_stack_overflow_check_with_compare_and_throw(Rmem_frame_size, Rscratch1);
-}
-
-void TemplateInterpreterGenerator::unlock_method(bool check_exceptions) {
-  __ unlock_object(R26_monitor, check_exceptions);
-}
-
-// Lock the current method, interpreter register window must be set up!
-void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
-  const Register Robj_to_lock = Rscratch2;
-
-  {
-    if (!flags_preloaded) {
-      __ lwz(Rflags, method_(access_flags));
-    }
-
-#ifdef ASSERT
-    // Check if methods needs synchronization.
-    {
-      Label Lok;
-      __ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
-      __ btrue(CCR0,Lok);
-      __ stop("method doesn't need synchronization");
-      __ bind(Lok);
-    }
-#endif // ASSERT
-  }
-
-  // Get synchronization object to Rscratch2.
-  {
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    Label Lstatic;
-    Label Ldone;
-
-    __ testbitdi(CCR0, R0, Rflags, JVM_ACC_STATIC_BIT);
-    __ btrue(CCR0, Lstatic);
-
-    // Non-static case: load receiver obj from stack and we're done.
-    __ ld(Robj_to_lock, R18_locals);
-    __ b(Ldone);
-
-    __ bind(Lstatic); // Static case: Lock the java mirror
-    __ ld(Robj_to_lock, in_bytes(Method::const_offset()), R19_method);
-    __ ld(Robj_to_lock, in_bytes(ConstMethod::constants_offset()), Robj_to_lock);
-    __ ld(Robj_to_lock, ConstantPool::pool_holder_offset_in_bytes(), Robj_to_lock);
-    __ ld(Robj_to_lock, mirror_offset, Robj_to_lock);
-
-    __ bind(Ldone);
-    __ verify_oop(Robj_to_lock);
-  }
-
-  // Got the oop to lock => execute!
-  __ add_monitor_to_stack(true, Rscratch1, R0);
-
-  __ std(Robj_to_lock, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
-  __ lock_object(R26_monitor, Robj_to_lock);
-}
-
-// Generate a fixed interpreter frame for pure interpreter
-// and I2N native transition frames.
-//
-// Before (stack grows downwards):
-//
-//         |  ...         |
-//         |------------- |
-//         |  java arg0   |
-//         |  ...         |
-//         |  java argn   |
-//         |              |   <-   R15_esp
-//         |              |
-//         |--------------|
-//         | abi_112      |
-//         |              |   <-   R1_SP
-//         |==============|
-//
-//
-// After:
-//
-//         |  ...         |
-//         |  java arg0   |<-   R18_locals
-//         |  ...         |
-//         |  java argn   |
-//         |--------------|
-//         |              |
-//         |  java locals |
-//         |              |
-//         |--------------|
-//         |  abi_48      |
-//         |==============|
-//         |              |
-//         |   istate     |
-//         |              |
-//         |--------------|
-//         |   monitor    |<-   R26_monitor
-//         |--------------|
-//         |              |<-   R15_esp
-//         | expression   |
-//         | stack        |
-//         |              |
-//         |--------------|
-//         |              |
-//         | abi_112      |<-   R1_SP
-//         |==============|
-//
-// The top most frame needs an abi space of 112 bytes. This space is needed,
-// since we call to c. The c function may spill their arguments to the caller
-// frame. When we call to java, we don't need these spill slots. In order to save
-// space on the stack, we resize the caller. However, java local reside in
-// the caller frame and the frame has to be increased. The frame_size for the
-// current frame was calculated based on max_stack as size for the expression
-// stack. At the call, just a part of the expression stack might be used.
-// We don't want to waste this space and cut the frame back accordingly.
-// The resulting amount for resizing is calculated as follows:
-// resize =   (number_of_locals - number_of_arguments) * slot_size
-//          + (R1_SP - R15_esp) + 48
-//
-// The size for the callee frame is calculated:
-// framesize = 112 + max_stack + monitor + state_size
-//
-// maxstack:   Max number of slots on the expression stack, loaded from the method.
-// monitor:    We statically reserve room for one monitor object.
-// state_size: We save the current state of the interpreter to this area.
-//
-void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals) {
-  Register parent_frame_resize = R6_ARG4, // Frame will grow by this number of bytes.
-           top_frame_size      = R7_ARG5,
-           Rconst_method       = R8_ARG6;
-
-  assert_different_registers(Rsize_of_parameters, Rsize_of_locals, parent_frame_resize, top_frame_size);
-
-  __ ld(Rconst_method, method_(const));
-  __ lhz(Rsize_of_parameters /* number of params */,
-         in_bytes(ConstMethod::size_of_parameters_offset()), Rconst_method);
-  if (native_call) {
-    // If we're calling a native method, we reserve space for the worst-case signature
-    // handler varargs vector, which is max(Argument::n_register_parameters, parameter_count+2).
-    // We add two slots to the parameter_count, one for the jni
-    // environment and one for a possible native mirror.
-    Label skip_native_calculate_max_stack;
-    __ addi(top_frame_size, Rsize_of_parameters, 2);
-    __ cmpwi(CCR0, top_frame_size, Argument::n_register_parameters);
-    __ bge(CCR0, skip_native_calculate_max_stack);
-    __ li(top_frame_size, Argument::n_register_parameters);
-    __ bind(skip_native_calculate_max_stack);
-    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
-    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
-    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
-    assert(Rsize_of_locals == noreg, "Rsize_of_locals not initialized"); // Only relevant value is Rsize_of_parameters.
-  } else {
-    __ lhz(Rsize_of_locals /* number of params */, in_bytes(ConstMethod::size_of_locals_offset()), Rconst_method);
-    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
-    __ sldi(Rsize_of_locals, Rsize_of_locals, Interpreter::logStackElementSize);
-    __ lhz(top_frame_size, in_bytes(ConstMethod::max_stack_offset()), Rconst_method);
-    __ sub(R11_scratch1, Rsize_of_locals, Rsize_of_parameters); // >=0
-    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
-    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
-    __ add(parent_frame_resize, parent_frame_resize, R11_scratch1);
-  }
-
-  // Compute top frame size.
-  __ addi(top_frame_size, top_frame_size, frame::abi_reg_args_size + frame::ijava_state_size);
-
-  // Cut back area between esp and max_stack.
-  __ addi(parent_frame_resize, parent_frame_resize, frame::abi_minframe_size - Interpreter::stackElementSize);
-
-  __ round_to(top_frame_size, frame::alignment_in_bytes);
-  __ round_to(parent_frame_resize, frame::alignment_in_bytes);
-  // parent_frame_resize = (locals-parameters) - (ESP-SP-ABI48) Rounded to frame alignment size.
-  // Enlarge by locals-parameters (not in case of native_call), shrink by ESP-SP-ABI48.
-
-  {
-    // --------------------------------------------------------------------------
-    // Stack overflow check
-
-    Label cont;
-    __ add(R11_scratch1, parent_frame_resize, top_frame_size);
-    generate_stack_overflow_check(R11_scratch1, R12_scratch2);
-  }
-
-  // Set up interpreter state registers.
-
-  __ add(R18_locals, R15_esp, Rsize_of_parameters);
-  __ ld(R27_constPoolCache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
-  __ ld(R27_constPoolCache, ConstantPool::cache_offset_in_bytes(), R27_constPoolCache);
-
-  // Set method data pointer.
-  if (ProfileInterpreter) {
-    Label zero_continue;
-    __ ld(R28_mdx, method_(method_data));
-    __ cmpdi(CCR0, R28_mdx, 0);
-    __ beq(CCR0, zero_continue);
-    __ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
-    __ bind(zero_continue);
-  }
-
-  if (native_call) {
-    __ li(R14_bcp, 0); // Must initialize.
-  } else {
-    __ add(R14_bcp, in_bytes(ConstMethod::codes_offset()), Rconst_method);
-  }
-
-  // Resize parent frame.
-  __ mflr(R12_scratch2);
-  __ neg(parent_frame_resize, parent_frame_resize);
-  __ resize_frame(parent_frame_resize, R11_scratch1);
-  __ std(R12_scratch2, _abi(lr), R1_SP);
-
-  __ addi(R26_monitor, R1_SP, - frame::ijava_state_size);
-  __ addi(R15_esp, R26_monitor, - Interpreter::stackElementSize);
-
-  // Store values.
-  // R15_esp, R14_bcp, R26_monitor, R28_mdx are saved at java calls
-  // in InterpreterMacroAssembler::call_from_interpreter.
-  __ std(R19_method, _ijava_state_neg(method), R1_SP);
-  __ std(R21_sender_SP, _ijava_state_neg(sender_sp), R1_SP);
-  __ std(R27_constPoolCache, _ijava_state_neg(cpoolCache), R1_SP);
-  __ std(R18_locals, _ijava_state_neg(locals), R1_SP);
-
-  // Note: esp, bcp, monitor, mdx live in registers. Hence, the correct version can only
-  // be found in the frame after save_interpreter_state is done. This is always true
-  // for non-top frames. But when a signal occurs, dumping the top frame can go wrong,
-  // because e.g. frame::interpreter_frame_bcp() will not access the correct value
-  // (Enhanced Stack Trace).
-  // The signal handler does not save the interpreter state into the frame.
-  __ li(R0, 0);
-#ifdef ASSERT
-  // Fill remaining slots with constants.
-  __ load_const_optimized(R11_scratch1, 0x5afe);
-  __ load_const_optimized(R12_scratch2, 0xdead);
-#endif
-  // We have to initialize some frame slots for native calls (accessed by GC).
-  if (native_call) {
-    __ std(R26_monitor, _ijava_state_neg(monitors), R1_SP);
-    __ std(R14_bcp, _ijava_state_neg(bcp), R1_SP);
-    if (ProfileInterpreter) { __ std(R28_mdx, _ijava_state_neg(mdx), R1_SP); }
-  }
-#ifdef ASSERT
-  else {
-    __ std(R12_scratch2, _ijava_state_neg(monitors), R1_SP);
-    __ std(R12_scratch2, _ijava_state_neg(bcp), R1_SP);
-    __ std(R12_scratch2, _ijava_state_neg(mdx), R1_SP);
-  }
-  __ std(R11_scratch1, _ijava_state_neg(ijava_reserved), R1_SP);
-  __ std(R12_scratch2, _ijava_state_neg(esp), R1_SP);
-  __ std(R12_scratch2, _ijava_state_neg(lresult), R1_SP);
-  __ std(R12_scratch2, _ijava_state_neg(fresult), R1_SP);
-#endif
-  __ subf(R12_scratch2, top_frame_size, R1_SP);
-  __ std(R0, _ijava_state_neg(oop_tmp), R1_SP);
-  __ std(R12_scratch2, _ijava_state_neg(top_frame_sp), R1_SP);
-
-  // Push top frame.
-  __ push_frame(top_frame_size, R11_scratch1);
-}
-
-// End of helpers
-
-
-// Support abs and sqrt like in compiler.
-// For others we can use a normal (native) entry.
-
-inline bool math_entry_available(AbstractInterpreter::MethodKind kind) {
-  if (!InlineIntrinsics) return false;
-
-  return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
-          (kind==Interpreter::java_lang_math_abs));
-}
-
-address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
-  if (!math_entry_available(kind)) {
-    NOT_PRODUCT(__ should_not_reach_here();)
-    return NULL;
-  }
-
-  address entry = __ pc();
-
-  __ lfd(F1_RET, Interpreter::stackElementSize, R15_esp);
-
-  // Pop c2i arguments (if any) off when we return.
-#ifdef ASSERT
-  __ ld(R9_ARG7, 0, R1_SP);
-  __ ld(R10_ARG8, 0, R21_sender_SP);
-  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
-  __ asm_assert_eq("backlink", 0x545);
-#endif // ASSERT
-  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-
-  if (kind == Interpreter::java_lang_math_sqrt) {
-    __ fsqrt(F1_RET, F1_RET);
-  } else if (kind == Interpreter::java_lang_math_abs) {
-    __ fabs(F1_RET, F1_RET);
-  } else {
-    ShouldNotReachHere();
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+  int i = 0;
+  switch (type) {
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : i = 4; break;
+    case T_LONG   : i = 5; break;
+    case T_VOID   : i = 6; break;
+    case T_FLOAT  : i = 7; break;
+    case T_DOUBLE : i = 8; break;
+    case T_OBJECT : i = 9; break;
+    case T_ARRAY  : i = 9; break;
+    default       : ShouldNotReachHere();
   }
-
-  // And we're done.
-  __ blr();
-
-  __ flush();
-
-  return entry;
-}
-
-// Interpreter stub for calling a native method. (asm interpreter)
-// This sets up a somewhat different looking stack for calling the
-// native method than the typical interpreter frame setup.
-//
-// On entry:
-//   R19_method    - method
-//   R16_thread    - JavaThread*
-//   R15_esp       - intptr_t* sender tos
-//
-//   abstract stack (grows up)
-//     [  IJava (caller of JNI callee)  ]  <-- ASP
-//        ...
-address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
-
-  address entry = __ pc();
-
-  const bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // -----------------------------------------------------------------------------
-  // Allocate a new frame that represents the native callee (i2n frame).
-  // This is not a full-blown interpreter frame, but in particular, the
-  // following registers are valid after this:
-  // - R19_method
-  // - R18_local (points to start of argumuments to native function)
-  //
-  //   abstract stack (grows up)
-  //     [  IJava (caller of JNI callee)  ]  <-- ASP
-  //        ...
-
-  const Register signature_handler_fd = R11_scratch1;
-  const Register pending_exception    = R0;
-  const Register result_handler_addr  = R31;
-  const Register native_method_fd     = R11_scratch1;
-  const Register access_flags         = R22_tmp2;
-  const Register active_handles       = R11_scratch1; // R26_monitor saved to state.
-  const Register sync_state           = R12_scratch2;
-  const Register sync_state_addr      = sync_state;   // Address is dead after use.
-  const Register suspend_flags        = R11_scratch1;
-
-  //=============================================================================
-  // Allocate new frame and initialize interpreter state.
-
-  Label exception_return;
-  Label exception_return_sync_check;
-  Label stack_overflow_return;
-
-  // Generate new interpreter state and jump to stack_overflow_return in case of
-  // a stack overflow.
-  //generate_compute_interpreter_state(stack_overflow_return);
-
-  Register size_of_parameters = R22_tmp2;
-
-  generate_fixed_frame(true, size_of_parameters, noreg /* unused */);
-
-  //=============================================================================
-  // Increment invocation counter. On overflow, entry to JNI method
-  // will be compiled.
-  Label invocation_counter_overflow, continue_after_compile;
-  if (inc_counter) {
-    if (synchronized) {
-      // Since at this point in the method invocation the exception handler
-      // would try to exit the monitor of synchronized methods which hasn't
-      // been entered yet, we set the thread local variable
-      // _do_not_unlock_if_synchronized to true. If any exception was thrown by
-      // runtime, exception handling i.e. unlock_if_synchronized_method will
-      // check this thread local flag.
-      // This flag has two effects, one is to force an unwind in the topmost
-      // interpreter frame and not perform an unlock while doing so.
-      __ li(R0, 1);
-      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
-    }
-    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
-
-    BIND(continue_after_compile);
-    // Reset the _do_not_unlock_if_synchronized flag.
-    if (synchronized) {
-      __ li(R0, 0);
-      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
-    }
-  }
-
-  // access_flags = method->access_flags();
-  // Load access flags.
-  assert(access_flags->is_nonvolatile(),
-         "access_flags must be in a non-volatile register");
-  // Type check.
-  assert(4 == sizeof(AccessFlags), "unexpected field size");
-  __ lwz(access_flags, method_(access_flags));
-
-  // We don't want to reload R19_method and access_flags after calls
-  // to some helper functions.
-  assert(R19_method->is_nonvolatile(),
-         "R19_method must be a non-volatile register");
-
-  // Check for synchronized methods. Must happen AFTER invocation counter
-  // check, so method is not locked if counter overflows.
-
-  if (synchronized) {
-    lock_method(access_flags, R11_scratch1, R12_scratch2, true);
-
-    // Update monitor in state.
-    __ ld(R11_scratch1, 0, R1_SP);
-    __ std(R26_monitor, _ijava_state_neg(monitors), R11_scratch1);
-  }
-
-  // jvmti/jvmpi support
-  __ notify_method_entry();
-
-  //=============================================================================
-  // Get and call the signature handler.
-
-  __ ld(signature_handler_fd, method_(signature_handler));
-  Label call_signature_handler;
-
-  __ cmpdi(CCR0, signature_handler_fd, 0);
-  __ bne(CCR0, call_signature_handler);
-
-  // Method has never been called. Either generate a specialized
-  // handler or point to the slow one.
-  //
-  // Pass parameter 'false' to avoid exception check in call_VM.
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);
-
-  // Check for an exception while looking up the target method. If we
-  // incurred one, bail.
-  __ ld(pending_exception, thread_(pending_exception));
-  __ cmpdi(CCR0, pending_exception, 0);
-  __ bne(CCR0, exception_return_sync_check); // Has pending exception.
-
-  // Reload signature handler, it may have been created/assigned in the meanwhile.
-  __ ld(signature_handler_fd, method_(signature_handler));
-  __ twi_0(signature_handler_fd); // Order wrt. load of klass mirror and entry point (isync is below).
-
-  BIND(call_signature_handler);
-
-  // Before we call the signature handler we push a new frame to
-  // protect the interpreter frame volatile registers when we return
-  // from jni but before we can get back to Java.
-
-  // First set the frame anchor while the SP/FP registers are
-  // convenient and the slow signature handler can use this same frame
-  // anchor.
-
-  // We have a TOP_IJAVA_FRAME here, which belongs to us.
-  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
-
-  // Now the interpreter frame (and its call chain) have been
-  // invalidated and flushed. We are now protected against eager
-  // being enabled in native code. Even if it goes eager the
-  // registers will be reloaded as clean and we will invalidate after
-  // the call so no spurious flush should be possible.
-
-  // Call signature handler and pass locals address.
-  //
-  // Our signature handlers copy required arguments to the C stack
-  // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
-  __ mr(R3_ARG1, R18_locals);
-#if !defined(ABI_ELFv2)
-  __ ld(signature_handler_fd, 0, signature_handler_fd);
-#endif
-
-  __ call_stub(signature_handler_fd);
-
-  // Remove the register parameter varargs slots we allocated in
-  // compute_interpreter_state. SP+16 ends up pointing to the ABI
-  // outgoing argument area.
-  //
-  // Not needed on PPC64.
-  //__ add(SP, SP, Argument::n_register_parameters*BytesPerWord);
-
-  assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
-  // Save across call to native method.
-  __ mr(result_handler_addr, R3_RET);
-
-  __ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror.
-
-  // Set up fixed parameters and call the native method.
-  // If the method is static, get mirror into R4_ARG2.
-  {
-    Label method_is_not_static;
-    // Access_flags is non-volatile and still, no need to restore it.
-
-    // Restore access flags.
-    __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
-    __ bfalse(CCR0, method_is_not_static);
-
-    // constants = method->constants();
-    __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
-    __ ld(R11_scratch1, in_bytes(ConstMethod::constants_offset()), R11_scratch1);
-    // pool_holder = method->constants()->pool_holder();
-    __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(),
-          R11_scratch1/*constants*/);
-
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-
-    // mirror = pool_holder->klass_part()->java_mirror();
-    __ ld(R0/*mirror*/, mirror_offset, R11_scratch1/*pool_holder*/);
-    // state->_native_mirror = mirror;
-
-    __ ld(R11_scratch1, 0, R1_SP);
-    __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
-    // R4_ARG2 = &state->_oop_temp;
-    __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
-    BIND(method_is_not_static);
-  }
-
-  // At this point, arguments have been copied off the stack into
-  // their JNI positions. Oops are boxed in-place on the stack, with
-  // handles copied to arguments. The result handler address is in a
-  // register.
-
-  // Pass JNIEnv address as first parameter.
-  __ addir(R3_ARG1, thread_(jni_environment));
-
-  // Load the native_method entry before we change the thread state.
-  __ ld(native_method_fd, method_(native_function));
-
-  //=============================================================================
-  // Transition from _thread_in_Java to _thread_in_native. As soon as
-  // we make this change the safepoint code needs to be certain that
-  // the last Java frame we established is good. The pc in that frame
-  // just needs to be near here not an actual return address.
-
-  // We use release_store_fence to update values like the thread state, where
-  // we don't want the current thread to continue until all our prior memory
-  // accesses (including the new thread state) are visible to other threads.
-  __ li(R0, _thread_in_native);
-  __ release();
-
-  // TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
-  __ stw(R0, thread_(thread_state));
-
-  if (UseMembar) {
-    __ fence();
-  }
-
-  //=============================================================================
-  // Call the native method. Argument registers must not have been
-  // overwritten since "__ call_stub(signature_handler);" (except for
-  // ARG1 and ARG2 for static methods).
-  __ call_c(native_method_fd);
-
-  __ li(R0, 0);
-  __ ld(R11_scratch1, 0, R1_SP);
-  __ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
-  __ stfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
-  __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1); // reset
-
-  // Note: C++ interpreter needs the following here:
-  // The frame_manager_lr field, which we use for setting the last
-  // java frame, gets overwritten by the signature handler. Restore
-  // it now.
-  //__ get_PC_trash_LR(R11_scratch1);
-  //__ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
-
-  // Because of GC R19_method may no longer be valid.
-
-  // Block, if necessary, before resuming in _thread_in_Java state.
-  // In order for GC to work, don't clear the last_Java_sp until after
-  // blocking.
-
-  //=============================================================================
-  // Switch thread to "native transition" state before reading the
-  // synchronization state. This additional state is necessary
-  // because reading and testing the synchronization state is not
-  // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
-  // in _thread_in_native state, loads _not_synchronized and is
-  // preempted. VM thread changes sync state to synchronizing and
-  // suspends threads for GC. Thread A is resumed to finish this
-  // native method, but doesn't block here since it didn't see any
-  // synchronization in progress, and escapes.
-
-  // We use release_store_fence to update values like the thread state, where
-  // we don't want the current thread to continue until all our prior memory
-  // accesses (including the new thread state) are visible to other threads.
-  __ li(R0/*thread_state*/, _thread_in_native_trans);
-  __ release();
-  __ stw(R0/*thread_state*/, thread_(thread_state));
-  if (UseMembar) {
-    __ fence();
-  }
-  // Write serialization page so that the VM thread can do a pseudo remote
-  // membar. We use the current thread pointer to calculate a thread
-  // specific offset to write to within the page. This minimizes bus
-  // traffic due to cache line collision.
-  else {
-    __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2);
-  }
-
-  // Now before we return to java we must look for a current safepoint
-  // (a new safepoint can not start since we entered native_trans).
-  // We must check here because a current safepoint could be modifying
-  // the callers registers right this moment.
-
-  // Acquire isn't strictly necessary here because of the fence, but
-  // sync_state is declared to be volatile, so we do it anyway
-  // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
-  int sync_state_offs = __ load_const_optimized(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
-
-  // TODO PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
-  __ lwz(sync_state, sync_state_offs, sync_state_addr);
-
-  // TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
-  __ lwz(suspend_flags, thread_(suspend_flags));
-
-  Label sync_check_done;
-  Label do_safepoint;
-  // No synchronization in progress nor yet synchronized.
-  __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
-  // Not suspended.
-  __ cmpwi(CCR1, suspend_flags, 0);
-
-  __ bne(CCR0, do_safepoint);
-  __ beq(CCR1, sync_check_done);
-  __ bind(do_safepoint);
-  __ isync();
-  // Block. We do the call directly and leave the current
-  // last_Java_frame setup undisturbed. We must save any possible
-  // native result across the call. No oop is present.
-
-  __ mr(R3_ARG1, R16_thread);
-#if defined(ABI_ELFv2)
-  __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
-            relocInfo::none);
-#else
-  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
-            relocInfo::none);
-#endif
-
-  __ bind(sync_check_done);
-
-  //=============================================================================
-  // <<<<<< Back in Interpreter Frame >>>>>
-
-  // We are in thread_in_native_trans here and back in the normal
-  // interpreter frame. We don't have to do anything special about
-  // safepoints and we can switch to Java mode anytime we are ready.
-
-  // Note: frame::interpreter_frame_result has a dependency on how the
-  // method result is saved across the call to post_method_exit. For
-  // native methods it assumes that the non-FPU/non-void result is
-  // saved in _native_lresult and a FPU result in _native_fresult. If
-  // this changes then the interpreter_frame_result implementation
-  // will need to be updated too.
-
-  // On PPC64, we have stored the result directly after the native call.
-
-  //=============================================================================
-  // Back in Java
-
-  // We use release_store_fence to update values like the thread state, where
-  // we don't want the current thread to continue until all our prior memory
-  // accesses (including the new thread state) are visible to other threads.
-  __ li(R0/*thread_state*/, _thread_in_Java);
-  __ release();
-  __ stw(R0/*thread_state*/, thread_(thread_state));
-  if (UseMembar) {
-    __ fence();
-  }
-
-  __ reset_last_Java_frame();
-
-  // Jvmdi/jvmpi support. Whether we've got an exception pending or
-  // not, and whether unlocking throws an exception or not, we notify
-  // on native method exit. If we do have an exception, we'll end up
-  // in the caller's context to handle it, so if we don't do the
-  // notify here, we'll drop it on the floor.
-  __ notify_method_exit(true/*native method*/,
-                        ilgl /*illegal state (not used for native methods)*/,
-                        InterpreterMacroAssembler::NotifyJVMTI,
-                        false /*check_exceptions*/);
-
-  //=============================================================================
-  // Handle exceptions
-
-  if (synchronized) {
-    // Don't check for exceptions since we're still in the i2n frame. Do that
-    // manually afterwards.
-    unlock_method(false);
-  }
-
-  // Reset active handles after returning from native.
-  // thread->active_handles()->clear();
-  __ ld(active_handles, thread_(active_handles));
-  // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
-  __ li(R0, 0);
-  __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);
-
-  Label exception_return_sync_check_already_unlocked;
-  __ ld(R0/*pending_exception*/, thread_(pending_exception));
-  __ cmpdi(CCR0, R0/*pending_exception*/, 0);
-  __ bne(CCR0, exception_return_sync_check_already_unlocked);
-
-  //-----------------------------------------------------------------------------
-  // No exception pending.
-
-  // Move native method result back into proper registers and return.
-  // Invoke result handler (may unbox/promote).
-  __ ld(R11_scratch1, 0, R1_SP);
-  __ ld(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
-  __ lfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
-  __ call_stub(result_handler_addr);
-
-  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
-
-  // Must use the return pc which was loaded from the caller's frame
-  // as the VM uses return-pc-patching for deoptimization.
-  __ mtlr(R0);
-  __ blr();
-
-  //-----------------------------------------------------------------------------
-  // An exception is pending. We call into the runtime only if the
-  // caller was not interpreted. If it was interpreted the
-  // interpreter will do the correct thing. If it isn't interpreted
-  // (call stub/compiled code) we will change our return and continue.
-
-  BIND(exception_return_sync_check);
-
-  if (synchronized) {
-    // Don't check for exceptions since we're still in the i2n frame. Do that
-    // manually afterwards.
-    unlock_method(false);
-  }
-  BIND(exception_return_sync_check_already_unlocked);
-
-  const Register return_pc = R31;
-
-  __ ld(return_pc, 0, R1_SP);
-  __ ld(return_pc, _abi(lr), return_pc);
-
-  // Get the address of the exception handler.
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
-                  R16_thread,
-                  return_pc /* return pc */);
-  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2);
-
-  // Load the PC of the exception handler into LR.
-  __ mtlr(R3_RET);
-
-  // Load exception into R3_ARG1 and clear pending exception in thread.
-  __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
-  __ li(R4_ARG2, 0);
-  __ std(R4_ARG2, thread_(pending_exception));
-
-  // Load the original return pc into R4_ARG2.
-  __ mr(R4_ARG2/*issuing_pc*/, return_pc);
-
-  // Return to exception handler.
-  __ blr();
-
-  //=============================================================================
-  // Counter overflow.
-
-  if (inc_counter) {
-    // Handle invocation counter overflow.
-    __ bind(invocation_counter_overflow);
-
-    generate_counter_overflow(continue_after_compile);
-  }
-
-  return entry;
-}
-
-// Generic interpreted method entry to (asm) interpreter.
-//
-address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
-  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-  address entry = __ pc();
-  // Generate the code to allocate the interpreter stack frame.
-  Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame.
-           Rsize_of_locals     = R5_ARG3; // Written by generate_fixed_frame.
-
-  generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);
-
-  // --------------------------------------------------------------------------
-  // Zero out non-parameter locals.
-  // Note: *Always* zero out non-parameter locals, as SPARC does. It's not
-  // worth asking the flag; just do it.
-  Register Rslot_addr = R6_ARG4,
-           Rnum       = R7_ARG5;
-  Label Lno_locals, Lzero_loop;
-
-  // Set up the zeroing loop.
-  __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
-  __ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
-  __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);
-  __ beq(CCR0, Lno_locals);
-  __ li(R0, 0);
-  __ mtctr(Rnum);
-
-  // The zero locals loop.
-  __ bind(Lzero_loop);
-  __ std(R0, 0, Rslot_addr);
-  __ addi(Rslot_addr, Rslot_addr, -Interpreter::stackElementSize);
-  __ bdnz(Lzero_loop);
-
-  __ bind(Lno_locals);
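For orientation, the CTR loop above is the assembly form of a plain downward fill over the slots between the parameters and the end of the locals area. A hedged C-style sketch of the same logic (names and types illustrative only):

#include <stdint.h>

// Sketch: zero all non-parameter local slots; interpreter stack slots
// grow toward lower addresses.
void zero_non_parameter_locals(intptr_t* locals, long param_bytes, long local_bytes) {
  long n = (local_bytes - param_bytes) / sizeof(intptr_t);    // slot count
  intptr_t* slot = (intptr_t*)((char*)locals - param_bytes);  // first non-param slot
  for (; n > 0; n--, slot--) {
    *slot = 0;
  }
}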
-
-  // --------------------------------------------------------------------------
-  // Counter increment and overflow check.
-  Label invocation_counter_overflow,
-        profile_method,
-        profile_method_continue;
-  if (inc_counter || ProfileInterpreter) {
-
-    Register Rdo_not_unlock_if_synchronized_addr = R11_scratch1;
-    if (synchronized) {
-      // Since at this point in the method invocation the exception handler
-      // would try to exit the monitor of a synchronized method which has not
-      // been entered yet, we set the thread-local variable
-      // _do_not_unlock_if_synchronized to true. If any exception is thrown by
-      // the runtime, the exception handling code, i.e. unlock_if_synchronized_method,
-      // will check this thread-local flag.
-      // The flag has two effects: it forces an unwind in the topmost
-      // interpreter frame and suppresses the unlock while doing so.
-      __ li(R0, 1);
-      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
-    }
-
-    // Argument and return type profiling.
-    __ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4);
-
-    // Increment invocation counter and check for overflow.
-    if (inc_counter) {
-      generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
-    }
-
-    __ bind(profile_method_continue);
-
-    // Reset the _do_not_unlock_if_synchronized flag.
-    if (synchronized) {
-      __ li(R0, 0);
-      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
-    }
-  }
-
-  // --------------------------------------------------------------------------
-  // Locking of synchronized methods. Must happen AFTER invocation_counter
-  // check and stack overflow check, so method is not locked if overflows.
-  if (synchronized) {
-    lock_method(R3_ARG1, R4_ARG2, R5_ARG3);
-  }
-#ifdef ASSERT
-  else {
-    Label Lok;
-    __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
-    __ andi_(R0, R0, JVM_ACC_SYNCHRONIZED);
-    __ asm_assert_eq("method needs synchronization", 0x8521);
-    __ bind(Lok);
-  }
-#endif // ASSERT
-
-  __ verify_thread();
-
-  // --------------------------------------------------------------------------
-  // JVMTI support
-  __ notify_method_entry();
-
-  // --------------------------------------------------------------------------
-  // Start executing instructions.
-  __ dispatch_next(vtos);
-
-  // --------------------------------------------------------------------------
-  // Out of line counter overflow and MDO creation code.
-  if (ProfileInterpreter) {
-    // We have decided to profile this method in the interpreter.
-    __ bind(profile_method);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
-    __ set_method_data_pointer_for_bcp();
-    __ b(profile_method_continue);
-  }
-
-  if (inc_counter) {
-    // Handle invocation counter overflow.
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(profile_method_continue);
-  }
-  return entry;
-}
-
-// CRC32 Intrinsics.
-//
-// Contract on scratch and work registers.
-// =======================================
-//
-// On ppc, the register set {R2..R12} is available in the interpreter as scratch/work registers.
-// You should, however, keep in mind that {R3_ARG1..R10_ARG8} is the C-ABI argument register set.
-// You can't rely on these registers across calls.
-//
-// The generators for CRC32_update and for CRC32_updateBytes use the
-// scratch/work register set internally, passing the work registers
-// as arguments to the MacroAssembler emitters as required.
-//
-// R3_ARG1..R6_ARG4 are preset to hold the incoming java arguments.
-// Their contents are not constant but may change according to the requirements
-// of the emitted code.
-//
-// All other registers from the scratch/work register set are used "internally"
-// and contain garbage (i.e. unpredictable values) once blr() is reached.
-// Basically, only R3_RET contains a defined value which is the function result.
-//
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.update(int crc, int b)
- */
-address InterpreterGenerator::generate_CRC32_update_entry() {
-  if (UseCRC32Intrinsics) {
-    address start = __ pc();  // Remember stub start address (is rtn value).
-    Label slow_path;
-
-    // Safepoint check
-    const Register sync_state = R11_scratch1;
-    int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
-    __ lwz(sync_state, sync_state_offs, sync_state);
-    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
-    __ bne(CCR0, slow_path);
-
-    // We don't generate a local frame and don't align the stack, because
-    // we don't even call stub code (we generate the code inline)
-    // and there is no safepoint on this path.
-
-    // Load java parameters.
-    // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
-    const Register argP    = R15_esp;
-    const Register crc     = R3_ARG1;  // crc value
-    const Register data    = R4_ARG2;  // address of java byte value (kernel_crc32 needs address)
-    const Register dataLen = R5_ARG3;  // source data len (1 byte). Not used because calling the single-byte emitter.
-    const Register table   = R6_ARG4;  // address of crc32 table
-    const Register tmp     = dataLen;  // Reuse unused len register to show we don't actually need a separate tmp here.
-
-    BLOCK_COMMENT("CRC32_update {");
-
-    // Arguments are reversed on java expression stack
-#ifdef VM_LITTLE_ENDIAN
-    __ addi(data, argP, 0+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
-                                       // Being passed as an int, the single byte is at offset +0.
-#else
-    __ addi(data, argP, 3+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
-                                       // Being passed from java as an int, the single byte is at offset +3.
-#endif
-    __ lwz(crc,  2*wordSize, argP);    // Current crc state, zero extend to 64 bit to have a clean register.
-
-    StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
-    __ kernel_crc32_singleByte(crc, data, dataLen, table, tmp);
-
-    // Restore caller sp for c2i case and return.
-    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-    __ blr();
-
-    // Generate a vanilla native entry as the slow path.
-    BLOCK_COMMENT("} CRC32_update");
-    BIND(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
-    return start;
-  }
-
-  return NULL;
-}
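The emitted kernel_crc32_singleByte is one step of the standard byte-at-a-time, table-driven CRC-32. As a hedged sketch of the semantics only (the VM's kernel and table layout differ for performance):

#include <stdint.h>

// Sketch: java.util.zip.CRC32.update(int crc, int b) with the classic
// 256-entry lookup table.
uint32_t crc32_update_byte(uint32_t crc, uint8_t b, const uint32_t table[256]) {
  crc = ~crc;                                   // work on the inverted state
  crc = table[(crc ^ b) & 0xff] ^ (crc >> 8);   // one table-driven step
  return ~crc;
}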
-
-// CRC32 Intrinsics.
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.updateBytes(     int crc, byte[] b,  int off, int len)
- *   int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
- */
-address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-  if (UseCRC32Intrinsics) {
-    address start = __ pc();  // Remember stub start address (is rtn value).
-    Label slow_path;
-
-    // Safepoint check
-    const Register sync_state = R11_scratch1;
-    int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
-    __ lwz(sync_state, sync_state_offs, sync_state);
-    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
-    __ bne(CCR0, slow_path);
-
-    // We don't generate a local frame and don't align the stack, because
-    // we don't even call stub code (we generate the code inline)
-    // and there is no safepoint on this path.
-
-    // Load parameters.
-    // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
-    const Register argP    = R15_esp;
-    const Register crc     = R3_ARG1;  // crc value
-    const Register data    = R4_ARG2;  // address of java byte array
-    const Register dataLen = R5_ARG3;  // source data len
-    const Register table   = R6_ARG4;  // address of crc32 table
-
-    const Register t0      = R9;       // scratch registers for crc calculation
-    const Register t1      = R10;
-    const Register t2      = R11;
-    const Register t3      = R12;
-
-    const Register tc0     = R2;       // registers to hold pre-calculated column addresses
-    const Register tc1     = R7;
-    const Register tc2     = R8;
-    const Register tc3     = table;    // table address is reconstructed at the end of kernel_crc32_* emitters
-
-    const Register tmp     = t0;       // Only used very locally to calculate byte buffer address.
-
-    // Arguments are reversed on java expression stack.
-    // Calculate address of start element.
-    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { // Used for "updateByteBuffer direct".
-      BLOCK_COMMENT("CRC32_updateByteBuffer {");
-      // crc     @ (SP + 5W) (32bit)
-      // buf     @ (SP + 3W) (64bit ptr to long array)
-      // off     @ (SP + 2W) (32bit)
-      // dataLen @ (SP + 1W) (32bit)
-      // data = buf + off
-      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
-      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
-      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
-      __ lwz( crc,     5*wordSize, argP);  // current crc state
-      __ add( data, data, tmp);            // Add byte buffer offset.
-    } else {                                                         // Used for "updateBytes update".
-      BLOCK_COMMENT("CRC32_updateBytes {");
-      // crc     @ (SP + 4W) (32bit)
-      // buf     @ (SP + 3W) (64bit ptr to byte array)
-      // off     @ (SP + 2W) (32bit)
-      // dataLen @ (SP + 1W) (32bit)
-      // data = buf + off + base_offset
-      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
-      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
-      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
-      __ add( data, data, tmp);            // add byte buffer offset
-      __ lwz( crc,     4*wordSize, argP);  // current crc state
-      __ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
-    }
-
-    StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
-
-    // Performance measurements show the 1word and 2word variants to be almost equivalent,
-    // with very light advantages for the 1word variant. We chose the 1word variant for
-    // code compactness.
-    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3);
-
-    // Restore caller sp for c2i case and return.
-    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-    __ blr();
-
-    // Generate a vanilla native entry as the slow path.
-    BLOCK_COMMENT("} CRC32_updateBytes(Buffer)");
-    BIND(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
-    return start;
-  }
-
-  return NULL;
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
+  return i;
 }
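The updateBytes/updateByteBuffer variants above differ only in the argument layout on the expression stack and in whether the array header must be skipped: updateByteBuffer receives a raw address, updateBytes a byte[] oop. A hedged sketch of the effective-address computation (helper name and parameters are illustrative):

#include <stdint.h>

// Sketch: start address of the data to be checksummed.
const uint8_t* crc_data_address(uintptr_t buf, int32_t off,
                                bool raw_buffer, int32_t array_base_offset) {
  uintptr_t p = buf + (uintptr_t)(intptr_t)off;
  if (!raw_buffer) {
    // byte[] case: skip the array header, i.e. the equivalent of
    // arrayOopDesc::base_offset_in_bytes(T_BYTE) above.
    p += array_base_offset;
  }
  return (const uint8_t*)p;
}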
 
 // These should never be compiled since the interpreter will prefer
 // the compiled version to the intrinsic version.
 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  return !math_entry_available(method_kind(m));
+  return !TemplateInterpreter::math_entry_available(method_kind(m));
 }
 
 // How much stack a method activation needs in stack slots.
@@ -1501,411 +154,14 @@
   }
 }
 
-// =============================================================================
-// Exceptions
-
-void TemplateInterpreterGenerator::generate_throw_exception() {
-  Register Rexception    = R17_tos,
-           Rcontinuation = R3_RET;
-
-  // --------------------------------------------------------------------------
-  // Entry point if an method returns with a pending exception (rethrow).
-  Interpreter::_rethrow_exception_entry = __ pc();
-  {
-    __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
-    __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
-    __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
-
-    // Compiled code destroys templateTableBase, reload.
-    __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
-  }
-
-  // Entry point if a interpreted method throws an exception (throw).
-  Interpreter::_throw_exception_entry = __ pc();
-  {
-    __ mr(Rexception, R3_RET);
-
-    __ verify_thread();
-    __ verify_oop(Rexception);
-
-    // Expression stack must be empty before entering the VM in case of an exception.
-    __ empty_expression_stack();
-    // Find exception handler address and preserve exception oop.
-    // Call C routine to find handler and jump to it.
-    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Rexception);
-    __ mtctr(Rcontinuation);
-    // Push exception for exception handler bytecodes.
-    __ push_ptr(Rexception);
-
-    // Jump to exception handler (may be the remove-activation entry!).
-    __ bctr();
-  }
-
-  // If the exception is not handled in the current frame the frame is
-  // removed and the exception is rethrown (i.e. exception
-  // continuation is _rethrow_exception).
-  //
-  // Note: At this point the bci still refers to the instruction
-  // which caused the exception, and the expression stack is
-  // empty. Thus, for any VM calls at this point, GC will find a legal
-  // oop map (with empty expression stack).
-
-  // In current activation
-  // tos: exception
-  // bcp: exception bcp
-
-  // --------------------------------------------------------------------------
-  // JVMTI PopFrame support
-
-  Interpreter::_remove_activation_preserving_args_entry = __ pc();
-  {
-    // Set the popframe_processing bit in popframe_condition indicating that we are
-    // currently handling popframe, so that call_VMs that may happen later do not
-    // trigger new popframe handling cycles.
-    __ lwz(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
-    __ ori(R11_scratch1, R11_scratch1, JavaThread::popframe_processing_bit);
-    __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
-
-    // Empty the expression stack, as in normal exception handling.
-    __ empty_expression_stack();
-    __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
-
-    // Check to see whether we are returning to a deoptimized frame.
-    // (The PopFrame call ensures that the caller of the popped frame is
-    // either interpreted or compiled and deoptimizes it if compiled.)
-    // Note that we don't compare the return PC against the
-    // deoptimization blob's unpack entry because of the presence of
-    // adapter frames in C2.
-    Label Lcaller_not_deoptimized;
-    Register return_pc = R3_ARG1;
-    __ ld(return_pc, 0, R1_SP);
-    __ ld(return_pc, _abi(lr), return_pc);
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), return_pc);
-    __ cmpdi(CCR0, R3_RET, 0);
-    __ bne(CCR0, Lcaller_not_deoptimized);
+// Support abs and sqrt like the compiler does.
+// For the others we can use a normal (native) entry.
 
-    // The deoptimized case.
-    // In this case, we can't call dispatch_next() after the frame is
-    // popped, but instead must save the incoming arguments and restore
-    // them after deoptimization has occurred.
-    __ ld(R4_ARG2, in_bytes(Method::const_offset()), R19_method);
-    __ lhz(R4_ARG2 /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), R4_ARG2);
-    __ slwi(R4_ARG2, R4_ARG2, Interpreter::logStackElementSize);
-    __ addi(R5_ARG3, R18_locals, Interpreter::stackElementSize);
-    __ subf(R5_ARG3, R4_ARG2, R5_ARG3);
-    // Save these arguments.
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R16_thread, R4_ARG2, R5_ARG3);
-
-    // Inform deoptimization that it is responsible for restoring these arguments.
-    __ load_const_optimized(R11_scratch1, JavaThread::popframe_force_deopt_reexecution_bit);
-    __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
-
-    // Return from the current method into the deoptimization blob. We will
-    // eventually end up in the deopt interpreter entry; deoptimization has
-    // prepared everything so that we re-execute the call that called us.
-    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*reload return_pc*/ return_pc, R11_scratch1, R12_scratch2);
-    __ mtlr(return_pc);
-    __ blr();
-
-    // The non-deoptimized case.
-    __ bind(Lcaller_not_deoptimized);
-
-    // Clear the popframe condition flag.
-    __ li(R0, 0);
-    __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
-
-    // Get out of the current method and re-execute the call that called us.
-    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
-    __ restore_interpreter_state(R11_scratch1);
-    __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
-    __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
-    if (ProfileInterpreter) {
-      __ set_method_data_pointer_for_bcp();
-      __ ld(R11_scratch1, 0, R1_SP);
-      __ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1);
-    }
-#if INCLUDE_JVMTI
-    Label L_done;
-
-    __ lbz(R11_scratch1, 0, R14_bcp);
-    __ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
-    __ bne(CCR0, L_done);
+bool TemplateInterpreter::math_entry_available(AbstractInterpreter::MethodKind kind) {
+  if (!InlineIntrinsics) return false;
 
-    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
-    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
-    __ ld(R4_ARG2, 0, R18_locals);
-    __ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
-    __ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
-    __ cmpdi(CCR0, R4_ARG2, 0);
-    __ beq(CCR0, L_done);
-    __ std(R4_ARG2, wordSize, R15_esp);
-    __ bind(L_done);
-#endif // INCLUDE_JVMTI
-    __ dispatch_next(vtos);
-  }
-  // end of JVMTI PopFrame support
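The PopFrame code above toggles bits in the thread's popframe_condition word. As a hedged C++ sketch of that protocol (accessor names are assumed, not verified against this exact revision):

// Sketch: popframe_condition bit handling as emitted above.
void begin_popframe_processing(JavaThread* thread) {
  // Mark that we are handling PopFrame now, so call_VMs issued during
  // handling do not trigger another popframe cycle.
  int cond = thread->popframe_condition();
  thread->set_popframe_condition(cond | JavaThread::popframe_processing_bit);
}

void request_deopt_reexecution(JavaThread* thread) {
  // Deoptimized-caller path: ask deoptimization to restore the saved
  // outgoing arguments when it rebuilds the frame.
  thread->set_popframe_condition(JavaThread::popframe_force_deopt_reexecution_bit);
}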
-
-  // --------------------------------------------------------------------------
-  // Remove activation exception entry.
-  // This is jumped to if an interpreted method can't handle an exception itself
-  // (we come from the throw/rethrow exception entry above). We're going to call
-  // into the VM to find the exception handler in the caller, pop the current
-  // frame and return the handler we calculated.
-  Interpreter::_remove_activation_entry = __ pc();
-  {
-    __ pop_ptr(Rexception);
-    __ verify_thread();
-    __ verify_oop(Rexception);
-    __ std(Rexception, in_bytes(JavaThread::vm_result_offset()), R16_thread);
-
-    __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, true);
-    __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI, false);
-
-    __ get_vm_result(Rexception);
-
-    // We are done with this activation frame; find out where to go next.
-    // The continuation point will be an exception handler, which expects
-    // the following registers set up:
-    //
-    // RET:  exception oop
-    // ARG2: Issuing PC (see generate_exception_blob()), only used if the caller is compiled.
-
-    Register return_pc = R31; // Needs to survive the runtime call.
-    __ ld(return_pc, 0, R1_SP);
-    __ ld(return_pc, _abi(lr), return_pc);
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, return_pc);
-
-    // Remove the current activation.
-    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
-
-    __ mr(R4_ARG2, return_pc);
-    __ mtlr(R3_RET);
-    __ mr(R3_RET, Rexception);
-    __ blr();
-  }
+  return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
+          (kind==Interpreter::java_lang_math_abs));
 }
 
-// JVMTI ForceEarlyReturn support.
-// Returns "in the middle" of a method with a "fake" return value.
-address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
 
-  Register Rscratch1 = R11_scratch1,
-           Rscratch2 = R12_scratch2;
-
-  address entry = __ pc();
-  __ empty_expression_stack();
-
-  __ load_earlyret_value(state, Rscratch1);
-
-  __ ld(Rscratch1, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
-  // Clear the earlyret state.
-  __ li(R0, 0);
-  __ stw(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rscratch1);
-
-  __ remove_activation(state, false, false);
-  // Copied from TemplateTable::_return.
-  // Restoration of lr done by remove_activation.
-  switch (state) {
-    case ltos:
-    case btos:
-    case ctos:
-    case stos:
-    case atos:
-    case itos: __ mr(R3_RET, R17_tos); break;
-    case ftos:
-    case dtos: __ fmr(F1_RET, F15_ftos); break;
-    case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
-               // to get visible before the reference to the object gets stored anywhere.
-               __ membar(Assembler::StoreStore); break;
-    default  : ShouldNotReachHere();
-  }
-  __ blr();
-
-  return entry;
-} // end of ForceEarlyReturn support
-
-//-----------------------------------------------------------------------------
-// Helper for vtos entry point generation
-
-void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
-                                                         address& bep,
-                                                         address& cep,
-                                                         address& sep,
-                                                         address& aep,
-                                                         address& iep,
-                                                         address& lep,
-                                                         address& fep,
-                                                         address& dep,
-                                                         address& vep) {
-  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
-  Label L;
-
-  aep = __ pc();  __ push_ptr();  __ b(L);
-  fep = __ pc();  __ push_f();    __ b(L);
-  dep = __ pc();  __ push_d();    __ b(L);
-  lep = __ pc();  __ push_l();    __ b(L);
-  __ align(32, 12, 24); // align L
-  bep = cep = sep =
-  iep = __ pc();  __ push_i();
-  vep = __ pc();
-  __ bind(L);
-  generate_and_dispatch(t);
-}
-
-//-----------------------------------------------------------------------------
-// Generation of individual instructions
-
-// helpers for generate_and_dispatch
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
-  : TemplateInterpreterGenerator(code) {
-  generate_all(); // Down here so it can be "virtual".
-}
-
-//-----------------------------------------------------------------------------
-
-// Non-product code
-#ifndef PRODUCT
-address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
-  //__ flush_bundle();
-  address entry = __ pc();
-
-  const char *bname = NULL;
-  uint tsize = 0;
-  switch(state) {
-  case ftos:
-    bname = "trace_code_ftos {";
-    tsize = 2;
-    break;
-  case btos:
-    bname = "trace_code_btos {";
-    tsize = 2;
-    break;
-  case ctos:
-    bname = "trace_code_ctos {";
-    tsize = 2;
-    break;
-  case stos:
-    bname = "trace_code_stos {";
-    tsize = 2;
-    break;
-  case itos:
-    bname = "trace_code_itos {";
-    tsize = 2;
-    break;
-  case ltos:
-    bname = "trace_code_ltos {";
-    tsize = 3;
-    break;
-  case atos:
-    bname = "trace_code_atos {";
-    tsize = 2;
-    break;
-  case vtos:
-    // Note: In case of vtos, the topmost stack value could be an int or a double.
-    // In case of a double (2 slots) we won't see the 2nd stack value.
-    // Maybe we should simply print the topmost 3 stack slots to cope with this.
-    bname = "trace_code_vtos {";
-    tsize = 2;
-
-    break;
-  case dtos:
-    bname = "trace_code_dtos {";
-    tsize = 3;
-    break;
-  default:
-    ShouldNotReachHere();
-  }
-  BLOCK_COMMENT(bname);
-
-  // Support short-cut for TraceBytecodesAt.
-  // Don't call into the VM if we don't want to trace, to speed things up.
-  Label Lskip_vm_call;
-  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
-    int offs1 = __ load_const_optimized(R11_scratch1, (address) &TraceBytecodesAt, R0, true);
-    int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
-    __ ld(R11_scratch1, offs1, R11_scratch1);
-    __ lwa(R12_scratch2, offs2, R12_scratch2);
-    __ cmpd(CCR0, R12_scratch2, R11_scratch1);
-    __ blt(CCR0, Lskip_vm_call);
-  }
-
-  __ push(state);
-  // Load 2 topmost expression stack values.
-  __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
-  __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
-  __ mflr(R31);
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
-  __ mtlr(R31);
-  __ pop(state);
-
-  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
-    __ bind(Lskip_vm_call);
-  }
-  __ blr();
-  BLOCK_COMMENT("} trace_code");
-  return entry;
-}
-
-void TemplateInterpreterGenerator::count_bytecode() {
-  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true);
-  __ lwz(R12_scratch2, offs, R11_scratch1);
-  __ addi(R12_scratch2, R12_scratch2, 1);
-  __ stw(R12_scratch2, offs, R11_scratch1);
-}
-
-void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
-  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true);
-  __ lwz(R12_scratch2, offs, R11_scratch1);
-  __ addi(R12_scratch2, R12_scratch2, 1);
-  __ stw(R12_scratch2, offs, R11_scratch1);
-}
-
-void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
-  const Register addr = R11_scratch1,
-                 tmp  = R12_scratch2;
-  // Get index, shift out old bytecode, bring in new bytecode, and store it.
-  // _index = (_index >> log2_number_of_codes) |
-  //          (bytecode << log2_number_of_codes);
-  int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true);
-  __ lwz(tmp, offs1, addr);
-  __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes);
-  __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
-  __ stw(tmp, offs1, addr);
-
-  // Bump bucket contents.
-  // _counters[_index] ++;
-  int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true);
-  __ sldi(tmp, tmp, LogBytesPerInt);
-  __ add(addr, tmp, addr);
-  __ lwz(tmp, offs2, addr);
-  __ addi(tmp, tmp, 1);
-  __ stw(tmp, offs2, addr);
-}
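A hedged C sketch of the bookkeeping the two emitters above implement (matching the pseudo-code in the comments; the code-width constant is illustrative):

// Sketch: BytecodePairHistogram maintenance.
enum { kLog2Codes = 8 };                          // illustrative width

static unsigned _index;
static unsigned _counters[1u << (2 * kLog2Codes)];

void record_bytecode_pair(unsigned bytecode) {
  // Shift out the older bytecode, shift in the newer one.
  _index = (_index >> kLog2Codes) | (bytecode << kLog2Codes);
  _counters[_index]++;                            // bump the pair's bucket
}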
-
-void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
-  // Call a little run-time stub to avoid blow-up for each bytecode.
-  // The runtime stub saves the right registers, depending on
-  // the tosca in-state for the given template.
-
-  assert(Interpreter::trace_code(t->tos_in()) != NULL,
-         "entry must have been generated");
-
-  // Note: we destroy LR here.
-  __ bl(Interpreter::trace_code(t->tos_in()));
-}
-
-void TemplateInterpreterGenerator::stop_interpreter_at() {
-  Label L;
-  int offs1 = __ load_const_optimized(R11_scratch1, (address) &StopInterpreterAt, R0, true);
-  int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
-  __ ld(R11_scratch1, offs1, R11_scratch1);
-  __ lwa(R12_scratch2, offs2, R12_scratch2);
-  __ cmpd(CCR0, R12_scratch2, R11_scratch1);
-  __ bne(CCR0, L);
-  __ illtrap();
-  __ bind(L);
-}
-
-#endif // !PRODUCT
-#endif // !CC_INTERP
--- a/src/cpu/ppc/vm/templateInterpreter_ppc.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/ppc/vm/templateInterpreter_ppc.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2013, 2015 SAP AG. All rights reserved.
+ * Copyright (c) 2013, 2015 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,12 +28,15 @@
 
  protected:
 
-  // Size of interpreter code.  Increase if too small.  Interpreter will
+  // Size of interpreter code. Increase if too small.  Interpreter will
   // fail with a guarantee ("not enough space for interpreter generation")
   // if too small.
   // Run with +PrintInterpreter to get the VM to print out the size.
   // Max size with JVMTI
-
   const static int InterpreterCodeSize = 230*K;
 
+ public:
+  // Support abs and sqrt like the compiler does.
+  // For the others we can use a normal (native) entry.
+  static bool math_entry_available(AbstractInterpreter::MethodKind kind);
 #endif // CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP
--- a/src/cpu/ppc/vm/vm_version_ppc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/ppc/vm/vm_version_ppc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -229,12 +229,27 @@
   }
 
   // Adjust RTM (Restricted Transactional Memory) flags.
-  if (!has_tcheck() && UseRTMLocking) {
+  if (UseRTMLocking) {
+    // If the CPU or OS is too old:
     // Can't continue because UseRTMLocking affects UseBiasedLocking flag
     // setting during arguments processing. See use_biased_locking().
     // VM_Version_init() is executed after UseBiasedLocking is used
     // in Thread::allocate().
-    vm_exit_during_initialization("RTM instructions are not available on this CPU");
+    if (!has_tcheck()) {
+      vm_exit_during_initialization("RTM instructions are not available on this CPU");
+    }
+    bool os_too_old = true;
+#ifdef AIX
+    if (os::Aix::os_version() >= 0x0701031e) { // at least AIX 7.1.3.30
+      os_too_old = false;
+    }
+#endif
+#ifdef linux
+    // TODO: check kernel version (we currently have too old versions only)
+#endif
+    if (os_too_old) {
+      vm_exit_during_initialization("RTM is not supported on this OS version.");
+    }
   }
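The AIX gate above compares a packed version number, apparently one byte per component (0x0701031e corresponds to AIX 7.1.3.30). A hedged helper showing the assumed encoding:

// Sketch: pack an OS version a.b.c.d into the format compared above.
static inline unsigned pack_os_version(unsigned a, unsigned b,
                                       unsigned c, unsigned d) {
  return (a << 24) | (b << 16) | (c << 8) | d;    // 7.1.3.30 -> 0x0701031e
}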
 
   if (UseRTMLocking) {
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1453,6 +1453,9 @@
 
 
 void LIR_Assembler::return_op(LIR_Opr result) {
+  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
+    __ reserved_stack_check();
+  }
   // the poll may need a register so just pick one that isn't the return register
 #if defined(TIERED) && !defined(_LP64)
   if (result->type_field() == LIR_OprDesc::long_type) {
--- a/src/cpu/sparc/vm/frame_sparc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/frame_sparc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -632,7 +632,7 @@
 
   // stack frames shouldn't be much larger than max_stack elements
 
-  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
+  if (fp() - unextended_sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
     return false;
   }
 
--- a/src/cpu/sparc/vm/globalDefinitions_sparc.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/globalDefinitions_sparc.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -58,4 +58,8 @@
   #endif
 #endif
 
+#if defined(SOLARIS)
+#define SUPPORT_RESERVED_STACK_AREA
+#endif
+
 #endif // CPU_SPARC_VM_GLOBALDEFINITIONS_SPARC_HPP
--- a/src/cpu/sparc/vm/globals_sparc.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/globals_sparc.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -54,6 +54,7 @@
 
 #define DEFAULT_STACK_YELLOW_PAGES (2)
 #define DEFAULT_STACK_RED_PAGES (1)
+#define DEFAULT_STACK_RESERVED_PAGES (SOLARIS_ONLY(1) NOT_SOLARIS(0))
 
 #ifdef _LP64
 // Stack slots are 2X larger in LP64 than in the 32 bit VM.
@@ -69,10 +70,12 @@
 #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
 #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
 #define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
+#define MIN_STACK_RESERVED_PAGES (0)
 
 define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
 define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
 define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
+define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
--- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1140,6 +1140,19 @@
   // save result (push state before jvmti call and pop it afterwards) and notify jvmti
   notify_method_exit(false, state, NotifyJVMTI);
 
+  if (StackReservedPages > 0) {
+    // testing if Stack Reserved Area needs to be re-enabled
+    Label no_reserved_zone_enabling;
+    ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G3_scratch);
+    cmp_and_brx_short(SP, G3_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling);
+
+    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread);
+    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_delayed_StackOverflowError), G2_thread);
+    should_not_reach_here();
+
+    bind(no_reserved_zone_enabling);
+  }
+
   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
   verify_thread();
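This interpreter path and MacroAssembler::reserved_stack_check (further down in this changeset) implement the same test: if SP has moved back above the recorded reserved-stack activation, the reserved zone was consumed and must be re-enabled before the delayed StackOverflowError is thrown. A hedged C++-style sketch (accessor names assumed):

// Sketch: the reserved-zone re-enable test emitted above.
void check_reserved_zone(JavaThread* thread, uintptr_t sp) {
  // Unsigned compare, as in cmp_and_brx_short(..., lessUnsigned, ...):
  // skip the enabling code while SP is still below the activation mark.
  if (sp >= (uintptr_t)thread->reserved_stack_activation()) {
    SharedRuntime::enable_stack_reserved_zone(thread);
    // ... then throw the delayed StackOverflowError; does not return.
  }
}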
 
--- a/src/cpu/sparc/vm/interpreter_sparc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/interpreter_sparc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,6 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -62,30 +61,6 @@
 
 //----------------------------------------------------------------------------------------------------
 
-
-
-
-int AbstractInterpreter::BasicType_as_index(BasicType type) {
-  int i = 0;
-  switch (type) {
-    case T_BOOLEAN: i = 0; break;
-    case T_CHAR   : i = 1; break;
-    case T_BYTE   : i = 2; break;
-    case T_SHORT  : i = 3; break;
-    case T_INT    : i = 4; break;
-    case T_LONG   : i = 5; break;
-    case T_VOID   : i = 6; break;
-    case T_FLOAT  : i = 7; break;
-    case T_DOUBLE : i = 8; break;
-    case T_OBJECT : i = 9; break;
-    case T_ARRAY  : i = 9; break;
-    default       : ShouldNotReachHere();
-  }
-  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
-  return i;
-}
-
-
 #ifndef _LP64
 address AbstractInterpreterGenerator::generate_slow_signature_handler() {
   address entry = __ pc();
@@ -254,28 +229,3 @@
   return entry;
 
 }
-
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  // No special entry points that preclude compilation
-  return true;
-}
-
-void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
-
-  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
-  // the days we had adapter frames. When we deoptimize a situation where a
-  // compiled caller calls a compiled callee, the caller will have registers it expects
-  // to survive the call to the callee. If we deoptimize the callee the only
-  // way we can restore these registers is to have the oldest interpreter
-  // frame that we create restore these values. That is what this routine
-  // will accomplish.
-
-  // At the moment we have modified c2 to not have any callee save registers
-  // so this problem does not exist and this routine is just a placeholder.
-
-  assert(f->is_interpreted_frame(), "must be interpreted");
-}
-
-
-//----------------------------------------------------------------------------------------------------
-// Exceptions
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -347,10 +347,10 @@
 #ifdef ASSERT
 // a hook for debugging
 static Thread* reinitialize_thread() {
-  return ThreadLocalStorage::thread();
+  return Thread::current();
 }
 #else
-#define reinitialize_thread ThreadLocalStorage::thread
+#define reinitialize_thread Thread::current
 #endif
 
 #ifdef ASSERT
@@ -380,7 +380,7 @@
 }
 
 static Thread* verify_thread_subroutine(Thread* gthread_value) {
-  Thread* correct_value = ThreadLocalStorage::thread();
+  Thread* correct_value = Thread::current();
   guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
   return correct_value;
 }
@@ -3587,6 +3587,24 @@
   }
 }
 
+void MacroAssembler::reserved_stack_check() {
+  // testing if reserved zone needs to be enabled
+  Label no_reserved_zone_enabling;
+
+  ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G4_scratch);
+  cmp_and_brx_short(SP, G4_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling);
+
+  call_VM_leaf(L0, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread);
+
+  AddressLiteral stub(StubRoutines::throw_delayed_StackOverflowError_entry());
+  jump_to(stub, G4_scratch);
+  delayed()->restore();
+
+  should_not_reach_here();
+
+  bind(no_reserved_zone_enabling);
+}
+
 ///////////////////////////////////////////////////////////////////////////////////
 #if INCLUDE_ALL_GCS
 
--- a/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1392,6 +1392,9 @@
   // stack overflow + shadow pages.  Clobbers tsp and scratch registers.
   void bang_stack_size(Register Rsize, Register Rtsp, Register Rscratch);
 
+  // Check for reserved stack access in method being exited (for JIT)
+  void reserved_stack_check();
+
   virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);
 
   void verify_tlab();
--- a/src/cpu/sparc/vm/memset_with_concurrent_readers_sparc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/memset_with_concurrent_readers_sparc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -52,7 +52,7 @@
 
 inline void fill_subword(void* start, void* end, int value) {
   STATIC_ASSERT(BytesPerWord == 8);
-  assert(pointer_delta(end, start, 1) < BytesPerWord, "precondition");
+  assert(pointer_delta(end, start, 1) < (size_t)BytesPerWord, "precondition");
   // Dispatch on (end - start).
   void* pc;
   __asm__ volatile(
@@ -73,10 +73,10 @@
     " stb %[value], [%[end]-3]\n\t"
     " stb %[value], [%[end]-2]\n\t"
     " stb %[value], [%[end]-1]\n\t" // end[-1] = value
-    : /* no outputs */
-      [pc] "&=r" (pc)               // temp
-    : [offset] "&+r" (start),
-      [end] "r" (end),
+    : /* only temporaries/overwritten outputs */
+      [pc] "=&r" (pc),               // temp
+      [offset] "+&r" (start)
+    : [end] "r" (end),
       [value] "r" (value)
     : "memory");
 }
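The hunk above fixes two extended-asm mistakes: the constraint modifier order ("=&r", not "&=r"), and the classification of the read-write operand, which must sit in the output list with "+&r" rather than among the inputs. A hedged, SPARC-flavored minimal example of the corrected form (illustrative only):

#include <stdint.h>

// Sketch: correct classification of extended-asm operands.
static inline void store_zero_and_advance(uintptr_t* addr) {
  uintptr_t tmp;
  __asm__ volatile(
    " mov %%g0, %[tmp]\n\t"          // tmp = 0
    " stx %[tmp], [%[a]]\n\t"        // *(uint64_t*)a = tmp
    " add %[a], 8, %[a]\n\t"         // a += 8
    : [tmp] "=&r" (tmp),             // write-only, early-clobber output
      [a]   "+&r" (*addr)            // read-write operand: outputs, not inputs
    :                                // no pure inputs
    : "memory");
}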
@@ -84,7 +84,7 @@
 void memset_with_concurrent_readers(void* to, int value, size_t size) {
   Prefetch::write(to, 0);
   void* end = static_cast<char*>(to) + size;
-  if (size >= BytesPerWord) {
+  if (size >= (size_t)BytesPerWord) {
     // Fill any partial word prefix.
     uintx* aligned_to = static_cast<uintx*>(align_ptr_up(to, BytesPerWord));
     fill_subword(to, aligned_to, value);
@@ -144,10 +144,10 @@
       " stx %[xvalue], [%[aend]-24]\n\t"
       " stx %[xvalue], [%[aend]-16]\n\t"
       " stx %[xvalue], [%[aend]-8]\n\t"  // aligned_end[-1] = xvalue
-      : /* no outputs */
-        [temp] "&=r" (temp)
-      : [ato] "&+r" (aligned_to),
-        [aend] "r" (aligned_end),
+      : /* only temporaries/overwritten outputs */
+        [temp] "=&r" (temp),
+        [ato] "+&r" (aligned_to)
+      : [aend] "r" (aligned_end),
         [xvalue] "r" (xvalue)
       : "cc", "memory");
     to = aligned_end;           // setup for suffix
--- a/src/cpu/sparc/vm/nativeInst_sparc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -434,7 +434,7 @@
 
 
 void NativeMovConstReg32::print() {
-  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
+  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, p2i(instruction_address()), data());
 }
 
 
--- a/src/cpu/sparc/vm/sparc.ad	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/sparc.ad	Fri Dec 18 12:39:02 2015 -0800
@@ -1294,6 +1294,10 @@
 
   __ verify_thread();
 
+  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
+    __ reserved_stack_check();
+  }
+
   // If this does safepoint polling, then do it here
   if(do_polling() && ra_->C->is_method_compilation()) {
     AddressLiteral polling_page(os::get_polling_page());
@@ -1651,6 +1655,7 @@
 #endif // !_LP64
 
   Unimplemented();
+  return 0;
 }
 
 #ifndef PRODUCT
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -5355,7 +5355,12 @@
 #endif  // COMPILER2 !=> _LP64
 
     // Build this early so it's available for the interpreter.
-    StubRoutines::_throw_StackOverflowError_entry          = generate_throw_exception("StackOverflowError throw_exception",           CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
+    StubRoutines::_throw_StackOverflowError_entry =
+            generate_throw_exception("StackOverflowError throw_exception",
+            CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
+    StubRoutines::_throw_delayed_StackOverflowError_entry =
+            generate_throw_exception("delayed StackOverflowError throw_exception",
+            CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));
 
     if (UseCRC32Intrinsics) {
       // set table address before stub generation which use it
--- a/src/cpu/sparc/vm/stubRoutines_sparc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/stubRoutines_sparc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -36,7 +36,7 @@
   address _flush_reg_windows();   // in .s file.
   // Flush registers to stack. In case of error we will need to stack walk.
   address bootstrap_flush_windows(void) {
-    Thread* thread = ThreadLocalStorage::get_thread_slow();
+    Thread* thread = Thread::current_or_null();
     // Very early in the process there is no thread.
     if (thread != NULL) {
       guarantee(thread->is_Java_thread(), "Not a Java thread.");
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,1832 @@
+/*
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
+#include "interpreter/templateTable.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/timer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
+
+#ifndef CC_INTERP
+#ifndef FAST_DISPATCH
+#define FAST_DISPATCH 1
+#endif
+#undef FAST_DISPATCH
+
+
+// Generation of Interpreter
+//
+// The InterpreterGenerator generates the interpreter into Interpreter::_code.
+
+
+#define __ _masm->
+
+
+//----------------------------------------------------------------------------------------------------
+
+
+void InterpreterGenerator::save_native_result(void) {
+  // result potentially in O0/O1: save it across calls
+  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
+
+  // result potentially in F0/F1: save it across calls
+  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
+
+  // save and restore any potential method result value around the unlocking operation
+  __ stf(FloatRegisterImpl::D, F0, d_tmp);
+#ifdef _LP64
+  __ stx(O0, l_tmp);
+#else
+  __ std(O0, l_tmp);
+#endif
+}
+
+void InterpreterGenerator::restore_native_result(void) {
+  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
+  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
+
+  // Restore any method result value
+  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
+#ifdef _LP64
+  __ ldx(l_tmp, O0);
+#else
+  __ ldd(l_tmp, O0);
+#endif
+}
+
+address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
+  assert(!pass_oop || message == NULL, "either oop or message but not both");
+  address entry = __ pc();
+  // expression stack must be empty before entering the VM if an exception happened
+  __ empty_expression_stack();
+  // load exception object
+  __ set((intptr_t)name, G3_scratch);
+  if (pass_oop) {
+    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
+  } else {
+    __ set((intptr_t)message, G4_scratch);
+    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
+  }
+  // throw exception
+  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
+  AddressLiteral thrower(Interpreter::throw_exception_entry());
+  __ jump_to(thrower, G3_scratch);
+  __ delayed()->nop();
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
+  address entry = __ pc();
+  // expression stack must be empty before entering the VM if an exception
+  // happened
+  __ empty_expression_stack();
+  // load exception object
+  __ call_VM(Oexception,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::throw_ClassCastException),
+             Otos_i);
+  __ should_not_reach_here();
+  return entry;
+}
+
+
+address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
+  address entry = __ pc();
+  // expression stack must be empty before entering the VM if an exception happened
+  __ empty_expression_stack();
+  // convention: expect aberrant index in register G3_scratch, then shuffle the
+  // index to G4_scratch for the VM call
+  __ mov(G3_scratch, G4_scratch);
+  __ set((intptr_t)name, G3_scratch);
+  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
+  __ should_not_reach_here();
+  return entry;
+}
+
+
+address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
+  address entry = __ pc();
+  // expression stack must be empty before entering the VM if an exception happened
+  __ empty_expression_stack();
+  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
+  __ should_not_reach_here();
+  return entry;
+}
+
+
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
+  address entry = __ pc();
+
+  if (state == atos) {
+    __ profile_return_type(O0, G3_scratch, G1_scratch);
+  }
+
+#if !defined(_LP64) && defined(COMPILER2)
+  // All return values are where we want them, except for Longs.  C2 returns
+  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
+  // Since the interpreter will return longs in G1 and O0/O1 in the 32bit
+  // build even if we are returning from interpreted code, we just do a
+  // little stupid shuffling.
+  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
+  // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
+  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
+
+  if (state == ltos) {
+    __ srl (G1,  0, O1);
+    __ srlx(G1, 32, O0);
+  }
+#endif // !_LP64 && COMPILER2
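On 32-bit SPARC a Java long occupies a register pair; the shuffle above splits C2's 64-bit G1 value into the O0 (high word) / O1 (low word) pair the interpreter expects. A hedged arithmetic sketch:

#include <stdint.h>

// Sketch: the srl/srlx pair above.
void split_long(uint64_t g1, uint32_t* o0, uint32_t* o1) {
  *o1 = (uint32_t)g1;          // srl  G1,  0, O1  (low 32 bits)
  *o0 = (uint32_t)(g1 >> 32);  // srlx G1, 32, O0  (high 32 bits)
}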
+
+  // The callee returns with the stack possibly adjusted by adapter transition
+  // We remove that possible adjustment here.
+  // All interpreter local registers are untouched. Any result is passed back
+  // in the O0/O1 or float registers. Before continuing, the arguments must be
+  // popped from the java expression stack; i.e., Lesp must be adjusted.
+
+  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.
+
+  const Register cache = G3_scratch;
+  const Register index  = G1_scratch;
+  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
+
+  const Register flags = cache;
+  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
+  const Register parameter_size = flags;
+  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size);  // argument size in words
+  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);     // each argument size in bytes
+  __ add(Lesp, parameter_size, Lesp);                                           // pop arguments
+  __ dispatch_next(state, step);
+
+  return entry;
+}
+
+
+address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
+  address entry = __ pc();
+  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
+#if INCLUDE_JVMCI
+  // Check if we need to take lock at entry of synchronized method.
+  if (UseJVMCICompiler) {
+    Label L;
+    Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
+    __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
+    __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
+    // Clear flag.
+    __ stbool(G0, pending_monitor_enter_addr);
+    // Take lock.
+    lock_method();
+    __ bind(L);
+  }
+#endif
+  { Label L;
+    Address exception_addr(G2_thread, Thread::pending_exception_offset());
+    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
+    __ br_null_short(Gtemp, Assembler::pt, L);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
+    __ should_not_reach_here();
+    __ bind(L);
+  }
+  __ dispatch_next(state, step);
+  return entry;
+}
+
+// A result handler converts/unboxes a native call result into
+// a java interpreter/compiler result. The current frame is an
+// interpreter frame. The activation frame unwind code must be
+// consistent with that of TemplateTable::_return(...). In the
+// case of native methods, the caller's SP was not modified.
+address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
+  address entry = __ pc();
+  Register Itos_i  = Otos_i ->after_save();
+  Register Itos_l  = Otos_l ->after_save();
+  Register Itos_l1 = Otos_l1->after_save();
+  Register Itos_l2 = Otos_l2->after_save();
+  switch (type) {
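+    // T_BOOLEAN: subcc computes 0 - O0 and sets the carry flag iff O0 != 0;
+    // addc then materializes that carry as 0/1 in Itos_i.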
+    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
+    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
+    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
+    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
+    case T_LONG   :
+#ifndef _LP64
+                    __ mov(O1, Itos_l2);  // move other half of long
+#endif              // ifdef or no ifdef, fall through to the T_INT case
+    case T_INT    : __ mov(O0, Itos_i);                         break;
+    case T_VOID   : /* nothing to do */                         break;
+    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );     break;
+    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );     break;
+    case T_OBJECT :
+      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
+      __ verify_oop(Itos_i);
+      break;
+    default       : ShouldNotReachHere();
+  }
+  __ ret();                           // return from interpreter activation
+  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
+  NOT_PRODUCT(__ emit_int32(0);)       // marker for disassembly
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
+  address entry = __ pc();
+  __ push(state);
+  __ call_VM(noreg, runtime_entry);
+  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
+  return entry;
+}
+
+
+address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
+  address entry = __ pc();
+  __ dispatch_next(state);
+  return entry;
+}
+
+//
+// Helpers for commoning out cases in the various type of method entries.
+//
+
+// increment invocation count & check for overflow
+//
+// Note: checking for negative value instead of overflow
+//       so we have a 'sticky' overflow test
+//
+// Lmethod: method
+// ??: invocation counter
+//
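+// In the non-tiered path below, the limit check uses an unsigned compare, so
+// a counter whose sign bit is set (i.e. one that has overflowed) always
+// compares above the limit; that is what keeps the overflow test 'sticky'.
+//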
+void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
+  // Note: In tiered we increment either counters in MethodCounters* or in
+  // MDO depending if we're profiling or not.
+  const Register G3_method_counters = G3_scratch;
+  Label done;
+
+  if (TieredCompilation) {
+    const int increment = InvocationCounter::count_increment;
+    Label no_mdo;
+    if (ProfileInterpreter) {
+      // If no method data exists, go to profile_continue.
+      __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
+      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
+      // Increment counter
+      Address mdo_invocation_counter(G4_scratch,
+                                     in_bytes(MethodData::invocation_counter_offset()) +
+                                     in_bytes(InvocationCounter::counter_offset()));
+      Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
+      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
+                                 G3_scratch, Lscratch,
+                                 Assembler::zero, overflow);
+      __ ba_short(done);
+    }
+
+    // Increment counter in MethodCounters*
+    __ bind(no_mdo);
+    Address invocation_counter(G3_method_counters,
+            in_bytes(MethodCounters::invocation_counter_offset()) +
+            in_bytes(InvocationCounter::counter_offset()));
+    __ get_method_counters(Lmethod, G3_method_counters, done);
+    Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
+    __ increment_mask_and_jump(invocation_counter, increment, mask,
+                               G4_scratch, Lscratch,
+                               Assembler::zero, overflow);
+    __ bind(done);
+  } else { // not TieredCompilation
+    // Update standard invocation counters
+    __ get_method_counters(Lmethod, G3_method_counters, done);
+    __ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
+    if (ProfileInterpreter) {
+      Address interpreter_invocation_counter(G3_method_counters,
+            in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
+      __ ld(interpreter_invocation_counter, G4_scratch);
+      __ inc(G4_scratch);
+      __ st(G4_scratch, interpreter_invocation_counter);
+    }
+
+    if (ProfileInterpreter && profile_method != NULL) {
+      // Test to see if we should create a method data oop
+      Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
+      __ ld(profile_limit, G1_scratch);
+      __ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);
+
+      // if no method data exists, go to profile_method
+      __ test_method_data_pointer(*profile_method);
+    }
+
+    Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
+    __ ld(invocation_limit, G3_scratch);
+    __ cmp(O0, G3_scratch);
+    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
+    __ delayed()->nop();
+    __ bind(done);
+  }
+
+}
+
+// Allocate monitor and lock method (asm interpreter)
+// Lmethod - Method*
+//
+void TemplateInterpreterGenerator::lock_method() {
+  __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0);  // Load access flags.
+
+#ifdef ASSERT
+ { Label ok;
+   __ btst(JVM_ACC_SYNCHRONIZED, O0);
+   __ br( Assembler::notZero, false, Assembler::pt, ok);
+   __ delayed()->nop();
+   __ stop("method doesn't need synchronization");
+   __ bind(ok);
+  }
+#endif // ASSERT
+
+  // get synchronization object to O0
+  { Label done;
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+    __ btst(JVM_ACC_STATIC, O0);
+    __ br( Assembler::zero, true, Assembler::pt, done);
+    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case
+
+    __ ld_ptr( Lmethod, in_bytes(Method::const_offset()), O0);
+    __ ld_ptr( O0, in_bytes(ConstMethod::constants_offset()), O0);
+    __ ld_ptr( O0, ConstantPool::pool_holder_offset_in_bytes(), O0);
+
+    // lock the mirror, not the Klass*
+    __ ld_ptr( O0, mirror_offset, O0);
+
+#ifdef ASSERT
+    __ tst(O0);
+    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
+#endif // ASSERT
+
+    __ bind(done);
+  }
+
+  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
+  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());   // store object
+  // __ untested("lock_object from method entry");
+  __ lock_object(Lmonitors, O0);
+}
+
+
+void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
+                                                         Register Rscratch,
+                                                         Register Rscratch2) {
+  const int page_size = os::vm_page_size();
+  Label after_frame_check;
+
+  assert_different_registers(Rframe_size, Rscratch, Rscratch2);
+
+  __ set(page_size, Rscratch);
+  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);
+
+  // get the stack base, and in debug, verify it is non-zero
+  __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
+#ifdef ASSERT
+  Label base_not_zero;
+  __ br_notnull_short(Rscratch, Assembler::pn, base_not_zero);
+  __ stop("stack base is zero in generate_stack_overflow_check");
+  __ bind(base_not_zero);
+#endif
+
+  // get the stack size, and in debug, verify it is non-zero
+  assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
+  __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
+#ifdef ASSERT
+  Label size_not_zero;
+  __ br_notnull_short(Rscratch2, Assembler::pn, size_not_zero);
+  __ stop("stack size is zero in generate_stack_overflow_check");
+  __ bind(size_not_zero);
+#endif
+
+  // compute the beginning of the protected zone minus the requested frame size
+  __ sub( Rscratch, Rscratch2,   Rscratch );
+  __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
+  __ add( Rscratch, Rscratch2,   Rscratch );
+
+  // Add in the size of the frame (which is the same as subtracting it from the
+  // SP, which would take another register).
+  __ add( Rscratch, Rframe_size, Rscratch );
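+  // At this point, in effect:
+  //   Rscratch = (stack_base - stack_size)
+  //            + (StackRedPages + StackYellowPages) * page_size
+  //            + frame_size
+  // so there is enough room iff SP > Rscratch (checked below).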
+
+  // the frame is greater than one page in size, so check against
+  // the bottom of the stack
+  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);
+
+  // the stack will overflow, throw an exception
+
+  // Note that SP is restored to sender's sp (in the delay slot). This
+  // is necessary if the sender's frame is an extended compiled frame
+  // (see gen_c2i_adapter()) and safer anyway in case of JSR292
+  // adaptations.
+
+  // Note also that the restored frame is not necessarily interpreted.
+  // Use the shared runtime version of the StackOverflowError.
+  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
+  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
+  __ jump_to(stub, Rscratch);
+  __ delayed()->mov(O5_savedSP, SP);
+
+  // if you get to here, then there is enough stack space
+  __ bind( after_frame_check );
+}
+
+
+//
+// Generate a fixed interpreter frame. This is identical setup for interpreted
+// methods and for native methods hence the shared code.
+
+
+//----------------------------------------------------------------------------------------------------
+// Stack frame layout
+//
+// When control flow reaches any of the entry types for the interpreter
+// the following holds ->
+//
+// C2 Calling Conventions:
+//
+// The entry code below assumes that the following registers are set
+// when coming in:
+//    G5_method: holds the Method* of the method to call
+//    Lesp:    points to the TOS of the callers expression stack
+//             after having pushed all the parameters
+//
+// The entry code does the following to setup an interpreter frame
+//   pop parameters from the callers stack by adjusting Lesp
+//   set O0 to Lesp
+//   compute X = (max_locals - num_parameters)
+//   bump SP up by X to accommodate the extra locals
+//   compute X = max_expression_stack
+//               + vm_local_words
+//               + 16 words of register save area
+//   save frame doing a save sp, -X, sp growing towards lower addresses
+//   set Lbcp, Lmethod, LcpoolCache
+//   set Llocals to i0
+//   set Lmonitors to FP - rounded_vm_local_words
+//   set Lesp to Lmonitors - 4
+//
+//  The frame has now been setup to do the rest of the entry code
+
+// Try this optimization:  Most method entries could live in a
+// "one size fits all" stack frame without all the dynamic size
+// calculations.  It might be profitable to do all this calculation
+// statically and approximately for "small enough" methods.
+
+//-----------------------------------------------------------------------------------------------
+
+// C1 Calling conventions
+//
+// Upon method entry, the following registers are setup:
+//
+// g2 G2_thread: current thread
+// g5 G5_method: method to activate
+// g4 Gargs  : pointer to last argument
+//
+//
+// Stack:
+//
+// +---------------+ <--- sp
+// |               |
+// : reg save area :
+// |               |
+// +---------------+ <--- sp + 0x40
+// |               |
+// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
+// |               |
+// +---------------+ <--- sp + 0x5c
+// |               |
+// :     free      :
+// |               |
+// +---------------+ <--- Gargs
+// |               |
+// :   arguments   :
+// |               |
+// +---------------+
+// |               |
+//
+//
+//
+// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
+//
+// +---------------+ <--- sp
+// |               |
+// : reg save area :
+// |               |
+// +---------------+ <--- sp + 0x40
+// |               |
+// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
+// |               |
+// +---------------+ <--- sp + 0x5c
+// |               |
+// :               :
+// |               | <--- Lesp
+// +---------------+ <--- Lmonitors (fp - 0x18)
+// |   VM locals   |
+// +---------------+ <--- fp
+// |               |
+// : reg save area :
+// |               |
+// +---------------+ <--- fp + 0x40
+// |               |
+// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
+// |               |
+// +---------------+ <--- fp + 0x5c
+// |               |
+// :     free      :
+// |               |
+// +---------------+
+// |               |
+// : nonarg locals :
+// |               |
+// +---------------+
+// |               |
+// :   arguments   :
+// |               | <--- Llocals
+// +---------------+ <--- Gargs
+// |               |
+
+void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
+  //
+  //
+  // The entry code sets up a new interpreter frame in 4 steps:
+  //
+  // 1) Increase caller's SP by the amount of extra local space needed:
+  //    (check for overflow)
+  //    Efficient implementation of xload/xstore bytecodes requires
+  //    that arguments and non-argument locals are in a contiguously
+  //    addressable memory block => non-argument locals must be
+  //    allocated in the caller's frame.
+  //
+  // 2) Create a new stack frame and register window:
+  //    The new stack frame must provide space for the standard
+  //    register save area, the maximum java expression stack size,
+  //    the monitor slots (0 slots initially), and some frame local
+  //    scratch locations.
+  //
+  // 3) The following interpreter activation registers must be setup:
+  //    Lesp       : expression stack pointer
+  //    Lbcp       : bytecode pointer
+  //    Lmethod    : method
+  //    Llocals    : locals pointer
+  //    Lmonitors  : monitor pointer
+  //    LcpoolCache: constant pool cache
+  //
+  // 4) Initialize the non-argument locals if necessary:
+  //    Non-argument locals may need to be initialized to NULL
+  //    for GC to work. If the oop-map information is accurate
+  //    (in the absence of the JSR problem), no initialization
+  //    is necessary.
+  //
+  // (gri - 2/25/2000)
+
+
+  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
+
+  const int extra_space =
+    rounded_vm_local_words +                   // frame local scratch space
+    Method::extra_stack_entries() +            // extra stack for jsr 292
+    frame::memory_parameter_word_sp_offset +   // register save area
+    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);
+
+  const Register Glocals_size = G3;
+  const Register RconstMethod = Glocals_size;
+  const Register Otmp1 = O3;
+  const Register Otmp2 = O4;
+  // Lscratch can't be used as a temporary because the call_stub uses
+  // it to assert that the stack frame was setup correctly.
+  const Address constMethod       (G5_method, Method::const_offset());
+  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
+
+  __ ld_ptr( constMethod, RconstMethod );
+  __ lduh( size_of_parameters, Glocals_size);
+
+  // Gargs points to first local + BytesPerWord
+  // Set the saved SP after the register window save
+  //
+  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
+  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
+  __ add(Gargs, Otmp1, Gargs);
+
+  if (native_call) {
+    __ calc_mem_param_words( Glocals_size, Gframe_size );
+    __ add( Gframe_size,  extra_space, Gframe_size);
+    __ round_to( Gframe_size, WordsPerLong );
+    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
+  } else {
+
+    //
+    // Compute number of locals in method apart from incoming parameters
+    //
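+    // In effect: Glocals_size = (max_locals - num_parameters), rounded up to a
+    // long boundary and scaled to bytes; this is the space for non-argument
+    // locals that must be allocated in the caller's frame (see step 1 above).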
+    const Address size_of_locals    (Otmp1, ConstMethod::size_of_locals_offset());
+    __ ld_ptr( constMethod, Otmp1 );
+    __ lduh( size_of_locals, Otmp1 );
+    __ sub( Otmp1, Glocals_size, Glocals_size );
+    __ round_to( Glocals_size, WordsPerLong );
+    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );
+
+    // see if the frame is greater than one page in size. If so,
+    // then we need to verify there is enough stack space remaining
+    // Frame_size = (max_stack + extra_space) * BytesPerWord;
+    __ ld_ptr( constMethod, Gframe_size );
+    __ lduh( Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size );
+    __ add( Gframe_size, extra_space, Gframe_size );
+    __ round_to( Gframe_size, WordsPerLong );
+    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);
+
+    // Add in java locals size for stack overflow check only
+    __ add( Gframe_size, Glocals_size, Gframe_size );
+
+    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
+    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);
+
+    __ sub( Gframe_size, Glocals_size, Gframe_size);
+
+    //
+    // bump SP to accommodate the extra locals
+    //
+    __ sub( SP, Glocals_size, SP );
+  }
+
+  //
+  // now set up a stack frame with the size computed above
+  //
+  __ neg( Gframe_size );
+  __ save( SP, Gframe_size, SP );
+
+  //
+  // now set up all the local cache registers
+  //
+  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
+  // that all present references to Lbyte_code initialize the register
+  // immediately before use.
+  if (native_call) {
+    __ mov(G0, Lbcp);
+  } else {
+    __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
+    __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
+  }
+  __ mov( G5_method, Lmethod);                 // set Lmethod
+  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
+  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
+#ifdef _LP64
+  __ add( Lmonitors, STACK_BIAS, Lmonitors );   // Account for 64 bit stack bias
+#endif
+  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp
+
+  // setup interpreter activation registers
+  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals
+
+  if (ProfileInterpreter) {
+#ifdef FAST_DISPATCH
+    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
+    // they both use I2.
+    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
+#endif // FAST_DISPATCH
+    __ set_method_data_pointer();
+  }
+
+}
+
+// Method entry for java.lang.ref.Reference.get.
+address InterpreterGenerator::generate_Reference_get_entry(void) {
+#if INCLUDE_ALL_GCS
+  // Code: _aload_0, _getfield, _areturn
+  // parameter size = 1
+  //
+  // The code that gets generated by this routine is split into 2 parts:
+  //    1. The "intrinsified" code for G1 (or any SATB based GC),
+  //    2. The slow path - which is an expansion of the regular method entry.
+  //
+  // Notes:-
+  // * In the G1 code we do not check whether we need to block for
+  //   a safepoint. If G1 is enabled then we must execute the specialized
+  //   code for Reference.get (except when the Reference object is null)
+  //   so that we can log the value in the referent field with an SATB
+  //   update buffer.
+  //   If the code for the getfield template is modified so that the
+  //   G1 pre-barrier code is executed when the current method is
+  //   Reference.get() then going through the normal method entry
+  //   will be fine.
+  // * The G1 code can, however, check the receiver object (the instance
+  //   of java.lang.Reference) and jump to the slow path if null. If the
+  //   Reference object is null then we obviously cannot fetch the referent
+  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
+  //   regular method entry code to generate the NPE.
+  //
+  // This code is based on generate_accessor_entry.
+
+  address entry = __ pc();
+
+  const int referent_offset = java_lang_ref_Reference::referent_offset;
+  guarantee(referent_offset > 0, "referent offset not initialized");
+
+  if (UseG1GC) {
+    Label slow_path;
+
+    // In the G1 code we don't check if we need to reach a safepoint. We
+    // continue and the thread will safepoint at the next bytecode dispatch.
+
+    // Check if local 0 != NULL
+    // If the receiver is null then it is OK to jump to the slow path.
+    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
+    // check if local 0 == NULL and go the slow path
+    __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);
+
+
+    // Load the value of the referent field.
+    if (Assembler::is_simm13(referent_offset)) {
+      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
+    } else {
+      __ set(referent_offset, G3_scratch);
+      __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
+    }
+
+    // Generate the G1 pre-barrier code to log the value of
+    // the referent field in an SATB buffer. Note with
+    // these parameters the pre-barrier does not generate
+    // the load of the previous value
+
+    __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
+                            Otos_i /* pre_val */,
+                            G3_scratch /* tmp */,
+                            true /* preserve_o_regs */);
+
+    // _areturn
+    __ retl();                      // return from leaf routine
+    __ delayed()->mov(O5_savedSP, SP);
+
+    // Generate regular method entry
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
+    return entry;
+  }
+#endif // INCLUDE_ALL_GCS
+
+  // If G1 is not enabled then attempt to go through the accessor entry point.
+  // Reference.get is an accessor.
+  return NULL;
+}
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.update(int crc, int b)
+ */
+address InterpreterGenerator::generate_CRC32_update_entry() {
+
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    Label L_slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
+    __ set(SafepointSynchronize::_not_synchronized, O3);
+    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
+
+    // Load parameters
+    const Register crc   = O0; // initial crc
+    const Register val   = O1; // byte to update with
+    const Register table = O2; // address of 256-entry lookup table
+
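+    // Arguments are reversed on the java expression stack: with 8-byte stack
+    // slots, 'b' is the int in the top slot (on big-endian SPARC its low byte
+    // sits at Gargs+3) and 'crc' is the int in the next slot at Gargs+8.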
+    __ ldub(Gargs, 3, val);
+    __ lduw(Gargs, 8, crc);
+
+    __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);
+
+    __ not1(crc); // ~crc
+    __ clruwu(crc);
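+    // update_byte_crc32 performs the standard table-driven step, roughly
+    //   crc = table[(crc ^ val) & 0xFF] ^ (crc >> 8);
+    // the surrounding not1's implement CRC-32's pre- and post-inversion.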
+    __ update_byte_crc32(crc, val, table);
+    __ not1(crc); // ~crc
+
+    // result in O0
+    __ retl();
+    __ delayed()->nop();
+
+    // generate a vanilla native entry as the slow path
+    __ bind(L_slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
+ *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
+ */
+address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    Label L_slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
+    __ set(SafepointSynchronize::_not_synchronized, O3);
+    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
+
+    // Load parameters from the stack
+    const Register crc    = O0; // initial crc
+    const Register buf    = O1; // source java byte array address
+    const Register len    = O2; // len
+    const Register offset = O3; // offset
+
+    // Arguments are reversed on java expression stack
+    // Calculate address of start element
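+    // Stack slot offsets from Gargs (8-byte slots, last argument on top;
+    // the long 'buf' occupies two slots):
+    //   updateByteBuffer: len@0, off@8, buf@16, crc@32
+    //   updateBytes:      len@0, off@8, b@16,   crc@24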
+    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
+      __ lduw(Gargs, 0,  len);
+      __ lduw(Gargs, 8,  offset);
+      __ ldx( Gargs, 16, buf);
+      __ lduw(Gargs, 32, crc);
+      __ add(buf, offset, buf);
+    } else {
+      __ lduw(Gargs, 0,  len);
+      __ lduw(Gargs, 8,  offset);
+      __ ldx( Gargs, 16, buf);
+      __ lduw(Gargs, 24, crc);
+      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
+      __ add(buf, offset, buf);
+    }
+
+    // Call the crc32 kernel
+    __ MacroAssembler::save_thread(L7_thread_cache);
+    __ kernel_crc32(crc, buf, len, O3);
+    __ MacroAssembler::restore_thread(L7_thread_cache);
+
+    // result in O0
+    __ retl();
+    __ delayed()->nop();
+
+    // generate a vanilla native entry as the slow path
+    __ bind(L_slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
+
+//
+// Interpreter stub for calling a native method. (asm interpreter)
+// This sets up a somewhat different looking stack for calling the native method
+// than the typical interpreter frame setup.
+//
+
+address InterpreterGenerator::generate_native_entry(bool synchronized) {
+  address entry = __ pc();
+
+  // the following temporary registers are used during frame creation
+  const Register Gtmp1 = G3_scratch ;
+  const Register Gtmp2 = G1_scratch;
+  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+
+  // make sure registers are different!
+  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
+
+  const Address Laccess_flags(Lmethod, Method::access_flags_offset());
+
+  const Register Glocals_size = G3;
+  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
+
+  // make sure method is native & not abstract
+  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
+#ifdef ASSERT
+  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
+  {
+    Label L;
+    __ btst(JVM_ACC_NATIVE, Gtmp1);
+    __ br(Assembler::notZero, false, Assembler::pt, L);
+    __ delayed()->nop();
+    __ stop("tried to execute non-native method as native");
+    __ bind(L);
+  }
+  { Label L;
+    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
+    __ br(Assembler::zero, false, Assembler::pt, L);
+    __ delayed()->nop();
+    __ stop("tried to execute abstract method as non-abstract");
+    __ bind(L);
+  }
+#endif // ASSERT
+
+  // generate the code to allocate the interpreter stack frame
+  generate_fixed_frame(true);
+
+  //
+  // No locals to initialize for native method
+  //
+
+  // this slot will be set later, we initialize it to null here just in
+  // case we get a GC before the actual value is stored later
+  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);
+
+  const Address do_not_unlock_if_synchronized(G2_thread,
+    JavaThread::do_not_unlock_if_synchronized_offset());
+  // Since at this point in the method invocation the exception handler
+  // would try to exit the monitor of a synchronized method which hasn't
+  // been entered yet, we set the thread-local variable
+  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
+  // the runtime, the exception handling code (i.e. unlock_if_synchronized_method)
+  // will check this thread-local flag.
+  // In effect this forces an unwind in the topmost interpreter frame
+  // without performing an unlock while doing so.
+
+  __ movbool(true, G3_scratch);
+  __ stbool(G3_scratch, do_not_unlock_if_synchronized);
+
+  // increment invocation counter and check for overflow
+  //
+  // Note: checking for negative value instead of overflow
+  //       so we have a 'sticky' overflow test (may be of
+  //       importance as soon as we have true MT/MP)
+  Label invocation_counter_overflow;
+  Label Lcontinue;
+  if (inc_counter) {
+    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
+  }
+  __ bind(Lcontinue);
+
+  bang_stack_shadow_pages(true);
+
+  // reset the _do_not_unlock_if_synchronized flag
+  __ stbool(G0, do_not_unlock_if_synchronized);
+
+  // check for synchronized methods
+  // Must happen AFTER invocation_counter check and stack overflow check,
+  // so method is not locked if overflows.
+
+  if (synchronized) {
+    lock_method();
+  } else {
+#ifdef ASSERT
+    { Label ok;
+      __ ld(Laccess_flags, O0);
+      __ btst(JVM_ACC_SYNCHRONIZED, O0);
+      __ br( Assembler::zero, false, Assembler::pt, ok);
+      __ delayed()->nop();
+      __ stop("method needs synchronization");
+      __ bind(ok);
+    }
+#endif // ASSERT
+  }
+
+
+  // start execution
+  __ verify_thread();
+
+  // JVMTI support
+  __ notify_method_entry();
+
+  // native call
+
+  // (note that O0 is never an oop--at most it is a handle)
+  // It is important not to smash any handles created by this call,
+  // until any oop handle in O0 is dereferenced.
+
+  // (note that the space for outgoing params is preallocated)
+
+  // get signature handler
+  { Label L;
+    Address signature_handler(Lmethod, Method::signature_handler_offset());
+    __ ld_ptr(signature_handler, G3_scratch);
+    __ br_notnull_short(G3_scratch, Assembler::pt, L);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
+    __ ld_ptr(signature_handler, G3_scratch);
+    __ bind(L);
+  }
+
+  // Push a new frame so that the args will really be stored in it.
+  // Copy a few locals across so the new frame has the variables
+  // we need but these values will be dead at the jni call and
+  // therefore not gc volatile like the values in the current
+  // frame (Lmethod in particular)
+
+  // Flush the method pointer to the register save area
+  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
+  __ mov(Llocals, O1);
+
+  // calculate where the mirror handle body is allocated in the interpreter frame:
+  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);
+
+  // Calculate current frame size
+  __ sub(SP, FP, O3);         // Calculate negative of current frame size
+  __ save(SP, O3, SP);        // Allocate an identical sized frame
+
+  // Note I7 has leftover trash. Slow signature handler will fill it in
+  // should we get there. Normal jni call will set reasonable last_Java_pc
+  // below (and fix I7 so the stack trace doesn't have a meaningless frame
+  // in it).
+
+  // Load interpreter frame's Lmethod into same register here
+
+  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);
+
+  __ mov(I1, Llocals);
+  __ mov(I2, Lscratch2);     // save the address of the mirror
+
+
+  // ONLY Lmethod and Llocals are valid here!
+
+  // call signature handler, It will move the arg properly since Llocals in current frame
+  // matches that in outer frame
+
+  __ callr(G3_scratch, 0);
+  __ delayed()->nop();
+
+  // Result handler is in Lscratch
+
+  // Reload interpreter frame's Lmethod since slow signature handler may block
+  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);
+
+  { Label not_static;
+
+    __ ld(Laccess_flags, O0);
+    __ btst(JVM_ACC_STATIC, O0);
+    __ br( Assembler::zero, false, Assembler::pt, not_static);
+    // get native function entry point (O0 is a good temp until the very end)
+    __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
+    // for static methods insert the mirror argument
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+
+    __ ld_ptr(Lmethod, Method::const_offset(), O1);
+    __ ld_ptr(O1, ConstMethod::constants_offset(), O1);
+    __ ld_ptr(O1, ConstantPool::pool_holder_offset_in_bytes(), O1);
+    __ ld_ptr(O1, mirror_offset, O1);
+#ifdef ASSERT
+    if (!PrintSignatureHandlers)  // do not dirty the output with this
+    { Label L;
+      __ br_notnull_short(O1, Assembler::pt, L);
+      __ stop("mirror is missing");
+      __ bind(L);
+    }
+#endif // ASSERT
+    __ st_ptr(O1, Lscratch2, 0);
+    __ mov(Lscratch2, O1);
+    __ bind(not_static);
+  }
+
+  // At this point, arguments have been copied off of stack into
+  // their JNI positions, which are O1..O5 and SP[68..].
+  // Oops are boxed in-place on the stack, with handles copied to arguments.
+  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.
+
+#ifdef ASSERT
+  { Label L;
+    __ br_notnull_short(O0, Assembler::pt, L);
+    __ stop("native entry point is missing");
+    __ bind(L);
+  }
+#endif // ASSERT
+
+  //
+  // setup the frame anchor
+  //
+  // The scavenge function only needs to know that the PC of this frame is
+  // in the interpreter method entry code, it doesn't need to know the exact
+  // PC and hence we can use O7 which points to the return address from the
+  // previous call in the code stream (signature handler function)
+  //
+  // The other trick is we set last_Java_sp to FP instead of the usual SP because
+  // we have pushed the extra frame in order to protect the volatile register(s)
+  // in that frame when we return from the jni call
+  //
+
+  __ set_last_Java_frame(FP, O7);
+  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
+                   // not meaningless information that'll confuse me.
+
+  // flush the windows now. We don't care about the current (protection) frame
+  // only the outer frames
+
+  __ flushw();
+
+  // mark windows as flushed
+  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
+  __ set(JavaFrameAnchor::flushed, G3_scratch);
+  __ st(G3_scratch, flags);
+
+  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.
+
+  Address thread_state(G2_thread, JavaThread::thread_state_offset());
+#ifdef ASSERT
+  { Label L;
+    __ ld(thread_state, G3_scratch);
+    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
+    __ stop("Wrong thread state in native stub");
+    __ bind(L);
+  }
+#endif // ASSERT
+  __ set(_thread_in_native, G3_scratch);
+  __ st(G3_scratch, thread_state);
+
+  // Call the jni method, using the delay slot to set the JNIEnv* argument.
+  __ save_thread(L7_thread_cache); // save Gthread
+  __ callr(O0, 0);
+  __ delayed()->
+     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);
+
+  // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD
+
+  __ restore_thread(L7_thread_cache); // restore G2_thread
+  __ reinit_heapbase();
+
+  // must we block?
+
+  // Block, if necessary, before resuming in _thread_in_Java state.
+  // In order for GC to work, don't clear the last_Java_sp until after blocking.
+  { Label no_block;
+    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
+
+    // Switch thread to "native transition" state before reading the synchronization state.
+    // This additional state is necessary because reading and testing the synchronization
+    // state is not atomic w.r.t. GC, as this scenario demonstrates:
+    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
+    //     VM thread changes sync state to synchronizing and suspends threads for GC.
+    //     Thread A is resumed to finish this native method, but doesn't block here since it
+    //     didn't see any synchronization in progress, and escapes.
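+    // The transition protocol is, in effect:
+    //   thread->state = _thread_in_native_trans;
+    //   StoreLoad barrier (membar, or a serialization page write);
+    //   if (SafepointSynchronize::state != _not_synchronized ||
+    //       thread->suspend_flags != 0) block();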
+    __ set(_thread_in_native_trans, G3_scratch);
+    __ st(G3_scratch, thread_state);
+    if (os::is_MP()) {
+      if (UseMembar) {
+        // Force this write out before the read below
+        __ membar(Assembler::StoreLoad);
+      } else {
+        // Write serialization page so VM thread can do a pseudo remote membar.
+        // We use the current thread pointer to calculate a thread specific
+        // offset to write to within the page. This minimizes bus traffic
+        // due to cache line collision.
+        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
+      }
+    }
+    __ load_contents(sync_state, G3_scratch);
+    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
+
+    Label L;
+    __ br(Assembler::notEqual, false, Assembler::pn, L);
+    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
+    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
+    __ bind(L);
+
+    // Block.  Save any potential method result value before the operation and
+    // use a leaf call to leave the last_Java_frame setup undisturbed.
+    save_native_result();
+    __ call_VM_leaf(L7_thread_cache,
+                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
+                    G2_thread);
+
+    // Restore any method result value
+    restore_native_result();
+    __ bind(no_block);
+  }
+
+  // Clear the frame anchor now
+
+  __ reset_last_Java_frame();
+
+  // Move the result handler address
+  __ mov(Lscratch, G3_scratch);
+  // return possible result to the outer frame
+#ifndef _LP64
+  __ mov(O0, I0);
+  __ restore(O1, G0, O1);
+#else
+  __ restore(O0, G0, O0);
+#endif /* _LP64 */
+
+  // Move result handler to expected register
+  __ mov(G3_scratch, Lscratch);
+
+  // Back in normal (native) interpreter frame. State is thread_in_native_trans
+  // switch to thread_in_Java.
+
+  __ set(_thread_in_Java, G3_scratch);
+  __ st(G3_scratch, thread_state);
+
+  // reset handle block
+  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
+  __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
+
+  // If we have an oop result store it where it will be safe for any further gc
+  // until we return now that we've released the handle it might be protected by
+
+  {
+    Label no_oop, store_result;
+
+    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
+    __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
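+    // addcc copies O0 while setting the condition codes: if the handle in O0
+    // is non-NULL, the annulled delay slot dereferences (unboxes) it;
+    // otherwise we fall through and store NULL.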
+    __ addcc(G0, O0, O0);
+    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
+    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
+    __ mov(G0, O0);
+
+    __ bind(store_result);
+    // Store it where gc will look for it and result handler expects it.
+    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);
+
+    __ bind(no_oop);
+
+  }
+
+
+  // handle exceptions (exception handling will handle unlocking!)
+  { Label L;
+    Address exception_addr(G2_thread, Thread::pending_exception_offset());
+    __ ld_ptr(exception_addr, Gtemp);
+    __ br_null_short(Gtemp, Assembler::pt, L);
+    // Note: This could be handled more efficiently since we know that the native
+    //       method doesn't have an exception handler. We could directly return
+    //       to the exception handler for the caller.
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
+    __ should_not_reach_here();
+    __ bind(L);
+  }
+
+  // JVMTI support (preserves thread register)
+  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);
+
+  if (synchronized) {
+    // save and restore any potential method result value around the unlocking operation
+    save_native_result();
+
+    __ add( __ top_most_monitor(), O1);
+    __ unlock_object(O1);
+
+    restore_native_result();
+  }
+
+#if defined(COMPILER2) && !defined(_LP64)
+
+  // C2 expects long results in G1. We can't tell if we're returning to interpreted
+  // or compiled code, so just be safe.
+
+  __ sllx(O0, 32, G1);          // Shift bits into high G1
+  __ srl (O1, 0, O1);           // Zero extend O1
+  __ or3 (O1, G1, G1);          // OR 64 bits into G1
+
+#endif /* COMPILER2 && !_LP64 */
+
+  // dispose of return address and remove activation
+#ifdef ASSERT
+  {
+    Label ok;
+    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
+    __ stop("bad I5_savedSP value");
+    __ should_not_reach_here();
+    __ bind(ok);
+  }
+#endif
+  if (TraceJumps) {
+    // Move target to register that is recordable
+    __ mov(Lscratch, G3_scratch);
+    __ JMP(G3_scratch, 0);
+  } else {
+    __ jmp(Lscratch, 0);
+  }
+  __ delayed()->nop();
+
+
+  if (inc_counter) {
+    // handle invocation counter overflow
+    __ bind(invocation_counter_overflow);
+    generate_counter_overflow(Lcontinue);
+  }
+
+
+
+  return entry;
+}
+
+
+// Generic method entry to (asm) interpreter
+address InterpreterGenerator::generate_normal_entry(bool synchronized) {
+  address entry = __ pc();
+
+  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+
+  // the following temporary registers are used during frame creation
+  const Register Gtmp1 = G3_scratch ;
+  const Register Gtmp2 = G1_scratch;
+
+  // make sure registers are different!
+  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
+
+  const Address constMethod       (G5_method, Method::const_offset());
+  // Seems like G5_method is live at the point this is used. So we could make this look consistent
+  // and use it in the asserts.
+  const Address access_flags      (Lmethod,   Method::access_flags_offset());
+
+  const Register Glocals_size = G3;
+  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
+
+  // make sure method is not native & not abstract
+  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
+#ifdef ASSERT
+  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
+  {
+    Label L;
+    __ btst(JVM_ACC_NATIVE, Gtmp1);
+    __ br(Assembler::zero, false, Assembler::pt, L);
+    __ delayed()->nop();
+    __ stop("tried to execute native method as non-native");
+    __ bind(L);
+  }
+  { Label L;
+    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
+    __ br(Assembler::zero, false, Assembler::pt, L);
+    __ delayed()->nop();
+    __ stop("tried to execute abstract method as non-abstract");
+    __ bind(L);
+  }
+#endif // ASSERT
+
+  // generate the code to allocate the interpreter stack frame
+
+  generate_fixed_frame(false);
+
+#ifdef FAST_DISPATCH
+  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
+                                          // set bytecode dispatch table base
+#endif
+
+  //
+  // Code to initialize the extra (i.e. non-parm) locals
+  //
+  Register init_value = noreg;    // will be G0 if we must clear locals
+  // The way the code was set up before, zerolocals was always true for vanilla java entries.
+  // It could only be false for the specialized entries like accessor or empty which have
+  // no extra locals, so the testing was a waste of time and the extra locals were always
+  // initialized. We removed this extra complication from already over-complicated code.
+
+  init_value = G0;
+  Label clear_loop;
+
+  const Register RconstMethod = O1;
+  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
+  const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());
+
+  // NOTE: If you change the frame layout, this code will need to
+  // be updated!
+  __ ld_ptr( constMethod, RconstMethod );
+  __ lduh( size_of_locals, O2 );
+  __ lduh( size_of_parameters, O1 );
+  __ sll( O2, Interpreter::logStackElementSize, O2);
+  __ sll( O1, Interpreter::logStackElementSize, O1 );
+  __ sub( Llocals, O2, O2 );
+  __ sub( Llocals, O1, O1 );
+
+  __ bind( clear_loop );
+  __ inc( O2, wordSize );
+
+  __ cmp( O2, O1 );
+  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
+  __ delayed()->st_ptr( init_value, O2, 0 );
+
+  const Address do_not_unlock_if_synchronized(G2_thread,
+    JavaThread::do_not_unlock_if_synchronized_offset());
+  // Since at this point in the method invocation the exception handler
+  // would try to exit the monitor of synchronized methods which hasn't
+  // been entered yet, we set the thread local variable
+  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
+  // runtime, exception handling i.e. unlock_if_synchronized_method will
+  // check this thread local flag.
+  __ movbool(true, G3_scratch);
+  __ stbool(G3_scratch, do_not_unlock_if_synchronized);
+
+  __ profile_parameters_type(G1_scratch, G3_scratch, G4_scratch, Lscratch);
+  // increment invocation counter and check for overflow
+  //
+  // Note: checking for negative value instead of overflow
+  //       so we have a 'sticky' overflow test (may be of
+  //       importance as soon as we have true MT/MP)
+  Label invocation_counter_overflow;
+  Label profile_method;
+  Label profile_method_continue;
+  Label Lcontinue;
+  if (inc_counter) {
+    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
+    if (ProfileInterpreter) {
+      __ bind(profile_method_continue);
+    }
+  }
+  __ bind(Lcontinue);
+
+  bang_stack_shadow_pages(false);
+
+  // reset the _do_not_unlock_if_synchronized flag
+  __ stbool(G0, do_not_unlock_if_synchronized);
+
+  // check for synchronized methods
+  // Must happen AFTER invocation_counter check and stack overflow check,
+  // so method is not locked if overflows.
+
+  if (synchronized) {
+    lock_method();
+  } else {
+#ifdef ASSERT
+    { Label ok;
+      __ ld(access_flags, O0);
+      __ btst(JVM_ACC_SYNCHRONIZED, O0);
+      __ br( Assembler::zero, false, Assembler::pt, ok);
+      __ delayed()->nop();
+      __ stop("method needs synchronization");
+      __ bind(ok);
+    }
+#endif // ASSERT
+  }
+
+  // start execution
+
+  __ verify_thread();
+
+  // jvmti support
+  __ notify_method_entry();
+
+  // start executing instructions
+  __ dispatch_next(vtos);
+
+
+  if (inc_counter) {
+    if (ProfileInterpreter) {
+      // We have decided to profile this method in the interpreter
+      __ bind(profile_method);
+
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+      __ set_method_data_pointer_for_bcp();
+      __ ba_short(profile_method_continue);
+    }
+
+    // handle invocation counter overflow
+    __ bind(invocation_counter_overflow);
+    generate_counter_overflow(Lcontinue);
+  }
+
+
+  return entry;
+}
+
+//----------------------------------------------------------------------------------------------------
+// Exceptions
+void TemplateInterpreterGenerator::generate_throw_exception() {
+
+  // Entry point in previous activation (i.e., if the caller was interpreted)
+  Interpreter::_rethrow_exception_entry = __ pc();
+  // O0: exception
+
+  // entry point for exceptions thrown within interpreter code
+  Interpreter::_throw_exception_entry = __ pc();
+  __ verify_thread();
+  // expression stack is undefined here
+  // O0: exception, i.e. Oexception
+  // Lbcp: exception bcp
+  __ verify_oop(Oexception);
+
+
+  // expression stack must be empty before entering the VM in case of an exception
+  __ empty_expression_stack();
+  // find exception handler address and preserve exception oop
+  // call C routine to find handler and jump to it
+  __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
+  __ push_ptr(O1); // push exception for exception handler bytecodes
+
+  __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
+  __ delayed()->nop();
+
+
+  // if the exception is not handled in the current frame
+  // the frame is removed and the exception is rethrown
+  // (i.e. exception continuation is _rethrow_exception)
+  //
+  // Note: At this point the bci is still the bci for the instruction which caused
+  //       the exception and the expression stack is empty. Thus, for any VM calls
+  //       at this point, GC will find a legal oop map (with empty expression stack).
+
+  // in current activation
+  // tos: exception
+  // Lbcp: exception bcp
+
+  //
+  // JVMTI PopFrame support
+  //
+
+  Interpreter::_remove_activation_preserving_args_entry = __ pc();
+  Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
+  // Set the popframe_processing bit in popframe_condition indicating that we are
+  // currently handling popframe, so that call_VMs that may happen later do not trigger new
+  // popframe handling cycles.
+
+  __ ld(popframe_condition_addr, G3_scratch);
+  __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
+  __ stw(G3_scratch, popframe_condition_addr);
+
+  // Empty the expression stack, as in normal exception handling
+  __ empty_expression_stack();
+  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
+
+  {
+    // Check to see whether we are returning to a deoptimized frame.
+    // (The PopFrame call ensures that the caller of the popped frame is
+    // either interpreted or compiled and deoptimizes it if compiled.)
+    // In this case, we can't call dispatch_next() after the frame is
+    // popped, but instead must save the incoming arguments and restore
+    // them after deoptimization has occurred.
+    //
+    // Note that we don't compare the return PC against the
+    // deoptimization blob's unpack entry because of the presence of
+    // adapter frames in C2.
+    Label caller_not_deoptimized;
+    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
+    __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);
+
+    const Register Gtmp1 = G3_scratch;
+    const Register Gtmp2 = G1_scratch;
+    const Register RconstMethod = Gtmp1;
+    const Address constMethod(Lmethod, Method::const_offset());
+    const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
+
+    // Compute size of arguments for saving when returning to deoptimized caller
+    __ ld_ptr(constMethod, RconstMethod);
+    __ lduh(size_of_parameters, Gtmp1);
+    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
+    __ sub(Llocals, Gtmp1, Gtmp2);
+    __ add(Gtmp2, wordSize, Gtmp2);
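+    // Gtmp1 now holds the argument area size in bytes, and Gtmp2 its lowest
+    // address (the locals sit at decreasing addresses starting at Llocals).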
+    // Save these arguments
+    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
+    // Inform deoptimization that it is responsible for restoring these arguments
+    __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
+    Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
+    __ st(Gtmp1, popframe_condition_addr);
+
+    // Return from the current method
+    // The caller's SP was adjusted upon method entry to accommodate
+    // the callee's non-argument locals. Undo that adjustment.
+    __ ret();
+    __ delayed()->restore(I5_savedSP, G0, SP);
+
+    __ bind(caller_not_deoptimized);
+  }
+
+  // Clear the popframe condition flag
+  __ stw(G0 /* popframe_inactive */, popframe_condition_addr);
+
+  // Get out of the current method (how this is done depends on the particular compiler calling
+  // convention that the interpreter currently follows)
+  // The caller's SP was adjusted upon method entry to accommodate
+  // the callee's non-argument locals. Undo that adjustment.
+  __ restore(I5_savedSP, G0, SP);
+  // The method data pointer was incremented already during
+  // call profiling. We have to restore the mdp for the current bcp.
+  if (ProfileInterpreter) {
+    __ set_method_data_pointer_for_bcp();
+  }
+
+#if INCLUDE_JVMTI
+  {
+    Label L_done;
+
+    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
+    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+
+    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);
+
+    __ br_null(G1_scratch, false, Assembler::pn, L_done);
+    __ delayed()->nop();
+
+    __ st_ptr(G1_scratch, Lesp, wordSize);
+    __ bind(L_done);
+  }
+#endif // INCLUDE_JVMTI
+
+  // Resume bytecode interpretation at the current bcp
+  __ dispatch_next(vtos);
+  // end of JVMTI PopFrame support
+
+  Interpreter::_remove_activation_entry = __ pc();
+
+  // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
+  __ pop_ptr(Oexception);                                  // get exception
+
+  // Intel has the following comment:
+  //// remove the activation (without doing throws on illegalMonitorExceptions)
+  // They remove the activation without checking for bad monitor state.
+  // %%% We should make sure this is the right semantics before implementing.
+
+  __ set_vm_result(Oexception);
+  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);
+
+  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);
+
+  __ get_vm_result(Oexception);
+  __ verify_oop(Oexception);
+
+  const int return_reg_adjustment = frame::pc_return_offset;
+  Address issuing_pc_addr(I7, return_reg_adjustment);
+
+  // We are done with this activation frame; find out where to go next.
+  // The continuation point will be an exception handler, which expects
+  // the following registers set up:
+  //
+  // Oexception: exception
+  // Oissuing_pc: the local call that threw the exception
+  // Other On: garbage
+  // In/Ln:  the contents of the caller's register window
+  //
+  // We do the required restore at the last possible moment, because we
+  // need to preserve some state across a runtime call.
+  // (Remember that the caller activation is unknown--it might not be
+  // interpreted, so things like Lscratch are useless in the caller.)
+
+  // Although the Intel version uses call_C, we can use the more
+  // compact call_VM.  (The only real difference on SPARC is a
+  // harmlessly ignored [re]set_last_Java_frame, compared with
+  // the Intel code which lacks this.)
+  __ mov(Oexception,      Oexception ->after_save());  // get exception in I0 so it will be on O0 after restore
+  __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
+  __ super_call_VM_leaf(L7_thread_cache,
+                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
+                        G2_thread, Oissuing_pc->after_save());
+
+  // The caller's SP was adjusted upon method entry to accommodate
+  // the callee's non-argument locals. Undo that adjustment.
+  __ JMP(O0, 0);                         // return exception handler in caller
+  __ delayed()->restore(I5_savedSP, G0, SP);
+
+  // (same old exception object is already in Oexception; see above)
+  // Note that an "issuing PC" is actually the next PC after the call
+}
+
+
+//
+// JVMTI ForceEarlyReturn support
+//
+
+address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
+  address entry = __ pc();
+
+  __ empty_expression_stack();
+  __ load_earlyret_value(state);
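+  // The forced return value was fetched from the JvmtiThreadState into the
+  // tos register(s) for this state.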
+
+  __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
+  Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
+
+  // Clear the earlyret state
+  __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
+
+  __ remove_activation(state,
+                       /* throw_monitor_exception */ false,
+                       /* install_monitor_exception */ false);
+
+  // The caller's SP was adjusted upon method entry to accommodate
+  // the callee's non-argument locals. Undo that adjustment.
+  __ ret();                             // return to caller
+  __ delayed()->restore(I5_savedSP, G0, SP);
+
+  return entry;
+} // end of JVMTI ForceEarlyReturn support
+
+
+//------------------------------------------------------------------------------------------------------------------------
+// Helper for vtos entry point generation
+
+void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
+  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
+  Label L;
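+  // Convert the incoming tos-cached value to vtos by pushing it onto the
+  // expression stack, then fall through to the common vtos entry point.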
+  aep = __ pc(); __ push_ptr(); __ ba_short(L);
+  fep = __ pc(); __ push_f();   __ ba_short(L);
+  dep = __ pc(); __ push_d();   __ ba_short(L);
+  lep = __ pc(); __ push_l();   __ ba_short(L);
+  iep = __ pc(); __ push_i();
+  bep = cep = sep = iep;                        // byte/char/short tos values are int-cached; reuse the int entry
+  vep = __ pc(); __ bind(L);                    // fall through
+  generate_and_dispatch(t);
+}
+
+// --------------------------------------------------------------------------------
+
+
+InterpreterGenerator::InterpreterGenerator(StubQueue* code)
+ : TemplateInterpreterGenerator(code) {
+   generate_all(); // down here so it can be "virtual"
+}
+
+// --------------------------------------------------------------------------------
+
+// Non-product code
+#ifndef PRODUCT
+address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
+  address entry = __ pc();
+
+  __ push(state);
+  __ mov(O7, Lscratch); // protect return address within interpreter
+
+  // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer
+  __ mov( Otos_l2, G3_scratch );
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
+  __ mov(Lscratch, O7); // restore return address
+  __ pop(state);
+  __ retl();
+  __ delayed()->nop();
+
+  return entry;
+}
+
+
+// helpers for generate_and_dispatch
+
+void TemplateInterpreterGenerator::count_bytecode() {
+  __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
+}
+
+
+void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
+  __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
+}
+
+
+void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
+  AddressLiteral index   (&BytecodePairHistogram::_index);
+  AddressLiteral counters((address) &BytecodePairHistogram::_counters);
+
+  // get index, shift out old bytecode, bring in new bytecode, and store it
+  // _index = (_index >> log2_number_of_codes) |
+  //          (bytecode << log2_number_of_codes);
+
+  __ load_contents(index, G4_scratch);
+  __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
+  __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes,  G3_scratch );
+  __ or3( G3_scratch,  G4_scratch, G4_scratch );
+  __ store_contents(G4_scratch, index, G3_scratch);
+
+  // bump bucket contents
+  // _counters[_index] ++;
+
+  __ set(counters, G3_scratch);                       // loads into G3_scratch
+  __ sll( G4_scratch, LogBytesPerWord, G4_scratch );  // Index is word address
+  __ add (G3_scratch, G4_scratch, G3_scratch);        // Add in index
+  __ ld (G3_scratch, 0, G4_scratch);
+  __ inc (G4_scratch);
+  __ st (G4_scratch, 0, G3_scratch);
+}
+
+
+void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
+  // Call a little run-time stub to avoid blow-up for each bytecode.
+  // The run-time stub saves the right registers, depending on
+  // the tosca in-state for the given template.
+  address entry = Interpreter::trace_code(t->tos_in());
+  guarantee(entry != NULL, "entry must have been generated");
+  __ call(entry, relocInfo::none);
+  __ delayed()->nop();
+}
+
+
+void TemplateInterpreterGenerator::stop_interpreter_at() {
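+  // Trap to the debugger once the global bytecode counter reaches StopInterpreterAt.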
+  AddressLiteral counter(&BytecodeCounter::_counter_value);
+  __ load_contents(counter, G3_scratch);
+  AddressLiteral stop_at(&StopInterpreterAt);
+  __ load_ptr_contents(stop_at, G4_scratch);
+  __ cmp(G3_scratch, G4_scratch);
+  __ breakpoint_trap(Assembler::equal, Assembler::icc);
+}
+#endif // not PRODUCT
+#endif // !CC_INTERP
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -23,1483 +23,39 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterGenerator.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateTable.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
+#include "oops/constMethod.hpp"
 #include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
 #include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 
-#ifndef CC_INTERP
-#ifndef FAST_DISPATCH
-#define FAST_DISPATCH 1
-#endif
-#undef FAST_DISPATCH
-
-
-// Generation of Interpreter
-//
-// The InterpreterGenerator generates the interpreter into Interpreter::_code.
-
-
-#define __ _masm->
-
-
-//----------------------------------------------------------------------------------------------------
-
-
-void InterpreterGenerator::save_native_result(void) {
-  // result potentially in O0/O1: save it across calls
-  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
-
-  // result potentially in F0/F1: save it across calls
-  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
-
-  // save and restore any potential method result value around the unlocking operation
-  __ stf(FloatRegisterImpl::D, F0, d_tmp);
-#ifdef _LP64
-  __ stx(O0, l_tmp);
-#else
-  __ std(O0, l_tmp);
-#endif
-}
-
-void InterpreterGenerator::restore_native_result(void) {
-  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
-  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;
-
-  // Restore any method result value
-  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
-#ifdef _LP64
-  __ ldx(l_tmp, O0);
-#else
-  __ ldd(l_tmp, O0);
-#endif
-}
-
-address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
-  assert(!pass_oop || message == NULL, "either oop or message but not both");
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an exception happened
-  __ empty_expression_stack();
-  // load exception object
-  __ set((intptr_t)name, G3_scratch);
-  if (pass_oop) {
-    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
-  } else {
-    __ set((intptr_t)message, G4_scratch);
-    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
-  }
-  // throw exception
-  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
-  AddressLiteral thrower(Interpreter::throw_exception_entry());
-  __ jump_to(thrower, G3_scratch);
-  __ delayed()->nop();
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an exception
-  // happened
-  __ empty_expression_stack();
-  // load exception object
-  __ call_VM(Oexception,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::throw_ClassCastException),
-             Otos_i);
-  __ should_not_reach_here();
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an exception happened
-  __ empty_expression_stack();
-  // convention: expect aberrant index in register G3_scratch, then shuffle the
-  // index to G4_scratch for the VM call
-  __ mov(G3_scratch, G4_scratch);
-  __ set((intptr_t)name, G3_scratch);
-  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
-  __ should_not_reach_here();
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an exception happened
-  __ empty_expression_stack();
-  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
-  __ should_not_reach_here();
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
-  address entry = __ pc();
-
-  if (state == atos) {
-    __ profile_return_type(O0, G3_scratch, G1_scratch);
-  }
-
-#if !defined(_LP64) && defined(COMPILER2)
-  // All return values are where we want them, except for Longs.  C2 returns
-  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
-  // Since the interpreter will return longs in both G1 and O0/O1 in the
-  // 32-bit build, we do a little shuffling here even when returning from
-  // interpreted code.
-  // Note: I tried to make C2 return longs in O0/O1 and G1 so we wouldn't have to
-  // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
-  // first which would move G1 -> O0/O1 and destroy the exception we were throwing.
-
-  if (state == ltos) {
-    __ srl (G1,  0, O1);
-    __ srlx(G1, 32, O0);
-  }
-#endif // !_LP64 && COMPILER2
-
-  // The callee returns with the stack possibly adjusted by an adapter transition.
-  // We remove that possible adjustment here.
-  // All interpreter local registers are untouched. Any result is passed back
-  // in the O0/O1 or float registers. Before continuing, the arguments must be
-  // popped from the java expression stack; i.e., Lesp must be adjusted.
-
-  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.
-
-  const Register cache = G3_scratch;
-  const Register index  = G1_scratch;
-  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
-
-  const Register flags = cache;
-  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
-  const Register parameter_size = flags;
-  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size);  // argument size in words
-  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);     // each argument size in bytes
-  __ add(Lesp, parameter_size, Lesp);                                           // pop arguments
-  __ dispatch_next(state, step);
-
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
-  address entry = __ pc();
-  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
-#if INCLUDE_JVMCI
-  // Check if we need to take lock at entry of synchronized method.
-  if (UseJVMCICompiler) {
-    Label L;
-    Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
-    __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
-    __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
-    // Clear flag.
-    __ stbool(G0, pending_monitor_enter_addr);
-    // Take lock.
-    lock_method();
-    __ bind(L);
-  }
-#endif
-  { Label L;
-    Address exception_addr(G2_thread, Thread::pending_exception_offset());
-    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
-    __ br_null_short(Gtemp, Assembler::pt, L);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-  __ dispatch_next(state, step);
-  return entry;
-}
-
-// A result handler converts/unboxes a native call result into
-// a java interpreter/compiler result. The current frame is an
-// interpreter frame. The activation frame unwind code must be
-// consistent with that of TemplateTable::_return(...). In the
-// case of native methods, the caller's SP was not modified.
-address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
-  address entry = __ pc();
-  Register Itos_i  = Otos_i ->after_save();
-  Register Itos_l  = Otos_l ->after_save();
-  Register Itos_l1 = Otos_l1->after_save();
-  Register Itos_l2 = Otos_l2->after_save();
-  switch (type) {
-    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
-    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
-    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
-    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
-    case T_LONG   :
-#ifndef _LP64
-                    __ mov(O1, Itos_l2);  // move other half of long
-#endif              // ifdef or no ifdef, fall through to the T_INT case
-    case T_INT    : __ mov(O0, Itos_i);                         break;
-    case T_VOID   : /* nothing to do */                         break;
-    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );     break;
-    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );     break;
-    case T_OBJECT :
-      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
-      __ verify_oop(Itos_i);
-      break;
-    default       : ShouldNotReachHere();
-  }
-  __ ret();                           // return from interpreter activation
-  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
-  NOT_PRODUCT(__ emit_int32(0);)       // marker for disassembly
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
-  address entry = __ pc();
-  __ push(state);
-  __ call_VM(noreg, runtime_entry);
-  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
-  return entry;
-}
-
 
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
-  address entry = __ pc();
-  __ dispatch_next(state);
-  return entry;
-}
-
-//
-// Helpers for commoning out cases in the various type of method entries.
-//
-
-// increment invocation count & check for overflow
-//
-// Note: checking for negative value instead of overflow
-//       so we have a 'sticky' overflow test
-//
-// Lmethod: method
-// ??: invocation counter
-//
-void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
-  // Note: In tiered we increment either counters in MethodCounters* or in
-  // MDO depending if we're profiling or not.
-  const Register G3_method_counters = G3_scratch;
-  Label done;
-
-  if (TieredCompilation) {
-    const int increment = InvocationCounter::count_increment;
-    Label no_mdo;
-    if (ProfileInterpreter) {
-      // If no method data exists, go to no_mdo.
-      __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
-      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
-      // Increment counter
-      Address mdo_invocation_counter(G4_scratch,
-                                     in_bytes(MethodData::invocation_counter_offset()) +
-                                     in_bytes(InvocationCounter::counter_offset()));
-      Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
-      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
-                                 G3_scratch, Lscratch,
-                                 Assembler::zero, overflow);
-      __ ba_short(done);
-    }
-
-    // Increment counter in MethodCounters*
-    __ bind(no_mdo);
-    Address invocation_counter(G3_method_counters,
-            in_bytes(MethodCounters::invocation_counter_offset()) +
-            in_bytes(InvocationCounter::counter_offset()));
-    __ get_method_counters(Lmethod, G3_method_counters, done);
-    Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
-    __ increment_mask_and_jump(invocation_counter, increment, mask,
-                               G4_scratch, Lscratch,
-                               Assembler::zero, overflow);
-    __ bind(done);
-  } else { // not TieredCompilation
-    // Update standard invocation counters
-    __ get_method_counters(Lmethod, G3_method_counters, done);
-    __ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
-    if (ProfileInterpreter) {
-      Address interpreter_invocation_counter(G3_method_counters,
-            in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
-      __ ld(interpreter_invocation_counter, G4_scratch);
-      __ inc(G4_scratch);
-      __ st(G4_scratch, interpreter_invocation_counter);
-    }
-
-    if (ProfileInterpreter && profile_method != NULL) {
-      // Test to see if we should create a method data oop
-      Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
-      __ ld(profile_limit, G1_scratch);
-      __ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);
-
-      // if no method data exists, go to profile_method
-      __ test_method_data_pointer(*profile_method);
-    }
-
-    Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
-    __ ld(invocation_limit, G3_scratch);
-    __ cmp(O0, G3_scratch);
-    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
-    __ delayed()->nop();
-    __ bind(done);
-  }
-
-}
-
-// Allocate monitor and lock method (asm interpreter)
-// ebx - Method*
-//
-void TemplateInterpreterGenerator::lock_method() {
-  __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0);  // Load access flags.
-
-#ifdef ASSERT
- { Label ok;
-   __ btst(JVM_ACC_SYNCHRONIZED, O0);
-   __ br( Assembler::notZero, false, Assembler::pt, ok);
-   __ delayed()->nop();
-   __ stop("method doesn't need synchronization");
-   __ bind(ok);
-  }
-#endif // ASSERT
-
-  // get synchronization object to O0
-  { Label done;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    __ btst(JVM_ACC_STATIC, O0);
-    __ br( Assembler::zero, true, Assembler::pt, done);
-    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case
-
-    __ ld_ptr( Lmethod, in_bytes(Method::const_offset()), O0);
-    __ ld_ptr( O0, in_bytes(ConstMethod::constants_offset()), O0);
-    __ ld_ptr( O0, ConstantPool::pool_holder_offset_in_bytes(), O0);
-
-    // lock the mirror, not the Klass*
-    __ ld_ptr( O0, mirror_offset, O0);
-
-#ifdef ASSERT
-    __ tst(O0);
-    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
-#endif // ASSERT
-
-    __ bind(done);
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
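+  // Map a BasicType to its slot in the result handler table; T_OBJECT and
+  // T_ARRAY share the same handler.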
+  int i = 0;
+  switch (type) {
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : i = 4; break;
+    case T_LONG   : i = 5; break;
+    case T_VOID   : i = 6; break;
+    case T_FLOAT  : i = 7; break;
+    case T_DOUBLE : i = 8; break;
+    case T_OBJECT : i = 9; break;
+    case T_ARRAY  : i = 9; break;
+    default       : ShouldNotReachHere();
   }
-
-  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
-  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());   // store object
-  // __ untested("lock_object from method entry");
-  __ lock_object(Lmonitors, O0);
-}
-
-
-void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
-                                                         Register Rscratch,
-                                                         Register Rscratch2) {
-  const int page_size = os::vm_page_size();
-  Label after_frame_check;
-
-  assert_different_registers(Rframe_size, Rscratch, Rscratch2);
-
-  __ set(page_size, Rscratch);
-  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);
-
-  // get the stack base, and in debug, verify it is non-zero
-  __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
-#ifdef ASSERT
-  Label base_not_zero;
-  __ br_notnull_short(Rscratch, Assembler::pn, base_not_zero);
-  __ stop("stack base is zero in generate_stack_overflow_check");
-  __ bind(base_not_zero);
-#endif
-
-  // get the stack size, and in debug, verify it is non-zero
-  assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
-  __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
-#ifdef ASSERT
-  Label size_not_zero;
-  __ br_notnull_short(Rscratch2, Assembler::pn, size_not_zero);
-  __ stop("stack size is zero in generate_stack_overflow_check");
-  __ bind(size_not_zero);
-#endif
-
-  // compute the beginning of the protected zone minus the requested frame size
-  __ sub( Rscratch, Rscratch2,   Rscratch );
-  __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
-  __ add( Rscratch, Rscratch2,   Rscratch );
-
-  // Add in the size of the frame (which is the same as subtracting it from the
-  // SP, which would take another register)
-  __ add( Rscratch, Rframe_size, Rscratch );
-
-  // the frame is greater than one page in size, so check against
-  // the bottom of the stack
-  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);
-
-  // the stack will overflow, throw an exception
-
-  // Note that SP is restored to sender's sp (in the delay slot). This
-  // is necessary if the sender's frame is an extended compiled frame
-  // (see gen_c2i_adapter()) and safer anyway in case of JSR292
-  // adaptations.
-
-  // Note also that the restored frame is not necessarily interpreted.
-  // Use the shared runtime version of the StackOverflowError.
-  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
-  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
-  __ jump_to(stub, Rscratch);
-  __ delayed()->mov(O5_savedSP, SP);
-
-  // if you get to here, then there is enough stack space
-  __ bind( after_frame_check );
-}
-
-
-//
-// Generate a fixed interpreter frame. This is identical setup for interpreted
-// methods and for native methods hence the shared code.
-
-
-//----------------------------------------------------------------------------------------------------
-// Stack frame layout
-//
-// When control flow reaches any of the entry types for the interpreter
-// the following holds ->
-//
-// C2 Calling Conventions:
-//
-// The entry code below assumes that the following registers are set
-// when coming in:
-//    G5_method: holds the Method* of the method to call
-//    Lesp:    points to the TOS of the callers expression stack
-//             after having pushed all the parameters
-//
-// The entry code does the following to setup an interpreter frame
-//   pop parameters from the callers stack by adjusting Lesp
-//   set O0 to Lesp
-//   compute X = (max_locals - num_parameters)
-//   bump SP up by X to accommodate the extra locals
-//   compute X = max_expression_stack
-//               + vm_local_words
-//               + 16 words of register save area
-//   save frame doing a save sp, -X, sp growing towards lower addresses
-//   set Lbcp, Lmethod, LcpoolCache
-//   set Llocals to i0
-//   set Lmonitors to FP - rounded_vm_local_words
-//   set Lesp to Lmonitors - 4
-//
-//  The frame has now been set up to do the rest of the entry code
-
-// Try this optimization:  Most method entries could live in a
-// "one size fits all" stack frame without all the dynamic size
-// calculations.  It might be profitable to do all this calculation
-// statically and approximately for "small enough" methods.
-
-//-----------------------------------------------------------------------------------------------
-
-// C1 Calling conventions
-//
-// Upon method entry, the following registers are set up:
-//
-// g2 G2_thread: current thread
-// g5 G5_method: method to activate
-// g4 Gargs  : pointer to last argument
-//
-//
-// Stack:
-//
-// +---------------+ <--- sp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- sp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- sp + 0x5c
-// |               |
-// :     free      :
-// |               |
-// +---------------+ <--- Gargs
-// |               |
-// :   arguments   :
-// |               |
-// +---------------+
-// |               |
-//
-//
-//
-// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
-//
-// +---------------+ <--- sp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- sp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- sp + 0x5c
-// |               |
-// :               :
-// |               | <--- Lesp
-// +---------------+ <--- Lmonitors (fp - 0x18)
-// |   VM locals   |
-// +---------------+ <--- fp
-// |               |
-// : reg save area :
-// |               |
-// +---------------+ <--- fp + 0x40
-// |               |
-// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
-// |               |
-// +---------------+ <--- fp + 0x5c
-// |               |
-// :     free      :
-// |               |
-// +---------------+
-// |               |
-// : nonarg locals :
-// |               |
-// +---------------+
-// |               |
-// :   arguments   :
-// |               | <--- Llocals
-// +---------------+ <--- Gargs
-// |               |
-
-void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
-  //
-  //
-  // The entry code sets up a new interpreter frame in 4 steps:
-  //
-  // 1) Increase the caller's SP for the extra local space needed:
-  //    (check for overflow)
-  //    Efficient implementation of xload/xstore bytecodes requires
-  //    that arguments and non-argument locals are in a contiguously
-  //    addressable memory block => non-argument locals must be
-  //    allocated in the caller's frame.
-  //
-  // 2) Create a new stack frame and register window:
-  //    The new stack frame must provide space for the standard
-  //    register save area, the maximum java expression stack size,
-  //    the monitor slots (0 slots initially), and some frame local
-  //    scratch locations.
-  //
-  // 3) The following interpreter activation registers must be setup:
-  //    Lesp       : expression stack pointer
-  //    Lbcp       : bytecode pointer
-  //    Lmethod    : method
-  //    Llocals    : locals pointer
-  //    Lmonitors  : monitor pointer
-  //    LcpoolCache: constant pool cache
-  //
-  // 4) Initialize the non-argument locals if necessary:
-  //    Non-argument locals may need to be initialized to NULL
-  //    for GC to work. If the oop-map information is accurate
-  //    (in the absence of the JSR problem), no initialization
-  //    is necessary.
-  //
-  // (gri - 2/25/2000)
-
-
-  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
-
-  const int extra_space =
-    rounded_vm_local_words +                   // frame local scratch space
-    Method::extra_stack_entries() +            // extra stack for jsr 292
-    frame::memory_parameter_word_sp_offset +   // register save area
-    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);
-
-  const Register Glocals_size = G3;
-  const Register RconstMethod = Glocals_size;
-  const Register Otmp1 = O3;
-  const Register Otmp2 = O4;
-  // Lscratch can't be used as a temporary because the call_stub uses
-  // it to assert that the stack frame was set up correctly.
-  const Address constMethod       (G5_method, Method::const_offset());
-  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
-
-  __ ld_ptr( constMethod, RconstMethod );
-  __ lduh( size_of_parameters, Glocals_size);
-
-  // Gargs points to first local + BytesPerWord
-  // Set the saved SP after the register window save
-  //
-  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
-  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
-  __ add(Gargs, Otmp1, Gargs);
-
-  if (native_call) {
-    __ calc_mem_param_words( Glocals_size, Gframe_size );
-    __ add( Gframe_size,  extra_space, Gframe_size);
-    __ round_to( Gframe_size, WordsPerLong );
-    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
-  } else {
-
-    //
-    // Compute number of locals in method apart from incoming parameters
-    //
-    const Address size_of_locals    (Otmp1, ConstMethod::size_of_locals_offset());
-    __ ld_ptr( constMethod, Otmp1 );
-    __ lduh( size_of_locals, Otmp1 );
-    __ sub( Otmp1, Glocals_size, Glocals_size );
-    __ round_to( Glocals_size, WordsPerLong );
-    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );
-
-    // see if the frame is greater than one page in size. If so,
-    // then we need to verify there is enough stack space remaining
-    // Frame_size = (max_stack + extra_space) * BytesPerWord;
-    __ ld_ptr( constMethod, Gframe_size );
-    __ lduh( Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size );
-    __ add( Gframe_size, extra_space, Gframe_size );
-    __ round_to( Gframe_size, WordsPerLong );
-    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);
-
-    // Add in java locals size for stack overflow check only
-    __ add( Gframe_size, Glocals_size, Gframe_size );
-
-    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
-    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);
-
-    __ sub( Gframe_size, Glocals_size, Gframe_size);
-
-    //
-    // bump SP to accommodate the extra locals
-    //
-    __ sub( SP, Glocals_size, SP );
-  }
-
-  //
-  // now set up a stack frame with the size computed above
-  //
-  __ neg( Gframe_size );
-  __ save( SP, Gframe_size, SP );
-
-  //
-  // now set up all the local cache registers
-  //
-  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
-  // that all present references to Lbyte_code initialize the register
-  // immediately before use
-  if (native_call) {
-    __ mov(G0, Lbcp);
-  } else {
-    __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
-    __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
-  }
-  __ mov( G5_method, Lmethod);                 // set Lmethod
-  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
-  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
-#ifdef _LP64
-  __ add( Lmonitors, STACK_BIAS, Lmonitors );   // Account for 64 bit stack bias
-#endif
-  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp
-
-  // setup interpreter activation registers
-  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals
-
-  if (ProfileInterpreter) {
-#ifdef FAST_DISPATCH
-    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
-    // they both use I2.
-    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
-#endif // FAST_DISPATCH
-    __ set_method_data_pointer();
-  }
-
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
+  return i;
 }
 
-// Method entry for java.lang.ref.Reference.get.
-address InterpreterGenerator::generate_Reference_get_entry(void) {
-#if INCLUDE_ALL_GCS
-  // Code: _aload_0, _getfield, _areturn
-  // parameter size = 1
-  //
-  // The code that gets generated by this routine is split into 2 parts:
-  //    1. The "intrinsified" code for G1 (or any SATB based GC),
-  //    2. The slow path - which is an expansion of the regular method entry.
-  //
-  // Notes:-
-  // * In the G1 code we do not check whether we need to block for
-  //   a safepoint. If G1 is enabled then we must execute the specialized
-  //   code for Reference.get (except when the Reference object is null)
-  //   so that we can log the value in the referent field with an SATB
-  //   update buffer.
-  //   If the code for the getfield template is modified so that the
-  //   G1 pre-barrier code is executed when the current method is
-  //   Reference.get() then going through the normal method entry
-  //   will be fine.
-  // * The G1 code can, however, check the receiver object (the instance
-  //   of java.lang.Reference) and jump to the slow path if null. If the
-  //   Reference object is null then we obviously cannot fetch the referent
-  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
-  //   regular method entry code to generate the NPE.
-  //
-  // This code is based on generate_accessor_entry.
-
-  address entry = __ pc();
-
-  const int referent_offset = java_lang_ref_Reference::referent_offset;
-  guarantee(referent_offset > 0, "referent offset not initialized");
-
-  if (UseG1GC) {
-    Label slow_path;
-
-    // In the G1 code we don't check if we need to reach a safepoint. We
-    // continue and the thread will safepoint at the next bytecode dispatch.
-
-    // Check if local 0 != NULL
-    // If the receiver is null then it is OK to jump to the slow path.
-    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
-    // check if local 0 == NULL and go the slow path
-    __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);
-
-
-    // Load the value of the referent field.
-    if (Assembler::is_simm13(referent_offset)) {
-      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
-    } else {
-      __ set(referent_offset, G3_scratch);
-      __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
-    }
-
-    // Generate the G1 pre-barrier code to log the value of
-    // the referent field in an SATB buffer. Note with
-    // these parameters the pre-barrier does not generate
-    // the load of the previous value
-
-    __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
-                            Otos_i /* pre_val */,
-                            G3_scratch /* tmp */,
-                            true /* preserve_o_regs */);
-
-    // _areturn
-    __ retl();                      // return from leaf routine
-    __ delayed()->mov(O5_savedSP, SP);
-
-    // Generate regular method entry
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
-    return entry;
-  }
-#endif // INCLUDE_ALL_GCS
-
-  // If G1 is not enabled then attempt to go through the accessor entry point
-  // Reference.get is an accessor
-  return NULL;
-}
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.update(int crc, int b)
- */
-address InterpreterGenerator::generate_CRC32_update_entry() {
-
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    Label L_slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ set(state, O2);
-    __ set(SafepointSynchronize::_not_synchronized, O3);
-    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
-
-    // Load parameters
-    const Register crc   = O0; // initial crc
-    const Register val   = O1; // byte to update with
-    const Register table = O2; // address of 256-entry lookup table
-
-    __ ldub(Gargs, 3, val);
-    __ lduw(Gargs, 8, crc);
-
-    __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);
-
-    __ not1(crc); // ~crc
-    __ clruwu(crc);
-    __ update_byte_crc32(crc, val, table);
-    __ not1(crc); // ~crc
-
-    // result in O0
-    __ retl();
-    __ delayed()->nop();
-
-    // generate a vanilla native entry as the slow path
-    __ bind(L_slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
- *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
- */
-address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    Label L_slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ set(state, O2);
-    __ set(SafepointSynchronize::_not_synchronized, O3);
-    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
-
-    // Load parameters from the stack
-    const Register crc    = O0; // initial crc
-    const Register buf    = O1; // source java byte array address
-    const Register len    = O2; // len
-    const Register offset = O3; // offset
-
-    // Arguments are reversed on java expression stack
-    // Calculate address of start element
-    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
-      __ lduw(Gargs, 0,  len);
-      __ lduw(Gargs, 8,  offset);
-      __ ldx( Gargs, 16, buf);
-      __ lduw(Gargs, 32, crc);
-      __ add(buf, offset, buf);
-    } else {
-      __ lduw(Gargs, 0,  len);
-      __ lduw(Gargs, 8,  offset);
-      __ ldx( Gargs, 16, buf);
-      __ lduw(Gargs, 24, crc);
-      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
-      __ add(buf, offset, buf);
-    }
-
-    // Call the crc32 kernel
-    __ MacroAssembler::save_thread(L7_thread_cache);
-    __ kernel_crc32(crc, buf, len, O3);
-    __ MacroAssembler::restore_thread(L7_thread_cache);
-
-    // result in O0
-    __ retl();
-    __ delayed()->nop();
-
-    // generate a vanilla native entry as the slow path
-    __ bind(L_slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
-
-//
-// Interpreter stub for calling a native method. (asm interpreter)
-// This sets up a somewhat different looking stack for calling the native method
-// than the typical interpreter frame setup.
-//
-
-address InterpreterGenerator::generate_native_entry(bool synchronized) {
-  address entry = __ pc();
-
-  // the following temporary registers are used during frame creation
-  const Register Gtmp1 = G3_scratch ;
-  const Register Gtmp2 = G1_scratch;
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // make sure registers are different!
-  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
-
-  const Address Laccess_flags(Lmethod, Method::access_flags_offset());
-
-  const Register Glocals_size = G3;
-  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
-
-  // make sure method is native & not abstract
-  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
-#ifdef ASSERT
-  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
-  {
-    Label L;
-    __ btst(JVM_ACC_NATIVE, Gtmp1);
-    __ br(Assembler::notZero, false, Assembler::pt, L);
-    __ delayed()->nop();
-    __ stop("tried to execute non-native method as native");
-    __ bind(L);
-  }
-  { Label L;
-    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
-    __ br(Assembler::zero, false, Assembler::pt, L);
-    __ delayed()->nop();
-    __ stop("tried to execute abstract method as non-abstract");
-    __ bind(L);
-  }
-#endif // ASSERT
-
- // generate the code to allocate the interpreter stack frame
-  generate_fixed_frame(true);
-
-  //
-  // No locals to initialize for native method
-  //
-
-  // this slot will be set later, we initialize it to null here just in
-  // case we get a GC before the actual value is stored later
-  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);
-
-  const Address do_not_unlock_if_synchronized(G2_thread,
-    JavaThread::do_not_unlock_if_synchronized_offset());
-  // Since at this point in the method invocation the exception handler
-  // would try to exit the monitor of synchronized methods which hasn't
-  // been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
-  // runtime, exception handling i.e. unlock_if_synchronized_method will
-  // check this thread local flag.
-  // This flag forces an unwind in the topmost interpreter frame
-  // without performing an unlock while doing so.
-
-  __ movbool(true, G3_scratch);
-  __ stbool(G3_scratch, do_not_unlock_if_synchronized);
-
-  // increment invocation counter and check for overflow
-  //
-  // Note: checking for negative value instead of overflow
-  //       so we have a 'sticky' overflow test (may be of
-  //       importance as soon as we have true MT/MP)
-  Label invocation_counter_overflow;
-  Label Lcontinue;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
-
-  }
-  __ bind(Lcontinue);
-
-  bang_stack_shadow_pages(true);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ stbool(G0, do_not_unlock_if_synchronized);
-
-  // check for synchronized methods
-  // Must happen AFTER invocation_counter check and stack overflow check,
-  // so method is not locked if overflows.
-
-  if (synchronized) {
-    lock_method();
-  } else {
-#ifdef ASSERT
-    { Label ok;
-      __ ld(Laccess_flags, O0);
-      __ btst(JVM_ACC_SYNCHRONIZED, O0);
-      __ br( Assembler::zero, false, Assembler::pt, ok);
-      __ delayed()->nop();
-      __ stop("method needs synchronization");
-      __ bind(ok);
-    }
-#endif // ASSERT
-  }
-
-
-  // start execution
-  __ verify_thread();
-
-  // JVMTI support
-  __ notify_method_entry();
-
-  // native call
-
-  // (note that O0 is never an oop--at most it is a handle)
-  // It is important not to smash any handles created by this call,
-  // until any oop handle in O0 is dereferenced.
-
-  // (note that the space for outgoing params is preallocated)
-
-  // get signature handler
-  { Label L;
-    Address signature_handler(Lmethod, Method::signature_handler_offset());
-    __ ld_ptr(signature_handler, G3_scratch);
-    __ br_notnull_short(G3_scratch, Assembler::pt, L);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
-    __ ld_ptr(signature_handler, G3_scratch);
-    __ bind(L);
-  }
-
-  // Push a new frame so that the args will really be stored in the new
-  // frame's outgoing argument slots.
-  // Copy a few locals across so the new frame has the variables
-  // we need, but these values will be dead at the jni call and
-  // therefore not gc volatile like the values in the current
-  // frame (Lmethod in particular)
-
-  // Flush the method pointer to the register save area
-  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
-  __ mov(Llocals, O1);
-
-  // calculate where the mirror handle body is allocated in the interpreter frame:
-  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);
-
-  // Calculate current frame size
-  __ sub(SP, FP, O3);         // Calculate negative of current frame size
-  __ save(SP, O3, SP);        // Allocate an identical sized frame
-
-  // Note I7 has leftover trash. Slow signature handler will fill it in
-  // should we get there. Normal jni call will set reasonable last_Java_pc
-  // below (and fix I7 so the stack trace doesn't have a meaningless frame
-  // in it).
-
-  // Load interpreter frame's Lmethod into same register here
-
-  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);
-
-  __ mov(I1, Llocals);
-  __ mov(I2, Lscratch2);     // save the address of the mirror
-
-
-  // ONLY Lmethod and Llocals are valid here!
-
-  // call signature handler, It will move the arg properly since Llocals in current frame
-  // matches that in outer frame
-
-  __ callr(G3_scratch, 0);
-  __ delayed()->nop();
-
-  // Result handler is in Lscratch
-
-  // Reload interpreter frame's Lmethod since slow signature handler may block
-  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);
-
-  { Label not_static;
-
-    __ ld(Laccess_flags, O0);
-    __ btst(JVM_ACC_STATIC, O0);
-    __ br( Assembler::zero, false, Assembler::pt, not_static);
-    // get native function entry point(O0 is a good temp until the very end)
-    __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
-    // for static methods insert the mirror argument
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-
-    __ ld_ptr(Lmethod, Method::const_offset(), O1);
-    __ ld_ptr(O1, ConstMethod::constants_offset(), O1);
-    __ ld_ptr(O1, ConstantPool::pool_holder_offset_in_bytes(), O1);
-    __ ld_ptr(O1, mirror_offset, O1);
-#ifdef ASSERT
-    if (!PrintSignatureHandlers)  // do not dirty the output with this
-    { Label L;
-      __ br_notnull_short(O1, Assembler::pt, L);
-      __ stop("mirror is missing");
-      __ bind(L);
-    }
-#endif // ASSERT
-    __ st_ptr(O1, Lscratch2, 0);
-    __ mov(Lscratch2, O1);
-    __ bind(not_static);
-  }
-
-  // At this point, arguments have been copied off of stack into
-  // their JNI positions, which are O1..O5 and SP[68..].
-  // Oops are boxed in-place on the stack, with handles copied to arguments.
-  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.
-
-#ifdef ASSERT
-  { Label L;
-    __ br_notnull_short(O0, Assembler::pt, L);
-    __ stop("native entry point is missing");
-    __ bind(L);
-  }
-#endif // ASSERT
-
-  //
-  // setup the frame anchor
-  //
-  // The scavenge function only needs to know that the PC of this frame is
-  // in the interpreter method entry code, it doesn't need to know the exact
-  // PC and hence we can use O7 which points to the return address from the
-  // previous call in the code stream (signature handler function)
-  //
-  // The other trick is we set last_Java_sp to FP instead of the usual SP because
-  // we have pushed the extra frame in order to protect the volatile register(s)
-  // in that frame when we return from the jni call
-  //
-
-  __ set_last_Java_frame(FP, O7);
-  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
-                   // not meaningless information that'll confuse me.
-
-  // flush the windows now. We don't care about the current (protection) frame
-  // only the outer frames
-
-  __ flushw();
-
-  // mark windows as flushed
-  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
-  __ set(JavaFrameAnchor::flushed, G3_scratch);
-  __ st(G3_scratch, flags);
-
-  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.
-
-  Address thread_state(G2_thread, JavaThread::thread_state_offset());
-#ifdef ASSERT
-  { Label L;
-    __ ld(thread_state, G3_scratch);
-    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
-    __ stop("Wrong thread state in native stub");
-    __ bind(L);
-  }
-#endif // ASSERT
-  __ set(_thread_in_native, G3_scratch);
-  __ st(G3_scratch, thread_state);
-
-  // Call the jni method, using the delay slot to set the JNIEnv* argument.
-  __ save_thread(L7_thread_cache); // save Gthread
-  __ callr(O0, 0);
-  __ delayed()->
-     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);
-
-  // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD
-
-  __ restore_thread(L7_thread_cache); // restore G2_thread
-  __ reinit_heapbase();
-
-  // must we block?
-
-  // Block, if necessary, before resuming in _thread_in_Java state.
-  // In order for GC to work, don't clear the last_Java_sp until after blocking.
-  { Label no_block;
-    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
-
-    // Switch thread to "native transition" state before reading the synchronization state.
-    // This additional state is necessary because reading and testing the synchronization
-    // state is not atomic w.r.t. GC, as this scenario demonstrates:
-    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
-    //     VM thread changes sync state to synchronizing and suspends threads for GC.
-    //     Thread A is resumed to finish this native method, but doesn't block here since it
-    //     didn't see any synchronization in progress, and escapes.
-    __ set(_thread_in_native_trans, G3_scratch);
-    __ st(G3_scratch, thread_state);
-    if (os::is_MP()) {
-      if (UseMembar) {
-        // Force this write out before the read below
-        __ membar(Assembler::StoreLoad);
-      } else {
-        // Write serialization page so VM thread can do a pseudo remote membar.
-        // We use the current thread pointer to calculate a thread specific
-        // offset to write to within the page. This minimizes bus traffic
-        // due to cache line collision.
-        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
-      }
-    }
-    __ load_contents(sync_state, G3_scratch);
-    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
-
-    Label L;
-    __ br(Assembler::notEqual, false, Assembler::pn, L);
-    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
-    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
-    __ bind(L);
-
-    // Block.  Save any potential method result value before the operation and
-    // use a leaf call to leave the last_Java_frame setup undisturbed.
-    save_native_result();
-    __ call_VM_leaf(L7_thread_cache,
-                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
-                    G2_thread);
-
-    // Restore any method result value
-    restore_native_result();
-    __ bind(no_block);
-  }
-
-  // Clear the frame anchor now
-
-  __ reset_last_Java_frame();
-
-  // Move the result handler address
-  __ mov(Lscratch, G3_scratch);
-  // return possible result to the outer frame
-#ifndef _LP64
-  __ mov(O0, I0);
-  __ restore(O1, G0, O1);
-#else
-  __ restore(O0, G0, O0);
-#endif /* _LP64 */
-
-  // Move result handler to expected register
-  __ mov(G3_scratch, Lscratch);
-
-  // Back in normal (native) interpreter frame. State is thread_in_native_trans
-  // switch to thread_in_Java.
-
-  __ set(_thread_in_Java, G3_scratch);
-  __ st(G3_scratch, thread_state);
-
-  // reset handle block
-  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
-  __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
-
-  // If we have an oop result store it where it will be safe for any further gc
-  // until we return now that we've released the handle it might be protected by
-
-  {
-    Label no_oop, store_result;
-
-    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
-    __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
-    __ addcc(G0, O0, O0);
-    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
-    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
-    __ mov(G0, O0);
-
-    __ bind(store_result);
-    // Store it where gc will look for it and result handler expects it.
-    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);
-
-    __ bind(no_oop);
-
-  }
-
-
-  // handle exceptions (exception handling will handle unlocking!)
-  { Label L;
-    Address exception_addr(G2_thread, Thread::pending_exception_offset());
-    __ ld_ptr(exception_addr, Gtemp);
-    __ br_null_short(Gtemp, Assembler::pt, L);
-    // Note: This could be handled more efficiently since we know that the native
-    //       method doesn't have an exception handler. We could directly return
-    //       to the exception handler for the caller.
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-
-  // JVMTI support (preserves thread register)
-  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);
-
-  if (synchronized) {
-    // save and restore any potential method result value around the unlocking operation
-    save_native_result();
-
-    __ add( __ top_most_monitor(), O1);
-    __ unlock_object(O1);
-
-    restore_native_result();
-  }
-
-#if defined(COMPILER2) && !defined(_LP64)
-
-  // C2 expects long results in G1. We can't tell if we're returning to
-  // interpreted or compiled code, so just be safe.
-
-  __ sllx(O0, 32, G1);          // Shift bits into high G1
-  __ srl (O1, 0, O1);           // Zero extend O1
-  __ or3 (O1, G1, G1);          // OR 64 bits into G1
-
-#endif /* COMPILER2 && !_LP64 */
-
-  // dispose of return address and remove activation
-#ifdef ASSERT
-  {
-    Label ok;
-    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
-    __ stop("bad I5_savedSP value");
-    __ should_not_reach_here();
-    __ bind(ok);
-  }
-#endif
-  if (TraceJumps) {
-    // Move target to register that is recordable
-    __ mov(Lscratch, G3_scratch);
-    __ JMP(G3_scratch, 0);
-  } else {
-    __ jmp(Lscratch, 0);
-  }
-  __ delayed()->nop();
-
-
-  if (inc_counter) {
-    // handle invocation counter overflow
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(Lcontinue);
-  }
-
-
-
-  return entry;
-}
-
-
-// Generic method entry to (asm) interpreter
-address InterpreterGenerator::generate_normal_entry(bool synchronized) {
-  address entry = __ pc();
-
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // the following temporary registers are used during frame creation
-  const Register Gtmp1 = G3_scratch ;
-  const Register Gtmp2 = G1_scratch;
-
-  // make sure registers are different!
-  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
-
-  const Address constMethod       (G5_method, Method::const_offset());
-  // Seems like G5_method is live at the point this is used. So we could make this look consistent
-  // and use it in the asserts.
-  const Address access_flags      (Lmethod,   Method::access_flags_offset());
-
-  const Register Glocals_size = G3;
-  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
-
-  // make sure method is not native & not abstract
-  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
-#ifdef ASSERT
-  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
-  {
-    Label L;
-    __ btst(JVM_ACC_NATIVE, Gtmp1);
-    __ br(Assembler::zero, false, Assembler::pt, L);
-    __ delayed()->nop();
-    __ stop("tried to execute native method as non-native");
-    __ bind(L);
-  }
-  { Label L;
-    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
-    __ br(Assembler::zero, false, Assembler::pt, L);
-    __ delayed()->nop();
-    __ stop("tried to execute abstract method as non-abstract");
-    __ bind(L);
-  }
-#endif // ASSERT
-
-  // generate the code to allocate the interpreter stack frame
-
-  generate_fixed_frame(false);
-
-#ifdef FAST_DISPATCH
-  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
-                                          // set bytecode dispatch table base
-#endif
-
-  //
-  // Code to initialize the extra (i.e. non-parm) locals
-  //
-  Register init_value = noreg;    // will be G0 if we must clear locals
-  // The way the code was set up before, zerolocals was always true for vanilla java entries.
-  // It could only be false for specialized entries like accessor or empty, which have
-  // no extra locals, so the test was a waste of time and the extra locals were always
-  // initialized. We removed this extra complication from already over-complicated code.
-
-  init_value = G0;
-  Label clear_loop;
-
-  const Register RconstMethod = O1;
-  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
-  const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());
-
-  // NOTE: If you change the frame layout, this code will need to
-  // be updated!
-  __ ld_ptr( constMethod, RconstMethod );
-  __ lduh( size_of_locals, O2 );
-  __ lduh( size_of_parameters, O1 );
-  __ sll( O2, Interpreter::logStackElementSize, O2);
-  __ sll( O1, Interpreter::logStackElementSize, O1 );
-  __ sub( Llocals, O2, O2 );
-  __ sub( Llocals, O1, O1 );
-
-  __ bind( clear_loop );
-  __ inc( O2, wordSize );
-
-  __ cmp( O2, O1 );
-  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
-  __ delayed()->st_ptr( init_value, O2, 0 );
-
-  const Address do_not_unlock_if_synchronized(G2_thread,
-    JavaThread::do_not_unlock_if_synchronized_offset());
-  // At this point in the method invocation the exception handler would try to
-  // exit the monitor of a synchronized method that has not been entered yet, so
-  // we set the thread-local flag _do_not_unlock_if_synchronized to true. If the
-  // runtime throws any exception, the exception-handling path
-  // (unlock_if_synchronized_method) will check this flag.
-  __ movbool(true, G3_scratch);
-  __ stbool(G3_scratch, do_not_unlock_if_synchronized);
-
-  __ profile_parameters_type(G1_scratch, G3_scratch, G4_scratch, Lscratch);
-  // increment invocation counter and check for overflow
-  //
-  // Note: checking for negative value instead of overflow
-  //       so we have a 'sticky' overflow test (may be of
-  //       importance as soon as we have true MT/MP)
-  Label invocation_counter_overflow;
-  Label profile_method;
-  Label profile_method_continue;
-  Label Lcontinue;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
-    if (ProfileInterpreter) {
-      __ bind(profile_method_continue);
-    }
-  }
-  __ bind(Lcontinue);
-
-  bang_stack_shadow_pages(false);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ stbool(G0, do_not_unlock_if_synchronized);
-
-  // check for synchronized methods
-  // Must happen AFTER the invocation_counter check and the stack overflow check,
-  // so the method is not locked if the counter overflows.
-
-  if (synchronized) {
-    lock_method();
-  } else {
-#ifdef ASSERT
-    { Label ok;
-      __ ld(access_flags, O0);
-      __ btst(JVM_ACC_SYNCHRONIZED, O0);
-      __ br( Assembler::zero, false, Assembler::pt, ok);
-      __ delayed()->nop();
-      __ stop("method needs synchronization");
-      __ bind(ok);
-    }
-#endif // ASSERT
-  }
-
-  // start execution
-
-  __ verify_thread();
-
-  // jvmti support
-  __ notify_method_entry();
-
-  // start executing instructions
-  __ dispatch_next(vtos);
-
-
-  if (inc_counter) {
-    if (ProfileInterpreter) {
-      // We have decided to profile this method in the interpreter
-      __ bind(profile_method);
-
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
-      __ set_method_data_pointer_for_bcp();
-      __ ba_short(profile_method_continue);
-    }
-
-    // handle invocation counter overflow
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(Lcontinue);
-  }
-
-
-  return entry;
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  // No special entry points that preclude compilation
+  return true;
 }
 
 static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
@@ -1747,332 +303,3 @@
   assert(lo <= esp && esp < monitors, "esp in bounds");
 #endif // ASSERT
 }
-
-//----------------------------------------------------------------------------------------------------
-// Exceptions
-void TemplateInterpreterGenerator::generate_throw_exception() {
-
-  // Entry point in previous activation (i.e., if the caller was interpreted)
-  Interpreter::_rethrow_exception_entry = __ pc();
-  // O0: exception
-
-  // entry point for exceptions thrown within interpreter code
-  Interpreter::_throw_exception_entry = __ pc();
-  __ verify_thread();
-  // expression stack is undefined here
-  // O0: exception, i.e. Oexception
-  // Lbcp: exception bcp
-  __ verify_oop(Oexception);
-
-
-  // expression stack must be empty before entering the VM in case of an exception
-  __ empty_expression_stack();
-  // find exception handler address and preserve exception oop
-  // call C routine to find handler and jump to it
-  __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
-  __ push_ptr(O1); // push exception for exception handler bytecodes
-
-  __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
-  __ delayed()->nop();
-
-
-  // if the exception is not handled in the current frame
-  // the frame is removed and the exception is rethrown
-  // (i.e. exception continuation is _rethrow_exception)
-  //
-  // Note: At this point the bci is still the bci for the instruction which caused
-  //       the exception and the expression stack is empty. Thus, for any VM calls
-  //       at this point, GC will find a legal oop map (with empty expression stack).
-
-  // in current activation
-  // tos: exception
-  // Lbcp: exception bcp
-
-  //
-  // JVMTI PopFrame support
-  //
-
-  Interpreter::_remove_activation_preserving_args_entry = __ pc();
-  Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
-  // Set the popframe_processing bit in popframe_condition indicating that we are
-  // currently handling popframe, so that call_VMs that may happen later do not trigger new
-  // popframe handling cycles.
-
-  __ ld(popframe_condition_addr, G3_scratch);
-  __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
-  __ stw(G3_scratch, popframe_condition_addr);
-
-  // Empty the expression stack, as in normal exception handling
-  __ empty_expression_stack();
-  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
-
-  {
-    // Check to see whether we are returning to a deoptimized frame.
-    // (The PopFrame call ensures that the caller of the popped frame is
-    // either interpreted or compiled and deoptimizes it if compiled.)
-    // In this case, we can't call dispatch_next() after the frame is
-    // popped, but instead must save the incoming arguments and restore
-    // them after deoptimization has occurred.
-    //
-    // Note that we don't compare the return PC against the
-    // deoptimization blob's unpack entry because of the presence of
-    // adapter frames in C2.
-    Label caller_not_deoptimized;
-    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
-    __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);
-
-    const Register Gtmp1 = G3_scratch;
-    const Register Gtmp2 = G1_scratch;
-    const Register RconstMethod = Gtmp1;
-    const Address constMethod(Lmethod, Method::const_offset());
-    const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
-
-    // Compute size of arguments for saving when returning to deoptimized caller
-    __ ld_ptr(constMethod, RconstMethod);
-    __ lduh(size_of_parameters, Gtmp1);
-    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
-    __ sub(Llocals, Gtmp1, Gtmp2);
-    __ add(Gtmp2, wordSize, Gtmp2);
-    // Save these arguments
-    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
-    // Inform deoptimization that it is responsible for restoring these arguments
-    __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
-    Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
-    __ st(Gtmp1, popframe_condition_addr);
-
-    // Return from the current method
-    // The caller's SP was adjusted upon method entry to accommodate
-    // the callee's non-argument locals. Undo that adjustment.
-    __ ret();
-    __ delayed()->restore(I5_savedSP, G0, SP);
-
-    __ bind(caller_not_deoptimized);
-  }
-
-  // Clear the popframe condition flag
-  __ stw(G0 /* popframe_inactive */, popframe_condition_addr);
-
-  // Get out of the current method (how this is done depends on the particular compiler calling
-  // convention that the interpreter currently follows)
-  // The caller's SP was adjusted upon method entry to accommodate
-  // the callee's non-argument locals. Undo that adjustment.
-  __ restore(I5_savedSP, G0, SP);
-  // The method data pointer was incremented already during
-  // call profiling. We have to restore the mdp for the current bcp.
-  if (ProfileInterpreter) {
-    __ set_method_data_pointer_for_bcp();
-  }
-
-#if INCLUDE_JVMTI
-  {
-    Label L_done;
-
-    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
-    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);
-
-    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
-    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
-
-    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);
-
-    __ br_null(G1_scratch, false, Assembler::pn, L_done);
-    __ delayed()->nop();
-
-    __ st_ptr(G1_scratch, Lesp, wordSize);
-    __ bind(L_done);
-  }
-#endif // INCLUDE_JVMTI
-
-  // Resume bytecode interpretation at the current bcp
-  __ dispatch_next(vtos);
-  // end of JVMTI PopFrame support
-
-  Interpreter::_remove_activation_entry = __ pc();
-
-  // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
-  __ pop_ptr(Oexception);                                  // get exception
-
-  // Intel has the following comment:
-  //// remove the activation (without doing throws on illegalMonitorExceptions)
-  // They remove the activation without checking for bad monitor state.
-  // %%% We should make sure this is the right semantics before implementing.
-
-  __ set_vm_result(Oexception);
-  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);
-
-  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);
-
-  __ get_vm_result(Oexception);
-  __ verify_oop(Oexception);
-
-  const int return_reg_adjustment = frame::pc_return_offset;
-  Address issuing_pc_addr(I7, return_reg_adjustment);
-
-  // We are done with this activation frame; find out where to go next.
-  // The continuation point will be an exception handler, which expects
-  // the following registers set up:
-  //
-  // Oexception: exception
-  // Oissuing_pc: the PC of the local call that threw the exception
-  // Other On: garbage
-  // In/Ln:  the contents of the caller's register window
-  //
-  // We do the required restore at the last possible moment, because we
-  // need to preserve some state across a runtime call.
-  // (Remember that the caller activation is unknown--it might not be
-  // interpreted, so things like Lscratch are useless in the caller.)
-
-  // Although the Intel version uses call_C, we can use the more
-  // compact call_VM.  (The only real difference on SPARC is a
-  // harmlessly ignored [re]set_last_Java_frame, compared with
-  // the Intel code which lacks this.)
-  __ mov(Oexception,      Oexception->after_save());  // get exception in I0 so it will be on O0 after restore
-  __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
-  __ super_call_VM_leaf(L7_thread_cache,
-                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
-                        G2_thread, Oissuing_pc->after_save());
-
-  // The caller's SP was adjusted upon method entry to accommodate
-  // the callee's non-argument locals. Undo that adjustment.
-  __ JMP(O0, 0);                         // return exception handler in caller
-  __ delayed()->restore(I5_savedSP, G0, SP);
-
-  // (same old exception object is already in Oexception; see above)
-  // Note that an "issuing PC" is actually the next PC after the call
-}
-
-
-//
-// JVMTI ForceEarlyReturn support
-//
-
-address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
-  address entry = __ pc();
-
-  __ empty_expression_stack();
-  __ load_earlyret_value(state);
-
-  __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
-  Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
-
-  // Clear the earlyret state
-  __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
-
-  __ remove_activation(state,
-                       /* throw_monitor_exception */ false,
-                       /* install_monitor_exception */ false);
-
-  // The caller's SP was adjusted upon method entry to accommodate
-  // the callee's non-argument locals. Undo that adjustment.
-  __ ret();                             // return to caller
-  __ delayed()->restore(I5_savedSP, G0, SP);
-
-  return entry;
-} // end of JVMTI ForceEarlyReturn support
-
-
-//------------------------------------------------------------------------------------------------------------------------
-// Helper for vtos entry point generation
-
-void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
-  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
-  Label L;
-  aep = __ pc(); __ push_ptr(); __ ba_short(L);
-  fep = __ pc(); __ push_f();   __ ba_short(L);
-  dep = __ pc(); __ push_d();   __ ba_short(L);
-  lep = __ pc(); __ push_l();   __ ba_short(L);
-  iep = __ pc(); __ push_i();
-  bep = cep = sep = iep;                        // there aren't any
-  vep = __ pc(); __ bind(L);                    // fall through
-  generate_and_dispatch(t);
-}
-
-// --------------------------------------------------------------------------------
-
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
- : TemplateInterpreterGenerator(code) {
-   generate_all(); // down here so it can be "virtual"
-}
-
-// --------------------------------------------------------------------------------
-
-// Non-product code
-#ifndef PRODUCT
-address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
-  address entry = __ pc();
-
-  __ push(state);
-  __ mov(O7, Lscratch); // protect return address within interpreter
-
-  // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer
-  __ mov( Otos_l2, G3_scratch );
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
-  __ mov(Lscratch, O7); // restore return address
-  __ pop(state);
-  __ retl();
-  __ delayed()->nop();
-
-  return entry;
-}
-
-
-// helpers for generate_and_dispatch
-
-void TemplateInterpreterGenerator::count_bytecode() {
-  __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
-}
-
-
-void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
-  __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
-}
-
-
-void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
-  AddressLiteral index   (&BytecodePairHistogram::_index);
-  AddressLiteral counters((address) &BytecodePairHistogram::_counters);
-
-  // get index, shift out old bytecode, bring in new bytecode, and store it
-  // _index = (_index >> log2_number_of_codes) |
-  //          (bytecode << log2_number_of_codes);
-
-  __ load_contents(index, G4_scratch);
-  __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
-  __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes,  G3_scratch );
-  __ or3( G3_scratch,  G4_scratch, G4_scratch );
-  __ store_contents(G4_scratch, index, G3_scratch);
-
-  // bump bucket contents
-  // _counters[_index] ++;
-
-  __ set(counters, G3_scratch);                       // loads into G3_scratch
-  __ sll( G4_scratch, LogBytesPerWord, G4_scratch );  // Index is word address
-  __ add (G3_scratch, G4_scratch, G3_scratch);        // Add in index
-  __ ld (G3_scratch, 0, G4_scratch);
-  __ inc (G4_scratch);
-  __ st (G4_scratch, 0, G3_scratch);
-}
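
For reference, the pair-index arithmetic emitted above, restated as a minimal C++ sketch (assuming log2_number_of_codes == 8, i.e. 256 distinct bytecodes; the real constant lives in BytecodePairHistogram):

#include <cstdint>

// Shift out the old first bytecode, bring in the new one. Afterwards the
// low bits hold the previous bytecode and the high bits the current one,
// so each bytecode pair maps to a unique counter bucket.
static uint32_t update_pair_index(uint32_t index, uint8_t bytecode) {
  const int log2_number_of_codes = 8;  // assumption
  return (index >> log2_number_of_codes) |
         ((uint32_t)bytecode << log2_number_of_codes);
}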
-
-
-void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
-  // Call a little run-time stub to avoid blow-up for each bytecode.
-  // The run-time stub saves the right registers, depending on
-  // the tosca in-state for the given template.
-  address entry = Interpreter::trace_code(t->tos_in());
-  guarantee(entry != NULL, "entry must have been generated");
-  __ call(entry, relocInfo::none);
-  __ delayed()->nop();
-}
-
-
-void TemplateInterpreterGenerator::stop_interpreter_at() {
-  AddressLiteral counter(&BytecodeCounter::_counter_value);
-  __ load_contents(counter, G3_scratch);
-  AddressLiteral stop_at(&StopInterpreterAt);
-  __ load_ptr_contents(stop_at, G4_scratch);
-  __ cmp(G3_scratch, G4_scratch);
-  __ breakpoint_trap(Assembler::equal, Assembler::icc);
-}
-#endif // not PRODUCT
-#endif // !CC_INTERP
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -35,7 +35,10 @@
 unsigned int VM_Version::_L2_data_cache_line_size = 0;
 
 void VM_Version::initialize() {
-  _features = determine_features();
+
+  assert(_features != VM_Version::unknown_m, "System pre-initialization is not complete.");
+  guarantee(VM_Version::has_v9(), "only SPARC v9 is supported");
+
   PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
   PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
   PrefetchFieldsAhead         = prefetch_fields_ahead();
@@ -60,8 +63,6 @@
     FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1);
   }
 
-  guarantee(VM_Version::has_v9(), "only SPARC v9 is supported");
-
   UseSSE = 0; // Only on x86 and x64
 
   _supports_cx8 = has_v9();
--- a/src/cpu/sparc/vm/vm_version_sparc.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/sparc/vm/vm_version_sparc.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -128,6 +128,8 @@
   // Initialization
   static void initialize();
 
+  static void init_before_ergo()        { _features = determine_features(); }
+
   // Instruction support
   static bool has_v8()                  { return (_features & v8_instructions_m) != 0; }
   static bool has_v9()                  { return (_features & v9_instructions_m) != 0; }
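
These two hunks split CPU feature detection out of initialize(): determine_features() now runs in init_before_ergo(), before ergonomics can consult the feature bits, and initialize() merely asserts that detection already happened. A minimal sketch of the resulting ordering (vm_startup is a hypothetical driver, not a real VM entry point):

// Hypothetical startup sequence illustrating the new ordering contract.
void vm_startup() {
  VM_Version::init_before_ergo();  // _features = determine_features()
  // ... ergonomics runs here and may consult the feature bits ...
  VM_Version::initialize();        // asserts _features != unknown_m and
                                   // guarantees SPARC v9 support
}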
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -518,6 +518,10 @@
   // Pop the stack before the safepoint code
   __ remove_frame(initial_frame_size_in_bytes());
 
+  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
+    __ reserved_stack_check();
+  }
+
   bool result_is_oop = result->is_valid() ? result->is_oop() : false;
 
   // Note: we do not need to round double result; float result has the right precision
--- a/src/cpu/x86/vm/globalDefinitions_x86.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/x86/vm/globalDefinitions_x86.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -61,4 +61,8 @@
 #define INCLUDE_RTM_OPT 1
 #endif
 
+#if defined(LINUX) || defined(SOLARIS) || defined(__APPLE__)
+#define SUPPORT_RESERVED_STACK_AREA
+#endif
+
 #endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
--- a/src/cpu/x86/vm/globals_x86.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/x86/vm/globals_x86.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -57,9 +57,11 @@
 
 #define DEFAULT_STACK_YELLOW_PAGES (NOT_WINDOWS(2) WINDOWS_ONLY(3))
 #define DEFAULT_STACK_RED_PAGES (1)
+#define DEFAULT_STACK_RESERVED_PAGES (NOT_WINDOWS(1) WINDOWS_ONLY(0))
 
 #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
 #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
+#define MIN_STACK_RESERVED_PAGES (0)
 
 #ifdef AMD64
 // Very large C++ stack frames using solaris-amd64 optimized builds
@@ -76,6 +78,7 @@
 define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
 define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
 define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
+define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
--- a/src/cpu/x86/vm/interp_masm_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/x86/vm/interp_masm_x86.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1023,6 +1023,25 @@
   // get sender sp
   movptr(rbx,
          Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
+  if (StackReservedPages > 0) {
+    // testing if reserved zone needs to be re-enabled
+    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
+    Label no_reserved_zone_enabling;
+
+    NOT_LP64(get_thread(rthread);)
+
+    cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
+    jcc(Assembler::lessEqual, no_reserved_zone_enabling);
+
+    call_VM_leaf(
+      CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
+    push(rthread);
+    call_VM(noreg, CAST_FROM_FN_PTR(address,
+                   InterpreterRuntime::throw_delayed_StackOverflowError));
+    should_not_reach_here();
+
+    bind(no_reserved_zone_enabling);
+  }
   leave();                           // remove frame anchor
   pop(ret_addr);                     // get return address
   mov(rsp, rbx);                     // set sp to sender sp
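
The new block above re-arms the reserved stack zone when an activation that was allowed into it returns. A minimal C++ sketch of the same check, assuming a downward-growing stack and a simplified accessor for the JavaThread watermark field (the two runtime entries are the ones named in the generated code):

// If the sender SP lies above the watermark recorded when a
// @ReservedStackAccess method was let into the reserved zone, we are leaving
// that activation: re-enable the guard page and deliver the delayed error.
void check_reserved_zone_on_exit(JavaThread* thread, char* sender_sp) {
  if (sender_sp > thread->reserved_stack_activation()) {  // assumed accessor
    SharedRuntime::enable_stack_reserved_zone(thread);
    InterpreterRuntime::throw_delayed_StackOverflowError(thread);  // does not return
  }
}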
--- a/src/cpu/x86/vm/interp_masm_x86.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/x86/vm/interp_masm_x86.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -175,6 +175,7 @@
     movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
     // NULL last_sp until next java call
     movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+    NOT_LP64(empty_FPU_stack());
   }
 
   // Helpers for swap and dup
--- a/src/cpu/x86/vm/interpreter_x86_32.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/x86/vm/interpreter_x86_32.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,6 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -184,20 +183,3 @@
 
   return entry_point;
 }
-
-
-void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
-
-  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
-  // the days we had adapter frames. When we deoptimize a situation where a
-  // compiled caller calls a compiled callee, the caller will have registers it
-  // expects to survive the call to the callee. If we deoptimize the callee, the
-  // only way we can restore these registers is to have the oldest interpreter
-  // frame that we create restore these values. That is what this routine
-  // accomplishes.
-
-  // At the moment we have modified C2 to not have any callee-save registers,
-  // so this problem does not exist and this routine is just a placeholder.
-
-  assert(f->is_interpreted_frame(), "must be interpreted");
-}
--- a/src/cpu/x86/vm/interpreter_x86_64.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/x86/vm/interpreter_x86_64.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,6 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -298,19 +297,3 @@
 
   return entry_point;
 }
-
-void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
-
-  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
-  // the days we had adapter frames. When we deoptimize a situation where a
-  // compiled caller calls a compiled callee, the caller will have registers it
-  // expects to survive the call to the callee. If we deoptimize the callee, the
-  // only way we can restore these registers is to have the oldest interpreter
-  // frame that we create restore these values. That is what this routine
-  // accomplishes.
-
-  // At the moment we have modified C2 to not have any callee-save registers,
-  // so this problem does not exist and this routine is just a placeholder.
-
-  assert(f->is_interpreted_frame(), "must be interpreted");
-}
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -39,6 +39,7 @@
 #include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
+#include "runtime/thread.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc/g1/g1CollectedHeap.inline.hpp"
@@ -1066,6 +1067,22 @@
   }
 }
 
+void MacroAssembler::reserved_stack_check() {
+  // testing if reserved zone needs to be enabled
+  Label no_reserved_zone_enabling;
+  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
+  NOT_LP64(get_thread(rsi);)
+
+  cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
+  jcc(Assembler::below, no_reserved_zone_enabling);
+
+  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
+  jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
+  should_not_reach_here();
+
+  bind(no_reserved_zone_enabling);
+}
+
 int MacroAssembler::biased_locking_enter(Register lock_reg,
                                          Register obj_reg,
                                          Register swap_reg,
@@ -11072,3 +11089,43 @@
 SkipIfEqual::~SkipIfEqual() {
   _masm->bind(_label);
 }
+
+// 32-bit Windows has its own fast-path implementation
+// of get_thread
+#if !defined(WIN32) || defined(_LP64)
+
+// This is simply a call to Thread::current()
+void MacroAssembler::get_thread(Register thread) {
+  if (thread != rax) {
+    push(rax);
+  }
+  LP64_ONLY(push(rdi);)
+  LP64_ONLY(push(rsi);)
+  push(rdx);
+  push(rcx);
+#ifdef _LP64
+  push(r8);
+  push(r9);
+  push(r10);
+  push(r11);
+#endif
+
+  MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);
+
+#ifdef _LP64
+  pop(r11);
+  pop(r10);
+  pop(r9);
+  pop(r8);
+#endif
+  pop(rcx);
+  pop(rdx);
+  LP64_ONLY(pop(rsi);)
+  LP64_ONLY(pop(rdi);)
+  if (thread != rax) {
+    mov(thread, rax);
+    pop(rax);
+  }
+}
+
+#endif
--- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -641,6 +641,9 @@
   // stack overflow + shadow pages.  Also, clobbers tmp
   void bang_stack_size(Register size, Register tmp);
 
+  // Check for reserved stack access in method being exited (for JIT)
+  void reserved_stack_check();
+
   virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                 Register tmp,
                                                 int offset);
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -3290,7 +3290,10 @@
                                                                                    CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
 
     // Build this early so it's available for the interpreter
-    StubRoutines::_throw_StackOverflowError_entry          = generate_throw_exception("StackOverflowError throw_exception",           CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
+    StubRoutines::_throw_StackOverflowError_entry          = generate_throw_exception("StackOverflowError throw_exception",
+                                                                                      CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
+    StubRoutines::_throw_delayed_StackOverflowError_entry  = generate_throw_exception("delayed StackOverflowError throw_exception",
+                                                                                      CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));
 
     if (UseCRC32Intrinsics) {
       // set table address before stub generation which uses it
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -4458,6 +4458,11 @@
                                CAST_FROM_FN_PTR(address,
                                                 SharedRuntime::
                                                 throw_StackOverflowError));
+    StubRoutines::_throw_delayed_StackOverflowError_entry =
+      generate_throw_exception("delayed StackOverflowError throw_exception",
+                               CAST_FROM_FN_PTR(address,
+                                                SharedRuntime::
+                                                throw_delayed_StackOverflowError));
     if (UseCRC32Intrinsics) {
       // set table address before stub generation which uses it
       StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,1874 @@
+/*
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
+#include "interpreter/templateTable.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/timer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
+
+#define __ _masm->
+
+#ifndef CC_INTERP
+
+// Global Register Names
+static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
+static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
+
+const int method_offset = frame::interpreter_frame_method_offset * wordSize;
+const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
+const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
+
+//-----------------------------------------------------------------------------
+
+address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
+  address entry = __ pc();
+
+#ifdef ASSERT
+  {
+    Label L;
+    __ lea(rax, Address(rbp,
+                        frame::interpreter_frame_monitor_block_top_offset *
+                        wordSize));
+    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
+                         // grows downward)
+    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
+    __ stop ("interpreter frame not set up");
+    __ bind(L);
+  }
+#endif // ASSERT
+  // Restore bcp under the assumption that the current frame is still
+  // interpreted
+  __ restore_bcp();
+
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+  // throw exception
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::throw_StackOverflowError));
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
+        const char* name) {
+  address entry = __ pc();
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+  // setup parameters
+  // ??? convention: expect aberrant index in register ebx
+  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
+  __ lea(rarg, ExternalAddress((address)name));
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::
+                              throw_ArrayIndexOutOfBoundsException),
+             rarg, rbx);
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
+  address entry = __ pc();
+
+  // object is at TOS
+  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
+  __ pop(rarg);
+
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::
+                              throw_ClassCastException),
+             rarg);
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_exception_handler_common(
+        const char* name, const char* message, bool pass_oop) {
+  assert(!pass_oop || message == NULL, "either oop or message but not both");
+  address entry = __ pc();
+
+  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
+  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);
+
+  if (pass_oop) {
+    // object is at TOS
+    __ pop(rarg2);
+  }
+  // expression stack must be empty before entering the VM if an
+  // exception happened
+  __ empty_expression_stack();
+  // setup parameters
+  __ lea(rarg, ExternalAddress((address)name));
+  if (pass_oop) {
+    __ call_VM(rax, CAST_FROM_FN_PTR(address,
+                                     InterpreterRuntime::
+                                     create_klass_exception),
+               rarg, rarg2);
+  } else {
+    // Kind of lame: ExternalAddress can't take NULL because
+    // external_word_Relocation will assert.
+    if (message != NULL) {
+      __ lea(rarg2, ExternalAddress((address)message));
+    } else {
+      __ movptr(rarg2, NULL_WORD);
+    }
+    __ call_VM(rax,
+               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
+               rarg, rarg2);
+  }
+  // throw exception
+  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
+  return entry;
+}
+
+
+address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
+  address entry = __ pc();
+  // NULL last_sp until next java call
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+  __ dispatch_next(state);
+  return entry;
+}
+
+
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
+  address entry = __ pc();
+
+#ifndef _LP64
+#ifdef COMPILER2
+  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
+  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
+    for (int i = 1; i < 8; i++) {
+        __ ffree(i);
+    }
+  } else if (UseSSE < 2) {
+    __ empty_FPU_stack();
+  }
+#endif // COMPILER2
+  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
+    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
+  } else {
+    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
+  }
+
+  if (state == ftos) {
+    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
+  } else if (state == dtos) {
+    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
+  }
+#endif // _LP64
+
+  // Restore stack bottom in case i2c adjusted stack
+  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+  // and NULL it as marker that esp is now tos until next java call
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+
+  __ restore_bcp();
+  __ restore_locals();
+
+  if (state == atos) {
+    Register mdp = rbx;
+    Register tmp = rcx;
+    __ profile_return_type(mdp, rax, tmp);
+  }
+
+  const Register cache = rbx;
+  const Register index = rcx;
+  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
+
+  const Register flags = cache;
+  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
+  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
+  __ dispatch_next(state, step);
+
+  return entry;
+}
+
+
+address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
+  address entry = __ pc();
+
+#ifndef _LP64
+  if (state == ftos) {
+    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
+  } else if (state == dtos) {
+    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
+  }
+#endif // _LP64
+
+  // NULL last_sp until next java call
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+  __ restore_bcp();
+  __ restore_locals();
+  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
+  NOT_LP64(__ get_thread(thread));
+#if INCLUDE_JVMCI
+  // Check if we need to take lock at entry of synchronized method.
+  if (UseJVMCICompiler) {
+    Label L;
+    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
+    __ jcc(Assembler::zero, L);
+    // Clear flag.
+    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
+    // Satisfy calling convention for lock_method().
+    __ get_method(rbx);
+    // Take lock.
+    lock_method();
+    __ bind(L);
+  }
+#endif
+  // handle exceptions
+  {
+    Label L;
+    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
+    __ jcc(Assembler::zero, L);
+    __ call_VM(noreg,
+               CAST_FROM_FN_PTR(address,
+                                InterpreterRuntime::throw_pending_exception));
+    __ should_not_reach_here();
+    __ bind(L);
+  }
+  __ dispatch_next(state, step);
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_result_handler_for(
+        BasicType type) {
+  address entry = __ pc();
+  switch (type) {
+  case T_BOOLEAN: __ c2bool(rax);            break;
+#ifndef _LP64
+  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
+#else
+  case T_CHAR   : __ movzwl(rax, rax);       break;
+#endif // _LP64
+  case T_BYTE   : __ sign_extend_byte(rax);  break;
+  case T_SHORT  : __ sign_extend_short(rax); break;
+  case T_INT    : /* nothing to do */        break;
+  case T_LONG   : /* nothing to do */        break;
+  case T_VOID   : /* nothing to do */        break;
+#ifndef _LP64
+  case T_DOUBLE :
+  case T_FLOAT  :
+    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
+      __ pop(t);                            // remove return address first
+      // Must return a result for interpreter or compiler. In SSE
+      // mode, results are returned in xmm0 and the FPU stack must
+      // be empty.
+      if (type == T_FLOAT && UseSSE >= 1) {
+        // Load ST0
+        __ fld_d(Address(rsp, 0));
+        // Store as float and empty fpu stack
+        __ fstp_s(Address(rsp, 0));
+        // and reload
+        __ movflt(xmm0, Address(rsp, 0));
+      } else if (type == T_DOUBLE && UseSSE >= 2 ) {
+        __ movdbl(xmm0, Address(rsp, 0));
+      } else {
+        // restore ST0
+        __ fld_d(Address(rsp, 0));
+      }
+      // and pop the temp
+      __ addptr(rsp, 2 * wordSize);
+      __ push(t);                           // restore return address
+    }
+    break;
+#else
+  case T_FLOAT  : /* nothing to do */        break;
+  case T_DOUBLE : /* nothing to do */        break;
+#endif // _LP64
+
+  case T_OBJECT :
+    // retrieve result from frame
+    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
+    // and verify it
+    __ verify_oop(rax);
+    break;
+  default       : ShouldNotReachHere();
+  }
+  __ ret(0);                                   // return from result handler
+  return entry;
+}
+
+address TemplateInterpreterGenerator::generate_safept_entry_for(
+        TosState state,
+        address runtime_entry) {
+  address entry = __ pc();
+  __ push(state);
+  __ call_VM(noreg, runtime_entry);
+  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
+  return entry;
+}
+
+
+
+// Helpers for commoning out cases in the various type of method entries.
+//
+
+
+// increment invocation count & check for overflow
+//
+// Note: checking for negative value instead of overflow
+//       so we have a 'sticky' overflow test
+//
+// rbx: method
+// rcx: invocation counter
+//
+void InterpreterGenerator::generate_counter_incr(
+        Label* overflow,
+        Label* profile_method,
+        Label* profile_method_continue) {
+  Label done;
+  // Note: In tiered we increment counters either in the Method* or in the MDO, depending on whether we're profiling.
+  if (TieredCompilation) {
+    int increment = InvocationCounter::count_increment;
+    Label no_mdo;
+    if (ProfileInterpreter) {
+      // Are we profiling?
+      __ movptr(rax, Address(rbx, Method::method_data_offset()));
+      __ testptr(rax, rax);
+      __ jccb(Assembler::zero, no_mdo);
+      // Increment counter in the MDO
+      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
+                                                in_bytes(InvocationCounter::counter_offset()));
+      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
+      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
+      __ jmp(done);
+    }
+    __ bind(no_mdo);
+    // Increment counter in MethodCounters
+    const Address invocation_counter(rax,
+                  MethodCounters::invocation_counter_offset() +
+                  InvocationCounter::counter_offset());
+    __ get_method_counters(rbx, rax, done);
+    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
+    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
+                               false, Assembler::zero, overflow);
+    __ bind(done);
+  } else { // not TieredCompilation
+    const Address backedge_counter(rax,
+                  MethodCounters::backedge_counter_offset() +
+                  InvocationCounter::counter_offset());
+    const Address invocation_counter(rax,
+                  MethodCounters::invocation_counter_offset() +
+                  InvocationCounter::counter_offset());
+
+    __ get_method_counters(rbx, rax, done);
+
+    if (ProfileInterpreter) {
+      __ incrementl(Address(rax,
+              MethodCounters::interpreter_invocation_counter_offset()));
+    }
+    // Update standard invocation counters
+    __ movl(rcx, invocation_counter);
+    __ incrementl(rcx, InvocationCounter::count_increment);
+    __ movl(invocation_counter, rcx); // save invocation count
+
+    __ movl(rax, backedge_counter);   // load backedge counter
+    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
+
+    __ addl(rcx, rax);                // add both counters
+
+    // profile_method is non-NULL only for interpreted methods, so
+    // (profile_method != NULL) == !native_call
+
+    if (ProfileInterpreter && profile_method != NULL) {
+      // Test to see if we should create a method data oop
+      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
+      __ jcc(Assembler::less, *profile_method_continue);
+
+      // if no method data exists, go to profile_method
+      __ test_method_data_pointer(rax, *profile_method);
+    }
+
+    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
+    __ jcc(Assembler::aboveEqual, *overflow);
+    __ bind(done);
+  }
+}
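
For the non-tiered path above, the overflow test can be restated as a small C++ sketch (the increment, mask, and limit values below are illustrative; the real ones come from InvocationCounter and MethodCounters):

#include <cstdint>

// Bump the invocation counter, add the masked backedge counter, and compare
// the sum against the limit with an unsigned compare -- once over the limit
// the test stays true, which is the 'sticky' behavior mentioned above.
static bool invocation_overflow(uint32_t& invocation_counter,
                                uint32_t backedge_counter) {
  const uint32_t count_increment  = 1u << 3;             // assumption: low status bits
  const uint32_t count_mask_value = ~(count_increment - 1);
  const uint32_t invocation_limit = 10000u << 3;         // hypothetical limit

  invocation_counter += count_increment;
  const uint32_t sum = invocation_counter + (backedge_counter & count_mask_value);
  return sum >= invocation_limit;
}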
+
+void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
+
+  // Asm interpreter on entry
+  // r14/rdi - locals
+  // r13/rsi - bcp
+  // rbx - method
+  // rdx - cpool --- DOES NOT APPEAR TO BE TRUE
+  // rbp - interpreter frame
+
+  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
+  // Everything as it was on entry
+  // rdx is not restored. Doesn't appear to really be set.
+
+  // InterpreterRuntime::frequency_counter_overflow takes two
+  // arguments, the first (thread) is passed by call_VM, the second
+  // indicates if the counter overflow occurs at a backwards branch
+  // (NULL bcp).  We pass zero for it.  The call returns the address
+  // of the verified entry point for the method or NULL if the
+  // compilation did not complete (either went background or bailed
+  // out).
+  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
+  __ movl(rarg, 0);
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::frequency_counter_overflow),
+             rarg);
+
+  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
+  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
+  // and jump to the interpreted entry.
+  __ jmp(*do_continue, relocInfo::none);
+}
+
+// See if we've got enough room on the stack for locals plus overhead.
+// The expression stack grows down incrementally, so the normal guard
+// page mechanism will work for that.
+//
+// NOTE: Since the additional locals are also always pushed (this wasn't
+// obvious in generate_fixed_frame), the guard should work for them
+// too.
+//
+// Args:
+//      rdx: number of additional locals this frame needs (what we must check)
+//      rbx: Method*
+//
+// Kills:
+//      rax
+void InterpreterGenerator::generate_stack_overflow_check(void) {
+
+  // monitor entry size: see picture of stack in frame_x86.hpp
+  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+
+  // total overhead size: entry_size + (saved rbp through expr stack
+  // bottom).  be sure to change this if you add/subtract anything
+  // to/from the overhead area
+  const int overhead_size =
+    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
+
+  const int page_size = os::vm_page_size();
+
+  Label after_frame_check;
+
+  // see if the frame is greater than one page in size. If so,
+  // then we need to verify there is enough stack space remaining
+  // for the additional locals.
+  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
+  __ jcc(Assembler::belowEqual, after_frame_check);
+
+  // compute rsp as if this were going to be the last frame on
+  // the stack before the red zone
+
+  Label after_frame_check_pop;
+  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
+#ifndef _LP64
+  __ push(thread);
+  __ get_thread(thread);
+#endif
+
+  const Address stack_base(thread, Thread::stack_base_offset());
+  const Address stack_size(thread, Thread::stack_size_offset());
+
+  // locals + overhead, in bytes
+  __ mov(rax, rdx);
+  __ shlptr(rax, Interpreter::logStackElementSize);  // 2 slots per parameter.
+  __ addptr(rax, overhead_size);
+
+#ifdef ASSERT
+  Label stack_base_okay, stack_size_okay;
+  // verify that thread stack base is non-zero
+  __ cmpptr(stack_base, (int32_t)NULL_WORD);
+  __ jcc(Assembler::notEqual, stack_base_okay);
+  __ stop("stack base is zero");
+  __ bind(stack_base_okay);
+  // verify that thread stack size is non-zero
+  __ cmpptr(stack_size, 0);
+  __ jcc(Assembler::notEqual, stack_size_okay);
+  __ stop("stack size is zero");
+  __ bind(stack_size_okay);
+#endif
+
+  // Add stack base to locals and subtract stack size
+  __ addptr(rax, stack_base);
+  __ subptr(rax, stack_size);
+
+  // Use the maximum number of pages we might bang.
+  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages+StackReservedPages) ? StackShadowPages :
+                        (StackRedPages+StackYellowPages+StackReservedPages);
+
+  // add in the red, yellow and reserved zone sizes (or the shadow pages, whichever is larger)
+  __ addptr(rax, max_pages * page_size);
+
+  // check against the current stack bottom
+  __ cmpptr(rsp, rax);
+
+  __ jcc(Assembler::above, after_frame_check_pop);
+  NOT_LP64(__ pop(rsi));  // get saved bcp
+
+  // Restore sender's sp as SP. This is necessary if the sender's
+  // frame is an extended compiled frame (see gen_c2i_adapter())
+  // and safer anyway in case of JSR292 adaptations.
+
+  __ pop(rax); // return address must be moved if SP is changed
+  __ mov(rsp, rbcp);
+  __ push(rax);
+
+  // Note: the restored frame is not necessarily interpreted.
+  // Use the shared runtime version of the StackOverflowError.
+  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
+  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
+  // all done with frame size check
+  __ bind(after_frame_check_pop);
+  NOT_LP64(__ pop(rsi));
+
+  // all done with frame size check
+  __ bind(after_frame_check);
+}
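
The limit computed above can be restated as a short C++ sketch, with illustrative page counts in place of the Stack*Pages flags (the stack grows downward, so the lowest legal rsp sits the banged pages above the stack bottom plus this frame's footprint):

#include <cstddef>

// Lowest rsp this activation may reach. The generated code compares rsp
// against this value and jumps to the shared StackOverflowError stub if
// rsp is not above it.
static char* frame_limit(char* stack_base, size_t stack_size,
                         size_t locals_bytes, size_t overhead_bytes) {
  const size_t page_size = 4096;                            // assumption
  const int shadow = 20, red = 1, yellow = 2, reserved = 1; // hypothetical flag values
  const int max_pages = shadow > (red + yellow + reserved)
                            ? shadow : (red + yellow + reserved);
  return stack_base - stack_size            // stack bottom
         + max_pages * page_size            // pages we might bang
         + locals_bytes + overhead_bytes;   // this frame's needs
}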
+
+// Allocate monitor and lock method (asm interpreter)
+//
+// Args:
+//      rbx: Method*
+//      r14/rdi: locals
+//
+// Kills:
+//      rax
+//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
+//      rscratch1, rscratch2 (scratch regs)
+void TemplateInterpreterGenerator::lock_method() {
+  // synchronize method
+  const Address access_flags(rbx, Method::access_flags_offset());
+  const Address monitor_block_top(
+        rbp,
+        frame::interpreter_frame_monitor_block_top_offset * wordSize);
+  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+
+#ifdef ASSERT
+  {
+    Label L;
+    __ movl(rax, access_flags);
+    __ testl(rax, JVM_ACC_SYNCHRONIZED);
+    __ jcc(Assembler::notZero, L);
+    __ stop("method doesn't need synchronization");
+    __ bind(L);
+  }
+#endif // ASSERT
+
+  // get synchronization object
+  {
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+    Label done;
+    __ movl(rax, access_flags);
+    __ testl(rax, JVM_ACC_STATIC);
+    // get receiver (assume this is frequent case)
+    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
+    __ jcc(Assembler::zero, done);
+    __ movptr(rax, Address(rbx, Method::const_offset()));
+    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
+    __ movptr(rax, Address(rax,
+                           ConstantPool::pool_holder_offset_in_bytes()));
+    __ movptr(rax, Address(rax, mirror_offset));
+
+#ifdef ASSERT
+    {
+      Label L;
+      __ testptr(rax, rax);
+      __ jcc(Assembler::notZero, L);
+      __ stop("synchronization object is NULL");
+      __ bind(L);
+    }
+#endif // ASSERT
+
+    __ bind(done);
+  }
+
+  // add space for monitor & lock
+  __ subptr(rsp, entry_size); // add space for a monitor entry
+  __ movptr(monitor_block_top, rsp);  // set new monitor block top
+  // store object
+  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
+  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
+  __ movptr(lockreg, rsp); // object address
+  __ lock_object(lockreg);
+}
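
The receiver-vs-mirror selection in lock_method() above, sketched in plain C++ (the accessor chain mirrors the offsets used by the assembly; treat the exact method names as illustrative):

// Instance methods lock the receiver (local 0); static methods lock the
// class mirror reached via Method* -> ConstMethod* -> ConstantPool ->
// pool holder Klass -> java_mirror.
oop synchronization_object(Method* method, oop receiver) {
  if (method->access_flags().is_static()) {
    return method->constants()->pool_holder()->java_mirror();
  }
  return receiver;
}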
+
+// Generate a fixed interpreter frame. This is identical setup for
+// interpreted methods and for native methods hence the shared code.
+//
+// Args:
+//      rax: return address
+//      rbx: Method*
+//      r14/rdi: pointer to locals
+//      r13/rsi: sender sp
+//      rdx: cp cache
+void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
+  // initialize fixed part of activation frame
+  __ push(rax);        // save return address
+  __ enter();          // save old & set new rbp
+  __ push(rbcp);        // set sender sp
+  __ push((int)NULL_WORD); // leave last_sp as null
+  __ movptr(rbcp, Address(rbx, Method::const_offset()));      // get ConstMethod*
+  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
+  __ push(rbx);        // save Method*
+  if (ProfileInterpreter) {
+    Label method_data_continue;
+    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
+    __ testptr(rdx, rdx);
+    __ jcc(Assembler::zero, method_data_continue);
+    __ addptr(rdx, in_bytes(MethodData::data_offset()));
+    __ bind(method_data_continue);
+    __ push(rdx);      // set the mdp (method data pointer)
+  } else {
+    __ push(0);
+  }
+
+  __ movptr(rdx, Address(rbx, Method::const_offset()));
+  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
+  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
+  __ push(rdx); // set constant pool cache
+  __ push(rlocals); // set locals pointer
+  if (native_call) {
+    __ push(0); // no bcp
+  } else {
+    __ push(rbcp); // set bcp
+  }
+  __ push(0); // reserve word for pointer to expression stack bottom
+  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
+}
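
For reference, the fixed frame laid down above, from first push to last (slot names follow the frame::interpreter_frame_*_offset constants used in the code):

// [ return address          ]  <-- pushed rax
// [ saved rbp               ]  <-- rbp after enter()
// [ sender sp               ]
// [ last_sp (NULL)          ]
// [ Method*                 ]
// [ mdp or 0                ]
// [ ConstantPoolCache*      ]
// [ locals pointer          ]
// [ bcp (0 for native)      ]
// [ expression stack bottom ]  <-- rsp; the slot stores rsp itself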
+
+// End of helpers
+
+// Method entry for java.lang.ref.Reference.get.
+address InterpreterGenerator::generate_Reference_get_entry(void) {
+#if INCLUDE_ALL_GCS
+  // Code: _aload_0, _getfield, _areturn
+  // parameter size = 1
+  //
+  // The code that gets generated by this routine is split into 2 parts:
+  //    1. The "intrinsified" code for G1 (or any SATB based GC),
+  //    2. The slow path - which is an expansion of the regular method entry.
+  //
+  // Notes:
+  // * In the G1 code we do not check whether we need to block for
+  //   a safepoint. If G1 is enabled then we must execute the specialized
+  //   code for Reference.get (except when the Reference object is null)
+  //   so that we can log the value in the referent field with an SATB
+  //   update buffer.
+  //   If the code for the getfield template is modified so that the
+  //   G1 pre-barrier code is executed when the current method is
+  //   Reference.get() then going through the normal method entry
+  //   will be fine.
+  // * The G1 code can, however, check the receiver object (the instance
+  //   of java.lang.Reference) and jump to the slow path if null. If the
+  //   Reference object is null then we obviously cannot fetch the referent
+  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
+  //   regular method entry code to generate the NPE.
+  //
+  // rbx: Method*
+
+  // r13: senderSP must be preserved for the slow path; set SP to it on the fast path
+
+  address entry = __ pc();
+
+  const int referent_offset = java_lang_ref_Reference::referent_offset;
+  guarantee(referent_offset > 0, "referent offset not initialized");
+
+  if (UseG1GC) {
+    Label slow_path;
+    // rbx: method
+
+    // Check if local 0 != NULL
+    // If the receiver is null then it is OK to jump to the slow path.
+    __ movptr(rax, Address(rsp, wordSize));
+
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, slow_path);
+
+    // rax: local 0
+    // rbx: method (but can be used as scratch now)
+    // rdx: scratch
+    // rdi: scratch
+
+    // Preserve the sender sp in case the pre-barrier
+    // calls the runtime
+    NOT_LP64(__ push(rsi));
+
+    // Generate the G1 pre-barrier code to log the value of
+    // the referent field in an SATB buffer.
+
+    // Load the value of the referent field.
+    const Address field_address(rax, referent_offset);
+    __ load_heap_oop(rax, field_address);
+
+    const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
+    const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
+    NOT_LP64(__ get_thread(thread));
+
+    __ g1_write_barrier_pre(noreg /* obj */,
+                            rax /* pre_val */,
+                            thread /* thread */,
+                            rbx /* tmp */,
+                            true /* tosca_live */,
+                            true /* expand_call */);
+
+    // _areturn
+    NOT_LP64(__ pop(rsi));      // get sender sp
+    __ pop(rdi);                // get return address
+    __ mov(rsp, sender_sp);     // set sp to sender sp
+    __ jmp(rdi);
+    __ ret(0);
+
+    // generate a vanilla interpreter entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
+    return entry;
+  }
+#endif // INCLUDE_ALL_GCS
+
+  // If G1 is not enabled then attempt to go through the accessor entry point
+  // Reference.get is an accessor
+  return NULL;
+}
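+
+// The generated pre-barrier is roughly equivalent to the following sketch
+// (illustrative only; the fast path appends to the thread's SATB queue,
+// the slow path calls into the runtime):
+//
+//   if (marking_is_active && pre_val != NULL) {
+//     if (satb_queue_index > 0) {
+//       satb_queue_index -= wordSize;
+//       satb_queue_buffer[satb_queue_index] = pre_val;  // enqueue referent
+//     } else {
+//       SharedRuntime::g1_wb_pre(pre_val, thread);      // slow path
+//     }
+//   }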
+
+// Interpreter stub for calling a native method. (asm interpreter)
+// This sets up a somewhat different looking stack for calling the
+// native method than the typical interpreter frame setup.
+address InterpreterGenerator::generate_native_entry(bool synchronized) {
+  // determine code generation flags
+  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+
+  // rbx: Method*
+  // rbcp: sender sp
+
+  address entry_point = __ pc();
+
+  const Address constMethod       (rbx, Method::const_offset());
+  const Address access_flags      (rbx, Method::access_flags_offset());
+  const Address size_of_parameters(rcx, ConstMethod::
+                                        size_of_parameters_offset());
+
+
+  // get parameter size (always needed)
+  __ movptr(rcx, constMethod);
+  __ load_unsigned_short(rcx, size_of_parameters);
+
+  // native calls don't need the stack size check since they have no
+  // expression stack, the arguments are already on the stack, and we
+  // only add a handful of words to the stack
+
+  // rbx: Method*
+  // rcx: size of parameters
+  // rbcp: sender sp
+  __ pop(rax);                                       // get return address
+
+  // for natives the size of locals is zero
+
+  // compute beginning of parameters
+  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
+
+  // add 2 zero-initialized slots for native calls
+  // initialize result_handler slot
+  __ push((int) NULL_WORD);
+  // slot for oop temp
+  // (static native method holder mirror/jni oop result)
+  __ push((int) NULL_WORD);
+
+  // initialize fixed part of activation frame
+  generate_fixed_frame(true);
+
+  // make sure method is native & not abstract
+#ifdef ASSERT
+  __ movl(rax, access_flags);
+  {
+    Label L;
+    __ testl(rax, JVM_ACC_NATIVE);
+    __ jcc(Assembler::notZero, L);
+    __ stop("tried to execute non-native method as native");
+    __ bind(L);
+  }
+  {
+    Label L;
+    __ testl(rax, JVM_ACC_ABSTRACT);
+    __ jcc(Assembler::zero, L);
+    __ stop("tried to execute abstract method in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // Since at this point in the method invocation the exception handler
+  // would try to exit the monitor of a synchronized method which has not
+  // been entered yet, we set the thread local variable
+  // _do_not_unlock_if_synchronized to true. The remove_activation will
+  // check this flag.
+
+  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
+  NOT_LP64(__ get_thread(thread1));
+  const Address do_not_unlock_if_synchronized(thread1,
+        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
+  __ movbool(do_not_unlock_if_synchronized, true);
+
+  // increment invocation count & check for overflow
+  Label invocation_counter_overflow;
+  if (inc_counter) {
+    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
+  }
+
+  Label continue_after_compile;
+  __ bind(continue_after_compile);
+
+  bang_stack_shadow_pages(true);
+
+  // reset the _do_not_unlock_if_synchronized flag
+  NOT_LP64(__ get_thread(thread1));
+  __ movbool(do_not_unlock_if_synchronized, false);
+
+  // check for synchronized methods
+  // Must happen AFTER invocation_counter check and stack overflow check,
+  // so the method is not locked if the counter overflows.
+  if (synchronized) {
+    lock_method();
+  } else {
+    // no synchronization necessary
+#ifdef ASSERT
+    {
+      Label L;
+      __ movl(rax, access_flags);
+      __ testl(rax, JVM_ACC_SYNCHRONIZED);
+      __ jcc(Assembler::zero, L);
+      __ stop("method needs synchronization");
+      __ bind(L);
+    }
+#endif
+  }
+
+  // start execution
+#ifdef ASSERT
+  {
+    Label L;
+    const Address monitor_block_top(rbp,
+                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
+    __ movptr(rax, monitor_block_top);
+    __ cmpptr(rax, rsp);
+    __ jcc(Assembler::equal, L);
+    __ stop("broken stack frame setup in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // jvmti support
+  __ notify_method_entry();
+
+  // work registers
+  const Register method = rbx;
+  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
+  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);
+
+  // allocate space for parameters
+  __ get_method(method);
+  __ movptr(t, Address(method, Method::const_offset()));
+  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
+
+#ifndef _LP64
+  __ shlptr(t, Interpreter::logStackElementSize);
+  __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
+  __ subptr(rsp, t);
+  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
+#else
+  __ shll(t, Interpreter::logStackElementSize);
+
+  __ subptr(rsp, t);
+  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
+#endif // _LP64
+
+  // get signature handler
+  {
+    Label L;
+    __ movptr(t, Address(method, Method::signature_handler_offset()));
+    __ testptr(t, t);
+    __ jcc(Assembler::notZero, L);
+    __ call_VM(noreg,
+               CAST_FROM_FN_PTR(address,
+                                InterpreterRuntime::prepare_native_call),
+               method);
+    __ get_method(method);
+    __ movptr(t, Address(method, Method::signature_handler_offset()));
+    __ bind(L);
+  }
+
+  // call signature handler
+  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
+         "adjust this code");
+  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
+         "adjust this code");
+  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
+         "adjust this code");
+
+  // The generated handlers do not touch RBX (the method oop).
+  // However, large signatures cannot be cached and are generated
+  // each time here.  The slow-path generator can do a GC on return,
+  // so we must reload it after the call.
+  __ call(t);
+  __ get_method(method);        // slow path can do a GC, reload RBX
+
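+  // Roughly, a signature handler copies the Java arguments from the
+  // locals area (rlocals) into the native ABI locations (argument
+  // registers and outgoing stack slots at rsp), skipping the slots
+  // reserved above for JNIEnv* and the optional mirror, and leaves the
+  // method's result handler in rax.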
+
+  // result handler is in rax
+  // set result handler
+  __ movptr(Address(rbp,
+                    (frame::interpreter_frame_result_handler_offset) * wordSize),
+            rax);
+
+  // pass mirror handle if static call
+  {
+    Label L;
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+    __ movl(t, Address(method, Method::access_flags_offset()));
+    __ testl(t, JVM_ACC_STATIC);
+    __ jcc(Assembler::zero, L);
+    // get mirror
+    __ movptr(t, Address(method, Method::const_offset()));
+    __ movptr(t, Address(t, ConstMethod::constants_offset()));
+    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
+    __ movptr(t, Address(t, mirror_offset));
+    // copy mirror into activation frame
+    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
+            t);
+    // pass handle to mirror
+#ifndef _LP64
+    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
+    __ movptr(Address(rsp, wordSize), t);
+#else
+    __ lea(c_rarg1,
+           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
+#endif // _LP64
+    __ bind(L);
+  }
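+
+  // From the native side the mirror handle becomes the jclass parameter of
+  // a static native method, e.g. (illustrative signature only):
+  //
+  //   JNIEXPORT jint JNICALL Java_Foo_bar(JNIEnv* env, jclass clazz, ...);
+  //
+  // Passing the address of the oop_temp slot (rather than the oop itself)
+  // gives the native code a handle that stays valid if GC updates the oop.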
+
+  // get native function entry point
+  {
+    Label L;
+    __ movptr(rax, Address(method, Method::native_function_offset()));
+    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
+    __ cmpptr(rax, unsatisfied.addr());
+    __ jcc(Assembler::notEqual, L);
+    __ call_VM(noreg,
+               CAST_FROM_FN_PTR(address,
+                                InterpreterRuntime::prepare_native_call),
+               method);
+    __ get_method(method);
+    __ movptr(rax, Address(method, Method::native_function_offset()));
+    __ bind(L);
+  }
+
+  // pass JNIEnv
+#ifndef _LP64
+   __ get_thread(thread);
+   __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
+   __ movptr(Address(rsp, 0), t);
+
+   // set_last_Java_frame_before_call
+   // It is enough that the pc()
+   // points into the right code segment. It does not have to be the correct return pc.
+   __ set_last_Java_frame(thread, noreg, rbp, __ pc());
+#else
+   __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));
+
+   // It is enough that the pc() points into the right code
+   // segment. It does not have to be the correct return pc.
+   __ set_last_Java_frame(rsp, rbp, (address) __ pc());
+#endif // _LP64
+
+  // change thread state
+#ifdef ASSERT
+  {
+    Label L;
+    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
+    __ cmpl(t, _thread_in_Java);
+    __ jcc(Assembler::equal, L);
+    __ stop("Wrong thread state in native stub");
+    __ bind(L);
+  }
+#endif
+
+  // Change state to native
+
+  __ movl(Address(thread, JavaThread::thread_state_offset()),
+          _thread_in_native);
+
+  // Call the native method.
+  __ call(rax);
+  // 32: result potentially in rdx:rax or ST0
+  // 64: result potentially in rax or xmm0
+
+  // Verify or restore cpu control state after JNI call
+  __ restore_cpu_control_state_after_jni();
+
+  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
+  // in order to extract the result of a method call. If the order of these
+  // pushes change or anything else is added to the stack then the code in
+  // interpreter_frame_result must also change.
+
+#ifndef _LP64
+  // save potential result in ST(0) & rdx:rax
+  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
+  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
+  // It is safe to do this push because state is _thread_in_native and return address will be found
+  // via _last_native_pc and not via _last_java_sp
+
+  // NOTE: the order of these pushes is known to frame::interpreter_frame_result.
+  // If the order changes or anything else is added to the stack the code in
+  // interpreter_frame_result will have to be changed.
+
+  { Label L;
+    Label push_double;
+    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
+    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
+    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
+              float_handler.addr());
+    __ jcc(Assembler::equal, push_double);
+    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
+              double_handler.addr());
+    __ jcc(Assembler::notEqual, L);
+    __ bind(push_double);
+    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
+    __ bind(L);
+  }
+#else
+  __ push(dtos);
+#endif // _LP64
+
+  __ push(ltos);
+
+  // change thread state
+  NOT_LP64(__ get_thread(thread));
+  __ movl(Address(thread, JavaThread::thread_state_offset()),
+          _thread_in_native_trans);
+
+  if (os::is_MP()) {
+    if (UseMembar) {
+      // Force this write out before the read below
+      __ membar(Assembler::Membar_mask_bits(
+           Assembler::LoadLoad | Assembler::LoadStore |
+           Assembler::StoreLoad | Assembler::StoreStore));
+    } else {
+      // Write serialization page so VM thread can do a pseudo remote membar.
+      // We use the current thread pointer to calculate a thread specific
+      // offset to write to within the page. This minimizes bus traffic
+      // due to cache line collision.
+      __ serialize_memory(thread, rcx);
+    }
+  }
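+
+  // The serialization page works roughly like this (sketch): each thread
+  // writes to a thread-specific offset in a shared page; when the VM thread
+  // needs a remote barrier it protects and unprotects that page, which
+  // forces any in-flight writes to it to complete before the VM thread
+  // reads the thread states:
+  //
+  //   // mutator (here)                  // VM thread
+  //   state = _thread_in_native_trans;   protect(serialize_page);
+  //   serialize_page[hash(self)] = 1;    unprotect(serialize_page);
+  //                                      // now safe to read 'state'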
+
+#ifndef _LP64
+  if (AlwaysRestoreFPU) {
+    //  Make sure the control word is correct.
+    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
+  }
+#endif // _LP64
+
+  // check for safepoint operation in progress and/or pending suspend requests
+  {
+    Label Continue;
+    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+             SafepointSynchronize::_not_synchronized);
+
+    Label L;
+    __ jcc(Assembler::notEqual, L);
+    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
+    __ jcc(Assembler::equal, Continue);
+    __ bind(L);
+
+    // Don't use call_VM as it will see a possible pending exception
+    // and forward it and never return here preventing us from
+    // clearing _last_native_pc down below.  Also can't use
+    // call_VM_leaf either as it will check to see if r13 & r14 are
+    // preserved and correspond to the bcp/locals pointers. So we do a
+    // runtime call by hand.
+    //
+#ifndef _LP64
+    __ push(thread);
+    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
+                                            JavaThread::check_special_condition_for_native_trans)));
+    __ increment(rsp, wordSize);
+    __ get_thread(thread);
+#else
+    __ mov(c_rarg0, r15_thread);
+    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
+    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+    __ andptr(rsp, -16); // align stack as required by ABI
+    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
+    __ mov(rsp, r12); // restore sp
+    __ reinit_heapbase();
+#endif // _LP64
+    __ bind(Continue);
+  }
+
+  // change thread state
+  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
+
+  // reset_last_Java_frame
+  __ reset_last_Java_frame(thread, true, true);
+
+  // reset handle block
+  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
+  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
+
+  // If result is an oop unbox and store it in frame where gc will see it
+  // and result handler will pick it up
+
+  {
+    Label no_oop, store_result;
+    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
+    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
+    __ jcc(Assembler::notEqual, no_oop);
+    // retrieve result
+    __ pop(ltos);
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, store_result);
+    __ movptr(rax, Address(rax, 0));
+    __ bind(store_result);
+    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
+    // keep stack depth as expected by pushing oop which will eventually be discarded
+    __ push(ltos);
+    __ bind(no_oop);
+  }
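+
+  // Note: a native method returns an oop as a JNI handle (an oop*), which
+  // is why a non-NULL rax is dereferenced once above; a NULL handle simply
+  // stays a NULL oop.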
+
+
+  {
+    Label no_reguard;
+    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
+            JavaThread::stack_guard_yellow_disabled);
+    __ jcc(Assembler::notEqual, no_reguard);
+
+    __ pusha(); // XXX only save smashed registers
+#ifndef _LP64
+    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
+    __ popa();
+#else
+    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
+    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+    __ andptr(rsp, -16); // align stack as required by ABI
+    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
+    __ mov(rsp, r12); // restore sp
+    __ popa(); // XXX only restore smashed registers
+    __ reinit_heapbase();
+#endif // _LP64
+
+    __ bind(no_reguard);
+  }
+
+
+  // The method register is junk from after the thread_in_native transition
+  // until here.  Also can't call_VM until the bcp has been
+  // restored.  Need bcp for throwing exception below so get it now.
+  __ get_method(method);
+
+  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
+  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
+  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));    // get codebase
+
+  // handle exceptions (exception handling will handle unlocking!)
+  {
+    Label L;
+    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
+    __ jcc(Assembler::zero, L);
+    // Note: At some point we may want to unify this with the code
+    // used in call_VM_base(); i.e., we should use the
+    // StubRoutines::forward_exception code. For now this doesn't work
+    // here because the rsp is not correctly set at this point.
+    __ MacroAssembler::call_VM(noreg,
+                               CAST_FROM_FN_PTR(address,
+                               InterpreterRuntime::throw_pending_exception));
+    __ should_not_reach_here();
+    __ bind(L);
+  }
+
+  // do unlocking if necessary
+  {
+    Label L;
+    __ movl(t, Address(method, Method::access_flags_offset()));
+    __ testl(t, JVM_ACC_SYNCHRONIZED);
+    __ jcc(Assembler::zero, L);
+    // the code below should be shared with interpreter macro
+    // assembler implementation
+    {
+      Label unlock;
+      // BasicObjectLock will be first in list, since this is a
+      // synchronized method. However, need to check that the object
+      // has not been unlocked by an explicit monitorexit bytecode.
+      const Address monitor(rbp,
+                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
+                                       wordSize - (int)sizeof(BasicObjectLock)));
+
+      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
+
+      // the monitor is expected in regmon (c_rarg1 on LP64) for the slow unlock path
+      __ lea(regmon, monitor); // address of first monitor
+
+      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
+      __ testptr(t, t);
+      __ jcc(Assembler::notZero, unlock);
+
+      // Entry already unlocked, need to throw exception
+      __ MacroAssembler::call_VM(noreg,
+                                 CAST_FROM_FN_PTR(address,
+                   InterpreterRuntime::throw_illegal_monitor_state_exception));
+      __ should_not_reach_here();
+
+      __ bind(unlock);
+      __ unlock_object(regmon);
+    }
+    __ bind(L);
+  }
+
+  // jvmti support
+  // Note: This must happen _after_ handling/throwing any exceptions since
+  //       the exception handler code notifies the runtime of method exits
+  //       too. If this happens before, method entry/exit notifications are
+  //       not properly paired (was bug - gri 11/22/99).
+  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
+
+  // restore potential result in edx:eax, call result handler to
+  // restore potential result in ST0 & handle result
+
+  __ pop(ltos);
+  LP64_ONLY( __ pop(dtos));
+
+  __ movptr(t, Address(rbp,
+                       (frame::interpreter_frame_result_handler_offset) * wordSize));
+  __ call(t);
+
+  // remove activation
+  __ movptr(t, Address(rbp,
+                       frame::interpreter_frame_sender_sp_offset *
+                       wordSize)); // get sender sp
+  __ leave();                                // remove frame anchor
+  __ pop(rdi);                               // get return address
+  __ mov(rsp, t);                            // set sp to sender sp
+  __ jmp(rdi);
+
+  if (inc_counter) {
+    // Handle overflow of counter and compile method
+    __ bind(invocation_counter_overflow);
+    generate_counter_overflow(&continue_after_compile);
+  }
+
+  return entry_point;
+}
+
+//
+// Generic interpreted method entry to (asm) interpreter
+//
+address InterpreterGenerator::generate_normal_entry(bool synchronized) {
+  // determine code generation flags
+  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
+
+  // ebx: Method*
+  // rbcp: sender sp
+  address entry_point = __ pc();
+
+  const Address constMethod(rbx, Method::const_offset());
+  const Address access_flags(rbx, Method::access_flags_offset());
+  const Address size_of_parameters(rdx,
+                                   ConstMethod::size_of_parameters_offset());
+  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
+
+
+  // get parameter size (always needed)
+  __ movptr(rdx, constMethod);
+  __ load_unsigned_short(rcx, size_of_parameters);
+
+  // rbx: Method*
+  // rcx: size of parameters
+  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i)
+
+  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
+  __ subl(rdx, rcx); // rdx = no. of additional locals
+
+
+  // see if we've got enough room on the stack for locals plus overhead.
+  generate_stack_overflow_check();
+
+  // get return address
+  __ pop(rax);
+
+  // compute beginning of parameters
+  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
+
+  // rdx - # of additional locals
+  // allocate space for locals
+  // explicitly initialize locals
+  {
+    Label exit, loop;
+    __ testl(rdx, rdx);
+    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
+    __ bind(loop);
+    __ push((int) NULL_WORD); // initialize local variables
+    __ decrementl(rdx); // until everything initialized
+    __ jcc(Assembler::greater, loop);
+    __ bind(exit);
+  }
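+
+  // Example: a method with 2 parameter slots and 5 local slots arrives
+  // here with rdx = 5 - 2 = 3, so the loop pushes three NULL_WORDs; the
+  // non-parameter locals must be zeroed so GC never sees stale oops.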
+
+  // initialize fixed part of activation frame
+  generate_fixed_frame(false);
+
+  // make sure method is not native & not abstract
+#ifdef ASSERT
+  __ movl(rax, access_flags);
+  {
+    Label L;
+    __ testl(rax, JVM_ACC_NATIVE);
+    __ jcc(Assembler::zero, L);
+    __ stop("tried to execute native method as non-native");
+    __ bind(L);
+  }
+  {
+    Label L;
+    __ testl(rax, JVM_ACC_ABSTRACT);
+    __ jcc(Assembler::zero, L);
+    __ stop("tried to execute abstract method in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // Since at this point in the method invocation the exception
+  // handler would try to exit the monitor of a synchronized method
+  // which has not been entered yet, we set the thread local variable
+  // _do_not_unlock_if_synchronized to true. The remove_activation
+  // will check this flag.
+
+  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
+  NOT_LP64(__ get_thread(thread));
+  const Address do_not_unlock_if_synchronized(thread,
+        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
+  __ movbool(do_not_unlock_if_synchronized, true);
+
+  __ profile_parameters_type(rax, rcx, rdx);
+  // increment invocation count & check for overflow
+  Label invocation_counter_overflow;
+  Label profile_method;
+  Label profile_method_continue;
+  if (inc_counter) {
+    generate_counter_incr(&invocation_counter_overflow,
+                          &profile_method,
+                          &profile_method_continue);
+    if (ProfileInterpreter) {
+      __ bind(profile_method_continue);
+    }
+  }
+
+  Label continue_after_compile;
+  __ bind(continue_after_compile);
+
+  // bang the stack shadow pages
+  bang_stack_shadow_pages(false);
+
+  // reset the _do_not_unlock_if_synchronized flag
+  NOT_LP64(__ get_thread(thread));
+  __ movbool(do_not_unlock_if_synchronized, false);
+
+  // check for synchronized methods
+  // Must happen AFTER invocation_counter check and stack overflow check,
+  // so the method is not locked if the counter overflows.
+  if (synchronized) {
+    // Allocate monitor and lock method
+    lock_method();
+  } else {
+    // no synchronization necessary
+#ifdef ASSERT
+    {
+      Label L;
+      __ movl(rax, access_flags);
+      __ testl(rax, JVM_ACC_SYNCHRONIZED);
+      __ jcc(Assembler::zero, L);
+      __ stop("method needs synchronization");
+      __ bind(L);
+    }
+#endif
+  }
+
+  // start execution
+#ifdef ASSERT
+  {
+    Label L;
+    const Address monitor_block_top(rbp,
+                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
+    __ movptr(rax, monitor_block_top);
+    __ cmpptr(rax, rsp);
+    __ jcc(Assembler::equal, L);
+    __ stop("broken stack frame setup in interpreter");
+    __ bind(L);
+  }
+#endif
+
+  // jvmti support
+  __ notify_method_entry();
+
+  __ dispatch_next(vtos);
+
+  // invocation counter overflow
+  if (inc_counter) {
+    if (ProfileInterpreter) {
+      // We have decided to profile this method in the interpreter
+      __ bind(profile_method);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+      __ set_method_data_pointer_for_bcp();
+      __ get_method(rbx);
+      __ jmp(profile_method_continue);
+    }
+    // Handle overflow of counter and compile method
+    __ bind(invocation_counter_overflow);
+    generate_counter_overflow(&continue_after_compile);
+  }
+
+  return entry_point;
+}
+
+//-----------------------------------------------------------------------------
+// Exceptions
+
+void TemplateInterpreterGenerator::generate_throw_exception() {
+  // Entry point in previous activation (i.e., if the caller was
+  // interpreted)
+  Interpreter::_rethrow_exception_entry = __ pc();
+  // Restore sp to interpreter_frame_last_sp even though we are going
+  // to empty the expression stack for the exception processing.
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+  // rax: exception
+  // rdx: return address/pc that threw exception
+  __ restore_bcp();    // r13/rsi points to call/send
+  __ restore_locals();
+  LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
+  // Entry point for exceptions thrown within interpreter code
+  Interpreter::_throw_exception_entry = __ pc();
+  // expression stack is undefined here
+  // rax: exception
+  // r13/rsi: exception bcp
+  __ verify_oop(rax);
+  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
+  LP64_ONLY(__ mov(c_rarg1, rax));
+
+  // expression stack must be empty before entering the VM in case of
+  // an exception
+  __ empty_expression_stack();
+  // find exception handler address and preserve exception oop
+  __ call_VM(rdx,
+             CAST_FROM_FN_PTR(address,
+                          InterpreterRuntime::exception_handler_for_exception),
+             rarg);
+  // rax: exception handler entry point
+  // rdx: preserved exception oop
+  // r13/rsi: bcp for exception handler
+  __ push_ptr(rdx); // push exception which is now the only value on the stack
+  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)
+
+  // If the exception is not handled in the current frame the frame is
+  // removed and the exception is rethrown (i.e. exception
+  // continuation is _rethrow_exception).
+  //
+  // Note: At this point the bci is still the bci for the instruction
+  // which caused the exception and the expression stack is
+  // empty. Thus, for any VM calls at this point, GC will find a legal
+  // oop map (with empty expression stack).
+
+  // In current activation
+  // tos: exception
+  // r13/rsi: exception bcp
+
+  //
+  // JVMTI PopFrame support
+  //
+
+  Interpreter::_remove_activation_preserving_args_entry = __ pc();
+  __ empty_expression_stack();
+  // Set the popframe_processing bit in pending_popframe_condition
+  // indicating that we are currently handling popframe, so that
+  // call_VMs that may happen later do not trigger new popframe
+  // handling cycles.
+  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
+  NOT_LP64(__ get_thread(thread));
+  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
+  __ orl(rdx, JavaThread::popframe_processing_bit);
+  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);
+
+  {
+    // Check to see whether we are returning to a deoptimized frame.
+    // (The PopFrame call ensures that the caller of the popped frame is
+    // either interpreted or compiled and deoptimizes it if compiled.)
+    // In this case, we can't call dispatch_next() after the frame is
+    // popped, but instead must save the incoming arguments and restore
+    // them after deoptimization has occurred.
+    //
+    // Note that we don't compare the return PC against the
+    // deoptimization blob's unpack entry because of the presence of
+    // adapter frames in C2.
+    Label caller_not_deoptimized;
+    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
+    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
+                               InterpreterRuntime::interpreter_contains), rarg);
+    __ testl(rax, rax);
+    __ jcc(Assembler::notZero, caller_not_deoptimized);
+
+    // Compute size of arguments for saving when returning to
+    // deoptimized caller
+    __ get_method(rax);
+    __ movptr(rax, Address(rax, Method::const_offset()));
+    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
+                                                size_of_parameters_offset())));
+    __ shll(rax, Interpreter::logStackElementSize);
+    __ restore_locals();
+    __ subptr(rlocals, rax);
+    __ addptr(rlocals, wordSize);
+    // Save these arguments
+    NOT_LP64(__ get_thread(thread));
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
+                                           Deoptimization::
+                                           popframe_preserve_args),
+                          thread, rax, rlocals);
+
+    __ remove_activation(vtos, rdx,
+                         /* throw_monitor_exception */ false,
+                         /* install_monitor_exception */ false,
+                         /* notify_jvmdi */ false);
+
+    // Inform deoptimization that it is responsible for restoring
+    // these arguments
+    NOT_LP64(__ get_thread(thread));
+    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
+            JavaThread::popframe_force_deopt_reexecution_bit);
+
+    // Continue in deoptimization handler
+    __ jmp(rdx);
+
+    __ bind(caller_not_deoptimized);
+  }
+
+  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
+                       /* throw_monitor_exception */ false,
+                       /* install_monitor_exception */ false,
+                       /* notify_jvmdi */ false);
+
+  // Finish with popframe handling
+  // A previous I2C followed by a deoptimization might have moved the
+  // outgoing arguments further up the stack. PopFrame expects the
+  // mutations to those outgoing arguments to be preserved and other
+  // constraints basically require this frame to look exactly as
+  // though it had previously invoked an interpreted activation with
+  // no space between the top of the expression stack (current
+  // last_sp) and the top of stack. Rather than force deopt to
+  // maintain this kind of invariant all the time we call a small
+  // fixup routine to move the mutated arguments onto the top of our
+  // expression stack if necessary.
+#ifndef _LP64
+  __ mov(rax, rsp);
+  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+  __ get_thread(thread);
+  // PC must point into interpreter here
+  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
+  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
+  __ get_thread(thread);
+#else
+  __ mov(c_rarg1, rsp);
+  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+  // PC must point into interpreter here
+  __ set_last_Java_frame(noreg, rbp, __ pc());
+  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
+#endif
+  __ reset_last_Java_frame(thread, true, true);
+
+  // Restore the last_sp and null it out
+  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+
+  __ restore_bcp();
+  __ restore_locals();
+  // The method data pointer was incremented already during
+  // call profiling. We have to restore the mdp for the current bcp.
+  if (ProfileInterpreter) {
+    __ set_method_data_pointer_for_bcp();
+  }
+
+  // Clear the popframe condition flag
+  NOT_LP64(__ get_thread(thread));
+  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
+          JavaThread::popframe_inactive);
+
+#if INCLUDE_JVMTI
+  {
+    Label L_done;
+    const Register local0 = rlocals;
+
+    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
+    __ jcc(Assembler::notEqual, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+
+    __ get_method(rdx);
+    __ movptr(rax, Address(local0, 0));
+    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);
+
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, L_done);
+
+    __ movptr(Address(local0, 0), rax);
+    __ bind(L_done);
+  }
+#endif // INCLUDE_JVMTI
+
+  __ dispatch_next(vtos);
+  // end of PopFrame support
+
+  Interpreter::_remove_activation_entry = __ pc();
+
+  // preserve exception over this code sequence
+  __ pop_ptr(rax);
+  NOT_LP64(__ get_thread(thread));
+  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
+  // remove the activation (without doing throws on illegalMonitorExceptions)
+  __ remove_activation(vtos, rdx, false, true, false);
+  // restore exception
+  NOT_LP64(__ get_thread(thread));
+  __ get_vm_result(rax, thread);
+
+  // In between activations - previous activation type unknown yet
+  // compute continuation point - the continuation point expects the
+  // following registers set up:
+  //
+  // rax: exception
+  // rdx: return address/pc that threw exception
+  // rsp: expression stack of caller
+  // rbp: ebp of caller
+  __ push(rax);                                  // save exception
+  __ push(rdx);                                  // save return address
+  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
+                          SharedRuntime::exception_handler_for_return_address),
+                        thread, rdx);
+  __ mov(rbx, rax);                              // save exception handler
+  __ pop(rdx);                                   // restore return address
+  __ pop(rax);                                   // restore exception
+  // Note that an "issuing PC" is actually the next PC after the call
+  __ jmp(rbx);                                   // jump to exception
+                                                 // handler of caller
+}
+
+
+//
+// JVMTI ForceEarlyReturn support
+//
+address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
+  address entry = __ pc();
+
+  __ restore_bcp();
+  __ restore_locals();
+  __ empty_expression_stack();
+  __ load_earlyret_value(state);  // 32-bit returns the value in rdx, so don't reuse it
+
+  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
+  NOT_LP64(__ get_thread(thread));
+  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
+  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
+
+  // Clear the earlyret state
+  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
+
+  __ remove_activation(state, rsi,
+                       false, /* throw_monitor_exception */
+                       false, /* install_monitor_exception */
+                       true); /* notify_jvmdi */
+  __ jmp(rsi);
+
+  return entry;
+} // end of ForceEarlyReturn support
+
+
+//-----------------------------------------------------------------------------
+// Helper for vtos entry point generation
+
+void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
+                                                         address& bep,
+                                                         address& cep,
+                                                         address& sep,
+                                                         address& aep,
+                                                         address& iep,
+                                                         address& lep,
+                                                         address& fep,
+                                                         address& dep,
+                                                         address& vep) {
+  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
+  Label L;
+  aep = __ pc();  __ push_ptr();   __ jmp(L);
+#ifndef _LP64
+  fep = __ pc(); __ push(ftos); __ jmp(L);
+  dep = __ pc(); __ push(dtos); __ jmp(L);
+#else
+  fep = __ pc();  __ push_f(xmm0); __ jmp(L);
+  dep = __ pc();  __ push_d(xmm0); __ jmp(L);
+#endif // _LP64
+  lep = __ pc();  __ push_l();     __ jmp(L);
+  bep = cep = sep =
+  iep = __ pc();  __ push_i();
+  vep = __ pc();
+  __ bind(L);
+  generate_and_dispatch(t);
+}
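+
+// Example: a template that leaves an int in rax (itos) but dispatches to a
+// vtos entry enters through iep, which spills rax onto the expression stack
+// (push_i) and then falls through to the common vtos code at vep.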
+
+
+//-----------------------------------------------------------------------------
+// Generation of individual instructions
+
+// helpers for generate_and_dispatch
+
+
+InterpreterGenerator::InterpreterGenerator(StubQueue* code)
+  : TemplateInterpreterGenerator(code) {
+   generate_all(); // down here so it can be "virtual"
+}
+
+//-----------------------------------------------------------------------------
+
+// Non-product code
+#ifndef PRODUCT
+
+address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
+  address entry = __ pc();
+
+#ifndef _LP64
+  // prepare expression stack
+  __ pop(rcx);          // pop return address so expression stack is 'pure'
+  __ push(state);       // save tosca
+
+  // pass tosca registers as arguments & call tracer
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
+  __ mov(rcx, rax);     // make sure return address is not destroyed by pop(state)
+  __ pop(state);        // restore tosca
+
+  // return
+  __ jmp(rcx);
+#else
+  __ push(state);
+  __ push(c_rarg0);
+  __ push(c_rarg1);
+  __ push(c_rarg2);
+  __ push(c_rarg3);
+  __ mov(c_rarg2, rax);  // Pass itos
+#ifdef _WIN64
+  __ movflt(xmm3, xmm0); // Pass ftos
+#endif
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
+             c_rarg1, c_rarg2, c_rarg3);
+  __ pop(c_rarg3);
+  __ pop(c_rarg2);
+  __ pop(c_rarg1);
+  __ pop(c_rarg0);
+  __ pop(state);
+  __ ret(0);                                   // return from result handler
+#endif // _LP64
+
+  return entry;
+}
+
+void TemplateInterpreterGenerator::count_bytecode() {
+  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
+}
+
+void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
+  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
+}
+
+void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
+  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
+  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
+  __ orl(rbx,
+         ((int) t->bytecode()) <<
+         BytecodePairHistogram::log2_number_of_codes);
+  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
+  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
+  __ incrementl(Address(rscratch1, rbx, Address::times_4));
+}
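+
+// The pair index keeps the previous bytecode in the low bits and the current
+// one in the high bits; a sketch of the update performed above:
+//
+//   _index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes);
+//   _counters[_index]++;   // counts the (previous, current) bytecode pair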
+
+
+void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
+  // Call a little run-time stub to avoid blow-up for each bytecode.
+  // The run-time stub saves the right registers, depending on
+  // the tosca in-state for the given template.
+
+  assert(Interpreter::trace_code(t->tos_in()) != NULL,
+         "entry must have been generated");
+#ifndef _LP64
+  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
+#else
+  __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
+  __ andptr(rsp, -16); // align stack as required by ABI
+  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
+  __ mov(rsp, r12); // restore sp
+  __ reinit_heapbase();
+#endif // _LP64
+}
+
+
+void TemplateInterpreterGenerator::stop_interpreter_at() {
+  Label L;
+  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
+           StopInterpreterAt);
+  __ jcc(Assembler::notEqual, L);
+  __ int3();
+  __ bind(L);
+}
+#endif // !PRODUCT
+#endif // ! CC_INTERP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/templateInterpreterGenerator_x86_32.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "runtime/arguments.hpp"
+
+#define __ _masm->
+
+
+#ifndef CC_INTERP
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.update(int crc, int b)
+ */
+address InterpreterGenerator::generate_CRC32_update_entry() {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rbx: Method*
+    // rsi: senderSP must be preserved for the slow path; set SP to it on the fast path
+    // rdx: scratch
+    // rdi: scratch
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+             SafepointSynchronize::_not_synchronized);
+    __ jcc(Assembler::notEqual, slow_path);
+
+    // We don't generate a local frame and don't align the stack because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = rax;  // crc
+    const Register val = rdx;  // source java byte value
+    const Register tbl = rdi;  // scratch
+
+    // Arguments are reversed on java expression stack
+    __ movl(val, Address(rsp,   wordSize)); // byte value
+    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC
+
+    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
+    __ notl(crc); // ~crc
+    __ update_byte_crc32(crc, val, tbl);
+    __ notl(crc); // ~crc
+    // result in rax
+
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, rsi);           // set sp to sender sp
+    __ jmp(rdi);
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
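+
+// For reference, update_byte_crc32 performs the standard table-driven
+// CRC-32 step (a sketch; the surrounding notl instructions supply the
+// usual pre/post inversion):
+//
+//   crc = crc_table[(crc ^ b) & 0xff] ^ (crc >> 8);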
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
+ *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
+ */
+address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rbx: Method*
+    // rsi: senderSP must be preserved for the slow path; set SP to it on the fast path
+    // rdx: scratch
+    // rdi: scratch
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+             SafepointSynchronize::_not_synchronized);
+    __ jcc(Assembler::notEqual, slow_path);
+
+    // We don't generate a local frame and don't align the stack because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = rax;  // crc
+    const Register buf = rdx;  // source java byte array address
+    const Register len = rdi;  // length
+
+    // value              x86_32
+    // interp. arg ptr    ESP + 4
+    // int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
+    //                                         3           2      1        0
+    // int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
+    //                                              4         2,3      1        0
+
+    // Arguments are reversed on java expression stack
+    __ movl(len,   Address(rsp,   4 + 0)); // Length
+    // Calculate address of start element
+    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
+      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // long buf
+      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
+      __ movl(crc,   Address(rsp, 4 + 4 * wordSize)); // Initial CRC
+    } else {
+      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // byte[] array
+      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
+      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
+      __ movl(crc,   Address(rsp, 4 + 3 * wordSize)); // Initial CRC
+    }
+
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
+    // result in rax
+
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, rsi);           // set sp to sender sp
+    __ jmp(rdi);
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
+ *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
+ */
+address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  if (UseCRC32CIntrinsics) {
+    address entry = __ pc();
+    // Load parameters
+    const Register crc = rax;  // crc
+    const Register buf = rcx;  // source java byte array address
+    const Register len = rdx;  // length
+    const Register end = len;
+
+    // value              x86_32
+    // interp. arg ptr    ESP + 4
+    // int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
+    //                                          3           2      1        0
+    // int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
+    //                                               4         2,3          1        0
+
+    // Arguments are reversed on java expression stack
+    __ movl(end, Address(rsp, 4 + 0)); // end
+    __ subl(len, Address(rsp, 4 + 1 * wordSize));  // end - offset == length
+    // Calculate address of start element
+    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
+      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // long address
+      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
+      __ movl(crc, Address(rsp, 4 + 4 * wordSize)); // Initial CRC
+    } else {
+      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // byte[] array
+      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
+      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
+      __ movl(crc, Address(rsp, 4 + 3 * wordSize)); // Initial CRC
+    }
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()), crc, buf, len);
+    // result in rax
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, rsi);           // set sp to sender sp
+    __ jmp(rdi);
+
+    return entry;
+  }
+  return NULL;
+}
+
+/**
+ * Method entry for static native method:
+ *    java.lang.Float.intBitsToFloat(int bits)
+ */
+address InterpreterGenerator::generate_Float_intBitsToFloat_entry() {
+  if (UseSSE >= 1) {
+    address entry = __ pc();
+
+    // rsi: the sender's SP
+
+    // Skip safepoint check (compiler intrinsic versions of this method
+    // do not perform safepoint checks either).
+
+    // Load 'bits' into xmm0 (interpreter returns results in xmm0)
+    __ movflt(xmm0, Address(rsp, wordSize));
+
+    // Return
+    __ pop(rdi); // get return address
+    __ mov(rsp, rsi); // set rsp to the sender's SP
+    __ jmp(rdi);
+    return entry;
+  }
+
+  return NULL;
+}
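+
+// Example: Float.intBitsToFloat(0x3f800000) == 1.0f. The entry is a pure
+// bit move into xmm0; no conversion instruction is involved.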
+
+/**
+ * Method entry for static native method:
+ *    java.lang.Float.floatToRawIntBits(float value)
+ */
+address InterpreterGenerator::generate_Float_floatToRawIntBits_entry() {
+  if (UseSSE >= 1) {
+    address entry = __ pc();
+
+    // rsi: the sender's SP
+
+    // Skip safepoint check (compiler intrinsic versions of this method
+    // do not perform safepoint checks either).
+
+    // Load the parameter (a floating-point value) into rax.
+    __ movl(rax, Address(rsp, wordSize));
+
+    // Return
+    __ pop(rdi); // get return address
+    __ mov(rsp, rsi); // set rsp to the sender's SP
+    __ jmp(rdi);
+    return entry;
+  }
+
+  return NULL;
+}
+
+
+/**
+ * Method entry for static native method:
+ *    java.lang.Double.longBitsToDouble(long bits)
+ */
+address InterpreterGenerator::generate_Double_longBitsToDouble_entry() {
+  if (UseSSE >= 2) {
+    address entry = __ pc();
+
+    // rsi: the sender's SP
+
+    // Skip safepoint check (compiler intrinsic versions of this method
+    // do not perform safepoint checks either).
+
+    // Load 'bits' into xmm0 (interpreter returns results in xmm0)
+    __ movdbl(xmm0, Address(rsp, wordSize));
+
+    // Return
+    __ pop(rdi); // get return address
+    __ mov(rsp, rsi); // set rsp to the sender's SP
+    __ jmp(rdi);
+    return entry;
+  }
+
+  return NULL;
+}
+
+/**
+ * Method entry for static native method:
+ *    java.lang.Double.doubleToRawLongBits(double value)
+ */
+address InterpreterGenerator::generate_Double_doubleToRawLongBits_entry() {
+  if (UseSSE >= 2) {
+    address entry = __ pc();
+
+    // rsi: the sender's SP
+
+    // Skip safepoint check (compiler intrinsic versions of this method
+    // do not perform safepoint checks either).
+
+    // Load the parameter (a double) into rdx:rax.
+    __ movl(rdx, Address(rsp, 2*wordSize));
+    __ movl(rax, Address(rsp, wordSize));
+
+    // Return
+    __ pop(rdi); // get return address
+    __ mov(rsp, rsi); // set rsp to the sender's SP
+    __ jmp(rdi);
+    return entry;
+  }
+
+  return NULL;
+}
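+
+// Example: Double.doubleToRawLongBits(1.0) == 0x3ff0000000000000L; on
+// x86_32 the long result is returned in rdx:rax as 0x3ff00000:0x00000000,
+// matching the two loads above (high word into rdx, low word into rax).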
+#endif // CC_INTERP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/templateInterpreterGenerator_x86_64.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "runtime/arguments.hpp"
+
+#define __ _masm->
+
+#ifndef CC_INTERP
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.update(int crc, int b)
+ */
+address InterpreterGenerator::generate_CRC32_update_entry() {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rbx: Method*
+    // r13: senderSP must be preserved for the slow path; set SP to it on the fast path
+    // c_rarg0: scratch (rdi on non-Win64, rcx on Win64)
+    // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+             SafepointSynchronize::_not_synchronized);
+    __ jcc(Assembler::notEqual, slow_path);
+
+    // We don't generate a local frame and don't align the stack because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = rax;  // crc
+    const Register val = c_rarg0;  // source java byte value
+    const Register tbl = c_rarg1;  // scratch
+
+    // Arguments are reversed on java expression stack
+    __ movl(val, Address(rsp,   wordSize)); // byte value
+    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC
+
+    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
+    __ notl(crc); // ~crc
+    __ update_byte_crc32(crc, val, tbl);
+    __ notl(crc); // ~crc
+    // result in rax
+
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, r13);           // set sp to sender sp
+    __ jmp(rdi);
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
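
For reference, a hedged C++ sketch of the table-driven CRC-32 step that update_byte_crc32 emits, assuming the standard reflected table (polynomial 0xEDB88320); the two notl instructions above correspond to the leading and trailing complement:

#include <cstdint>

// One byte of CRC-32, zlib-style; 'tbl' is the 256-entry lookup table.
static uint32_t crc32_update(uint32_t crc, uint8_t b, const uint32_t tbl[256]) {
  crc = ~crc;                                // first notl(crc)
  crc = tbl[(crc ^ b) & 0xff] ^ (crc >> 8);  // the update_byte_crc32 step
  return ~crc;                               // second notl(crc)
}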
+
+/**
+ * Method entry for static native methods:
+ *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
+ *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
+ */
+address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  if (UseCRC32Intrinsics) {
+    address entry = __ pc();
+
+    // rbx: Method*
+    // r13: senderSP must be preserved for slow path, set SP to it on fast path
+
+    Label slow_path;
+    // If we need a safepoint check, generate full interpreter entry.
+    ExternalAddress state(SafepointSynchronize::address_of_state());
+    __ cmp32(state, SafepointSynchronize::_not_synchronized);
+    __ jcc(Assembler::notEqual, slow_path);
+
+    // We don't generate local frame and don't align stack because
+    // we call stub code and there is no safepoint on this path.
+
+    // Load parameters
+    const Register crc = c_rarg0;  // crc
+    const Register buf = c_rarg1;  // source java byte array address
+    const Register len = c_rarg2;  // length
+    const Register off = len;     // offset (aliases 'len'; live ranges never overlap)
+
+    // Arguments are reversed on java expression stack
+    // Calculate address of start element
+    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
+      __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
+      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
+      __ addq(buf, off); // + offset
+      __ movl(crc,   Address(rsp, 5*wordSize)); // Initial CRC
+    } else {
+      __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
+      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
+      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
+      __ addq(buf, off); // + offset
+      __ movl(crc,   Address(rsp, 4*wordSize)); // Initial CRC
+    }
+    // Can now load 'len' since we're finished with 'off'
+    __ movl(len, Address(rsp, wordSize)); // Length
+
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
+    // result in rax
+
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, r13);           // set sp to sender sp
+    __ jmp(rdi);
+
+    // generate a vanilla native entry as the slow path
+    __ bind(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
+    return entry;
+  }
+  return NULL;
+}
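
The wordSize offsets above follow the Java expression stack: on entry rsp holds the return address and the last argument sits nearest the top, and the two-slot jlong buf is what pushes the CRC from 4*wordSize up to 5*wordSize. An illustrative slot map, reconstructed from the loads:

// updateBytes(crc, b, off, len)        updateByteBuffer(crc, buf, off, len)
//   rsp + 1*wordSize : len               rsp + 1*wordSize   : len
//   rsp + 2*wordSize : off               rsp + 2*wordSize   : off
//   rsp + 3*wordSize : b (arrayoop)      rsp + 3,4*wordSize : buf (jlong, 2 slots)
//   rsp + 4*wordSize : crc               rsp + 5*wordSize   : crc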
+
+/**
+* Method entry for static native methods:
+*   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
+*   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
+*/
+address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
+  if (UseCRC32CIntrinsics) {
+    address entry = __ pc();
+    // Load parameters
+    const Register crc = c_rarg0;  // crc
+    const Register buf = c_rarg1;  // source java byte array address
+    const Register len = c_rarg2;  // length (reused as 'end' below)
+    const Register off = c_rarg3;  // offset
+    const Register end = len;      // exclusive end index, turned into a length
+
+    // Arguments are reversed on java expression stack
+    // Calculate address of start element
+    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
+      __ movptr(buf, Address(rsp, 3 * wordSize)); // long buf
+      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
+      __ addq(buf, off); // + offset
+      __ movl(crc, Address(rsp, 5 * wordSize)); // Initial CRC
+      // Note on 5 * wordSize vs. 4 * wordSize:
+      // *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
+      //                                                   4         2,3          1        0
+      // end starts at SP + 8
+      // The Java(R) Virtual Machine Specification Java SE 7 Edition
+      // 4.10.2.3. Values of Types long and double
+      //    "When calculating operand stack length, values of type long and double have length two."
+    } else {
+      __ movptr(buf, Address(rsp, 3 * wordSize)); // byte[] array
+      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
+      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
+      __ addq(buf, off); // + offset
+      __ movl(crc, Address(rsp, 4 * wordSize)); // Initial CRC
+    }
+    __ movl(end, Address(rsp, wordSize)); // end
+    __ subl(end, off); // end - off
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()), crc, buf, len);
+    // result in rax
+    // _areturn
+    __ pop(rdi);                // get return address
+    __ mov(rsp, r13);           // set sp to sender sp
+    __ jmp(rdi);
+
+    return entry;
+  }
+
+  return NULL;
+}
+#endif // CC_INTERP
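
Note the CRC32C convention in the entry above: the Java method passes an exclusive end index rather than a length, so the stub computes len = end - off before the call. A minimal sketch of that contract, with 'stub' standing in for StubRoutines::updateBytesCRC32C():

#include <cstddef>
#include <cstdint>

// Hypothetical helper: map (crc, base, off, end) onto a (crc, buf, len) stub.
static uint32_t crc32c_update_bytes(uint32_t crc, const uint8_t* base,
                                    size_t off, size_t end,
                                    uint32_t (*stub)(uint32_t, const uint8_t*, size_t)) {
  return stub(crc, base + off, end - off);  // addq(buf, off); subl(end, off)
}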
--- a/src/cpu/x86/vm/templateInterpreter_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/x86/vm/templateInterpreter_x86.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -116,4 +116,87 @@
     method->constants()->cache();
 }
 
+#ifndef _LP64
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+  int i = 0;
+  switch (type) {
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : // fall through
+    case T_LONG   : // fall through
+    case T_VOID   : i = 4; break;
+    case T_FLOAT  : i = 5; break;  // have to treat float and double separately for SSE
+    case T_DOUBLE : i = 6; break;
+    case T_OBJECT : // fall through
+    case T_ARRAY  : i = 7; break;
+    default       : ShouldNotReachHere();
+  }
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
+  return i;
+}
+#else
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+  int i = 0;
+  switch (type) {
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : i = 4; break;
+    case T_LONG   : i = 5; break;
+    case T_VOID   : i = 6; break;
+    case T_FLOAT  : i = 7; break;
+    case T_DOUBLE : i = 8; break;
+    case T_OBJECT : // fall through
+    case T_ARRAY  : i = 9; break;
+    default       : ShouldNotReachHere();
+  }
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
+         "index out of bounds");
+  return i;
+}
+#endif // _LP64
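
The two tables diverge because LP64 keeps separate result handlers for int, long and void while ILP32 folds them into one (a jlong already comes back in edx:eax), shifting every later index. A hypothetical HotSpot-internal check of what that implies, using the existing LP64_ONLY/NOT_LP64 macros:

static void check_float_handler_index() {
  assert(AbstractInterpreter::BasicType_as_index(T_FLOAT) ==
         LP64_ONLY(7) NOT_LP64(5), "float handler index differs per data model");
}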
+
+// These methods must never be compiled: if they were, the interpreter would
+// prefer the compiled version to its intrinsic version.
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  switch (method_kind(m)) {
+    case Interpreter::java_lang_math_sin     : // fall thru
+    case Interpreter::java_lang_math_cos     : // fall thru
+    case Interpreter::java_lang_math_tan     : // fall thru
+    case Interpreter::java_lang_math_abs     : // fall thru
+    case Interpreter::java_lang_math_log     : // fall thru
+    case Interpreter::java_lang_math_log10   : // fall thru
+    case Interpreter::java_lang_math_sqrt    : // fall thru
+    case Interpreter::java_lang_math_pow     : // fall thru
+    case Interpreter::java_lang_math_exp     :
+      return false;
+    default:
+      return true;
+  }
+}
+
+// How much stack a method activation needs in words.
+int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
+  const int entry_size = frame::interpreter_frame_monitor_size();
+
+  // Total overhead size: entry_size + (saved rbp through expr stack
+  // bottom). Be sure to change this if you add/subtract anything
+  // to/from the overhead area.
+  const int overhead_size =
+    -(frame::interpreter_frame_initial_sp_offset) + entry_size;
+
+#ifndef _LP64
+  const int stub_code = 4;  // see generate_call_stub
+#else
+  const int stub_code = frame::entry_frame_after_call_words;
+#endif
+
+  const int method_stack = (method->max_locals() + method->max_stack()) *
+                           Interpreter::stackElementWords;
+  return (overhead_size + method_stack + stub_code);
+}
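
A worked example with hypothetical numbers: max_locals = 4 and max_stack = 6 with stackElementWords = 1 give method_stack = 10 words, so the activation needs overhead_size + 10 + stub_code words. The same arithmetic as a standalone sketch:

static int activation_size_in_words(int max_locals, int max_stack,
                                    int overhead_words, int stub_code_words,
                                    int stack_element_words) {
  const int method_stack = (max_locals + max_stack) * stack_element_words;
  return overhead_words + method_stack + stub_code_words;
}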
+
 #endif // CC_INTERP
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1916 +0,0 @@
-/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateTable.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/macros.hpp"
-
-#define __ _masm->
-
-
-#ifndef CC_INTERP
-const int method_offset = frame::interpreter_frame_method_offset * wordSize;
-const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
-const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
-
-//------------------------------------------------------------------------------------------------------------------------
-
-address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
-  address entry = __ pc();
-
-  // Note: There should be a minimal interpreter frame set up when stack
-  // overflow occurs since we check explicitly for it now.
-  //
-#ifdef ASSERT
-  { Label L;
-    __ lea(rax, Address(rbp,
-                frame::interpreter_frame_monitor_block_top_offset * wordSize));
-    __ cmpptr(rax, rsp);  // rax, = maximal rsp for current rbp,
-                        //  (stack grows negative)
-    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
-    __ stop ("interpreter frame not set up");
-    __ bind(L);
-  }
-#endif // ASSERT
-  // Restore bcp under the assumption that the current frame is still
-  // interpreted
-  __ restore_bcp();
-
-  // expression stack must be empty before entering the VM if an exception
-  // happened
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  // throw exception
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an exception happened
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  // setup parameters
-  // ??? convention: expect aberrant index in register rbx,
-  __ lea(rax, ExternalAddress((address)name));
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), rax, rbx);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
-  address entry = __ pc();
-  // object is at TOS
-  __ pop(rax);
-  // expression stack must be empty before entering the VM if an exception
-  // happened
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::throw_ClassCastException),
-             rax);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
-  assert(!pass_oop || message == NULL, "either oop or message but not both");
-  address entry = __ pc();
-  if (pass_oop) {
-    // object is at TOS
-    __ pop(rbx);
-  }
-  // expression stack must be empty before entering the VM if an exception happened
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  // setup parameters
-  __ lea(rax, ExternalAddress((address)name));
-  if (pass_oop) {
-    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), rax, rbx);
-  } else {
-    if (message != NULL) {
-      __ lea(rbx, ExternalAddress((address)message));
-    } else {
-      __ movptr(rbx, NULL_WORD);
-    }
-    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx);
-  }
-  // throw exception
-  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
-  address entry = __ pc();
-  // NULL last_sp until next java call
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
-  __ dispatch_next(state);
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
-  address entry = __ pc();
-
-#ifdef COMPILER2
-  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
-  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
-    for (int i = 1; i < 8; i++) {
-        __ ffree(i);
-    }
-  } else if (UseSSE < 2) {
-    __ empty_FPU_stack();
-  }
-#endif
-  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
-    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
-  } else {
-    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
-  }
-
-  if (state == ftos) {
-    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
-  } else if (state == dtos) {
-    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
-  }
-
-  // Restore stack bottom in case i2c adjusted stack
-  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
-  // and NULL it as marker that rsp is now tos until next java call
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
-
-  __ restore_bcp();
-  __ restore_locals();
-
-  if (state == atos) {
-    Register mdp = rbx;
-    Register tmp = rcx;
-    __ profile_return_type(mdp, rax, tmp);
-  }
-
-  const Register cache = rbx;
-  const Register index = rcx;
-  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
-
-  const Register flags = cache;
-  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
-  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
-  __ dispatch_next(state, step);
-
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
-  address entry = __ pc();
-
-  if (state == ftos) {
-    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
-  } else if (state == dtos) {
-    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
-  }
-
-  // The stack is not extended by deopt but we must NULL last_sp as this
-  // entry is like a "return".
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
-  __ restore_bcp();
-  __ restore_locals();
-  // handle exceptions
-  { Label L;
-    const Register thread = rcx;
-    __ get_thread(thread);
-    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
-    __ jcc(Assembler::zero, L);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-  __ dispatch_next(state, step);
-  return entry;
-}
-
-
-int AbstractInterpreter::BasicType_as_index(BasicType type) {
-  int i = 0;
-  switch (type) {
-    case T_BOOLEAN: i = 0; break;
-    case T_CHAR   : i = 1; break;
-    case T_BYTE   : i = 2; break;
-    case T_SHORT  : i = 3; break;
-    case T_INT    : // fall through
-    case T_LONG   : // fall through
-    case T_VOID   : i = 4; break;
-    case T_FLOAT  : i = 5; break;  // have to treat float and double separately for SSE
-    case T_DOUBLE : i = 6; break;
-    case T_OBJECT : // fall through
-    case T_ARRAY  : i = 7; break;
-    default       : ShouldNotReachHere();
-  }
-  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
-  return i;
-}
-
-
-address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
-  address entry = __ pc();
-  switch (type) {
-    case T_BOOLEAN: __ c2bool(rax);            break;
-    case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
-    case T_BYTE   : __ sign_extend_byte (rax); break;
-    case T_SHORT  : __ sign_extend_short(rax); break;
-    case T_INT    : /* nothing to do */        break;
-    case T_LONG   : /* nothing to do */        break;
-    case T_VOID   : /* nothing to do */        break;
-    case T_DOUBLE :
-    case T_FLOAT  :
-      { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
-        __ pop(t);                            // remove return address first
-        // Must return a result for interpreter or compiler. In SSE
-        // mode, results are returned in xmm0 and the FPU stack must
-        // be empty.
-        if (type == T_FLOAT && UseSSE >= 1) {
-          // Load ST0
-          __ fld_d(Address(rsp, 0));
-          // Store as float and empty fpu stack
-          __ fstp_s(Address(rsp, 0));
-          // and reload
-          __ movflt(xmm0, Address(rsp, 0));
-        } else if (type == T_DOUBLE && UseSSE >= 2 ) {
-          __ movdbl(xmm0, Address(rsp, 0));
-        } else {
-          // restore ST0
-          __ fld_d(Address(rsp, 0));
-        }
-        // and pop the temp
-        __ addptr(rsp, 2 * wordSize);
-        __ push(t);                           // restore return address
-      }
-      break;
-    case T_OBJECT :
-      // retrieve result from frame
-      __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
-      // and verify it
-      __ verify_oop(rax);
-      break;
-    default       : ShouldNotReachHere();
-  }
-  __ ret(0);                                   // return from result handler
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
-  address entry = __ pc();
-  __ push(state);
-  __ call_VM(noreg, runtime_entry);
-  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
-  return entry;
-}
-
-
-// Helpers for commoning out cases in the various type of method entries.
-//
-
-// increment invocation count & check for overflow
-//
-// Note: checking for negative value instead of overflow
-//       so we have a 'sticky' overflow test
-//
-// rbx,: method
-// rcx: invocation counter
-//
-void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
-  Label done;
-  // Note: In tiered we increment either counters in MethodCounters* or in MDO
-  // depending if we're profiling or not.
-  if (TieredCompilation) {
-    int increment = InvocationCounter::count_increment;
-    Label no_mdo;
-    if (ProfileInterpreter) {
-      // Are we profiling?
-      __ movptr(rax, Address(rbx, Method::method_data_offset()));
-      __ testptr(rax, rax);
-      __ jccb(Assembler::zero, no_mdo);
-      // Increment counter in the MDO
-      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
-                                                in_bytes(InvocationCounter::counter_offset()));
-      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
-      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
-      __ jmp(done);
-    }
-    __ bind(no_mdo);
-    // Increment counter in MethodCounters
-    const Address invocation_counter(rax,
-                  MethodCounters::invocation_counter_offset() +
-                  InvocationCounter::counter_offset());
-
-    __ get_method_counters(rbx, rax, done);
-    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
-    __ increment_mask_and_jump(invocation_counter, increment, mask,
-                               rcx, false, Assembler::zero, overflow);
-    __ bind(done);
-  } else { // not TieredCompilation
-    const Address backedge_counter(rax,
-                  MethodCounters::backedge_counter_offset() +
-                  InvocationCounter::counter_offset());
-    const Address invocation_counter(rax,
-                  MethodCounters::invocation_counter_offset() +
-                  InvocationCounter::counter_offset());
-
-    __ get_method_counters(rbx, rax, done);
-
-    if (ProfileInterpreter) {
-      __ incrementl(Address(rax,
-              MethodCounters::interpreter_invocation_counter_offset()));
-    }
-
-    // Update standard invocation counters
-    __ movl(rcx, invocation_counter);
-    __ incrementl(rcx, InvocationCounter::count_increment);
-    __ movl(invocation_counter, rcx);             // save invocation count
-
-    __ movl(rax, backedge_counter);               // load backedge counter
-    __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits
-
-    __ addl(rcx, rax);                            // add both counters
-
-    // profile_method is non-NULL only for interpreted methods, so
-    // (profile_method != NULL) == !native_call. The BytecodeInterpreter
-    // only calls here for natives, so that code is elided.
-
-    if (ProfileInterpreter && profile_method != NULL) {
-      // Test to see if we should create a method data oop
-      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
-      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
-      __ jcc(Assembler::less, *profile_method_continue);
-
-      // if no method data exists, go to profile_method
-      __ test_method_data_pointer(rax, *profile_method);
-    }
-
-    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
-    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
-    __ jcc(Assembler::aboveEqual, *overflow);
-    __ bind(done);
-  }
-}
-
-void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
-
-  // Asm interpreter on entry
-  // rdi - locals
-  // rsi - bcp
-  // rbx, - method
-  // rdx - cpool
-  // rbp, - interpreter frame
-
-  // C++ interpreter on entry
-  // rsi - new interpreter state pointer
-  // rbp - interpreter frame pointer
-  // rbx - method
-
-  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
-  // rbx, - method
-  // rcx - rcvr (assuming there is one)
-  // top of stack return address of interpreter caller
-  // rsp - sender_sp
-
-  // C++ interpreter only
-  // rsi - previous interpreter state pointer
-
-  // InterpreterRuntime::frequency_counter_overflow takes one argument
-  // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
-  // The call returns the address of the verified entry point for the method or NULL
-  // if the compilation did not complete (either went background or bailed out).
-  __ movptr(rax, (intptr_t)false);
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);
-
-  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
-
-  // Preserve invariant that rsi/rdi contain bcp/locals of sender frame
-  // and jump to the interpreted entry.
-  __ jmp(*do_continue, relocInfo::none);
-
-}
-
-void InterpreterGenerator::generate_stack_overflow_check(void) {
-  // see if we've got enough room on the stack for locals plus overhead.
-  // the expression stack grows down incrementally, so the normal guard
-  // page mechanism will work for that.
-  //
-  // Registers live on entry:
-  //
-  // Asm interpreter
-  // rdx: number of additional locals this frame needs (what we must check)
-  // rbx,: Method*
-
-  // destroyed on exit
-  // rax,
-
-  // NOTE: the additional locals are also always pushed (this wasn't obvious in
-  // generate_fixed_frame), so the guard should work for them too.
-  //
-
-  // monitor entry size: see picture of stack in frame_x86.hpp
-  const int entry_size    = frame::interpreter_frame_monitor_size() * wordSize;
-
-  // total overhead size: entry_size + (saved rbp, thru expr stack bottom).
-  // be sure to change this if you add/subtract anything to/from the overhead area
-  const int overhead_size = -(frame::interpreter_frame_initial_sp_offset*wordSize) + entry_size;
-
-  const int page_size = os::vm_page_size();
-
-  Label after_frame_check;
-
-  // see if the frame is greater than one page in size. If so,
-  // then we need to verify there is enough stack space remaining
-  // for the additional locals.
-  __ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize);
-  __ jcc(Assembler::belowEqual, after_frame_check);
-
-  // compute rsp as if this were going to be the last frame on
-  // the stack before the red zone
-
-  Label after_frame_check_pop;
-
-  __ push(rsi);
-
-  const Register thread = rsi;
-
-  __ get_thread(thread);
-
-  const Address stack_base(thread, Thread::stack_base_offset());
-  const Address stack_size(thread, Thread::stack_size_offset());
-
-  // locals + overhead, in bytes
-  __ lea(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size));
-
-#ifdef ASSERT
-  Label stack_base_okay, stack_size_okay;
-  // verify that thread stack base is non-zero
-  __ cmpptr(stack_base, (int32_t)NULL_WORD);
-  __ jcc(Assembler::notEqual, stack_base_okay);
-  __ stop("stack base is zero");
-  __ bind(stack_base_okay);
-  // verify that thread stack size is non-zero
-  __ cmpptr(stack_size, 0);
-  __ jcc(Assembler::notEqual, stack_size_okay);
-  __ stop("stack size is zero");
-  __ bind(stack_size_okay);
-#endif
-
-  // Add stack base to locals and subtract stack size
-  __ addptr(rax, stack_base);
-  __ subptr(rax, stack_size);
-
-  // Use the maximum number of pages we might bang.
-  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
-                                                                              (StackRedPages+StackYellowPages);
-  __ addptr(rax, max_pages * page_size);
-
-  // check against the current stack bottom
-  __ cmpptr(rsp, rax);
-  __ jcc(Assembler::above, after_frame_check_pop);
-
-  __ pop(rsi);  // get saved bcp / (c++ prev state ).
-
-  // Restore sender's sp as SP. This is necessary if the sender's
-  // frame is an extended compiled frame (see gen_c2i_adapter())
-  // and safer anyway in case of JSR292 adaptations.
-
-  __ pop(rax); // return address must be moved if SP is changed
-  __ mov(rsp, rsi);
-  __ push(rax);
-
-  // Note: the restored frame is not necessarily interpreted.
-  // Use the shared runtime version of the StackOverflowError.
-  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
-  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
-  // all done with frame size check
-  __ bind(after_frame_check_pop);
-  __ pop(rsi);
-
-  __ bind(after_frame_check);
-}
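
The check above reduces to a single bound comparison; a hedged pseudocode rendering (helper name and parameters are illustrative):

#include <cstddef>
#include <cstdint>

// The frame fits iff rsp stays above stack bottom + guard pages + new frame.
static bool frame_fits(uintptr_t rsp, uintptr_t stack_base, size_t stack_size,
                       size_t guard_bytes, size_t frame_bytes) {
  const uintptr_t limit = stack_base - stack_size + guard_bytes + frame_bytes;
  return rsp > limit;  // mirrors cmpptr(rsp, rax); jcc(Assembler::above, ...)
}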
-
-// Allocate monitor and lock method (asm interpreter)
-// rbx, - Method*
-//
-void TemplateInterpreterGenerator::lock_method() {
-  // synchronize method
-  const Address access_flags      (rbx, Method::access_flags_offset());
-  const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
-  const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;
-
-  #ifdef ASSERT
-    { Label L;
-      __ movl(rax, access_flags);
-      __ testl(rax, JVM_ACC_SYNCHRONIZED);
-      __ jcc(Assembler::notZero, L);
-      __ stop("method doesn't need synchronization");
-      __ bind(L);
-    }
-  #endif // ASSERT
-  // get synchronization object
-  { Label done;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    __ movl(rax, access_flags);
-    __ testl(rax, JVM_ACC_STATIC);
-    __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0)));  // get receiver (assume this is frequent case)
-    __ jcc(Assembler::zero, done);
-    __ movptr(rax, Address(rbx, Method::const_offset()));
-    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
-    __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
-    __ movptr(rax, Address(rax, mirror_offset));
-    __ bind(done);
-  }
-  // add space for monitor & lock
-  __ subptr(rsp, entry_size);                                           // add space for a monitor entry
-  __ movptr(monitor_block_top, rsp);                                    // set new monitor block top
-  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
-  __ mov(rdx, rsp);                                                    // object address
-  __ lock_object(rdx);
-}
-
-//
-// Generate a fixed interpreter frame. This is identical setup for interpreted methods
-// and for native methods hence the shared code.
-
-void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
-  // initialize fixed part of activation frame
-  __ push(rax);                                       // save return address
-  __ enter();                                         // save old & set new rbp,
-
-
-  __ push(rsi);                                       // set sender sp
-  __ push((int32_t)NULL_WORD);                        // leave last_sp as null
-  __ movptr(rsi, Address(rbx,Method::const_offset())); // get ConstMethod*
-  __ lea(rsi, Address(rsi,ConstMethod::codes_offset())); // get codebase
-  __ push(rbx);                                      // save Method*
-  if (ProfileInterpreter) {
-    Label method_data_continue;
-    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
-    __ testptr(rdx, rdx);
-    __ jcc(Assembler::zero, method_data_continue);
-    __ addptr(rdx, in_bytes(MethodData::data_offset()));
-    __ bind(method_data_continue);
-    __ push(rdx);                                       // set the mdp (method data pointer)
-  } else {
-    __ push(0);
-  }
-
-  __ movptr(rdx, Address(rbx, Method::const_offset()));
-  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
-  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
-  __ push(rdx);                                       // set constant pool cache
-  __ push(rdi);                                       // set locals pointer
-  if (native_call) {
-    __ push(0);                                       // no bcp
-  } else {
-    __ push(rsi);                                     // set bcp
-  }
-  __ push(0);                                         // reserve word for pointer to expression stack bottom
-  __ movptr(Address(rsp, 0), rsp);                    // set expression stack bottom
-}
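
For orientation, the pushes above yield the following fixed frame (highest address first; layout reconstructed from the code):

//   [ return address        ]  <- push(rax)
//   [ saved rbp             ]  <- enter()
//   [ sender sp             ]
//   [ last_sp (NULL)        ]
//   [ Method*               ]
//   [ mdp or 0              ]
//   [ constant pool cache   ]
//   [ locals pointer        ]
//   [ bcp (0 for natives)   ]
//   [ expr stack bottom ptr ]  <- rsp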
-
-
-// Method entry for java.lang.ref.Reference.get.
-address InterpreterGenerator::generate_Reference_get_entry(void) {
-#if INCLUDE_ALL_GCS
-  // Code: _aload_0, _getfield, _areturn
-  // parameter size = 1
-  //
-  // The code that gets generated by this routine is split into 2 parts:
-  //    1. The "intrinsified" code for G1 (or any SATB based GC),
-  //    2. The slow path - which is an expansion of the regular method entry.
-  //
-  // Notes:-
-  // * In the G1 code we do not check whether we need to block for
-  //   a safepoint. If G1 is enabled then we must execute the specialized
-  //   code for Reference.get (except when the Reference object is null)
-  //   so that we can log the value in the referent field with an SATB
-  //   update buffer.
-  //   If the code for the getfield template is modified so that the
-  //   G1 pre-barrier code is executed when the current method is
-  //   Reference.get() then going through the normal method entry
-  //   will be fine.
-  // * The G1 code below can, however, check the receiver object (the instance
-  //   of java.lang.Reference) and jump to the slow path if null. If the
-  //   Reference object is null then we obviously cannot fetch the referent
-  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
-  //   regular method entry code to generate the NPE.
-  //
-  // This code is based on generate_accessor_entry.
-
-  // rbx,: Method*
-  // rcx: receiver (preserve for slow entry into asm interpreter)
-
-  // rsi: senderSP must be preserved for slow path, set SP to it on fast path
-
-  address entry = __ pc();
-
-  const int referent_offset = java_lang_ref_Reference::referent_offset;
-  guarantee(referent_offset > 0, "referent offset not initialized");
-
-  if (UseG1GC) {
-    Label slow_path;
-
-    // Check if local 0 != NULL
-    // If the receiver is null then it is OK to jump to the slow path.
-    __ movptr(rax, Address(rsp, wordSize));
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, slow_path);
-
-    // rax: local 0 (must be preserved across the G1 barrier call)
-    //
-    // rbx: method (at this point it's scratch)
-    // rcx: receiver (at this point it's scratch)
-    // rdx: scratch
-    // rdi: scratch
-    //
-    // rsi: sender sp
-
-    // Preserve the sender sp in case the pre-barrier
-    // calls the runtime
-    __ push(rsi);
-
-    // Load the value of the referent field.
-    const Address field_address(rax, referent_offset);
-    __ movptr(rax, field_address);
-
-    // Generate the G1 pre-barrier code to log the value of
-    // the referent field in an SATB buffer.
-    __ get_thread(rcx);
-    __ g1_write_barrier_pre(noreg /* obj */,
-                            rax /* pre_val */,
-                            rcx /* thread */,
-                            rbx /* tmp */,
-                            true /* tosca_live */,
-                            true /* expand_call */);
-
-    // _areturn
-    __ pop(rsi);                // get sender sp
-    __ pop(rdi);                // get return address
-    __ mov(rsp, rsi);           // set sp to sender sp
-    __ jmp(rdi);
-
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
-    return entry;
-  }
-#endif // INCLUDE_ALL_GCS
-
-  // If G1 is not enabled then attempt to go through the accessor entry point
-  // Reference.get is an accessor
-  return NULL;
-}
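
Conceptually, the pre-barrier emitted above logs the referent so a concurrent G1 marking cycle still treats it as live. A simplified sketch of the idea (the container and names are illustrative, not G1's actual SATB queue):

#include <vector>

static void satb_pre_barrier(std::vector<void*>& satb_buffer,
                             void* pre_val, bool marking_active) {
  if (marking_active && pre_val != nullptr) {
    satb_buffer.push_back(pre_val);  // keep the old value visible to marking
  }
}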
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.update(int crc, int b)
- */
-address InterpreterGenerator::generate_CRC32_update_entry() {
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    // rbx: Method*
-    // rsi: senderSP must be preserved for slow path, set SP to it on fast path
-    // rdx: scratch
-    // rdi: scratch
-
-    Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-    __ jcc(Assembler::notEqual, slow_path);
-
-    // We don't generate local frame and don't align stack because
-    // we call stub code and there is no safepoint on this path.
-
-    // Load parameters
-    const Register crc = rax;  // crc
-    const Register val = rdx;  // source java byte value
-    const Register tbl = rdi;  // scratch
-
-    // Arguments are reversed on java expression stack
-    __ movl(val, Address(rsp,   wordSize)); // byte value
-    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC
-
-    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
-    __ notl(crc); // ~crc
-    __ update_byte_crc32(crc, val, tbl);
-    __ notl(crc); // ~crc
-    // result in rax
-
-    // _areturn
-    __ pop(rdi);                // get return address
-    __ mov(rsp, rsi);           // set sp to sender sp
-    __ jmp(rdi);
-
-    // generate a vanilla native entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
- *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
- */
-address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    // rbx: Method*
-    // rsi: senderSP must be preserved for slow path, set SP to it on fast path
-    // rdx: scratch
-    // rdi: scratch
-
-    Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-    __ jcc(Assembler::notEqual, slow_path);
-
-    // We don't generate local frame and don't align stack because
-    // we call stub code and there is no safepoint on this path.
-
-    // Load parameters
-    const Register crc = rax;  // crc
-    const Register buf = rdx;  // source java byte array address
-    const Register len = rdi;  // length
-
-    // value              x86_32
-    // interp. arg ptr    ESP + 4
-    // int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
-    //                                         3           2      1        0
-    // int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
-    //                                              4         2,3      1        0
-
-    // Arguments are reversed on java expression stack
-    __ movl(len,   Address(rsp,   4 + 0)); // Length
-    // Calculate address of start element
-    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
-      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // long buf
-      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
-      __ movl(crc,   Address(rsp, 4 + 4 * wordSize)); // Initial CRC
-    } else {
-      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // byte[] array
-      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
-      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
-      __ movl(crc,   Address(rsp, 4 + 3 * wordSize)); // Initial CRC
-    }
-
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
-    // result in rax
-
-    // _areturn
-    __ pop(rdi);                // get return address
-    __ mov(rsp, rsi);           // set sp to sender sp
-    __ jmp(rdi);
-
-    // generate a vanilla native entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
-
-/**
-* Method entry for static native methods:
-*   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
-*   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
-*/
-address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-  if (UseCRC32CIntrinsics) {
-    address entry = __ pc();
-    // Load parameters
-    const Register crc = rax;  // crc
-    const Register buf = rcx;  // source java byte array address
-    const Register len = rdx;  // length
-    const Register end = len;
-
-    // value              x86_32
-    // interp. arg ptr    ESP + 4
-    // int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int end)
-    //                                         3           2      1        0
-    // int java.util.zip.CRC32.updateByteBuffer(int crc, long address, int off, int end)
-    //                                              4         2,3          1        0
-
-    // Arguments are reversed on java expression stack
-    __ movl(end, Address(rsp, 4 + 0)); // end
-    __ subl(len, Address(rsp, 4 + 1 * wordSize));  // end - offset == length
-    // Calculate address of start element
-    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
-      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // long address
-      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
-      __ movl(crc, Address(rsp, 4 + 4 * wordSize)); // Initial CRC
-    } else {
-      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // byte[] array
-      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
-      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
-      __ movl(crc, Address(rsp, 4 + 3 * wordSize)); // Initial CRC
-    }
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()), crc, buf, len);
-    // result in rax
-    // _areturn
-    __ pop(rdi);                // get return address
-    __ mov(rsp, rsi);           // set sp to sender sp
-    __ jmp(rdi);
-
-    return entry;
-  }
-  return NULL;
-}
-
-/**
- * Method entry for static native method:
- *    java.lang.Float.intBitsToFloat(int bits)
- */
-address InterpreterGenerator::generate_Float_intBitsToFloat_entry() {
-  if (UseSSE >= 1) {
-    address entry = __ pc();
-
-    // rsi: the sender's SP
-
-    // Skip safepoint check (compiler intrinsic versions of this method
-    // do not perform safepoint checks either).
-
-    // Load 'bits' into xmm0 (interpreter returns results in xmm0)
-    __ movflt(xmm0, Address(rsp, wordSize));
-
-    // Return
-    __ pop(rdi); // get return address
-    __ mov(rsp, rsi); // set rsp to the sender's SP
-    __ jmp(rdi);
-    return entry;
-  }
-
-  return NULL;
-}
-
-/**
- * Method entry for static native method:
- *    java.lang.Float.floatToRawIntBits(float value)
- */
-address InterpreterGenerator::generate_Float_floatToRawIntBits_entry() {
-  if (UseSSE >= 1) {
-    address entry = __ pc();
-
-    // rsi: the sender's SP
-
-    // Skip safepoint check (compiler intrinsic versions of this method
-    // do not perform safepoint checks either).
-
-    // Load the parameter (a floating-point value) into rax.
-    __ movl(rax, Address(rsp, wordSize));
-
-    // Return
-    __ pop(rdi); // get return address
-    __ mov(rsp, rsi); // set rsp to the sender's SP
-    __ jmp(rdi);
-    return entry;
-  }
-
-  return NULL;
-}
-
-
-/**
- * Method entry for static native method:
- *    java.lang.Double.longBitsToDouble(long bits)
- */
-address InterpreterGenerator::generate_Double_longBitsToDouble_entry() {
-   if (UseSSE >= 2) {
-     address entry = __ pc();
-
-     // rsi: the sender's SP
-
-     // Skip safepoint check (compiler intrinsic versions of this method
-     // do not perform safepoint checks either).
-
-     // Load 'bits' into xmm0 (interpreter returns results in xmm0)
-     __ movdbl(xmm0, Address(rsp, wordSize));
-
-     // Return
-     __ pop(rdi); // get return address
-     __ mov(rsp, rsi); // set rsp to the sender's SP
-     __ jmp(rdi);
-     return entry;
-   }
-
-   return NULL;
-}
-
-/**
- * Method entry for static native method:
- *    java.lang.Double.doubleToRawLongBits(double value)
- */
-address InterpreterGenerator::generate_Double_doubleToRawLongBits_entry() {
-  if (UseSSE >= 2) {
-    address entry = __ pc();
-
-    // rsi: the sender's SP
-
-    // Skip safepoint check (compiler intrinsic versions of this method
-    // do not perform safepoint checks either).
-
-    // Load the parameter (a floating-point value) into rax.
-    __ movl(rdx, Address(rsp, 2*wordSize));
-    __ movl(rax, Address(rsp, wordSize));
-
-    // Return
-    __ pop(rdi); // get return address
-    __ mov(rsp, rsi); // set rsp to the sender's SP
-    __ jmp(rdi);
-    return entry;
-  }
-
-  return NULL;
-}
-
-//
-// Interpreter stub for calling a native method. (asm interpreter)
-// This sets up a somewhat different looking stack for calling the native method
-// than the typical interpreter frame setup.
-//
-
-address InterpreterGenerator::generate_native_entry(bool synchronized) {
-  // determine code generation flags
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // rbx,: Method*
-  // rsi: sender sp
-  // rsi: previous interpreter state (C++ interpreter) must preserve
-  address entry_point = __ pc();
-
-  const Address constMethod       (rbx, Method::const_offset());
-  const Address access_flags      (rbx, Method::access_flags_offset());
-  const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
-
-  // get parameter size (always needed)
-  __ movptr(rcx, constMethod);
-  __ load_unsigned_short(rcx, size_of_parameters);
-
-  // Native calls don't need the stack size check since they have no expression
-  // stack; the arguments are already on the stack, and we only add a handful of
-  // words to it.
-
-  // rbx,: Method*
-  // rcx: size of parameters
-  // rsi: sender sp
-
-  __ pop(rax);                                       // get return address
-  // for natives the size of locals is zero
-
-  // compute beginning of parameters (rdi)
-  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
-
-
-  // add 2 zero-initialized slots for native calls
-  // NULL result handler
-  __ push((int32_t)NULL_WORD);
-  // NULL oop temp (mirror or jni oop result)
-  __ push((int32_t)NULL_WORD);
-
-  // initialize fixed part of activation frame
-  generate_fixed_frame(true);
-
-  // make sure method is native & not abstract
-#ifdef ASSERT
-  __ movl(rax, access_flags);
-  {
-    Label L;
-    __ testl(rax, JVM_ACC_NATIVE);
-    __ jcc(Assembler::notZero, L);
-    __ stop("tried to execute non-native method as native");
-    __ bind(L);
-  }
-  { Label L;
-    __ testl(rax, JVM_ACC_ABSTRACT);
-    __ jcc(Assembler::zero, L);
-    __ stop("tried to execute abstract method in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // Since at this point in the method invocation the exception handler
-  // would try to exit the monitor of synchronized methods which hasn't
-  // been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. The remove_activation will
-  // check this flag.
-
-  __ get_thread(rax);
-  const Address do_not_unlock_if_synchronized(rax,
-        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
-  __ movbool(do_not_unlock_if_synchronized, true);
-
-  // increment invocation count & check for overflow
-  Label invocation_counter_overflow;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
-  }
-
-  Label continue_after_compile;
-  __ bind(continue_after_compile);
-
-  bang_stack_shadow_pages(true);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ get_thread(rax);
-  __ movbool(do_not_unlock_if_synchronized, false);
-
-  // check for synchronized methods
-  // Must happen AFTER invocation_counter check and stack overflow check,
-  // so method is not locked if overflows.
-  //
-  if (synchronized) {
-    lock_method();
-  } else {
-    // no synchronization necessary
-#ifdef ASSERT
-      { Label L;
-        __ movl(rax, access_flags);
-        __ testl(rax, JVM_ACC_SYNCHRONIZED);
-        __ jcc(Assembler::zero, L);
-        __ stop("method needs synchronization");
-        __ bind(L);
-      }
-#endif
-  }
-
-  // start execution
-#ifdef ASSERT
-  { Label L;
-    const Address monitor_block_top (rbp,
-                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
-    __ movptr(rax, monitor_block_top);
-    __ cmpptr(rax, rsp);
-    __ jcc(Assembler::equal, L);
-    __ stop("broken stack frame setup in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // jvmti/dtrace support
-  __ notify_method_entry();
-
-  // work registers
-  const Register method = rbx;
-  const Register thread = rdi;
-  const Register t      = rcx;
-
-  // allocate space for parameters
-  __ get_method(method);
-  __ movptr(t, Address(method, Method::const_offset()));
-  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
-
-  __ shlptr(t, Interpreter::logStackElementSize);
-  __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
-  __ subptr(rsp, t);
-  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
-
-  // get signature handler
-  { Label L;
-    __ movptr(t, Address(method, Method::signature_handler_offset()));
-    __ testptr(t, t);
-    __ jcc(Assembler::notZero, L);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
-    __ get_method(method);
-    __ movptr(t, Address(method, Method::signature_handler_offset()));
-    __ bind(L);
-  }
-
-  // call signature handler
-  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rdi, "adjust this code");
-  assert(InterpreterRuntime::SignatureHandlerGenerator::to  () == rsp, "adjust this code");
-  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t  , "adjust this code");
-  // The generated handlers do not touch RBX (the method oop).
-  // However, large signatures cannot be cached and are generated
-  // each time here.  The slow-path generator will blow RBX
-  // sometime, so we must reload it after the call.
-  __ call(t);
-  __ get_method(method);        // slow path call blows RBX on DevStudio 5.0
-
-  // result handler is in rax,
-  // set result handler
-  __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax);
-
-  // pass mirror handle if static call
-  { Label L;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    __ movl(t, Address(method, Method::access_flags_offset()));
-    __ testl(t, JVM_ACC_STATIC);
-    __ jcc(Assembler::zero, L);
-    // get mirror
-    __ movptr(t, Address(method, Method:: const_offset()));
-    __ movptr(t, Address(t, ConstMethod::constants_offset()));
-    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
-    __ movptr(t, Address(t, mirror_offset));
-    // copy mirror into activation frame
-    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
-    // pass handle to mirror
-    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
-    __ movptr(Address(rsp, wordSize), t);
-    __ bind(L);
-  }
-
-  // get native function entry point
-  { Label L;
-    __ movptr(rax, Address(method, Method::native_function_offset()));
-    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
-    __ cmpptr(rax, unsatisfied.addr());
-    __ jcc(Assembler::notEqual, L);
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
-    __ get_method(method);
-    __ movptr(rax, Address(method, Method::native_function_offset()));
-    __ bind(L);
-  }
-
-  // pass JNIEnv
-  __ get_thread(thread);
-  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
-  __ movptr(Address(rsp, 0), t);
-
-  // set_last_Java_frame_before_call
-  // It is enough that the pc()
-  // points into the right code segment. It does not have to be the correct return pc.
-  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
-
-  // change thread state
-#ifdef ASSERT
-  { Label L;
-    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
-    __ cmpl(t, _thread_in_Java);
-    __ jcc(Assembler::equal, L);
-    __ stop("Wrong thread state in native stub");
-    __ bind(L);
-  }
-#endif
-
-  // Change state to native
-  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
-  __ call(rax);
-
-  // result potentially in rdx:rax or ST0
-
-  // Verify or restore cpu control state after JNI call
-  __ restore_cpu_control_state_after_jni();
-
-  // save potential result in ST(0) & rdx:rax
-  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
-  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
-  // It is safe to do this push because state is _thread_in_native and return address will be found
-  // via _last_native_pc and not via _last_java_sp
-
-  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
-  // If the order changes or anything else is added to the stack the code in
-  // interpreter_frame_result will have to be changed.
-
-  { Label L;
-    Label push_double;
-    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
-    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
-    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
-              float_handler.addr());
-    __ jcc(Assembler::equal, push_double);
-    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
-              double_handler.addr());
-    __ jcc(Assembler::notEqual, L);
-    __ bind(push_double);
-    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
-    __ bind(L);
-  }
-  __ push(ltos);
-
-  // change thread state
-  __ get_thread(thread);
-  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
-  if(os::is_MP()) {
-    if (UseMembar) {
-      // Force this write out before the read below
-      __ membar(Assembler::Membar_mask_bits(
-           Assembler::LoadLoad | Assembler::LoadStore |
-           Assembler::StoreLoad | Assembler::StoreStore));
-    } else {
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(thread, rcx);
-    }
-  }
-
-  if (AlwaysRestoreFPU) {
-    //  Make sure the control word is correct.
-    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
-  }
-
-  // check for safepoint operation in progress and/or pending suspend requests
-  { Label Continue;
-
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-
-    Label L;
-    __ jcc(Assembler::notEqual, L);
-    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
-    __ jcc(Assembler::equal, Continue);
-    __ bind(L);
-
-    // Don't use call_VM as it will see a possible pending exception and forward it,
-    // never returning here and preventing us from clearing _last_native_pc down below.
-    // Nor can we use call_VM_leaf, as it will check whether rsi & rdi are
-    // preserved and correspond to the bcp/locals pointers. So we do a runtime call
-    // by hand.
-    //
-    __ push(thread);
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
-                                            JavaThread::check_special_condition_for_native_trans)));
-    __ increment(rsp, wordSize);
-    __ get_thread(thread);
-
-    __ bind(Continue);
-  }
-
-  // change thread state
-  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
-
-  __ reset_last_Java_frame(thread, true, true);
-
-  // reset handle block
-  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
-  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
-
-  // If result was an oop then unbox and save it in the frame
-  { Label L;
-    Label no_oop, store_result;
-    ExternalAddress handler(AbstractInterpreter::result_handler(T_OBJECT));
-    __ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize),
-              handler.addr());
-    __ jcc(Assembler::notEqual, no_oop);
-    __ cmpptr(Address(rsp, 0), (int32_t)NULL_WORD);
-    __ pop(ltos);
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, store_result);
-    // unbox
-    __ movptr(rax, Address(rax, 0));
-    __ bind(store_result);
-    __ movptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax);
-    // keep stack depth as expected by pushing oop which will eventually be discarded
-    __ push(ltos);
-    __ bind(no_oop);
-  }
-
-  {
-     Label no_reguard;
-     __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
-     __ jcc(Assembler::notEqual, no_reguard);
-
-     __ pusha();
-     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
-     __ popa();
-
-     __ bind(no_reguard);
-   }
-
-  // restore rsi to have legal interpreter frame,
-  // i.e., bci == 0 <=> rsi == code_base()
-  // Can't call_VM until bcp is within a reasonable range.
-  __ get_method(method);      // method is junk from thread_in_native to now.
-  __ movptr(rsi, Address(method,Method::const_offset()));   // get ConstMethod*
-  __ lea(rsi, Address(rsi,ConstMethod::codes_offset()));    // get codebase
-
-  // handle exceptions (exception handling will handle unlocking!)
-  { Label L;
-    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
-    __ jcc(Assembler::zero, L);
-    // Note: At some point we may want to unify this with the code used in call_VM_base();
-    //       i.e., we should use the StubRoutines::forward_exception code. For now this
-    //       doesn't work here because the rsp is not correctly set at this point.
-    __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-
-  // do unlocking if necessary
-  { Label L;
-    __ movl(t, Address(method, Method::access_flags_offset()));
-    __ testl(t, JVM_ACC_SYNCHRONIZED);
-    __ jcc(Assembler::zero, L);
-    // the code below should be shared with interpreter macro assembler implementation
-    { Label unlock;
-      // BasicObjectLock will be first in the list, since this is a synchronized method. However, we need
-      // to check that the object has not been unlocked by an explicit monitorexit bytecode.
-      const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
-
-      __ lea(rdx, monitor);                   // address of first monitor
-
-      __ movptr(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
-      __ testptr(t, t);
-      __ jcc(Assembler::notZero, unlock);
-
-      // Entry already unlocked, need to throw exception
-      __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
-      __ should_not_reach_here();
-
-      __ bind(unlock);
-      __ unlock_object(rdx);
-    }
-    __ bind(L);
-  }
-
-  // jvmti/dtrace support
-  // Note: This must happen _after_ handling/throwing any exceptions since
-  //       the exception handler code notifies the runtime of method exits
-  //       too. If this happens before, method entry/exit notifications are
-  //       not properly paired (was bug - gri 11/22/99).
-  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
-
-  // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
-  __ pop(ltos);
-  __ movptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
-  __ call(t);
-
-  // remove activation
-  __ movptr(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
-  __ leave();                                // remove frame anchor
-  __ pop(rdi);                               // get return address
-  __ mov(rsp, t);                            // set sp to sender sp
-  __ jmp(rdi);
-
-  if (inc_counter) {
-    // Handle overflow of counter and compile method
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
-  }
-
-  return entry_point;
-}
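
The long sequence above is easier to follow as the thread-state protocol it implements
around the JNI call. A minimal C++ sketch of that protocol (illustrative only: the
Thread type and the two helpers are stand-ins, not HotSpot APIs; the fence plays the
role of the UseMembar/serialize-page step):

    #include <atomic>

    enum ThreadState { thread_in_Java, thread_in_native, thread_in_native_trans };

    struct Thread {
      std::atomic<ThreadState> state{thread_in_Java};
      std::atomic<int>         suspend_flags{0};
    };

    static std::atomic<bool> g_safepoint{false};   // stands in for SafepointSynchronize state
    static bool safepoint_pending() { return g_safepoint.load(); }
    static void check_special_condition(Thread*) { /* would block for the safepoint */ }

    void call_native(Thread* t, void (*fn)()) {
      t->state.store(thread_in_native);        // VM can safepoint without stopping us
      fn();                                    // the JNI call itself
      t->state.store(thread_in_native_trans);  // transitional: must re-check below
      std::atomic_thread_fence(std::memory_order_seq_cst); // force the write out
      if (safepoint_pending() || t->suspend_flags.load() != 0)
        check_special_condition(t);            // the hand-rolled runtime call above
      t->state.store(thread_in_Java);          // safely back in Java
    }

The _thread_in_native_trans step is the point of the dance: the VM can trust that any
thread it observes in that state will re-check for a pending safepoint before running
Java code again.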
-
-//
-// Generic interpreted method entry to (asm) interpreter
-//
-address InterpreterGenerator::generate_normal_entry(bool synchronized) {
-  // determine code generation flags
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // rbx: Method*
-  // rsi: sender sp
-  address entry_point = __ pc();
-
-  const Address constMethod       (rbx, Method::const_offset());
-  const Address access_flags      (rbx, Method::access_flags_offset());
-  const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
-  const Address size_of_locals    (rdx, ConstMethod::size_of_locals_offset());
-
-  // get parameter size (always needed)
-  __ movptr(rdx, constMethod);
-  __ load_unsigned_short(rcx, size_of_parameters);
-
-  // rbx: Method*
-  // rcx: size of parameters
-
-  // rsi: sender_sp (could differ from sp+wordSize if we were called via c2i)
-
-  __ load_unsigned_short(rdx, size_of_locals);       // get size of locals in words
-  __ subl(rdx, rcx);                                // rdx = no. of additional locals
-
-  // see if we've got enough room on the stack for locals plus overhead.
-  generate_stack_overflow_check();
-
-  // get return address
-  __ pop(rax);
-
-  // compute beginning of parameters (rdi)
-  __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
-
-  // rdx - # of additional locals
-  // allocate space for locals
-  // explicitly initialize locals
-  {
-    Label exit, loop;
-    __ testl(rdx, rdx);
-    __ jcc(Assembler::lessEqual, exit);               // do nothing if rdx <= 0
-    __ bind(loop);
-    __ push((int32_t)NULL_WORD);                      // initialize local variables
-    __ decrement(rdx);                                // until everything initialized
-    __ jcc(Assembler::greater, loop);
-    __ bind(exit);
-  }
-
-  // initialize fixed part of activation frame
-  generate_fixed_frame(false);
-
-  // make sure method is not native & not abstract
-#ifdef ASSERT
-  __ movl(rax, access_flags);
-  {
-    Label L;
-    __ testl(rax, JVM_ACC_NATIVE);
-    __ jcc(Assembler::zero, L);
-    __ stop("tried to execute native method as non-native");
-    __ bind(L);
-  }
-  { Label L;
-    __ testl(rax, JVM_ACC_ABSTRACT);
-    __ jcc(Assembler::zero, L);
-    __ stop("tried to execute abstract method in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // Since at this point in the method invocation the exception handler
-  // would try to exit the monitor of a synchronized method which hasn't
-  // been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. The remove_activation will
-  // check this flag.
-
-  __ get_thread(rax);
-  const Address do_not_unlock_if_synchronized(rax,
-        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
-  __ movbool(do_not_unlock_if_synchronized, true);
-
-  __ profile_parameters_type(rax, rcx, rdx);
-  // increment invocation count & check for overflow
-  Label invocation_counter_overflow;
-  Label profile_method;
-  Label profile_method_continue;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
-    if (ProfileInterpreter) {
-      __ bind(profile_method_continue);
-    }
-  }
-  Label continue_after_compile;
-  __ bind(continue_after_compile);
-
-  bang_stack_shadow_pages(false);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ get_thread(rax);
-  __ movbool(do_not_unlock_if_synchronized, false);
-
-  // check for synchronized methods
-  // Must happen AFTER invocation_counter check and stack overflow check,
-  // so the method is not locked if the counter overflows.
-  //
-  if (synchronized) {
-    // Allocate monitor and lock method
-    lock_method();
-  } else {
-    // no synchronization necessary
-#ifdef ASSERT
-      { Label L;
-        __ movl(rax, access_flags);
-        __ testl(rax, JVM_ACC_SYNCHRONIZED);
-        __ jcc(Assembler::zero, L);
-        __ stop("method needs synchronization");
-        __ bind(L);
-      }
-#endif
-  }
-
-  // start execution
-#ifdef ASSERT
-  { Label L;
-     const Address monitor_block_top (rbp,
-                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
-    __ movptr(rax, monitor_block_top);
-    __ cmpptr(rax, rsp);
-    __ jcc(Assembler::equal, L);
-    __ stop("broken stack frame setup in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // jvmti support
-  __ notify_method_entry();
-
-  __ dispatch_next(vtos);
-
-  // invocation counter overflow
-  if (inc_counter) {
-    if (ProfileInterpreter) {
-      // We have decided to profile this method in the interpreter
-      __ bind(profile_method);
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
-      __ set_method_data_pointer_for_bcp();
-      __ get_method(rbx);
-      __ jmp(profile_method_continue);
-    }
-    // Handle overflow of counter and compile method
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
-  }
-
-  return entry_point;
-}
-
-
-// These should never be compiled: if a compiled version existed, the
-// interpreter would prefer it to its own (faster) intrinsic entry.
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  switch (method_kind(m)) {
-    case Interpreter::java_lang_math_sin     : // fall thru
-    case Interpreter::java_lang_math_cos     : // fall thru
-    case Interpreter::java_lang_math_tan     : // fall thru
-    case Interpreter::java_lang_math_abs     : // fall thru
-    case Interpreter::java_lang_math_log     : // fall thru
-    case Interpreter::java_lang_math_log10   : // fall thru
-    case Interpreter::java_lang_math_sqrt    : // fall thru
-    case Interpreter::java_lang_math_pow     : // fall thru
-    case Interpreter::java_lang_math_exp     :
-      return false;
-    default:
-      return true;
-  }
-}
-
-// How much stack a method activation needs in words.
-int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
-
-  const int stub_code = 4;  // see generate_call_stub
-  // Save space for one monitor to get into the interpreted method in case
-  // the method is synchronized
-  int monitor_size    = method->is_synchronized() ?
-                                1*frame::interpreter_frame_monitor_size() : 0;
-
-  // total overhead size: entry_size + (saved rbp through expr stack bottom).
-  // be sure to change this if you add/subtract anything to/from the overhead area
-  const int overhead_size = -frame::interpreter_frame_initial_sp_offset;
-
-  const int method_stack = (method->max_locals() + method->max_stack()) *
-                           Interpreter::stackElementWords;
-  return overhead_size + method_stack + stub_code;
-}
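
A worked example of this formula, with assumed values (an initial-sp offset of -8
words, hence 8 words of overhead, and a hypothetical non-synchronized method):

    #include <cstdio>

    int main() {
      const int stub_code     = 4;  // see generate_call_stub
      const int overhead_size = 8;  // assumed: -interpreter_frame_initial_sp_offset
      const int element_words = 1;  // one word per stack slot
      const int max_locals = 3, max_stack = 5;  // hypothetical method
      const int method_stack = (max_locals + max_stack) * element_words;
      std::printf("activation = %d words\n", overhead_size + method_stack + stub_code);
      return 0;  // prints: activation = 20 words
    }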
-
-//------------------------------------------------------------------------------------------------------------------------
-// Exceptions
-
-void TemplateInterpreterGenerator::generate_throw_exception() {
-  // Entry point in previous activation (i.e., if the caller was interpreted)
-  Interpreter::_rethrow_exception_entry = __ pc();
-  const Register thread = rcx;
-
-  // Restore sp to interpreter_frame_last_sp even though we are going
-  // to empty the expression stack for the exception processing.
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
-  // rax: exception
-  // rdx: return address/pc that threw exception
-  __ restore_bcp();                              // rsi points to call/send
-  __ restore_locals();
-
-  // Entry point for exceptions thrown within interpreter code
-  Interpreter::_throw_exception_entry = __ pc();
-  // expression stack is undefined here
-  // rax: exception
-  // rsi: exception bcp
-  __ verify_oop(rax);
-
-  // expression stack must be empty before entering the VM in case of an exception
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  // find exception handler address and preserve exception oop
-  __ call_VM(rdx, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), rax);
-  // rax: exception handler entry point
-  // rdx: preserved exception oop
-  // rsi: bcp for exception handler
-  __ push_ptr(rdx);                              // push exception which is now the only value on the stack
-  __ jmp(rax);                                   // jump to exception handler (may be _remove_activation_entry!)
-
-  // If the exception is not handled in the current frame the frame is removed and
-  // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
-  //
-  // Note: At this point the bci still refers to the instruction which caused
-  //       the exception and the expression stack is empty. Thus, for any VM calls
-  //       at this point, GC will find a legal oop map (with empty expression stack).
-
-  // In current activation
-  // tos: exception
-  // rsi: exception bcp
-
-  //
-  // JVMTI PopFrame support
-  //
-
-  Interpreter::_remove_activation_preserving_args_entry = __ pc();
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  // Set the popframe_processing bit in pending_popframe_condition indicating that we are
-  // currently handling popframe, so that call_VMs that may happen later do not trigger new
-  // popframe handling cycles.
-  __ get_thread(thread);
-  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
-  __ orl(rdx, JavaThread::popframe_processing_bit);
-  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);
-
-  {
-    // Check to see whether we are returning to a deoptimized frame.
-    // (The PopFrame call ensures that the caller of the popped frame is
-    // either interpreted or compiled and deoptimizes it if compiled.)
-    // In this case, we can't call dispatch_next() after the frame is
-    // popped, but instead must save the incoming arguments and restore
-    // them after deoptimization has occurred.
-    //
-    // Note that we don't compare the return PC against the
-    // deoptimization blob's unpack entry because of the presence of
-    // adapter frames in C2.
-    Label caller_not_deoptimized;
-    __ movptr(rdx, Address(rbp, frame::return_addr_offset * wordSize));
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), rdx);
-    __ testl(rax, rax);
-    __ jcc(Assembler::notZero, caller_not_deoptimized);
-
-    // Compute size of arguments for saving when returning to deoptimized caller
-    __ get_method(rax);
-    __ movptr(rax, Address(rax, Method::const_offset()));
-    __ load_unsigned_short(rax, Address(rax, ConstMethod::size_of_parameters_offset()));
-    __ shlptr(rax, Interpreter::logStackElementSize);
-    __ restore_locals();
-    __ subptr(rdi, rax);
-    __ addptr(rdi, wordSize);
-    // Save these arguments
-    __ get_thread(thread);
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), thread, rax, rdi);
-
-    __ remove_activation(vtos, rdx,
-                         /* throw_monitor_exception */ false,
-                         /* install_monitor_exception */ false,
-                         /* notify_jvmdi */ false);
-
-    // Inform deoptimization that it is responsible for restoring these arguments
-    __ get_thread(thread);
-    __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);
-
-    // Continue in deoptimization handler
-    __ jmp(rdx);
-
-    __ bind(caller_not_deoptimized);
-  }
-
-  __ remove_activation(vtos, rdx,
-                       /* throw_monitor_exception */ false,
-                       /* install_monitor_exception */ false,
-                       /* notify_jvmdi */ false);
-
-  // Finish with popframe handling
-  // A previous I2C followed by a deoptimization might have moved the
-  // outgoing arguments further up the stack. PopFrame expects the
-  // mutations to those outgoing arguments to be preserved and other
-  // constraints basically require this frame to look exactly as
-  // though it had previously invoked an interpreted activation with
-  // no space between the top of the expression stack (current
-  // last_sp) and the top of stack. Rather than force deopt to
-  // maintain this kind of invariant all the time we call a small
-  // fixup routine to move the mutated arguments onto the top of our
-  // expression stack if necessary.
-  __ mov(rax, rsp);
-  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ get_thread(thread);
-  // PC must point into interpreter here
-  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
-  __ get_thread(thread);
-  __ reset_last_Java_frame(thread, true, true);
-  // Restore the last_sp and null it out
-  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
-
-  __ restore_bcp();
-  __ restore_locals();
-  // The method data pointer was incremented already during
-  // call profiling. We have to restore the mdp for the current bcp.
-  if (ProfileInterpreter) {
-    __ set_method_data_pointer_for_bcp();
-  }
-
-  // Clear the popframe condition flag
-  __ get_thread(thread);
-  __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
-
-#if INCLUDE_JVMTI
-  {
-    Label L_done;
-    const Register local0 = rdi;
-
-    __ cmpb(Address(rsi, 0), Bytecodes::_invokestatic);
-    __ jcc(Assembler::notEqual, L_done);
-
-    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
-    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
-
-    __ get_method(rdx);
-    __ movptr(rax, Address(local0, 0));
-    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rsi);
-
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, L_done);
-
-    __ movptr(Address(rbx, 0), rax);
-    __ bind(L_done);
-  }
-#endif // INCLUDE_JVMTI
-
-  __ dispatch_next(vtos);
-  // end of PopFrame support
-
-  Interpreter::_remove_activation_entry = __ pc();
-
-  // preserve exception over this code sequence
-  __ pop_ptr(rax);
-  __ get_thread(thread);
-  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
-  // remove the activation (without doing throws on illegalMonitorExceptions)
-  __ remove_activation(vtos, rdx, false, true, false);
-  // restore exception
-  __ get_thread(thread);
-  __ get_vm_result(rax, thread);
-
-  // In between activations - previous activation type unknown yet
-  // compute continuation point - the continuation point expects
-  // the following registers set up:
-  //
-  // rax: exception
-  // rdx: return address/pc that threw exception
-  // rsp: expression stack of caller
-  // rbp: rbp of caller
-  __ push(rax);                                  // save exception
-  __ push(rdx);                                  // save return address
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, rdx);
-  __ mov(rbx, rax);                              // save exception handler
-  __ pop(rdx);                                   // restore return address
-  __ pop(rax);                                   // restore exception
-  // Note that an "issuing PC" is actually the next PC after the call
-  __ jmp(rbx);                                   // jump to exception handler of caller
-}
-
-
-//
-// JVMTI ForceEarlyReturn support
-//
-address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
-  address entry = __ pc();
-  const Register thread = rcx;
-
-  __ restore_bcp();
-  __ restore_locals();
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  __ load_earlyret_value(state);
-
-  __ get_thread(thread);
-  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
-  const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
-
-  // Clear the earlyret state
-  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
-
-  __ remove_activation(state, rsi,
-                       false, /* throw_monitor_exception */
-                       false, /* install_monitor_exception */
-                       true); /* notify_jvmdi */
-  __ jmp(rsi);
-  return entry;
-} // end of ForceEarlyReturn support
-
-
-//------------------------------------------------------------------------------------------------------------------------
-// Helper for vtos entry point generation
-
-void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
-  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
-  Label L;
-  fep = __ pc(); __ push(ftos); __ jmp(L);
-  dep = __ pc(); __ push(dtos); __ jmp(L);
-  lep = __ pc(); __ push(ltos); __ jmp(L);
-  aep = __ pc(); __ push(atos); __ jmp(L);
-  bep = cep = sep =             // fall through
-  iep = __ pc(); __ push(itos); // fall through
-  vep = __ pc(); __ bind(L);    // fall through
-  generate_and_dispatch(t);
-}
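
The fall-through chain means each typed entry spills its top-of-stack cache and then
joins the single vtos template, so only one body is generated per bytecode. The same
control flow in plain C++ (a sketch; names and the long-sized slot are illustrative):

    #include <cstdio>
    #include <vector>

    enum Tos { btos, ctos, stos, atos, itos, ltos, ftos, dtos, vtos };

    static void run_vtos_template(std::vector<long>& stack) { // generate_and_dispatch(t)
      std::printf("dispatch with %zu stack values\n", stack.size());
    }

    void enter(Tos state, long cached, std::vector<long>& stack) {
      switch (state) {
        case btos: case ctos: case stos: case itos:  // these share the itos entry
        case atos: case ltos: case ftos: case dtos:
          stack.push_back(cached);                   // push(tos): spill the cache
          [[fallthrough]];                           // ...and fall into the vtos entry
        case vtos:
          run_vtos_template(stack);
      }
    }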
-
-//------------------------------------------------------------------------------------------------------------------------
-// Generation of individual instructions
-
-// helpers for generate_and_dispatch
-
-
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
- : TemplateInterpreterGenerator(code) {
-   generate_all(); // down here so it can be "virtual"
-}
-
-//------------------------------------------------------------------------------------------------------------------------
-
-// Non-product code
-#ifndef PRODUCT
-address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
-  address entry = __ pc();
-
-  // prepare expression stack
-  __ pop(rcx);          // pop return address so expression stack is 'pure'
-  __ push(state);       // save tosca
-
-  // pass tosca registers as arguments & call tracer
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
-  __ mov(rcx, rax);     // make sure return address is not destroyed by pop(state)
-  __ pop(state);        // restore tosca
-
-  // return
-  __ jmp(rcx);
-
-  return entry;
-}
-
-
-void TemplateInterpreterGenerator::count_bytecode() {
-  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
-}
-
-
-void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
-  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
-}
-
-
-void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
-  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
-  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
-  __ orl(rbx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
-  ExternalAddress table((address) BytecodePairHistogram::_counters);
-  Address index(noreg, rbx, Address::times_4);
-  __ incrementl(ArrayAddress(table, index));
-}
-
-
-void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
-  // Call a little run-time stub to avoid blow-up for each bytecode.
-  // The run-time stub saves the right registers, depending on
-  // the tosca in-state for the given template.
-  assert(Interpreter::trace_code(t->tos_in()) != NULL,
-         "entry must have been generated");
-  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
-}
-
-
-void TemplateInterpreterGenerator::stop_interpreter_at() {
-  Label L;
-  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
-           StopInterpreterAt);
-  __ jcc(Assembler::notEqual, L);
-  __ int3();
-  __ bind(L);
-}
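
StopInterpreterAt pairs with the counting above: once the global bytecode counter
reaches the flag's value, the generated int3 traps into the debugger. The same check
in plain C++ (a sketch; the two globals stand in for BytecodeCounter::_counter_value
and the StopInterpreterAt flag, and SIGTRAP assumes a POSIX host):

    #include <csignal>

    static long counter_value       = 0;     // BytecodeCounter::_counter_value stand-in
    static long stop_interpreter_at = 1000;  // StopInterpreterAt stand-in

    static void maybe_stop() {
      if (++counter_value == stop_interpreter_at)
        std::raise(SIGTRAP);                 // plays the role of __ int3()
    }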
-#endif // !PRODUCT
-#endif // CC_INTERP
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1866 +0,0 @@
-/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "interpreter/bytecodeHistogram.hpp"
-#include "interpreter/interpreter.hpp"
-#include "interpreter/interpreterGenerator.hpp"
-#include "interpreter/interpreterRuntime.hpp"
-#include "interpreter/interp_masm.hpp"
-#include "interpreter/templateTable.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/methodData.hpp"
-#include "oops/method.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "prims/jvmtiThreadState.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vframeArray.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/macros.hpp"
-
-#define __ _masm->
-
-#ifndef CC_INTERP
-
-const int method_offset = frame::interpreter_frame_method_offset * wordSize;
-const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
-const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
-
-//-----------------------------------------------------------------------------
-
-address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
-  address entry = __ pc();
-
-#ifdef ASSERT
-  {
-    Label L;
-    __ lea(rax, Address(rbp,
-                        frame::interpreter_frame_monitor_block_top_offset *
-                        wordSize));
-    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
-                         // grows negative)
-    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
-    __ stop ("interpreter frame not set up");
-    __ bind(L);
-  }
-#endif // ASSERT
-  // Restore bcp under the assumption that the current frame is still
-  // interpreted
-  __ restore_bcp();
-
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-  // throw exception
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::throw_StackOverflowError));
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
-        const char* name) {
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-  // setup parameters
-  // ??? convention: expect aberrant index in register ebx
-  __ lea(c_rarg1, ExternalAddress((address)name));
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::
-                              throw_ArrayIndexOutOfBoundsException),
-             c_rarg1, rbx);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
-  address entry = __ pc();
-
-  // object is at TOS
-  __ pop(c_rarg1);
-
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::
-                              throw_ClassCastException),
-             c_rarg1);
-  return entry;
-}
-
-address TemplateInterpreterGenerator::generate_exception_handler_common(
-        const char* name, const char* message, bool pass_oop) {
-  assert(!pass_oop || message == NULL, "either oop or message but not both");
-  address entry = __ pc();
-  if (pass_oop) {
-    // object is at TOS
-    __ pop(c_rarg2);
-  }
-  // expression stack must be empty before entering the VM if an
-  // exception happened
-  __ empty_expression_stack();
-  // setup parameters
-  __ lea(c_rarg1, ExternalAddress((address)name));
-  if (pass_oop) {
-    __ call_VM(rax, CAST_FROM_FN_PTR(address,
-                                     InterpreterRuntime::
-                                     create_klass_exception),
-               c_rarg1, c_rarg2);
-  } else {
-    // It's kind of lame that ExternalAddress can't take NULL, because
-    // external_word_Relocation will assert.
-    if (message != NULL) {
-      __ lea(c_rarg2, ExternalAddress((address)message));
-    } else {
-      __ movptr(c_rarg2, NULL_WORD);
-    }
-    __ call_VM(rax,
-               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
-               c_rarg1, c_rarg2);
-  }
-  // throw exception
-  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
-  address entry = __ pc();
-  // NULL last_sp until next java call
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
-  __ dispatch_next(state);
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
-  address entry = __ pc();
-
-  // Restore stack bottom in case i2c adjusted stack
-  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
-  // and NULL it as a marker that esp is now tos until the next java call
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
-
-  __ restore_bcp();
-  __ restore_locals();
-
-  if (state == atos) {
-    Register mdp = rbx;
-    Register tmp = rcx;
-    __ profile_return_type(mdp, rax, tmp);
-  }
-
-  const Register cache = rbx;
-  const Register index = rcx;
-  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
-
-  const Register flags = cache;
-  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
-  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
-  __ dispatch_next(state, step);
-
-  return entry;
-}
-
-
-address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
-  address entry = __ pc();
-  // NULL last_sp until next java call
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
-  __ restore_bcp();
-  __ restore_locals();
-#if INCLUDE_JVMCI
-  // Check if we need to take lock at entry of synchronized method.
-  if (UseJVMCICompiler) {
-    Label L;
-    __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
-    __ jcc(Assembler::zero, L);
-    // Clear flag.
-    __ movb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
-    // Satisfy calling convention for lock_method().
-    __ get_method(rbx);
-    // Take lock.
-    lock_method();
-    __ bind(L);
-  }
-#endif
-  // handle exceptions
-  {
-    Label L;
-    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
-    __ jcc(Assembler::zero, L);
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-  __ dispatch_next(state, step);
-  return entry;
-}
-
-int AbstractInterpreter::BasicType_as_index(BasicType type) {
-  int i = 0;
-  switch (type) {
-    case T_BOOLEAN: i = 0; break;
-    case T_CHAR   : i = 1; break;
-    case T_BYTE   : i = 2; break;
-    case T_SHORT  : i = 3; break;
-    case T_INT    : i = 4; break;
-    case T_LONG   : i = 5; break;
-    case T_VOID   : i = 6; break;
-    case T_FLOAT  : i = 7; break;
-    case T_DOUBLE : i = 8; break;
-    case T_OBJECT : i = 9; break;
-    case T_ARRAY  : i = 9; break;
-    default       : ShouldNotReachHere();
-  }
-  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
-         "index out of bounds");
-  return i;
-}
-
-
-address TemplateInterpreterGenerator::generate_result_handler_for(
-        BasicType type) {
-  address entry = __ pc();
-  switch (type) {
-  case T_BOOLEAN: __ c2bool(rax);            break;
-  case T_CHAR   : __ movzwl(rax, rax);       break;
-  case T_BYTE   : __ sign_extend_byte(rax);  break;
-  case T_SHORT  : __ sign_extend_short(rax); break;
-  case T_INT    : /* nothing to do */        break;
-  case T_LONG   : /* nothing to do */        break;
-  case T_VOID   : /* nothing to do */        break;
-  case T_FLOAT  : /* nothing to do */        break;
-  case T_DOUBLE : /* nothing to do */        break;
-  case T_OBJECT :
-    // retrieve result from frame
-    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
-    // and verify it
-    __ verify_oop(rax);
-    break;
-  default       : ShouldNotReachHere();
-  }
-  __ ret(0);                                   // return from result handler
-  return entry;
-}
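
The sub-word cases exist because the callee leaves a full register in rax and the
JVM-level type decides how the low bits must be normalized. A C++ sketch of the same
normalization, keyed by JVM descriptor characters (illustrative only):

    #include <cstdint>

    // 'type': Z = boolean, B = byte, S = short, C = char; others pass through.
    int64_t normalize_result(int64_t raw, char type) {
      switch (type) {
        case 'Z': return (uint8_t)raw != 0;  // c2bool: low byte non-zero -> 1
        case 'B': return (int8_t)raw;        // sign_extend_byte
        case 'S': return (int16_t)raw;       // sign_extend_short
        case 'C': return (uint16_t)raw;      // movzwl: zero-extend to 16 bits
        default:  return raw;                // int/long/void/float/double: unchanged
      }
    }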
-
-address TemplateInterpreterGenerator::generate_safept_entry_for(
-        TosState state,
-        address runtime_entry) {
-  address entry = __ pc();
-  __ push(state);
-  __ call_VM(noreg, runtime_entry);
-  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
-  return entry;
-}
-
-
-
-// Helpers for commoning out cases in the various type of method entries.
-//
-
-
-// increment invocation count & check for overflow
-//
-// Note: checking for negative value instead of overflow
-//       so we have a 'sticky' overflow test
-//
-// rbx: method
-// ecx: invocation counter
-//
-void InterpreterGenerator::generate_counter_incr(
-        Label* overflow,
-        Label* profile_method,
-        Label* profile_method_continue) {
-  Label done;
-  // Note: In tiered we increment either counters in Method* or in MDO, depending on whether we're profiling or not.
-  if (TieredCompilation) {
-    int increment = InvocationCounter::count_increment;
-    Label no_mdo;
-    if (ProfileInterpreter) {
-      // Are we profiling?
-      __ movptr(rax, Address(rbx, Method::method_data_offset()));
-      __ testptr(rax, rax);
-      __ jccb(Assembler::zero, no_mdo);
-      // Increment counter in the MDO
-      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
-                                                in_bytes(InvocationCounter::counter_offset()));
-      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
-      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
-      __ jmp(done);
-    }
-    __ bind(no_mdo);
-    // Increment counter in MethodCounters
-    const Address invocation_counter(rax,
-                  MethodCounters::invocation_counter_offset() +
-                  InvocationCounter::counter_offset());
-    __ get_method_counters(rbx, rax, done);
-    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
-    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
-                               false, Assembler::zero, overflow);
-    __ bind(done);
-  } else { // not TieredCompilation
-    const Address backedge_counter(rax,
-                  MethodCounters::backedge_counter_offset() +
-                  InvocationCounter::counter_offset());
-    const Address invocation_counter(rax,
-                  MethodCounters::invocation_counter_offset() +
-                  InvocationCounter::counter_offset());
-
-    __ get_method_counters(rbx, rax, done);
-
-    if (ProfileInterpreter) {
-      __ incrementl(Address(rax,
-              MethodCounters::interpreter_invocation_counter_offset()));
-    }
-    // Update standard invocation counters
-    __ movl(rcx, invocation_counter);
-    __ incrementl(rcx, InvocationCounter::count_increment);
-    __ movl(invocation_counter, rcx); // save invocation count
-
-    __ movl(rax, backedge_counter);   // load backedge counter
-    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
-
-    __ addl(rcx, rax);                // add both counters
-
-    // profile_method is non-null only for interpreted methods, so
-    // (profile_method != NULL) == !native_call
-
-    if (ProfileInterpreter && profile_method != NULL) {
-      // Test to see if we should create a method data oop
-      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
-      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
-      __ jcc(Assembler::less, *profile_method_continue);
-
-      // if no method data exists, go to profile_method
-      __ test_method_data_pointer(rax, *profile_method);
-    }
-
-    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
-    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
-    __ jcc(Assembler::aboveEqual, *overflow);
-    __ bind(done);
-  }
-}
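
In the tiered path, increment_mask_and_jump folds the increment and the overflow test
into a single masked check: the counter's low status bits are ignored and the overflow
branch is taken when the masked bits wrap around to zero. A sketch of that predicate
(the shape is assumed, not HotSpot's actual signature):

    #include <cstdint>

    // Returns true when the overflow path should be taken.
    static bool increment_mask_and_check(uint32_t* counter, uint32_t increment,
                                         uint32_t mask) {
      *counter += increment;          // low bits carry status, high bits count
      return (*counter & mask) == 0;  // overflow when the masked count wraps
    }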
-
-void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
-
-  // Asm interpreter on entry
-  // r14 - locals
-  // r13 - bcp
-  // rbx - method
-  // edx - cpool --- DOES NOT APPEAR TO BE TRUE
-  // rbp - interpreter frame
-
-  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
-  // Everything as it was on entry
-  // rdx is not restored. Doesn't appear to really be set.
-
-  // InterpreterRuntime::frequency_counter_overflow takes two
-  // arguments, the first (thread) is passed by call_VM, the second
-  // indicates if the counter overflow occurs at a backwards branch
-  // (NULL bcp).  We pass zero for it.  The call returns the address
-  // of the verified entry point for the method or NULL if the
-  // compilation did not complete (either went background or bailed
-  // out).
-  __ movl(c_rarg1, 0);
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::frequency_counter_overflow),
-             c_rarg1);
-
-  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
-  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
-  // and jump to the interpreted entry.
-  __ jmp(*do_continue, relocInfo::none);
-}
-
-// See if we've got enough room on the stack for locals plus overhead.
-// The expression stack grows down incrementally, so the normal guard
-// page mechanism will work for that.
-//
-// NOTE: Since the additional locals are also always pushed (this wasn't
-// obvious in generate_fixed_frame), the guard should work for them
-// too.
-//
-// Args:
-//      rdx: number of additional locals this frame needs (what we must check)
-//      rbx: Method*
-//
-// Kills:
-//      rax
-void InterpreterGenerator::generate_stack_overflow_check(void) {
-
-  // monitor entry size: see picture of stack in frame_x86.hpp
-  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
-
-  // total overhead size: entry_size + (saved rbp through expr stack
-  // bottom).  be sure to change this if you add/subtract anything
-  // to/from the overhead area
-  const int overhead_size =
-    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
-
-  const int page_size = os::vm_page_size();
-
-  Label after_frame_check;
-
-  // see if the frame is greater than one page in size. If so,
-  // then we need to verify there is enough stack space remaining
-  // for the additional locals.
-  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
-  __ jcc(Assembler::belowEqual, after_frame_check);
-
-  // compute rsp as if this were going to be the last frame on
-  // the stack before the red zone
-
-  const Address stack_base(r15_thread, Thread::stack_base_offset());
-  const Address stack_size(r15_thread, Thread::stack_size_offset());
-
-  // locals + overhead, in bytes
-  __ mov(rax, rdx);
-  __ shlptr(rax, Interpreter::logStackElementSize);  // 2 slots per parameter.
-  __ addptr(rax, overhead_size);
-
-#ifdef ASSERT
-  Label stack_base_okay, stack_size_okay;
-  // verify that thread stack base is non-zero
-  __ cmpptr(stack_base, (int32_t)NULL_WORD);
-  __ jcc(Assembler::notEqual, stack_base_okay);
-  __ stop("stack base is zero");
-  __ bind(stack_base_okay);
-  // verify that thread stack size is non-zero
-  __ cmpptr(stack_size, 0);
-  __ jcc(Assembler::notEqual, stack_size_okay);
-  __ stop("stack size is zero");
-  __ bind(stack_size_okay);
-#endif
-
-  // Add stack base to locals and subtract stack size
-  __ addptr(rax, stack_base);
-  __ subptr(rax, stack_size);
-
-  // Use the maximum number of pages we might bang.
-  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
-                                                                              (StackRedPages+StackYellowPages);
-
-  // add in the red and yellow zone sizes
-  __ addptr(rax, max_pages * page_size);
-
-  // check against the current stack bottom
-  __ cmpptr(rsp, rax);
-  __ jcc(Assembler::above, after_frame_check);
-
-  // Restore sender's sp as SP. This is necessary if the sender's
-  // frame is an extended compiled frame (see gen_c2i_adapter())
-  // and safer anyway in case of JSR292 adaptations.
-
-  __ pop(rax); // return address must be moved if SP is changed
-  __ mov(rsp, r13);
-  __ push(rax);
-
-  // Note: the restored frame is not necessarily interpreted.
-  // Use the shared runtime version of the StackOverflowError.
-  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
-  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
-
-  // all done with frame size check
-  __ bind(after_frame_check);
-}
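
A worked example of the fast-path cutoff with assumed numbers (4K pages, 8-byte
words, one two-word monitor entry, eight words of fixed overhead):

    #include <cstdio>

    int main() {
      const int page_size     = 4096;
      const int word_size     = 8;
      const int entry_size    = 2 * word_size;               // one BasicObjectLock
      const int overhead_size = 8 * word_size + entry_size;  // saved rbp..stack bottom
      const int element_size  = word_size;
      // Frames needing no more than this many extra locals skip the explicit check:
      std::printf("limit = %d locals\n", (page_size - overhead_size) / element_size);
      return 0;  // prints: limit = 502 locals
    }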
-
-// Allocate monitor and lock method (asm interpreter)
-//
-// Args:
-//      rbx: Method*
-//      r14: locals
-//
-// Kills:
-//      rax
-//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
-//      rscratch1, rscratch2 (scratch regs)
-void TemplateInterpreterGenerator::lock_method() {
-  // synchronize method
-  const Address access_flags(rbx, Method::access_flags_offset());
-  const Address monitor_block_top(
-        rbp,
-        frame::interpreter_frame_monitor_block_top_offset * wordSize);
-  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
-
-#ifdef ASSERT
-  {
-    Label L;
-    __ movl(rax, access_flags);
-    __ testl(rax, JVM_ACC_SYNCHRONIZED);
-    __ jcc(Assembler::notZero, L);
-    __ stop("method doesn't need synchronization");
-    __ bind(L);
-  }
-#endif // ASSERT
-
-  // get synchronization object
-  {
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    Label done;
-    __ movl(rax, access_flags);
-    __ testl(rax, JVM_ACC_STATIC);
-    // get receiver (assume this is frequent case)
-    __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
-    __ jcc(Assembler::zero, done);
-    __ movptr(rax, Address(rbx, Method::const_offset()));
-    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
-    __ movptr(rax, Address(rax,
-                           ConstantPool::pool_holder_offset_in_bytes()));
-    __ movptr(rax, Address(rax, mirror_offset));
-
-#ifdef ASSERT
-    {
-      Label L;
-      __ testptr(rax, rax);
-      __ jcc(Assembler::notZero, L);
-      __ stop("synchronization object is NULL");
-      __ bind(L);
-    }
-#endif // ASSERT
-
-    __ bind(done);
-  }
-
-  // add space for monitor & lock
-  __ subptr(rsp, entry_size); // add space for a monitor entry
-  __ movptr(monitor_block_top, rsp);  // set new monitor block top
-  // store object
-  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
-  __ movptr(c_rarg1, rsp); // object address
-  __ lock_object(c_rarg1);
-}
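
For reference, the two words carved out below monitor_block_top mirror a
BasicObjectLock: a displaced-header slot followed by the object slot that the code
stores rax into. A C++ mirror of that shape (field names are descriptive, not
HotSpot's declarations):

    #include <cstdint>

    struct BasicLock       { intptr_t displaced_header; };  // mark word parked here
    struct BasicObjectLock {
      BasicLock lock;  // filled by lock_object() while the monitor is held
      void*     obj;   // receiver, or the class mirror for static methods
    };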
-
-// Generate a fixed interpreter frame. This is identical setup for
-// interpreted methods and for native methods hence the shared code.
-//
-// Args:
-//      rax: return address
-//      rbx: Method*
-//      r14: pointer to locals
-//      r13: sender sp
-//      rdx: cp cache
-void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
-  // initialize fixed part of activation frame
-  __ push(rax);        // save return address
-  __ enter();          // save old & set new rbp
-  __ push(r13);        // set sender sp
-  __ push((int)NULL_WORD); // leave last_sp as null
-  __ movptr(r13, Address(rbx, Method::const_offset()));      // get ConstMethod*
-  __ lea(r13, Address(r13, ConstMethod::codes_offset())); // get codebase
-  __ push(rbx);        // save Method*
-  if (ProfileInterpreter) {
-    Label method_data_continue;
-    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
-    __ testptr(rdx, rdx);
-    __ jcc(Assembler::zero, method_data_continue);
-    __ addptr(rdx, in_bytes(MethodData::data_offset()));
-    __ bind(method_data_continue);
-    __ push(rdx);      // set the mdp (method data pointer)
-  } else {
-    __ push(0);
-  }
-
-  __ movptr(rdx, Address(rbx, Method::const_offset()));
-  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
-  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
-  __ push(rdx); // set constant pool cache
-  __ push(r14); // set locals pointer
-  if (native_call) {
-    __ push(0); // no bcp
-  } else {
-    __ push(r13); // set bcp
-  }
-  __ push(0); // reserve word for pointer to expression stack bottom
-  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
-}
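
Reading the pushes in order gives the frame layout relative to the new rbp, one word
per slot (a sketch; the authoritative offsets are the frame::interpreter_frame_*_offset
constants):

    rbp + 1 : return address       push(rax)
    rbp + 0 : saved rbp            enter()
    rbp - 1 : sender sp            push(r13)
    rbp - 2 : last_sp              NULL until the next Java call
    rbp - 3 : Method*              push(rbx)
    rbp - 4 : mdp                  method data pointer, or 0
    rbp - 5 : cp cache             constant pool cache
    rbp - 6 : locals               push(r14)
    rbp - 7 : bcp                  0 for native frames
    rbp - 8 : expr stack bottom    initialized to point at itself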
-
-// End of helpers
-
-// Method entry for java.lang.ref.Reference.get.
-address InterpreterGenerator::generate_Reference_get_entry(void) {
-#if INCLUDE_ALL_GCS
-  // Code: _aload_0, _getfield, _areturn
-  // parameter size = 1
-  //
-  // The code that gets generated by this routine is split into 2 parts:
-  //    1. The "intrinsified" code for G1 (or any SATB based GC),
-  //    2. The slow path - which is an expansion of the regular method entry.
-  //
-  // Notes:
-  // * In the G1 code we do not check whether we need to block for
-  //   a safepoint. If G1 is enabled then we must execute the specialized
-  //   code for Reference.get (except when the Reference object is null)
-  //   so that we can log the value in the referent field with an SATB
-  //   update buffer.
-  //   If the code for the getfield template is modified so that the
-  //   G1 pre-barrier code is executed when the current method is
-  //   Reference.get() then going through the normal method entry
-  //   will be fine.
-  // * The G1 code can, however, check the receiver object (the instance
-  //   of java.lang.Reference) and jump to the slow path if null. If the
-  //   Reference object is null then we obviously cannot fetch the referent
-  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
-  //   regular method entry code to generate the NPE.
-  //
-  // rbx: Method*
-
-  // r13: senderSP must be preserved for the slow path; set SP to it on the fast path
-
-  address entry = __ pc();
-
-  const int referent_offset = java_lang_ref_Reference::referent_offset;
-  guarantee(referent_offset > 0, "referent offset not initialized");
-
-  if (UseG1GC) {
-    Label slow_path;
-    // rbx: method
-
-    // Check if local 0 != NULL
-    // If the receiver is null then it is OK to jump to the slow path.
-    __ movptr(rax, Address(rsp, wordSize));
-
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, slow_path);
-
-    // rax: local 0
-    // rbx: method (but can be used as scratch now)
-    // rdx: scratch
-    // rdi: scratch
-
-    // Load the value of the referent field.
-    const Address field_address(rax, referent_offset);
-    __ load_heap_oop(rax, field_address);
-
-    // Generate the G1 pre-barrier code to log the value of
-    // the referent field in an SATB buffer.
-    __ g1_write_barrier_pre(noreg /* obj */,
-                            rax /* pre_val */,
-                            r15_thread /* thread */,
-                            rbx /* tmp */,
-                            true /* tosca_live */,
-                            true /* expand_call */);
-
-    // _areturn
-    __ pop(rdi);                // get return address
-    __ mov(rsp, r13);           // set sp to sender sp
-    __ jmp(rdi);
-    __ ret(0);
-
-    // generate a vanilla interpreter entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
-    return entry;
-  }
-#endif // INCLUDE_ALL_GCS
-
-  // If G1 is not enabled then attempt to go through the accessor entry point
-  // Reference.get is an accessor
-  return NULL;
-}
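
Beyond the load itself, the fast path's one obligation is G1's snapshot-at-the-beginning
invariant: the referent value just read must be logged as a pre-value. The contract in
plain C++ (a sketch; enqueue_in_satb_buffer is an assumed helper, not a HotSpot API):

    struct Ref { void* referent; };

    static void enqueue_in_satb_buffer(void*) { /* would log the G1 pre-value */ }

    void* reference_get(Ref* self) {
      if (self == nullptr) return nullptr;  // slow path raises the NPE instead
      void* value = self->referent;         // load_heap_oop
      if (value != nullptr)
        enqueue_in_satb_buffer(value);      // g1_write_barrier_pre(pre_val = value)
      return value;
    }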
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.update(int crc, int b)
- */
-address InterpreterGenerator::generate_CRC32_update_entry() {
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    // rbx: Method*
-    // r13: senderSP must be preserved for the slow path; set SP to it on the fast path
-    // c_rarg0: scratch (rdi on non-Win64, rcx on Win64)
-    // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)
-
-    Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-    __ jcc(Assembler::notEqual, slow_path);
-
-    // We don't generate a local frame and don't align the stack because
-    // we call stub code and there is no safepoint on this path.
-
-    // Load parameters
-    const Register crc = rax;  // crc
-    const Register val = c_rarg0;  // source java byte value
-    const Register tbl = c_rarg1;  // scratch
-
-    // Arguments are reversed on java expression stack
-    __ movl(val, Address(rsp,   wordSize)); // byte value
-    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC
-
-    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
-    __ notl(crc); // ~crc
-    __ update_byte_crc32(crc, val, tbl);
-    __ notl(crc); // ~crc
-    // result in rax
-
-    // _areturn
-    __ pop(rdi);                // get return address
-    __ mov(rsp, r13);           // set sp to sender sp
-    __ jmp(rdi);
-
-    // generate a vanilla native entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
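
The notl/update/notl sequence matches how java.util.zip.CRC32 keeps its state: the
public crc value is the bitwise complement of the running remainder. A self-contained
C++ sketch of the one-byte update (the table is computed locally here; the stub uses
the precomputed StubRoutines::crc_table_addr() instead):

    #include <cstdint>
    #include <cstdio>

    static uint32_t table[256];

    static void init_table() {
      for (uint32_t i = 0; i < 256; i++) {
        uint32_t c = i;
        for (int k = 0; k < 8; k++)
          c = (c & 1) ? 0xEDB88320u ^ (c >> 1) : (c >> 1);
        table[i] = c;
      }
    }

    static uint32_t crc32_update(uint32_t crc, uint8_t b) {
      crc = ~crc;                                   // notl(crc)
      crc = table[(crc ^ b) & 0xFF] ^ (crc >> 8);   // update_byte_crc32
      return ~crc;                                  // notl(crc)
    }

    int main() {
      init_table();
      std::printf("%08x\n", crc32_update(0, 'a'));  // same result as CRC32.update(0, 'a')
      return 0;
    }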
-
-/**
- * Method entry for static native methods:
- *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
- *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
- */
-address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-  if (UseCRC32Intrinsics) {
-    address entry = __ pc();
-
-    // rbx: Method*
-    // r13: senderSP must be preserved for the slow path; set SP to it on the fast path
-
-    Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-    __ jcc(Assembler::notEqual, slow_path);
-
-    // We don't generate a local frame and don't align the stack because
-    // we call stub code and there is no safepoint on this path.
-
-    // Load parameters
-    const Register crc = c_rarg0;  // crc
-    const Register buf = c_rarg1;  // source java byte array address
-    const Register len = c_rarg2;  // length
-    const Register off = len;      // offset (aliases 'len'; 'off' is dead before 'len' is loaded)
-
-    // Arguments are reversed on java expression stack
-    // Calculate address of start element
-    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
-      __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
-      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
-      __ addq(buf, off); // + offset
-      __ movl(crc,   Address(rsp, 5*wordSize)); // Initial CRC
-    } else {
-      __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
-      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
-      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
-      __ addq(buf, off); // + offset
-      __ movl(crc,   Address(rsp, 4*wordSize)); // Initial CRC
-    }
-    // Can now load 'len' since we're finished with 'off'
-    __ movl(len, Address(rsp, wordSize)); // Length
-
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
-    // result in rax
-
-    // _areturn
-    __ pop(rdi);                // get return address
-    __ mov(rsp, r13);           // set sp to sender sp
-    __ jmp(rdi);
-
-    // generate a vanilla native entry as the slow path
-    __ bind(slow_path);
-    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
-    return entry;
-  }
-  return NULL;
-}
-
-/**
-* Method entry for static native methods:
-*   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
-*   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
-*/
-address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-  if (UseCRC32CIntrinsics) {
-    address entry = __ pc();
-    // Load parameters
-    const Register crc = c_rarg0;  // crc
-    const Register buf = c_rarg1;  // source java byte array address
-    const Register len = c_rarg2;
-    const Register off = c_rarg3;  // offset
-    const Register end = len;
-
-    // Arguments are reversed on java expression stack
-    // Calculate address of start element
-    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
-      __ movptr(buf, Address(rsp, 3 * wordSize)); // long buf
-      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
-      __ addq(buf, off); // + offset
-      __ movl(crc, Address(rsp, 5 * wordSize)); // Initial CRC
-      // Note on 5 * wordSize vs. 4 * wordSize:
-      // *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
-      //                                                   4         2,3          1        0
-      // end starts at SP + 8
-      // The Java(R) Virtual Machine Specification Java SE 7 Edition
-      // 4.10.2.3. Values of Types long and double
-      //    "When calculating operand stack length, values of type long and double have length two."
-    } else {
-      __ movptr(buf, Address(rsp, 3 * wordSize)); // byte[] array
-      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
-      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
-      __ addq(buf, off); // + offset
-      __ movl(crc, Address(rsp, 4 * wordSize)); // Initial CRC
-    }
-    __ movl(end, Address(rsp, wordSize)); // end
-    __ subl(end, off); // end - off
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()), crc, buf, len);
-    // result in rax
-    // _areturn
-    __ pop(rdi);                // get return address
-    __ mov(rsp, r13);           // set sp to sender sp
-    __ jmp(rdi);
-
-    return entry;
-  }
-
-  return NULL;
-}
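// The "5 * wordSize vs. 4 * wordSize" note above is pure slot arithmetic: a
// long takes two expression-stack slots (JVMS 4.10.2.3) and the return
// address still sits at SP, so slot k lives at SP + (k + 1) * wordSize.
// A worked example reproducing the offsets used by the movl/movptr above:

#include <cstdio>

int main() {
  const int wordSize = 8;  // x86_64
  // updateByteBuffer(int crc, long address, int off, int end),
  // slots counted from the top of the operand stack:
  struct { const char* name; int slot; } args[] = {
    { "end",     0 },  // SP + 8
    { "off",     1 },  // SP + 16
    { "address", 2 },  // slots 2 and 3 (long)
    { "crc",     4 },  // SP + 40 == 5 * wordSize
  };
  for (const auto& a : args)
    std::printf("%-8s at rsp + %2d\n", a.name, (a.slot + 1) * wordSize);
  return 0;
}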
-
-// Interpreter stub for calling a native method. (asm interpreter)
-// This sets up a somewhat different looking stack for calling the
-// native method than the typical interpreter frame setup.
-address InterpreterGenerator::generate_native_entry(bool synchronized) {
-  // determine code generation flags
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // rbx: Method*
-  // r13: sender sp
-
-  address entry_point = __ pc();
-
-  const Address constMethod       (rbx, Method::const_offset());
-  const Address access_flags      (rbx, Method::access_flags_offset());
-  const Address size_of_parameters(rcx, ConstMethod::
-                                        size_of_parameters_offset());
-
-
-  // get parameter size (always needed)
-  __ movptr(rcx, constMethod);
-  __ load_unsigned_short(rcx, size_of_parameters);
-
-  // native calls don't need the stack size check since they have no
-  // expression stack and the arguments are already on the stack and
-  // we only add a handful of words to the stack
-
-  // rbx: Method*
-  // rcx: size of parameters
-  // r13: sender sp
-  __ pop(rax);                                       // get return address
-
-  // for natives the size of locals is zero
-
-  // compute beginning of parameters (r14)
-  __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
-
-  // add 2 zero-initialized slots for native calls
-  // initialize result_handler slot
-  __ push((int) NULL_WORD);
-  // slot for oop temp
-  // (static native method holder mirror/jni oop result)
-  __ push((int) NULL_WORD);
-
-  // initialize fixed part of activation frame
-  generate_fixed_frame(true);
-
-  // make sure method is native & not abstract
-#ifdef ASSERT
-  __ movl(rax, access_flags);
-  {
-    Label L;
-    __ testl(rax, JVM_ACC_NATIVE);
-    __ jcc(Assembler::notZero, L);
-    __ stop("tried to execute non-native method as native");
-    __ bind(L);
-  }
-  {
-    Label L;
-    __ testl(rax, JVM_ACC_ABSTRACT);
-    __ jcc(Assembler::zero, L);
-    __ stop("tried to execute abstract method in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // Since at this point in the method invocation the exception handler
-  // would try to exit the monitor of synchronized methods which hasn't
-  // been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. The remove_activation will
-  // check this flag.
-
-  const Address do_not_unlock_if_synchronized(r15_thread,
-        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
-  __ movbool(do_not_unlock_if_synchronized, true);
-
-  // increment invocation count & check for overflow
-  Label invocation_counter_overflow;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
-  }
-
-  Label continue_after_compile;
-  __ bind(continue_after_compile);
-
-  bang_stack_shadow_pages(true);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ movbool(do_not_unlock_if_synchronized, false);
-
-  // check for synchronized methods
-  // Must happen AFTER invocation_counter check and stack overflow check,
-  // so method is not locked if overflows.
-  if (synchronized) {
-    lock_method();
-  } else {
-    // no synchronization necessary
-#ifdef ASSERT
-    {
-      Label L;
-      __ movl(rax, access_flags);
-      __ testl(rax, JVM_ACC_SYNCHRONIZED);
-      __ jcc(Assembler::zero, L);
-      __ stop("method needs synchronization");
-      __ bind(L);
-    }
-#endif
-  }
-
-  // start execution
-#ifdef ASSERT
-  {
-    Label L;
-    const Address monitor_block_top(rbp,
-                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
-    __ movptr(rax, monitor_block_top);
-    __ cmpptr(rax, rsp);
-    __ jcc(Assembler::equal, L);
-    __ stop("broken stack frame setup in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // jvmti support
-  __ notify_method_entry();
-
-  // work registers
-  const Register method = rbx;
-  const Register t      = r11;
-
-  // allocate space for parameters
-  __ get_method(method);
-  __ movptr(t, Address(method, Method::const_offset()));
-  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
-  __ shll(t, Interpreter::logStackElementSize);
-
-  __ subptr(rsp, t);
-  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
-  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
-
-  // get signature handler
-  {
-    Label L;
-    __ movptr(t, Address(method, Method::signature_handler_offset()));
-    __ testptr(t, t);
-    __ jcc(Assembler::notZero, L);
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::prepare_native_call),
-               method);
-    __ get_method(method);
-    __ movptr(t, Address(method, Method::signature_handler_offset()));
-    __ bind(L);
-  }
-
-  // call signature handler
-  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == r14,
-         "adjust this code");
-  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
-         "adjust this code");
-  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
-          "adjust this code");
-
-  // The generated handlers do not touch RBX (the method oop).
-  // However, large signatures cannot be cached and are generated
-  // each time here.  The slow-path generator can do a GC on return,
-  // so we must reload it after the call.
-  __ call(t);
-  __ get_method(method);        // slow path can do a GC, reload RBX
-
-
-  // result handler is in rax
-  // set result handler
-  __ movptr(Address(rbp,
-                    (frame::interpreter_frame_result_handler_offset) * wordSize),
-            rax);
-
-  // pass mirror handle if static call
-  {
-    Label L;
-    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
-    __ movl(t, Address(method, Method::access_flags_offset()));
-    __ testl(t, JVM_ACC_STATIC);
-    __ jcc(Assembler::zero, L);
-    // get mirror
-    __ movptr(t, Address(method, Method::const_offset()));
-    __ movptr(t, Address(t, ConstMethod::constants_offset()));
-    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
-    __ movptr(t, Address(t, mirror_offset));
-    // copy mirror into activation frame
-    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
-            t);
-    // pass handle to mirror
-    __ lea(c_rarg1,
-           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
-    __ bind(L);
-  }
-
-  // get native function entry point
-  {
-    Label L;
-    __ movptr(rax, Address(method, Method::native_function_offset()));
-    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
-    __ movptr(rscratch2, unsatisfied.addr());
-    __ cmpptr(rax, rscratch2);
-    __ jcc(Assembler::notEqual, L);
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::prepare_native_call),
-               method);
-    __ get_method(method);
-    __ movptr(rax, Address(method, Method::native_function_offset()));
-    __ bind(L);
-  }
-
-  // pass JNIEnv
-  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));
-
-  // It is enough that the pc() points into the right code
-  // segment. It does not have to be the correct return pc.
-  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
-
-  // change thread state
-#ifdef ASSERT
-  {
-    Label L;
-    __ movl(t, Address(r15_thread, JavaThread::thread_state_offset()));
-    __ cmpl(t, _thread_in_Java);
-    __ jcc(Assembler::equal, L);
-    __ stop("Wrong thread state in native stub");
-    __ bind(L);
-  }
-#endif
-
-  // Change state to native
-
-  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
-          _thread_in_native);
-
-  // Call the native method.
-  __ call(rax);
-  // result potentially in rax or xmm0
-
-  // Verify or restore cpu control state after JNI call
-  __ restore_cpu_control_state_after_jni();
-
-  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
-  // in order to extract the result of a method call. If the order of these
-  // pushes change or anything else is added to the stack then the code in
-  // interpreter_frame_result must also change.
-
-  __ push(dtos);
-  __ push(ltos);
-
-  // change thread state
-  __ movl(Address(r15_thread, JavaThread::thread_state_offset()),
-          _thread_in_native_trans);
-
-  if (os::is_MP()) {
-    if (UseMembar) {
-      // Force this write out before the read below
-      __ membar(Assembler::Membar_mask_bits(
-           Assembler::LoadLoad | Assembler::LoadStore |
-           Assembler::StoreLoad | Assembler::StoreStore));
-    } else {
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(r15_thread, rscratch2);
-    }
-  }
-
-  // check for safepoint operation in progress and/or pending suspend requests
-  {
-    Label Continue;
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-
-    Label L;
-    __ jcc(Assembler::notEqual, L);
-    __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
-    __ jcc(Assembler::equal, Continue);
-    __ bind(L);
-
-    // Don't use call_VM as it will see a possible pending exception
-    // and forward it and never return here preventing us from
-    // clearing _last_native_pc down below.  Also can't use
-    // call_VM_leaf either as it will check to see if r13 & r14 are
-    // preserved and correspond to the bcp/locals pointers. So we do a
-    // runtime call by hand.
-    //
-    __ mov(c_rarg0, r15_thread);
-    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
-    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
-    __ andptr(rsp, -16); // align stack as required by ABI
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
-    __ mov(rsp, r12); // restore sp
-    __ reinit_heapbase();
-    __ bind(Continue);
-  }
-
-  // change thread state
-  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
-
-  // reset_last_Java_frame
-  __ reset_last_Java_frame(true, true);
-
-  // reset handle block
-  __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset()));
-  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
-
-  // If result is an oop unbox and store it in frame where gc will see it
-  // and result handler will pick it up
-
-  {
-    Label no_oop, store_result;
-    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
-    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
-    __ jcc(Assembler::notEqual, no_oop);
-    // retrieve result
-    __ pop(ltos);
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, store_result);
-    __ movptr(rax, Address(rax, 0));
-    __ bind(store_result);
-    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
-    // keep stack depth as expected by pushing the oop, which will eventually be discarded
-    __ push(ltos);
-    __ bind(no_oop);
-  }
-
-
-  {
-    Label no_reguard;
-    __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()),
-            JavaThread::stack_guard_yellow_disabled);
-    __ jcc(Assembler::notEqual, no_reguard);
-
-    __ pusha(); // XXX only save smashed registers
-    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
-    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
-    __ andptr(rsp, -16); // align stack as required by ABI
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
-    __ mov(rsp, r12); // restore sp
-    __ popa(); // XXX only restore smashed registers
-    __ reinit_heapbase();
-
-    __ bind(no_reguard);
-  }
-
-
-  // The method register is junk from after the thread_in_native transition
-  // until here.  Also can't call_VM until the bcp has been
-  // restored.  Need bcp for throwing exception below so get it now.
-  __ get_method(method);
-
-  // restore r13 to have legal interpreter frame, i.e., bci == 0 <=>
-  // r13 == code_base()
-  __ movptr(r13, Address(method, Method::const_offset()));   // get ConstMethod*
-  __ lea(r13, Address(r13, ConstMethod::codes_offset()));    // get codebase
-  // handle exceptions (exception handling will handle unlocking!)
-  {
-    Label L;
-    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
-    __ jcc(Assembler::zero, L);
-    // Note: At some point we may want to unify this with the code
-    // used in call_VM_base(); i.e., we should use the
-    // StubRoutines::forward_exception code. For now this doesn't work
-    // here because the rsp is not correctly set at this point.
-    __ MacroAssembler::call_VM(noreg,
-                               CAST_FROM_FN_PTR(address,
-                               InterpreterRuntime::throw_pending_exception));
-    __ should_not_reach_here();
-    __ bind(L);
-  }
-
-  // do unlocking if necessary
-  {
-    Label L;
-    __ movl(t, Address(method, Method::access_flags_offset()));
-    __ testl(t, JVM_ACC_SYNCHRONIZED);
-    __ jcc(Assembler::zero, L);
-    // the code below should be shared with interpreter macro
-    // assembler implementation
-    {
-      Label unlock;
-      // BasicObjectLock will be first in list, since this is a
-      // synchronized method. However, need to check that the object
-      // has not been unlocked by an explicit monitorexit bytecode.
-      const Address monitor(rbp,
-                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
-                                       wordSize - sizeof(BasicObjectLock)));
-
-      // monitor expected in c_rarg1 for slow unlock path
-      __ lea(c_rarg1, monitor); // address of first monitor
-
-      __ movptr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
-      __ testptr(t, t);
-      __ jcc(Assembler::notZero, unlock);
-
-      // Entry already unlocked, need to throw exception
-      __ MacroAssembler::call_VM(noreg,
-                                 CAST_FROM_FN_PTR(address,
-                   InterpreterRuntime::throw_illegal_monitor_state_exception));
-      __ should_not_reach_here();
-
-      __ bind(unlock);
-      __ unlock_object(c_rarg1);
-    }
-    __ bind(L);
-  }
-
-  // jvmti support
-  // Note: This must happen _after_ handling/throwing any exceptions since
-  //       the exception handler code notifies the runtime of method exits
-  //       too. If this happens before, method entry/exit notifications are
-  //       not properly paired (was bug - gri 11/22/99).
-  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
-
-  // restore potential result in edx:eax, call result handler to
-  // restore potential result in ST0 & handle result
-
-  __ pop(ltos);
-  __ pop(dtos);
-
-  __ movptr(t, Address(rbp,
-                       (frame::interpreter_frame_result_handler_offset) * wordSize));
-  __ call(t);
-
-  // remove activation
-  __ movptr(t, Address(rbp,
-                       frame::interpreter_frame_sender_sp_offset *
-                       wordSize)); // get sender sp
-  __ leave();                                // remove frame anchor
-  __ pop(rdi);                               // get return address
-  __ mov(rsp, t);                            // set sp to sender sp
-  __ jmp(rdi);
-
-  if (inc_counter) {
-    // Handle overflow of counter and compile method
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
-  }
-
-  return entry_point;
-}
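// The native-trans sequence above is a publish-then-check protocol: store the
// new thread state, force a StoreLoad barrier (UseMembar) or write the
// serialization page, then test for pending safepoint/suspend work. A hedged
// standalone model with illustrative names:

#include <atomic>

enum ThreadState { IN_JAVA, IN_NATIVE, IN_NATIVE_TRANS };
static std::atomic<ThreadState> g_thread_state{IN_NATIVE};
static std::atomic<bool> g_safepoint_or_suspend{false};

// Returns true if the thread may go straight back to Java; false means it
// must first call into the VM (check_special_condition_for_native_trans).
bool transition_from_native() {
  g_thread_state.store(IN_NATIVE_TRANS, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // membar / page write
  if (g_safepoint_or_suspend.load(std::memory_order_relaxed))
    return false;
  g_thread_state.store(IN_JAVA, std::memory_order_release);
  return true;
}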
-
-//
-// Generic interpreted method entry to (asm) interpreter
-//
-address InterpreterGenerator::generate_normal_entry(bool synchronized) {
-  // determine code generation flags
-  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
-
-  // ebx: Method*
-  // r13: sender sp
-  address entry_point = __ pc();
-
-  const Address constMethod(rbx, Method::const_offset());
-  const Address access_flags(rbx, Method::access_flags_offset());
-  const Address size_of_parameters(rdx,
-                                   ConstMethod::size_of_parameters_offset());
-  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
-
-
-  // get parameter size (always needed)
-  __ movptr(rdx, constMethod);
-  __ load_unsigned_short(rcx, size_of_parameters);
-
-  // rbx: Method*
-  // rcx: size of parameters
-  // r13: sender_sp (could differ from sp+wordSize if we were called via c2i)
-
-  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
-  __ subl(rdx, rcx); // rdx = no. of additional locals
-
-  // YYY
-//   __ incrementl(rdx);
-//   __ andl(rdx, -2);
-
-  // see if we've got enough room on the stack for locals plus overhead.
-  generate_stack_overflow_check();
-
-  // get return address
-  __ pop(rax);
-
-  // compute beginning of parameters (r14)
-  __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
-
-  // rdx - # of additional locals
-  // allocate space for locals
-  // explicitly initialize locals
-  {
-    Label exit, loop;
-    __ testl(rdx, rdx);
-    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
-    __ bind(loop);
-    __ push((int) NULL_WORD); // initialize local variables
-    __ decrementl(rdx); // until everything initialized
-    __ jcc(Assembler::greater, loop);
-    __ bind(exit);
-  }
-
-  // initialize fixed part of activation frame
-  generate_fixed_frame(false);
-
-  // make sure method is not native & not abstract
-#ifdef ASSERT
-  __ movl(rax, access_flags);
-  {
-    Label L;
-    __ testl(rax, JVM_ACC_NATIVE);
-    __ jcc(Assembler::zero, L);
-    __ stop("tried to execute native method as non-native");
-    __ bind(L);
-  }
-  {
-    Label L;
-    __ testl(rax, JVM_ACC_ABSTRACT);
-    __ jcc(Assembler::zero, L);
-    __ stop("tried to execute abstract method in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // Since at this point in the method invocation the exception
-  // handler would try to exit the monitor of synchronized methods
-  // which hasn't been entered yet, we set the thread local variable
-  // _do_not_unlock_if_synchronized to true. The remove_activation
-  // will check this flag.
-
-  const Address do_not_unlock_if_synchronized(r15_thread,
-        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
-  __ movbool(do_not_unlock_if_synchronized, true);
-
-  __ profile_parameters_type(rax, rcx, rdx);
-  // increment invocation count & check for overflow
-  Label invocation_counter_overflow;
-  Label profile_method;
-  Label profile_method_continue;
-  if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow,
-                          &profile_method,
-                          &profile_method_continue);
-    if (ProfileInterpreter) {
-      __ bind(profile_method_continue);
-    }
-  }
-
-  Label continue_after_compile;
-  __ bind(continue_after_compile);
-
-  // bang the stack shadow pages before executing the method
-  bang_stack_shadow_pages(false);
-
-  // reset the _do_not_unlock_if_synchronized flag
-  __ movbool(do_not_unlock_if_synchronized, false);
-
-  // check for synchronized methods
-  // Must happen AFTER invocation_counter check and stack overflow check,
-  // so method is not locked if overflows.
-  if (synchronized) {
-    // Allocate monitor and lock method
-    lock_method();
-  } else {
-    // no synchronization necessary
-#ifdef ASSERT
-    {
-      Label L;
-      __ movl(rax, access_flags);
-      __ testl(rax, JVM_ACC_SYNCHRONIZED);
-      __ jcc(Assembler::zero, L);
-      __ stop("method needs synchronization");
-      __ bind(L);
-    }
-#endif
-  }
-
-  // start execution
-#ifdef ASSERT
-  {
-    Label L;
-     const Address monitor_block_top (rbp,
-                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
-    __ movptr(rax, monitor_block_top);
-    __ cmpptr(rax, rsp);
-    __ jcc(Assembler::equal, L);
-    __ stop("broken stack frame setup in interpreter");
-    __ bind(L);
-  }
-#endif
-
-  // jvmti support
-  __ notify_method_entry();
-
-  __ dispatch_next(vtos);
-
-  // invocation counter overflow
-  if (inc_counter) {
-    if (ProfileInterpreter) {
-      // We have decided to profile this method in the interpreter
-      __ bind(profile_method);
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
-      __ set_method_data_pointer_for_bcp();
-      __ get_method(rbx);
-      __ jmp(profile_method_continue);
-    }
-    // Handle overflow of counter and compile method
-    __ bind(invocation_counter_overflow);
-    generate_counter_overflow(&continue_after_compile);
-  }
-
-  return entry_point;
-}
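// The prologue above computes rdx = size_of_locals - size_of_parameters and
// zero-fills only that difference, because the parameters already sit on the
// caller's stack. A toy model of the same bookkeeping:

#include <cstdint>
#include <vector>

std::vector<intptr_t> build_locals(const std::vector<intptr_t>& params,
                                   int size_of_locals) {
  std::vector<intptr_t> locals(params);             // params are locals 0..n-1
  int extra = size_of_locals - (int)params.size();  // rdx after the subl
  for (int i = 0; i < extra; i++)
    locals.push_back(0);                            // the push(NULL_WORD) loop
  return locals;
}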
-
-
-// These should never be compiled since the interpreter will prefer
-// the compiled version to the intrinsic version.
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  switch (method_kind(m)) {
-    case Interpreter::java_lang_math_sin     : // fall thru
-    case Interpreter::java_lang_math_cos     : // fall thru
-    case Interpreter::java_lang_math_tan     : // fall thru
-    case Interpreter::java_lang_math_abs     : // fall thru
-    case Interpreter::java_lang_math_log     : // fall thru
-    case Interpreter::java_lang_math_log10   : // fall thru
-    case Interpreter::java_lang_math_sqrt    : // fall thru
-    case Interpreter::java_lang_math_pow     : // fall thru
-    case Interpreter::java_lang_math_exp     :
-      return false;
-    default:
-      return true;
-  }
-}
-
-// How much stack a method activation needs in words.
-int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
-  const int entry_size = frame::interpreter_frame_monitor_size();
-
-  // total overhead size: entry_size + (saved rbp thru expr stack
-  // bottom).  be sure to change this if you add/subtract anything
-  // to/from the overhead area
-  const int overhead_size =
-    -(frame::interpreter_frame_initial_sp_offset) + entry_size;
-
-  const int stub_code = frame::entry_frame_after_call_words;
-  const int method_stack = (method->max_locals() + method->max_stack()) *
-                           Interpreter::stackElementWords;
-  return (overhead_size + method_stack + stub_code);
-}
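// Plugging illustrative numbers into the formula above (the real constants
// are per-platform frame:: values; everything here is assumed for the sake
// of the example):

#include <cstdio>

int main() {
  const int entry_size        = 2;   // monitor size in words (assumed)
  const int initial_sp_offset = -9;  // interpreter_frame_initial_sp_offset (assumed)
  const int stub_code         = 13;  // entry_frame_after_call_words (assumed)
  const int max_locals = 4, max_stack = 6, stackElementWords = 1;
  const int overhead_size = -initial_sp_offset + entry_size;              // 11
  const int method_stack  = (max_locals + max_stack) * stackElementWords; // 10
  std::printf("activation needs %d words\n",
              overhead_size + method_stack + stub_code);                  // 34
  return 0;
}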
-
-//-----------------------------------------------------------------------------
-// Exceptions
-
-void TemplateInterpreterGenerator::generate_throw_exception() {
-  // Entry point in previous activation (i.e., if the caller was
-  // interpreted)
-  Interpreter::_rethrow_exception_entry = __ pc();
-  // Restore sp to interpreter_frame_last_sp even though we are going
-  // to empty the expression stack for the exception processing.
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
-  // rax: exception
-  // rdx: return address/pc that threw exception
-  __ restore_bcp();    // r13 points to call/send
-  __ restore_locals();
-  __ reinit_heapbase();  // restore r12 as heapbase.
-  // Entry point for exceptions thrown within interpreter code
-  Interpreter::_throw_exception_entry = __ pc();
-  // expression stack is undefined here
-  // rax: exception
-  // r13: exception bcp
-  __ verify_oop(rax);
-  __ mov(c_rarg1, rax);
-
-  // expression stack must be empty before entering the VM in case of
-  // an exception
-  __ empty_expression_stack();
-  // find exception handler address and preserve exception oop
-  __ call_VM(rdx,
-             CAST_FROM_FN_PTR(address,
-                          InterpreterRuntime::exception_handler_for_exception),
-             c_rarg1);
-  // rax: exception handler entry point
-  // rdx: preserved exception oop
-  // r13: bcp for exception handler
-  __ push_ptr(rdx); // push exception which is now the only value on the stack
-  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)
-
-  // If the exception is not handled in the current frame the frame is
-  // removed and the exception is rethrown (i.e. exception
-  // continuation is _rethrow_exception).
-  //
-  // Note: At this point the bci still refers to the instruction
-  // which caused the exception and the expression stack is
-  // empty. Thus, for any VM calls at this point, GC will find a legal
-  // oop map (with empty expression stack).
-
-  // In current activation
-  // tos: exception
-  // esi: exception bcp
-
-  //
-  // JVMTI PopFrame support
-  //
-
-  Interpreter::_remove_activation_preserving_args_entry = __ pc();
-  __ empty_expression_stack();
-  // Set the popframe_processing bit in pending_popframe_condition
-  // indicating that we are currently handling popframe, so that
-  // call_VMs that may happen later do not trigger new popframe
-  // handling cycles.
-  __ movl(rdx, Address(r15_thread, JavaThread::popframe_condition_offset()));
-  __ orl(rdx, JavaThread::popframe_processing_bit);
-  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()), rdx);
-
-  {
-    // Check to see whether we are returning to a deoptimized frame.
-    // (The PopFrame call ensures that the caller of the popped frame is
-    // either interpreted or compiled and deoptimizes it if compiled.)
-    // In this case, we can't call dispatch_next() after the frame is
-    // popped, but instead must save the incoming arguments and restore
-    // them after deoptimization has occurred.
-    //
-    // Note that we don't compare the return PC against the
-    // deoptimization blob's unpack entry because of the presence of
-    // adapter frames in C2.
-    Label caller_not_deoptimized;
-    __ movptr(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
-                               InterpreterRuntime::interpreter_contains), c_rarg1);
-    __ testl(rax, rax);
-    __ jcc(Assembler::notZero, caller_not_deoptimized);
-
-    // Compute size of arguments for saving when returning to
-    // deoptimized caller
-    __ get_method(rax);
-    __ movptr(rax, Address(rax, Method::const_offset()));
-    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
-                                                size_of_parameters_offset())));
-    __ shll(rax, Interpreter::logStackElementSize);
-    __ restore_locals(); // XXX do we need this?
-    __ subptr(r14, rax);
-    __ addptr(r14, wordSize);
-    // Save these arguments
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
-                                           Deoptimization::
-                                           popframe_preserve_args),
-                          r15_thread, rax, r14);
-
-    __ remove_activation(vtos, rdx,
-                         /* throw_monitor_exception */ false,
-                         /* install_monitor_exception */ false,
-                         /* notify_jvmdi */ false);
-
-    // Inform deoptimization that it is responsible for restoring
-    // these arguments
-    __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
-            JavaThread::popframe_force_deopt_reexecution_bit);
-
-    // Continue in deoptimization handler
-    __ jmp(rdx);
-
-    __ bind(caller_not_deoptimized);
-  }
-
-  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
-                       /* throw_monitor_exception */ false,
-                       /* install_monitor_exception */ false,
-                       /* notify_jvmdi */ false);
-
-  // Finish with popframe handling
-  // A previous I2C followed by a deoptimization might have moved the
-  // outgoing arguments further up the stack. PopFrame expects the
-  // mutations to those outgoing arguments to be preserved and other
-  // constraints basically require this frame to look exactly as
-  // though it had previously invoked an interpreted activation with
-  // no space between the top of the expression stack (current
-  // last_sp) and the top of stack. Rather than force deopt to
-  // maintain this kind of invariant all the time we call a small
-  // fixup routine to move the mutated arguments onto the top of our
-  // expression stack if necessary.
-  __ mov(c_rarg1, rsp);
-  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
-  // PC must point into interpreter here
-  __ set_last_Java_frame(noreg, rbp, __ pc());
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
-  __ reset_last_Java_frame(true, true);
-  // Restore the last_sp and null it out
-  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
-
-  __ restore_bcp();  // XXX do we need this?
-  __ restore_locals(); // XXX do we need this?
-  // The method data pointer was incremented already during
-  // call profiling. We have to restore the mdp for the current bcp.
-  if (ProfileInterpreter) {
-    __ set_method_data_pointer_for_bcp();
-  }
-
-  // Clear the popframe condition flag
-  __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
-          JavaThread::popframe_inactive);
-
-#if INCLUDE_JVMTI
-  {
-    Label L_done;
-    const Register local0 = r14;
-
-    __ cmpb(Address(r13, 0), Bytecodes::_invokestatic);
-    __ jcc(Assembler::notEqual, L_done);
-
-    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
-    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
-
-    __ get_method(rdx);
-    __ movptr(rax, Address(local0, 0));
-    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, r13);
-
-    __ testptr(rax, rax);
-    __ jcc(Assembler::zero, L_done);
-
-    __ movptr(Address(rbx, 0), rax);
-    __ bind(L_done);
-  }
-#endif // INCLUDE_JVMTI
-
-  __ dispatch_next(vtos);
-  // end of PopFrame support
-
-  Interpreter::_remove_activation_entry = __ pc();
-
-  // preserve exception over this code sequence
-  __ pop_ptr(rax);
-  __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), rax);
-  // remove the activation (without doing throws on illegalMonitorExceptions)
-  __ remove_activation(vtos, rdx, false, true, false);
-  // restore exception
-  __ get_vm_result(rax, r15_thread);
-
-  // In between activations - previous activation type unknown yet
-  // compute continuation point - the continuation point expects the
-  // following registers set up:
-  //
-  // rax: exception
-  // rdx: return address/pc that threw exception
-  // rsp: expression stack of caller
-  // rbp: ebp of caller
-  __ push(rax);                                  // save exception
-  __ push(rdx);                                  // save return address
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
-                          SharedRuntime::exception_handler_for_return_address),
-                        r15_thread, rdx);
-  __ mov(rbx, rax);                              // save exception handler
-  __ pop(rdx);                                   // restore return address
-  __ pop(rax);                                   // restore exception
-  // Note that an "issuing PC" is actually the next PC after the call
-  __ jmp(rbx);                                   // jump to exception
-                                                 // handler of caller
-}
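// The PopFrame fixup above only has to make the frame look as if the mutated
// outgoing arguments sit directly on top of the expression stack. A toy model
// of that contract (the semantics of popframe_move_outgoing_args are assumed
// here, not quoted):

#include <cstddef>
#include <cstring>

void move_outgoing_args(void* top_of_expr_stack, const void* args,
                        std::size_t nbytes) {
  // memmove, not memcpy: a prior I2C/deopt shuffle may leave the two
  // regions overlapping.
  std::memmove(top_of_expr_stack, args, nbytes);
}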
-
-
-//
-// JVMTI ForceEarlyReturn support
-//
-address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
-  address entry = __ pc();
-
-  __ restore_bcp();
-  __ restore_locals();
-  __ empty_expression_stack();
-  __ load_earlyret_value(state);
-
-  __ movptr(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
-  Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset());
-
-  // Clear the earlyret state
-  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
-
-  __ remove_activation(state, rsi,
-                       false, /* throw_monitor_exception */
-                       false, /* install_monitor_exception */
-                       true); /* notify_jvmdi */
-  __ jmp(rsi);
-
-  return entry;
-} // end of ForceEarlyReturn support
-
-
-//-----------------------------------------------------------------------------
-// Helper for vtos entry point generation
-
-void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
-                                                         address& bep,
-                                                         address& cep,
-                                                         address& sep,
-                                                         address& aep,
-                                                         address& iep,
-                                                         address& lep,
-                                                         address& fep,
-                                                         address& dep,
-                                                         address& vep) {
-  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
-  Label L;
-  aep = __ pc();  __ push_ptr();   __ jmp(L);
-  fep = __ pc();  __ push_f(xmm0); __ jmp(L);
-  dep = __ pc();  __ push_d(xmm0); __ jmp(L);
-  lep = __ pc();  __ push_l();     __ jmp(L);
-  bep = cep = sep =
-  iep = __ pc();  __ push_i();
-  vep = __ pc();
-  __ bind(L);
-  generate_and_dispatch(t);
-}
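// Each tos-state entry above merely spills its cached value and then falls
// into the shared label L. A function-pointer model of the same fan-in
// (illustrative only):

#include <cstdio>
#include <vector>

static std::vector<long> g_stack;

static void dispatch_next() {  // stands in for generate_and_dispatch(t)
  std::printf("dispatch, stack depth = %zu\n", g_stack.size());
}

void aep(long ref) { g_stack.push_back(ref); dispatch_next(); }  // push_ptr
void iep(int val)  { g_stack.push_back(val); dispatch_next(); }  // push_i
void vep()         { dispatch_next(); }                          // nothing cached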
-
-
-//-----------------------------------------------------------------------------
-// Generation of individual instructions
-
-// helpers for generate_and_dispatch
-
-
-InterpreterGenerator::InterpreterGenerator(StubQueue* code)
-  : TemplateInterpreterGenerator(code) {
-   generate_all(); // down here so it can be "virtual"
-}
-
-//-----------------------------------------------------------------------------
-
-// Non-product code
-#ifndef PRODUCT
-address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
-  address entry = __ pc();
-
-  __ push(state);
-  __ push(c_rarg0);
-  __ push(c_rarg1);
-  __ push(c_rarg2);
-  __ push(c_rarg3);
-  __ mov(c_rarg2, rax);  // Pass itos
-#ifdef _WIN64
-  __ movflt(xmm3, xmm0); // Pass ftos
-#endif
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
-             c_rarg1, c_rarg2, c_rarg3);
-  __ pop(c_rarg3);
-  __ pop(c_rarg2);
-  __ pop(c_rarg1);
-  __ pop(c_rarg0);
-  __ pop(state);
-  __ ret(0);                                   // return from result handler
-
-  return entry;
-}
-
-void TemplateInterpreterGenerator::count_bytecode() {
-  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
-}
-
-void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
-  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
-}
-
-void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
-  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
-  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
-  __ orl(rbx,
-         ((int) t->bytecode()) <<
-         BytecodePairHistogram::log2_number_of_codes);
-  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
-  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
-  __ incrementl(Address(rscratch1, rbx, Address::times_4));
-}
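// The pair histogram above keeps a rolling two-bytecode index: shift the
// older code out, or the newer one in, then bump counters[index]. Standalone
// (log2_number_of_codes is assumed to be 8 here):

#include <cstdio>

int main() {
  const unsigned log2_codes = 8;
  static unsigned counters[1u << (2 * log2_codes)] = { 0 };
  unsigned index = 0;
  const unsigned char trace[] = { 0x1a, 0x1b, 0x60, 0xac };  // iload_0 iload_1 iadd ireturn
  for (unsigned char bc : trace) {
    index = (index >> log2_codes) | ((unsigned)bc << log2_codes);
    counters[index]++;  // counts the (previous, current) bytecode pair
  }
  std::printf("final pair index = %u\n", index);
  return 0;
}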
-
-
-void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
-  // Call a little run-time stub to avoid blow-up for each bytecode.
-  // The run-time stub saves the right registers, depending on
-  // the tosca in-state for the given template.
-
-  assert(Interpreter::trace_code(t->tos_in()) != NULL,
-         "entry must have been generated");
-  __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
-  __ andptr(rsp, -16); // align stack as required by ABI
-  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
-  __ mov(rsp, r12); // restore sp
-  __ reinit_heapbase();
-}
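// andptr(rsp, -16) above works because -16 is ~15 in two's complement: it
// clears the low four bits, rounding the stack pointer down to the 16-byte
// boundary the AMD64 ABI requires at call sites. For example:

#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t sp      = 0x7ffdccba1238u;      // example stack pointer
  uintptr_t aligned = sp & (uintptr_t)-16;  // same as sp & ~(uintptr_t)15
  std::printf("0x%llx -> 0x%llx\n",
              (unsigned long long)sp, (unsigned long long)aligned);
  return 0;  // prints ...1238 -> ...1230
}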
-
-
-void TemplateInterpreterGenerator::stop_interpreter_at() {
-  Label L;
-  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
-           StopInterpreterAt);
-  __ jcc(Assembler::notEqual, L);
-  __ int3();
-  __ bind(L);
-}
-#endif // !PRODUCT
-#endif // ! CC_INTERP
--- a/src/cpu/x86/vm/templateTable_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/x86/vm/templateTable_x86.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -43,8 +43,8 @@
 #define __ _masm->
 
 // Global Register Names
-Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
-Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
+static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
+static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
 
 // Platform-dependent initialization
 void TemplateTable::pd_initialize() {
--- a/src/cpu/x86/vm/x86_32.ad	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/x86/vm/x86_32.ad	Fri Dec 18 12:39:02 2015 -0800
@@ -670,17 +670,16 @@
 
 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   Compile *C = ra_->C;
+  MacroAssembler _masm(&cbuf);
 
   if (C->max_vector_size() > 16) {
     // Clear upper bits of YMM registers when current compiled code uses
     // wide vectors to avoid AVX <-> SSE transition penalty during call.
-    MacroAssembler masm(&cbuf);
-    masm.vzeroupper();
+    _masm.vzeroupper();
   }
   // If method set FPU control word, restore to standard control word
   if (C->in_24_bit_fp_mode()) {
-    MacroAssembler masm(&cbuf);
-    masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
+    _masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
   }
 
   int framesize = C->frame_size_in_bytes();
@@ -702,6 +701,10 @@
 
   emit_opcode(cbuf, 0x58 | EBP_enc);
 
+  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
+    __ reserved_stack_check();
+  }
+
   if (do_polling() && C->is_method_compilation()) {
     cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0);
     emit_opcode(cbuf,0x85);
@@ -729,6 +732,7 @@
   } else {
     size += framesize ? 3 : 0;
   }
+  size += 64; // added to support ReservedStackAccess
   return size;
 }
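Both epilog changes in this file and the x86_64 one below emit reserved_stack_check() when StackReservedPages is enabled and the compiled method touched the reserved stack area. Conceptually (a hedged model; the field name below only approximates the JavaThread state), the check is a single compare of SP against a per-thread watermark, with a runtime call only when the frame being popped lies inside the reserved zone:

#include <cstdint>

struct ThreadModel {                    // illustrative stand-in for JavaThread
  uintptr_t reserved_stack_activation;  // watermark armed when the zone is used
};

// On return the stack unwinds upward; once SP moves past the watermark the
// annotated section is finished and the runtime must re-arm the reserved zone
// (and possibly deliver a delayed StackOverflowError). Otherwise this is a
// cheap compare-and-branch, which is why the epilog can afford it.
bool needs_reserved_zone_rearm(const ThreadModel& t, uintptr_t sp) {
  return sp > t.reserved_stack_activation;
}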
 
--- a/src/cpu/x86/vm/x86_64.ad	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/x86/vm/x86_64.ad	Fri Dec 18 12:39:02 2015 -0800
@@ -953,10 +953,11 @@
 void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
 {
   Compile* C = ra_->C;
+  MacroAssembler _masm(&cbuf);
+
   if (C->max_vector_size() > 16) {
     // Clear upper bits of YMM registers when current compiled code uses
     // wide vectors to avoid AVX <-> SSE transition penalty during call.
-    MacroAssembler _masm(&cbuf);
     __ vzeroupper();
   }
 
@@ -984,6 +985,10 @@
   // popq rbp
   emit_opcode(cbuf, 0x58 | RBP_enc);
 
+  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
+    __ reserved_stack_check();
+  }
+
   if (do_polling() && C->is_method_compilation()) {
     MacroAssembler _masm(&cbuf);
     AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
--- a/src/cpu/zero/vm/globals_zero.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/zero/vm/globals_zero.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -48,14 +48,17 @@
 #define DEFAULT_STACK_YELLOW_PAGES (2)
 #define DEFAULT_STACK_RED_PAGES (1)
 #define DEFAULT_STACK_SHADOW_PAGES (5 LP64_ONLY(+1) DEBUG_ONLY(+3))
+#define DEFAULT_STACK_RESERVED_PAGES (0)
 
 #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
 #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
 #define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
+#define MIN_STACK_RESERVED_PAGES (0)
 
 define_pd_global(intx,  StackYellowPages,     DEFAULT_STACK_YELLOW_PAGES);
 define_pd_global(intx,  StackRedPages,        DEFAULT_STACK_RED_PAGES);
 define_pd_global(intx,  StackShadowPages,     DEFAULT_STACK_SHADOW_PAGES);
+define_pd_global(intx,  StackReservedPages,   DEFAULT_STACK_RESERVED_PAGES);
 
 define_pd_global(bool,  RewriteBytecodes,     true);
 define_pd_global(bool,  RewriteFrequentPairs, true);
--- a/src/cpu/zero/vm/interpreter_zero.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/cpu/zero/vm/interpreter_zero.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -38,7 +38,6 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -74,7 +73,3 @@
 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
   return true;
 }
-
-void Deoptimization::unwind_callee_save_values(frame* f,
-                                               vframeArray* vframe_array) {
-}
--- a/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl.java	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -170,7 +170,7 @@
      * @return flags of this method
      */
     private int getFlags() {
-        return UNSAFE.getByte(metaspaceMethod + config().methodFlagsOffset);
+        return UNSAFE.getShort(metaspaceMethod + config().methodFlagsOffset);
     }
 
     /**
--- a/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java	Fri Dec 18 12:39:02 2015 -0800
@@ -1227,7 +1227,7 @@
     @HotSpotVMField(name = "Method::_access_flags", type = "AccessFlags", get = HotSpotVMField.Type.OFFSET) @Stable public int methodAccessFlagsOffset;
     @HotSpotVMField(name = "Method::_constMethod", type = "ConstMethod*", get = HotSpotVMField.Type.OFFSET) @Stable public int methodConstMethodOffset;
     @HotSpotVMField(name = "Method::_intrinsic_id", type = "u2", get = HotSpotVMField.Type.OFFSET) @Stable public int methodIntrinsicIdOffset;
-    @HotSpotVMField(name = "Method::_flags", type = "u1", get = HotSpotVMField.Type.OFFSET) @Stable public int methodFlagsOffset;
+    @HotSpotVMField(name = "Method::_flags", type = "u2", get = HotSpotVMField.Type.OFFSET) @Stable public int methodFlagsOffset;
     @HotSpotVMField(name = "Method::_vtable_index", type = "int", get = HotSpotVMField.Type.OFFSET) @Stable public int methodVtableIndexOffset;
 
     @HotSpotVMField(name = "Method::_method_counters", type = "MethodCounters*", get = HotSpotVMField.Type.OFFSET) @Stable public int methodCountersOffset;
--- a/src/os/aix/vm/globals_aix.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/aix/vm/globals_aix.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -29,37 +29,61 @@
 //
 // Defines Aix specific flags. They are not available on other platforms.
 //
+// (Please keep the switches sorted alphabetically.)
 #define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct, range, constraint) \
                                                                                     \
+  /* Whether to allow the VM to run if EXTSHM=ON. EXTSHM is an environment */       \
+  /* variable used on AIX to activate certain hacks which allow more shm segments */\
+  /* for 32bit processes. For 64bit processes, it is pointless and may have */      \
+  /* harmful side effects (e.g. for some reason prevents allocation of 64k pages */\
+  /* via shmctl). */                                                                \
+  /* Per default we quit with an error if that variable is found; for certain */    \
+  /* customer scenarios, we may want to be able to run despite that variable. */    \
+  product(bool, AllowExtshm, false,                                                 \
+          "Allow VM to run with EXTSHM=ON.")                                        \
+                                                                                    \
+  product(intx, AttachListenerTimeout, 1000,                                        \
+          "Timeout in ms the attach listener waits for a request")                  \
+          range(0, 2147483)                                                         \
+                                                                                    \
+  /*  Maximum expected size of the data segment; that correlates with the     */   \
+  /*  maximum C Heap consumption we expect.                                   */   \
+  /*  We need to know this because we need to leave "breathing space" for the  */   \
+  /*  data segment when placing the java heap. If that space is too small, we  */   \
+  /*  reduce our chance of getting a low heap address (needed for compressed   */   \
+  /*  Oops).                                                                   */   \
+  product(uintx, MaxExpectedDataSegmentSize, (SIZE_4G * 2),                         \
+          "Maximum expected Data Segment Size.")                                    \
+                                                                                    \
+  /* Use optimized addresses for the polling page.                             */   \
+  product(bool, OptimizePollingPageLocation, true,                                  \
+             "Optimize the location of the polling page used for Safepoints")       \
+                                                                                    \
   /* Use 64K pages for virtual memory (shmat). */                                   \
   product(bool, Use64KPages, true,                                                  \
           "Use 64K pages if available.")                                            \
                                                                                     \
-  /* If UseLargePages == true allow or deny usage of 16M pages. 16M pages are  */   \
-  /* a scarce resource and there may be situations where we do not want the VM */   \
-  /* to run with 16M pages. (Will fall back to 64K pages).                     */   \
-  product_pd(bool, Use16MPages,                                                     \
-             "Use 16M pages if available.")                                         \
+  /*  If VM uses 64K paged memory (shmat) for virtual memory: threshold below  */   \
+  /*  which virtual memory allocations are done with 4K memory (mmap). This is */   \
+  /*  mainly for test purposes.                                                */   \
+  develop(uintx, Use64KPagesThreshold, 0,                                           \
+          "4K/64K page allocation threshold.")                                      \
                                                                                     \
-  /*  use optimized addresses for the polling page, */                              \
-  /* e.g. map it to a special 32-bit address.       */                              \
-  product_pd(bool, OptimizePollingPageLocation,                                     \
-             "Optimize the location of the polling page used for Safepoints")       \
-                                                                                    \
-  product_pd(intx, AttachListenerTimeout,                                           \
-             "Timeout in ms the attach listener waits for a request")               \
-             range(0, 2147483)                                                      \
+  /* Normally AIX commits memory on touch, but sometimes it is helpful to have */   \
+  /* explicit commit behaviour. This flag, if true, causes the VM to touch     */   \
+  /* memory on os::commit_memory() (which normally is a noop).                 */   \
+  product(bool, UseExplicitCommit, false,                                           \
+          "Explicit commit for virtual memory.")                                    \
                                                                                     \
 
-// Per default, do not allow 16M pages. 16M pages have to be switched on specifically.
-define_pd_global(bool, Use16MPages, false);
-define_pd_global(bool, OptimizePollingPageLocation, true);
-define_pd_global(intx, AttachListenerTimeout, 1000);
 
 //
 // Defines Aix-specific default values. The flags are available on all
 // platforms, but they may have different default values on other platforms.
 //
+
+// UseLargePages means nothing, for now, on AIX.
+// Use Use64KPages instead.
 define_pd_global(bool, UseLargePages, false);
 define_pd_global(bool, UseLargePagesIndividualAllocation, false);
 define_pd_global(bool, UseOSErrorReporting, false);
--- a/src/os/aix/vm/jvm_aix.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/aix/vm/jvm_aix.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -109,92 +109,3 @@
   return JNI_TRUE;
 JVM_END
 
-/*
-  All the defined signal names for Linux.
-
-  NOTE that not all of these names are accepted by our Java implementation
-
-  Via an existing claim by the VM, sigaction restrictions, or
-  the "rules of Unix" some of these names will be rejected at runtime.
-  For example the VM sets up to handle USR1, sigaction returns EINVAL for
-  STOP, and Linux simply doesn't allow catching of KILL.
-
-  Here are the names currently accepted by a user of sun.misc.Signal with
-  1.4.1 (ignoring potential interaction with use of chaining, etc):
-
-    HUP, INT, TRAP, ABRT, IOT, BUS, USR2, PIPE, ALRM, TERM, STKFLT,
-    CLD, CHLD, CONT, TSTP, TTIN, TTOU, URG, XCPU, XFSZ, VTALRM, PROF,
-    WINCH, POLL, IO, PWR, SYS
-
-*/
-
-struct siglabel {
-  const char *name;
-  int   number;
-};
-
-struct siglabel siglabels[] = {
-  /* derived from /usr/include/bits/signum.h on RH7.2 */
-   "HUP",       SIGHUP,         /* Hangup (POSIX).  */
-  "INT",        SIGINT,         /* Interrupt (ANSI).  */
-  "QUIT",       SIGQUIT,        /* Quit (POSIX).  */
-  "ILL",        SIGILL,         /* Illegal instruction (ANSI).  */
-  "TRAP",       SIGTRAP,        /* Trace trap (POSIX).  */
-  "ABRT",       SIGABRT,        /* Abort (ANSI).  */
-  "IOT",        SIGIOT,         /* IOT trap (4.2 BSD).  */
-  "BUS",        SIGBUS,         /* BUS error (4.2 BSD).  */
-  "FPE",        SIGFPE,         /* Floating-point exception (ANSI).  */
-  "KILL",       SIGKILL,        /* Kill, unblockable (POSIX).  */
-  "USR1",       SIGUSR1,        /* User-defined signal 1 (POSIX).  */
-  "SEGV",       SIGSEGV,        /* Segmentation violation (ANSI).  */
-  "USR2",       SIGUSR2,        /* User-defined signal 2 (POSIX).  */
-  "PIPE",       SIGPIPE,        /* Broken pipe (POSIX).  */
-  "ALRM",       SIGALRM,        /* Alarm clock (POSIX).  */
-  "TERM",       SIGTERM,        /* Termination (ANSI).  */
-#ifdef SIGSTKFLT
-  "STKFLT",     SIGSTKFLT,      /* Stack fault.  */
-#endif
-  "CLD",        SIGCLD,         /* Same as SIGCHLD (System V).  */
-  "CHLD",       SIGCHLD,        /* Child status has changed (POSIX).  */
-  "CONT",       SIGCONT,        /* Continue (POSIX).  */
-  "STOP",       SIGSTOP,        /* Stop, unblockable (POSIX).  */
-  "TSTP",       SIGTSTP,        /* Keyboard stop (POSIX).  */
-  "TTIN",       SIGTTIN,        /* Background read from tty (POSIX).  */
-  "TTOU",       SIGTTOU,        /* Background write to tty (POSIX).  */
-  "URG",        SIGURG,         /* Urgent condition on socket (4.2 BSD).  */
-  "XCPU",       SIGXCPU,        /* CPU limit exceeded (4.2 BSD).  */
-  "XFSZ",       SIGXFSZ,        /* File size limit exceeded (4.2 BSD).  */
-  "DANGER",     SIGDANGER,      /* System crash imminent; free up some page space (AIX). */
-  "VTALRM",     SIGVTALRM,      /* Virtual alarm clock (4.2 BSD).  */
-  "PROF",       SIGPROF,        /* Profiling alarm clock (4.2 BSD).  */
-  "WINCH",      SIGWINCH,       /* Window size change (4.3 BSD, Sun).  */
-  "POLL",       SIGPOLL,        /* Pollable event occurred (System V).  */
-  "IO",         SIGIO,          /* I/O now possible (4.2 BSD).  */
-  "PWR",        SIGPWR,         /* Power failure restart (System V).  */
-#ifdef SIGSYS
-  "SYS",        SIGSYS          /* Bad system call. Only on some Linuxen! */
-#endif
-  };
-
-JVM_ENTRY_NO_ENV(jint, JVM_FindSignal(const char *name))
-
-  /* find and return the named signal's number */
-
-  for(uint i=0; i<ARRAY_SIZE(siglabels); i++)
-    if(!strcmp(name, siglabels[i].name))
-      return siglabels[i].number;
-
-  return -1;
-
-JVM_END
-
-// used by os::exception_name()
-extern bool signal_name(int signo, char* buf, size_t len) {
-  for(uint i = 0; i < ARRAY_SIZE(siglabels); i++) {
-    if (signo == siglabels[i].number) {
-      jio_snprintf(buf, len, "SIG%s", siglabels[i].name);
-      return true;
-    }
-  }
-  return false;
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/libo4.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// This is only a stub. Will flesh out later when/if we add further support
+// for PASE.
+
+#include "libo4.hpp"
+
+bool libo4::init() { return false; }
+void libo4::cleanup() {}
+bool libo4::get_memory_info (unsigned long long* p_virt_total, unsigned long long* p_real_total,
+  unsigned long long* p_real_free, unsigned long long* p_pgsp_total, unsigned long long* p_pgsp_free) {
+  return false;
+}
+bool libo4::get_load_avg (double* p_avg1, double* p_avg5, double* p_avg15) { return false; }
+bool libo4::realpath (const char* file_name, char* resolved_name, int resolved_name_len) { return false; }
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/libo4.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// A C++ wrapper around the libo4 porting library. The libo4 porting library
+// is a set of bridge functions into native AS/400 functionality.
+
+#ifndef OS_AIX_VM_LIBO4_HPP
+#define OS_AIX_VM_LIBO4_HPP
+
+
+class libo4 {
+public:
+
+  // Initialize the libo4 porting library.
+  // Returns true on success, false on error.
+  static bool init();
+
+  // Clean up the libo4 porting library.
+  static void cleanup();
+
+  // Returns a number of memory statistics from the AS/400.
+  //
+  // Specify NULL for numbers you are not interested in.
+  //
+  // Returns false if an error occurred. Activate the OsMisc trace for
+  // trace output.
+  //
+  static bool get_memory_info (unsigned long long* p_virt_total, unsigned long long* p_real_total,
+    unsigned long long* p_real_free, unsigned long long* p_pgsp_total, unsigned long long* p_pgsp_free);
+
+  // Returns information about the system load
+  // (similar to "loadavg()" on other Unices).
+  //
+  // Specify NULL for numbers you are not interested in.
+  //
+  // Returns false if an error occurred. Activate the OsMisc trace for
+  // trace output.
+  //
+  static bool get_load_avg (double* p_avg1, double* p_avg5, double* p_avg15);
+
+  // This is a replacement for the "realpath()" API, which does not really
+  // work on PASE.
+  //
+  // Returns false if an error occurred. Activate the OsMisc trace for
+  // trace output.
+  //
+  static bool realpath (const char* file_name,
+      char* resolved_name, int resolved_name_len);
+
+};
+
+#endif // OS_AIX_VM_LIBO4_HPP
+
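For illustration only - a minimal usage sketch of the interface above (the
caller query_pase_memory() is hypothetical and not part of this changeset;
on plain AIX the stub implementation makes init() return false, so callers
must tolerate the bridge being unavailable):

    #include "libo4.hpp"

    static void query_pase_memory() {
      if (!libo4::init()) {
        return; // not running on PASE, or the bridge is unavailable
      }
      unsigned long long virt_total = 0, real_total = 0, real_free = 0;
      // Pass NULL for the paging space numbers we are not interested in.
      if (libo4::get_memory_info(&virt_total, &real_total, &real_free,
                                 NULL, NULL)) {
        // ... use virt_total / real_total / real_free ...
      }
      libo4::cleanup();
    }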
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/libodm_aix.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2015, 2015 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "libodm_aix.hpp"
+#include "misc_aix.hpp"
+#include <stdlib.h>
+#include <dlfcn.h>
+#include <string.h>
+#include "runtime/arguments.hpp"
+
+
+dynamicOdm::dynamicOdm() {
+  const char *libodmname = "/usr/lib/libodm.a(shr_64.o)";
+  _libhandle = dlopen(libodmname, RTLD_MEMBER | RTLD_NOW);
+  if (!_libhandle) {
+    trcVerbose("Couldn't open %s", libodmname);
+    return;
+  }
+  _odm_initialize  = (fun_odm_initialize )dlsym(_libhandle, "odm_initialize" );
+  _odm_set_path    = (fun_odm_set_path   )dlsym(_libhandle, "odm_set_path"   );
+  _odm_mount_class = (fun_odm_mount_class)dlsym(_libhandle, "odm_mount_class");
+  _odm_get_obj     = (fun_odm_get_obj    )dlsym(_libhandle, "odm_get_obj"    );
+  _odm_terminate   = (fun_odm_terminate  )dlsym(_libhandle, "odm_terminate"  );
+  if (!_odm_initialize || !_odm_set_path || !_odm_mount_class || !_odm_get_obj || !_odm_terminate) {
+    trcVerbose("Couldn't find all required odm symbols from %s", libodmname);
+    dlclose(_libhandle);
+    _libhandle = NULL;
+    return;
+  }
+}
+
+dynamicOdm::~dynamicOdm() {
+  if (_libhandle) { dlclose(_libhandle); }
+}
+
+
+void odmWrapper::clean_data() { if (_data) { free(_data); _data = NULL; } }
+
+
+int odmWrapper::class_offset(char *field, bool is_aix_5)
+{
+  assert(has_class(), "initialization");
+  for (int i = 0; i < odm_class()->nelem; i++) {
+    if (strcmp(odm_class()->elem[i].elemname, field) == 0) {
+      int offset = odm_class()->elem[i].offset;
+      if (is_aix_5) { offset += LINK_VAL_OFFSET; }
+      return offset;
+    }
+  }
+  return -1;
+}
+
+
+void odmWrapper::determine_os_kernel_version(uint32_t* p_ver) {
+  assert(*p_ver, "must be initialized");
+  int major_aix_version = ((*p_ver) >> 24) & 0xFF,
+      minor_aix_version = ((*p_ver) >> 16) & 0xFF;
+
+  odmWrapper odm("product", "/usr/lib/objrepos"); // could also use "lpp"
+  if (!odm.has_class()) {
+    trcVerbose("try_determine_os_kernel_version: odm init problem");
+    return;
+  }
+  int voff, roff, moff, foff;
+  bool is_aix_5 = (major_aix_version == 5);
+  voff = odm.class_offset("ver", is_aix_5);
+  roff = odm.class_offset("rel", is_aix_5);
+  moff = odm.class_offset("mod", is_aix_5);
+  foff = odm.class_offset("fix", is_aix_5);
+  if (voff == -1 || roff == -1 || moff == -1 || foff == -1) {
+    trcVerbose("try_determine_os_kernel_version: could not get offsets");
+    return;
+  }
+  if (!odm.retrieve_obj("name='bos.mp64'")) {
+    trcVerbose("try_determine_os_kernel_version: odm_get_obj failed");
+    return;
+  }
+  int version, release, modification, fix_level;
+  do {
+    version      = odm.read_short(voff);
+    release      = odm.read_short(roff);
+    modification = odm.read_short(moff);
+    fix_level    = odm.read_short(foff);
+    trcVerbose("odm found version: %d.%d.%d.%d", version, release, modification, fix_level);
+    if (version >> 8 != 0 || release >> 8 != 0 || modification >> 8 != 0 || fix_level >> 8 != 0) {
+      trcVerbose("8 bit numbers expected");
+      return;
+    }
+  } while (odm.retrieve_obj());
+
+  if (version != major_aix_version || release != minor_aix_version) {
+    trcVerbose("version determined by odm does not match uname");
+    return;
+  }
+  *p_ver = version << 24 | release << 16 | modification << 8 | fix_level;
+}
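For illustration only - a hypothetical call site (not part of this changeset)
would seed the two high-order bytes from uname() before letting ODM refine
the value:

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/utsname.h>
    #include "libodm_aix.hpp"

    static uint32_t query_kernel_version() {
      struct utsname un;
      if (uname(&un) != 0) return 0;
      // On AIX, uname() reports the major version in 'version' and the
      // minor version in 'release' (both as decimal strings).
      uint32_t ver = ((uint32_t)atoi(un.version) << 24) |
                     ((uint32_t)atoi(un.release) << 16);
      odmWrapper::determine_os_kernel_version(&ver);
      return ver; // 0xVVRRMMFF on success, the uname-seeded value otherwise
    }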
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/libodm_aix.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2015, 2015 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// Encapsulates the libodm library and provides more convenient interfaces.
+
+#ifndef OS_AIX_VM_LIBODM_AIX_HPP
+#define OS_AIX_VM_LIBODM_AIX_HPP
+
+#include <odmi.h>
+
+
+// The purpose of this code is to dynamically load the libodm library
+// instead of statically linking against it. The library is AIX-specific.
+// It only exists on AIX, not on PASE. In order to share binaries
+// between AIX and PASE, we can't directly link against it.
+
+typedef int          (*fun_odm_initialize )(void);
+typedef char*        (*fun_odm_set_path   )(char*);
+typedef CLASS_SYMBOL (*fun_odm_mount_class)(char*);
+typedef void*        (*fun_odm_get_obj    )(CLASS_SYMBOL, char*, void*, int);
+typedef int          (*fun_odm_terminate  )(void);
+
+class dynamicOdm {
+  void *_libhandle;
+ protected:
+  fun_odm_initialize  _odm_initialize;
+  fun_odm_set_path    _odm_set_path;
+  fun_odm_mount_class _odm_mount_class;
+  fun_odm_get_obj     _odm_get_obj;
+  fun_odm_terminate   _odm_terminate;
+ public:
+  dynamicOdm();
+  ~dynamicOdm();
+  bool odm_loaded() { return _libhandle != NULL; }
+};
+
+
+// We provide a more convenient interface for odm access and
+// especially to determine the exact AIX kernel version.
+
+class odmWrapper : private dynamicOdm {
+  CLASS_SYMBOL _odm_class;
+  char *_data;
+  bool _initialized;
+  void clean_data();
+
+ public:
+  // Make sure everything gets initialized and cleaned up properly.
+  explicit odmWrapper(char* odm_class_name, char* odm_path = NULL) : _odm_class((CLASS_SYMBOL)-1),
+                                                                     _data(NULL), _initialized(false) {
+    if (!odm_loaded()) { return; }
+    _initialized = ((*_odm_initialize)() != -1);
+    if (_initialized) {
+      if (odm_path) { (*_odm_set_path)(odm_path); }
+      _odm_class = (*_odm_mount_class)(odm_class_name);
+    }
+  }
+  ~odmWrapper() {
+    if (_initialized) { (*_odm_terminate)(); clean_data(); }
+  }
+
+  CLASS_SYMBOL odm_class() { return _odm_class; }
+  bool has_class() { return odm_class() != (CLASS_SYMBOL)-1; }
+  int class_offset(char *field, bool is_aix_5);
+  char* data() { return _data; }
+
+  char* retrieve_obj(char* name = NULL) {
+    clean_data();
+    char *cnp = (char*)(void*)(*_odm_get_obj)(odm_class(), name, NULL, (name == NULL) ? ODM_NEXT : ODM_FIRST);
+    if (cnp != (char*)-1) { _data = cnp; }
+    return data();
+  }
+
+  int read_short(int offs) {
+    short *addr = (short*)(data() + offs);
+    return *addr;
+  }
+
+  // Determine the exact AIX kernel version as a 4-byte value.
+  // The two high-order bytes must already be initialized; they can be
+  // determined via uname.
+  static void determine_os_kernel_version(uint32_t* p_ver);
+};
+
+#endif // OS_AIX_VM_LIBODM_AIX_HPP
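For illustration only - a sketch of the retrieve_obj() contract (a criteria
string selects the first matching object via ODM_FIRST; subsequent NULL calls
iterate via ODM_NEXT). The class name, path and selection string mirror the
ones used in libodm_aix.cpp:

    odmWrapper odm("product", "/usr/lib/objrepos");
    if (odm.has_class()) {
      int off = odm.class_offset("ver", /* is_aix_5 */ false);
      if (off != -1 && odm.retrieve_obj("name='bos.mp64'")) { // ODM_FIRST
        do {
          int v = odm.read_short(off);  // field value of the current object
          // ... use v ...
        } while (odm.retrieve_obj());                         // ODM_NEXT
      }
    }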
--- a/src/os/aix/vm/libperfstat_aix.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/aix/vm/libperfstat_aix.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,49 +22,50 @@
  *
  */
 
-#include "runtime/arguments.hpp"
 #include "libperfstat_aix.hpp"
+#include "misc_aix.hpp"
 
-// For dlopen and friends
-#include <fcntl.h>
+#include <dlfcn.h>
+#include <sys/systemcfg.h>
 
-// handle to the libperfstat
+// Handle to the libperfstat.
 static void* g_libhandle = NULL;
 
-// whether initialization worked
-static bool g_initialized = false;
-
-
-typedef int (*fun_perfstat_cpu_total_t) (perfstat_id_t *name, perfstat_cpu_total_t* userbuff,
+typedef int (*fun_perfstat_cpu_total_t) (perfstat_id_t *name, PERFSTAT_CPU_TOTAL_T_LATEST* userbuff,
                                          int sizeof_userbuff, int desired_number);
 
 typedef int (*fun_perfstat_memory_total_t) (perfstat_id_t *name, perfstat_memory_total_t* userbuff,
                                             int sizeof_userbuff, int desired_number);
 
+typedef int (*fun_perfstat_partition_total_t) (perfstat_id_t *name,
+    PERFSTAT_PARTITON_TOTAL_T_LATEST* userbuff, int sizeof_userbuff,
+    int desired_number);
+
+typedef int (*fun_perfstat_wpar_total_t) (perfstat_id_wpar_t *name,
+    PERFSTAT_WPAR_TOTAL_T_LATEST* userbuff, int sizeof_userbuff,
+    int desired_number);
+
 typedef void (*fun_perfstat_reset_t) ();
 
+typedef cid_t (*fun_wpar_getcid_t) ();
+
 static fun_perfstat_cpu_total_t     g_fun_perfstat_cpu_total = NULL;
 static fun_perfstat_memory_total_t  g_fun_perfstat_memory_total = NULL;
+static fun_perfstat_partition_total_t g_fun_perfstat_partition_total = NULL;
+static fun_perfstat_wpar_total_t    g_fun_perfstat_wpar_total = NULL;
 static fun_perfstat_reset_t         g_fun_perfstat_reset = NULL;
+static fun_wpar_getcid_t            g_fun_wpar_getcid = NULL;
 
 bool libperfstat::init() {
 
-  if (g_initialized) {
-    return true;
-  }
-
-  g_initialized = false;
-
-  // dynamically load the libperfstat porting library.
+  // Dynamically load the libperfstat porting library.
   g_libhandle = dlopen("/usr/lib/libperfstat.a(shr_64.o)", RTLD_MEMBER | RTLD_NOW);
   if (!g_libhandle) {
-    if (Verbose) {
-      fprintf(stderr, "Cannot load libperfstat.a (dlerror: %s)", dlerror());
-    }
+    trcVerbose("Cannot load libperfstat.a (dlerror: %s)", dlerror());
     return false;
   }
 
-  // resolve function pointers
+  // Resolve function pointers
 
 #define RESOLVE_FUN_NO_ERROR(name) \
   g_fun_##name = (fun_##name##_t) dlsym(g_libhandle, #name);
@@ -72,26 +73,28 @@
 #define RESOLVE_FUN(name) \
   RESOLVE_FUN_NO_ERROR(name) \
   if (!g_fun_##name) { \
-    if (Verbose) { \
-      fprintf(stderr, "Cannot resolve " #name "() from libperfstat.a\n" \
+    trcVerbose("Cannot resolve " #name "() from libperfstat.a\n" \
                       "   (dlerror: %s)", dlerror()); \
-      } \
     return false; \
   }
 
+  // These functions may or may not be there depending on the OS release.
+  RESOLVE_FUN_NO_ERROR(perfstat_partition_total);
+  RESOLVE_FUN_NO_ERROR(perfstat_wpar_total);
+  RESOLVE_FUN_NO_ERROR(wpar_getcid);
+
+  // These functions are required for every release.
   RESOLVE_FUN(perfstat_cpu_total);
   RESOLVE_FUN(perfstat_memory_total);
   RESOLVE_FUN(perfstat_reset);
 
-  g_initialized = true;
+  trcVerbose("libperfstat loaded.");
 
   return true;
 }
 
 void libperfstat::cleanup() {
 
-  g_initialized = false;
-
   if (g_libhandle) {
     dlclose(g_libhandle);
     g_libhandle = NULL;
@@ -99,26 +102,250 @@
 
   g_fun_perfstat_cpu_total = NULL;
   g_fun_perfstat_memory_total = NULL;
+  g_fun_perfstat_partition_total = NULL;
+  g_fun_perfstat_wpar_total = NULL;
   g_fun_perfstat_reset = NULL;
+  g_fun_wpar_getcid = NULL;
+
 }
 
 int libperfstat::perfstat_memory_total(perfstat_id_t *name,
                                        perfstat_memory_total_t* userbuff,
                                        int sizeof_userbuff, int desired_number) {
-  assert(g_initialized, "libperfstat not initialized");
-  assert(g_fun_perfstat_memory_total, "");
+  if (g_fun_perfstat_memory_total == NULL) {
+    return -1;
+  }
   return g_fun_perfstat_memory_total(name, userbuff, sizeof_userbuff, desired_number);
 }
 
-int libperfstat::perfstat_cpu_total(perfstat_id_t *name, perfstat_cpu_total_t* userbuff,
+int libperfstat::perfstat_cpu_total(perfstat_id_t *name, PERFSTAT_CPU_TOTAL_T_LATEST* userbuff,
                                     int sizeof_userbuff, int desired_number) {
-  assert(g_initialized, "libperfstat not initialized");
-  assert(g_fun_perfstat_cpu_total, "");
+  if (g_fun_perfstat_cpu_total == NULL) {
+    return -1;
+  }
   return g_fun_perfstat_cpu_total(name, userbuff, sizeof_userbuff, desired_number);
 }
 
+int libperfstat::perfstat_partition_total(perfstat_id_t *name, PERFSTAT_PARTITON_TOTAL_T_LATEST* userbuff,
+                                          int sizeof_userbuff, int desired_number) {
+  if (g_fun_perfstat_partition_total == NULL) {
+    return -1;
+  }
+  return g_fun_perfstat_partition_total(name, userbuff, sizeof_userbuff, desired_number);
+}
+
+int libperfstat::perfstat_wpar_total(perfstat_id_wpar_t *name, PERFSTAT_WPAR_TOTAL_T_LATEST* userbuff,
+                                     int sizeof_userbuff, int desired_number) {
+  if (g_fun_perfstat_wpar_total == NULL) {
+    return -1;
+  }
+  return g_fun_perfstat_wpar_total(name, userbuff, sizeof_userbuff, desired_number);
+}
+
 void libperfstat::perfstat_reset() {
-  assert(g_initialized, "libperfstat not initialized");
-  assert(g_fun_perfstat_reset, "");
-  g_fun_perfstat_reset();
+  if (g_fun_perfstat_reset != NULL) {
+    g_fun_perfstat_reset();
+  }
+}
+
+cid_t libperfstat::wpar_getcid() {
+  if (g_fun_wpar_getcid == NULL) {
+    return (cid_t) -1;
+  }
+  return g_fun_wpar_getcid();
+}
+
+
+//////////////////// convenience functions, release-independent /////////////////////////////
+
+// Excerpts from systemcfg.h definitions newer than AIX 5.3 (our oldest build platform)
+
+#define PV_6 0x100000          /* Power PC 6 */
+#define PV_6_1 0x100001        /* Power PC 6 DD1.x */
+#define PV_7 0x200000          /* Power PC 7 */
+#define PV_5_Compat 0x0F8000   /* Power PC 5 */
+#define PV_6_Compat 0x108000   /* Power PC 6 */
+#define PV_7_Compat 0x208000   /* Power PC 7 */
+#define PV_8 0x300000          /* Power PC 8 */
+#define PV_8_Compat 0x308000   /* Power PC 8 */
+
+
+// Retrieve global cpu information.
+bool libperfstat::get_cpuinfo(cpuinfo_t* pci) {
+
+  assert(pci, "get_cpuinfo: invalid parameter");
+  memset(pci, 0, sizeof(cpuinfo_t));
+
+  PERFSTAT_CPU_TOTAL_T_LATEST psct;
+  memset (&psct, '\0', sizeof(psct));
+
+  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(PERFSTAT_CPU_TOTAL_T_LATEST), 1)) {
+    if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_61), 1)) {
+      if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t_53), 1)) {
+          trcVerbose("perfstat_cpu_total() failed (errno=%d)", errno);
+          return false;
+      }
+    }
+  }
+
+  // Global cpu information.
+  strcpy (pci->description, psct.description);
+  pci->processorHZ = psct.processorHZ;
+  pci->ncpus = psct.ncpus;
+  for (int i = 0; i < 3; i++) {
+    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
+  }
+
+  pci->user_clock_ticks = psct.user;
+  pci->sys_clock_ticks  = psct.sys;
+  pci->idle_clock_ticks = psct.idle;
+  pci->wait_clock_ticks = psct.wait;
+
+  // Get the processor version from _system_configuration.
+  switch (_system_configuration.version) {
+  case PV_8:
+    strcpy(pci->version, "Power PC 8");
+    break;
+  case PV_7:
+    strcpy(pci->version, "Power PC 7");
+    break;
+  case PV_6_1:
+    strcpy(pci->version, "Power PC 6 DD1.x");
+    break;
+  case PV_6:
+    strcpy(pci->version, "Power PC 6");
+    break;
+  case PV_5:
+    strcpy(pci->version, "Power PC 5");
+    break;
+  case PV_5_2:
+    strcpy(pci->version, "Power PC 5_2");
+    break;
+  case PV_5_3:
+    strcpy(pci->version, "Power PC 5_3");
+    break;
+  case PV_5_Compat:
+    strcpy(pci->version, "PV_5_Compat");
+    break;
+  case PV_6_Compat:
+    strcpy(pci->version, "PV_6_Compat");
+    break;
+  case PV_7_Compat:
+    strcpy(pci->version, "PV_7_Compat");
+    break;
+  case PV_8_Compat:
+    strcpy(pci->version, "PV_8_Compat");
+    break;
+  default:
+    strcpy(pci->version, "unknown");
+  }
+
+  return true;
 }
+
+// Retrieve partition information.
+bool libperfstat::get_partitioninfo(partitioninfo_t* ppi) {
+
+  assert(ppi, "get_partitioninfo: invalid parameter");
+  memset(ppi, 0, sizeof(partitioninfo_t));
+
+  PERFSTAT_PARTITON_TOTAL_T_LATEST pspt;
+  memset(&pspt, '\0', sizeof(pspt));
+
+  bool ame_details = true;
+
+  if (-1 == libperfstat::perfstat_partition_total(NULL, &pspt, sizeof(PERFSTAT_PARTITON_TOTAL_T_LATEST), 1)) {
+    if (-1 == libperfstat::perfstat_partition_total(NULL, &pspt, sizeof(perfstat_partition_total_t_71), 1)) {
+      ame_details = false;
+      if (-1 == libperfstat::perfstat_partition_total(NULL, &pspt, sizeof(perfstat_partition_total_t_61), 1)) {
+        if (-1 == libperfstat::perfstat_partition_total(NULL, &pspt, sizeof(perfstat_partition_total_t_53), 1)) {
+          if (-1 == libperfstat::perfstat_partition_total(NULL, &pspt, sizeof(perfstat_partition_total_t_53_5), 1)) {
+            trcVerbose("perfstat_partition_total() failed (errno=%d)", errno);
+            return false;
+          }
+        }
+      }
+    }
+  }
+
+  // partition type info
+  ppi->shared_enabled = pspt.type.b.shared_enabled;
+  ppi->smt_capable = pspt.type.b.smt_capable;
+  ppi->smt_enabled = pspt.type.b.smt_enabled;
+  ppi->lpar_capable = pspt.type.b.lpar_capable;
+  ppi->lpar_enabled = pspt.type.b.lpar_enabled;
+  ppi->dlpar_capable = pspt.type.b.dlpar_capable;
+  ppi->capped = pspt.type.b.capped;
+  ppi->kernel_is_64 = pspt.type.b.kernel_is_64;
+  ppi->pool_util_authority = pspt.type.b.pool_util_authority;
+  ppi->donate_capable = pspt.type.b.donate_capable;
+  ppi->donate_enabled = pspt.type.b.donate_enabled;
+  ppi->ams_capable = pspt.type.b.ams_capable;
+  ppi->ams_enabled = pspt.type.b.ams_enabled;
+  ppi->power_save = pspt.type.b.power_save;
+  ppi->ame_enabled = pspt.type.b.ame_enabled;
+
+  // partition total info
+  ppi->online_cpus = pspt.online_cpus;
+  ppi->entitled_proc_capacity = pspt.entitled_proc_capacity;
+  ppi->var_proc_capacity_weight = pspt.var_proc_capacity_weight;
+  ppi->phys_cpus_pool = pspt.phys_cpus_pool;
+  ppi->pool_id = pspt.pool_id;
+  ppi->entitled_pool_capacity = pspt.entitled_pool_capacity;
+  strcpy(ppi->name, pspt.name);
+
+  // Additional values in ppi that we need for the later computation of
+  // cpu utilization (pool authorization may be needed for pool_idle_time).
+  ppi->timebase_last   = pspt.timebase_last;
+  ppi->pool_idle_time  = pspt.pool_idle_time;
+  ppi->pcpu_tics_user  = pspt.puser;
+  ppi->pcpu_tics_sys   = pspt.psys;
+  ppi->pcpu_tics_idle  = pspt.pidle;
+  ppi->pcpu_tics_wait  = pspt.pwait;
+
+  // Additional AME information.
+  if (ame_details) {
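+    // true_memory and expanded_memory are reported in 4 KB pages;
+    // convert them to bytes here.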
+    ppi->true_memory = pspt.true_memory * 4096;
+    ppi->expanded_memory = pspt.expanded_memory * 4096;
+    ppi->target_memexp_factr = pspt.target_memexp_factr;
+    ppi->current_memexp_factr = pspt.current_memexp_factr;
+    ppi->cmcs_total_time = pspt.cmcs_total_time;
+  }
+
+  return true;
+}
+
+// Retrieve wpar information.
+bool libperfstat::get_wparinfo(wparinfo_t* pwi) {
+
+  assert(pwi, "get_wparinfo: invalid parameter");
+  memset(pwi, 0, sizeof(wparinfo_t));
+
+  if (libperfstat::wpar_getcid() <= 0) {
+    return false;
+  }
+
+  PERFSTAT_WPAR_TOTAL_T_LATEST pswt;
+  memset (&pswt, '\0', sizeof(pswt));
+
+  if (-1 == libperfstat::perfstat_wpar_total(NULL, &pswt, sizeof(PERFSTAT_WPAR_TOTAL_T_LATEST), 1)) {
+    if (-1 == libperfstat::perfstat_wpar_total(NULL, &pswt, sizeof(perfstat_wpar_total_t_61), 1)) {
+      trcVerbose("perfstat_wpar_total() failed (errno=%d)", errno);
+      return false;
+    }
+  }
+
+  // WPAR type info.
+  pwi->app_wpar = pswt.type.b.app_wpar;
+  pwi->cpu_rset = pswt.type.b.cpu_rset;
+  pwi->cpu_xrset = pswt.type.b.cpu_xrset;
+  pwi->cpu_limits = pswt.type.b.cpu_limits;
+  pwi->mem_limits = pswt.type.b.mem_limits;
+  // WPAR total info.
+  strcpy(pwi->name, pswt.name);
+  pwi->wpar_id = pswt.wpar_id;
+  pwi->cpu_limit = pswt.cpu_limit;
+  pwi->mem_limit = pswt.mem_limit;
+
+  return true;
+}
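For illustration only - a hypothetical consumer of the release-independent
convenience layer (cpuinfo_t is the wrapper struct declared further down in
libperfstat_aix.hpp, beyond this excerpt):

    if (libperfstat::init()) {
      libperfstat::cpuinfo_t ci;
      if (libperfstat::get_cpuinfo(&ci)) {
        // get_cpuinfo() already divides loadavg[] by (1 << SBITS), so these
        // are plain floating point load averages.
        trcVerbose("ncpus=%d load1m=%.2f", ci.ncpus, ci.loadavg[0]);
      }
      libperfstat::cleanup();
    }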
--- a/src/os/aix/vm/libperfstat_aix.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/aix/vm/libperfstat_aix.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -22,7 +22,7 @@
  *
  */
 
-// encapsulates the libperfstat library.
+// Encapsulates the libperfstat library.
 //
 // The purpose of this code is to dynamically load the libperfstat library
 // instead of statically linking against it. The libperfstat library is an
@@ -32,7 +32,732 @@
 #ifndef OS_AIX_VM_LIBPERFSTAT_AIX_HPP
 #define OS_AIX_VM_LIBPERFSTAT_AIX_HPP
 
-#include <libperfstat.h>
+#include <sys/types.h>
+#include <stdlib.h>
+
+///////////////////////////////////////////////////////////////////////////////////////////////
+// These are excerpts from the AIX 5.3, 6.1 and 7.1 libperfstat.h -
+// this is all we need from libperfstat.h, and we want to avoid having to
+// include <libperfstat.h>.
+//
+// Note: All structures are defined as if libperfstat.h were included on an
+// AIX 5.2 build machine.
+//
+// The rationale behind that: a program built on an AIX 5.2 build machine
+// that includes libperfstat.h and hard-links against libperfstat.a should
+// work without recompilation on all newer AIX versions.
+//
+
+#define IDENTIFIER_LENGTH 64    /* length of strings included in the structures */
+
+
+typedef struct { /* structure element identifier */
+  char name[IDENTIFIER_LENGTH]; /* name of the identifier */
+} perfstat_id_t;
+
+#define CEC_ID_LEN 40           /* CEC identifier length */
+#define MAXCORRALNAMELEN 25     /* length of the wpar name */
+#define FIRST_WPARNAME ""       /* pseudo-name for the first WPAR */
+#define FIRST_WPARID -1         /* pseudo-id for the first WPAR */
+
+typedef unsigned short cid_t;   /* workload partition identifier */
+
+typedef struct { /* Virtual memory utilization */
+  u_longlong_t virt_total;    /* total virtual memory (in 4KB pages) */
+  u_longlong_t real_total;    /* total real memory (in 4KB pages) */
+  u_longlong_t real_free;     /* free real memory (in 4KB pages) */
+  u_longlong_t real_pinned;   /* real memory which is pinned (in 4KB pages) */
+  u_longlong_t real_inuse;    /* real memory which is in use (in 4KB pages) */
+  u_longlong_t pgbad;         /* number of bad pages */
+  u_longlong_t pgexct;        /* number of page faults */
+  u_longlong_t pgins;         /* number of pages paged in */
+  u_longlong_t pgouts;        /* number of pages paged out */
+  u_longlong_t pgspins;       /* number of page ins from paging space */
+  u_longlong_t pgspouts;      /* number of page outs from paging space */
+  u_longlong_t scans;         /* number of page scans by clock */
+  u_longlong_t cycles;        /* number of page replacement cycles */
+  u_longlong_t pgsteals;      /* number of page steals */
+  u_longlong_t numperm;       /* number of frames used for files (in 4KB pages) */
+  u_longlong_t pgsp_total;    /* total paging space (in 4KB pages) */
+  u_longlong_t pgsp_free;     /* free paging space (in 4KB pages) */
+  u_longlong_t pgsp_rsvd;     /* reserved paging space (in 4KB pages) */
+  u_longlong_t real_system;   /* real memory used by system segments (in 4KB pages). This is the sum of all the used pages in segment marked for system usage.
+                               * Since segment classifications are not always guaranteed to be accurate, this number is only an approximation. */
+  u_longlong_t real_user;     /* real memory used by non-system segments (in 4KB pages). This is the sum of all pages used in segments not marked for system usage.
+                               * Since segment classifications are not always guaranteed to be accurate, this number is only an approximation. */
+  u_longlong_t real_process;  /* real memory used by process segments (in 4KB pages). This is real_total-real_free-numperm-real_system. Since real_system is an
+                               * approximation, this number is too. */
+  u_longlong_t virt_active;   /* Active virtual pages. Virtual pages are considered active if they have been accessed */
+
+} perfstat_memory_total_t;
+
+typedef struct { /* global cpu information AIX 5.3 < TL10 */
+  int ncpus;                            /* number of active logical processors */
+  int ncpus_cfg;                        /* number of configured processors */
+  char description[IDENTIFIER_LENGTH];  /* processor description (type/official name) */
+  u_longlong_t processorHZ;             /* processor speed in Hz */
+  u_longlong_t user;                    /* raw total number of clock ticks spent in user mode */
+  u_longlong_t sys;                     /* raw total number of clock ticks spent in system mode */
+  u_longlong_t idle;                    /* raw total number of clock ticks spent idle */
+  u_longlong_t wait;                    /* raw total number of clock ticks spent waiting for I/O */
+  u_longlong_t pswitch;                 /* number of process switches (change in currently running process) */
+  u_longlong_t syscall;                 /* number of system calls executed */
+  u_longlong_t sysread;                 /* number of read system calls executed */
+  u_longlong_t syswrite;                /* number of write system calls executed */
+  u_longlong_t sysfork;                 /* number of fork system calls executed */
+  u_longlong_t sysexec;                 /* number of exec system calls executed */
+  u_longlong_t readch;                  /* number of characters transferred with read system call */
+  u_longlong_t writech;                 /* number of characters transferred with write system call */
+  u_longlong_t devintrs;                /* number of device interrupts */
+  u_longlong_t softintrs;               /* number of software interrupts */
+  time_t lbolt;                         /* number of ticks since last reboot */
+  u_longlong_t loadavg[3];              /* (1<<SBITS) times the average number of runnable processes during the last 1, 5 and 15 minutes.
+                                               * To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>. */
+  u_longlong_t runque;                  /* length of the run queue (processes ready) */
+  u_longlong_t swpque;                  /* length of the swap queue (processes waiting to be paged in) */
+  u_longlong_t bread;                   /* number of blocks read */
+  u_longlong_t bwrite;                  /* number of blocks written */
+  u_longlong_t lread;                   /* number of logical read requests */
+  u_longlong_t lwrite;                  /* number of logical write requests */
+  u_longlong_t phread;                  /* number of physical reads (reads on raw devices) */
+  u_longlong_t phwrite;                 /* number of physical writes (writes on raw devices) */
+  u_longlong_t runocc;                  /* updated whenever runque is updated, i.e. the runqueue is occupied.
+                                               * This can be used to compute the simple average of ready processes  */
+  u_longlong_t swpocc;                  /* updated whenever swpque is updated, i.e. the swpqueue is occupied.
+                                               * This can be used to compute the simple average of processes waiting to be paged in */
+  u_longlong_t iget;                    /* number of inode lookups */
+  u_longlong_t namei;                   /* number of vnode lookup from a path name */
+  u_longlong_t dirblk;                  /* number of 512-byte block reads by the directory search routine to locate an entry for a file */
+  u_longlong_t msg;                     /* number of IPC message operations */
+  u_longlong_t sema;                    /* number of IPC semaphore operations */
+  u_longlong_t rcvint;                  /* number of tty receive interrupts */
+  u_longlong_t xmtint;                  /* number of tty transmit interrupts */
+  u_longlong_t mdmint;                  /* number of modem interrupts */
+  u_longlong_t tty_rawinch;             /* number of raw input characters  */
+  u_longlong_t tty_caninch;             /* number of canonical input characters (always zero) */
+  u_longlong_t tty_rawoutch;            /* number of raw output characters */
+  u_longlong_t ksched;                  /* number of kernel processes created */
+  u_longlong_t koverf;                  /* kernel process creation attempts where:
+                                               * -the user has forked to their maximum limit
+                                               * -the configuration limit of processes has been reached */
+  u_longlong_t kexit;                   /* number of kernel processes that became zombies */
+  u_longlong_t rbread;                  /* number of remote read requests */
+  u_longlong_t rcread;                  /* number of cached remote reads */
+  u_longlong_t rbwrt;                   /* number of remote writes */
+  u_longlong_t rcwrt;                   /* number of cached remote writes */
+  u_longlong_t traps;                   /* number of traps */
+  int ncpus_high;                       /* index of highest processor online */
+  u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+  u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+  u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+  u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+  u_longlong_t decrintrs;               /* number of decrementer tics interrupts */
+  u_longlong_t mpcrintrs;               /* number of mpc's received interrupts */
+  u_longlong_t mpcsintrs;               /* number of mpc's sent interrupts */
+  u_longlong_t phantintrs;              /* number of phantom interrupts */
+  u_longlong_t idle_donated_purr;       /* number of idle cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_donated_spurr;      /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_purr;       /* number of busy cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_spurr;      /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_stolen_purr;        /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t idle_stolen_spurr;       /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_purr;        /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_spurr;       /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+  short iowait;                         /* number of processes that are asleep waiting for buffered I/O */
+  short physio;                         /* number of processes waiting for raw I/O */
+  longlong_t twait;                     /* number of threads that are waiting for filesystem direct(cio) */
+  u_longlong_t hpi;                     /* number of hypervisor page-ins */
+  u_longlong_t hpit;                    /* Time spent in hypervisor page-ins (in nanoseconds) */
+} perfstat_cpu_total_t_53;
+
+typedef struct { /* global cpu information AIX 6.1|5.3 > TL09 */
+  int ncpus;                            /* number of active logical processors */
+  int ncpus_cfg;                        /* number of configured processors */
+  char description[IDENTIFIER_LENGTH];  /* processor description (type/official name) */
+  u_longlong_t processorHZ;             /* processor speed in Hz */
+  u_longlong_t user;                    /* raw total number of clock ticks spent in user mode */
+  u_longlong_t sys;                     /* raw total number of clock ticks spent in system mode */
+  u_longlong_t idle;                    /* raw total number of clock ticks spent idle */
+  u_longlong_t wait;                    /* raw total number of clock ticks spent waiting for I/O */
+  u_longlong_t pswitch;                 /* number of process switches (change in currently running process) */
+  u_longlong_t syscall;                 /* number of system calls executed */
+  u_longlong_t sysread;                 /* number of read system calls executed */
+  u_longlong_t syswrite;                /* number of write system calls executed */
+  u_longlong_t sysfork;                 /* number of fork system calls executed */
+  u_longlong_t sysexec;                 /* number of exec system calls executed */
+  u_longlong_t readch;                  /* number of characters transferred with read system call */
+  u_longlong_t writech;                 /* number of characters transferred with write system call */
+  u_longlong_t devintrs;                /* number of device interrupts */
+  u_longlong_t softintrs;               /* number of software interrupts */
+  time_t lbolt;                         /* number of ticks since last reboot */
+  u_longlong_t loadavg[3];              /* (1<<SBITS) times the average number of runnable processes during the last 1, 5 and 15 minutes.
+                                               * To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>. */
+  u_longlong_t runque;                  /* length of the run queue (processes ready) */
+  u_longlong_t swpque;                  /* length of the swap queue (processes waiting to be paged in) */
+  u_longlong_t bread;                   /* number of blocks read */
+  u_longlong_t bwrite;                  /* number of blocks written */
+  u_longlong_t lread;                   /* number of logical read requests */
+  u_longlong_t lwrite;                  /* number of logical write requests */
+  u_longlong_t phread;                  /* number of physical reads (reads on raw devices) */
+  u_longlong_t phwrite;                 /* number of physical writes (writes on raw devices) */
+  u_longlong_t runocc;                  /* updated whenever runque is updated, i.e. the runqueue is occupied.
+                                               * This can be used to compute the simple average of ready processes  */
+  u_longlong_t swpocc;                  /* updated whenever swpque is updated, i.e. the swpqueue is occupied.
+                                               * This can be used to compute the simple average of processes waiting to be paged in */
+  u_longlong_t iget;                    /* number of inode lookups */
+  u_longlong_t namei;                   /* number of vnode lookup from a path name */
+  u_longlong_t dirblk;                  /* number of 512-byte block reads by the directory search routine to locate an entry for a file */
+  u_longlong_t msg;                     /* number of IPC message operations */
+  u_longlong_t sema;                    /* number of IPC semaphore operations */
+  u_longlong_t rcvint;                  /* number of tty receive interrupts */
+  u_longlong_t xmtint;                  /* number of tty transmit interrupts */
+  u_longlong_t mdmint;                  /* number of modem interrupts */
+  u_longlong_t tty_rawinch;             /* number of raw input characters  */
+  u_longlong_t tty_caninch;             /* number of canonical input characters (always zero) */
+  u_longlong_t tty_rawoutch;            /* number of raw output characters */
+  u_longlong_t ksched;                  /* number of kernel processes created */
+  u_longlong_t koverf;                  /* kernel process creation attempts where:
+                                               * -the user has forked to their maximum limit
+                                               * -the configuration limit of processes has been reached */
+  u_longlong_t kexit;                   /* number of kernel processes that became zombies */
+  u_longlong_t rbread;                  /* number of remote read requests */
+  u_longlong_t rcread;                  /* number of cached remote reads */
+  u_longlong_t rbwrt;                   /* number of remote writes */
+  u_longlong_t rcwrt;                   /* number of cached remote writes */
+  u_longlong_t traps;                   /* number of traps */
+  int ncpus_high;                       /* index of highest processor online */
+  u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+  u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+  u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+  u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+  u_longlong_t decrintrs;               /* number of decrementer tics interrupts */
+  u_longlong_t mpcrintrs;               /* number of mpc's received interrupts */
+  u_longlong_t mpcsintrs;               /* number of mpc's sent interrupts */
+  u_longlong_t phantintrs;              /* number of phantom interrupts */
+  u_longlong_t idle_donated_purr;       /* number of idle cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_donated_spurr;      /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_purr;       /* number of busy cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_spurr;      /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_stolen_purr;        /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t idle_stolen_spurr;       /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_purr;        /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_spurr;       /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+  short iowait;                         /* number of processes that are asleep waiting for buffered I/O */
+  short physio;                         /* number of processes waiting for raw I/O */
+  longlong_t twait;                     /* number of threads that are waiting for filesystem direct(cio) */
+  u_longlong_t hpi;                     /* number of hypervisor page-ins */
+  u_longlong_t hpit;                    /* Time spent in hypervisor page-ins (in nanoseconds) */
+  u_longlong_t puser_spurr;             /* number of spurr cycles spent in user mode */
+  u_longlong_t psys_spurr;              /* number of spurr cycles spent in kernel mode */
+  u_longlong_t pidle_spurr;             /* number of spurr cycles spent in idle mode */
+  u_longlong_t pwait_spurr;             /* number of spurr cycles spent in wait mode */
+  int spurrflag;                        /* set if running in spurr mode */
+} perfstat_cpu_total_t_61;
+
+typedef struct { /* global cpu information AIX 7.1 */
+  int ncpus;                            /* number of active logical processors */
+  int ncpus_cfg;                        /* number of configured processors */
+  char description[IDENTIFIER_LENGTH];  /* processor description (type/official name) */
+  u_longlong_t processorHZ;             /* processor speed in Hz */
+  u_longlong_t user;                    /* raw total number of clock ticks spent in user mode */
+  u_longlong_t sys;                     /* raw total number of clock ticks spent in system mode */
+  u_longlong_t idle;                    /* raw total number of clock ticks spent idle */
+  u_longlong_t wait;                    /* raw total number of clock ticks spent waiting for I/O */
+  u_longlong_t pswitch;                 /* number of process switches (change in currently running process) */
+  u_longlong_t syscall;                 /* number of system calls executed */
+  u_longlong_t sysread;                 /* number of read system calls executed */
+  u_longlong_t syswrite;                /* number of write system calls executed */
+  u_longlong_t sysfork;                 /* number of fork system calls executed */
+  u_longlong_t sysexec;                 /* number of exec system calls executed */
+  u_longlong_t readch;                  /* number of characters transferred with read system call */
+  u_longlong_t writech;                 /* number of characters transferred with write system call */
+  u_longlong_t devintrs;                /* number of device interrupts */
+  u_longlong_t softintrs;               /* number of software interrupts */
+  time_t lbolt;                         /* number of ticks since last reboot */
+  u_longlong_t loadavg[3];              /* (1<<SBITS) times the average number of runnable processes during the last 1, 5 and 15 minutes.
+                                               * To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>. */
+  u_longlong_t runque;                  /* length of the run queue (processes ready) */
+  u_longlong_t swpque;                  /* length of the swap queue (processes waiting to be paged in) */
+  u_longlong_t bread;                   /* number of blocks read */
+  u_longlong_t bwrite;                  /* number of blocks written */
+  u_longlong_t lread;                   /* number of logical read requests */
+  u_longlong_t lwrite;                  /* number of logical write requests */
+  u_longlong_t phread;                  /* number of physical reads (reads on raw devices) */
+  u_longlong_t phwrite;                 /* number of physical writes (writes on raw devices) */
+  u_longlong_t runocc;                  /* updated whenever runque is updated, i.e. the runqueue is occupied.
+                                               * This can be used to compute the simple average of ready processes  */
+  u_longlong_t swpocc;                  /* updated whenever swpque is updated, i.e. the swpqueue is occupied.
+                                               * This can be used to compute the simple average of processes waiting to be paged in */
+  u_longlong_t iget;                    /* number of inode lookups */
+  u_longlong_t namei;                   /* number of vnode lookup from a path name */
+  u_longlong_t dirblk;                  /* number of 512-byte block reads by the directory search routine to locate an entry for a file */
+  u_longlong_t msg;                     /* number of IPC message operations */
+  u_longlong_t sema;                    /* number of IPC semaphore operations */
+  u_longlong_t rcvint;                  /* number of tty receive interrupts */
+  u_longlong_t xmtint;                  /* number of tty transmit interrupts */
+  u_longlong_t mdmint;                  /* number of modem interrupts */
+  u_longlong_t tty_rawinch;             /* number of raw input characters  */
+  u_longlong_t tty_caninch;             /* number of canonical input characters (always zero) */
+  u_longlong_t tty_rawoutch;            /* number of raw output characters */
+  u_longlong_t ksched;                  /* number of kernel processes created */
+  u_longlong_t koverf;                  /* kernel process creation attempts where:
+                                               * -the user has forked to their maximum limit
+                                               * -the configuration limit of processes has been reached */
+  u_longlong_t kexit;                   /* number of kernel processes that became zombies */
+  u_longlong_t rbread;                  /* number of remote read requests */
+  u_longlong_t rcread;                  /* number of cached remote reads */
+  u_longlong_t rbwrt;                   /* number of remote writes */
+  u_longlong_t rcwrt;                   /* number of cached remote writes */
+  u_longlong_t traps;                   /* number of traps */
+  int ncpus_high;                       /* index of highest processor online */
+  u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+  u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+  u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+  u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+  u_longlong_t decrintrs;               /* number of decrementer tics interrupts */
+  u_longlong_t mpcrintrs;               /* number of mpc's received interrupts */
+  u_longlong_t mpcsintrs;               /* number of mpc's sent interrupts */
+  u_longlong_t phantintrs;              /* number of phantom interrupts */
+  u_longlong_t idle_donated_purr;       /* number of idle cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_donated_spurr;      /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_purr;       /* number of busy cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_spurr;      /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_stolen_purr;        /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t idle_stolen_spurr;       /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_purr;        /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_spurr;       /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+  short iowait;                         /* number of processes that are asleep waiting for buffered I/O */
+  short physio;                         /* number of processes waiting for raw I/O */
+  longlong_t twait;                     /* number of threads that are waiting for filesystem direct(cio) */
+  u_longlong_t hpi;                     /* number of hypervisor page-ins */
+  u_longlong_t hpit;                    /* Time spent in hypervisor page-ins (in nanoseconds) */
+  u_longlong_t puser_spurr;             /* number of spurr cycles spent in user mode */
+  u_longlong_t psys_spurr;              /* number of spurr cycles spent in kernel mode */
+  u_longlong_t pidle_spurr;             /* number of spurr cycles spent in idle mode */
+  u_longlong_t pwait_spurr;             /* number of spurr cycles spent in wait mode */
+  int spurrflag;                        /* set if running in spurr mode */
+  u_longlong_t  version;                /* version number (1, 2, etc.,) */
+/*      >>>>> END OF STRUCTURE DEFINITION <<<<<         */
+#define CURR_VERSION_CPU_TOTAL 1              /* Incremented by one for every new release *
+                                               * of perfstat_cpu_total_t data structure   */
+} perfstat_cpu_total_t_71;
+
+typedef union {
+  uint    w;
+  struct {
+          unsigned smt_capable :1;          /* OS supports SMT mode */
+          unsigned smt_enabled :1;          /* SMT mode is on */
+          unsigned lpar_capable :1;         /* OS supports logical partitioning */
+          unsigned lpar_enabled :1;         /* logical partitioning is on */
+          unsigned shared_capable :1;       /* OS supports shared processor LPAR */
+          unsigned shared_enabled :1;       /* partition runs in shared mode */
+          unsigned dlpar_capable :1;        /* OS supports dynamic LPAR */
+          unsigned capped :1;               /* partition is capped */
+          unsigned kernel_is_64 :1;         /* kernel is 64 bit */
+          unsigned pool_util_authority :1;  /* pool utilization available */
+          unsigned donate_capable :1;       /* capable of donating cycles */
+          unsigned donate_enabled :1;       /* enabled for donating cycles */
+          unsigned ams_capable:1;           /* 1 = AMS(Active Memory Sharing) capable, 0 = Not AMS capable */
+          unsigned ams_enabled:1;           /* 1 = AMS(Active Memory Sharing) enabled, 0 = Not AMS enabled */
+          unsigned power_save:1;            /* 1 = Power saving mode is enabled */
+          unsigned ame_enabled:1;           /* Active Memory Expansion is enabled */
+          unsigned shared_extended :1;
+          unsigned spare :15;               /* reserved for future usage */
+  } b;
+} perfstat_partition_type_t;
+
+typedef struct { /* partition total information AIX 5.3 < TL6 */
+  char name[IDENTIFIER_LENGTH];         /* name of the logical partition */
+  perfstat_partition_type_t type;       /* set of bits describing the partition */
+  int lpar_id;                          /* logical partition identifier */
+  int group_id;                         /* identifier of the LPAR group this partition is a member of */
+  int pool_id;                          /* identifier of the shared pool of physical processors this partition is a member of */
+  int online_cpus;                      /* number of virtual CPUs currently online on the partition */
+  int max_cpus;                         /* maximum number of virtual CPUs this partition can ever have */
+  int min_cpus;                         /* minimum number of virtual CPUs this partition must have */
+  u_longlong_t online_memory;           /* amount of memory currently online */
+  u_longlong_t max_memory;              /* maximum amount of memory this partition can ever have */
+  u_longlong_t min_memory;              /* minimum amount of memory this partition must have */
+  int entitled_proc_capacity;           /* number of processor units this partition is entitled to receive */
+  int max_proc_capacity;                /* maximum number of processor units this partition can ever have */
+  int min_proc_capacity;                /* minimum number of processor units this partition must have */
+  int proc_capacity_increment;          /* increment value to the entitled capacity */
+  int unalloc_proc_capacity;            /* number of processor units currently unallocated in the shared processor pool this partition belongs to */
+  int var_proc_capacity_weight;         /* partition priority weight to receive extra capacity */
+  int unalloc_var_proc_capacity_weight; /* number of variable processor capacity weight units currently unallocated  in the shared processor pool this partition belongs to */
+  int online_phys_cpus_sys;             /* number of physical CPUs currently active in the system containing this partition */
+  int max_phys_cpus_sys;                /* maximum possible number of physical CPUs in the system containing this partition */
+  int phys_cpus_pool;                   /* number of the physical CPUs currently in the shared processor pool this partition belong to */
+  u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+  u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+  u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+  u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+  u_longlong_t pool_idle_time;          /* number of clock tics a processor in the shared pool was idle */
+  u_longlong_t phantintrs;              /* number of phantom interrupts received by the partition */
+  u_longlong_t invol_virt_cswitch;      /* number of involuntary virtual CPU context switches */
+  u_longlong_t vol_virt_cswitch;        /* number of voluntary virtual CPU context switches */
+  u_longlong_t timebase_last;           /* most recent cpu time base */
+  u_longlong_t reserved_pages;          /* Currently number of 16GB pages. Cannot participate in DR operations */
+  u_longlong_t reserved_pagesize;       /* Currently 16GB pagesize. Cannot participate in DR operations */
+} perfstat_partition_total_t_53_5;
+
+typedef struct { /* partition total information AIX 5.3 < TL10 */
+  char name[IDENTIFIER_LENGTH];         /* name of the logical partition */
+  perfstat_partition_type_t type;       /* set of bits describing the partition */
+  int lpar_id;                          /* logical partition identifier */
+  int group_id;                         /* identifier of the LPAR group this partition is a member of */
+  int pool_id;                          /* identifier of the shared pool of physical processors this partition is a member of */
+  int online_cpus;                      /* number of virtual CPUs currently online on the partition */
+  int max_cpus;                         /* maximum number of virtual CPUs this partition can ever have */
+  int min_cpus;                         /* minimum number of virtual CPUs this partition must have */
+  u_longlong_t online_memory;           /* amount of memory currently online */
+  u_longlong_t max_memory;              /* maximum amount of memory this partition can ever have */
+  u_longlong_t min_memory;              /* minimum amount of memory this partition must have */
+  int entitled_proc_capacity;           /* number of processor units this partition is entitled to receive */
+  int max_proc_capacity;                /* maximum number of processor units this partition can ever have */
+  int min_proc_capacity;                /* minimum number of processor units this partition must have */
+  int proc_capacity_increment;          /* increment value to the entitled capacity */
+  int unalloc_proc_capacity;            /* number of processor units currently unallocated in the shared processor pool this partition belongs to */
+  int var_proc_capacity_weight;         /* partition priority weight to receive extra capacity */
+  int unalloc_var_proc_capacity_weight; /* number of variable processor capacity weight units currently unallocated in the shared processor pool this partition belongs to */
+  int online_phys_cpus_sys;             /* number of physical CPUs currently active in the system containing this partition */
+  int max_phys_cpus_sys;                /* maximum possible number of physical CPUs in the system containing this partition */
+  int phys_cpus_pool;                   /* number of physical CPUs currently in the shared processor pool this partition belongs to */
+  u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+  u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+  u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+  u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+  u_longlong_t pool_idle_time;          /* number of clock tics a processor in the shared pool was idle */
+  u_longlong_t phantintrs;              /* number of phantom interrupts received by the partition */
+  u_longlong_t invol_virt_cswitch;      /* number of involuntary virtual CPU context switches */
+  u_longlong_t vol_virt_cswitch;        /* number of voluntary virtual CPU context switches */
+  u_longlong_t timebase_last;           /* most recent cpu time base */
+  u_longlong_t reserved_pages;          /* Currently number of 16GB pages. Cannot participate in DR operations */
+  u_longlong_t reserved_pagesize;       /* Currently 16GB pagesize. Cannot participate in DR operations */
+  u_longlong_t idle_donated_purr;       /* number of idle cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_donated_spurr;      /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_purr;       /* number of busy cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_spurr;      /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_stolen_purr;        /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t idle_stolen_spurr;       /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_purr;        /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_spurr;       /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t shcpus_in_sys;           /* Number of physical processors allocated for shared processor use */
+  u_longlong_t max_pool_capacity;       /* Maximum processor capacity of partitions pool */
+  u_longlong_t entitled_pool_capacity;  /* Entitled processor capacity of partitions pool */
+  u_longlong_t pool_max_time;           /* Summation of maximum time that could be consumed by the pool (nano seconds) */
+  u_longlong_t pool_busy_time;          /* Summation of busy (non-idle) time accumulated across all partitions in the pool (nano seconds) */
+  u_longlong_t pool_scaled_busy_time;   /* Scaled summation of busy (non-idle) time accumulated across all partitions in the pool (nano seconds) */
+  u_longlong_t shcpu_tot_time;          /* Summation of total time across all physical processors allocated for shared processor use (nano seconds) */
+  u_longlong_t shcpu_busy_time;         /* Summation of busy (non-idle) time accumulated across all shared processor partitions (nano seconds) */
+  u_longlong_t shcpu_scaled_busy_time;  /* Scaled summation of busy time accumulated across all shared processor partitions (nano seconds) */
+  int ams_pool_id;                      /* AMS pool id of the pool the LPAR belongs to */
+  int var_mem_weight;                   /* variable memory capacity weight */
+  u_longlong_t iome;                    /* I/O memory entitlement of the partition in bytes*/
+  u_longlong_t pmem;                    /* Physical memory currently backing the partition's logical memory in bytes*/
+  u_longlong_t hpi;                     /* number of hypervisor page-ins */
+  u_longlong_t hpit;                    /* Time spent in hypervisor page-ins (in nanoseconds)*/
+  u_longlong_t hypv_pagesize;           /* Hypervisor page size in KB*/
+} perfstat_partition_total_t_53;
+
+typedef struct { /* partition total information AIX 6.1|5.3 > TL09 */
+  char name[IDENTIFIER_LENGTH];         /* name of the logical partition */
+  perfstat_partition_type_t type;       /* set of bits describing the partition */
+  int lpar_id;                          /* logical partition identifier */
+  int group_id;                         /* identifier of the LPAR group this partition is a member of */
+  int pool_id;                          /* identifier of the shared pool of physical processors this partition is a member of */
+  int online_cpus;                      /* number of virtual CPUs currently online on the partition */
+  int max_cpus;                         /* maximum number of virtual CPUs this partition can ever have */
+  int min_cpus;                         /* minimum number of virtual CPUs this partition must have */
+  u_longlong_t online_memory;           /* amount of memory currently online */
+  u_longlong_t max_memory;              /* maximum amount of memory this partition can ever have */
+  u_longlong_t min_memory;              /* minimum amount of memory this partition must have */
+  int entitled_proc_capacity;           /* number of processor units this partition is entitled to receive */
+  int max_proc_capacity;                /* maximum number of processor units this partition can ever have */
+  int min_proc_capacity;                /* minimum number of processor units this partition must have */
+  int proc_capacity_increment;          /* increment value to the entitled capacity */
+  int unalloc_proc_capacity;            /* number of processor units currently unallocated in the shared processor pool this partition belongs to */
+  int var_proc_capacity_weight;         /* partition priority weight to receive extra capacity */
+  int unalloc_var_proc_capacity_weight; /* number of variable processor capacity weight units currently unallocated in the shared processor pool this partition belongs to */
+  int online_phys_cpus_sys;             /* number of physical CPUs currently active in the system containing this partition */
+  int max_phys_cpus_sys;                /* maximum possible number of physical CPUs in the system containing this partition */
+  int phys_cpus_pool;                   /* number of physical CPUs currently in the shared processor pool this partition belongs to */
+  u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+  u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+  u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+  u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+  u_longlong_t pool_idle_time;          /* number of clock tics a processor in the shared pool was idle */
+  u_longlong_t phantintrs;              /* number of phantom interrupts received by the partition */
+  u_longlong_t invol_virt_cswitch;      /* number of involuntary virtual CPU context switches */
+  u_longlong_t vol_virt_cswitch;        /* number of voluntary virtual CPU context switches */
+  u_longlong_t timebase_last;           /* most recent cpu time base */
+  u_longlong_t reserved_pages;          /* Currently number of 16GB pages. Cannot participate in DR operations */
+  u_longlong_t reserved_pagesize;       /* Currently 16GB pagesize. Cannot participate in DR operations */
+  u_longlong_t idle_donated_purr;       /* number of idle cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_donated_spurr;      /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_purr;       /* number of busy cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_spurr;      /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_stolen_purr;        /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t idle_stolen_spurr;       /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_purr;        /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_spurr;       /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t shcpus_in_sys;           /* Number of physical processors allocated for shared processor use */
+  u_longlong_t max_pool_capacity;       /* Maximum processor capacity of partitions pool */
+  u_longlong_t entitled_pool_capacity;  /* Entitled processor capacity of partitions pool */
+  u_longlong_t pool_max_time;           /* Summation of maximum time that could be consumed by the pool (nano seconds) */
+  u_longlong_t pool_busy_time;          /* Summation of busy (non-idle) time accumulated across all partitions in the pool (nano seconds) */
+  u_longlong_t pool_scaled_busy_time;   /* Scaled summation of busy (non-idle) time accumulated across all partitions in the pool (nano seconds) */
+  u_longlong_t shcpu_tot_time;          /* Summation of total time across all physical processors allocated for shared processor use (nano seconds) */
+  u_longlong_t shcpu_busy_time;         /* Summation of busy (non-idle) time accumulated across all shared processor partitions (nano seconds) */
+  u_longlong_t shcpu_scaled_busy_time;  /* Scaled summation of busy time accumulated across all shared processor partitions (nano seconds) */
+  int ams_pool_id;                      /* AMS pool id of the pool the LPAR belongs to */
+  int var_mem_weight;                   /* variable memory capacity weight */
+  u_longlong_t iome;                    /* I/O memory entitlement of the partition in bytes*/
+  u_longlong_t pmem;                    /* Physical memory currently backing the partition's logical memory in bytes*/
+  u_longlong_t hpi;                     /* number of hypervisor page-ins */
+  u_longlong_t hpit;                    /* Time spent in hypervisor page-ins (in nanoseconds)*/
+  u_longlong_t hypv_pagesize;           /* Hypervisor page size in KB*/
+  uint online_lcpus;                    /* number of online logical cpus */
+  uint smt_thrds;                       /* number of hardware threads that are running */
+  u_longlong_t puser_spurr;             /* number of spurr cycles spent in user mode */
+  u_longlong_t psys_spurr;              /* number of spurr cycles spent in kernel mode */
+  u_longlong_t pidle_spurr;             /* number of spurr cycles spent in idle mode */
+  u_longlong_t pwait_spurr;             /* number of spurr cycles spent in wait mode */
+  int spurrflag;                        /* set if running in spurr mode */
+} perfstat_partition_total_t_61;
+
+typedef struct { /* partition total information AIX 7.1 */
+  char name[IDENTIFIER_LENGTH];         /* name of the logical partition */
+  perfstat_partition_type_t type;       /* set of bits describing the partition */
+  int lpar_id;                          /* logical partition identifier */
+  int group_id;                         /* identifier of the LPAR group this partition is a member of */
+  int pool_id;                          /* identifier of the shared pool of physical processors this partition is a member of */
+  int online_cpus;                      /* number of virtual CPUs currently online on the partition */
+  int max_cpus;                         /* maximum number of virtual CPUs this partition can ever have */
+  int min_cpus;                         /* minimum number of virtual CPUs this partition must have */
+  u_longlong_t online_memory;           /* amount of memory currently online */
+  u_longlong_t max_memory;              /* maximum amount of memory this partition can ever have */
+  u_longlong_t min_memory;              /* minimum amount of memory this partition must have */
+  int entitled_proc_capacity;           /* number of processor units this partition is entitled to receive */
+  int max_proc_capacity;                /* maximum number of processor units this partition can ever have */
+  int min_proc_capacity;                /* minimum number of processor units this partition must have */
+  int proc_capacity_increment;          /* increment value to the entitled capacity */
+  int unalloc_proc_capacity;            /* number of processor units currently unallocated in the shared processor pool this partition belongs to */
+  int var_proc_capacity_weight;         /* partition priority weight to receive extra capacity */
+  int unalloc_var_proc_capacity_weight; /* number of variable processor capacity weight units currently unallocated in the shared processor pool this partition belongs to */
+  int online_phys_cpus_sys;             /* number of physical CPUs currently active in the system containing this partition */
+  int max_phys_cpus_sys;                /* maximum possible number of physical CPUs in the system containing this partition */
+  int phys_cpus_pool;                   /* number of physical CPUs currently in the shared processor pool this partition belongs to */
+  u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+  u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+  u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+  u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+  u_longlong_t pool_idle_time;          /* number of clock tics a processor in the shared pool was idle */
+  u_longlong_t phantintrs;              /* number of phantom interrupts received by the partition */
+  u_longlong_t invol_virt_cswitch;      /* number of involuntary virtual CPU context switches */
+  u_longlong_t vol_virt_cswitch;        /* number of voluntary virtual CPU context switches */
+  u_longlong_t timebase_last;           /* most recent cpu time base */
+  u_longlong_t reserved_pages;          /* Currently number of 16GB pages. Cannot participate in DR operations */
+  u_longlong_t reserved_pagesize;       /* Currently 16GB pagesize. Cannot participate in DR operations */
+  u_longlong_t idle_donated_purr;       /* number of idle cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_donated_spurr;      /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_purr;       /* number of busy cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t busy_donated_spurr;      /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+  u_longlong_t idle_stolen_purr;        /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t idle_stolen_spurr;       /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_purr;        /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t busy_stolen_spurr;       /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+  u_longlong_t shcpus_in_sys;           /* Number of physical processors allocated for shared processor use */
+  u_longlong_t max_pool_capacity;       /* Maximum processor capacity of partitions pool */
+  u_longlong_t entitled_pool_capacity;  /* Entitled processor capacity of partitions pool */
+  u_longlong_t pool_max_time;           /* Summation of maximum time that could be consumed by the pool (nano seconds) */
+  u_longlong_t pool_busy_time;          /* Summation of busy (non-idle) time accumulated across all partitions in the pool (nano seconds) */
+  u_longlong_t pool_scaled_busy_time;   /* Scaled summation of busy (non-idle) time accumulated across all partitions in the pool (nano seconds) */
+  u_longlong_t shcpu_tot_time;          /* Summation of total time across all physical processors allocated for shared processor use (nano seconds) */
+  u_longlong_t shcpu_busy_time;         /* Summation of busy (non-idle) time accumulated across all shared processor partitions (nano seconds) */
+  u_longlong_t shcpu_scaled_busy_time;  /* Scaled summation of busy time accumulated across all shared processor partitions (nano seconds) */
+  int ams_pool_id;                      /* AMS pool id of the pool the LPAR belongs to */
+  int var_mem_weight;                   /* variable memory capacity weight */
+  u_longlong_t iome;                    /* I/O memory entitlement of the partition in bytes*/
+  u_longlong_t pmem;                    /* Physical memory currently backing the partition's logical memory in bytes*/
+  u_longlong_t hpi;                     /* number of hypervisor page-ins */
+  u_longlong_t hpit;                    /* Time spent in hypervisor page-ins (in nanoseconds)*/
+  u_longlong_t hypv_pagesize;           /* Hypervisor page size in KB*/
+  uint online_lcpus;                    /* number of online logical cpus */
+  uint smt_thrds;                       /* number of hardware threads that are running */
+  u_longlong_t puser_spurr;             /* number of spurr cycles spent in user mode */
+  u_longlong_t psys_spurr;              /* number of spurr cycles spent in kernel mode */
+  u_longlong_t pidle_spurr;             /* number of spurr cycles spent in idle mode */
+  u_longlong_t pwait_spurr;             /* number of spurr cycles spent in wait mode */
+  int spurrflag;                        /* set if running in spurr mode */
+  char hardwareid[CEC_ID_LEN];          /* CEC Identifier */
+  uint power_save_mode;                 /* Power save mode for the LPAR. Introduced through LI 53K PRF : Feature 728 292 */
+  ushort ame_version;                   /* AME Version */
+  u_longlong_t true_memory;             /* True Memory Size in 4KB pages */
+  u_longlong_t expanded_memory;         /* Expanded Memory Size in 4KB pages */
+  u_longlong_t target_memexp_factr;     /* Target Memory Expansion Factor scaled by 100 */
+  u_longlong_t current_memexp_factr;    /* Current Memory Expansion Factor scaled by 100 */
+  u_longlong_t target_cpool_size;       /* Target Compressed Pool Size in bytes */
+  u_longlong_t max_cpool_size;          /* Max Size of Compressed Pool in bytes */
+  u_longlong_t min_ucpool_size;         /* Min Size of Uncompressed Pool in bytes */
+  u_longlong_t ame_deficit_size;        /* Deficit memory size in bytes */
+  u_longlong_t version;                 /* version number (1, 2, etc.,) */
+  u_longlong_t cmcs_total_time;         /* Total CPU time spent due to active memory expansion */
+} perfstat_partition_total_t_71;
+
+typedef struct { /* partition total information AIX 7.1 >= TL1*/
+        char name[IDENTIFIER_LENGTH];         /* name of the logical partition */
+        perfstat_partition_type_t type;       /* set of bits describing the partition */
+        int lpar_id;                          /* logical partition identifier */
+        int group_id;                         /* identifier of the LPAR group this partition is a member of */
+        int pool_id;                          /* identifier of the shared pool of physical processors this partition is a member of */
+        int online_cpus;                      /* number of virtual CPUs currently online on the partition */
+        int max_cpus;                         /* maximum number of virtual CPUs this partition can ever have */
+        int min_cpus;                         /* minimum number of virtual CPUs this partition must have */
+        u_longlong_t online_memory;           /* amount of memory currently online */
+        u_longlong_t max_memory;              /* maximum amount of memory this partition can ever have */
+        u_longlong_t min_memory;              /* minimum amount of memory this partition must have */
+        int entitled_proc_capacity;           /* number of processor units this partition is entitled to receive */
+        int max_proc_capacity;                /* maximum number of processor units this partition can ever have */
+        int min_proc_capacity;                /* minimum number of processor units this partition must have */
+        int proc_capacity_increment;          /* increment value to the entitled capacity */
+        int unalloc_proc_capacity;            /* number of processor units currently unallocated in the shared processor pool this partition belongs to */
+        int var_proc_capacity_weight;         /* partition priority weight to receive extra capacity */
+        int unalloc_var_proc_capacity_weight; /* number of variable processor capacity weight units currently unallocated in the shared processor pool this partition belongs to */
+        int online_phys_cpus_sys;             /* number of physical CPUs currently active in the system containing this partition */
+        int max_phys_cpus_sys;                /* maximum possible number of physical CPUs in the system containing this partition */
+        int phys_cpus_pool;                   /* number of physical CPUs currently in the shared processor pool this partition belongs to */
+        u_longlong_t puser;                   /* raw number of physical processor tics in user mode */
+        u_longlong_t psys;                    /* raw number of physical processor tics in system mode */
+        u_longlong_t pidle;                   /* raw number of physical processor tics idle */
+        u_longlong_t pwait;                   /* raw number of physical processor tics waiting for I/O */
+        u_longlong_t pool_idle_time;          /* number of clock tics a processor in the shared pool was idle */
+        u_longlong_t phantintrs;              /* number of phantom interrupts received by the partition */
+        u_longlong_t invol_virt_cswitch;      /* number of involuntary virtual CPU context switches */
+        u_longlong_t vol_virt_cswitch;        /* number of voluntary virtual CPU context switches */
+        u_longlong_t timebase_last;           /* most recent cpu time base */
+        u_longlong_t reserved_pages;          /* Currently number of 16GB pages. Cannot participate in DR operations */
+        u_longlong_t reserved_pagesize;       /* Currently 16GB pagesize. Cannot participate in DR operations */
+        u_longlong_t idle_donated_purr;       /* number of idle cycles donated by a dedicated partition enabled for donation */
+        u_longlong_t idle_donated_spurr;      /* number of idle spurr cycles donated by a dedicated partition enabled for donation */
+        u_longlong_t busy_donated_purr;       /* number of busy cycles donated by a dedicated partition enabled for donation */
+        u_longlong_t busy_donated_spurr;      /* number of busy spurr cycles donated by a dedicated partition enabled for donation */
+        u_longlong_t idle_stolen_purr;        /* number of idle cycles stolen by the hypervisor from a dedicated partition */
+        u_longlong_t idle_stolen_spurr;       /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */
+        u_longlong_t busy_stolen_purr;        /* number of busy cycles stolen by the hypervisor from a dedicated partition */
+        u_longlong_t busy_stolen_spurr;       /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */
+        u_longlong_t shcpus_in_sys;           /* Number of physical processors allocated for shared processor use */
+        u_longlong_t max_pool_capacity;       /* Maximum processor capacity of partitions pool */
+        u_longlong_t entitled_pool_capacity;  /* Entitled processor capacity of partitions pool */
+        u_longlong_t pool_max_time;           /* Summation of maximum time that could be consumed by the pool (nano seconds) */
+        u_longlong_t pool_busy_time;          /* Summation of busy (non-idle) time accumulated across all partitions in the pool (nano seconds) */
+        u_longlong_t pool_scaled_busy_time;   /* Scaled summation of busy (non-idle) time accumulated across all partitions in the pool (nano seconds) */
+        u_longlong_t shcpu_tot_time;          /* Summation of total time across all physical processors allocated for shared processor use (nano seconds) */
+        u_longlong_t shcpu_busy_time;         /* Summation of busy (non-idle) time accumulated across all shared processor partitions (nano seconds) */
+        u_longlong_t shcpu_scaled_busy_time;  /* Scaled summation of busy time accumulated across all shared processor partitions (nano seconds) */
+        int ams_pool_id;                      /* AMS pool id of the pool the LPAR belongs to */
+        int var_mem_weight;                   /* variable memory capacity weight */
+        u_longlong_t iome;                    /* I/O memory entitlement of the partition in bytes*/
+        u_longlong_t pmem;                    /* Physical memory currently backing the partition's logical memory in bytes*/
+        u_longlong_t hpi;                     /* number of hypervisor page-ins */
+        u_longlong_t hpit;                    /* Time spent in hypervisor page-ins (in nanoseconds)*/
+        u_longlong_t hypv_pagesize;           /* Hypervisor page size in KB*/
+        uint online_lcpus;                    /* number of online logical cpus */
+        uint smt_thrds;                       /* number of hardware threads that are running */
+        u_longlong_t puser_spurr;             /* number of spurr cycles spent in user mode */
+        u_longlong_t psys_spurr;              /* number of spurr cycles spent in kernel mode */
+        u_longlong_t pidle_spurr;             /* number of spurr cycles spent in idle mode */
+        u_longlong_t pwait_spurr;             /* number of spurr cycles spent in wait mode */
+        int spurrflag;                        /* set if running in spurr mode */
+        char hardwareid[CEC_ID_LEN];          /* CEC Identifier */
+        uint power_save_mode;                 /* Power save mode for the LPAR. Introduced through LI 53K PRF : Feature 728 292*/
+        ushort ame_version;                   /* AME Version */
+        u_longlong_t true_memory;             /* True Memory Size in 4KB pages */
+        u_longlong_t expanded_memory;         /* Expanded Memory Size in 4KB pages */
+        u_longlong_t target_memexp_factr;     /* Target Memory Expansion Factor scaled by 100 */
+        u_longlong_t current_memexp_factr;    /* Current Memory Expansion Factor scaled by 100 */
+        u_longlong_t target_cpool_size;       /* Target Compressed Pool Size in bytes */
+        u_longlong_t max_cpool_size;          /* Max Size of Compressed Pool in bytes */
+        u_longlong_t min_ucpool_size;         /* Min Size of Uncompressed Pool in bytes */
+        u_longlong_t ame_deficit_size;        /* Deficit memory size in bytes */
+        u_longlong_t version;                 /* version number (1, 2, etc.,) */
+        u_longlong_t cmcs_total_time;         /* Total CPU time spent due to active memory expansion */
+        u_longlong_t purr_coalescing;         /* If the calling partition is authorized to see pool wide statistics then PURR cycles consumed to coalesce data else set to zero.*/
+        u_longlong_t spurr_coalescing;        /* If the calling partition is authorized to see pool wide statistics then SPURR cycles consumed to coalesce data else set to zero.*/
+        u_longlong_t MemPoolSize;             /* Indicates the memory pool size of the pool that the partition belongs to (in bytes). mpsz */
+        u_longlong_t IOMemEntInUse;           /* I/O memory entitlement of the LPAR in use in bytes. iomu */
+        u_longlong_t IOMemEntFree;            /* free I/O memory entitlement in bytes. iomf */
+        u_longlong_t IOHighWaterMark;         /* high water mark of I/O memory entitlement usage in bytes. iohwm */
+        u_longlong_t purr_counter;            /* number of purr cycles spent in user + kernel mode */
+        u_longlong_t spurr_counter;           /* number of spurr cycles spent in user + kernel mode */
+
+        /* Marketing Requirement(MR): MR1124083744  */
+        u_longlong_t real_free;               /* free real memory (in 4KB pages) */
+        u_longlong_t real_avail;              /* number of pages available for user application (memfree + numperm - minperm - minfree) */
+        /*      >>>>> END OF STRUCTURE DEFINITION <<<<<         */
+#define CURR_VERSION_PARTITION_TOTAL 5        /* Incremented by one for every new release     *
+                                               * of perfstat_partition_total_t data structure */
+} perfstat_partition_total_t_71_1;
+
+typedef union { /* WPAR Type & Flags */
+        uint    w;
+        struct {
+                unsigned app_wpar :1;        /* Application WPAR */
+                unsigned cpu_rset :1;        /* WPAR restricted to CPU resource set */
+                unsigned cpu_xrset:1;        /* WPAR restricted to CPU Exclusive resource set */
+                unsigned cpu_limits :1;      /* CPU resource limits enforced */
+                unsigned mem_limits :1;      /* Memory resource limits enforced */
+                unsigned spare :27;          /* reserved for future usage */
+        } b;
+} perfstat_wpar_type_t;
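+
+// Illustrative only (not part of the copied header): the union above lets a
+// caller inspect the WPAR type either as a whole word (t.w) or bit by bit:
+//   perfstat_wpar_type_t t;
+//   t.w = 0;
+//   if (t.b.app_wpar) { ... }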
+
+typedef struct { /* Workload partition Information AIX 5.3 & 6.1*/
+       char name[MAXCORRALNAMELEN+1]; /* name of the Workload Partition */
+       perfstat_wpar_type_t type;     /* set of bits describing the wpar */
+       cid_t wpar_id;                 /* workload partition identifier */
+       uint  online_cpus;             /* Number of Virtual CPUs in partition rset or number of virtual CPUs currently online on the Global partition */
+       int   cpu_limit;               /* CPU limit in 100ths of % - 1..10000 */
+       int   mem_limit;               /* Memory limit in 100ths of % - 1..10000 */
+       u_longlong_t online_memory;    /* amount of memory currently online in Global Partition */
+       int entitled_proc_capacity;    /* number of processor units this partition is entitled to receive */
+} perfstat_wpar_total_t_61;
+
+typedef struct { /* Workload partition Information AIX 7.1*/
+       char name[MAXCORRALNAMELEN+1]; /* name of the Workload Partition */
+       perfstat_wpar_type_t type;     /* set of bits describing the wpar */
+       cid_t wpar_id;                 /* workload partition identifier */
+       uint  online_cpus;             /* Number of Virtual CPUs in partition rset or number of virtual CPUs currently online on the Global partition */
+       int   cpu_limit;               /* CPU limit in 100ths of % - 1..10000 */
+       int   mem_limit;               /* Memory limit in 100ths of % - 1..10000 */
+       u_longlong_t online_memory;    /* amount of memory currently online in Global Partition */
+       int entitled_proc_capacity;    /* number of processor units this partition is entitled to receive */
+       u_longlong_t version;          /* version number (1, 2, etc.,)                  */
+/*      >>>>> END OF STRUCTURE DEFINITION <<<<<         */
+#define CURR_VERSION_WPAR_TOTAL 1     /* Incremented by one for every new release      *
+                                       * of perfstat_wpar_total_t data structure       */
+} perfstat_wpar_total_t_71;
+
+typedef void * rsethandle_t;  /* Type to identify a resource set handle: rsethandle_t */
+
+typedef enum { WPARNAME, WPARID, RSETHANDLE } wparid_specifier; /* Type of wparid_specifier */
+
+typedef struct { /* WPAR identifier */
+        wparid_specifier spec;  /* Specifier to choose wpar id or name */
+        union  {
+                cid_t wpar_id;                      /* WPAR ID */
+                rsethandle_t rset;                  /* Rset Handle */
+                char wparname[MAXCORRALNAMELEN+1];  /* WPAR NAME */
+        } u;
+        char name[IDENTIFIER_LENGTH]; /* name of the structure element identifier */
+} perfstat_id_wpar_t;
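+
+// Illustrative only (hypothetical snippet): selecting a WPAR by name for
+// perfstat_wpar_total() could look like this:
+//   perfstat_id_wpar_t wid;
+//   memset(&wid, 0, sizeof(wid));
+//   wid.spec = WPARNAME;
+//   strncpy(wid.u.wparname, "mywpar", MAXCORRALNAMELEN);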
+
+
+
+// end: libperfstat.h (AIX 5.2, 5.3, 6.1, 7.1)
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#define PERFSTAT_PARTITON_TOTAL_T_LATEST perfstat_partition_total_t_71_1 /* latest perfstat_partition_total_t structure */
+#define PERFSTAT_CPU_TOTAL_T_LATEST perfstat_cpu_total_t_71             /* latest perfstat_cpu_total_t structure */
+#define PERFSTAT_WPAR_TOTAL_T_LATEST perfstat_wpar_total_t_71           /* latest perfstat_wpar_total_t structure */
 
 class libperfstat {
 
@@ -41,19 +766,107 @@
   // Load the libperfstat library (must be in LIBPATH).
   // Returns true if succeeded, false if error.
   static bool init();
-
-  // cleanup of the libo4 porting library.
   static void cleanup();
 
-  // direct wrappers for the libperfstat functionality. All they do is
+  // Direct wrappers for the libperfstat functionality. All they do is
   // to call the functions with the same name via function pointers.
-  static int perfstat_cpu_total(perfstat_id_t *name, perfstat_cpu_total_t* userbuff,
+  // The wrappers use the latest structure layouts (e.g. PERFSTAT_CPU_TOTAL_T_LATEST),
+  // so all data available on newer AIX versions can be retrieved.
+  static int perfstat_cpu_total(perfstat_id_t *name, PERFSTAT_CPU_TOTAL_T_LATEST* userbuff,
                                 int sizeof_userbuff, int desired_number);
 
   static int perfstat_memory_total(perfstat_id_t *name, perfstat_memory_total_t* userbuff,
                                    int sizeof_userbuff, int desired_number);
 
+  static int perfstat_partition_total(perfstat_id_t *name, PERFSTAT_PARTITON_TOTAL_T_LATEST* userbuff,
+                                      int sizeof_userbuff, int desired_number);
+
   static void perfstat_reset();
+
+  static int perfstat_wpar_total(perfstat_id_wpar_t *name, PERFSTAT_WPAR_TOTAL_T_LATEST* userbuff,
+                                 int sizeof_userbuff, int desired_number);
+
+  static cid_t wpar_getcid();
+
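+  // Call sketch (illustrative only, not part of this header): callers pass a
+  // zeroed buffer of the latest layout together with its size, as the other
+  // perfstat users in this change do; -1 signals an error:
+  //
+  //   PERFSTAT_PARTITON_TOTAL_T_LATEST pt;
+  //   memset(&pt, 0, sizeof(pt));
+  //   if (perfstat_partition_total(NULL, &pt, sizeof(pt), 1) == -1) {
+  //     // handle error
+  //   }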
+
+  ////////////////////////////////////////////////////////////////
+  // The convenience functions get_partitioninfo(), get_cpuinfo() and get_wparinfo()
+  // return information about the partition, the cpus and the wpars, respectively.
+  // They can be used regardless of which OS release we are on; on older AIX
+  // releases, some output structure members will simply be 0. See the usage
+  // sketch following the declarations below.
+
+  // Result struct for get_partitioninfo().
+  struct partitioninfo_t {
+    // partition type info
+    unsigned smt_capable :1;          /* OS supports SMT mode */
+    unsigned smt_enabled :1;          /* SMT mode is on */
+    unsigned lpar_capable :1;         /* OS supports logical partitioning */
+    unsigned lpar_enabled :1;         /* logical partitioning is on */
+    unsigned shared_capable :1;       /* OS supports shared processor LPAR */
+    unsigned shared_enabled :1;       /* partition runs in shared mode */
+    unsigned dlpar_capable :1;        /* OS supports dynamic LPAR */
+    unsigned capped :1;               /* partition is capped */
+    unsigned kernel_is_64 :1;         /* kernel is 64 bit */
+    unsigned pool_util_authority :1;  /* pool utilization available */
+    unsigned donate_capable :1;       /* capable of donating cycles */
+    unsigned donate_enabled :1;       /* enabled for donating cycles */
+    unsigned ams_capable:1;           /* 1 = AMS (Active Memory Sharing) capable, 0 = not AMS capable */
+    unsigned ams_enabled:1;           /* 1 = AMS (Active Memory Sharing) enabled, 0 = not AMS enabled */
+    unsigned power_save:1;            /* 1 = Power saving mode is enabled */
+    unsigned ame_enabled:1;           /* Active Memory Expansion is enabled */
+    // partition total info
+    int online_cpus;                  /* number of virtual CPUs currently online on the partition */
+    int entitled_proc_capacity;       /* number of processor units this partition is entitled to receive */
+    int var_proc_capacity_weight;     /* partition priority weight to receive extra capacity */
+    int phys_cpus_pool;               /* number of physical CPUs currently in the shared processor pool this partition belongs to */
+    int pool_id;                      /* identifier of the shared pool of physical processors this partition is a member of */
+    u_longlong_t entitled_pool_capacity;  /* Entitled processor capacity of partitions pool */
+    char name[IDENTIFIER_LENGTH];     /* name of the logical partition */
+
+    u_longlong_t timebase_last;       /* most recent cpu time base (an incremented long int on PowerPC) */
+    u_longlong_t pool_idle_time;      /* pool idle time = number of clock tics a processor in the shared pool was idle */
+    u_longlong_t pcpu_tics_user;      /* raw number of physical processor tics in user mode */
+    u_longlong_t pcpu_tics_sys;       /* raw number of physical processor tics in system mode */
+    u_longlong_t pcpu_tics_idle;      /* raw number of physical processor tics idle */
+    u_longlong_t pcpu_tics_wait;      /* raw number of physical processor tics waiting for I/O */
+
+    u_longlong_t true_memory;          /* True Memory Size in 4KB pages */
+    u_longlong_t expanded_memory;      /* Expanded Memory Size in 4KB pages */
+    u_longlong_t target_memexp_factr;  /* Target Memory Expansion Factor scaled by 100 */
+    u_longlong_t current_memexp_factr; /* Current Memory Expansion Factor scaled by 100 */
+    u_longlong_t cmcs_total_time;      /* Total CPU time spent due to active memory expansion */
+  };
+
+  // Result struct for get_cpuinfo().
+  struct cpuinfo_t {
+    char description[IDENTIFIER_LENGTH];  // processor description (type/official name)
+    u_longlong_t processorHZ;             // processor speed in Hz
+    int ncpus;                            // number of active logical processors
+    double loadavg[3];                    // (1<<SBITS) times the average number of runnable processes during the last 1, 5 and 15 minutes.
+                                          // To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>.
+    char version[20];                     // processor version from _system_configuration (sys/systemcfg.h)
+    unsigned long long user_clock_ticks;  // raw total number of clock ticks spent in user mode
+    unsigned long long sys_clock_ticks;   // raw total number of clock ticks spent in system mode
+    unsigned long long idle_clock_ticks;  // raw total number of clock ticks spent idle
+    unsigned long long wait_clock_ticks;  // raw total number of clock ticks spent waiting for I/O
+  };
+
+  // Result struct for get_wparinfo().
+  struct wparinfo_t {
+    char name[MAXCORRALNAMELEN+1];  /* name of the Workload Partition */
+    unsigned short wpar_id;         /* workload partition identifier */
+    unsigned app_wpar :1;           /* Application WPAR */
+    unsigned cpu_rset :1;           /* WPAR restricted to CPU resource set */
+    unsigned cpu_xrset:1;           /* WPAR restricted to CPU Exclusive resource set */
+    unsigned cpu_limits :1;         /* CPU resource limits enforced */
+    unsigned mem_limits :1;         /* Memory resource limits enforced */
+    int cpu_limit;                  /* CPU limit in 100ths of % - 1..10000 */
+    int mem_limit;                  /* Memory limit in 100ths of % - 1..10000 */
+  };
+
+  static bool get_partitioninfo(partitioninfo_t* ppi);
+  static bool get_cpuinfo(cpuinfo_t* pci);
+  static bool get_wparinfo(wparinfo_t* pwi);
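+
+  // Usage sketch (illustrative only): a typical caller might do
+  //
+  //   libperfstat::cpuinfo_t ci;
+  //   if (libperfstat::init() && libperfstat::get_cpuinfo(&ci)) {
+  //     // loadavg[] is scaled by (1<<SBITS), see <sys/proc.h>
+  //     const double load_1min = ci.loadavg[0] / (1 << SBITS);
+  //   }
+  //   libperfstat::cleanup();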
+
 };
 
 #endif // OS_AIX_VM_LIBPERFSTAT_AIX_HPP
--- a/src/os/aix/vm/loadlib_aix.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/aix/vm/loadlib_aix.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,6 @@
 #endif
 
 #include "loadlib_aix.hpp"
-// for CritSect
 #include "misc_aix.hpp"
 #include "porting_aix.hpp"
 #include "utilities/debug.hpp"
--- a/src/os/aix/vm/misc_aix.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/aix/vm/misc_aix.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -26,6 +26,8 @@
 #include "runtime/stubRoutines.hpp"
 
 #include <pthread.h>
+#include <unistd.h>
+#include <errno.h>
 
 void MiscUtils::init_critsect(MiscUtils::critsect_t* cs) {
   const int rc = pthread_mutex_init(cs, NULL);
--- a/src/os/aix/vm/misc_aix.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/aix/vm/misc_aix.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -29,6 +29,8 @@
 // misc_aix.hpp, misc_aix.cpp: convenience functions needed for the OpenJDK AIX
 // port.
 #include "utilities/globalDefinitions.hpp"
+#include "runtime/globals.hpp"
+#include "utilities/debug.hpp"
 
 #include <pthread.h>
 
@@ -40,7 +42,6 @@
   } \
 }
 #define ERRBYE(s) { trcVerbose(s); return -1; }
-#define trc(fmt, ...)
 
 #define assert0(b) assert((b), "")
 #define guarantee0(b) guarantee((b), "")
--- a/src/os/aix/vm/osThread_aix.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/aix/vm/osThread_aix.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -35,7 +35,7 @@
 void OSThread::pd_initialize() {
   assert(this != NULL, "check");
   _thread_id        = 0;
-  _pthread_id       = 0;
+  _kernel_thread_id = 0;
   _siginfo = NULL;
   _ucontext = NULL;
   _expanding_stack = 0;
--- a/src/os/aix/vm/osThread_aix.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/aix/vm/osThread_aix.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,7 +27,7 @@
 #define OS_AIX_VM_OSTHREAD_AIX_HPP
 
  public:
-  typedef pid_t thread_id_t;
+  typedef pthread_t thread_id_t;
 
  private:
   int _thread_type;
@@ -43,9 +43,13 @@
 
  private:
 
-  // _pthread_id is the pthread id, which is used by library calls
-  // (e.g. pthread_kill).
-  pthread_t _pthread_id;
+  // On AIX, we use the pthread id as OSThread::thread_id and keep the kernel thread id
+  // separately for diagnostic purposes.
+  //
+  // Note: this kernel thread id is saved at thread start. Depending on the
+  // AIX scheduling mode, this may not be the current thread id (usually not
+  // a problem though as we run with AIXTHREAD_SCOPE=S).
+  tid_t _kernel_thread_id;
 
   sigset_t _caller_sigmask; // Caller's signal mask
 
@@ -66,11 +70,16 @@
     return false;
   }
 #endif // ASSERT
-  pthread_t pthread_id() const {
-    return _pthread_id;
+  tid_t kernel_thread_id() const {
+    return _kernel_thread_id;
   }
-  void set_pthread_id(pthread_t tid) {
-    _pthread_id = tid;
+  void set_kernel_thread_id(tid_t tid) {
+    _kernel_thread_id = tid;
+  }
+
+  pthread_t pthread_id() const {
+    // Here: same as OSThread::thread_id()
+    return _thread_id;
   }
 
   // ***************************************************************
--- a/src/os/aix/vm/os_aix.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/aix/vm/os_aix.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -36,7 +36,9 @@
 #include "compiler/compileBroker.hpp"
 #include "interpreter/interpreter.hpp"
 #include "jvm_aix.h"
+#include "libo4.hpp"
 #include "libperfstat_aix.hpp"
+#include "libodm_aix.hpp"
 #include "loadlib_aix.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
@@ -108,25 +110,14 @@
 #include <sys/vminfo.h>
 #include <sys/wait.h>
 
-// If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
-// getrusage() is prepared to handle the associated failure.
-#ifndef RUSAGE_THREAD
-#define RUSAGE_THREAD   (1)               /* only the calling thread */
-#endif
-
-// PPC port
-static const uintx Use64KPagesThreshold       = 1*M;
-static const uintx MaxExpectedDataSegmentSize = SIZE_4G*2;
-
-// Add missing declarations (should be in procinfo.h but isn't until AIX 6.1).
+// Missing prototypes for various system APIs.
+extern "C"
+int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
+
 #if !defined(_AIXVERSION_610)
-extern "C" {
-  int getthrds64(pid_t ProcessIdentifier,
-                 struct thrdentry64* ThreadBuffer,
-                 int ThreadSize,
-                 tid64_t* IndexPointer,
-                 int Count);
-}
+extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
+extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
+extern "C" int getargs   (procsinfo*, int, char*, int);
 #endif
 
 #define MAX_PATH (2 * K)
@@ -150,18 +141,9 @@
 typedef unsigned long stackslot_t;
 typedef stackslot_t* stackptr_t;
 
-// Excerpts from systemcfg.h definitions newer than AIX 5.3.
-#ifndef PV_7
-#define PV_7 0x200000          /* Power PC 7 */
-#define PV_7_Compat 0x208000   /* Power PC 7 */
-#endif
-#ifndef PV_8
-#define PV_8 0x300000          /* Power PC 8 */
-#define PV_8_Compat 0x308000   /* Power PC 8 */
-#endif
-
 // Query dimensions of the stack of the calling thread.
 static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
+static address resolve_function_descriptor_to_code_pointer(address p);
 
 // Function to check a given stack pointer against given stack limits.
 inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
@@ -185,7 +167,7 @@
   if (((uintptr_t)p) & 0x3) {
     return false;
   }
-  if (!LoadedLibraries::find_for_text_address(p, NULL)) {
+  if (LoadedLibraries::find_for_text_address(p, NULL) == NULL) {
     return false;
   }
   return true;
@@ -203,31 +185,48 @@
   CHECK_STACK_PTR(sp, stack_base, stack_size); \
 }
 
+static void vmembk_print_on(outputStream* os);
+
 ////////////////////////////////////////////////////////////////////////////////
 // global variables (for a description see os_aix.hpp)
 
 julong    os::Aix::_physical_memory = 0;
+
 pthread_t os::Aix::_main_thread = ((pthread_t)0);
 int       os::Aix::_page_size = -1;
+
+// -1 = uninitialized, 0 if AIX, 1 if OS/400 PASE
 int       os::Aix::_on_pase = -1;
-int       os::Aix::_os_version = -1;
+
+// 0 = uninitialized, otherwise 32 bit number:
+//  0xVVRRTTSS
+//  VV - major version
+//  RR - minor version
+//  TT - tech level, if known, 0 otherwise
+//  SS - service pack, if known, 0 otherwise
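+//  Example: AIX 6.1 TL9 SP5 would be encoded as 0x06010905 (the short form
+//  used by os_version_short() keeps only the leading 0xVVRR, e.g. 0x0601).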
+uint32_t  os::Aix::_os_version = 0;
+
 int       os::Aix::_stack_page_size = -1;
+
+// -1 = uninitialized, 0 - no, 1 - yes
 int       os::Aix::_xpg_sus_mode = -1;
+
+// -1 = uninitialized, 0 - no, 1 - yes
 int       os::Aix::_extshm = -1;
-int       os::Aix::_logical_cpus = -1;
 
 ////////////////////////////////////////////////////////////////////////////////
 // local variables
 
-static int      g_multipage_error  = -1;   // error analysis for multipage initialization
 static jlong    initial_time_count = 0;
 static int      clock_tics_per_sec = 100;
 static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
 static bool     check_signals      = true;
-static pid_t    _initial_pid       = 0;
 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 static sigset_t SR_sigset;
 
+// Process break recorded at startup.
+static address g_brk_at_startup = NULL;
+
 // This describes the state of multipage support of the underlying
 // OS. Note that this is of no interest to the outsize world and
 // therefore should not be defined in AIX class.
@@ -278,8 +277,9 @@
 // a specific wish address, e.g. to place the heap in a
 // compressed-oops-friendly way.
 static bool is_close_to_brk(address a) {
-  address a1 = (address) sbrk(0);
-  if (a >= a1 && a < (a1 + MaxExpectedDataSegmentSize)) {
+  assert0(g_brk_at_startup != NULL);
+  if (a >= g_brk_at_startup &&
+      a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
     return true;
   }
   return false;
@@ -290,11 +290,15 @@
 }
 
 julong os::Aix::available_memory() {
+  // Avoid the expensive API call here; on PASE the returned value would always be 0.
+  if (os::Aix::on_pase()) {
+    return 0x0LL;
+  }
   os::Aix::meminfo_t mi;
   if (os::Aix::get_meminfo(&mi)) {
     return mi.real_free;
   } else {
-    return 0xFFFFFFFFFFFFFFFFLL;
+    return ULONG_MAX;
   }
 }
 
@@ -332,7 +336,7 @@
 
   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
-      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
+      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
       return false;
     }
     p += maxDisclaimSize;
@@ -340,7 +344,7 @@
 
   if (lastDisclaimSize > 0) {
     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
-      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
+      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
       return false;
     }
   }
@@ -357,25 +361,30 @@
 #error Add appropriate cpu_arch setting
 #endif
 
+// Wrap the function "vmgetinfo" which is not available on older OS releases.
+static int checked_vmgetinfo(void *out, int command, int arg) {
+  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
+    guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
+  }
+  return ::vmgetinfo(out, command, arg);
+}
 
 // Given an address, returns the size of the page backing that address.
 size_t os::Aix::query_pagesize(void* addr) {
 
+  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
+    // AS/400 older than V6R1: no vmgetinfo here, default to 4K
+    return SIZE_4K;
+  }
+
   vm_page_info pi;
   pi.addr = (uint64_t)addr;
-  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
+  if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
     return pi.pagesize;
   } else {
-    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
     assert(false, "vmgetinfo failed to retrieve page size");
     return SIZE_4K;
   }
-
-}
-
-// Returns the kernel thread id of the currently running thread.
-pid_t os::Aix::gettid() {
-  return (pid_t) thread_self();
 }
 
 void os::Aix::initialize_system_info() {
@@ -387,7 +396,6 @@
   // Retrieve total physical storage.
   os::Aix::meminfo_t mi;
   if (!os::Aix::get_meminfo(&mi)) {
-    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
     assert(false, "os::Aix::get_meminfo failed.");
   }
   _physical_memory = (julong) mi.real_total;
@@ -400,7 +408,6 @@
     case SIZE_64K: return "64K";
     case SIZE_16M: return "16M";
     case SIZE_16G: return "16G";
-    case -1:       return "not set";
     default:
       assert(false, "surprise");
       return "??";
@@ -431,6 +438,8 @@
   }
 
   // Query default shm page size (LDR_CNTRL SHMPSIZE).
+  // Note that this is pure curiosity. We do not rely on default page size but set
+  // our own page size after allocation.
   {
     const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
     guarantee(shmid != -1, "shmget failed");
@@ -447,26 +456,26 @@
   // number of reasons so we may just as well guarantee it here.
   guarantee0(!os::Aix::is_primordial_thread());
 
-  // Query pthread stack page size.
+  // Query pthread stack page size. Should be the same as data page size because
+  // pthread stacks are allocated from C-Heap.
   {
     int dummy = 0;
     g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
   }
 
   // Query default text page size (LDR_CNTRL TEXTPSIZE).
-  /* PPC port: so far unused.
   {
     address any_function =
-      (address) resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
+      resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
     g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
   }
-  */
 
   // Now probe for support of 64K pages and 16M pages.
 
   // Before OS/400 V6R1, there is no support for pages other than 4K.
   if (os::Aix::on_pase_V5R4_or_older()) {
-    Unimplemented();
+    trcVerbose("OS/400 < V6R1 - no large page support.");
+    g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
     goto query_multipage_support_end;
   }
 
@@ -474,10 +483,10 @@
   {
     const int MAX_PAGE_SIZES = 4;
     psize_t sizes[MAX_PAGE_SIZES];
-    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
+    const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
     if (num_psizes == -1) {
-      trc("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
-      trc("disabling multipage support.\n");
+      trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
+      trcVerbose("disabling multipage support.");
       g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
       goto query_multipage_support_end;
     }
@@ -505,8 +514,8 @@
       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
         const int en = errno;
         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
-        // PPC port trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
-        // PPC port  MiscUtils::describe_errno(en));
+        trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n",
+          errno);
       } else {
         // Attach and double check pageisze.
         void* p = ::shmat(shmid, NULL, 0);
@@ -532,35 +541,35 @@
 
 query_multipage_support_end:
 
-  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s\n",
+  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
       describe_pagesize(g_multipage_support.pagesize));
-  trcVerbose("Data page size (C-Heap, bss, etc): %s\n",
+  trcVerbose("Data page size (C-Heap, bss, etc): %s",
       describe_pagesize(g_multipage_support.datapsize));
-  trcVerbose("Text page size: %s\n",
+  trcVerbose("Text page size: %s",
       describe_pagesize(g_multipage_support.textpsize));
-  trcVerbose("Thread stack page size (pthread): %s\n",
+  trcVerbose("Thread stack page size (pthread): %s",
       describe_pagesize(g_multipage_support.pthr_stack_pagesize));
-  trcVerbose("Default shared memory page size: %s\n",
+  trcVerbose("Default shared memory page size: %s",
       describe_pagesize(g_multipage_support.shmpsize));
-  trcVerbose("Can use 64K pages dynamically with shared meory: %s\n",
+  trcVerbose("Can use 64K pages dynamically with shared meory: %s",
       (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
-  trcVerbose("Can use 16M pages dynamically with shared memory: %s\n",
+  trcVerbose("Can use 16M pages dynamically with shared memory: %s",
       (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
-  trcVerbose("Multipage error details: %d\n",
+  trcVerbose("Multipage error details: %d",
       g_multipage_support.error);
 
   // sanity checks
   assert0(g_multipage_support.pagesize == SIZE_4K);
   assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
-  // PPC port: so far unused.assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
+  assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
   assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
   assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);
 
-} // end os::Aix::query_multipage_support()
+}
 
 void os::init_system_properties_values() {
 
-#define DEFAULT_LIBPATH "/usr/lib:/lib"
+#define DEFAULT_LIBPATH "/lib:/usr/lib"
 #define EXTENSIONS_DIR  "/lib/ext"
 
   // Buffer that fits several sprintfs.
@@ -578,7 +587,10 @@
 
     // Found the full path to libjvm.so.
     // Now cut the path to <java_home>/jre if we can.
-    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
+    pslash = strrchr(buf, '/');
+    if (pslash != NULL) {
+      *pslash = '\0';            // Get rid of /libjvm.so.
+    }
     pslash = strrchr(buf, '/');
     if (pslash != NULL) {
       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
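
For illustration, the two truncations above behave as follows on a
hypothetical libjvm.so location (the remaining components up to
<java_home>/jre are handled by code outside this hunk):

    // buf = "/opt/java8/jre/lib/ppc64/server/libjvm.so"
    // after the first strrchr()/truncate:  "/opt/java8/jre/lib/ppc64/server"
    // after the second:                    "/opt/java8/jre/lib/ppc64"
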
@@ -753,8 +765,21 @@
   memset(pmi, 0, sizeof(meminfo_t));
 
   if (os::Aix::on_pase()) {
-
-    Unimplemented();
+    // On PASE, use the libo4 porting library.
+
+    unsigned long long virt_total = 0;
+    unsigned long long real_total = 0;
+    unsigned long long real_free = 0;
+    unsigned long long pgsp_total = 0;
+    unsigned long long pgsp_free = 0;
+    if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
+      pmi->virt_total = virt_total;
+      pmi->real_total = real_total;
+      pmi->real_free = real_free;
+      pmi->pgsp_total = pgsp_total;
+      pmi->pgsp_free = pgsp_free;
+      return true;
+    }
     return false;
 
   } else {
@@ -770,7 +795,7 @@
     memset (&psmt, '\0', sizeof(psmt));
     const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
     if (rc == -1) {
-      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
+      trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
       assert(0, "perfstat_memory_total() failed");
       return false;
     }
@@ -798,81 +823,6 @@
   }
 } // end os::Aix::get_meminfo
 
-// Retrieve global cpu information.
-// Returns false if something went wrong;
-// the content of pci is undefined in this case.
-bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
-  assert(pci, "get_cpuinfo: invalid parameter");
-  memset(pci, 0, sizeof(cpuinfo_t));
-
-  perfstat_cpu_total_t psct;
-  memset (&psct, '\0', sizeof(psct));
-
-  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
-    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
-    assert(0, "perfstat_cpu_total() failed");
-    return false;
-  }
-
-  // global cpu information
-  strcpy (pci->description, psct.description);
-  pci->processorHZ = psct.processorHZ;
-  pci->ncpus = psct.ncpus;
-  os::Aix::_logical_cpus = psct.ncpus;
-  for (int i = 0; i < 3; i++) {
-    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
-  }
-
-  // get the processor version from _system_configuration
-  switch (_system_configuration.version) {
-  case PV_8:
-    strcpy(pci->version, "Power PC 8");
-    break;
-  case PV_7:
-    strcpy(pci->version, "Power PC 7");
-    break;
-  case PV_6_1:
-    strcpy(pci->version, "Power PC 6 DD1.x");
-    break;
-  case PV_6:
-    strcpy(pci->version, "Power PC 6");
-    break;
-  case PV_5:
-    strcpy(pci->version, "Power PC 5");
-    break;
-  case PV_5_2:
-    strcpy(pci->version, "Power PC 5_2");
-    break;
-  case PV_5_3:
-    strcpy(pci->version, "Power PC 5_3");
-    break;
-  case PV_5_Compat:
-    strcpy(pci->version, "PV_5_Compat");
-    break;
-  case PV_6_Compat:
-    strcpy(pci->version, "PV_6_Compat");
-    break;
-  case PV_7_Compat:
-    strcpy(pci->version, "PV_7_Compat");
-    break;
-  case PV_8_Compat:
-    strcpy(pci->version, "PV_8_Compat");
-    break;
-  default:
-    strcpy(pci->version, "unknown");
-  }
-
-  return true;
-
-} //end os::Aix::get_cpuinfo
-
-//////////////////////////////////////////////////////////////////////////////
-// detecting pthread library
-
-void os::Aix::libpthread_init() {
-  return;
-}
-
 //////////////////////////////////////////////////////////////////////////////
 // create new thread
 
@@ -889,6 +839,26 @@
     thread->set_stack_size(size);
   }
 
+  const pthread_t pthread_id = ::pthread_self();
+  const tid_t kernel_thread_id = ::thread_self();
+
+  trcVerbose("newborn Thread : pthread-id %u, ktid " UINT64_FORMAT
+    ", stack %p ... %p, stacksize 0x%IX (%IB)",
+    pthread_id, kernel_thread_id,
+    thread->stack_base() - thread->stack_size(),
+    thread->stack_base(),
+    thread->stack_size(),
+    thread->stack_size());
+
+  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
+  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
+  // tools hook pthread_create(). In this case, we may run into problems establishing
+  // guard pages on those stacks, because the stacks may reside in memory which is not
+  // protectable (shmated).
+  if (thread->stack_base() > (address) ::sbrk(0)) {
+    trcVerbose("Thread " UINT64_FORMAT ": stack not in data segment.", (uint64_t) pthread_id);
+  }
+
   // Do some sanity checks.
   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
 
@@ -902,32 +872,35 @@
   int pid = os::current_process_id();
   alloca(((pid ^ counter++) & 7) * 128);
 
-  ThreadLocalStorage::set_thread(thread);
+  thread->initialize_thread_current();
 
   OSThread* osthread = thread->osthread();
 
-  // thread_id is kernel thread id (similar to Solaris LWP id)
-  osthread->set_thread_id(os::Aix::gettid());
-
-  // initialize signal mask for this thread
+  // OSThread::thread_id is the pthread id.
+  osthread->set_thread_id(pthread_id);
+
+  // ... but keep the kernel thread id too, for diagnostics.
+  osthread->set_kernel_thread_id(kernel_thread_id);
+
+  // Initialize signal mask for this thread.
   os::Aix::hotspot_sigmask(thread);
 
-  // initialize floating point control register
+  // Initialize floating point control register.
   os::Aix::init_thread_fpu_state();
 
   assert(osthread->get_state() == RUNNABLE, "invalid os thread state");
 
-  // call one more level start routine
+  // Call one more level start routine.
   thread->run();
 
+  trcVerbose("Thread finished : pthread-id %u, ktid " UINT64_FORMAT ".",
+    pthread_id, kernel_thread_id);
+
   return 0;
 }
 
 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
 
-  // We want the whole function to be synchronized.
-  ThreadCritical cs;
-
   assert(thread->osthread() == NULL, "caller responsible");
 
   // Allocate the OSThread object
@@ -992,8 +965,14 @@
   pthread_attr_destroy(&attr);
 
   if (ret == 0) {
-    // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
+    trcVerbose("Created New Thread : pthread-id %u", tid);
   } else {
+    if (os::Aix::on_pase()) {
+      // QIBM_MULTI_THREADED=Y is needed when the launcher is started on iSeries
+      // using QSH. Otherwise pthread_create fails with errno=11.
+      trcVerbose("(Please make sure you set the environment variable "
+              "QIBM_MULTI_THREADED=Y before running this program.)");
+    }
     if (PrintMiscellaneous && (Verbose || WizardMode)) {
       perror("pthread_create()");
     }
@@ -1003,8 +982,8 @@
     return false;
   }
 
-  // Store pthread info into the OSThread
-  osthread->set_pthread_id(tid);
+  // OSThread::thread_id is the pthread id.
+  osthread->set_thread_id(tid);
 
   return true;
 }
@@ -1030,9 +1009,21 @@
     return false;
   }
 
-  // Store pthread info into the OSThread
-  osthread->set_thread_id(os::Aix::gettid());
-  osthread->set_pthread_id(::pthread_self());
+  const pthread_t pthread_id = ::pthread_self();
+  const tid_t kernel_thread_id = ::thread_self();
+
+  trcVerbose("attaching Thread : pthread-id %u, ktid " UINT64_FORMAT ", stack %p ... %p, stacksize 0x%IX (%IB)",
+    pthread_id, kernel_thread_id,
+    thread->stack_base() - thread->stack_size(),
+    thread->stack_base(),
+    thread->stack_size(),
+    thread->stack_size());
+
+  // OSThread::thread_id is the pthread id.
+  osthread->set_thread_id(pthread_id);
+
+  // ... but keep the kernel thread id too, for diagnostics.
+  osthread->set_kernel_thread_id(kernel_thread_id);
 
   // initialize floating point control register
   os::Aix::init_thread_fpu_state();
@@ -1077,32 +1068,6 @@
   delete osthread;
 }
 
-//////////////////////////////////////////////////////////////////////////////
-// thread local storage
-
-int os::allocate_thread_local_storage() {
-  pthread_key_t key;
-  int rslt = pthread_key_create(&key, NULL);
-  assert(rslt == 0, "cannot allocate thread local storage");
-  return (int)key;
-}
-
-// Note: This is currently not used by VM, as we don't destroy TLS key
-// on VM exit.
-void os::free_thread_local_storage(int index) {
-  int rslt = pthread_key_delete((pthread_key_t)index);
-  assert(rslt == 0, "invalid index");
-}
-
-void os::thread_local_storage_at_put(int index, void* value) {
-  int rslt = pthread_setspecific((pthread_key_t)index, value);
-  assert(rslt == 0, "pthread_setspecific failed");
-}
-
-extern "C" Thread* get_thread() {
-  return ThreadLocalStorage::thread();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // time support
 
@@ -1152,17 +1117,15 @@
   nanos = jlong(time.tv_usec) * 1000;
 }
 
-
-// We need to manually declare mread_real_time,
-// because IBM didn't provide a prototype in time.h.
-// (they probably only ever tested in C, not C++)
-extern "C"
-int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
-
 jlong os::javaTimeNanos() {
   if (os::Aix::on_pase()) {
-    Unimplemented();
-    return 0;
+
+    timeval time;
+    int status = gettimeofday(&time, NULL);
+    assert(status != -1, "PASE error at gettimeofday()");
+    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
+    return 1000 * usecs;
+
   } else {
     // On AIX use the precision of processors real time clock
     // or time base registers.
@@ -1253,7 +1216,7 @@
 // Note: os::abort() might be called very early during initialization, or
 // called from signal handler. Before adding something to os::abort(), make
 // sure it is async-safe and can handle partially initialized VM.
-void os::abort(bool dump_core, void* siginfo, void* context) {
+void os::abort(bool dump_core, void* siginfo, const void* context) {
   os::shutdown();
   if (dump_core) {
 #ifndef PRODUCT
@@ -1291,22 +1254,12 @@
   return n;
 }
 
-intx os::current_thread_id() { return (intx)pthread_self(); }
+intx os::current_thread_id() {
+  return (intx)pthread_self();
+}
 
 int os::current_process_id() {
-
-  // This implementation returns a unique pid, the pid of the
-  // launcher thread that starts the vm 'process'.
-
-  // Under POSIX, getpid() returns the same pid as the
-  // launcher thread rather than a unique pid per thread.
-  // Use gettid() if you want the old pre NPTL behaviour.
-
-  // if you are looking for the result of a call to getpid() that
-  // returns a unique pid for the calling thread, then look at the
-  // OSThread::thread_id() method in osThread_linux.hpp file
-
-  return (int)(_initial_pid ? _initial_pid : getpid());
+  return getpid();
 }
 
 // DLL functions
@@ -1343,6 +1296,9 @@
   } else if (strchr(pname, *os::path_separator()) != NULL) {
     int n;
     char** pelements = split_path(pname, &n);
+    if (pelements == NULL) {
+      return false;
+    }
     for (int i = 0; i < n; i++) {
       // Really shouldn't be NULL, but check can't hurt
       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
@@ -1540,6 +1496,10 @@
   st->print(name.machine);
   st->cr();
 
+  uint32_t ver = os::Aix::os_version();
+  st->print_cr("AIX kernel version %u.%u.%u.%u",
+               (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
+
   // rlimit
   st->print("rlimit:");
   struct rlimit rlim;
@@ -1580,62 +1540,98 @@
   os::loadavg(loadavg, 3);
   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
   st->cr();
+
+  // print wpar info
+  libperfstat::wparinfo_t wi;
+  if (libperfstat::get_wparinfo(&wi)) {
+    st->print_cr("wpar info");
+    st->print_cr("name: %s", wi.name);
+    st->print_cr("id:   %d", wi.wpar_id);
+    st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
+  }
+
+  // print partition info
+  libperfstat::partitioninfo_t pi;
+  if (libperfstat::get_partitioninfo(&pi)) {
+    st->print_cr("partition info");
+    st->print_cr(" name: %s", pi.name);
+  }
+
 }
 
 void os::print_memory_info(outputStream* st) {
 
   st->print_cr("Memory:");
 
-  st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
-  st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
+  st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
+    describe_pagesize(g_multipage_support.pagesize));
+  st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
+    describe_pagesize(g_multipage_support.datapsize));
+  st->print_cr("  Text page size:                         %s",
+    describe_pagesize(g_multipage_support.textpsize));
+  st->print_cr("  Thread stack page size (pthread):       %s",
+    describe_pagesize(g_multipage_support.pthr_stack_pagesize));
   st->print_cr("  Default shared memory page size:        %s",
     describe_pagesize(g_multipage_support.shmpsize));
   st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
     (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
   st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
     (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
-  if (g_multipage_error != 0) {
-    st->print_cr("  multipage error: %d", g_multipage_error);
-  }
+  st->print_cr("  Multipage error: %d",
+    g_multipage_support.error);
+  st->cr();
+  st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
+  // os::stack_page_size() is not printed here; it is not used in OpenJDK.
 
   // print out LDR_CNTRL because it affects the default page sizes
   const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
   st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
 
+  // Print out EXTSHM because it is an unsupported setting.
   const char* const extshm = ::getenv("EXTSHM");
   st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
-  if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
+  if (extshm != NULL && (strcmp(extshm, "on") == 0 || strcmp(extshm, "ON") == 0)) {
     st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
   }
 
-  // Call os::Aix::get_meminfo() to retrieve memory statistics.
+  // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
+  const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
+  st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
+      aixthread_guardpages ? aixthread_guardpages : "<unset>");
+
   os::Aix::meminfo_t mi;
   if (os::Aix::get_meminfo(&mi)) {
-    char buffer[256];
     if (os::Aix::on_aix()) {
-      jio_snprintf(buffer, sizeof(buffer),
-                   "  physical total : %llu\n"
-                   "  physical free  : %llu\n"
-                   "  swap total     : %llu\n"
-                   "  swap free      : %llu\n",
-                   mi.real_total,
-                   mi.real_free,
-                   mi.pgsp_total,
-                   mi.pgsp_free);
+      st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
+      st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
+      st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
+      st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
     } else {
-      Unimplemented();
+      // PASE - Numbers are result of QWCRSSTS; they mean:
+      // real_total: Sum of all system pools
+      // real_free: always 0
+      // pgsp_total: we take the size of the system ASP
+      // pgsp_free: size of system ASP times percentage of system ASP unused
+      st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
+      st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
+      st->print_cr("%% system asp used : " SIZE_FORMAT,
+        mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
     }
-    st->print_raw(buffer);
-  } else {
-    st->print_cr("  (no more information available)");
-  }
+  }
+  st->cr();
+
+  // Print segments allocated with os::reserve_memory.
+  st->print_cr("internal virtual memory regions used by vm:");
+  vmembk_print_on(st);
 }
 
 // Get a string for the cpuinfo that is a summary of the cpu type
 void os::get_summary_cpu_info(char* buf, size_t buflen) {
   // This looks good
-  os::Aix::cpuinfo_t ci;
-  if (os::Aix::get_cpuinfo(&ci)) {
+  libperfstat::cpuinfo_t ci;
+  if (libperfstat::get_cpuinfo(&ci)) {
     strncpy(buf, ci.version, buflen);
   } else {
     strncpy(buf, "AIX", buflen);
@@ -1643,10 +1639,15 @@
 }
 
 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
+  st->print("CPU:");
+  st->print("total %d", os::processor_count());
+  // It's not safe to query number of active processors after crash.
+  // st->print("(active %d)", os::active_processor_count());
+  st->print(" %s", VM_Version::cpu_features());
+  st->cr();
 }
 
 void os::print_siginfo(outputStream* st, void* siginfo) {
-  // Use common posix version.
   os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
   st->cr();
 }
@@ -1785,21 +1786,75 @@
 // a counter for each possible signal value
 static volatile jint pending_signals[NSIG+1] = { 0 };
 
-// Linux(POSIX) specific hand shaking semaphore.
+// Wrapper functions for semaphore handling:
+// On AIX, we use POSIX semaphores (sem_init(), sem_post(), sem_wait()).
+// On PASE, we need to use msem_lock() and msem_unlock(), because POSIX semaphores
+// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
+// Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
+// on AIX, msem_..() calls are suspected of causing problems.
 static sem_t sig_sem;
+static msemaphore* p_sig_msem = 0;
+
+static void local_sem_init() {
+  if (os::Aix::on_aix()) {
+    int rc = ::sem_init(&sig_sem, 0, 0);
+    guarantee(rc != -1, "sem_init failed");
+  } else {
+    // Memory semaphores must live in shared mem.
+    guarantee0(p_sig_msem == NULL);
+    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
+    guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
+    guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
+  }
+}
+
+static void local_sem_post() {
+  static bool warn_only_once = false;
+  if (os::Aix::on_aix()) {
+    int rc = ::sem_post(&sig_sem);
+    if (rc == -1 && !warn_only_once) {
+      trcVerbose("sem_post failed (errno = %d, %s)", errno, strerror(errno));
+      warn_only_once = true;
+    }
+  } else {
+    guarantee0(p_sig_msem != NULL);
+    int rc = ::msem_unlock(p_sig_msem, 0);
+    if (rc == -1 && !warn_only_once) {
+      trcVerbose("msem_unlock failed (errno = %d, %s)", errno, strerror(errno));
+      warn_only_once = true;
+    }
+  }
+}
+
+static void local_sem_wait() {
+  static bool warn_only_once = false;
+  if (os::Aix::on_aix()) {
+    int rc = ::sem_wait(&sig_sem);
+    if (rc == -1 && !warn_only_once) {
+      trcVerbose("sem_wait failed (errno = %d, %s)", errno, strerror(errno));
+      warn_only_once = true;
+    }
+  } else {
+    guarantee0(p_sig_msem != NULL); // must init before use
+    int rc = ::msem_lock(p_sig_msem, 0);
+    if (rc == -1 && !warn_only_once) {
+      trcVerbose("msem_lock failed (errno = %d, %s)", errno, strerror(errno));
+      warn_only_once = true;
+    }
+  }
+}
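
One subtlety these wrappers paper over: sem_t is a counting semaphore while an
msemaphore is a binary lock, so on PASE multiple posts may coalesce into a
single wakeup. The callers below can tolerate that because the authoritative
signal counts live in the pending_signals array and the semaphore only serves
as a wakeup hint, roughly along these lines (a sketch of the contract, not the
exact code that follows):

    // Producer side (async signal context):
    Atomic::inc(&pending_signals[sig]);   // the real count
    local_sem_post();                     // wakeup; may coalesce on PASE

    // Consumer side: drain the counters, then sleep until the next wakeup.
    for (;;) {
      for (int sig = 1; sig <= NSIG; sig++) {
        jint n = pending_signals[sig];
        if (n > 0 && Atomic::cmpxchg(n - 1, &pending_signals[sig], n) == n) {
          // dispatch 'sig' ...
        }
      }
      local_sem_wait();
    }
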
 
 void os::signal_init_pd() {
   // Initialize signal structures
   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
 
   // Initialize signal semaphore
-  int rc = ::sem_init(&sig_sem, 0, 0);
-  guarantee(rc != -1, "sem_init failed");
+  local_sem_init();
 }
 
 void os::signal_notify(int sig) {
   Atomic::inc(&pending_signals[sig]);
-  ::sem_post(&sig_sem);
+  local_sem_post();
 }
 
 static int check_pending_signals(bool wait) {
@@ -1822,7 +1877,7 @@
       thread->set_suspend_equivalent();
       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
 
-      ::sem_wait(&sig_sem);
+      local_sem_wait();
 
       // were we externally suspended while we were waiting?
       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
@@ -1833,7 +1888,8 @@
         // while suspended because that would surprise the thread that
         // suspended us.
         //
-        ::sem_post(&sig_sem);
+
+        local_sem_post();
 
         thread->java_suspend_self();
       }
@@ -1884,14 +1940,14 @@
  // also check that range is fully page aligned to the page size of the block.
   void assert_is_valid_subrange(char* p, size_t s) const {
     if (!contains_range(p, s)) {
-      fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
-              "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
-              p, p + s - 1, addr, addr + size - 1);
+      trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
+              "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
+              p, p + s, addr, addr + size);
       guarantee0(false);
     }
     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
-      fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
-              " aligned to pagesize (%s)\n", p, p + s);
+      trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
+              " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
       guarantee0(false);
     }
   }
@@ -1988,7 +2044,7 @@
   // Reserve the shared segment.
   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
   if (shmid == -1) {
-    trc("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
+    trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
     return NULL;
   }
 
@@ -2017,7 +2073,7 @@
 
   // (A) Right after shmat and before handing shmat errors delete the shm segment.
   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
-    trc("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
+    trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
     assert(false, "failed to remove shared memory segment!");
   }
 
@@ -2082,6 +2138,8 @@
   return true;
 }
 
+////////////////////////////////  mmap-based routines /////////////////////////////////
+
 // Reserve memory via mmap.
 // If <requested_addr> is given, an attempt is made to attach at the given address.
 // Failing that, memory is allocated at any address.
@@ -2227,9 +2285,6 @@
   return rc;
 }
 
-// End: shared memory bookkeeping
-////////////////////////////////////////////////////////////////////////////////////////////////////
-
 int os::vm_page_size() {
   // Seems redundant as all get out.
   assert(os::Aix::page_size() != -1, "must call os::init");
@@ -2263,15 +2318,26 @@
 
 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
 
-  assert0(is_aligned_to(addr, os::vm_page_size()));
-  assert0(is_aligned_to(size, os::vm_page_size()));
+  assert(is_aligned_to(addr, os::vm_page_size()),
+    "addr " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
+    p2i(addr), (size_t) os::vm_page_size());
+  assert(is_aligned_to(size, os::vm_page_size()),
+    "size " SIZE_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
+    size, (size_t) os::vm_page_size());
 
   vmembk_t* const vmi = vmembk_find(addr);
-  assert0(vmi);
+  guarantee0(vmi);
   vmi->assert_is_valid_subrange(addr, size);
 
   trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
 
+  if (UseExplicitCommit) {
+    // AIX commits memory on touch. So, touch all pages to be committed.
+    for (char* p = addr; p < (addr + size); p += SIZE_4K) {
+      *p = '\0';
+    }
+  }
+
   return true;
 }
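
The 4K stride in the touch loop above is deliberate: 4K is the smallest page
size AIX uses, so one write per 4K commits every page whether the data page
size ended up being 4K or 64K (a 64K page simply gets touched sixteen times).
Parameterizing on the actual page size would work as well, sketched here with
a hypothetical helper:

    // Touch one byte per page so AIX materializes the backing pages.
    static void touch_pages(char* addr, size_t size, size_t page_size) {
      for (char* p = addr; p < addr + size; p += page_size) {
        *p = '\0';
      }
    }
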
 
@@ -2287,12 +2353,16 @@
 }
 
 bool os::pd_uncommit_memory(char* addr, size_t size) {
-  assert0(is_aligned_to(addr, os::vm_page_size()));
-  assert0(is_aligned_to(size, os::vm_page_size()));
+  assert(is_aligned_to(addr, os::vm_page_size()),
+    "addr " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
+    p2i(addr), (size_t) os::vm_page_size());
+  assert(is_aligned_to(size, os::vm_page_size()),
+    "size " SIZE_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
+    size, (size_t) os::vm_page_size());
 
   // Dynamically do different things for mmap/shmat.
   const vmembk_t* const vmi = vmembk_find(addr);
-  assert0(vmi);
+  guarantee0(vmi);
   vmi->assert_is_valid_subrange(addr, size);
 
   if (vmi->type == VMEM_SHMATED) {
@@ -2390,7 +2460,7 @@
 
   // Dynamically do different things for mmap/shmat.
   vmembk_t* const vmi = vmembk_find(addr);
-  assert0(vmi);
+  guarantee0(vmi);
 
   // Always round to os::vm_page_size(), which may be larger than 4K.
   size = align_size_up(size, os::vm_page_size());
@@ -2466,11 +2536,31 @@
       } else {
         rc = read_protected;
       }
+
+      if (!rc) {
+        if (os::Aix::on_pase()) {
+          // There is an issue on older PASE systems where mprotect() will return success but the
+          // memory will not be protected.
+          // This has nothing to do with the problem of using mprotect() on SPEC1170 incompatible
+          // machines; we only see it rarely, when using mprotect() to protect the guard page of
+          // a stack. It is an OS error.
+          //
+          // A valid strategy is just to try again. This usually works. :-/
+
+          ::usleep(1000);
+          if (::mprotect(addr, size, prot) == 0) {
+            // Re-check the protection state rather than trusting the return
+            // code alone this time.
+            const bool read_protected_2 =
+              (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
+              SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
+            rc = (prot & PROT_READ) ? !read_protected_2 : read_protected_2;
+          }
+        }
+      }
     }
   }
-  if (!rc) {
-    assert(false, "mprotect failed.");
-  }
+
+  assert(rc, "mprotect failed.");
+
   return rc;
 }
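
The SafeFetch32 probe used above is worth spelling out: a single fault-safe
load cannot distinguish "the load faulted" from "the memory happens to contain
the probe's default value", so the code probes twice with two different
sentinels. A sketch of the idiom (SafeFetch32 is HotSpot's fault-tolerant
load; it returns the supplied default value if the access traps):

    // Returns true if reading from 'addr' faults, i.e. the page is
    // read-protected. No address can report both sentinels by accident.
    static bool is_read_protected(void* addr) {
      return SafeFetch32((int*) addr, 0x12345678) == 0x12345678 &&
             SafeFetch32((int*) addr, 0x76543210) == 0x76543210;
    }
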
 
@@ -2507,10 +2597,11 @@
 }
 
 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
-  // "exec" is passed in but not used. Creating the shared image for
-  // the code cache doesn't have an SHM_X executable permission to check.
-  Unimplemented();
-  return 0;
+  // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
+  // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
+  // so this is not needed.
+  assert(false, "should not be called on AIX");
+  return NULL;
 }
 
 bool os::release_memory_special(char* base, size_t bytes) {
@@ -2962,7 +3053,9 @@
   // getting raised while being blocked.
   unblock_program_error_signals();
 
+  int orig_errno = errno;  // Preserve errno value over signal handler.
   JVM_handle_aix_signal(sig, info, uc, true);
+  errno = orig_errno;
 }
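
Preserving errno here matters because a signal can interrupt application code
between a failing system call and the subsequent errno check; a handler that
clobbers errno corrupts that check. The general shape of the pattern (a
generic sketch, not AIX-specific):

    static void wrapped_handler(int sig, siginfo_t* info, void* ucontext) {
      const int saved_errno = errno;   // may belong to interrupted code
      // ... handler body, which may call errno-setting functions ...
      errno = saved_errno;             // restore before returning
    }
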
 
 // This boolean allows users to forward their own non-matching signals
@@ -3084,7 +3177,6 @@
   void* oldhand = oldAct.sa_sigaction
     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
-  // Renamed 'signalHandler' to avoid collision with other shared libs.
   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
@@ -3108,7 +3200,6 @@
     sigAct.sa_handler = SIG_DFL;
     sigAct.sa_flags = SA_RESTART;
   } else {
-    // Renamed 'signalHandler' to avoid collision with other shared libs.
     sigAct.sa_sigaction = javaSignalHandler;
     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
   }
@@ -3300,7 +3391,7 @@
   struct sigaction act;
   if (os_sigaction == NULL) {
     // only trust the default sigaction, in case it has been interposed
-    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
+    os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
     if (os_sigaction == NULL) return;
   }
 
@@ -3317,7 +3408,6 @@
   case SIGPIPE:
   case SIGILL:
   case SIGXFSZ:
-    // Renamed 'signalHandler' to avoid collision with other shared libs.
     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
     break;
 
@@ -3350,8 +3440,12 @@
     }
   } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
-    tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
-    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
+    tty->print("expected:");
+    os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
+    tty->cr();
+    tty->print("  found:");
+    os::Posix::print_sa_flags(tty, act.sa_flags);
+    tty->cr();
     // No need to check this sig any longer
     sigaddset(&check_signal_done, sig);
   }
@@ -3362,20 +3456,6 @@
   }
 }
 
-extern bool signal_name(int signo, char* buf, size_t len);
-
-const char* os::exception_name(int exception_code, char* buf, size_t size) {
-  if (0 < exception_code && exception_code <= SIGRTMAX) {
-    // signal
-    if (!signal_name(exception_code, buf, size)) {
-      jio_snprintf(buf, size, "SIG%d", exception_code);
-    }
-    return buf;
-  } else {
-    return NULL;
-  }
-}
-
 // To install functions for atexit system call
 extern "C" {
   static void perfMemory_exit_helper() {
@@ -3389,6 +3469,10 @@
   // (Shared memory boundary is supposed to be a 256M aligned.)
   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
 
+  // Record process break at startup.
+  g_brk_at_startup = (address) ::sbrk(0);
+  assert(g_brk_at_startup != (address) -1, "sbrk failed");
+
   // First off, we need to know whether we run on AIX or PASE, and
   // the OS level we run on.
   os::Aix::initialize_os_info();
@@ -3396,7 +3480,7 @@
   // Scan environment (SPEC1170 behaviour, etc).
   os::Aix::scan_environment();
 
-  // Check which pages are supported by AIX.
+  // Probe multipage support.
   query_multipage_support();
 
   // Act like we only have one page size by eliminating corner cases which
@@ -3449,9 +3533,9 @@
     }
   } else {
     // datapsize = 64k. Data segment, thread stacks are 64k paged.
-    //   This normally means that we can allocate 64k pages dynamically.
-    //   (There is one special case where this may be false: EXTSHM=on.
-    //    but we decided to not support that mode).
+    // This normally means that we can allocate 64k pages dynamically.
+    // (There is one special case where this may be false: EXTSHM=on.
+    // but we decided to not support that mode).
     assert0(g_multipage_support.can_use_64K_pages);
     Aix::_page_size = SIZE_64K;
     trcVerbose("64K page mode");
@@ -3467,7 +3551,7 @@
   _page_sizes[0] = 0;
 
   // debug trace
-  trcVerbose("os::vm_page_size %s\n", describe_pagesize(os::vm_page_size()));
+  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));
 
   // Next, we need to initialize libo4 and libperfstat libraries.
   if (os::Aix::on_pase()) {
@@ -3485,8 +3569,6 @@
   // need libperfstat etc.
   os::Aix::initialize_system_info();
 
-  _initial_pid = getpid();
-
   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
 
   init_random(1234567);
@@ -3511,11 +3593,21 @@
 // This is called _after_ the global arguments have been parsed.
 jint os::init_2(void) {
 
+  if (os::Aix::on_pase()) {
+    trcVerbose("Running on PASE.");
+  } else {
+    trcVerbose("Running on AIX (not PASE).");
+  }
+
   trcVerbose("processor count: %d", os::_processor_count);
   trcVerbose("physical memory: %lu", Aix::_physical_memory);
 
   // Initially build up the loaded dll map.
   LoadedLibraries::reload();
+  if (Verbose) {
+    trcVerbose("Loaded Libraries: ");
+    LoadedLibraries::print(tty);
+  }
 
   const int page_size = Aix::page_size();
   const int map_size = page_size;
@@ -3553,10 +3645,8 @@
                                      map_size, prot,
                                      flags | MAP_FIXED,
                                      -1, 0);
-      if (Verbose) {
-        fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
-                address_wishes[i], map_address + (ssize_t)page_size);
-      }
+      trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
+                   address_wishes[i], map_address + (ssize_t)page_size);
 
       if (map_address + (ssize_t)page_size == address_wishes[i]) {
         // Map succeeded and map_address is at wished address, exit loop.
@@ -3583,11 +3673,9 @@
     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
     os::set_memory_serialize_page(mem_serialize_page);
 
-#ifndef PRODUCT
-    if (Verbose && PrintMiscellaneous) {
-      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
-    }
-#endif
+    trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
+        mem_serialize_page, mem_serialize_page + Aix::page_size(),
+        Aix::page_size(), Aix::page_size());
   }
 
   // initialize suspend/resume support - must do this before signal_sets_init()
@@ -3624,7 +3712,10 @@
   // Note that this can be 0, if no default stacksize was set.
   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
 
-  Aix::libpthread_init();
+  if (UseNUMA) {
+    UseNUMA = false;
+    warning("NUMA optimizations are not available on this OS.");
+  }
 
   if (MaxFDLimit) {
     // Set the number of file descriptors to max. print out error
@@ -3646,7 +3737,7 @@
 
   if (PerfAllowAtExitRegistration) {
     // Only register atexit functions if PerfAllowAtExitRegistration is set.
-    // Atexit functions can be delayed until process exit time, which
+    // atexit() functions can be delayed until process exit time, which
     // can be problematic for embedded VM situations. Embedded VMs should
     // call DestroyJavaVM() to assure that VM resources are released.
 
@@ -3724,7 +3815,7 @@
   Thread* thread = context.thread();
   OSThread* osthread = thread->osthread();
   if (osthread->ucontext() != NULL) {
-    _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
+    _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext());
   } else {
     // NULL context is unexpected, double-check this is the VMThread.
     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
@@ -3746,16 +3837,6 @@
 ////////////////////////////////////////////////////////////////////////////////
 // debug support
 
-static address same_page(address x, address y) {
-  intptr_t page_bits = -os::vm_page_size();
-  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
-    return x;
-  else if (x > y)
-    return (address)(intptr_t(y) | ~page_bits) + 1;
-  else
-    return (address)(intptr_t(y) & page_bits);
-}
-
 bool os::find(address addr, outputStream* st) {
 
   st->print(PTR_FORMAT ": ", addr);
@@ -4119,24 +4200,28 @@
 // For now just return the system wide load average (no processor sets).
 int os::loadavg(double values[], int nelem) {
 
-  // Implemented using libperfstat on AIX.
-
   guarantee(nelem >= 0 && nelem <= 3, "argument error");
   guarantee(values, "argument error");
 
   if (os::Aix::on_pase()) {
-    Unimplemented();
-    return -1;
+
+    // AS/400 PASE: use libo4 porting library
+    double v[3] = { 0.0, 0.0, 0.0 };
+
+    if (libo4::get_load_avg(v, v + 1, v + 2)) {
+      for (int i = 0; i < nelem; i++) {
+        values[i] = v[i];
+      }
+      return nelem;
+    } else {
+      return -1;
+    }
+
   } else {
+
     // AIX: use libperfstat
-    //
-    // See also:
-    // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
-    // /usr/include/libperfstat.h:
-
-    // Use the already AIX version independent get_cpuinfo.
-    os::Aix::cpuinfo_t ci;
-    if (os::Aix::get_cpuinfo(&ci)) {
+    libperfstat::cpuinfo_t ci;
+    if (libperfstat::get_cpuinfo(&ci)) {
       for (int i = 0; i < nelem; i++) {
         values[i] = ci.loadavg[i];
       }
@@ -4163,8 +4248,7 @@
       (void)::poll(NULL, 0, 100);
     }
   } else {
-    jio_fprintf(stderr,
-      "Could not open pause file '%s', continuing immediately.\n", filename);
+    trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
   }
 }
 
@@ -4180,13 +4264,13 @@
 // one of Aix::on_pase(), Aix::os_version() static
 void os::Aix::initialize_os_info() {
 
-  assert(_on_pase == -1 && _os_version == -1, "already called.");
+  assert(_on_pase == -1 && _os_version == 0, "already called.");
 
   struct utsname uts;
   memset(&uts, 0, sizeof(uts));
   strcpy(uts.sysname, "?");
   if (::uname(&uts) == -1) {
-    trc("uname failed (%d)", errno);
+    trcVerbose("uname failed (%d)", errno);
     guarantee(0, "Could not determine whether we run on AIX or PASE");
   } else {
     trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
@@ -4196,21 +4280,34 @@
     assert(major > 0, "invalid OS version");
     const int minor = atoi(uts.release);
     assert(minor > 0, "invalid OS release");
-    _os_version = (major << 8) | minor;
+    _os_version = (major << 24) | (minor << 16);
+    char ver_str[20] = {0};
+    const char* name_str = "unknown OS";
     if (strcmp(uts.sysname, "OS400") == 0) {
-      Unimplemented();
+      // We run on AS/400 PASE. We do not support versions older than V5R4M0.
+      _on_pase = 1;
+      if (os_version_short() < 0x0504) {
+        trcVerbose("OS/400 releases older than V5R4M0 not supported.");
+        assert(false, "OS/400 release too old.");
+      }
+      name_str = "OS/400 (pase)";
+      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
     } else if (strcmp(uts.sysname, "AIX") == 0) {
       // We run on AIX. We do not support versions older than AIX 5.3.
       _on_pase = 0;
-      if (_os_version < 0x0503) {
-        trc("AIX release older than AIX 5.3 not supported.");
+      // Determine detailed AIX version: Version, Release, Modification, Fix Level.
+      odmWrapper::determine_os_kernel_version(&_os_version);
+      if (os_version_short() < 0x0503) {
+        trcVerbose("AIX release older than AIX 5.3 not supported.");
         assert(false, "AIX release too old.");
-      } else {
-        trcVerbose("We run on AIX %d.%d\n", major, minor);
       }
+      name_str = "AIX";
+      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
+                   major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
     } else {
-      assert(false, "unknown OS");
+      assert(false, name_str);
     }
+    trcVerbose("We run on %s %s", name_str, ver_str);
   }
 
   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
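
The 32 bit version word built above (0xVVRRTTSS) can be illustrated with
concrete numbers; os_version_short() and the decode in print_os_info() read
back the same fields (hypothetical values for AIX 7.1 TL04 SP02):

    // Encode: major 7, minor 1, tech level 4, service pack 2.
    // (TL and SP are filled in by odmWrapper::determine_os_kernel_version().)
    uint32_t ver = (7u << 24) | (1u << 16) | (4u << 8) | 2u;  // 0x07010402

    // Decode, as done elsewhere in this change:
    int short_ver  = ver >> 16;            // 0x0701, used for version checks
    unsigned major = (ver >> 24) & 0xFF;   // 7
    unsigned minor = (ver >> 16) & 0xFF;   // 1
    unsigned tl    = (ver >>  8) & 0xFF;   // 4
    unsigned sp    =  ver        & 0xFF;   // 2
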
@@ -4232,12 +4329,17 @@
   // This switch was needed on AIX 32bit, but on AIX 64bit the general
   // recommendation is (in OSS notes) to switch it off.
   p = ::getenv("EXTSHM");
-  if (Verbose) {
-    fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
-  }
+  trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
   if (p && strcasecmp(p, "ON") == 0) {
-    fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
     _extshm = 1;
+    trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
+    if (!AllowExtshm) {
+      // We allow under certain conditions the user to continue. However, we want this
+      // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
+      // that the VM is not able to allocate 64k pages for the heap.
+      // We do not want to run with reduced performance.
+      vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
+    }
   } else {
     _extshm = 0;
   }
@@ -4254,7 +4356,7 @@
   trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
   if (p && strcmp(p, "ON") == 0) {
     _xpg_sus_mode = 1;
-    trc("Unsupported setting: XPG_SUS_ENV=ON");
+    trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
     // clobber address ranges. If we ever want to support that, we have to do some
     // testing first.
@@ -4263,35 +4365,46 @@
     _xpg_sus_mode = 0;
   }
 
-  // Switch off AIX internal (pthread) guard pages. This has
-  // immediate effect for any pthread_create calls which follow.
+  if (os::Aix::on_pase()) {
+    p = ::getenv("QIBM_MULTI_THREADED");
+    trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
+  }
+
+  p = ::getenv("LDR_CNTRL");
+  trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
+  if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
+    if (p && ::strstr(p, "TEXTPSIZE")) {
+      trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
+        "you may experience hangs or crashes on OS/400 V7R1.");
+    }
+  }
+
   p = ::getenv("AIXTHREAD_GUARDPAGES");
   trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
-  rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
-  guarantee(rc == 0, "");
 
 } // end: os::Aix::scan_environment()
 
-// PASE: initialize the libo4 library (AS400 PASE porting library).
+// PASE: initialize the libo4 library (PASE porting library).
 void os::Aix::initialize_libo4() {
-  Unimplemented();
-}
-
-// AIX: initialize the libperfstat library (we load this dynamically
-// because it is only available on AIX.
+  guarantee(os::Aix::on_pase(), "OS/400 only.");
+  if (!libo4::init()) {
+    trcVerbose("libo4 initialization failed.");
+    assert(false, "libo4 initialization failed");
+  } else {
+    trcVerbose("libo4 initialized.");
+  }
+}
+
+// AIX: initialize the libperfstat library.
 void os::Aix::initialize_libperfstat() {
-
   assert(os::Aix::on_aix(), "AIX only");
-
   if (!libperfstat::init()) {
-    trc("libperfstat initialization failed.");
+    trcVerbose("libperfstat initialization failed.");
     assert(false, "libperfstat initialization failed");
   } else {
-    if (Verbose) {
-      fprintf(stderr, "libperfstat initialized.\n");
-    }
-  }
-} // end: os::Aix::initialize_libperfstat
+    trcVerbose("libperfstat initialized.");
+  }
+}
 
 /////////////////////////////////////////////////////////////////////////////
 // thread stack
@@ -4313,7 +4426,7 @@
 
   pthread_t tid = pthread_self();
   struct __pthrdsinfo pinfo;
-  char dummy[1]; // We only need this to satisfy the api and to not get E.
+  char dummy[1]; // Just needed to satisfy pthread_getthrds_np.
   int dummy_size = sizeof(dummy);
 
   memset(&pinfo, 0, sizeof(pinfo));
@@ -4328,44 +4441,47 @@
   }
   guarantee0(pinfo.__pi_stackend);
 
-  // The following can happen when invoking pthread_getthrds_np on a pthread running
-  // on a user provided stack (when handing down a stack to pthread create, see
-  // pthread_attr_setstackaddr).
-  // Not sure what to do here - I feel inclined to forbid this use case completely.
+  // The following may happen when invoking pthread_getthrds_np on a pthread
+  // running on a user provided stack (when handing down a stack to pthread
+  // create, see pthread_attr_setstackaddr).
+  // Not sure what to do then.
+
   guarantee0(pinfo.__pi_stacksize);
 
-  // Note: the pthread stack on AIX seems to look like this:
-  //
-  // ---------------------   real base ? at page border ?
+  // Note: we get three values from pthread_getthrds_np:
+  //       __pi_stackaddr, __pi_stacksize, __pi_stackend
   //
-  //     pthread internal data, like ~2K, see also
-  //     http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/thread_supp_tun_params.htm
-  //
-  // ---------------------   __pi_stackend - not page aligned, (xxxxF890)
+  // high addr    ---------------------
   //
-  //     stack
-  //      ....
-  //
-  //     stack
-  //
-  // ---------------------   __pi_stackend  - __pi_stacksize
+  //    |         pthread internal data, like ~2K
+  //    |
+  //    |         ---------------------   __pi_stackend   (usually not page aligned, (xxxxF890))
+  //    |
+  //    |
+  //    |
+  //    |
+  //    |
+  //    |
+  //    |          ---------------------   (__pi_stackend - __pi_stacksize)
+  //    |
+  //    |          padding to align the following AIX guard pages, if enabled.
+  //    |
+  //    V          ---------------------   __pi_stackaddr
   //
-  //     padding due to AIX guard pages (?) see AIXTHREAD_GUARDPAGES
-  // ---------------------   __pi_stackaddr  (page aligned if AIXTHREAD_GUARDPAGES > 0)
-  //
-  //   AIX guard pages (?)
+  // low addr      AIX guard pages, if enabled (AIXTHREAD_GUARDPAGES > 0)
   //
 
-  // So, the safe thing to do is to use the area from __pi_stackend to __pi_stackaddr;
-  // __pi_stackend however is almost never page aligned.
-  //
+  address stack_base = (address)(pinfo.__pi_stackend);
+  address stack_low_addr = (address)align_ptr_up(pinfo.__pi_stackaddr,
+    os::vm_page_size());
+  size_t stack_size = stack_base - stack_low_addr;
 
   if (p_stack_base) {
-    (*p_stack_base) = (address) (pinfo.__pi_stackend);
+    *p_stack_base = stack_base;
   }
 
   if (p_stack_size) {
-    (*p_stack_size) = pinfo.__pi_stackend - pinfo.__pi_stackaddr;
+    *p_stack_size = stack_size;
   }
 
   return true;
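
A worked example of the computation above, with hypothetical values of the
shape pthread_getthrds_np() typically returns (__pi_stackend is usually not
page aligned) and a 4K page size:

    // __pi_stackaddr = 0x30F00010, __pi_stackend = 0x3100F890
    // stack_base     = 0x3100F890                       (taken as-is)
    // stack_low_addr = align_ptr_up(0x30F00010, 4096)  = 0x30F01000
    // stack_size     = 0x3100F890 - 0x30F01000         = 0x0010E890
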
@@ -4915,7 +5031,7 @@
 }
 #endif
 
-bool os::start_debugging(char *buf, int buflen) {
+bool os::start_debugging(char *buf, int buflen) {
   int len = (int)strlen(buf);
   char *p = &buf[len];
 
--- a/src/os/aix/vm/os_aix.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/aix/vm/os_aix.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -34,9 +34,6 @@
 class Aix {
   friend class os;
 
-  // Length of strings included in the libperfstat structures.
-#define IDENTIFIER_LENGTH 64
-
   static bool libjsig_is_loaded;        // libjsig that interposes sigaction(),
                                         // __sigaction(), signal() is loaded
   static struct sigaction *(*get_signal_action)(int);
@@ -45,23 +42,25 @@
 
   static void check_signal_handler(int sig);
 
- protected:
+ private:
 
   static julong _physical_memory;
   static pthread_t _main_thread;
   static Mutex* _createThread_lock;
   static int _page_size;
-  static int _logical_cpus;
+
+  // Page size of newly created pthreads.
+  static int _stack_page_size;
 
   // -1 = uninitialized, 0 = AIX, 1 = OS/400 (PASE)
   static int _on_pase;
 
-  // -1 = uninitialized, otherwise 16 bit number:
-  //  lower 8 bit - minor version
-  //  higher 8 bit - major version
-  //  For AIX, e.g. 0x0601 for AIX 6.1
-  //  for OS/400 e.g. 0x0504 for OS/400 V5R4
-  static int _os_version;
+  // 0 = uninitialized, otherwise 32 bit number: 0xVVRRTTSS
+  //  VV - major version
+  //  RR - minor (release) version
+  //  TT - tech level, if known, 0 otherwise
+  //  SS - service pack, if known, 0 otherwise
+  //  For AIX, e.g. 0x07010402 for AIX 7.1 TL04 SP02,
+  //  for OS/400 e.g. 0x05040000 for OS/400 V5R4.
+  static uint32_t _os_version;
 
   // -1 = uninitialized,
   //  0 - SPEC1170 not requested (XPG_SUS_ENV is OFF or not set)
@@ -73,35 +72,6 @@
   //  1 - EXTSHM=ON
   static int _extshm;
 
-  // page sizes on AIX.
-  //
-  //  AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The latter two
-  //  (16M "large" resp. 16G "huge" pages) require special setup and are normally
-  //  not available.
-  //
-  //  AIX supports multiple page sizes per process, for:
-  //  - Stack (of the primordial thread, so not relevant for us)
-  //  - Data - data, bss, heap, for us also pthread stacks
-  //  - Text - text code
-  //  - shared memory
-  //
-  //  Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
-  //  and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...)
-  //
-  //  For shared memory, page size can be set dynamically via shmctl(). Different shared memory
-  //  regions can have different page sizes.
-  //
-  //  More information can be found at AIBM info center:
-  //   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
-  //
-  // -----
-  //  We want to support 4K and 64K and, if the machine is set up correctly, 16MB pages.
-  //
-
-  // page size of the stack of newly created pthreads
-  // (should be LDR_CNTRL DATAPSIZE because stack is allocated on heap by pthread lib)
-  static int _stack_page_size;
-
   static julong available_memory();
   static julong physical_memory() { return _physical_memory; }
   static void initialize_system_info();
@@ -125,9 +95,6 @@
  public:
   static void init_thread_fpu_state();
   static pthread_t main_thread(void)                                { return _main_thread; }
-  // returns kernel thread id (similar to LWP id on Solaris), which can be
-  // used to access /proc
-  static pid_t gettid();
   static void set_createThread_lock(Mutex* lk)                      { _createThread_lock = lk; }
   static Mutex* createThread_lock(void)                             { return _createThread_lock; }
   static void hotspot_sigmask(Thread* thread);
@@ -156,8 +123,8 @@
   static int vm_default_page_size(void ) { return 8*K; }
 
   static address   ucontext_get_pc(const ucontext_t* uc);
-  static intptr_t* ucontext_get_sp(ucontext_t* uc);
-  static intptr_t* ucontext_get_fp(ucontext_t* uc);
+  static intptr_t* ucontext_get_sp(const ucontext_t* uc);
+  static intptr_t* ucontext_get_fp(const ucontext_t* uc);
   // Set PC into context. Needed for continuation after signal.
   static void ucontext_set_pc(ucontext_t* uc, address pc);
 
@@ -205,24 +172,31 @@
     return _on_pase ? false : true;
   }
 
-  // -1 = uninitialized, otherwise 16 bit number:
+  // Get 4 byte AIX kernel version number:
+  // highest 2 bytes: Version, Release
+  // if available: lowest 2 bytes: Tech Level, Service Pack.
+  static uint32_t os_version() {
+    assert(_os_version != 0, "not initialized");
+    return _os_version;
+  }
+
+  // Shortened 16 bit OS version number:
   // lower 8 bit - minor version
   // higher 8 bit - major version
   // For AIX, e.g. 0x0601 for AIX 6.1
   // for OS/400 e.g. 0x0504 for OS/400 V5R4
-  static int os_version () {
-    assert(_os_version != -1, "not initialized");
-    return _os_version;
+  static int os_version_short() {
+    return os_version() >> 16;
   }
 
   // Convenience method: returns true if running on PASE V5R4 or older.
   static bool on_pase_V5R4_or_older() {
-    return on_pase() && os_version() <= 0x0504;
+    return on_pase() && os_version_short() <= 0x0504;
   }
 
   // Convenience method: returns true if running on AIX 5.3 or older.
   static bool on_aix_53_or_older() {
-    return on_aix() && os_version() <= 0x0503;
+    return on_aix() && os_version_short() <= 0x0503;
   }
 
   // Returns true if we run in SPEC1170 compliant mode (XPG_SUS_ENV=ON).
@@ -257,27 +231,12 @@
 
   };
 
-  // Result struct for get_cpuinfo().
-  struct cpuinfo_t {
-    char description[IDENTIFIER_LENGTH];  // processor description (type/official name)
-    u_longlong_t processorHZ;             // processor speed in Hz
-    int ncpus;                            // number of active logical processors
-    double loadavg[3];                    // (1<<SBITS) times the average number of runnables processes during the last 1, 5 and 15 minutes.
-                                          // To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>.
-    char version[20];                     // processor version from _system_configuration (sys/systemcfg.h)
-  };
-
   // Functions to retrieve memory information on AIX, PASE.
   // (on AIX, using libperfstat, on PASE with libo4.so).
   // Returns true if ok, false if error.
   static bool get_meminfo(meminfo_t* pmi);
 
-  // Function to retrieve cpu information on AIX
-  // (on AIX, using libperfstat)
-  // Returns true if ok, false if error.
-  static bool get_cpuinfo(cpuinfo_t* pci);
-
-}; // os::Aix class
+};
 
 
 class PlatformEvent : public CHeapObj<mtInternal> {
--- a/src/os/aix/vm/os_aix.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/aix/vm/os_aix.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -36,10 +36,6 @@
 #include <sys/ioctl.h>
 #include <netdb.h>
 
-inline void* os::thread_local_storage_at(int index) {
-  return pthread_getspecific((pthread_key_t)index);
-}
-
 // File names are case-sensitive on windows only.
 inline int os::file_name_strcmp(const char* s1, const char* s2) {
   return strcmp(s1, s2);
@@ -64,6 +60,8 @@
 // On Aix, reservations are made on a page by page basis, nothing to do.
 inline void os::pd_split_reserved_memory(char *base, size_t size,
                                          size_t split, bool realloc) {
+  // TODO: Determine whether Sys V memory is split. If yes, we need to treat
+  // this the same way Windows treats its VirtualAlloc allocations.
 }
 
 // Bang the shadow pages if they need to be touched to be mapped.
--- a/src/os/aix/vm/thread_aix.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_AIX_VM_THREAD_AIX_INLINE_HPP
-#define OS_AIX_VM_THREAD_AIX_INLINE_HPP
-
-#include "runtime/thread.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Contains inlined functions for class Thread and ThreadLocalStorage
-
-inline void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
-
-#endif // OS_AIX_VM_THREAD_AIX_INLINE_HPP
--- a/src/os/bsd/vm/jvm_bsd.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/bsd/vm/jvm_bsd.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -108,84 +108,3 @@
   return JNI_TRUE;
 JVM_END
 
-/*
-  All the defined signal names for Bsd.
-
-  NOTE that not all of these names are accepted by our Java implementation
-
-  Via an existing claim by the VM, sigaction restrictions, or
-  the "rules of Unix" some of these names will be rejected at runtime.
-  For example the VM sets up to handle USR1, sigaction returns EINVAL for
-  STOP, and Bsd simply doesn't allow catching of KILL.
-
-  Here are the names currently accepted by a user of sun.misc.Signal with
-  1.4.1 (ignoring potential interaction with use of chaining, etc):
-
-    HUP, INT, TRAP, ABRT, IOT, BUS, USR2, PIPE, ALRM, TERM, STKFLT,
-    CLD, CHLD, CONT, TSTP, TTIN, TTOU, URG, XCPU, XFSZ, VTALRM, PROF,
-    WINCH, POLL, IO, PWR, SYS
-
-*/
-
-struct siglabel {
-  const char *name;
-  int   number;
-};
-
-struct siglabel siglabels[] = {
-  /* derived from /usr/include/bits/signum.h on RH7.2 */
-   "HUP",       SIGHUP,         /* Hangup (POSIX).  */
-  "INT",        SIGINT,         /* Interrupt (ANSI).  */
-  "QUIT",       SIGQUIT,        /* Quit (POSIX).  */
-  "ILL",        SIGILL,         /* Illegal instruction (ANSI).  */
-  "TRAP",       SIGTRAP,        /* Trace trap (POSIX).  */
-  "ABRT",       SIGABRT,        /* Abort (ANSI).  */
-  "EMT",        SIGEMT,         /* EMT trap  */
-  "FPE",        SIGFPE,         /* Floating-point exception (ANSI).  */
-  "KILL",       SIGKILL,        /* Kill, unblockable (POSIX).  */
-  "BUS",        SIGBUS,         /* BUS error (4.2 BSD).  */
-  "SEGV",       SIGSEGV,        /* Segmentation violation (ANSI).  */
-  "SYS",        SIGSYS,         /* Bad system call. Only on some Bsden! */
-  "PIPE",       SIGPIPE,        /* Broken pipe (POSIX).  */
-  "ALRM",       SIGALRM,        /* Alarm clock (POSIX).  */
-  "TERM",       SIGTERM,        /* Termination (ANSI).  */
-  "URG",        SIGURG,         /* Urgent condition on socket (4.2 BSD).  */
-  "STOP",       SIGSTOP,        /* Stop, unblockable (POSIX).  */
-  "TSTP",       SIGTSTP,        /* Keyboard stop (POSIX).  */
-  "CONT",       SIGCONT,        /* Continue (POSIX).  */
-  "CHLD",       SIGCHLD,        /* Child status has changed (POSIX).  */
-  "TTIN",       SIGTTIN,        /* Background read from tty (POSIX).  */
-  "TTOU",       SIGTTOU,        /* Background write to tty (POSIX).  */
-  "IO",         SIGIO,          /* I/O now possible (4.2 BSD).  */
-  "XCPU",       SIGXCPU,        /* CPU limit exceeded (4.2 BSD).  */
-  "XFSZ",       SIGXFSZ,        /* File size limit exceeded (4.2 BSD).  */
-  "VTALRM",     SIGVTALRM,      /* Virtual alarm clock (4.2 BSD).  */
-  "PROF",       SIGPROF,        /* Profiling alarm clock (4.2 BSD).  */
-  "WINCH",      SIGWINCH,       /* Window size change (4.3 BSD, Sun).  */
-  "INFO",       SIGINFO,        /* Information request.  */
-  "USR1",       SIGUSR1,        /* User-defined signal 1 (POSIX).  */
-  "USR2",       SIGUSR2         /* User-defined signal 2 (POSIX).  */
-  };
-
-JVM_ENTRY_NO_ENV(jint, JVM_FindSignal(const char *name))
-
-  /* find and return the named signal's number */
-
-  for(uint i=0; i<ARRAY_SIZE(siglabels); i++)
-    if(!strcmp(name, siglabels[i].name))
-      return siglabels[i].number;
-
-  return -1;
-
-JVM_END
-
-// used by os::exception_name()
-extern bool signal_name(int signo, char* buf, size_t len) {
-  for(uint i = 0; i < ARRAY_SIZE(siglabels); i++) {
-    if (signo == siglabels[i].number) {
-      jio_snprintf(buf, len, "SIG%s", siglabels[i].name);
-      return true;
-    }
-  }
-  return false;
-}
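
The siglabel tables above implement a plain linear name-to-number lookup, duplicated once per platform; the shared POSIX replacement later in this changeset (os::Posix::get_signal_number) keeps the same contract. A minimal standalone sketch of the pattern, with assumed names:

#include <csignal>
#include <cstring>

struct siglabel { const char* name; int number; };

static const siglabel labels[] = {
  { "HUP", SIGHUP }, { "INT", SIGINT }, { "TERM", SIGTERM }
};

// Linear scan over the table; -1 for unknown names, the same contract
// as the removed JVM_FindSignal implementations.
int find_signal(const char* name) {
  for (unsigned i = 0; i < sizeof(labels) / sizeof(labels[0]); i++) {
    if (strcmp(name, labels[i].name) == 0) return labels[i].number;
  }
  return -1;
}
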
--- a/src/os/bsd/vm/os_bsd.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/bsd/vm/os_bsd.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -674,7 +674,7 @@
   int pid = os::current_process_id();
   alloca(((pid ^ counter++) & 7) * 128);
 
-  ThreadLocalStorage::set_thread(thread);
+  thread->initialize_thread_current();
 
   OSThread* osthread = thread->osthread();
   Monitor* sync = osthread->startThread_lock();
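
The alloca() call above is a stack-coloring trick: each starting thread shifts its frames by a pid- and counter-dependent offset so that identical hot frames in sibling threads do not land on the same cache and TLB sets. The arithmetic, spelled out (illustrative sketch, assumed names):

#include <cstddef>

// (pid ^ counter) & 7 selects one of 8 slots; * 128 spaces them a
// cache-line-friendly distance apart, so the offset cycles over
// 0, 128, ..., 896 bytes.
size_t stack_color(int pid, int counter) {
  return ((pid ^ counter) & 7) * 128;
}
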
@@ -882,44 +882,6 @@
   delete osthread;
 }
 
-//////////////////////////////////////////////////////////////////////////////
-// thread local storage
-
-// Restore the thread pointer if the destructor is called. This is in case
-// someone from JNI code sets up a destructor with pthread_key_create to run
-// detachCurrentThread on thread death. Unless we restore the thread pointer we
-// will hang or crash. When detachCurrentThread is called the key will be set
-// to null and we will not be called again. If detachCurrentThread is never
-// called we could loop forever depending on the pthread implementation.
-static void restore_thread_pointer(void* p) {
-  Thread* thread = (Thread*) p;
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
-
-int os::allocate_thread_local_storage() {
-  pthread_key_t key;
-  int rslt = pthread_key_create(&key, restore_thread_pointer);
-  assert(rslt == 0, "cannot allocate thread local storage");
-  return (int)key;
-}
-
-// Note: This is currently not used by VM, as we don't destroy TLS key
-// on VM exit.
-void os::free_thread_local_storage(int index) {
-  int rslt = pthread_key_delete((pthread_key_t)index);
-  assert(rslt == 0, "invalid index");
-}
-
-void os::thread_local_storage_at_put(int index, void* value) {
-  int rslt = pthread_setspecific((pthread_key_t)index, value);
-  assert(rslt == 0, "pthread_setspecific failed");
-}
-
-extern "C" Thread* get_thread() {
-  return ThreadLocalStorage::thread();
-}
-
-
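
The restore_thread_pointer() mechanism deleted above relies on a subtle pthreads property: a key destructor that re-installs its value leaves the key non-NULL, so the destructor pass repeats (up to PTHREAD_DESTRUCTOR_ITERATIONS) and destructors registered by other code, e.g. a JNI DetachCurrentThread hook, still see the Thread*. A standalone sketch of the pattern, with assumed names:

#include <pthread.h>

static pthread_key_t g_key;

// Re-installing the value keeps the slot populated, so pthreads runs
// another destructor round and later destructors still find the thread.
static void restore_value(void* p) {
  pthread_setspecific(g_key, p);
}

void init_key() {
  pthread_key_create(&g_key, restore_value);
}
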
 ////////////////////////////////////////////////////////////////////////////////
 // time support
 
@@ -1115,7 +1077,7 @@
 // Note: os::abort() might be called very early during initialization, or
 // called from signal handler. Before adding something to os::abort(), make
 // sure it is async-safe and can handle partially initialized VM.
-void os::abort(bool dump_core, void* siginfo, void* context) {
+void os::abort(bool dump_core, void* siginfo, const void* context) {
   os::shutdown();
   if (dump_core) {
 #ifndef PRODUCT
@@ -3420,8 +3382,12 @@
     }
   } else if(os::Bsd::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Bsd::get_our_sigflags(sig)) {
     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
-    tty->print("expected:" PTR32_FORMAT, os::Bsd::get_our_sigflags(sig));
-    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
+    tty->print("expected:");
+    os::Posix::print_sa_flags(tty, os::Bsd::get_our_sigflags(sig));
+    tty->cr();
+    tty->print("  found:");
+    os::Posix::print_sa_flags(tty, act.sa_flags);
+    tty->cr();
     // No need to check this sig any longer
     sigaddset(&check_signal_done, sig);
   }
@@ -3435,20 +3401,6 @@
 extern void report_error(char* file_name, int line_no, char* title,
                          char* format, ...);
 
-extern bool signal_name(int signo, char* buf, size_t len);
-
-const char* os::exception_name(int exception_code, char* buf, size_t size) {
-  if (0 < exception_code && exception_code <= SIGRTMAX) {
-    // signal
-    if (!signal_name(exception_code, buf, size)) {
-      jio_snprintf(buf, size, "SIG%d", exception_code);
-    }
-    return buf;
-  } else {
-    return NULL;
-  }
-}
-
 // this is called _before_ the most of global arguments have been parsed
 void os::init(void) {
   char dummy;   // used to get a guess on initial stack address
@@ -3545,7 +3497,7 @@
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed,
-                                    (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                                    (size_t)(StackReservedPages+StackYellowPages+StackRedPages+StackShadowPages+
                                     2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size());
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
@@ -3691,7 +3643,7 @@
   Thread* thread = context.thread();
   OSThread* osthread = thread->osthread();
   if (osthread->ucontext() != NULL) {
-    _epc = os::Bsd::ucontext_get_pc((ucontext_t *) context.ucontext());
+    _epc = os::Bsd::ucontext_get_pc((const ucontext_t *) context.ucontext());
   } else {
     // NULL context is unexpected, double-check this is the VMThread
     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
--- a/src/os/bsd/vm/os_bsd.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/bsd/vm/os_bsd.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -86,19 +86,21 @@
   static int page_size(void)                                        { return _page_size; }
   static void set_page_size(int val)                                { _page_size = val; }
 
-  static address   ucontext_get_pc(ucontext_t* uc);
+  static address   ucontext_get_pc(const ucontext_t* uc);
   static void ucontext_set_pc(ucontext_t* uc, address pc);
-  static intptr_t* ucontext_get_sp(ucontext_t* uc);
-  static intptr_t* ucontext_get_fp(ucontext_t* uc);
+  static intptr_t* ucontext_get_sp(const ucontext_t* uc);
+  static intptr_t* ucontext_get_fp(const ucontext_t* uc);
 
   // For Analyzer Forte AsyncGetCallTrace profiling support:
   //
   // This interface should be declared in os_bsd_i486.hpp, but
   // that file provides extensions to the os class and not the
   // Bsd class.
-  static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
+  static ExtendedPC fetch_frame_from_ucontext(Thread* thread, const ucontext_t* uc,
                                               intptr_t** ret_sp, intptr_t** ret_fp);
 
+  static bool get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr);
+
   // This boolean allows users to forward their own non-matching signals
   // to JVM_handle_bsd_signal, harmlessly.
   static bool signal_handlers_are_installed;
--- a/src/os/bsd/vm/os_bsd.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/bsd/vm/os_bsd.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,10 +34,6 @@
 #include <sys/poll.h>
 #include <netdb.h>
 
-inline void* os::thread_local_storage_at(int index) {
-  return pthread_getspecific((pthread_key_t)index);
-}
-
 // File names are case-sensitive on windows only
 inline int os::file_name_strcmp(const char* s1, const char* s2) {
   return strcmp(s1, s2);
--- a/src/os/bsd/vm/thread_bsd.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_BSD_VM_THREAD_BSD_INLINE_HPP
-#define OS_BSD_VM_THREAD_BSD_INLINE_HPP
-
-#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
-#error "This file should only be included from thread.inline.hpp"
-#endif
-
-#include "runtime/thread.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Contains inlined functions for class Thread and ThreadLocalStorage
-
-inline void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
-
-#endif // OS_BSD_VM_THREAD_BSD_INLINE_HPP
--- a/src/os/linux/vm/jvm_linux.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/linux/vm/jvm_linux.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -108,91 +108,3 @@
   return JNI_TRUE;
 JVM_END
 
-/*
-  All the defined signal names for Linux.
-
-  NOTE that not all of these names are accepted by our Java implementation
-
-  Via an existing claim by the VM, sigaction restrictions, or
-  the "rules of Unix" some of these names will be rejected at runtime.
-  For example the VM sets up to handle USR1, sigaction returns EINVAL for
-  STOP, and Linux simply doesn't allow catching of KILL.
-
-  Here are the names currently accepted by a user of sun.misc.Signal with
-  1.4.1 (ignoring potential interaction with use of chaining, etc):
-
-    HUP, INT, TRAP, ABRT, IOT, BUS, USR2, PIPE, ALRM, TERM, STKFLT,
-    CLD, CHLD, CONT, TSTP, TTIN, TTOU, URG, XCPU, XFSZ, VTALRM, PROF,
-    WINCH, POLL, IO, PWR, SYS
-
-*/
-
-struct siglabel {
-  const char *name;
-  int   number;
-};
-
-struct siglabel siglabels[] = {
-  /* derived from /usr/include/bits/signum.h on RH7.2 */
-   "HUP",       SIGHUP,         /* Hangup (POSIX).  */
-  "INT",        SIGINT,         /* Interrupt (ANSI).  */
-  "QUIT",       SIGQUIT,        /* Quit (POSIX).  */
-  "ILL",        SIGILL,         /* Illegal instruction (ANSI).  */
-  "TRAP",       SIGTRAP,        /* Trace trap (POSIX).  */
-  "ABRT",       SIGABRT,        /* Abort (ANSI).  */
-  "IOT",        SIGIOT,         /* IOT trap (4.2 BSD).  */
-  "BUS",        SIGBUS,         /* BUS error (4.2 BSD).  */
-  "FPE",        SIGFPE,         /* Floating-point exception (ANSI).  */
-  "KILL",       SIGKILL,        /* Kill, unblockable (POSIX).  */
-  "USR1",       SIGUSR1,        /* User-defined signal 1 (POSIX).  */
-  "SEGV",       SIGSEGV,        /* Segmentation violation (ANSI).  */
-  "USR2",       SIGUSR2,        /* User-defined signal 2 (POSIX).  */
-  "PIPE",       SIGPIPE,        /* Broken pipe (POSIX).  */
-  "ALRM",       SIGALRM,        /* Alarm clock (POSIX).  */
-  "TERM",       SIGTERM,        /* Termination (ANSI).  */
-#ifdef SIGSTKFLT
-  "STKFLT",     SIGSTKFLT,      /* Stack fault.  */
-#endif
-  "CLD",        SIGCLD,         /* Same as SIGCHLD (System V).  */
-  "CHLD",       SIGCHLD,        /* Child status has changed (POSIX).  */
-  "CONT",       SIGCONT,        /* Continue (POSIX).  */
-  "STOP",       SIGSTOP,        /* Stop, unblockable (POSIX).  */
-  "TSTP",       SIGTSTP,        /* Keyboard stop (POSIX).  */
-  "TTIN",       SIGTTIN,        /* Background read from tty (POSIX).  */
-  "TTOU",       SIGTTOU,        /* Background write to tty (POSIX).  */
-  "URG",        SIGURG,         /* Urgent condition on socket (4.2 BSD).  */
-  "XCPU",       SIGXCPU,        /* CPU limit exceeded (4.2 BSD).  */
-  "XFSZ",       SIGXFSZ,        /* File size limit exceeded (4.2 BSD).  */
-  "VTALRM",     SIGVTALRM,      /* Virtual alarm clock (4.2 BSD).  */
-  "PROF",       SIGPROF,        /* Profiling alarm clock (4.2 BSD).  */
-  "WINCH",      SIGWINCH,       /* Window size change (4.3 BSD, Sun).  */
-  "POLL",       SIGPOLL,        /* Pollable event occurred (System V).  */
-  "IO",         SIGIO,          /* I/O now possible (4.2 BSD).  */
-  "PWR",        SIGPWR,         /* Power failure restart (System V).  */
-#ifdef SIGSYS
-  "SYS",        SIGSYS          /* Bad system call. Only on some Linuxen! */
-#endif
-  };
-
-JVM_ENTRY_NO_ENV(jint, JVM_FindSignal(const char *name))
-
-  /* find and return the named signal's number */
-
-  for(uint i=0; i<ARRAY_SIZE(siglabels); i++)
-    if(!strcmp(name, siglabels[i].name))
-      return siglabels[i].number;
-
-  return -1;
-
-JVM_END
-
-// used by os::exception_name()
-extern bool signal_name(int signo, char* buf, size_t len) {
-  for(uint i = 0; i < ARRAY_SIZE(siglabels); i++) {
-    if (signo == siglabels[i].number) {
-      jio_snprintf(buf, len, "SIG%s", siglabels[i].name);
-      return true;
-    }
-  }
-  return false;
-}
--- a/src/os/linux/vm/os_linux.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/linux/vm/os_linux.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -646,7 +646,7 @@
   int pid = os::current_process_id();
   alloca(((pid ^ counter++) & 7) * 128);
 
-  ThreadLocalStorage::set_thread(thread);
+  thread->initialize_thread_current();
 
   OSThread* osthread = thread->osthread();
   Monitor* sync = osthread->startThread_lock();
@@ -874,43 +874,6 @@
 }
 
 //////////////////////////////////////////////////////////////////////////////
-// thread local storage
-
-// Restore the thread pointer if the destructor is called. This is in case
-// someone from JNI code sets up a destructor with pthread_key_create to run
-// detachCurrentThread on thread death. Unless we restore the thread pointer we
-// will hang or crash. When detachCurrentThread is called the key will be set
-// to null and we will not be called again. If detachCurrentThread is never
-// called we could loop forever depending on the pthread implementation.
-static void restore_thread_pointer(void* p) {
-  Thread* thread = (Thread*) p;
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
-
-int os::allocate_thread_local_storage() {
-  pthread_key_t key;
-  int rslt = pthread_key_create(&key, restore_thread_pointer);
-  assert(rslt == 0, "cannot allocate thread local storage");
-  return (int)key;
-}
-
-// Note: This is currently not used by VM, as we don't destroy TLS key
-// on VM exit.
-void os::free_thread_local_storage(int index) {
-  int rslt = pthread_key_delete((pthread_key_t)index);
-  assert(rslt == 0, "invalid index");
-}
-
-void os::thread_local_storage_at_put(int index, void* value) {
-  int rslt = pthread_setspecific((pthread_key_t)index, value);
-  assert(rslt == 0, "pthread_setspecific failed");
-}
-
-extern "C" Thread* get_thread() {
-  return ThreadLocalStorage::thread();
-}
-
-//////////////////////////////////////////////////////////////////////////////
 // initial thread
 
 // Check if current thread is the initial thread, similar to Solaris thr_main.
@@ -1378,7 +1341,7 @@
 // Note: os::abort() might be called very early during initialization, or
 // called from signal handler. Before adding something to os::abort(), make
 // sure it is async-safe and can handle partially initialized VM.
-void os::abort(bool dump_core, void* siginfo, void* context) {
+void os::abort(bool dump_core, void* siginfo, const void* context) {
   os::shutdown();
   if (dump_core) {
 #ifndef PRODUCT
@@ -1770,7 +1733,7 @@
 #if defined(VM_LITTLE_ENDIAN)
-    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64"},
+    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64 LE"},
 #else
     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
 #endif
     {EM_ARM,         EM_ARM,     ELFCLASS32,   ELFDATA2LSB, (char*)"ARM"},
     {EM_S390,        EM_S390,    ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
@@ -1898,8 +1861,8 @@
     JavaThread *jt = Threads::first();
 
     while (jt) {
-      if (!jt->stack_guard_zone_unused() &&        // Stack not yet fully initialized
-          jt->stack_yellow_zone_enabled()) {       // No pending stack overflow exceptions
+      if (!jt->stack_guard_zone_unused() &&     // Stack not yet fully initialized
+          jt->stack_guards_enabled()) {         // No pending stack overflow exceptions
         if (!os::guard_memory((char *) jt->stack_red_zone_base() - jt->stack_red_zone_size(),
                               jt->stack_yellow_zone_size() + jt->stack_red_zone_size())) {
           warning("Attempt to reguard stack yellow zone failed.");
@@ -2214,6 +2177,8 @@
 const char* search_string = "model name";
 #elif defined(SPARC)
 const char* search_string = "cpu";
+#elif defined(PPC64)
+const char* search_string = "cpu";
 #else
 const char* search_string = "Processor";
 #endif
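
On PPC64, /proc/cpuinfo reports the processor model on a "cpu" line rather than the x86-style "model name"; a typical line looks roughly like this (illustrative, wording varies by kernel):

//   cpu             : POWER8 (architected), altivec supported
// Matching on the "cpu" prefix therefore extracts the model string just
// as "model name" does on x86.
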
@@ -4570,8 +4535,12 @@
     }
   } else if(os::Linux::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Linux::get_our_sigflags(sig)) {
     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
-    tty->print("expected:" PTR32_FORMAT, os::Linux::get_our_sigflags(sig));
-    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
+    tty->print("expected:");
+    os::Posix::print_sa_flags(tty, os::Linux::get_our_sigflags(sig));
+    tty->cr();
+    tty->print("  found:");
+    os::Posix::print_sa_flags(tty, act.sa_flags);
+    tty->cr();
     // No need to check this sig any longer
     sigaddset(&check_signal_done, sig);
   }
@@ -4585,20 +4554,6 @@
 extern void report_error(char* file_name, int line_no, char* title,
                          char* format, ...);
 
-extern bool signal_name(int signo, char* buf, size_t len);
-
-const char* os::exception_name(int exception_code, char* buf, size_t size) {
-  if (0 < exception_code && exception_code <= SIGRTMAX) {
-    // signal
-    if (!signal_name(exception_code, buf, size)) {
-      jio_snprintf(buf, size, "SIG%d", exception_code);
-    }
-    return buf;
-  } else {
-    return NULL;
-  }
-}
-
 // this is called _before_ the most of global arguments have been parsed
 void os::init(void) {
   char dummy;   // used to get a guess on initial stack address
@@ -4650,6 +4605,11 @@
   if (vm_page_size() > (int)Linux::vm_default_page_size()) {
     StackYellowPages = 1;
     StackRedPages = 1;
+#if defined(IA32) || defined(IA64)
+    StackReservedPages = 1;
+#else
+    StackReservedPages = 0;
+#endif
     StackShadowPages = round_to((StackShadowPages*Linux::vm_default_page_size()), vm_page_size()) / vm_page_size();
   }
 
@@ -4711,7 +4671,7 @@
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
-                                      (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() +
+                                      (size_t)(StackReservedPages+StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() +
                                       (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size());
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
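
The reserved zone now counts toward the minimum allowed stack size. A back-of-the-envelope check of the guard-zone term, assuming 4 KB pages, one reserved, yellow and red page each, and 20 shadow pages (illustrative values only):

// (StackReservedPages + StackYellowPages + StackRedPages + StackShadowPages)
//   * page_size  =  (1 + 1 + 1 + 20) * 4096  =  94208 bytes (~92 KB),
// plus the (2*BytesPerWord COMPILER2_PRESENT(+1)) * vm_default_page_size()
// term for the VM stack used during class initialization.
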
@@ -4893,7 +4853,7 @@
   Thread* thread = context.thread();
   OSThread* osthread = thread->osthread();
   if (osthread->ucontext() != NULL) {
-    _epc = os::Linux::ucontext_get_pc((ucontext_t *) context.ucontext());
+    _epc = os::Linux::ucontext_get_pc((const ucontext_t *) context.ucontext());
   } else {
     // NULL context is unexpected, double-check this is the VMThread
     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
--- a/src/os/linux/vm/os_linux.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/linux/vm/os_linux.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -123,19 +123,21 @@
 
   static int vm_default_page_size(void)                             { return _vm_default_page_size; }
 
-  static address   ucontext_get_pc(ucontext_t* uc);
+  static address   ucontext_get_pc(const ucontext_t* uc);
   static void ucontext_set_pc(ucontext_t* uc, address pc);
-  static intptr_t* ucontext_get_sp(ucontext_t* uc);
-  static intptr_t* ucontext_get_fp(ucontext_t* uc);
+  static intptr_t* ucontext_get_sp(const ucontext_t* uc);
+  static intptr_t* ucontext_get_fp(const ucontext_t* uc);
 
   // For Analyzer Forte AsyncGetCallTrace profiling support:
   //
   // This interface should be declared in os_linux_i486.hpp, but
   // that file provides extensions to the os class and not the
   // Linux class.
-  static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
+  static ExtendedPC fetch_frame_from_ucontext(Thread* thread, const ucontext_t* uc,
                                               intptr_t** ret_sp, intptr_t** ret_fp);
 
+  static bool get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr);
+
   // This boolean allows users to forward their own non-matching signals
   // to JVM_handle_linux_signal, harmlessly.
   static bool signal_handlers_are_installed;
--- a/src/os/linux/vm/os_linux.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/linux/vm/os_linux.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,10 +34,6 @@
 #include <sys/poll.h>
 #include <netdb.h>
 
-inline void* os::thread_local_storage_at(int index) {
-  return pthread_getspecific((pthread_key_t)index);
-}
-
 // File names are case-sensitive on windows only
 inline int os::file_name_strcmp(const char* s1, const char* s2) {
   return strcmp(s1, s2);
--- a/src/os/linux/vm/thread_linux.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_LINUX_VM_THREAD_LINUX_INLINE_HPP
-#define OS_LINUX_VM_THREAD_LINUX_INLINE_HPP
-
-#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
-#error "This file should only be included from thread.inline.hpp"
-#endif
-
-#include "runtime/thread.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Contains inlined functions for class Thread and ThreadLocalStorage
-
-inline void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
-
-#endif // OS_LINUX_VM_THREAD_LINUX_INLINE_HPP
--- a/src/os/posix/vm/os_posix.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/posix/vm/os_posix.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -493,166 +493,171 @@
   return interrupted;
 }
 
-// Returned string is a constant. For unknown signals "UNKNOWN" is returned.
-const char* os::Posix::get_signal_name(int sig, char* out, size_t outlen) {
+
 
-  static const struct {
-    int sig; const char* name;
-  }
-  info[] =
+static const struct {
+  int sig; const char* name;
+}
+ g_signal_info[] =
   {
-    {  SIGABRT,     "SIGABRT" },
+  {  SIGABRT,     "SIGABRT" },
 #ifdef SIGAIO
-    {  SIGAIO,      "SIGAIO" },
+  {  SIGAIO,      "SIGAIO" },
 #endif
-    {  SIGALRM,     "SIGALRM" },
+  {  SIGALRM,     "SIGALRM" },
 #ifdef SIGALRM1
-    {  SIGALRM1,    "SIGALRM1" },
+  {  SIGALRM1,    "SIGALRM1" },
 #endif
-    {  SIGBUS,      "SIGBUS" },
+  {  SIGBUS,      "SIGBUS" },
 #ifdef SIGCANCEL
-    {  SIGCANCEL,   "SIGCANCEL" },
+  {  SIGCANCEL,   "SIGCANCEL" },
 #endif
-    {  SIGCHLD,     "SIGCHLD" },
+  {  SIGCHLD,     "SIGCHLD" },
 #ifdef SIGCLD
-    {  SIGCLD,      "SIGCLD" },
+  {  SIGCLD,      "SIGCLD" },
 #endif
-    {  SIGCONT,     "SIGCONT" },
+  {  SIGCONT,     "SIGCONT" },
 #ifdef SIGCPUFAIL
-    {  SIGCPUFAIL,  "SIGCPUFAIL" },
+  {  SIGCPUFAIL,  "SIGCPUFAIL" },
 #endif
 #ifdef SIGDANGER
-    {  SIGDANGER,   "SIGDANGER" },
+  {  SIGDANGER,   "SIGDANGER" },
 #endif
 #ifdef SIGDIL
-    {  SIGDIL,      "SIGDIL" },
+  {  SIGDIL,      "SIGDIL" },
 #endif
 #ifdef SIGEMT
-    {  SIGEMT,      "SIGEMT" },
+  {  SIGEMT,      "SIGEMT" },
 #endif
-    {  SIGFPE,      "SIGFPE" },
+  {  SIGFPE,      "SIGFPE" },
 #ifdef SIGFREEZE
-    {  SIGFREEZE,   "SIGFREEZE" },
+  {  SIGFREEZE,   "SIGFREEZE" },
 #endif
 #ifdef SIGGFAULT
-    {  SIGGFAULT,   "SIGGFAULT" },
+  {  SIGGFAULT,   "SIGGFAULT" },
 #endif
 #ifdef SIGGRANT
-    {  SIGGRANT,    "SIGGRANT" },
+  {  SIGGRANT,    "SIGGRANT" },
 #endif
-    {  SIGHUP,      "SIGHUP" },
-    {  SIGILL,      "SIGILL" },
-    {  SIGINT,      "SIGINT" },
+  {  SIGHUP,      "SIGHUP" },
+  {  SIGILL,      "SIGILL" },
+  {  SIGINT,      "SIGINT" },
 #ifdef SIGIO
-    {  SIGIO,       "SIGIO" },
+  {  SIGIO,       "SIGIO" },
 #endif
 #ifdef SIGIOINT
-    {  SIGIOINT,    "SIGIOINT" },
+  {  SIGIOINT,    "SIGIOINT" },
 #endif
 #ifdef SIGIOT
-  // SIGIOT is there for BSD compatibility, but on most Unices just a
-  // synonym for SIGABRT. The result should be "SIGABRT", not
-  // "SIGIOT".
-  #if (SIGIOT != SIGABRT )
-    {  SIGIOT,      "SIGIOT" },
-  #endif
+// SIGIOT is there for BSD compatibility, but on most Unices just a
+// synonym for SIGABRT. The result should be "SIGABRT", not
+// "SIGIOT".
+#if (SIGIOT != SIGABRT )
+  {  SIGIOT,      "SIGIOT" },
+#endif
 #endif
 #ifdef SIGKAP
-    {  SIGKAP,      "SIGKAP" },
+  {  SIGKAP,      "SIGKAP" },
 #endif
-    {  SIGKILL,     "SIGKILL" },
+  {  SIGKILL,     "SIGKILL" },
 #ifdef SIGLOST
-    {  SIGLOST,     "SIGLOST" },
+  {  SIGLOST,     "SIGLOST" },
 #endif
 #ifdef SIGLWP
-    {  SIGLWP,      "SIGLWP" },
+  {  SIGLWP,      "SIGLWP" },
 #endif
 #ifdef SIGLWPTIMER
-    {  SIGLWPTIMER, "SIGLWPTIMER" },
+  {  SIGLWPTIMER, "SIGLWPTIMER" },
 #endif
 #ifdef SIGMIGRATE
-    {  SIGMIGRATE,  "SIGMIGRATE" },
+  {  SIGMIGRATE,  "SIGMIGRATE" },
 #endif
 #ifdef SIGMSG
-    {  SIGMSG,      "SIGMSG" },
+  {  SIGMSG,      "SIGMSG" },
 #endif
-    {  SIGPIPE,     "SIGPIPE" },
+  {  SIGPIPE,     "SIGPIPE" },
 #ifdef SIGPOLL
-    {  SIGPOLL,     "SIGPOLL" },
+  {  SIGPOLL,     "SIGPOLL" },
 #endif
 #ifdef SIGPRE
-    {  SIGPRE,      "SIGPRE" },
+  {  SIGPRE,      "SIGPRE" },
 #endif
-    {  SIGPROF,     "SIGPROF" },
+  {  SIGPROF,     "SIGPROF" },
 #ifdef SIGPTY
-    {  SIGPTY,      "SIGPTY" },
+  {  SIGPTY,      "SIGPTY" },
 #endif
 #ifdef SIGPWR
-    {  SIGPWR,      "SIGPWR" },
+  {  SIGPWR,      "SIGPWR" },
 #endif
-    {  SIGQUIT,     "SIGQUIT" },
+  {  SIGQUIT,     "SIGQUIT" },
 #ifdef SIGRECONFIG
-    {  SIGRECONFIG, "SIGRECONFIG" },
+  {  SIGRECONFIG, "SIGRECONFIG" },
 #endif
 #ifdef SIGRECOVERY
-    {  SIGRECOVERY, "SIGRECOVERY" },
+  {  SIGRECOVERY, "SIGRECOVERY" },
 #endif
 #ifdef SIGRESERVE
-    {  SIGRESERVE,  "SIGRESERVE" },
+  {  SIGRESERVE,  "SIGRESERVE" },
 #endif
 #ifdef SIGRETRACT
-    {  SIGRETRACT,  "SIGRETRACT" },
+  {  SIGRETRACT,  "SIGRETRACT" },
 #endif
 #ifdef SIGSAK
-    {  SIGSAK,      "SIGSAK" },
+  {  SIGSAK,      "SIGSAK" },
 #endif
-    {  SIGSEGV,     "SIGSEGV" },
+  {  SIGSEGV,     "SIGSEGV" },
 #ifdef SIGSOUND
-    {  SIGSOUND,    "SIGSOUND" },
+  {  SIGSOUND,    "SIGSOUND" },
+#endif
+#ifdef SIGSTKFLT
+  {  SIGSTKFLT,   "SIGSTKFLT" },
 #endif
-    {  SIGSTOP,     "SIGSTOP" },
-    {  SIGSYS,      "SIGSYS" },
+  {  SIGSTOP,     "SIGSTOP" },
+  {  SIGSYS,      "SIGSYS" },
 #ifdef SIGSYSERROR
-    {  SIGSYSERROR, "SIGSYSERROR" },
+  {  SIGSYSERROR, "SIGSYSERROR" },
 #endif
 #ifdef SIGTALRM
-    {  SIGTALRM,    "SIGTALRM" },
+  {  SIGTALRM,    "SIGTALRM" },
 #endif
-    {  SIGTERM,     "SIGTERM" },
+  {  SIGTERM,     "SIGTERM" },
 #ifdef SIGTHAW
-    {  SIGTHAW,     "SIGTHAW" },
+  {  SIGTHAW,     "SIGTHAW" },
 #endif
-    {  SIGTRAP,     "SIGTRAP" },
+  {  SIGTRAP,     "SIGTRAP" },
 #ifdef SIGTSTP
-    {  SIGTSTP,     "SIGTSTP" },
+  {  SIGTSTP,     "SIGTSTP" },
 #endif
-    {  SIGTTIN,     "SIGTTIN" },
-    {  SIGTTOU,     "SIGTTOU" },
+  {  SIGTTIN,     "SIGTTIN" },
+  {  SIGTTOU,     "SIGTTOU" },
 #ifdef SIGURG
-    {  SIGURG,      "SIGURG" },
+  {  SIGURG,      "SIGURG" },
 #endif
-    {  SIGUSR1,     "SIGUSR1" },
-    {  SIGUSR2,     "SIGUSR2" },
+  {  SIGUSR1,     "SIGUSR1" },
+  {  SIGUSR2,     "SIGUSR2" },
 #ifdef SIGVIRT
-    {  SIGVIRT,     "SIGVIRT" },
+  {  SIGVIRT,     "SIGVIRT" },
 #endif
-    {  SIGVTALRM,   "SIGVTALRM" },
+  {  SIGVTALRM,   "SIGVTALRM" },
 #ifdef SIGWAITING
-    {  SIGWAITING,  "SIGWAITING" },
+  {  SIGWAITING,  "SIGWAITING" },
 #endif
 #ifdef SIGWINCH
-    {  SIGWINCH,    "SIGWINCH" },
+  {  SIGWINCH,    "SIGWINCH" },
 #endif
 #ifdef SIGWINDOW
-    {  SIGWINDOW,   "SIGWINDOW" },
+  {  SIGWINDOW,   "SIGWINDOW" },
 #endif
-    {  SIGXCPU,     "SIGXCPU" },
-    {  SIGXFSZ,     "SIGXFSZ" },
+  {  SIGXCPU,     "SIGXCPU" },
+  {  SIGXFSZ,     "SIGXFSZ" },
 #ifdef SIGXRES
-    {  SIGXRES,     "SIGXRES" },
+  {  SIGXRES,     "SIGXRES" },
 #endif
-    { -1, NULL }
-  };
+  { -1, NULL }
+};
+
+// Returned string is a constant. For unknown signals "UNKNOWN" is returned.
+const char* os::Posix::get_signal_name(int sig, char* out, size_t outlen) {
 
   const char* ret = NULL;
 
@@ -670,9 +675,9 @@
 #endif
 
   if (sig > 0) {
-    for (int idx = 0; info[idx].sig != -1; idx ++) {
-      if (info[idx].sig == sig) {
-        ret = info[idx].name;
+    for (int idx = 0; g_signal_info[idx].sig != -1; idx ++) {
+      if (g_signal_info[idx].sig == sig) {
+        ret = g_signal_info[idx].name;
         break;
       }
     }
@@ -693,6 +698,25 @@
   return out;
 }
 
+int os::Posix::get_signal_number(const char* signal_name) {
+  char tmp[30];
+  const char* s = signal_name;
+  if (s[0] != 'S' || s[1] != 'I' || s[2] != 'G') {
+    jio_snprintf(tmp, sizeof(tmp), "SIG%s", signal_name);
+    s = tmp;
+  }
+  for (int idx = 0; g_signal_info[idx].sig != -1; idx ++) {
+    if (strcmp(g_signal_info[idx].name, s) == 0) {
+      return g_signal_info[idx].sig;
+    }
+  }
+  return -1;
+}
+
+int os::get_signal_number(const char* signal_name) {
+  return os::Posix::get_signal_number(signal_name);
+}
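
With the table hoisted to file scope as g_signal_info, the name-to-number and number-to-name lookups now share one source of truth. Illustrative behaviour of the new lookup (not part of the patch):

// os::get_signal_number("SEGV")      -> SIGSEGV ("SIG" prefix is optional)
// os::get_signal_number("SIGSEGV")   -> SIGSEGV
// os::get_signal_number("segv")      -> -1 (names must be uppercase)
// os::get_signal_number("NOSUCHSIG") -> -1
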
+
 // Returns true if signal number is valid.
 bool os::Posix::is_valid_signal(int sig) {
   // MacOS not really POSIX compliant: sigaddset does not return
@@ -711,6 +735,21 @@
 #endif
 }
 
+// Returns:
+// NULL for an invalid signal number
+// "SIG<num>" for a valid but unknown signal number
+// signal name otherwise.
+const char* os::exception_name(int sig, char* buf, size_t size) {
+  if (!os::Posix::is_valid_signal(sig)) {
+    return NULL;
+  }
+  const char* const name = os::Posix::get_signal_name(sig, buf, size);
+  if (strcmp(name, "UNKNOWN") == 0) {
+    jio_snprintf(buf, size, "SIG%d", sig);
+  }
+  return buf;
+}
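
This shared os::exception_name() replaces the three per-platform copies removed earlier in the changeset. Illustrative results under the documented contract (exact unnamed-but-valid numbers vary by platform):

// char buf[64];
// os::exception_name(SIGSEGV, buf, sizeof(buf)) -> "SIGSEGV"
// os::exception_name(40, buf, sizeof(buf))      -> "SIG40", assuming 40 is a
//                                                  valid but unnamed signal
// os::exception_name(0, buf, sizeof(buf))       -> NULL (invalid number)
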
+
 #define NUM_IMPORTANT_SIGS 32
 // Returns one-line short description of a signal set in a user provided buffer.
 const char* os::Posix::describe_signal_set_short(const sigset_t* set, char* buffer, size_t buf_size) {
@@ -992,7 +1031,7 @@
   return pthread_sigmask(SIG_UNBLOCK, set, NULL);
 }
 
-address os::Posix::ucontext_get_pc(ucontext_t* ctx) {
+address os::Posix::ucontext_get_pc(const ucontext_t* ctx) {
 #ifdef TARGET_OS_FAMILY_linux
    return Linux::ucontext_get_pc(ctx);
 #elif defined(TARGET_OS_FAMILY_solaris)
--- a/src/os/posix/vm/os_posix.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/posix/vm/os_posix.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -51,6 +51,12 @@
   // Returned string is a constant. For unknown signals "UNKNOWN" is returned.
   static const char* get_signal_name(int sig, char* out, size_t outlen);
 
+  // Helper function: returns a signal number for a given signal name, e.g. 11
+  // for "SIGSEGV". The name can be given with or without the "SIG" prefix, so
+  // both "SEGV" and "SIGSEGV" work. The name must be uppercase.
+  // Returns -1 for an unknown signal name.
+  static int get_signal_number(const char* signal_name);
+
   // Returns one-line short description of a signal set in a user provided buffer.
   static const char* describe_signal_set_short(const sigset_t* set, char* buffer, size_t size);
 
@@ -70,7 +76,7 @@
   // A POSIX-conformant, platform-independent siginfo print routine.
   static void print_siginfo_brief(outputStream* os, const siginfo_t* si);
 
-  static address ucontext_get_pc(ucontext_t* ctx);
+  static address ucontext_get_pc(const ucontext_t* ctx);
   // Set PC into context. Needed for continuation after signal.
   static void ucontext_set_pc(ucontext_t* ctx, address pc);
 };
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/posix/vm/threadLocalStorage_posix.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "runtime/threadLocalStorage.hpp"
+#include <pthread.h>
+
+static pthread_key_t _thread_key;
+static bool _initialized = false;
+
+// Restore the thread pointer if the destructor is called. This is in case
+// someone from JNI code sets up a destructor with pthread_key_create to run
+// detachCurrentThread on thread death. Unless we restore the thread pointer we
+// will hang or crash. When detachCurrentThread is called the key will be set
+// to null and we will not be called again. If detachCurrentThread is never
+// called we could loop forever depending on the pthread implementation.
+extern "C" void restore_thread_pointer(void* p) {
+  ThreadLocalStorage::set_thread((Thread*) p);
+}
+
+void ThreadLocalStorage::init() {
+  assert(!_initialized, "initializing TLS more than once!");
+  int rslt = pthread_key_create(&_thread_key, restore_thread_pointer);
+  // If this assert fails we will get a recursive assertion failure
+  // and not see the actual error message or get a hs_err file
+  assert_status(rslt == 0, rslt, "pthread_key_create");
+  _initialized = true;
+}
+
+bool ThreadLocalStorage::is_initialized() {
+  return _initialized;
+}
+
+Thread* ThreadLocalStorage::thread() {
+  // If this assert fails we will get a recursive assertion failure
+  // and not see the actual error message or get a hs_err file.
+  // Which most likely indicates we have taken an error path early in
+  // the initialization process, which is using Thread::current without
+  // checking TLS is initialized - see java.cpp vm_exit
+  assert(_initialized, "TLS not initialized yet!");
+  return (Thread*) pthread_getspecific(_thread_key); // may be NULL
+}
+
+void ThreadLocalStorage::set_thread(Thread* current) {
+  assert(_initialized, "TLS not initialized yet!");
+  int rslt = pthread_setspecific(_thread_key, current);
+  assert_status(rslt == 0, rslt, "pthread_setspecific");
+}
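
All POSIX ports now share this single pthread-key implementation. The expected call order during VM bootstrap is roughly as follows (sketch; the callers live in the surrounding runtime code):

// ThreadLocalStorage::init();               // once, early in VM startup
// ...
// thread->initialize_thread_current();      // in each thread's start routine;
//                                           // ends up calling set_thread()
// Thread* t = ThreadLocalStorage::thread(); // slow-path lookup afterwards
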
--- a/src/os/solaris/vm/jvm_solaris.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/solaris/vm/jvm_solaris.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -106,40 +106,3 @@
   return JNI_TRUE;
 JVM_END
 
-
-/*
-  All the defined signal names for Solaris are defined by str2sig().
-
-  NOTE that not all of these names are accepted by our Java implementation
-
-  Via an existing claim by the VM, sigaction restrictions, or
-  the "rules of Unix" some of these names will be rejected at runtime.
-  For example the VM sets up to handle USR1, sigaction returns EINVAL for
-  CANCEL, and Solaris simply doesn't allow catching of KILL.
-
-  Here are the names currently accepted by a user of sun.misc.Signal with
-  1.4.1 (ignoring potential interaction with use of chaining, etc):
-
-      HUP, INT, TRAP, IOT, ABRT, EMT, BUS, SYS, PIPE, ALRM, TERM, USR2,
-      CLD, CHLD, PWR, WINCH, URG, POLL, IO, TSTP, CONT, TTIN, TTOU, VTALRM,
-      PROF, XCPU, XFSZ, FREEZE, THAW, LOST
-*/
-
-JVM_ENTRY_NO_ENV(jint, JVM_FindSignal(const char *name))
-
-  int sig;
-
-  /* return the named signal's number */
-
-  if(str2sig(name, &sig))
-    return -1;
-  else
-    return sig;
-
-JVM_END
-
-
-//Reconciliation History
-// 1.4 98/10/07 13:39:41 jvm_win32.cpp
-// 1.6 99/06/22 16:39:00 jvm_win32.cpp
-//End
--- a/src/os/solaris/vm/os_solaris.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/solaris/vm/os_solaris.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -728,6 +728,9 @@
 
   int prio;
   Thread* thread = (Thread*)thread_addr;
+
+  thread->initialize_thread_current();
+
   OSThread* osthr = thread->osthread();
 
   osthr->set_lwp_id(_lwp_self());  // Store lwp in case we are bound
@@ -1377,7 +1380,7 @@
 // Note: os::abort() might be called very early during initialization, or
 // called from signal handler. Before adding something to os::abort(), make
 // sure it is async-safe and can handle partially initialized VM.
-void os::abort(bool dump_core, void* siginfo, void* context) {
+void os::abort(bool dump_core, void* siginfo, const void* context) {
   os::shutdown();
   if (dump_core) {
 #ifndef PRODUCT
@@ -3733,7 +3736,7 @@
   Thread* thread = context.thread();
   OSThread* osthread = thread->osthread();
   if (osthread->ucontext() != NULL) {
-    _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
+    _epc = os::Solaris::ucontext_get_pc((const ucontext_t *) context.ucontext());
   } else {
     // NULL context is unexpected, double-check this is the VMThread
     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
@@ -4055,8 +4058,12 @@
     }
   } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
-    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
-    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
+    tty->print("expected:");
+    os::Posix::print_sa_flags(tty, os::Solaris::get_our_sigflags(sig));
+    tty->cr();
+    tty->print("  found:");
+    os::Posix::print_sa_flags(tty, act.sa_flags);
+    tty->cr();
     // No need to check this sig any longer
     sigaddset(&check_signal_done, sig);
   }
@@ -4144,32 +4151,6 @@
 void report_error(const char* file_name, int line_no, const char* title,
                   const char* format, ...);
 
-const char * signames[] = {
-  "SIG0",
-  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
-  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
-  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
-  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
-  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
-  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
-  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
-  "SIGCANCEL", "SIGLOST"
-};
-
-const char* os::exception_name(int exception_code, char* buf, size_t size) {
-  if (0 < exception_code && exception_code <= SIGRTMAX) {
-    // signal
-    if (exception_code < sizeof(signames)/sizeof(const char*)) {
-      jio_snprintf(buf, size, "%s", signames[exception_code]);
-    } else {
-      jio_snprintf(buf, size, "SIG%d", exception_code);
-    }
-    return buf;
-  } else {
-    return NULL;
-  }
-}
-
 // (Static) wrapper for getisax(2) call.
 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
 
@@ -4401,6 +4382,7 @@
   if (vm_page_size() > 8*K) {
     StackYellowPages = 1;
     StackRedPages = 1;
+    StackReservedPages = 1;
     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
   }
 }
@@ -4457,7 +4439,7 @@
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
-                                        (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                                        (size_t)(StackReservedPages+StackYellowPages+StackRedPages+StackShadowPages+
                                         2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
@@ -5605,7 +5587,7 @@
 
   // fork is async-safe, fork1 is not so can't use in signal handler
   pid_t pid;
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
   if (t != NULL && t->is_inside_signal_handler()) {
     pid = fork();
   } else {
--- a/src/os/solaris/vm/os_solaris.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/solaris/vm/os_solaris.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -130,15 +130,15 @@
   static address handler_start, handler_end; // start and end pc of thr_sighndlrinfo
 
   static bool valid_stack_address(Thread* thread, address sp);
-  static bool valid_ucontext(Thread* thread, ucontext_t* valid, ucontext_t* suspect);
-  static ucontext_t* get_valid_uc_in_signal_handler(Thread* thread,
-                                                    ucontext_t* uc);
+  static bool valid_ucontext(Thread* thread, const ucontext_t* valid, const ucontext_t* suspect);
+  static const ucontext_t* get_valid_uc_in_signal_handler(Thread* thread,
+                                                    const ucontext_t* uc);
 
-  static ExtendedPC  ucontext_get_ExtendedPC(ucontext_t* uc);
-  static intptr_t*   ucontext_get_sp(ucontext_t* uc);
+  static ExtendedPC  ucontext_get_ExtendedPC(const ucontext_t* uc);
+  static intptr_t*   ucontext_get_sp(const ucontext_t* uc);
   // ucontext_get_fp() is only used by Solaris X86 (see note below)
-  static intptr_t*   ucontext_get_fp(ucontext_t* uc);
-  static address    ucontext_get_pc(ucontext_t* uc);
+  static intptr_t*   ucontext_get_fp(const ucontext_t* uc);
+  static address    ucontext_get_pc(const ucontext_t* uc);
   static void ucontext_set_pc(ucontext_t* uc, address pc);
 
   // For Analyzer Forte AsyncGetCallTrace profiling support:
@@ -147,9 +147,11 @@
   // We should have different declarations of this interface in
   // os_solaris_i486.hpp and os_solaris_sparc.hpp, but that file
   // provides extensions to the os class and not the Solaris class.
-  static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
+  static ExtendedPC fetch_frame_from_ucontext(Thread* thread, const ucontext_t* uc,
                                               intptr_t** ret_sp, intptr_t** ret_fp);
 
+  static bool get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr);
+
   static void hotspot_sigmask(Thread* thread);
 
   // SR_handler
--- a/src/os/solaris/vm/thread_solaris.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_SOLARIS_VM_THREAD_SOLARIS_INLINE_HPP
-#define OS_SOLARIS_VM_THREAD_SOLARIS_INLINE_HPP
-
-#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
-#error "This file should only be included from thread.inline.hpp"
-#endif
-
-#include "runtime/thread.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Thread::current is "hot" it's called > 128K times in the 1st 500 msecs of
-// startup.
-// ThreadLocalStorage::thread is warm -- it's called > 16K times in the same
-// period.   Thread::current() now calls ThreadLocalStorage::thread() directly.
-// For SPARC, to avoid excessive register window spill-fill faults,
-// we aggressively inline these routines.
-
-inline void ThreadLocalStorage::set_thread(Thread* thread) {
-  _thr_current = thread;
-}
-
-inline Thread* ThreadLocalStorage::thread()  {
-  return _thr_current;
-}
-
-#endif // OS_SOLARIS_VM_THREAD_SOLARIS_INLINE_HPP
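
The removed Solaris file already kept the current thread in a compiler-level thread-local variable instead of a pthread key, which is the model the shared code generalizes. Assumed shape of the declaration it relied on (the storage lived in the threadLS_solaris_* files, also deleted in this changeset):

// static __thread Thread* _thr_current;  // one slot per thread, no syscall
// inline Thread* ThreadLocalStorage::thread() { return _thr_current; }
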
--- a/src/os/windows/vm/jvm_windows.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/windows/vm/jvm_windows.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -89,39 +89,3 @@
 JVM_END
 
 
-/*
-  All the defined signal names for Windows.
-
-  NOTE that not all of these names are accepted by FindSignal!
-
-  For various reasons some of these may be rejected at runtime.
-
-  Here are the names currently accepted by a user of sun.misc.Signal with
-  1.4.1 (ignoring potential interaction with use of chaining, etc):
-
-     (LIST TBD)
-
-*/
-struct siglabel {
-  char *name;
-  int   number;
-};
-
-struct siglabel siglabels[] =
-  /* derived from version 6.0 VC98/include/signal.h */
-  {"ABRT",      SIGABRT,        /* abnormal termination triggered by abort cl */
-  "FPE",        SIGFPE,         /* floating point exception */
-  "SEGV",       SIGSEGV,        /* segment violation */
-  "INT",        SIGINT,         /* interrupt */
-  "TERM",       SIGTERM,        /* software term signal from kill */
-  "BREAK",      SIGBREAK,       /* Ctrl-Break sequence */
-  "ILL",        SIGILL};        /* illegal instruction */
-
-JVM_ENTRY_NO_ENV(jint, JVM_FindSignal(const char *name))
-  /* find and return the named signal's number */
-
-  for(int i=0;i<sizeof(siglabels)/sizeof(struct siglabel);i++)
-    if(!strcmp(name, siglabels[i].name))
-      return siglabels[i].number;
-  return -1;
-JVM_END
--- a/src/os/windows/vm/os_windows.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/windows/vm/os_windows.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -419,6 +419,8 @@
   int pid = os::current_process_id();
   _alloca(((pid ^ counter++) & 7) * 128);
 
+  thread->initialize_thread_current();
+
   OSThread* osthr = thread->osthread();
   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 
@@ -1026,7 +1028,7 @@
   VMError::record_coredump_status(buffer, status);
 }
 
-void os::abort(bool dump_core, void* siginfo, void* context) {
+void os::abort(bool dump_core, void* siginfo, const void* context) {
   HINSTANCE dbghelp;
   EXCEPTION_POINTERS ep;
   MINIDUMP_EXCEPTION_INFORMATION mei;
@@ -1799,24 +1801,32 @@
 void os::print_siginfo(outputStream *st, void *siginfo) {
   EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo;
   st->print("siginfo:");
-  st->print(" ExceptionCode=0x%x", er->ExceptionCode);
-
-  if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
-      er->NumberParameters >= 2) {
+
+  char tmp[64];
+  if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
+    strcpy(tmp, "EXCEPTION_??");
+  }
+  st->print(" %s (0x%x)", tmp, er->ExceptionCode);
+
+  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
+       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
+       er->NumberParameters >= 2) {
     switch (er->ExceptionInformation[0]) {
     case 0: st->print(", reading address"); break;
     case 1: st->print(", writing address"); break;
+    case 8: st->print(", data execution prevention violation at address"); break;
     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                        er->ExceptionInformation[0]);
     }
     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
-  } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
-             er->NumberParameters >= 2 && UseSharedSpaces) {
-    FileMapInfo* mapinfo = FileMapInfo::current_info();
-    if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
-      st->print("\n\nError accessing class data sharing archive."       \
-                " Mapped file inaccessible during execution, "          \
-                " possible disk/network problem.");
+
+    if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR && UseSharedSpaces) {
+      FileMapInfo* mapinfo = FileMapInfo::current_info();
+      if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
+        st->print("\n\nError accessing class data sharing archive."       \
+                  " Mapped file inaccessible during execution, "          \
+                  " possible disk/network problem.");
+      }
     }
   } else {
     int num = er->NumberParameters;
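
With the exception code translated through os::exception_name(), a typical access-violation report now reads roughly like this (illustrative output, address invented):

// siginfo: EXCEPTION_ACCESS_VIOLATION (0xc0000005), reading address 0x0000000000000010
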
@@ -2146,7 +2156,7 @@
 
 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                       address handler) {
-  JavaThread* thread = JavaThread::current();
+  JavaThread* thread = (JavaThread*) Thread::current_or_null();
   // Save pc in thread
 #ifdef _M_IA64
   // Do not blow up if no thread info available.
@@ -2364,6 +2374,39 @@
   // somewhere where we can find it in the minidump.
 }
 
+bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
+        struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
+  PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
+  address addr = (address) exceptionRecord->ExceptionInformation[1];
+  if (Interpreter::contains(pc)) {
+    *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // more complex code with compiled code
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to; fall back to the default
+      // stack overflow handling.
+      return false;
+    } else {
+      *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
+      // in compiled code, the stack banging is performed just after the return pc
+      // has been pushed on the stack
+      *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
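
The fr->sp() + 1 adjustment encodes where the bang happens in a compiled prologue: the return pc has been pushed, but no frame has been built yet, so the interrupted sp still points at that slot. Conceptual x86 layout (sketch):

//   [ return pc  ]  <- fr->sp()      caller's pc = *(fr->sp())
//   [ caller ... ]  <- fr->sp() + 1  caller's sp
// hence frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp())) rebuilds the
// caller's frame from the interrupted context.
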
+
 //-----------------------------------------------------------------------------
 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
@@ -2384,7 +2427,7 @@
   address pc = (address) exceptionInfo->ContextRecord->Eip;
   #endif
 #endif
-  Thread* t = ThreadLocalStorage::get_thread_slow();          // slow & steady
+  Thread* t = Thread::current_or_null_safe();
 
   // Handle SafeFetch32 and SafeFetchN exceptions.
   if (StubRoutines::is_safefetch_fault(pc)) {
@@ -2540,7 +2583,16 @@
                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
         }
 #endif
-        if (thread->stack_yellow_zone_enabled()) {
+        if (thread->stack_guards_enabled()) {
+          if (_thread_in_Java) {
+            frame fr;
+            PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
+            address addr = (address) exceptionRecord->ExceptionInformation[1];
+            if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
+              assert(fr.is_java_frame(), "Must be a Java frame");
+              SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+            }
+          }
           // Yellow zone violation.  The o/s has unprotected the first yellow
           // zone page for us.  Note:  must call disable_stack_yellow_zone to
           // update the enabled status, even if the zone contains only one page.
@@ -4011,27 +4063,6 @@
   return result == IDYES;
 }
 
-int os::allocate_thread_local_storage() {
-  return TlsAlloc();
-}
-
-
-void os::free_thread_local_storage(int index) {
-  TlsFree(index);
-}
-
-
-void os::thread_local_storage_at_put(int index, void* value) {
-  TlsSetValue(index, value);
-  assert(thread_local_storage_at(index) == value, "Just checking");
-}
-
-
-void* os::thread_local_storage_at(int index) {
-  return TlsGetValue(index);
-}
-
-
 #ifndef PRODUCT
 #ifndef _WIN64
 // Helpers to check whether NX protection is enabled
@@ -4079,6 +4110,9 @@
     fatal("DuplicateHandle failed\n");
   }
   main_thread_id = (int) GetCurrentThreadId();
+
+  // initialize fast thread access - only used for 32-bit
+  win32::initialize_thread_ptr_offset();
 }
 
 // To install functions for atexit processing
@@ -5177,9 +5211,7 @@
     }
   }
 
-  JavaThread* thread = (JavaThread*)(Thread::current());
-  assert(thread->is_Java_thread(), "Must be JavaThread");
-  JavaThread *jt = (JavaThread *)thread;
+  JavaThread* thread = JavaThread::current();
 
   // Don't wait if interrupted or already triggered
   if (Thread::is_interrupted(thread, false) ||
@@ -5187,16 +5219,16 @@
     ResetEvent(_ParkEvent);
     return;
   } else {
-    ThreadBlockInVM tbivm(jt);
+    ThreadBlockInVM tbivm(thread);
     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
-    jt->set_suspend_equivalent();
+    thread->set_suspend_equivalent();
 
     WaitForSingleObject(_ParkEvent, time);
     ResetEvent(_ParkEvent);
 
     // If externally suspended while waiting, re-suspend
-    if (jt->handle_special_suspend_equivalent_condition()) {
-      jt->java_suspend_self();
+    if (thread->handle_special_suspend_equivalent_condition()) {
+      thread->java_suspend_self();
     }
   }
 }
@@ -5299,7 +5331,7 @@
   DWORD exception_code = e->ExceptionRecord->ExceptionCode;
 
   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
-    JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
+    JavaThread* thread = JavaThread::current();
     PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
     address addr = (address) exceptionRecord->ExceptionInformation[1];
 
@@ -5539,8 +5571,6 @@
   return yes;
 }
 
-#ifndef JDK6_OR_EARLIER
-
 void os::Kernel32Dll::initialize() {
   initializeCommon();
 }
@@ -5715,261 +5745,6 @@
   return agent_entry_name;
 }
 
-#else
-// Kernel32 API
-typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
-typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD, DWORD);
-typedef BOOL (WINAPI* Module32First_Fn)(HANDLE, LPMODULEENTRY32);
-typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE, LPMODULEENTRY32);
-typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);
-
-SwitchToThread_Fn           os::Kernel32Dll::_SwitchToThread = NULL;
-CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
-Module32First_Fn            os::Kernel32Dll::_Module32First = NULL;
-Module32Next_Fn             os::Kernel32Dll::_Module32Next = NULL;
-GetNativeSystemInfo_Fn      os::Kernel32Dll::_GetNativeSystemInfo = NULL;
-
-void os::Kernel32Dll::initialize() {
-  if (!initialized) {
-    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
-    assert(handle != NULL, "Just check");
-
-    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
-    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
-      ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
-    _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
-    _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
-    _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
-    initializeCommon();  // resolve the functions that always need resolving
-
-    initialized = TRUE;
-  }
-}
-
-BOOL os::Kernel32Dll::SwitchToThread() {
-  assert(initialized && _SwitchToThread != NULL,
-         "SwitchToThreadAvailable() not yet called");
-  return _SwitchToThread();
-}
-
-
-BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
-  if (!initialized) {
-    initialize();
-  }
-  return _SwitchToThread != NULL;
-}
-
-// Help tools
-BOOL os::Kernel32Dll::HelpToolsAvailable() {
-  if (!initialized) {
-    initialize();
-  }
-  return _CreateToolhelp32Snapshot != NULL &&
-         _Module32First != NULL &&
-         _Module32Next != NULL;
-}
-
-HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,
-                                                 DWORD th32ProcessId) {
-  assert(initialized && _CreateToolhelp32Snapshot != NULL,
-         "HelpToolsAvailable() not yet called");
-
-  return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
-}
-
-BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
-  assert(initialized && _Module32First != NULL,
-         "HelpToolsAvailable() not yet called");
-
-  return _Module32First(hSnapshot, lpme);
-}
-
-inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,
-                                          LPMODULEENTRY32 lpme) {
-  assert(initialized && _Module32Next != NULL,
-         "HelpToolsAvailable() not yet called");
-
-  return _Module32Next(hSnapshot, lpme);
-}
-
-
-BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
-  if (!initialized) {
-    initialize();
-  }
-  return _GetNativeSystemInfo != NULL;
-}
-
-void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
-  assert(initialized && _GetNativeSystemInfo != NULL,
-         "GetNativeSystemInfoAvailable() not yet called");
-
-  _GetNativeSystemInfo(lpSystemInfo);
-}
-
-// PSAPI API
-
-
-typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD);
-typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);
-typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD);
-
-EnumProcessModules_Fn   os::PSApiDll::_EnumProcessModules = NULL;
-GetModuleFileNameEx_Fn  os::PSApiDll::_GetModuleFileNameEx = NULL;
-GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL;
-BOOL                    os::PSApiDll::initialized = FALSE;
-
-void os::PSApiDll::initialize() {
-  if (!initialized) {
-    HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0);
-    if (handle != NULL) {
-      _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle,
-                                                                    "EnumProcessModules");
-      _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle,
-                                                                      "GetModuleFileNameExA");
-      _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle,
-                                                                        "GetModuleInformation");
-    }
-    initialized = TRUE;
-  }
-}
-
-
-
-BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule,
-                                      DWORD cb, LPDWORD lpcbNeeded) {
-  assert(initialized && _EnumProcessModules != NULL,
-         "PSApiAvailable() not yet called");
-  return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
-}
-
-DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule,
-                                        LPTSTR lpFilename, DWORD nSize) {
-  assert(initialized && _GetModuleFileNameEx != NULL,
-         "PSApiAvailable() not yet called");
-  return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
-}
-
-BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule,
-                                        LPMODULEINFO lpmodinfo, DWORD cb) {
-  assert(initialized && _GetModuleInformation != NULL,
-         "PSApiAvailable() not yet called");
-  return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
-}
-
-BOOL os::PSApiDll::PSApiAvailable() {
-  if (!initialized) {
-    initialize();
-  }
-  return _EnumProcessModules != NULL &&
-    _GetModuleFileNameEx != NULL &&
-    _GetModuleInformation != NULL;
-}
-
-
-// WinSock2 API
-typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA);
-typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...);
-
-WSAStartup_Fn    os::WinSock2Dll::_WSAStartup = NULL;
-gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL;
-BOOL             os::WinSock2Dll::initialized = FALSE;
-
-void os::WinSock2Dll::initialize() {
-  if (!initialized) {
-    HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0);
-    if (handle != NULL) {
-      _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup");
-      _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname");
-    }
-    initialized = TRUE;
-  }
-}
-
-
-BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
-  assert(initialized && _WSAStartup != NULL,
-         "WinSock2Available() not yet called");
-  return _WSAStartup(wVersionRequested, lpWSAData);
-}
-
-struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
-  assert(initialized && _gethostbyname != NULL,
-         "WinSock2Available() not yet called");
-  return _gethostbyname(name);
-}
-
-BOOL os::WinSock2Dll::WinSock2Available() {
-  if (!initialized) {
-    initialize();
-  }
-  return _WSAStartup != NULL &&
-    _gethostbyname != NULL;
-}
-
-typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
-typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE);
-typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID);
-
-AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL;
-OpenProcessToken_Fn      os::Advapi32Dll::_OpenProcessToken = NULL;
-LookupPrivilegeValue_Fn  os::Advapi32Dll::_LookupPrivilegeValue = NULL;
-BOOL                     os::Advapi32Dll::initialized = FALSE;
-
-void os::Advapi32Dll::initialize() {
-  if (!initialized) {
-    HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0);
-    if (handle != NULL) {
-      _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle,
-                                                                          "AdjustTokenPrivileges");
-      _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle,
-                                                                "OpenProcessToken");
-      _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle,
-                                                                        "LookupPrivilegeValueA");
-    }
-    initialized = TRUE;
-  }
-}
-
-BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
-                                            BOOL DisableAllPrivileges,
-                                            PTOKEN_PRIVILEGES NewState,
-                                            DWORD BufferLength,
-                                            PTOKEN_PRIVILEGES PreviousState,
-                                            PDWORD ReturnLength) {
-  assert(initialized && _AdjustTokenPrivileges != NULL,
-         "AdvapiAvailable() not yet called");
-  return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
-                                BufferLength, PreviousState, ReturnLength);
-}
-
-BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle,
-                                       DWORD DesiredAccess,
-                                       PHANDLE TokenHandle) {
-  assert(initialized && _OpenProcessToken != NULL,
-         "AdvapiAvailable() not yet called");
-  return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
-}
-
-BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName,
-                                           LPCTSTR lpName, PLUID lpLuid) {
-  assert(initialized && _LookupPrivilegeValue != NULL,
-         "AdvapiAvailable() not yet called");
-  return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
-}
-
-BOOL os::Advapi32Dll::AdvapiAvailable() {
-  if (!initialized) {
-    initialize();
-  }
-  return _AdjustTokenPrivileges != NULL &&
-    _OpenProcessToken != NULL &&
-    _LookupPrivilegeValue != NULL;
-}
-
-#endif
-
 #ifndef PRODUCT
 
 // test the code path in reserve_memory_special() that tries to allocate memory in a single
@@ -5986,7 +5761,7 @@
 void TestReserveMemorySpecial_test() {
   if (!UseLargePages) {
     if (VerboseInternalVMTests) {
-      gclog_or_tty->print("Skipping test because large pages are disabled");
+      tty->print("Skipping test because large pages are disabled");
     }
     return;
   }
@@ -6002,7 +5777,7 @@
   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
   if (result == NULL) {
     if (VerboseInternalVMTests) {
-      gclog_or_tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
+      tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
                           large_allocation_size);
     }
   } else {
@@ -6015,7 +5790,7 @@
     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
     if (actual_location == NULL) {
       if (VerboseInternalVMTests) {
-        gclog_or_tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
+        tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
                             expected_location, large_allocation_size);
       }
     } else {
@@ -6033,3 +5808,48 @@
   UseNUMAInterleaving = old_use_numa_interleaving;
 }
 #endif // PRODUCT
+
+/*
+  All the defined signal names for Windows.
+
+  NOTE that not all of these names are accepted by FindSignal;
+  for various reasons some of them may be rejected at runtime.
+
+  Here are the names currently accepted by a user of sun.misc.Signal with
+  1.4.1 (ignoring potential interaction with use of chaining, etc):
+
+     (LIST TBD)
+
+*/
+int os::get_signal_number(const char* name) {
+  static const struct {
+    const char* name;
+    int         number;
+  } siglabels [] =
+    // derived from version 6.0 VC98/include/signal.h
+  {{"ABRT",     SIGABRT},       // abnormal termination triggered by abort call
+   {"FPE",      SIGFPE},        // floating point exception
+   {"SEGV",     SIGSEGV},       // segment violation
+   {"INT",      SIGINT},        // interrupt
+   {"TERM",     SIGTERM},       // software term signal from kill
+   {"BREAK",    SIGBREAK},      // Ctrl-Break sequence
+   {"ILL",      SIGILL}};       // illegal instruction
+  for (unsigned i = 0; i < sizeof(siglabels) / sizeof(siglabels[0]); i++) {
+    if (strcmp(name, siglabels[i].name) == 0) {
+      return siglabels[i].number;
+    }
+  }
+  return -1;
+}
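The table-lookup idiom above is easy to get wrong (the original hunk divided by `sizeof(struct siglabel)`, a type tag that is never declared). A minimal standalone analogue, hypothetical and not part of the changeset, showing the corrected sizeof(array)/sizeof(array[0]) element count:

    #include <csignal>
    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    // Standalone analogue of os::get_signal_number(): map a signal name to
    // its number with a static table. The element count comes from
    // sizeof(table)/sizeof(table[0]), valid for any complete array type.
    static int find_signal(const char* name) {
      static const struct { const char* name; int number; } table[] = {
        { "INT",  SIGINT  },
        { "TERM", SIGTERM },
        { "ABRT", SIGABRT },
      };
      for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        if (strcmp(name, table[i].name) == 0) return table[i].number;
      }
      return -1;  // unknown name, same convention as above
    }

    int main() {
      std::printf("TERM -> %d\n", find_signal("TERM"));  // e.g. 15 on Windows
      return 0;
    }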
+
+// Fast current thread access
+
+int os::win32::_thread_ptr_offset = 0;
+
+static void call_wrapper_dummy() {}
+
+// We need to call the os_exception_wrapper once so that it sets
+// up the FS-relative offset of the thread pointer.
+void os::win32::initialize_thread_ptr_offset() {
+  os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
+                           NULL, NULL, NULL, NULL);
+}
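For context, a hedged sketch of what this offset buys on 32-bit Windows: once _thread_ptr_offset is known, the current thread can be read with a single load relative to the FS segment (the Thread Information Block). The wrapping function below is hypothetical and uses the MSVC x86 intrinsic __readfsdword; only the get_thread_ptr_offset() accessor comes from this change.

    #include <intrin.h>

    // Hypothetical illustration only (32-bit MSVC): read the current Thread*
    // the same way generated code would, as one FS-relative load.
    static void* current_thread_via_tib() {
      return (void*)__readfsdword(os::win32::get_thread_ptr_offset());
    }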
--- a/src/os/windows/vm/os_windows.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os/windows/vm/os_windows.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -110,6 +110,10 @@
   // Default stack size for the current process.
   static size_t default_stack_size() { return _default_stack_size; }
 
+  static bool get_frame_at_stack_banging_point(JavaThread* thread,
+                          struct _EXCEPTION_POINTERS* exceptionInfo,
+                          address pc, frame* fr);
+
 #ifndef _WIN64
  // A wrapper to install a structured exception handler for fast JNI accessors.
   static address fast_jni_accessor_wrapper(BasicType);
@@ -117,6 +121,17 @@
 
   // filter function to ignore faults on serializations page
   static LONG WINAPI serialize_fault_filter(struct _EXCEPTION_POINTERS* e);
+
+  // Fast access to current thread
+protected:
+  static int _thread_ptr_offset;
+private:
+  static void initialize_thread_ptr_offset();
+public:
+  static inline void set_thread_ptr_offset(int offset) {
+    _thread_ptr_offset = offset;
+  }
+  static inline int get_thread_ptr_offset() { return _thread_ptr_offset; }
 };
 
 /*
@@ -172,26 +187,11 @@
 
 } ;
 
-// JDK7 requires VS2010
-#if _MSC_VER < 1600
-#define JDK6_OR_EARLIER 1
-#endif
-
-
-
 class WinSock2Dll: AllStatic {
 public:
   static BOOL WSAStartup(WORD, LPWSADATA);
   static struct hostent* gethostbyname(const char *name);
   static BOOL WinSock2Available();
-#ifdef JDK6_OR_EARLIER
-private:
-  static int (PASCAL FAR* _WSAStartup)(WORD, LPWSADATA);
-  static struct hostent *(PASCAL FAR *_gethostbyname)(...);
-  static BOOL initialized;
-
-  static void initialize();
-#endif
 };
 
 class Kernel32Dll: AllStatic {
@@ -233,16 +233,6 @@
 
   static void initialize();
   static void initializeCommon();
-
-#ifdef JDK6_OR_EARLIER
-private:
-  static BOOL (WINAPI *_SwitchToThread)(void);
-  static HANDLE (WINAPI* _CreateToolhelp32Snapshot)(DWORD,DWORD);
-  static BOOL (WINAPI* _Module32First)(HANDLE,LPMODULEENTRY32);
-  static BOOL (WINAPI* _Module32Next)(HANDLE,LPMODULEENTRY32);
-  static void (WINAPI *_GetNativeSystemInfo)(LPSYSTEM_INFO);
-#endif
-
 };
 
 class Advapi32Dll: AllStatic {
@@ -252,16 +242,6 @@
   static BOOL LookupPrivilegeValue(LPCTSTR, LPCTSTR, PLUID);
 
   static BOOL AdvapiAvailable();
-
-#ifdef JDK6_OR_EARLIER
-private:
-  static BOOL (WINAPI *_AdjustTokenPrivileges)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
-  static BOOL (WINAPI *_OpenProcessToken)(HANDLE, DWORD, PHANDLE);
-  static BOOL (WINAPI *_LookupPrivilegeValue)(LPCTSTR, LPCTSTR, PLUID);
-  static BOOL initialized;
-
-  static void initialize();
-#endif
 };
 
 class PSApiDll: AllStatic {
@@ -271,16 +251,6 @@
   static BOOL GetModuleInformation(HANDLE, HMODULE, LPMODULEINFO, DWORD);
 
   static BOOL PSApiAvailable();
-
-#ifdef JDK6_OR_EARLIER
-private:
-  static BOOL (WINAPI *_EnumProcessModules)(HANDLE, HMODULE *, DWORD, LPDWORD);
-  static BOOL (WINAPI *_GetModuleFileNameEx)(HANDLE, HMODULE, LPTSTR, DWORD);;
-  static BOOL (WINAPI *_GetModuleInformation)(HANDLE, HMODULE, LPMODULEINFO, DWORD);
-  static BOOL initialized;
-
-  static void initialize();
-#endif
 };
 
 #endif // OS_WINDOWS_VM_OS_WINDOWS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/windows/vm/sharedRuntimeRem.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,162 @@
+/*
+* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*
+*/
+
+#include "precompiled.hpp"
+
+#ifdef _WIN64
+// These are copied defines from fdlibm.h, this allows us to keep the code
+// the same as in the JDK, for easier maintenance.
+
+#define __HI(x) *(1+(int*)&x)
+#define __LO(x) *(int*)&x
+
+// This code is a copy of __ieee754_fmod() from the JDK's libfdlibm and is
+// used as a workaround for issues with the Windows x64 CRT implementation
+// of fmod. Microsoft has acknowledged that this is an issue in Visual Studio
+// 2012 and forward, but has not provided a time frame for a fix other than that
+// it'll not be fixed in Visual Studio 2013 or 2015.
+
+static const double one = 1.0, Zero[] = { 0.0, -0.0, };
+
+double SharedRuntime::fmod_winx64(double x, double y)
+{
+  int n, hx, hy, hz, ix, iy, sx, i;
+  unsigned lx, ly, lz;
+
+  hx = __HI(x);           /* high word of x */
+  lx = __LO(x);           /* low  word of x */
+  hy = __HI(y);           /* high word of y */
+  ly = __LO(y);           /* low  word of y */
+  sx = hx & 0x80000000;             /* sign of x */
+  hx ^= sx;                /* |x| */
+  hy &= 0x7fffffff;       /* |y| */
+
+#pragma warning( disable : 4146 )
+  /* purge off exception values */
+  if ((hy | ly) == 0 || (hx >= 0x7ff00000) ||       /* y=0,or x not finite */
+    ((hy | ((ly | -ly) >> 31))>0x7ff00000))     /* or y is NaN */
+#pragma warning( default : 4146 )
+    return (x*y) / (x*y);
+  if (hx <= hy) {
+    if ((hx<hy) || (lx<ly)) return x;      /* |x|<|y| return x */
+    if (lx == ly)
+      return Zero[(unsigned)sx >> 31];  /* |x|=|y| return x*0*/
+  }
+
+  /* determine ix = ilogb(x) */
+  if (hx<0x00100000) {     /* subnormal x */
+    if (hx == 0) {
+      for (ix = -1043, i = lx; i>0; i <<= 1) ix -= 1;
+    }
+    else {
+      for (ix = -1022, i = (hx << 11); i>0; i <<= 1) ix -= 1;
+    }
+  }
+  else ix = (hx >> 20) - 1023;
+
+  /* determine iy = ilogb(y) */
+  if (hy<0x00100000) {     /* subnormal y */
+    if (hy == 0) {
+      for (iy = -1043, i = ly; i>0; i <<= 1) iy -= 1;
+    }
+    else {
+      for (iy = -1022, i = (hy << 11); i>0; i <<= 1) iy -= 1;
+    }
+  }
+  else iy = (hy >> 20) - 1023;
+
+  /* set up {hx,lx}, {hy,ly} and align y to x */
+  if (ix >= -1022)
+    hx = 0x00100000 | (0x000fffff & hx);
+  else {          /* subnormal x, shift x to normal */
+    n = -1022 - ix;
+    if (n <= 31) {
+      hx = (hx << n) | (lx >> (32 - n));
+      lx <<= n;
+    }
+    else {
+      hx = lx << (n - 32);
+      lx = 0;
+    }
+  }
+  if (iy >= -1022)
+    hy = 0x00100000 | (0x000fffff & hy);
+  else {          /* subnormal y, shift y to normal */
+    n = -1022 - iy;
+    if (n <= 31) {
+      hy = (hy << n) | (ly >> (32 - n));
+      ly <<= n;
+    }
+    else {
+      hy = ly << (n - 32);
+      ly = 0;
+    }
+  }
+
+  /* fix point fmod */
+  n = ix - iy;
+  while (n--) {
+    hz = hx - hy; lz = lx - ly; if (lx<ly) hz -= 1;
+    if (hz<0){ hx = hx + hx + (lx >> 31); lx = lx + lx; }
+    else {
+      if ((hz | lz) == 0)          /* return sign(x)*0 */
+        return Zero[(unsigned)sx >> 31];
+      hx = hz + hz + (lz >> 31); lx = lz + lz;
+    }
+  }
+  hz = hx - hy; lz = lx - ly; if (lx<ly) hz -= 1;
+  if (hz >= 0) { hx = hz; lx = lz; }
+
+  /* convert back to floating value and restore the sign */
+  if ((hx | lx) == 0)                  /* return sign(x)*0 */
+    return Zero[(unsigned)sx >> 31];
+  while (hx<0x00100000) {          /* normalize x */
+    hx = hx + hx + (lx >> 31); lx = lx + lx;
+    iy -= 1;
+  }
+  if (iy >= -1022) {        /* normalize output */
+    hx = ((hx - 0x00100000) | ((iy + 1023) << 20));
+    __HI(x) = hx | sx;
+    __LO(x) = lx;
+  }
+  else {                /* subnormal output */
+    n = -1022 - iy;
+    if (n <= 20) {
+      lx = (lx >> n) | ((unsigned)hx << (32 - n));
+      hx >>= n;
+    }
+    else if (n <= 31) {
+      lx = (hx << (32 - n)) | (lx >> n); hx = sx;
+    }
+    else {
+      lx = hx >> (n - 32); hx = sx;
+    }
+    __HI(x) = hx | sx;
+    __LO(x) = lx;
+    x *= one;           /* create necessary signal */
+  }
+  return x;               /* exact output */
+}
+
+#endif
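Since fmod is an exact operation, the fdlibm port above can be spot-checked against any correct fmod. A hypothetical harness follows; the call to SharedRuntime::fmod_winx64 is left commented out because it needs the VM sources, and std::fmod stands in as the reference on an unaffected platform.

    #include <cmath>
    #include <cstdio>

    // Hypothetical spot-check: fmod is exact, so a correct implementation
    // must agree bit-for-bit with a known-good reference.
    int main() {
      const double xs[] = { 5.5, -5.5, 1e308, 6.5e-310 };  // last pair is subnormal
      const double ys[] = { 2.0, 3.25, 7.0,   3.0e-310 };
      for (int i = 0; i < 4; i++) {
        double expected = std::fmod(xs[i], ys[i]);
        // double actual = SharedRuntime::fmod_winx64(xs[i], ys[i]);
        std::printf("fmod(%g, %g) = %g\n", xs[i], ys[i], expected);
      }
      return 0;
    }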
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/windows/vm/threadLocalStorage_windows.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/threadLocalStorage.hpp"
+#include <windows.h>
+
+static DWORD _thread_key;
+static bool _initialized = false;
+
+
+void ThreadLocalStorage::init() {
+  assert(!_initialized, "initializing TLS more than once!");
+  _thread_key = TlsAlloc();
+  // If this assert fails we will get a recursive assertion failure
+  // and not see the actual error message or get a hs_err file
+  assert(_thread_key != TLS_OUT_OF_INDEXES, "TlsAlloc failed: out of indices");
+  _initialized = true;
+}
+
+bool ThreadLocalStorage::is_initialized() {
+  return _initialized;
+}
+
+Thread* ThreadLocalStorage::thread() {
+  // If this assert fails we will get a recursive assertion failure
+  // and not see the actual error message or get a hs_err file.
+  // Which most likely indicates we have taken an error path early in
+  // the initialization process, which is using Thread::current without
+  // checking TLS is initialized - see java.cpp vm_exit
+  assert(_initialized, "TLS not initialized yet!");
+  Thread* current = (Thread*) TlsGetValue(_thread_key);
+  assert(current != 0 || GetLastError() == ERROR_SUCCESS,
+         "TlsGetValue failed with error code: %lu", GetLastError());
+  return current;
+}
+
+void ThreadLocalStorage::set_thread(Thread* current) {
+  assert(_initialized, "TLS not initialized yet!");
+  BOOL res = TlsSetValue(_thread_key, current);
+  assert(res, "TlsSetValue failed with error code: %lu", GetLastError());
+}
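A minimal standalone analogue of this lifecycle, using the same three Win32 calls (hypothetical, for illustration; single-threaded where the VM would set a distinct Thread* per thread):

    #include <windows.h>
    #include <cstdio>

    int main() {
      DWORD key = TlsAlloc();                  // ThreadLocalStorage::init()
      if (key == TLS_OUT_OF_INDEXES) return 1; // same failure check as above
      int value = 42;
      TlsSetValue(key, &value);                // ThreadLocalStorage::set_thread()
      int* back = (int*)TlsGetValue(key);      // ThreadLocalStorage::thread()
      std::printf("%d\n", *back);              // prints 42
      TlsFree(key);
      return 0;
    }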
--- a/src/os/windows/vm/thread_windows.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_WINDOWS_VM_THREAD_WINDOWS_INLINE_HPP
-#define OS_WINDOWS_VM_THREAD_WINDOWS_INLINE_HPP
-
-#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
-#error "This file should only be included from thread.inline.hpp"
-#endif
-
-#include "runtime/thread.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Contains inlined functions for class Thread and ThreadLocalStorage
-
-inline void ThreadLocalStorage::pd_invalidate_all()            { return; }
-
-#endif // OS_WINDOWS_VM_THREAD_WINDOWS_INLINE_HPP
--- a/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -291,6 +291,71 @@
   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 }
 
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_acquire'
+  // (see atomic.hpp).
+
+  // Using 32 bit internally.
+  volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
+
+#ifdef VM_LITTLE_ENDIAN
+  const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
+#else
+  const unsigned int shift_amount        = ((~(uintptr_t)dest) & 3) * 8;
+#endif
+  const unsigned int masked_compare_val  = ((unsigned int)(unsigned char)compare_value),
+                     masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
+                     xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
+
+  unsigned int old_value, value32;
+
+  __asm__ __volatile__ (
+    /* fence */
+    strasm_sync
+    /* simple guard */
+    "   lbz     %[old_value], 0(%[dest])                  \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* atomic loop */
+    "1:                                                   \n"
+    "   lwarx   %[value32], 0, %[dest_base]               \n"
+    /* extract byte and compare */
+    "   srd     %[old_value], %[value32], %[shift_amount] \n"
+    "   clrldi  %[old_value], %[old_value], 56            \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* replace byte and try to store */
+    "   xor     %[value32], %[xor_value], %[value32]      \n"
+    "   stwcx.  %[value32], 0, %[dest_base]               \n"
+    "   bne-    1b                                        \n"
+    /* acquire */
+    strasm_sync
+    /* exit */
+    "2:                                                   \n"
+    /* out */
+    : [old_value]           "=&r"   (old_value),
+      [value32]             "=&r"   (value32),
+                            "=m"    (*dest),
+                            "=m"    (*dest_base)
+    /* in */
+    : [dest]                "b"     (dest),
+      [dest_base]           "b"     (dest_base),
+      [shift_amount]        "r"     (shift_amount),
+      [masked_compare_val]  "r"     (masked_compare_val),
+      [xor_value]           "r"     (xor_value),
+                            "m"     (*dest),
+                            "m"     (*dest_base)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (jbyte)(unsigned char)old_value;
+}
+
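The assembly above implements a byte-wide compare-and-exchange on top of the word-wide lwarx/stwcx. pair. A rough portable sketch of the same technique, written with C++11 atomics over an aligned word and assuming little-endian byte numbering (hypothetical, not the HotSpot code):

    #include <atomic>
    #include <cstdint>

    // Emulate a single-byte CAS with a 32-bit CAS on the enclosing word.
    // byte_index selects the byte within the word (little-endian numbering).
    static uint8_t cmpxchg_byte(std::atomic<uint32_t>& word, unsigned byte_index,
                                uint8_t compare_value, uint8_t exchange_value) {
      const unsigned shift = byte_index * 8;
      uint32_t old_word = word.load();
      for (;;) {
        uint8_t old_byte = (uint8_t)(old_word >> shift);
        if (old_byte != compare_value) {
          return old_byte;             // simple guard, like the lbz/cmpw prologue
        }
        uint32_t new_word = (old_word & ~(0xFFu << shift))
                          | ((uint32_t)exchange_value << shift);
        if (word.compare_exchange_weak(old_word, new_word)) {
          return old_byte;             // success, like stwcx. completing
        }
        // compare_exchange_weak reloaded old_word; retry, like the bne- 1b loop
      }
    }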
 inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
 
   // Note that cmpxchg guarantees a two-way memory barrier across
--- a/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -28,6 +28,7 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
@@ -97,12 +98,12 @@
   return (address)uc->uc_mcontext.jmp_context.iar;
 }
 
-intptr_t* os::Aix::ucontext_get_sp(ucontext_t * uc) {
+intptr_t* os::Aix::ucontext_get_sp(const ucontext_t * uc) {
   // gpr1 holds the stack pointer on aix
   return (intptr_t*)uc->uc_mcontext.jmp_context.gpr[1/*REG_SP*/];
 }
 
-intptr_t* os::Aix::ucontext_get_fp(ucontext_t * uc) {
+intptr_t* os::Aix::ucontext_get_fp(const ucontext_t * uc) {
   return NULL;
 }
 
@@ -110,11 +111,11 @@
   uc->uc_mcontext.jmp_context.iar = (uint64_t) new_pc;
 }
 
-ExtendedPC os::fetch_frame_from_context(void* ucVoid,
+ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                                         intptr_t** ret_sp, intptr_t** ret_fp) {
 
   ExtendedPC  epc;
-  ucontext_t* uc = (ucontext_t*)ucVoid;
+  const ucontext_t* uc = (const ucontext_t*)ucVoid;
 
   if (uc != NULL) {
     epc = ExtendedPC(os::Aix::ucontext_get_pc(uc));
@@ -130,7 +131,7 @@
   return epc;
 }
 
-frame os::fetch_frame_from_context(void* ucVoid) {
+frame os::fetch_frame_from_context(const void* ucVoid) {
   intptr_t* sp;
   intptr_t* fp;
   ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
@@ -167,7 +168,7 @@
 
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();   // slow & steady
+  Thread* t = Thread::current_or_null_safe();
 
   SignalHandlerMark shm(t);
 
@@ -506,10 +507,10 @@
 /////////////////////////////////////////////////////////////////////////////
 // helper functions for fatal error handler
 
-void os::print_context(outputStream *st, void *context) {
+void os::print_context(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  ucontext_t* uc = (ucontext_t*)context;
+  const ucontext_t* uc = (const ucontext_t*)context;
 
   st->print_cr("Registers:");
   st->print("pc =" INTPTR_FORMAT "  ", uc->uc_mcontext.jmp_context.iar);
@@ -543,9 +544,23 @@
   st->cr();
 }
 
-void os::print_register_info(outputStream *st, void *context) {
+void os::print_register_info(outputStream *st, const void *context) {
   if (context == NULL) return;
-  st->print("Not ported - print_register_info\n");
+
+  ucontext_t *uc = (ucontext_t*)context;
+
+  st->print_cr("Register to memory mapping:");
+  st->cr();
+
+  st->print("pc ="); print_location(st, (intptr_t)uc->uc_mcontext.jmp_context.iar);
+  st->print("lr ="); print_location(st, (intptr_t)uc->uc_mcontext.jmp_context.lr);
+  st->print("sp ="); print_location(st, (intptr_t)os::Aix::ucontext_get_sp(uc));
+  for (int i = 0; i < 32; i++) {
+    st->print("r%-2d=", i);
+    print_location(st, (intptr_t)uc->uc_mcontext.jmp_context.gpr[i]);
+  }
+
+  st->cr();
 }
 
 extern "C" {
@@ -564,3 +579,4 @@
   // PPC does not require the additional stack bang.
   return 0;
 }
+
--- a/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/threadLocalStorage.hpp"
-#include "runtime/thread.hpp"
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-  // Nothing we can do here for user-level thread.
-}
-
-void ThreadLocalStorage::pd_init() {
-  // Nothing to do.
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
--- a/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
-#define OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
-
-  // Processor dependent parts of ThreadLocalStorage
-
-public:
-  static Thread* thread() {
-    return (Thread *) os::thread_local_storage_at(thread_index());
-  }
-
-#endif // OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
--- a/src/os_cpu/aix_ppc/vm/vmStructs_aix_ppc.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/aix_ppc/vm/vmStructs_aix_ppc.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -35,8 +35,7 @@
   /******************************/                                                                                                   \
   /* Threads (NOTE: incomplete) */                                                                                                   \
   /******************************/                                                                                                   \
-  nonstatic_field(OSThread,                      _thread_id,                                      pid_t)                             \
-  nonstatic_field(OSThread,                      _pthread_id,                                     pthread_t)
+  nonstatic_field(OSThread,                      _thread_id,                                      pthread_t)                         \
 
 
 #define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
@@ -45,7 +44,6 @@
   /* Posix Thread IDs   */                                                \
   /**********************/                                                \
                                                                           \
-  declare_integer_type(pid_t)                                             \
   declare_unsigned_integer_type(pthread_t)
 
 #define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
--- a/src/os_cpu/bsd_x86/vm/assembler_bsd_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/bsd_x86/vm/assembler_bsd_x86.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,62 +26,7 @@
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
 
-#ifndef _LP64
-void MacroAssembler::int3() {
-  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MacroAssembler::get_thread(Register thread) {
-  movl(thread, rsp);
-  shrl(thread, PAGE_SHIFT);
-
-  ExternalAddress tls_base((address)ThreadLocalStorage::sp_map_addr());
-  Address index(noreg, thread, Address::times_4);
-  ArrayAddress tls(tls_base, index);
-
-  movptr(thread, tls);
-}
-#else
 void MacroAssembler::int3() {
   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 }
-
-void MacroAssembler::get_thread(Register thread) {
-  // call pthread_getspecific
-  // void * pthread_getspecific(pthread_key_t key);
-   if (thread != rax) {
-     push(rax);
-   }
-   push(rdi);
-   push(rsi);
-   push(rdx);
-   push(rcx);
-   push(r8);
-   push(r9);
-   push(r10);
-   // XXX
-   mov(r10, rsp);
-   andq(rsp, -16);
-   push(r10);
-   push(r11);
-
-   movl(rdi, ThreadLocalStorage::thread_index());
-   call(RuntimeAddress(CAST_FROM_FN_PTR(address, pthread_getspecific)));
-
-   pop(r11);
-   pop(rsp);
-   pop(r10);
-   pop(r9);
-   pop(r8);
-   pop(rcx);
-   pop(rdx);
-   pop(rsi);
-   pop(rdi);
-   if (thread != rax) {
-       mov(thread, rax);
-       pop(rax);
-   }
-}
-#endif
--- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,6 +27,7 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
@@ -303,7 +304,7 @@
 // Nothing to do.
 }
 
-address os::Bsd::ucontext_get_pc(ucontext_t * uc) {
+address os::Bsd::ucontext_get_pc(const ucontext_t * uc) {
   return (address)uc->context_pc;
 }
 
@@ -311,11 +312,11 @@
   uc->context_pc = (intptr_t)pc ;
 }
 
-intptr_t* os::Bsd::ucontext_get_sp(ucontext_t * uc) {
+intptr_t* os::Bsd::ucontext_get_sp(const ucontext_t * uc) {
   return (intptr_t*)uc->context_sp;
 }
 
-intptr_t* os::Bsd::ucontext_get_fp(ucontext_t * uc) {
+intptr_t* os::Bsd::ucontext_get_fp(const ucontext_t * uc) {
   return (intptr_t*)uc->context_fp;
 }
 
@@ -324,8 +325,9 @@
 // os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
 // frames. Currently we don't do that on Bsd, so it's the same as
 // os::fetch_frame_from_context().
+// This method is also used for stack overflow signal handling.
 ExtendedPC os::Bsd::fetch_frame_from_ucontext(Thread* thread,
-  ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
+  const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
 
   assert(thread != NULL, "just checking");
   assert(ret_sp != NULL, "just checking");
@@ -334,11 +336,11 @@
   return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
 }
 
-ExtendedPC os::fetch_frame_from_context(void* ucVoid,
+ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                     intptr_t** ret_sp, intptr_t** ret_fp) {
 
   ExtendedPC  epc;
-  ucontext_t* uc = (ucontext_t*)ucVoid;
+  const ucontext_t* uc = (const ucontext_t*)ucVoid;
 
   if (uc != NULL) {
     epc = ExtendedPC(os::Bsd::ucontext_get_pc(uc));
@@ -354,13 +356,55 @@
   return epc;
 }
 
-frame os::fetch_frame_from_context(void* ucVoid) {
+frame os::fetch_frame_from_context(const void* ucVoid) {
   intptr_t* sp;
   intptr_t* fp;
   ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
   return frame(sp, fp, epc.pc());
 }
 
+frame os::fetch_frame_from_ucontext(Thread* thread, void* ucVoid) {
+  intptr_t* sp;
+  intptr_t* fp;
+  ExtendedPC epc = os::Bsd::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, &fp);
+  return frame(sp, fp, epc.pc());
+}
+
+bool os::Bsd::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+  address pc = (address) os::Bsd::ucontext_get_pc(uc);
+  if (Interpreter::contains(pc)) {
+    // The interpreter performs stack banging after the fixed frame header
+    // has been generated, while the compilers perform it before. To maintain
+    // semantic consistency between interpreted and compiled frames, this
+    // method returns the Java sender of the current frame.
+    *fr = os::fetch_frame_from_ucontext(thread, uc);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // more complex code with compiled code
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to, fallback to default
+      // stack overflow handling
+      return false;
+    } else {
+      *fr = os::fetch_frame_from_ucontext(thread, uc);
+      // in compiled code, the stack banging is performed just after the return pc
+      // has been pushed on the stack
+      *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
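The sp+1 / *(sp) fix-up above reflects that in compiled code the banging happens right after the return pc is pushed but before a full frame exists. A toy model of just that adjustment, hypothetical and using plain pointers rather than HotSpot's frame class:

    #include <cstdint>

    struct toy_frame {
      intptr_t* sp;   // stack pointer at the faulting instruction
      intptr_t* fp;   // caller's frame pointer, still intact
      intptr_t  pc;   // program counter
    };

    // The caller's frame starts one slot above sp, and the return pc is the
    // word sp points at -- mirroring frame(fr->sp() + 1, fr->fp(), *(fr->sp())).
    static toy_frame caller_frame(const toy_frame& f) {
      toy_frame r = { f.sp + 1, f.fp, *f.sp };
      return r;
    }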
 // By default, gcc always save frame pointer (%ebp/%rbp) on stack. It may get
 // turned off by -fomit-frame-pointer,
 frame os::get_sender_for_C_frame(frame* fr) {
@@ -405,7 +449,7 @@
                         int abort_if_unrecognized) {
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away
   // (no destructors can be run)
@@ -478,13 +522,31 @@
           addr >= thread->stack_base() - thread->stack_size()) {
         // stack overflow
         if (thread->in_stack_yellow_zone(addr)) {
-          thread->disable_stack_yellow_zone();
           if (thread->thread_state() == _thread_in_Java) {
+            if (thread->in_stack_reserved_zone(addr)) {
+              frame fr;
+              if (os::Bsd::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+                assert(fr.is_java_frame(), "Must be a Java frame");
+                frame activation = SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+                if (activation.sp() != NULL) {
+                  thread->disable_stack_reserved_zone();
+                  if (activation.is_interpreted_frame()) {
+                    thread->set_reserved_stack_activation((address)(
+                      activation.fp() + frame::interpreter_frame_initial_sp_offset));
+                  } else {
+                    thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                  }
+                  return 1;
+                }
+              }
+            }
             // Throw a stack overflow exception.  Guard pages will be reenabled
             // while unwinding the stack.
+            thread->disable_stack_yellow_zone();
             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
           } else {
             // Thread was in the vm or native code.  Return and try to finish.
+            thread->disable_stack_yellow_zone();
             return 1;
           }
         } else if (thread->in_stack_red_zone(addr)) {
@@ -909,10 +971,10 @@
 /////////////////////////////////////////////////////////////////////////////
 // helper functions for fatal error handler
 
-void os::print_context(outputStream *st, void *context) {
+void os::print_context(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  ucontext_t *uc = (ucontext_t*)context;
+  const ucontext_t *uc = (const ucontext_t*)context;
   st->print_cr("Registers:");
 #ifdef AMD64
   st->print(  "RAX=" INTPTR_FORMAT, uc->context_rax);
@@ -970,10 +1032,10 @@
   print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
 }
 
-void os::print_register_info(outputStream *st, void *context) {
+void os::print_register_info(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  ucontext_t *uc = (ucontext_t*)context;
+  const ucontext_t *uc = (const ucontext_t*)context;
 
   st->print_cr("Register to memory mapping:");
   st->cr();
--- a/src/os_cpu/bsd_x86/vm/threadLS_bsd_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Map stack pointer (%esp) to thread pointer for faster TLS access
-//
-// Here we use a flat table for better performance. Getting current thread
-// is down to one memory access (read _sp_map[%esp>>12]) in generated code
-// and two in runtime code (-fPIC code needs an extra load for _sp_map).
-//
-// This code assumes stack page is not shared by different threads. It works
-// in 32-bit VM when page size is 4K (or a multiple of 4K, if that matters).
-//
-// Notice that _sp_map is allocated in the bss segment, which is ZFOD
-// (zero-fill-on-demand). While it reserves 4M address space upfront,
-// actual memory pages are committed on demand.
-//
-// If an application creates and destroys a lot of threads, usually the
-// stack space freed by a thread will soon get reused by new thread
-// (this is especially true in NPTL or BsdThreads in fixed-stack mode).
-// No memory page in _sp_map is wasted.
-//
-// However, it's still possible that we might end up populating &
-// committing a large fraction of the 4M table over time, but the actual
-// amount of live data in the table could be quite small. The max wastage
-// is less than 4M bytes. If it becomes an issue, we could use madvise()
-// with MADV_DONTNEED to reclaim unused (i.e. all-zero) pages in _sp_map.
-// MADV_DONTNEED on Bsd keeps the virtual memory mapping, but zaps the
-// physical memory page (i.e. similar to MADV_FREE on Solaris).
-
-#ifndef AMD64
-Thread* ThreadLocalStorage::_sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
-#endif // !AMD64
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-    // nothing we can do here for user-level thread
-}
-
-void ThreadLocalStorage::pd_init() {
-#ifndef AMD64
-  assert(align_size_down(os::vm_page_size(), PAGE_SIZE) == os::vm_page_size(),
-         "page size must be multiple of PAGE_SIZE");
-#endif // !AMD64
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-
-#ifndef AMD64
-  address stack_top = os::current_stack_base();
-  size_t stack_size = os::current_stack_size();
-
-  for (address p = stack_top - stack_size; p < stack_top; p += PAGE_SIZE) {
-    // pd_set_thread() is called with non-NULL value when a new thread is
-    // created/attached, or with NULL value when a thread is about to exit.
-    // If both "thread" and the corresponding _sp_map[] entry are non-NULL,
-    // they should have the same value. Otherwise it might indicate that the
-    // stack page is shared by multiple threads. However, a more likely cause
-    // for this assertion to fail is that an attached thread exited without
-    // detaching itself from VM, which is a program error and could cause VM
-    // to crash.
-    assert(thread == NULL || _sp_map[(uintptr_t)p >> PAGE_SHIFT] == NULL ||
-           thread == _sp_map[(uintptr_t)p >> PAGE_SHIFT],
-           "thread exited without detaching from VM??");
-    _sp_map[(uintptr_t)p >> PAGE_SHIFT] = thread;
-  }
-#endif // !AMD64
-}
--- a/src/os_cpu/bsd_x86/vm/threadLS_bsd_x86.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_X86_VM_THREADLS_BSD_X86_HPP
-#define OS_CPU_BSD_X86_VM_THREADLS_BSD_X86_HPP
-
-  // Processor dependent parts of ThreadLocalStorage
-
-#ifndef AMD64
-  // map stack pointer to thread pointer - see notes in threadLS_bsd_x86.cpp
-  #define SP_BITLENGTH  32
-#ifndef PAGE_SHIFT
-  #define PAGE_SHIFT    12
-  #define PAGE_SIZE     (1UL << PAGE_SHIFT)
-#endif
-  static Thread* _sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
-#endif // !AMD64
-
-public:
-
-#ifndef AMD64
-  static Thread** sp_map_addr() { return _sp_map; }
-#endif // !AMD64
-
-  static Thread* thread() {
-#ifdef AMD64
-    return (Thread*) os::thread_local_storage_at(thread_index());
-#else
-    uintptr_t sp;
-    __asm__ volatile ("movl %%esp, %0" : "=r" (sp));
-    return _sp_map[sp >> PAGE_SHIFT];
-#endif // AMD64
-  }
-
-#endif // OS_CPU_BSD_X86_VM_THREADLS_BSD_X86_HPP
--- a/src/os_cpu/bsd_zero/vm/assembler_bsd_zero.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/bsd_zero/vm/assembler_bsd_zero.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -23,10 +23,4 @@
  *
  */
 
-#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_zero.inline.hpp"
-#include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
 // This file is intentionally empty
--- a/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -106,7 +106,7 @@
   // Nothing to do.
 }
 
-address os::Bsd::ucontext_get_pc(ucontext_t* uc) {
+address os::Bsd::ucontext_get_pc(const ucontext_t* uc) {
   ShouldNotCallThis();
   return NULL;
 }
@@ -115,14 +115,14 @@
   ShouldNotCallThis();
 }
 
-ExtendedPC os::fetch_frame_from_context(void* ucVoid,
+ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                                         intptr_t** ret_sp,
                                         intptr_t** ret_fp) {
   ShouldNotCallThis();
   return ExtendedPC();
 }
 
-frame os::fetch_frame_from_context(void* ucVoid) {
+frame os::fetch_frame_from_context(const void* ucVoid) {
   ShouldNotCallThis();
   return frame();
 }
@@ -134,7 +134,7 @@
                         int abort_if_unrecognized) {
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   SignalHandlerMark shm(t);
 
@@ -374,11 +374,11 @@
 /////////////////////////////////////////////////////////////////////////////
 // helper functions for fatal error handler
 
-void os::print_context(outputStream* st, void* context) {
+void os::print_context(outputStream* st, const void* context) {
   ShouldNotCallThis();
 }
 
-void os::print_register_info(outputStream *st, void *context) {
+void os::print_register_info(outputStream *st, const void *context) {
   ShouldNotCallThis();
 }
 
--- a/src/os_cpu/bsd_zero/vm/threadLS_bsd_zero.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-  // nothing to do
-}
-
-void ThreadLocalStorage::pd_init() {
-  // nothing to do
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
--- a/src/os_cpu/bsd_zero/vm/threadLS_bsd_zero.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_ZERO_VM_THREADLS_BSD_ZERO_HPP
-#define OS_CPU_BSD_ZERO_VM_THREADLS_BSD_ZERO_HPP
-
-// Processor dependent parts of ThreadLocalStorage
-
- public:
-  static Thread* thread() {
-    return (Thread*) os::thread_local_storage_at(thread_index());
-  }
-
-#endif // OS_CPU_BSD_ZERO_VM_THREADLS_BSD_ZERO_HPP
--- a/src/os_cpu/linux_aarch64/vm/assembler_linux_aarch64.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/linux_aarch64/vm/assembler_linux_aarch64.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -23,32 +23,6 @@
  *
  */
 
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
+// nothing required here
 
 
-// get_thread can be called anywhere inside generated code so we need
-// to save whatever non-callee save context might get clobbered by the
-// call to the C thread_local lookup call or, indeed, the call setup
-// code. x86 appears to save C arg registers.
-
-void MacroAssembler::get_thread(Register dst) {
-  // call pthread_getspecific
-  // void * pthread_getspecific(pthread_key_t key);
-
-  // Save all call-clobbered regs except dst, plus r19 and r20.
-  RegSet saved_regs = RegSet::range(r0, r20) + lr - dst;
-  push(saved_regs, sp);
-  mov(c_rarg0, ThreadLocalStorage::thread_index());
-  mov(r19, CAST_FROM_FN_PTR(address, pthread_getspecific));
-  blrt(r19, 1, 0, 1);
-  if (dst != c_rarg0) {
-    mov(dst, c_rarg0);
-  }
-  // restore pushed registers
-  pop(saved_regs, sp);
-}
-
--- a/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -28,6 +28,7 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "code/nativeInst.hpp"
@@ -108,7 +109,7 @@
 void os::initialize_thread(Thread *thr) {
 }
 
-address os::Linux::ucontext_get_pc(ucontext_t * uc) {
+address os::Linux::ucontext_get_pc(const ucontext_t * uc) {
 #ifdef BUILTIN_SIM
   return (address)uc->uc_mcontext.gregs[REG_PC];
 #else
@@ -124,7 +125,7 @@
 #endif
 }
 
-intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) {
+intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
 #ifdef BUILTIN_SIM
   return (intptr_t*)uc->uc_mcontext.gregs[REG_SP];
 #else
@@ -132,7 +133,7 @@
 #endif
 }
 
-intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) {
+intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) {
 #ifdef BUILTIN_SIM
   return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
 #else
@@ -146,7 +147,7 @@
 // frames. Currently we don't do that on Linux, so it's the same as
 // os::fetch_frame_from_context().
 ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread,
-  ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
+  const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
 
   assert(thread != NULL, "just checking");
   assert(ret_sp != NULL, "just checking");
@@ -155,11 +156,11 @@
   return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
 }
 
-ExtendedPC os::fetch_frame_from_context(void* ucVoid,
+ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                     intptr_t** ret_sp, intptr_t** ret_fp) {
 
   ExtendedPC  epc;
-  ucontext_t* uc = (ucontext_t*)ucVoid;
+  const ucontext_t* uc = (const ucontext_t*)ucVoid;
 
   if (uc != NULL) {
     epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
@@ -175,7 +176,7 @@
   return epc;
 }
 
-frame os::fetch_frame_from_context(void* ucVoid) {
+frame os::fetch_frame_from_context(const void* ucVoid) {
   intptr_t* sp;
   intptr_t* fp;
   ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
@@ -249,7 +250,7 @@
                         int abort_if_unrecognized) {
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away
   // (no destructors can be run)
@@ -590,10 +591,10 @@
 /////////////////////////////////////////////////////////////////////////////
 // helper functions for fatal error handler
 
-void os::print_context(outputStream *st, void *context) {
+void os::print_context(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  ucontext_t *uc = (ucontext_t*)context;
+  const ucontext_t *uc = (const ucontext_t*)context;
   st->print_cr("Registers:");
 #ifdef BUILTIN_SIM
   st->print(  "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
@@ -642,10 +643,10 @@
   print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
 }
 
-void os::print_register_info(outputStream *st, void *context) {
+void os::print_register_info(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  ucontext_t *uc = (ucontext_t*)context;
+  const ucontext_t *uc = (const ucontext_t*)context;
 
   st->print_cr("Register to memory mapping:");
   st->cr();
--- a/src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/threadLocalStorage.hpp"
-#include "runtime/thread.inline.hpp"
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-    // nothing we can do here for user-level thread
-}
-
-void ThreadLocalStorage::pd_init() {
-}
-
-__thread Thread *aarch64_currentThread;
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-  aarch64_currentThread = thread;
-}
--- a/src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_AARCH64_VM_THREADLS_LINUX_AARCH64_HPP
-#define OS_CPU_LINUX_AARCH64_VM_THREADLS_LINUX_AARCH64_HPP
-
-  // Processor dependent parts of ThreadLocalStorage
-
-public:
-
-  static Thread *thread() {
-    return aarch64_currentThread;
-  }
-
-#endif // OS_CPU_LINUX_AARCH64_VM_THREADLS_LINUX_AARCH64_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_aarch64/vm/threadLS_linux_aarch64.s	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,44 @@
+// Copyright (c) 2015, Red Hat Inc. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+
+        // JavaThread::aarch64_get_thread_helper()
+        //
+        // Return the current thread pointer in x0.
+        // Clobber x1, flags.
+        // All other registers are preserved.
+
+	.global	_ZN10JavaThread25aarch64_get_thread_helperEv
+	.type	_ZN10JavaThread25aarch64_get_thread_helperEv, %function
+
+_ZN10JavaThread25aarch64_get_thread_helperEv:
+	stp x29, x30, [sp, -16]!
+	adrp x0, :tlsdesc:_ZN6Thread12_thr_currentE
+	ldr x1, [x0, #:tlsdesc_lo12:_ZN6Thread12_thr_currentE]
+	add x0, x0, :tlsdesc_lo12:_ZN6Thread12_thr_currentE
+	.tlsdesccall _ZN6Thread12_thr_currentE
+	blr x1
+	mrs x1, tpidr_el0
+	add x0, x1, x0
+	ldr x0, [x0]
+	ldp x29, x30, [sp], 16
+	ret
+
+	.size _ZN10JavaThread25aarch64_get_thread_helperEv, .-_ZN10JavaThread25aarch64_get_thread_helperEv
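
The symbols in this stub demangle to Thread::_thr_current and JavaThread::aarch64_get_thread_helper(): the adrp/ldr/blr sequence is the standard AArch64 TLS-descriptor call that resolves the offset of the __thread slot, after which tpidr_el0 plus that offset addresses the slot and the final ldr fetches the Thread*. A hypothetical C++ equivalent, assuming this changeset's model of one compiler-managed TLS slot holding the current thread:

    // Sketch only; the real class lives in runtime/thread.hpp.
    class Thread {
      static __thread Thread* _thr_current;   // the slot the stub resolves
     public:
      // Async-signal-safe: a plain TLS load, no pthread_getspecific call.
      static Thread* current_or_null_safe() { return _thr_current; }
    };
    __thread Thread* Thread::_thr_current = nullptr;

    // aarch64_get_thread_helper() reduces to 'return Thread::_thr_current;',
    // hand-written in assembly so generated code can call it with a known,
    // minimal clobber set (x0 result; x1 and flags clobbered).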
--- a/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/linux_aarch64/vm/thread_linux_aarch64.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -77,6 +77,8 @@
   bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
 public:
 
+  static Thread *aarch64_get_thread_helper();
+
   // These routines are only used on cpu architectures that
   // have separate register stacks (Itanium).
   static bool register_stack_overflow() { return false; }
--- a/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -291,6 +291,71 @@
   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 }
 
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_acquire'
+  // (see atomic.hpp).
+
+  // Using 32 bit internally.
+  volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
+
+#ifdef VM_LITTLE_ENDIAN
+  const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
+#else
+  const unsigned int shift_amount        = ((~(uintptr_t)dest) & 3) * 8;
+#endif
+  const unsigned int masked_compare_val  = ((unsigned int)(unsigned char)compare_value),
+                     masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
+                     xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
+
+  unsigned int old_value, value32;
+
+  __asm__ __volatile__ (
+    /* fence */
+    strasm_sync
+    /* simple guard */
+    "   lbz     %[old_value], 0(%[dest])                  \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* atomic loop */
+    "1:                                                   \n"
+    "   lwarx   %[value32], 0, %[dest_base]               \n"
+    /* extract byte and compare */
+    "   srd     %[old_value], %[value32], %[shift_amount] \n"
+    "   clrldi  %[old_value], %[old_value], 56            \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* replace byte and try to store */
+    "   xor     %[value32], %[xor_value], %[value32]      \n"
+    "   stwcx.  %[value32], 0, %[dest_base]               \n"
+    "   bne-    1b                                        \n"
+    /* acquire */
+    strasm_sync
+    /* exit */
+    "2:                                                   \n"
+    /* out */
+    : [old_value]           "=&r"   (old_value),
+      [value32]             "=&r"   (value32),
+                            "=m"    (*dest),
+                            "=m"    (*dest_base)
+    /* in */
+    : [dest]                "b"     (dest),
+      [dest_base]           "b"     (dest_base),
+      [shift_amount]        "r"     (shift_amount),
+      [masked_compare_val]  "r"     (masked_compare_val),
+      [xor_value]           "r"     (xor_value),
+                            "m"     (*dest),
+                            "m"     (*dest_base)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (jbyte)(unsigned char)old_value;
+}
+
 inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
 
   // Note that cmpxchg guarantees a two-way memory barrier across
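
The new jbyte specialization above emulates a byte-wide compare-and-swap with PPC's word-sized lwarx/stwcx. pair: it reserves the aligned word containing the byte, compares only the target lane, and xors in the replacement byte before the conditional store. A portable sketch of the same idea on top of a 32-bit CAS, assuming C++11 std::atomic and little-endian lane arithmetic (not the changeset's API):

    #include <atomic>
    #include <cstdint>

    inline int8_t cmpxchg_byte(int8_t exchange, int8_t* dest, int8_t compare) {
      // CAS the aligned 32-bit word that contains *dest, touching one lane.
      std::atomic<uint32_t>* word = reinterpret_cast<std::atomic<uint32_t>*>(
          reinterpret_cast<uintptr_t>(dest) & ~uintptr_t(3));
      const unsigned shift = (reinterpret_cast<uintptr_t>(dest) & 3) * 8;
      const uint32_t mask  = uint32_t(0xff) << shift;

      uint32_t old_word = word->load(std::memory_order_relaxed);
      for (;;) {
        uint8_t old_byte = uint8_t((old_word & mask) >> shift);
        if (old_byte != uint8_t(compare))
          return int8_t(old_byte);               // guard, like the lbz test above
        uint32_t new_word = (old_word & ~mask)
                          | (uint32_t(uint8_t(exchange)) << shift);
        if (word->compare_exchange_weak(old_word, new_word,
                                        std::memory_order_seq_cst))
          return compare;                        // our byte was stored
        // compare_exchange_weak refreshed old_word on failure; retry.
      }
    }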
--- a/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -28,6 +28,7 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
@@ -98,7 +99,7 @@
 // Frame information (pc, sp, fp) retrieved via ucontext
 // always looks like a C-frame according to the frame
 // conventions in frame_ppc64.hpp.
-address os::Linux::ucontext_get_pc(ucontext_t * uc) {
+address os::Linux::ucontext_get_pc(const ucontext_t * uc) {
   // On powerpc64, ucontext_t is not selfcontained but contains
   // a pointer to an optional substructure (mcontext_t.regs) containing the volatile
   // registers - NIP, among others.
@@ -121,19 +122,19 @@
   uc->uc_mcontext.regs->nip = (unsigned long)pc;
 }
 
-intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) {
+intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
   return (intptr_t*)uc->uc_mcontext.regs->gpr[1/*REG_SP*/];
 }
 
-intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) {
+intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) {
   return NULL;
 }
 
-ExtendedPC os::fetch_frame_from_context(void* ucVoid,
+ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                     intptr_t** ret_sp, intptr_t** ret_fp) {
 
   ExtendedPC  epc;
-  ucontext_t* uc = (ucontext_t*)ucVoid;
+  const ucontext_t* uc = (const ucontext_t*)ucVoid;
 
   if (uc != NULL) {
     epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
@@ -149,7 +150,7 @@
   return epc;
 }
 
-frame os::fetch_frame_from_context(void* ucVoid) {
+frame os::fetch_frame_from_context(const void* ucVoid) {
   intptr_t* sp;
   intptr_t* fp;
   ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
@@ -182,7 +183,7 @@
                         int abort_if_unrecognized) {
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   SignalHandlerMark shm(t);
 
@@ -563,10 +564,10 @@
 /////////////////////////////////////////////////////////////////////////////
 // helper functions for fatal error handler
 
-void os::print_context(outputStream *st, void *context) {
+void os::print_context(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  ucontext_t* uc = (ucontext_t*)context;
+  const ucontext_t* uc = (const ucontext_t*)context;
 
   st->print_cr("Registers:");
   st->print("pc =" INTPTR_FORMAT "  ", uc->uc_mcontext.regs->nip);
@@ -594,10 +595,10 @@
   st->cr();
 }
 
-void os::print_register_info(outputStream *st, void *context) {
+void os::print_register_info(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  ucontext_t *uc = (ucontext_t*)context;
+  const ucontext_t *uc = (const ucontext_t*)context;
 
   st->print_cr("Register to memory mapping:");
   st->cr();
--- a/src/os_cpu/linux_ppc/vm/threadLS_linux_ppc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-    // nothing we can do here for user-level thread
-}
-
-void ThreadLocalStorage::pd_init() {
-  // Nothing to do
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
--- a/src/os_cpu/linux_ppc/vm/threadLS_linux_ppc.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_PPC_VM_THREADLS_LINUX_PPC_HPP
-#define OS_CPU_LINUX_PPC_VM_THREADLS_LINUX_PPC_HPP
-
-  // Processor dependent parts of ThreadLocalStorage
-
-public:
-  static Thread* thread() {
-    return (Thread *) os::thread_local_storage_at(thread_index());
-  }
-
-#endif // OS_CPU_LINUX_PPC_VM_THREADLS_LINUX_PPC_HPP
--- a/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,6 +27,7 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
@@ -91,7 +92,7 @@
 // signal frames. Currently we don't do that on Linux, so it's the
 // same as os::fetch_frame_from_context().
 ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread,
-                                                ucontext_t* uc,
+                                                const ucontext_t* uc,
                                                 intptr_t** ret_sp,
                                                 intptr_t** ret_fp) {
   assert(thread != NULL, "just checking");
@@ -101,10 +102,10 @@
   return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
 }
 
-ExtendedPC os::fetch_frame_from_context(void* ucVoid,
+ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                                         intptr_t** ret_sp,
                                         intptr_t** ret_fp) {
-  ucontext_t* uc = (ucontext_t*) ucVoid;
+  const ucontext_t* uc = (const ucontext_t*) ucVoid;
   ExtendedPC  epc;
 
   if (uc != NULL) {
@@ -129,7 +130,7 @@
   return epc;
 }
 
-frame os::fetch_frame_from_context(void* ucVoid) {
+frame os::fetch_frame_from_context(const void* ucVoid) {
   intptr_t* sp;
   ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, NULL);
   return frame(sp, frame::unpatchable, epc.pc());
@@ -212,10 +213,10 @@
 
 void os::initialize_thread(Thread* thr) {}
 
-void os::print_context(outputStream *st, void *context) {
+void os::print_context(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  ucontext_t* uc = (ucontext_t*)context;
+  const ucontext_t* uc = (const ucontext_t*)context;
   sigcontext* sc = (sigcontext*)context;
   st->print_cr("Registers:");
 
@@ -290,11 +291,11 @@
 }
 
 
-void os::print_register_info(outputStream *st, void *context) {
+void os::print_register_info(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  ucontext_t *uc = (ucontext_t*)context;
-  sigcontext* sc = (sigcontext*)context;
+  const ucontext_t *uc = (const ucontext_t*)context;
+  const sigcontext* sc = (const sigcontext*)context;
   intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
 
   st->print_cr("Register to memory mapping:");
@@ -342,23 +343,23 @@
 }
 
 
-address os::Linux::ucontext_get_pc(ucontext_t* uc) {
+address os::Linux::ucontext_get_pc(const ucontext_t* uc) {
   return (address) SIG_PC((sigcontext*)uc);
 }
 
 void os::Linux::ucontext_set_pc(ucontext_t* uc, address pc) {
-  sigcontext_t* ctx = (sigcontext_t*) uc;
-  SIG_PC(ctx)  = (intptr_t)addr;
-  SIG_NPC(ctx) = (intptr_t)(addr+4);
+  sigcontext* ctx = (sigcontext*) uc;
+  SIG_PC(ctx)  = (intptr_t)pc;
+  SIG_NPC(ctx) = (intptr_t)(pc+4);
 }
 
-intptr_t* os::Linux::ucontext_get_sp(ucontext_t *uc) {
+intptr_t* os::Linux::ucontext_get_sp(const ucontext_t *uc) {
   return (intptr_t*)
     ((intptr_t)SIG_REGS((sigcontext*)uc).u_regs[CON_O6] + STACK_BIAS);
 }
 
 // not used on Sparc
-intptr_t* os::Linux::ucontext_get_fp(ucontext_t *uc) {
+intptr_t* os::Linux::ucontext_get_fp(const ucontext_t *uc) {
   ShouldNotReachHere();
   return NULL;
 }
@@ -541,7 +542,7 @@
   ucontext_t* ucFake = (ucontext_t*) ucVoid;
   sigcontext* uc = (sigcontext*)ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away
   // (no destructors can be run)
@@ -683,7 +684,7 @@
   }
 
   if (pc == NULL && uc != NULL) {
-    pc = os::Linux::ucontext_get_pc((ucontext_t*)uc);
+    pc = os::Linux::ucontext_get_pc((const ucontext_t*)uc);
   }
 
   // unmask current signal
@@ -695,6 +696,7 @@
   VMError::report_and_die(t, sig, pc, info, ucVoid);
 
   ShouldNotReachHere();
+  return false;
 }
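
ShouldNotReachHere() aborts at runtime but is not visible to the compiler as noreturn, so the unreachable 'return false;' added above keeps -Wreturn-type and similar diagnostics quiet in this non-void handler. In outline, assuming a non-annotated abort helper like the VM's macro:

    void should_not_reach_here();   // aborts; not declared noreturn

    bool handler_tail() {
      should_not_reach_here();
      return false;                 // never executed; placates the compiler
    }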
 
 void os::Linux::init_thread_fpu_state(void) {
--- a/src/os_cpu/linux_sparc/vm/threadLS_linux_sparc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-}
-
-void ThreadLocalStorage::pd_init() {
-   // Nothing to do
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
--- a/src/os_cpu/linux_sparc/vm/threadLS_linux_sparc.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_SPARC_VM_THREADLS_LINUX_SPARC_HPP
-#define OS_CPU_LINUX_SPARC_VM_THREADLS_LINUX_SPARC_HPP
-
-public:
-  static Thread* thread() {
-    return (Thread*) os::thread_local_storage_at(thread_index());
-  }
-
-#endif // OS_CPU_LINUX_SPARC_VM_THREADLS_LINUX_SPARC_HPP
--- a/src/os_cpu/linux_x86/vm/assembler_linux_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/linux_x86/vm/assembler_linux_x86.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,85 +26,7 @@
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
 
-#ifndef _LP64
 void MacroAssembler::int3() {
   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 }
-
-#ifdef MINIMIZE_RAM_USAGE
-
-void MacroAssembler::get_thread(Register thread) {
-  // call pthread_getspecific
-  // void * pthread_getspecific(pthread_key_t key);
-  if (thread != rax) push(rax);
-  push(rcx);
-  push(rdx);
-
-  push(ThreadLocalStorage::thread_index());
-  call(RuntimeAddress(CAST_FROM_FN_PTR(address, pthread_getspecific)));
-  increment(rsp, wordSize);
-
-  pop(rdx);
-  pop(rcx);
-  if (thread != rax) {
-    mov(thread, rax);
-    pop(rax);
-  }
-}
-
-#else
-void MacroAssembler::get_thread(Register thread) {
-  movl(thread, rsp);
-  shrl(thread, PAGE_SHIFT);
-
-  ExternalAddress tls_base((address)ThreadLocalStorage::sp_map_addr());
-  Address index(noreg, thread, Address::times_4);
-  ArrayAddress tls(tls_base, index);
-
-  movptr(thread, tls);
-}
-#endif // MINIMIZE_RAM_USAGE
-#else
-void MacroAssembler::int3() {
-  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MacroAssembler::get_thread(Register thread) {
-  // call pthread_getspecific
-  // void * pthread_getspecific(pthread_key_t key);
-   if (thread != rax) {
-     push(rax);
-   }
-   push(rdi);
-   push(rsi);
-   push(rdx);
-   push(rcx);
-   push(r8);
-   push(r9);
-   push(r10);
-   // XXX
-   mov(r10, rsp);
-   andq(rsp, -16);
-   push(r10);
-   push(r11);
-
-   movl(rdi, ThreadLocalStorage::thread_index());
-   call(RuntimeAddress(CAST_FROM_FN_PTR(address, pthread_getspecific)));
-
-   pop(r11);
-   pop(rsp);
-   pop(r10);
-   pop(r9);
-   pop(r8);
-   pop(rcx);
-   pop(rdx);
-   pop(rsi);
-   pop(rdi);
-   if (thread != rax) {
-       mov(thread, rax);
-       pop(rax);
-   }
-}
-#endif
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,6 +27,7 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
@@ -116,7 +117,7 @@
 // Nothing to do.
 }
 
-address os::Linux::ucontext_get_pc(ucontext_t * uc) {
+address os::Linux::ucontext_get_pc(const ucontext_t * uc) {
   return (address)uc->uc_mcontext.gregs[REG_PC];
 }
 
@@ -124,11 +125,11 @@
   uc->uc_mcontext.gregs[REG_PC] = (intptr_t)pc;
 }
 
-intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) {
+intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
   return (intptr_t*)uc->uc_mcontext.gregs[REG_SP];
 }
 
-intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) {
+intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) {
   return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
 }
 
@@ -137,8 +138,9 @@
 // os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
 // frames. Currently we don't do that on Linux, so it's the same as
 // os::fetch_frame_from_context().
+// This method is also used for stack overflow signal handling.
 ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread,
-  ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
+  const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
 
   assert(thread != NULL, "just checking");
   assert(ret_sp != NULL, "just checking");
@@ -147,11 +149,11 @@
   return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
 }
 
-ExtendedPC os::fetch_frame_from_context(void* ucVoid,
+ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                     intptr_t** ret_sp, intptr_t** ret_fp) {
 
   ExtendedPC  epc;
-  ucontext_t* uc = (ucontext_t*)ucVoid;
+  const ucontext_t* uc = (const ucontext_t*)ucVoid;
 
   if (uc != NULL) {
     epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
@@ -167,13 +169,57 @@
   return epc;
 }
 
-frame os::fetch_frame_from_context(void* ucVoid) {
+frame os::fetch_frame_from_context(const void* ucVoid) {
   intptr_t* sp;
   intptr_t* fp;
   ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
   return frame(sp, fp, epc.pc());
 }
 
+frame os::fetch_frame_from_ucontext(Thread* thread, void* ucVoid) {
+  intptr_t* sp;
+  intptr_t* fp;
+  ExtendedPC epc = os::Linux::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, &fp);
+  return frame(sp, fp, epc.pc());
+}
+
+bool os::Linux::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+  address pc = (address) os::Linux::ucontext_get_pc(uc);
+  if (Interpreter::contains(pc)) {
+    // The interpreter performs stack banging after the fixed frame header
+    // has been generated, while the compilers perform it before. To keep
+    // the semantics consistent between interpreted and compiled frames,
+    // this method returns the Java sender of the current frame.
+    *fr = os::fetch_frame_from_ucontext(thread, uc);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // More complex case: the pc is in compiled code.
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to; fall back to the default
+      // stack overflow handling.
+      return false;
+    } else {
+      // In compiled code, the stack banging is performed just after the
+      // return pc has been pushed on the stack.
+      intptr_t* fp = os::Linux::ucontext_get_fp(uc);
+      intptr_t* sp = os::Linux::ucontext_get_sp(uc);
+      *fr = frame(sp + 1, fp, (address)*sp);
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        assert(!fr->is_first_frame(), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
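
In the compiled-code branch above, the bang happens right after the callee pushes its return pc and before it builds a frame, so sp[0] holds the return address into the caller and sp + 1 is the caller's stack pointer at the call site; that is exactly what frame(sp + 1, fp, (address)*sp) reconstructs. A standalone sketch with hypothetical minimal types (not the VM's frame class):

    #include <cstdint>

    typedef unsigned char* address;

    struct BangFrame { intptr_t* sp; intptr_t* fp; address pc; };

    // State at the bang point in compiled code:
    //   sp[0] = return pc into the caller
    //   sp+1  = caller's stack pointer at the call site
    BangFrame caller_frame_at_bang(intptr_t* sp, intptr_t* fp) {
      return BangFrame{ sp + 1, fp, address(sp[0]) };
    }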
 // By default, gcc always save frame pointer (%ebp/%rbp) on stack. It may get
 // turned off by -fomit-frame-pointer,
 frame os::get_sender_for_C_frame(frame* fr) {
@@ -221,7 +267,7 @@
                         int abort_if_unrecognized) {
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away
   // (no destructors can be run)
@@ -304,13 +350,32 @@
           addr >= thread->stack_base() - thread->stack_size()) {
         // stack overflow
         if (thread->in_stack_yellow_zone(addr)) {
-          thread->disable_stack_yellow_zone();
           if (thread->thread_state() == _thread_in_Java) {
+            if (thread->in_stack_reserved_zone(addr)) {
+              frame fr;
+              if (os::Linux::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+                assert(fr.is_java_frame(), "Must be a Java frame");
+                frame activation =
+                  SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+                if (activation.sp() != NULL) {
+                  thread->disable_stack_reserved_zone();
+                  if (activation.is_interpreted_frame()) {
+                    thread->set_reserved_stack_activation((address)(
+                      activation.fp() + frame::interpreter_frame_initial_sp_offset));
+                  } else {
+                    thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                  }
+                  return 1;
+                }
+              }
+            }
             // Throw a stack overflow exception.  Guard pages will be reenabled
             // while unwinding the stack.
+            thread->disable_stack_yellow_zone();
             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
           } else {
             // Thread was in the vm or native code.  Return and try to finish.
+            thread->disable_stack_yellow_zone();
             return 1;
           }
         } else if (thread->in_stack_red_zone(addr)) {
@@ -719,10 +784,10 @@
 /////////////////////////////////////////////////////////////////////////////
 // helper functions for fatal error handler
 
-void os::print_context(outputStream *st, void *context) {
+void os::print_context(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  ucontext_t *uc = (ucontext_t*)context;
+  const ucontext_t *uc = (const ucontext_t*)context;
   st->print_cr("Registers:");
 #ifdef AMD64
   st->print(  "RAX=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RAX]);
@@ -782,10 +847,10 @@
   print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
 }
 
-void os::print_register_info(outputStream *st, void *context) {
+void os::print_register_info(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  ucontext_t *uc = (ucontext_t*)context;
+  const ucontext_t *uc = (const ucontext_t*)context;
 
   st->print_cr("Register to memory mapping:");
   st->cr();
@@ -867,7 +932,7 @@
    * we don't have much control or understanding of the address space, just let it slide.
    */
   char* hint = (char*) (Linux::initial_thread_stack_bottom() -
-                        ((StackYellowPages + StackRedPages + 1) * page_size));
+                        ((StackReservedPages + StackYellowPages + StackRedPages + 1) * page_size));
   char* codebuf = os::attempt_reserve_memory_at(page_size, hint);
   if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) {
     return; // No matter, we tried, best effort.
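
The reworked yellow-zone branch above defers disable_stack_yellow_zone() so that a bang landing in the new reserved zone can first try to unwind to a method annotated for reserved-stack access (look_for_reserved_stack_annotated_method); only if that fails does the handler fall through to the ordinary StackOverflowError path. A toy decision ladder mirroring that ordering, with hypothetical boolean inputs standing in for the JavaThread predicates:

    enum class Outcome { ReservedHandled, ThrowStackOverflow, ReturnAndRetry };

    Outcome on_yellow_zone_hit(bool in_java, bool in_reserved_zone,
                               bool found_annotated_activation) {
      if (in_java && in_reserved_zone && found_annotated_activation) {
        // disable_stack_reserved_zone(); record activation; resume execution.
        return Outcome::ReservedHandled;
      }
      if (in_java) {
        // disable_stack_yellow_zone(); unwind with a StackOverflowError.
        return Outcome::ThrowStackOverflow;
      }
      // VM or native code: disable_stack_yellow_zone(); return and finish.
      return Outcome::ReturnAndRetry;
    }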
--- a/src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Map stack pointer (%esp) to thread pointer for faster TLS access
-//
-// Here we use a flat table for better performance. Getting current thread
-// is down to one memory access (read _sp_map[%esp>>12]) in generated code
-// and two in runtime code (-fPIC code needs an extra load for _sp_map).
-//
-// This code assumes stack page is not shared by different threads. It works
-// in 32-bit VM when page size is 4K (or a multiple of 4K, if that matters).
-//
-// Notice that _sp_map is allocated in the bss segment, which is ZFOD
-// (zero-fill-on-demand). While it reserves 4M address space upfront,
-// actual memory pages are committed on demand.
-//
-// If an application creates and destroys a lot of threads, usually the
-// stack space freed by a thread will soon get reused by new thread.
-// No memory page in _sp_map is wasted.
-//
-// However, it's still possible that we might end up populating &
-// committing a large fraction of the 4M table over time, but the actual
-// amount of live data in the table could be quite small. The max wastage
-// is less than 4M bytes. If it becomes an issue, we could use madvise()
-// with MADV_DONTNEED to reclaim unused (i.e. all-zero) pages in _sp_map.
-// MADV_DONTNEED on Linux keeps the virtual memory mapping, but zaps the
-// physical memory page (i.e. similar to MADV_FREE on Solaris).
-
-#if !defined(AMD64) && !defined(MINIMIZE_RAM_USAGE)
-Thread* ThreadLocalStorage::_sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-    // nothing we can do here for user-level thread
-}
-
-void ThreadLocalStorage::pd_init() {
-  assert(align_size_down(os::vm_page_size(), PAGE_SIZE) == os::vm_page_size(),
-         "page size must be multiple of PAGE_SIZE");
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-  address stack_top = os::current_stack_base();
-  size_t stack_size = os::current_stack_size();
-
-  for (address p = stack_top - stack_size; p < stack_top; p += PAGE_SIZE) {
-    // pd_set_thread() is called with non-NULL value when a new thread is
-    // created/attached, or with NULL value when a thread is about to exit.
-    // If both "thread" and the corresponding _sp_map[] entry are non-NULL,
-    // they should have the same value. Otherwise it might indicate that the
-    // stack page is shared by multiple threads. However, a more likely cause
-    // for this assertion to fail is that an attached thread exited without
-    // detaching itself from VM, which is a program error and could cause VM
-    // to crash.
-    assert(thread == NULL || _sp_map[(uintptr_t)p >> PAGE_SHIFT] == NULL ||
-           thread == _sp_map[(uintptr_t)p >> PAGE_SHIFT],
-           "thread exited without detaching from VM??");
-    _sp_map[(uintptr_t)p >> PAGE_SHIFT] = thread;
-  }
-}
-#else
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-    // nothing we can do here for user-level thread
-}
-
-void ThreadLocalStorage::pd_init() {
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
-#endif // !AMD64 && !MINIMIZE_RAM_USAGE
--- a/src/os_cpu/linux_x86/vm/threadLS_linux_x86.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_X86_VM_THREADLS_LINUX_X86_HPP
-#define OS_CPU_LINUX_X86_VM_THREADLS_LINUX_X86_HPP
-
-  // Processor dependent parts of ThreadLocalStorage
-
-#if !defined(AMD64) && !defined(MINIMIZE_RAM_USAGE)
-
-  // map stack pointer to thread pointer - see notes in threadLS_linux_x86.cpp
-  #define SP_BITLENGTH  32
-  #define PAGE_SHIFT    12
-  #define PAGE_SIZE     (1UL << PAGE_SHIFT)
-  static Thread* _sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
-
-public:
-
-  static Thread** sp_map_addr() { return _sp_map; }
-
-  static Thread* thread() {
-    uintptr_t sp;
-    __asm__ volatile ("movl %%esp, %0" : "=r" (sp));
-    return _sp_map[sp >> PAGE_SHIFT];
-  }
-
-#else
-
-public:
-
-   static Thread* thread() {
-     return (Thread*) os::thread_local_storage_at(thread_index());
-   }
-
-#endif // AMD64 || MINIMIZE_RAM_USAGE
-
-#endif // OS_CPU_LINUX_X86_VM_THREADLS_LINUX_X86_HPP
--- a/src/os_cpu/linux_zero/vm/assembler_linux_zero.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/linux_zero/vm/assembler_linux_zero.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -23,10 +23,4 @@
  *
  */
 
-#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_zero.inline.hpp"
-#include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
 // This file is intentionally empty
--- a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -100,7 +100,7 @@
   // Nothing to do.
 }
 
-address os::Linux::ucontext_get_pc(ucontext_t* uc) {
+address os::Linux::ucontext_get_pc(const ucontext_t* uc) {
   ShouldNotCallThis();
 }
 
@@ -108,13 +108,13 @@
   ShouldNotCallThis();
 }
 
-ExtendedPC os::fetch_frame_from_context(void* ucVoid,
+ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                                         intptr_t** ret_sp,
                                         intptr_t** ret_fp) {
   ShouldNotCallThis();
 }
 
-frame os::fetch_frame_from_context(void* ucVoid) {
+frame os::fetch_frame_from_context(const void* ucVoid) {
   ShouldNotCallThis();
 }
 
@@ -125,7 +125,7 @@
                         int abort_if_unrecognized) {
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   SignalHandlerMark shm(t);
 
@@ -406,11 +406,11 @@
 /////////////////////////////////////////////////////////////////////////////
 // helper functions for fatal error handler
 
-void os::print_context(outputStream* st, void* context) {
+void os::print_context(outputStream* st, const void* context) {
   ShouldNotCallThis();
 }
 
-void os::print_register_info(outputStream *st, void *context) {
+void os::print_register_info(outputStream *st, const void *context) {
   ShouldNotCallThis();
 }
 
--- a/src/os_cpu/linux_zero/vm/threadLS_linux_zero.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-  // nothing to do
-}
-
-void ThreadLocalStorage::pd_init() {
-  // nothing to do
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
--- a/src/os_cpu/linux_zero/vm/threadLS_linux_zero.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_ZERO_VM_THREADLS_LINUX_ZERO_HPP
-#define OS_CPU_LINUX_ZERO_VM_THREADLS_LINUX_ZERO_HPP
-
-// Processor dependent parts of ThreadLocalStorage
-
- public:
-  static Thread* thread() {
-    return (Thread*) os::thread_local_storage_at(thread_index());
-  }
-
-#endif // OS_CPU_LINUX_ZERO_VM_THREADLS_LINUX_ZERO_HPP
--- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -121,7 +121,7 @@
 // There are issues with libthread giving out uc_links for different threads
 // on the same uc_link chain and bad or circular links.
 //
-bool os::Solaris::valid_ucontext(Thread* thread, ucontext_t* valid, ucontext_t* suspect) {
+bool os::Solaris::valid_ucontext(Thread* thread, const ucontext_t* valid, const ucontext_t* suspect) {
   if (valid >= suspect ||
       valid->uc_stack.ss_flags != suspect->uc_stack.ss_flags ||
       valid->uc_stack.ss_sp    != suspect->uc_stack.ss_sp    ||
@@ -148,10 +148,10 @@
 // We will only follow one level of uc_link since there are libthread
 // issues with ucontext linking and it is better to be safe and just
 // let caller retry later.
-ucontext_t* os::Solaris::get_valid_uc_in_signal_handler(Thread *thread,
-  ucontext_t *uc) {
+const ucontext_t* os::Solaris::get_valid_uc_in_signal_handler(Thread *thread,
+  const ucontext_t *uc) {
 
-  ucontext_t *retuc = NULL;
+  const ucontext_t *retuc = NULL;
 
   // Sometimes the topmost register windows are not properly flushed.
   // i.e., if the kernel would have needed to take a page fault
@@ -179,7 +179,7 @@
 }
 
 // Assumes ucontext is valid
-ExtendedPC os::Solaris::ucontext_get_ExtendedPC(ucontext_t *uc) {
+ExtendedPC os::Solaris::ucontext_get_ExtendedPC(const ucontext_t *uc) {
   address pc = (address)uc->uc_mcontext.gregs[REG_PC];
   // set npc to zero to avoid using it for safepoint, good for profiling only
   return ExtendedPC(pc);
@@ -191,17 +191,17 @@
 }
 
 // Assumes ucontext is valid
-intptr_t* os::Solaris::ucontext_get_sp(ucontext_t *uc) {
+intptr_t* os::Solaris::ucontext_get_sp(const ucontext_t *uc) {
   return (intptr_t*)((intptr_t)uc->uc_mcontext.gregs[REG_SP] + STACK_BIAS);
 }
 
 // Solaris X86 only
-intptr_t* os::Solaris::ucontext_get_fp(ucontext_t *uc) {
+intptr_t* os::Solaris::ucontext_get_fp(const ucontext_t *uc) {
   ShouldNotReachHere();
   return NULL;
 }
 
-address os::Solaris::ucontext_get_pc(ucontext_t *uc) {
+address os::Solaris::ucontext_get_pc(const ucontext_t *uc) {
   return (address) uc->uc_mcontext.gregs[REG_PC];
 }
 
@@ -213,25 +213,26 @@
 //
 // The difference between this and os::fetch_frame_from_context() is that
 // here we try to skip nested signal frames.
+// This method is also used for stack overflow signal handling.
 ExtendedPC os::Solaris::fetch_frame_from_ucontext(Thread* thread,
-  ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
+  const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
 
   assert(thread != NULL, "just checking");
   assert(ret_sp != NULL, "just checking");
   assert(ret_fp == NULL, "just checking");
 
-  ucontext_t *luc = os::Solaris::get_valid_uc_in_signal_handler(thread, uc);
+  const ucontext_t *luc = os::Solaris::get_valid_uc_in_signal_handler(thread, uc);
 
   return os::fetch_frame_from_context(luc, ret_sp, ret_fp);
 }
 
 
 // ret_fp parameter is only used by Solaris X86.
-ExtendedPC os::fetch_frame_from_context(void* ucVoid,
+ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                     intptr_t** ret_sp, intptr_t** ret_fp) {
 
   ExtendedPC  epc;
-  ucontext_t *uc = (ucontext_t*)ucVoid;
+  const ucontext_t *uc = (const ucontext_t*)ucVoid;
 
   if (uc != NULL) {
     epc = os::Solaris::ucontext_get_ExtendedPC(uc);
@@ -245,13 +246,48 @@
   return epc;
 }
 
-frame os::fetch_frame_from_context(void* ucVoid) {
+frame os::fetch_frame_from_context(const void* ucVoid) {
   intptr_t* sp;
   intptr_t* fp;
   ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
   return frame(sp, frame::unpatchable, epc.pc());
 }
 
+frame os::fetch_frame_from_ucontext(Thread* thread, void* ucVoid) {
+  intptr_t* sp;
+  ExtendedPC epc = os::Solaris::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, NULL);
+  return frame(sp, frame::unpatchable, epc.pc());
+}
+
+bool os::Solaris::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+  address pc = (address) os::Solaris::ucontext_get_pc(uc);
+  if (Interpreter::contains(pc)) {
+    *fr = os::fetch_frame_from_ucontext(thread, uc);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // More complex case: the pc is in compiled code.
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to; fall back to the default
+      // stack overflow handling.
+      return false;
+    } else {
+      *fr = os::fetch_frame_from_ucontext(thread, uc);
+      *fr = frame(fr->sender_sp(), frame::unpatchable, fr->sender_pc());
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
 frame os::get_sender_for_C_frame(frame* fr) {
   return frame(fr->sender_sp(), frame::unpatchable, fr->sender_pc());
 }
@@ -290,7 +326,7 @@
                           int abort_if_unrecognized) {
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null_safe();
 
   // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away
   // (no destructors can be run)
@@ -367,17 +403,32 @@
     if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) {
       address addr = (address) info->si_addr;
       if (thread->in_stack_yellow_zone(addr)) {
-        thread->disable_stack_yellow_zone();
         // Sometimes the register windows are not properly flushed.
         if(uc->uc_mcontext.gwins != NULL) {
           ::handle_unflushed_register_windows(uc->uc_mcontext.gwins);
         }
         if (thread->thread_state() == _thread_in_Java) {
+          if (thread->in_stack_reserved_zone(addr)) {
+            frame fr;
+            if (os::Solaris::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+              assert(fr.is_java_frame(), "Must be a Java frame");
+              frame activation = SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+              if (activation.sp() != NULL) {
+                thread->disable_stack_reserved_zone();
+                RegisterMap map(thread);
+                int frame_size = activation.frame_size(&map);
+                thread->set_reserved_stack_activation((address)(((address)activation.sp()) - STACK_BIAS));
+                return true;
+              }
+            }
+          }
           // Throw a stack overflow exception.  Guard pages will be reenabled
           // while unwinding the stack.
+          thread->disable_stack_yellow_zone();
           stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
         } else {
           // Thread was in the vm or native code.  Return and try to finish.
+          thread->disable_stack_yellow_zone();
           return true;
         }
       } else if (thread->in_stack_red_zone(addr)) {
@@ -551,12 +602,13 @@
   VMError::report_and_die(t, sig, pc, info, ucVoid);
 
   ShouldNotReachHere();
+  return false;
 }
 
-void os::print_context(outputStream *st, void *context) {
+void os::print_context(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  ucontext_t *uc = (ucontext_t*)context;
+  const ucontext_t *uc = (const ucontext_t*)context;
   st->print_cr("Registers:");
 
   st->print_cr(" G1=" INTPTR_FORMAT " G2=" INTPTR_FORMAT
@@ -630,10 +682,10 @@
   print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
 }
 
-void os::print_register_info(outputStream *st, void *context) {
+void os::print_register_info(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  ucontext_t *uc = (ucontext_t*)context;
+  const ucontext_t *uc = (const ucontext_t*)context;
   intptr_t *sp = (intptr_t *)os::Solaris::ucontext_get_sp(uc);
 
   st->print_cr("Register to memory mapping:");
--- a/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// True thread-local variable
-__thread Thread * ThreadLocalStorage::_thr_current = NULL;
-
-// Implementations needed to support the shared API
-
-void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
-
-bool ThreadLocalStorage::_initialized = false;
-
-void ThreadLocalStorage::init() {
-  _initialized = true;
-}
-
-bool ThreadLocalStorage::is_initialized() {
-  return _initialized;
-}
-
-Thread* ThreadLocalStorage::get_thread_slow() {
-    return thread();
-}
-
-extern "C" Thread* get_thread() {
-  return ThreadLocalStorage::thread();
-}
--- a/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_SOLARIS_SPARC_VM_THREADLS_SOLARIS_SPARC_HPP
-#define OS_CPU_SOLARIS_SPARC_VM_THREADLS_SOLARIS_SPARC_HPP
-
-// Solaris specific implementation involves simple, direct use
-// of a compiler-based thread-local variable
-
-private:
-  static __thread Thread * _thr_current;
-
-  static bool _initialized;  // needed for shared API
-
-public:
-  static inline Thread* thread();
-
-#endif // OS_CPU_SOLARIS_SPARC_VM_THREADLS_SOLARIS_SPARC_HPP
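
The deleted threadLS_* files were per-platform ThreadLocalStorage implementations;
the replacement keeps a single compiler-based thread-local behind
Thread::current_or_null_safe(). A sketch under that assumption, using C++11
thread_local and simplified stand-ins (Thread, current_or_null_safe) rather than
HotSpot's declarations:

    // Minimal sketch of the pattern replacing the deleted threadLS files.
    #include <cstdio>
    #include <thread>

    struct Thread { int id; };

    static thread_local Thread* _thr_current = nullptr;  // was __thread

    // Safe to call from a signal handler in this toy: no allocation, no locks.
    static Thread* current_or_null_safe() { return _thr_current; }

    int main() {
      Thread main_thread{0};
      _thr_current = &main_thread;
      std::thread t([] {
        // The worker never attached, so it sees nullptr, not main's Thread.
        std::printf("worker sees %p\n", (void*)current_or_null_safe());
      });
      t.join();
      std::printf("main sees id %d\n", current_or_null_safe()->id);
    }
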
--- a/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -25,8 +25,6 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-#include "runtime/thread.inline.hpp"
 
 void MacroAssembler::int3() {
   push(rax);
@@ -37,33 +35,3 @@
   pop(rdx);
   pop(rax);
 }
-
-// This is simply a call to ThreadLocalStorage::thread()
-void MacroAssembler::get_thread(Register thread) {
-  if (thread != rax) {
-    push(rax);
-  }
-  push(rdi);
-  push(rsi);
-  push(rdx);
-  push(rcx);
-  push(r8);
-  push(r9);
-  push(r10);
-  push(r11);
-
-  call(RuntimeAddress(CAST_FROM_FN_PTR(address, ThreadLocalStorage::thread)));
-
-  pop(r11);
-  pop(r10);
-  pop(r9);
-  pop(r8);
-  pop(rcx);
-  pop(rdx);
-  pop(rsi);
-  pop(rdi);
-  if (thread != rax) {
-    movl(thread, rax);
-    pop(rax);
-  }
-}
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,6 +27,7 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
@@ -120,7 +121,7 @@
 // There are issues with libthread giving out uc_links for different threads
 // on the same uc_link chain and bad or circular links.
 //
-bool os::Solaris::valid_ucontext(Thread* thread, ucontext_t* valid, ucontext_t* suspect) {
+bool os::Solaris::valid_ucontext(Thread* thread, const ucontext_t* valid, const ucontext_t* suspect) {
   if (valid >= suspect ||
       valid->uc_stack.ss_flags != suspect->uc_stack.ss_flags ||
       valid->uc_stack.ss_sp    != suspect->uc_stack.ss_sp    ||
@@ -145,10 +146,10 @@
 // We will only follow one level of uc_link since there are libthread
 // issues with ucontext linking and it is better to be safe and just
 // let caller retry later.
-ucontext_t* os::Solaris::get_valid_uc_in_signal_handler(Thread *thread,
-  ucontext_t *uc) {
+const ucontext_t* os::Solaris::get_valid_uc_in_signal_handler(Thread *thread,
+  const ucontext_t *uc) {
 
-  ucontext_t *retuc = NULL;
+  const ucontext_t *retuc = NULL;
 
   if (uc != NULL) {
     if (uc->uc_link == NULL) {
@@ -170,7 +171,7 @@
 }
 
 // Assumes ucontext is valid
-ExtendedPC os::Solaris::ucontext_get_ExtendedPC(ucontext_t *uc) {
+ExtendedPC os::Solaris::ucontext_get_ExtendedPC(const ucontext_t *uc) {
   return ExtendedPC((address)uc->uc_mcontext.gregs[REG_PC]);
 }
 
@@ -179,16 +180,16 @@
 }
 
 // Assumes ucontext is valid
-intptr_t* os::Solaris::ucontext_get_sp(ucontext_t *uc) {
+intptr_t* os::Solaris::ucontext_get_sp(const ucontext_t *uc) {
   return (intptr_t*)uc->uc_mcontext.gregs[REG_SP];
 }
 
 // Assumes ucontext is valid
-intptr_t* os::Solaris::ucontext_get_fp(ucontext_t *uc) {
+intptr_t* os::Solaris::ucontext_get_fp(const ucontext_t *uc) {
   return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
 }
 
-address os::Solaris::ucontext_get_pc(ucontext_t *uc) {
+address os::Solaris::ucontext_get_pc(const ucontext_t *uc) {
   return (address) uc->uc_mcontext.gregs[REG_PC];
 }
 
@@ -197,22 +198,23 @@
 //
 // The difference between this and os::fetch_frame_from_context() is that
 // here we try to skip nested signal frames.
+// This method is also used for stack overflow signal handling.
 ExtendedPC os::Solaris::fetch_frame_from_ucontext(Thread* thread,
-  ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
+  const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
 
   assert(thread != NULL, "just checking");
   assert(ret_sp != NULL, "just checking");
   assert(ret_fp != NULL, "just checking");
 
-  ucontext_t *luc = os::Solaris::get_valid_uc_in_signal_handler(thread, uc);
+  const ucontext_t *luc = os::Solaris::get_valid_uc_in_signal_handler(thread, uc);
   return os::fetch_frame_from_context(luc, ret_sp, ret_fp);
 }
 
-ExtendedPC os::fetch_frame_from_context(void* ucVoid,
+ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                     intptr_t** ret_sp, intptr_t** ret_fp) {
 
   ExtendedPC  epc;
-  ucontext_t *uc = (ucontext_t*)ucVoid;
+  const ucontext_t *uc = (const ucontext_t*)ucVoid;
 
   if (uc != NULL) {
     epc = os::Solaris::ucontext_get_ExtendedPC(uc);
@@ -228,13 +230,56 @@
   return epc;
 }
 
-frame os::fetch_frame_from_context(void* ucVoid) {
+frame os::fetch_frame_from_context(const void* ucVoid) {
   intptr_t* sp;
   intptr_t* fp;
   ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
   return frame(sp, fp, epc.pc());
 }
 
+frame os::fetch_frame_from_ucontext(Thread* thread, void* ucVoid) {
+  intptr_t* sp;
+  intptr_t* fp;
+  ExtendedPC epc = os::Solaris::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, &fp);
+  return frame(sp, fp, epc.pc());
+}
+
+bool os::Solaris::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+  address pc = (address) os::Solaris::ucontext_get_pc(uc);
+  if (Interpreter::contains(pc)) {
+    // The interpreter performs stack banging after the fixed frame header
+    // has been generated, while the compilers perform it before. To maintain
+    // semantic consistency between interpreted and compiled frames, the
+    // method returns the Java sender of the current frame.
+    *fr = os::fetch_frame_from_ucontext(thread, uc);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // Handling compiled code is more involved.
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to; fall back to the default
+      // stack overflow handling
+      return false;
+    } else {
+      // in compiled code, the stack banging is performed just after the return pc
+      // has been pushed on the stack
+      intptr_t* fp = os::Solaris::ucontext_get_fp(uc);
+      intptr_t* sp = os::Solaris::ucontext_get_sp(uc);
+      *fr = frame(sp + 1, fp, (address)*sp);
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
 frame os::get_sender_for_C_frame(frame* fr) {
   return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
 }
@@ -346,7 +391,7 @@
   }
 #endif // !AMD64
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();  // slow & steady
+  Thread* t = Thread::current_or_null_safe();
 
   // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away
   // (no destructors can be run)
@@ -421,13 +466,31 @@
     if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) {
       address addr = (address) info->si_addr;
       if (thread->in_stack_yellow_zone(addr)) {
-        thread->disable_stack_yellow_zone();
         if (thread->thread_state() == _thread_in_Java) {
+          if (thread->in_stack_reserved_zone(addr)) {
+            frame fr;
+            if (os::Solaris::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+              assert(fr.is_java_frame(), "Must be Java frame");
+              frame activation = SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+              if (activation.sp() != NULL) {
+                thread->disable_stack_reserved_zone();
+                if (activation.is_interpreted_frame()) {
+                  thread->set_reserved_stack_activation((address)(
+                    activation.fp() + frame::interpreter_frame_initial_sp_offset));
+                } else {
+                  thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                }
+                return true;
+              }
+            }
+          }
           // Throw a stack overflow exception.  Guard pages will be reenabled
           // while unwinding the stack.
+          thread->disable_stack_yellow_zone();
           stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
         } else {
           // Thread was in the vm or native code.  Return and try to finish.
+          thread->disable_stack_yellow_zone();
           return true;
         }
       } else if (thread->in_stack_red_zone(addr)) {
@@ -711,10 +774,10 @@
   return false;
 }
 
-void os::print_context(outputStream *st, void *context) {
+void os::print_context(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  ucontext_t *uc = (ucontext_t*)context;
+  const ucontext_t *uc = (const ucontext_t*)context;
   st->print_cr("Registers:");
 #ifdef AMD64
   st->print(  "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
@@ -770,10 +833,10 @@
   print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
 }
 
-void os::print_register_info(outputStream *st, void *context) {
+void os::print_register_info(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  ucontext_t *uc = (ucontext_t*)context;
+  const ucontext_t *uc = (const ucontext_t*)context;
 
   st->print_cr("Register to memory mapping:");
   st->cr();
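
The frame(sp + 1, fp, (address)*sp) construction in the hunk above relies on the
bang in compiled code happening just after the call instruction pushed the return
pc, so *sp is the return address and sp + 1 is the caller's stack pointer. A toy
illustration with an array standing in for the stack; the downward growth and word
layout are simplifying assumptions:

    #include <cstdint>
    #include <cstdio>

    struct Frame { intptr_t* sp; intptr_t* fp; uintptr_t pc; };

    int main() {
      intptr_t stack[8] = {0};
      intptr_t* sp = &stack[3];
      sp[0] = 0x4242;            // return pc pushed by the call instruction
      intptr_t* fp = &stack[6];  // caller's frame pointer, still live

      // Same recovery as the diff: caller sp is one word above the return pc.
      Frame fr{sp + 1, fp, (uintptr_t)sp[0]};
      std::printf("caller sp=%p fp=%p pc=0x%lx\n",
                  (void*)fr.sp, (void*)fr.fp, (unsigned long)fr.pc);
    }
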
--- a/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// True thread-local variable
-__thread Thread * ThreadLocalStorage::_thr_current = NULL;
-
-// Implementations needed to support the shared API
-
-void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
-
-bool ThreadLocalStorage::_initialized = false;
-
-void ThreadLocalStorage::init() {
-  _initialized = true;
-}
-
-bool ThreadLocalStorage::is_initialized() {
-  return _initialized;
-}
-
-Thread* ThreadLocalStorage::get_thread_slow() {
-    return thread();
-}
-
-extern "C" Thread* get_thread() {
-  return ThreadLocalStorage::thread();
-}
--- a/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_SOLARIS_X86_VM_THREADLS_SOLARIS_X86_HPP
-#define OS_CPU_SOLARIS_X86_VM_THREADLS_SOLARIS_X86_HPP
-
-// Solaris specific implementation involves simple, direct use
-// of a compiler-based thread-local variable
-
-private:
-  static __thread Thread * _thr_current;
-
-  static bool _initialized;  // needed for shared API
-
-public:
-  static inline Thread* thread();
-
-#endif // OS_CPU_SOLARIS_X86_VM_THREADLS_SOLARIS_X86_HPP
--- a/src/os_cpu/windows_x86/vm/assembler_windows_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/windows_x86/vm/assembler_windows_x86.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,8 +26,6 @@
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
 
 void MacroAssembler::int3() {
   emit_int8((unsigned char)0xCC);
@@ -58,44 +56,11 @@
 
   prefix(FS_segment);
   movptr(thread, null);
-  assert(ThreadLocalStorage::get_thread_ptr_offset() != 0,
+  assert(os::win32::get_thread_ptr_offset() != 0,
          "Thread Pointer Offset has not been initialized");
-  movl(thread, Address(thread, ThreadLocalStorage::get_thread_ptr_offset()));
+  movl(thread, Address(thread, os::win32::get_thread_ptr_offset()));
 }
-#else
-// call (Thread*)TlsGetValue(thread_index());
-void MacroAssembler::get_thread(Register thread) {
-   if (thread != rax) {
-     push(rax);
-   }
-   push(rdi);
-   push(rsi);
-   push(rdx);
-   push(rcx);
-   push(r8);
-   push(r9);
-   push(r10);
-   // XXX
-   mov(r10, rsp);
-   andq(rsp, -16);
-   push(r10);
-   push(r11);
 
-   movl(c_rarg0, ThreadLocalStorage::thread_index());
-   call(RuntimeAddress((address)TlsGetValue));
+// #else - use shared x86 implementation in cpu/x86/vm/macroAssembler_x86.cpp
 
-   pop(r11);
-   pop(rsp);
-   pop(r10);
-   pop(r9);
-   pop(r8);
-   pop(rcx);
-   pop(rdx);
-   pop(rsi);
-   pop(rdi);
-   if (thread != rax) {
-       mov(thread, rax);
-       pop(rax);
-   }
-}
 #endif
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -85,14 +85,14 @@
     //
     volatile Thread* wrapperthread = thread;
 
-    if ( ThreadLocalStorage::get_thread_ptr_offset() == 0 ) {
+    if (os::win32::get_thread_ptr_offset() == 0) {
       int thread_ptr_offset;
       __asm {
         lea eax, dword ptr wrapperthread;
         sub eax, dword ptr FS:[0H];
         mov thread_ptr_offset, eax
       };
-      ThreadLocalStorage::set_thread_ptr_offset(thread_ptr_offset);
+      os::win32::set_thread_ptr_offset(thread_ptr_offset);
     }
 #ifdef ASSERT
     // Verify that the offset hasn't changed since we initially captured
@@ -105,7 +105,7 @@
         sub eax, dword ptr FS:[0H];
         mov test_thread_ptr_offset, eax
       };
-      assert(test_thread_ptr_offset == ThreadLocalStorage::get_thread_ptr_offset(),
+      assert(test_thread_ptr_offset == os::win32::get_thread_ptr_offset(),
              "thread pointer offset from SEH changed");
     }
 #endif // ASSERT
@@ -359,7 +359,7 @@
  *     while (...) {...  fr = os::get_sender_for_C_frame(&fr); }
  * loop in vmError.cpp. We need to roll our own loop.
  */
-bool os::platform_print_native_stack(outputStream* st, void* context,
+bool os::platform_print_native_stack(outputStream* st, const void* context,
                                      char *buf, int buf_size)
 {
   CONTEXT ctx;
@@ -435,7 +435,7 @@
 }
 #endif // AMD64
 
-ExtendedPC os::fetch_frame_from_context(void* ucVoid,
+ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                     intptr_t** ret_sp, intptr_t** ret_fp) {
 
   ExtendedPC  epc;
@@ -455,7 +455,7 @@
   return epc;
 }
 
-frame os::fetch_frame_from_context(void* ucVoid) {
+frame os::fetch_frame_from_context(const void* ucVoid) {
   intptr_t* sp;
   intptr_t* fp;
   ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
@@ -527,10 +527,10 @@
   }
 }
 
-void os::print_context(outputStream *st, void *context) {
+void os::print_context(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  CONTEXT* uc = (CONTEXT*)context;
+  const CONTEXT* uc = (const CONTEXT*)context;
 
   st->print_cr("Registers:");
 #ifdef AMD64
@@ -588,10 +588,10 @@
 }
 
 
-void os::print_register_info(outputStream *st, void *context) {
+void os::print_register_info(outputStream *st, const void *context) {
   if (context == NULL) return;
 
-  CONTEXT* uc = (CONTEXT*)context;
+  const CONTEXT* uc = (const CONTEXT*)context;
 
   st->print_cr("Register to memory mapping:");
   st->cr();
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -66,7 +66,7 @@
 
 #ifdef AMD64
 #define PLATFORM_PRINT_NATIVE_STACK 1
-static bool platform_print_native_stack(outputStream* st, void* context,
+static bool platform_print_native_stack(outputStream* st, const void* context,
                                         char *buf, int buf_size);
 #endif
 
--- a/src/os_cpu/windows_x86/vm/threadLS_windows_x86.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Provides an entry point we can link against and
-// a buffer we can emit code into. The buffer is
-// filled by ThreadLocalStorage::generate_code_for_get_thread
-// and called from ThreadLocalStorage::thread()
-
-int ThreadLocalStorage::_thread_ptr_offset = 0;
-
-static void call_wrapper_dummy() {}
-
-// We need to call the os_exception_wrapper once so that it sets
-// up the offset from FS of the thread pointer.
-void ThreadLocalStorage::generate_code_for_get_thread() {
-      os::os_exception_wrapper( (java_call_t)call_wrapper_dummy,
-                                NULL, NULL, NULL, NULL);
-}
-
-void ThreadLocalStorage::pd_init() { }
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread)  {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
--- a/src/os_cpu/windows_x86/vm/threadLS_windows_x86.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_WINDOWS_X86_VM_THREADLS_WINDOWS_X86_HPP
-#define OS_CPU_WINDOWS_X86_VM_THREADLS_WINDOWS_X86_HPP
-
-// Processor dependent parts of ThreadLocalStorage
-
-protected:
-
-  static int _thread_ptr_offset;
-
-public:
-
-  // Java Thread
-  static inline Thread* thread() {
-    return (Thread*)TlsGetValue(thread_index());
-  }
-
-  static inline Thread* get_thread() {
-    return (Thread*)TlsGetValue(thread_index());
-  }
-
-  static inline void set_thread_ptr_offset( int offset ) { _thread_ptr_offset = offset; }
-
-  static inline int get_thread_ptr_offset() { return _thread_ptr_offset; }
-
-#endif // OS_CPU_WINDOWS_X86_VM_THREADLS_WINDOWS_X86_HPP
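
The deleted header's thread() accessor was a thin wrapper over TlsGetValue. A
Windows-only sketch of that pattern; the slot index and Thread type here are
illustrative, not HotSpot's:

    // Windows-only: compiles against windows.h.
    #include <windows.h>
    #include <cstdio>

    struct Thread { int id; };

    static DWORD thread_index;  // analogous to ThreadLocalStorage::thread_index()

    static Thread* get_thread() {
      return (Thread*)TlsGetValue(thread_index);  // NULL if never set
    }

    int main() {
      thread_index = TlsAlloc();
      Thread me{1};
      TlsSetValue(thread_index, &me);
      std::printf("current thread id: %d\n", get_thread()->id);
      TlsFree(thread_index);
    }
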
--- a/src/share/tools/hsdis/Makefile	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/tools/hsdis/Makefile	Fri Dec 18 12:39:02 2015 -0800
@@ -70,12 +70,12 @@
 else   #linux
 CPU             = $(shell uname -m)
 ARCH1=$(CPU:x86_64=amd64)
-ARCH2=$(ARCH1:i686=i386)
-ARCH=$(ARCH2:ppc64le=ppc64)
+ARCH=$(ARCH1:i686=i386)
 ifdef LP64
 CFLAGS/sparcv9	+= -m64
 CFLAGS/amd64	+= -m64
 CFLAGS/ppc64	+= -m64
+CFLAGS/ppc64le  += -m64 -DABI_ELFv2
 else
 ARCH=$(ARCH1:amd64=i386)
 CFLAGS/i386	+= -m32
--- a/src/share/tools/hsdis/hsdis-demo.c	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/tools/hsdis/hsdis-demo.c	Fri Dec 18 12:39:02 2015 -0800
@@ -66,7 +66,7 @@
   printf("...And now for something completely different:\n");
   void *start = (void*) &main;
   void *end = (void*) &end_of_file;
-#if defined(__ia64) || defined(__powerpc__)
+#if defined(__ia64) || (defined(__powerpc__) && !defined(ABI_ELFv2))
   /* On IA64 and PPC function pointers are pointers to function descriptors */
   start = *((void**)start);
   end = *((void**)end);
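
The #if change above narrows the indirection to ABIs where a function pointer
addresses a descriptor rather than code (ELFv1, but not ELFv2). A toy model of
that dereference; the descriptor layout is a simplified assumption, since real
ELFv1 descriptors also carry a TOC pointer:

    #include <cstdio>

    struct Descriptor { void* entry; void* toc; };

    static void real_entry() { std::puts("called through descriptor"); }

    int main() {
      // A hand-built descriptor standing in for what the ABI would emit.
      Descriptor d{(void*)&real_entry, nullptr};
      void* fnptr = &d;               // what an ELFv1 function pointer holds
      void* start = *(void**)fnptr;   // the dereference done in hsdis-demo.c
      ((void (*)())start)();          // jump to the actual code address
    }
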
--- a/src/share/tools/hsdis/hsdis.c	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/tools/hsdis/hsdis.c	Fri Dec 18 12:39:02 2015 -0800
@@ -461,7 +461,7 @@
 #ifdef LIBARCH_sparcv9
   res = "sparc:v9b";
 #endif
-#ifdef LIBARCH_ppc64
+#if  defined(LIBARCH_ppc64) || defined(LIBARCH_ppc64le)
   res = "powerpc:common64";
 #endif
 #ifdef LIBARCH_aarch64
--- a/src/share/vm/Xusage.txt	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/Xusage.txt	Fri Dec 18 12:39:02 2015 -0800
@@ -8,7 +8,6 @@
                       prepend in front of bootstrap class path
     -Xnoclassgc       disable class garbage collection
     -Xlog:<opts>      control JVM logging, use -Xlog:help for details
-    -Xloggc:<file>    log GC status to a file with time stamps
     -Xbatch           disable background compilation
     -Xms<size>        set initial Java heap size
     -Xmx<size>        set maximum Java heap size
--- a/src/share/vm/c1/c1_Compilation.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/c1/c1_Compilation.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -551,6 +551,7 @@
 , _would_profile(false)
 , _has_unsafe_access(false)
 , _has_method_handle_invokes(false)
+, _has_reserved_stack_access(method->has_reserved_stack_access())
 , _bailout_msg(NULL)
 , _exception_info_list(NULL)
 , _allocator(NULL)
--- a/src/share/vm/c1/c1_Compilation.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/c1/c1_Compilation.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,6 +81,7 @@
   bool               _has_unsafe_access;
   bool               _would_profile;
   bool               _has_method_handle_invokes;  // True if this method has MethodHandle invokes.
+  bool               _has_reserved_stack_access;
   const char*        _bailout_msg;
   ExceptionInfoList* _exception_info_list;
   ExceptionHandlerTable _exception_handler_table;
@@ -171,6 +172,9 @@
   bool     has_method_handle_invokes() const { return _has_method_handle_invokes;     }
   void set_has_method_handle_invokes(bool z) {        _has_method_handle_invokes = z; }
 
+  bool     has_reserved_stack_access() const { return _has_reserved_stack_access; }
+  void set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }
+
   DebugInformationRecorder* debug_info_recorder() const; // = _env->debug_info();
   Dependencies* dependency_recorder() const; // = _env->dependencies()
   ImplicitExceptionTable* implicit_exception_table()     { return &_implicit_exception_table; }
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -3322,7 +3322,13 @@
 
   // method handle invokes
   if (callee->is_method_handle_intrinsic()) {
-    return try_method_handle_inline(callee);
+    if (try_method_handle_inline(callee)) {
+      if (callee->has_reserved_stack_access()) {
+        compilation()->set_has_reserved_stack_access(true);
+      }
+      return true;
+    }
+    return false;
   }
 
   // handle intrinsics
@@ -3330,6 +3336,9 @@
       (CheckIntrinsics ? callee->intrinsic_candidate() : true)) {
     if (try_inline_intrinsics(callee)) {
       print_inlining(callee, "intrinsic");
+      if (callee->has_reserved_stack_access()) {
+        compilation()->set_has_reserved_stack_access(true);
+      }
       return true;
     }
     // try normal inlining
@@ -3346,8 +3355,12 @@
   if (bc == Bytecodes::_illegal) {
     bc = code();
   }
-  if (try_inline_full(callee, holder_known, bc, receiver))
+  if (try_inline_full(callee, holder_known, bc, receiver)) {
+    if (callee->has_reserved_stack_access()) {
+      compilation()->set_has_reserved_stack_access(true);
+    }
     return true;
+  }
 
   // Entire compilation could fail during try_inline_full call.
   // In that case printing inlining decision info is useless.
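
The three GraphBuilder changes above all apply the same rule: when inlining a
callee succeeds, OR the callee's reserved-stack attribute into the enclosing
compilation. A sketch of that pattern with stand-in types (Compilation, Callee);
the template wrapper is an illustration of the rule, not the actual code shape:

    #include <cstdio>

    struct Callee { bool reserved_stack; };

    struct Compilation { bool has_reserved_stack_access = false; };

    // Wraps any of the three inline paths (method handle, intrinsic, full).
    template <typename TryInline>
    bool inline_and_propagate(Compilation& c, const Callee& callee, TryInline fn) {
      if (!fn(callee)) return false;          // inlining failed: record nothing
      if (callee.reserved_stack)
        c.has_reserved_stack_access = true;   // sticky across all inlined callees
      return true;
    }

    int main() {
      Compilation c;
      Callee annotated{true};
      inline_and_propagate(c, annotated, [](const Callee&) { return true; });
      std::printf("compilation flag: %d\n", (int)c.has_reserved_stack_access);
    }
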
--- a/src/share/vm/c1/c1_Runtime1.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -502,7 +502,7 @@
   // Check the stack guard pages and reenable them if necessary and there is
   // enough space on the stack to do so.  Use fast exceptions only if the guard
   // pages are enabled.
-  bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
+  bool guard_pages_enabled = thread->stack_guards_enabled();
   if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
 
   if (JvmtiExport::can_post_on_exceptions()) {
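
The switch from stack_yellow_zone_enabled() to stack_guards_enabled() means the
fast exception path now requires every guard page, including the reserved zone,
to be armed. A toy model of the check-then-reguard sequence, assuming simplified
Thread state rather than HotSpot's:

    #include <cstdio>

    struct Thread {
      bool yellow_armed = false, reserved_armed = false;
      bool stack_guards_enabled() const { return yellow_armed && reserved_armed; }
      bool reguard_stack() {        // in this toy, re-arming always succeeds
        yellow_armed = reserved_armed = true;
        return true;
      }
    };

    int main() {
      Thread t;
      bool guard_pages_enabled = t.stack_guards_enabled();
      if (!guard_pages_enabled) guard_pages_enabled = t.reguard_stack();
      std::printf("fast path allowed: %d\n", (int)guard_pages_enabled);
    }
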
--- a/src/share/vm/ci/ciMethod.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/ci/ciMethod.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -91,6 +91,7 @@
   _balanced_monitors  = !_uses_monitors || h_m()->access_flags().is_monitor_matching();
   _is_c1_compilable   = !h_m()->is_not_c1_compilable();
   _is_c2_compilable   = !h_m()->is_not_c2_compilable();
+  _has_reserved_stack_access = h_m()->has_reserved_stack_access();
   // Lazy fields, filled in on demand.  Require allocation.
   _code               = NULL;
   _exception_handlers = NULL;
--- a/src/share/vm/ci/ciMethod.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/ci/ciMethod.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,6 +81,7 @@
   bool _is_c1_compilable;
   bool _is_c2_compilable;
   bool _can_be_statically_bound;
+  bool _has_reserved_stack_access;
 
   // Lazy fields, filled in on demand
   address              _code;
@@ -322,6 +323,7 @@
   bool is_accessor    () const;
   bool is_initializer () const;
   bool can_be_statically_bound() const           { return _can_be_statically_bound; }
+  bool has_reserved_stack_access() const         { return _has_reserved_stack_access; }
   bool is_boxing_method() const;
   bool is_unboxing_method() const;
 
--- a/src/share/vm/classfile/classFileError.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/classFileError.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -33,28 +33,39 @@
 PRAGMA_DIAG_PUSH
 PRAGMA_FORMAT_NONLITERAL_IGNORED
 
-void ClassFileParser::classfile_parse_error(const char* msg, TRAPS) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_ClassFormatError(),
-                       msg, _class_name->as_C_string());
+void ClassFileParser::classfile_parse_error(const char* msg, TRAPS) const {
+  assert(_class_name != NULL, "invariant");
+  ResourceMark rm(THREAD);
+  Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_ClassFormatError(),
+                     msg, _class_name->as_C_string());
+}
+
+void ClassFileParser::classfile_parse_error(const char* msg,
+                                            int index,
+                                            TRAPS) const {
+  assert(_class_name != NULL, "invariant");
+  ResourceMark rm(THREAD);
+  Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_ClassFormatError(),
+                     msg, index, _class_name->as_C_string());
 }
 
-void ClassFileParser::classfile_parse_error(const char* msg, int index, TRAPS) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_ClassFormatError(),
-                       msg, index, _class_name->as_C_string());
+void ClassFileParser::classfile_parse_error(const char* msg,
+                                            const char* name,
+                                            TRAPS) const {
+  assert(_class_name != NULL, "invariant");
+  ResourceMark rm(THREAD);
+  Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_ClassFormatError(),
+                     msg, name, _class_name->as_C_string());
 }
 
-void ClassFileParser::classfile_parse_error(const char* msg, const char *name, TRAPS) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_ClassFormatError(),
-                       msg, name, _class_name->as_C_string());
-}
-
-void ClassFileParser::classfile_parse_error(const char* msg, int index, const char *name, TRAPS) {
-    ResourceMark rm(THREAD);
-    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_ClassFormatError(),
-                       msg, index, name, _class_name->as_C_string());
+void ClassFileParser::classfile_parse_error(const char* msg,
+                                            int index,
+                                            const char* name,
+                                            TRAPS) const {
+  assert(_class_name != NULL, "invariant");
+  ResourceMark rm(THREAD);
+  Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_ClassFormatError(),
+                     msg, index, name, _class_name->as_C_string());
 }
 
 PRAGMA_DIAG_POP
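
The refactored classfile_parse_error overloads all route a printf-style message
plus the class name to a single reporter. A compilable sketch of that overload
pattern; report() below just prints, where the real code raises ClassFormatError
via Exceptions::fthrow:

    #include <cstdarg>
    #include <cstdio>

    static void report(const char* fmt, ...) {
      va_list ap;
      va_start(ap, fmt);
      std::vfprintf(stderr, fmt, ap);
      va_end(ap);
      std::fputc('\n', stderr);
    }

    static void parse_error(const char* msg, const char* class_name) {
      report(msg, class_name);
    }

    static void parse_error(const char* msg, int index, const char* class_name) {
      report(msg, index, class_name);
    }

    int main() {
      parse_error("Truncated class file %s", "Foo");
      parse_error("Unknown constant tag %u in class file %s", 99, "Foo");
    }
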
--- a/src/share/vm/classfile/classFileParser.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/classFileParser.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -21,9 +21,9 @@
  * questions.
  *
  */
-
 #include "precompiled.hpp"
 #include "classfile/classFileParser.hpp"
+#include "classfile/classFileStream.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/defaultMethods.hpp"
@@ -37,16 +37,17 @@
 #include "memory/allocation.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/oopFactory.hpp"
-#include "memory/referenceType.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
-#include "oops/constantPool.hpp"
+#include "oops/annotations.hpp"
 #include "oops/fieldStreams.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/klassVtable.hpp"
+#include "oops/metadata.hpp"
 #include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
 #include "prims/jvm.h"
 #include "prims/jvmtiExport.hpp"
@@ -58,6 +59,7 @@
 #include "runtime/timer.hpp"
 #include "services/classLoadingService.hpp"
 #include "services/threadService.hpp"
+#include "trace/traceMacros.hpp"
 #include "utilities/array.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -98,20 +100,25 @@
 // Extension method support.
 #define JAVA_8_VERSION                    52
 
-void ClassFileParser::parse_constant_pool_entries(int length, TRAPS) {
+enum { LegalClass, LegalField, LegalMethod }; // used to verify unqualified names
+
+void ClassFileParser::parse_constant_pool_entries(const ClassFileStream* const stream,
+                                                  ConstantPool* cp,
+                                                  const int length,
+                                                  TRAPS) {
+  assert(stream != NULL, "invariant");
+  assert(cp != NULL, "invariant");
+
   // Use a local copy of ClassFileStream. It helps the C++ compiler to optimize
   // this function (_current can be allocated in a register, with scalar
   // replacement of aggregates). The _current pointer is copied back to
   // stream() when this function returns. DON'T call another method within
   // this method that uses stream().
-  ClassFileStream* cfs0 = stream();
-  ClassFileStream cfs1 = *cfs0;
-  ClassFileStream* cfs = &cfs1;
-#ifdef ASSERT
-  assert(cfs->allocated_on_stack(),"should be local");
-  u1* old_current = cfs0->current();
-#endif
-  Handle class_loader(THREAD, _loader_data->class_loader());
+  const ClassFileStream cfs1 = *stream;
+  const ClassFileStream* const cfs = &cfs1;
+
+  assert(cfs->allocated_on_stack(), "should be local");
+  debug_only(const u1* const old_current = stream->current();)
 
   // Used for batching symbol allocations.
   const char* names[SymbolTable::symbol_alloc_batch_size];
@@ -125,48 +132,43 @@
     // Each of the following case guarantees one more byte in the stream
     // for the following tag or the access_flags following constant pool,
     // so we don't need bounds-check for reading tag.
-    u1 tag = cfs->get_u1_fast();
+    const u1 tag = cfs->get_u1_fast();
     switch (tag) {
-      case JVM_CONSTANT_Class :
-        {
-          cfs->guarantee_more(3, CHECK);  // name_index, tag/access_flags
-          u2 name_index = cfs->get_u2_fast();
-          _cp->klass_index_at_put(index, name_index);
-        }
+      case JVM_CONSTANT_Class : {
+        cfs->guarantee_more(3, CHECK);  // name_index, tag/access_flags
+        const u2 name_index = cfs->get_u2_fast();
+        cp->klass_index_at_put(index, name_index);
         break;
-      case JVM_CONSTANT_Fieldref :
-        {
-          cfs->guarantee_more(5, CHECK);  // class_index, name_and_type_index, tag/access_flags
-          u2 class_index = cfs->get_u2_fast();
-          u2 name_and_type_index = cfs->get_u2_fast();
-          _cp->field_at_put(index, class_index, name_and_type_index);
-        }
+      }
+      case JVM_CONSTANT_Fieldref: {
+        cfs->guarantee_more(5, CHECK);  // class_index, name_and_type_index, tag/access_flags
+        const u2 class_index = cfs->get_u2_fast();
+        const u2 name_and_type_index = cfs->get_u2_fast();
+        cp->field_at_put(index, class_index, name_and_type_index);
         break;
-      case JVM_CONSTANT_Methodref :
-        {
-          cfs->guarantee_more(5, CHECK);  // class_index, name_and_type_index, tag/access_flags
-          u2 class_index = cfs->get_u2_fast();
-          u2 name_and_type_index = cfs->get_u2_fast();
-          _cp->method_at_put(index, class_index, name_and_type_index);
-        }
+      }
+      case JVM_CONSTANT_Methodref: {
+        cfs->guarantee_more(5, CHECK);  // class_index, name_and_type_index, tag/access_flags
+        const u2 class_index = cfs->get_u2_fast();
+        const u2 name_and_type_index = cfs->get_u2_fast();
+        cp->method_at_put(index, class_index, name_and_type_index);
         break;
-      case JVM_CONSTANT_InterfaceMethodref :
-        {
-          cfs->guarantee_more(5, CHECK);  // class_index, name_and_type_index, tag/access_flags
-          u2 class_index = cfs->get_u2_fast();
-          u2 name_and_type_index = cfs->get_u2_fast();
-          _cp->interface_method_at_put(index, class_index, name_and_type_index);
-        }
+      }
+      case JVM_CONSTANT_InterfaceMethodref: {
+        cfs->guarantee_more(5, CHECK);  // class_index, name_and_type_index, tag/access_flags
+        const u2 class_index = cfs->get_u2_fast();
+        const u2 name_and_type_index = cfs->get_u2_fast();
+        cp->interface_method_at_put(index, class_index, name_and_type_index);
         break;
-      case JVM_CONSTANT_String :
-        {
-          cfs->guarantee_more(3, CHECK);  // string_index, tag/access_flags
-          u2 string_index = cfs->get_u2_fast();
-          _cp->string_index_at_put(index, string_index);
-        }
+      }
+      case JVM_CONSTANT_String : {
+        cfs->guarantee_more(3, CHECK);  // string_index, tag/access_flags
+        const u2 string_index = cfs->get_u2_fast();
+        cp->string_index_at_put(index, string_index);
         break;
+      }
       case JVM_CONSTANT_MethodHandle :
-      case JVM_CONSTANT_MethodType :
+      case JVM_CONSTANT_MethodType: {
         if (_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
           classfile_parse_error(
             "Class file version does not support constant tag %u in class file %s",
@@ -174,379 +176,401 @@
         }
         if (tag == JVM_CONSTANT_MethodHandle) {
           cfs->guarantee_more(4, CHECK);  // ref_kind, method_index, tag/access_flags
-          u1 ref_kind = cfs->get_u1_fast();
-          u2 method_index = cfs->get_u2_fast();
-          _cp->method_handle_index_at_put(index, ref_kind, method_index);
-        } else if (tag == JVM_CONSTANT_MethodType) {
+          const u1 ref_kind = cfs->get_u1_fast();
+          const u2 method_index = cfs->get_u2_fast();
+          cp->method_handle_index_at_put(index, ref_kind, method_index);
+        }
+        else if (tag == JVM_CONSTANT_MethodType) {
           cfs->guarantee_more(3, CHECK);  // signature_index, tag/access_flags
-          u2 signature_index = cfs->get_u2_fast();
-          _cp->method_type_index_at_put(index, signature_index);
-        } else {
+          const u2 signature_index = cfs->get_u2_fast();
+          cp->method_type_index_at_put(index, signature_index);
+        }
+        else {
           ShouldNotReachHere();
         }
         break;
-      case JVM_CONSTANT_InvokeDynamic :
-        {
-          if (_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
-            classfile_parse_error(
+      }
+      case JVM_CONSTANT_InvokeDynamic : {
+        if (_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
+          classfile_parse_error(
               "Class file version does not support constant tag %u in class file %s",
               tag, CHECK);
-          }
-          cfs->guarantee_more(5, CHECK);  // bsm_index, nt, tag/access_flags
-          u2 bootstrap_specifier_index = cfs->get_u2_fast();
-          u2 name_and_type_index = cfs->get_u2_fast();
-          if (_max_bootstrap_specifier_index < (int) bootstrap_specifier_index)
-            _max_bootstrap_specifier_index = (int) bootstrap_specifier_index;  // collect for later
-          _cp->invoke_dynamic_at_put(index, bootstrap_specifier_index, name_and_type_index);
+        }
+        cfs->guarantee_more(5, CHECK);  // bsm_index, nt, tag/access_flags
+        const u2 bootstrap_specifier_index = cfs->get_u2_fast();
+        const u2 name_and_type_index = cfs->get_u2_fast();
+        if (_max_bootstrap_specifier_index < (int) bootstrap_specifier_index) {
+          _max_bootstrap_specifier_index = (int) bootstrap_specifier_index;  // collect for later
         }
+        cp->invoke_dynamic_at_put(index, bootstrap_specifier_index, name_and_type_index);
         break;
-      case JVM_CONSTANT_Integer :
-        {
-          cfs->guarantee_more(5, CHECK);  // bytes, tag/access_flags
-          u4 bytes = cfs->get_u4_fast();
-          _cp->int_at_put(index, (jint) bytes);
-        }
+      }
+      case JVM_CONSTANT_Integer: {
+        cfs->guarantee_more(5, CHECK);  // bytes, tag/access_flags
+        const u4 bytes = cfs->get_u4_fast();
+        cp->int_at_put(index, (jint)bytes);
         break;
-      case JVM_CONSTANT_Float :
-        {
-          cfs->guarantee_more(5, CHECK);  // bytes, tag/access_flags
-          u4 bytes = cfs->get_u4_fast();
-          _cp->float_at_put(index, *(jfloat*)&bytes);
-        }
+      }
+      case JVM_CONSTANT_Float: {
+        cfs->guarantee_more(5, CHECK);  // bytes, tag/access_flags
+        const u4 bytes = cfs->get_u4_fast();
+        cp->float_at_put(index, *(jfloat*)&bytes);
         break;
-      case JVM_CONSTANT_Long :
+      }
+      case JVM_CONSTANT_Long: {
         // A mangled type might cause you to overrun allocated memory
-        guarantee_property(index+1 < length,
+        guarantee_property(index + 1 < length,
                            "Invalid constant pool entry %u in class file %s",
-                           index, CHECK);
-        {
-          cfs->guarantee_more(9, CHECK);  // bytes, tag/access_flags
-          u8 bytes = cfs->get_u8_fast();
-          _cp->long_at_put(index, bytes);
-        }
+                           index,
+                           CHECK);
+        cfs->guarantee_more(9, CHECK);  // bytes, tag/access_flags
+        const u8 bytes = cfs->get_u8_fast();
+        cp->long_at_put(index, bytes);
         index++;   // Skip entry following eight-byte constant, see JVM book p. 98
         break;
-      case JVM_CONSTANT_Double :
+      }
+      case JVM_CONSTANT_Double: {
         // A mangled type might cause you to overrun allocated memory
         guarantee_property(index+1 < length,
                            "Invalid constant pool entry %u in class file %s",
-                           index, CHECK);
-        {
-          cfs->guarantee_more(9, CHECK);  // bytes, tag/access_flags
-          u8 bytes = cfs->get_u8_fast();
-          _cp->double_at_put(index, *(jdouble*)&bytes);
-        }
+                           index,
+                           CHECK);
+        cfs->guarantee_more(9, CHECK);  // bytes, tag/access_flags
+        const u8 bytes = cfs->get_u8_fast();
+        cp->double_at_put(index, *(jdouble*)&bytes);
         index++;   // Skip entry following eight-byte constant, see JVM book p. 98
         break;
-      case JVM_CONSTANT_NameAndType :
-        {
-          cfs->guarantee_more(5, CHECK);  // name_index, signature_index, tag/access_flags
-          u2 name_index = cfs->get_u2_fast();
-          u2 signature_index = cfs->get_u2_fast();
-          _cp->name_and_type_at_put(index, name_index, signature_index);
+      }
+      case JVM_CONSTANT_NameAndType: {
+        cfs->guarantee_more(5, CHECK);  // name_index, signature_index, tag/access_flags
+        const u2 name_index = cfs->get_u2_fast();
+        const u2 signature_index = cfs->get_u2_fast();
+        cp->name_and_type_at_put(index, name_index, signature_index);
+        break;
+      }
+      case JVM_CONSTANT_Utf8 : {
+        cfs->guarantee_more(2, CHECK);  // utf8_length
+        u2  utf8_length = cfs->get_u2_fast();
+        const u1* utf8_buffer = cfs->get_u1_buffer();
+        assert(utf8_buffer != NULL, "null utf8 buffer");
+        // Got utf8 string, guarantee utf8_length+1 bytes, set stream position forward.
+        cfs->guarantee_more(utf8_length+1, CHECK);  // utf8 string, tag/access_flags
+        cfs->skip_u1_fast(utf8_length);
+
+        // Before storing the symbol, make sure it's legal
+        if (_need_verify) {
+          verify_legal_utf8(utf8_buffer, utf8_length, CHECK);
+        }
+
+        if (has_cp_patch_at(index)) {
+          Handle patch = clear_cp_patch_at(index);
+          guarantee_property(java_lang_String::is_instance(patch()),
+                             "Illegal utf8 patch at %d in class file %s",
+                             index,
+                             CHECK);
+          const char* const str = java_lang_String::as_utf8_string(patch());
+          // (could use java_lang_String::as_symbol instead, but might as well batch them)
+          utf8_buffer = (const u1*) str;
+          utf8_length = (int) strlen(str);
+        }
+
+        unsigned int hash;
+        Symbol* const result = SymbolTable::lookup_only((const char*)utf8_buffer,
+                                                        utf8_length,
+                                                        hash);
+        if (result == NULL) {
+          names[names_count] = (const char*)utf8_buffer;
+          lengths[names_count] = utf8_length;
+          indices[names_count] = index;
+          hashValues[names_count++] = hash;
+          if (names_count == SymbolTable::symbol_alloc_batch_size) {
+            SymbolTable::new_symbols(_loader_data,
+                                     cp,
+                                     names_count,
+                                     names,
+                                     lengths,
+                                     indices,
+                                     hashValues,
+                                     CHECK);
+            names_count = 0;
+          }
+        } else {
+          cp->symbol_at_put(index, result);
         }
         break;
-      case JVM_CONSTANT_Utf8 :
-        {
-          cfs->guarantee_more(2, CHECK);  // utf8_length
-          u2  utf8_length = cfs->get_u2_fast();
-          u1* utf8_buffer = cfs->get_u1_buffer();
-          assert(utf8_buffer != NULL, "null utf8 buffer");
-          // Got utf8 string, guarantee utf8_length+1 bytes, set stream position forward.
-          cfs->guarantee_more(utf8_length+1, CHECK);  // utf8 string, tag/access_flags
-          cfs->skip_u1_fast(utf8_length);
-
-          // Before storing the symbol, make sure it's legal
-          if (_need_verify) {
-            verify_legal_utf8((unsigned char*)utf8_buffer, utf8_length, CHECK);
-          }
-
-          if (has_cp_patch_at(index)) {
-            Handle patch = clear_cp_patch_at(index);
-            guarantee_property(java_lang_String::is_instance(patch()),
-                               "Illegal utf8 patch at %d in class file %s",
-                               index, CHECK);
-            char* str = java_lang_String::as_utf8_string(patch());
-            // (could use java_lang_String::as_symbol instead, but might as well batch them)
-            utf8_buffer = (u1*) str;
-            utf8_length = (int) strlen(str);
-          }
-
-          unsigned int hash;
-          Symbol* result = SymbolTable::lookup_only((char*)utf8_buffer, utf8_length, hash);
-          if (result == NULL) {
-            names[names_count] = (char*)utf8_buffer;
-            lengths[names_count] = utf8_length;
-            indices[names_count] = index;
-            hashValues[names_count++] = hash;
-            if (names_count == SymbolTable::symbol_alloc_batch_size) {
-              SymbolTable::new_symbols(_loader_data, _cp, names_count, names, lengths, indices, hashValues, CHECK);
-              names_count = 0;
-            }
-          } else {
-            _cp->symbol_at_put(index, result);
-          }
-        }
+      }
+      default: {
+        classfile_parse_error("Unknown constant tag %u in class file %s",
+                              tag,
+                              CHECK);
         break;
-      default:
-        classfile_parse_error(
-          "Unknown constant tag %u in class file %s", tag, CHECK);
-        break;
-    }
-  }
+      }
+    } // end of switch(tag)
+  } // end of for
 
   // Allocate the remaining symbols
   if (names_count > 0) {
-    SymbolTable::new_symbols(_loader_data, _cp, names_count, names, lengths, indices, hashValues, CHECK);
+    SymbolTable::new_symbols(_loader_data,
+                             cp,
+                             names_count,
+                             names,
+                             lengths,
+                             indices,
+                             hashValues,
+                             CHECK);
   }
 
-  // Copy _current pointer of local copy back to stream().
-#ifdef ASSERT
-  assert(cfs0->current() == old_current, "non-exclusive use of stream()");
-#endif
-  cfs0->set_current(cfs1.current());
+  // Copy _current pointer of local copy back to stream.
+  assert(stream->current() == old_current, "non-exclusive use of stream");
+  stream->set_current(cfs1.current());
 }
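
The loop above amortizes SymbolTable insertions: names that miss in lookup_only() are staged in parallel arrays and committed in bulk, with a final flush for any remainder. Below is a minimal, self-contained sketch of that same shape; Table and kBatchSize are stand-ins, not the real SymbolTable API:

#include <algorithm>
#include <string>
#include <vector>

struct Table {
  std::vector<std::string> entries;
  bool lookup_only(const std::string& s) const {         // fast path, no insert
    return std::find(entries.begin(), entries.end(), s) != entries.end();
  }
  void new_batch(const std::string* batch, int count) {  // one bulk insertion
    entries.insert(entries.end(), batch, batch + count);
  }
};

static const int kBatchSize = 8;  // stand-in for SymbolTable::symbol_alloc_batch_size

void intern_all(Table& t, const std::vector<std::string>& input) {
  std::string batch[kBatchSize];
  int count = 0;
  for (const std::string& s : input) {
    if (t.lookup_only(s)) continue;    // already interned; nothing to stage
    batch[count++] = s;
    if (count == kBatchSize) {         // flush a full batch
      t.new_batch(batch, count);
      count = 0;
    }
  }
  if (count > 0) {                     // allocate the remaining entries
    t.new_batch(batch, count);
  }
}
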
 
-bool inline valid_cp_range(int index, int length) { return (index > 0 && index < length); }
-
-inline Symbol* check_symbol_at(constantPoolHandle cp, int index) {
-  if (valid_cp_range(index, cp->length()) && cp->tag_at(index).is_utf8())
+static inline bool valid_cp_range(int index, int length) {
+  return (index > 0 && index < length);
+}
+
+static inline Symbol* check_symbol_at(const ConstantPool* cp, int index) {
+  assert(cp != NULL, "invariant");
+  if (valid_cp_range(index, cp->length()) && cp->tag_at(index).is_utf8()) {
     return cp->symbol_at(index);
-  else
-    return NULL;
+  }
+  return NULL;
 }
 
 #ifdef ASSERT
 PRAGMA_DIAG_PUSH
 PRAGMA_FORMAT_NONLITERAL_IGNORED
-void ClassFileParser::report_assert_property_failure(const char* msg, TRAPS) {
+void ClassFileParser::report_assert_property_failure(const char* msg, TRAPS) const {
   ResourceMark rm(THREAD);
   fatal(msg, _class_name->as_C_string());
 }
 
-void ClassFileParser::report_assert_property_failure(const char* msg, int index, TRAPS) {
+void ClassFileParser::report_assert_property_failure(const char* msg,
+                                                     int index,
+                                                     TRAPS) const {
   ResourceMark rm(THREAD);
   fatal(msg, index, _class_name->as_C_string());
 }
 PRAGMA_DIAG_POP
 #endif
 
-constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
-  ClassFileStream* cfs = stream();
-  constantPoolHandle nullHandle;
-
-  cfs->guarantee_more(3, CHECK_(nullHandle)); // length, first cp tag
-  u2 length = cfs->get_u2_fast();
-  guarantee_property(
-    length >= 1, "Illegal constant pool size %u in class file %s",
-    length, CHECK_(nullHandle));
-  ConstantPool* constant_pool = ConstantPool::allocate(_loader_data, length,
-                                                        CHECK_(nullHandle));
-  _cp = constant_pool; // save in case of errors
-  constantPoolHandle cp (THREAD, constant_pool);
+void ClassFileParser::parse_constant_pool(const ClassFileStream* const stream,
+                                          ConstantPool* const cp,
+                                          const int length,
+                                          TRAPS) {
+  assert(cp != NULL, "invariant");
+  assert(stream != NULL, "invariant");
 
   // parsing constant pool entries
-  parse_constant_pool_entries(length, CHECK_(nullHandle));
+  parse_constant_pool_entries(stream, cp, length, CHECK);
 
   int index = 1;  // declared outside of loops for portability
 
-  // first verification pass - validate cross references and fixup class and string constants
+  // first verification pass - validate cross references
+  // and fixup class and string constants
   for (index = 1; index < length; index++) {          // Index 0 is unused
-    jbyte tag = cp->tag_at(index).value();
+    const jbyte tag = cp->tag_at(index).value();
     switch (tag) {
-      case JVM_CONSTANT_Class :
+      case JVM_CONSTANT_Class: {
         ShouldNotReachHere();     // Only JVM_CONSTANT_ClassIndex should be present
         break;
-      case JVM_CONSTANT_Fieldref :
+      }
+      case JVM_CONSTANT_Fieldref:
         // fall through
-      case JVM_CONSTANT_Methodref :
+      case JVM_CONSTANT_Methodref:
         // fall through
-      case JVM_CONSTANT_InterfaceMethodref : {
+      case JVM_CONSTANT_InterfaceMethodref: {
         if (!_need_verify) break;
-        int klass_ref_index = cp->klass_ref_index_at(index);
-        int name_and_type_ref_index = cp->name_and_type_ref_index_at(index);
+        const int klass_ref_index = cp->klass_ref_index_at(index);
+        const int name_and_type_ref_index = cp->name_and_type_ref_index_at(index);
         check_property(valid_klass_reference_at(klass_ref_index),
                        "Invalid constant pool index %u in class file %s",
-                       klass_ref_index,
-                       CHECK_(nullHandle));
+                       klass_ref_index, CHECK);
         check_property(valid_cp_range(name_and_type_ref_index, length) &&
-                       cp->tag_at(name_and_type_ref_index).is_name_and_type(),
-                       "Invalid constant pool index %u in class file %s",
-                       name_and_type_ref_index,
-                       CHECK_(nullHandle));
+          cp->tag_at(name_and_type_ref_index).is_name_and_type(),
+          "Invalid constant pool index %u in class file %s",
+          name_and_type_ref_index, CHECK);
         break;
       }
-      case JVM_CONSTANT_String :
+      case JVM_CONSTANT_String: {
         ShouldNotReachHere();     // Only JVM_CONSTANT_StringIndex should be present
         break;
-      case JVM_CONSTANT_Integer :
+      }
+      case JVM_CONSTANT_Integer:
         break;
-      case JVM_CONSTANT_Float :
+      case JVM_CONSTANT_Float:
         break;
-      case JVM_CONSTANT_Long :
-      case JVM_CONSTANT_Double :
+      case JVM_CONSTANT_Long:
+      case JVM_CONSTANT_Double: {
         index++;
         check_property(
           (index < length && cp->tag_at(index).is_invalid()),
           "Improper constant pool long/double index %u in class file %s",
-          index, CHECK_(nullHandle));
-        break;
-      case JVM_CONSTANT_NameAndType : {
-        if (!_need_verify) break;
-        int name_ref_index = cp->name_ref_index_at(index);
-        int signature_ref_index = cp->signature_ref_index_at(index);
-        check_property(valid_symbol_at(name_ref_index),
-                 "Invalid constant pool index %u in class file %s",
-                 name_ref_index, CHECK_(nullHandle));
-        check_property(valid_symbol_at(signature_ref_index),
-                 "Invalid constant pool index %u in class file %s",
-                 signature_ref_index, CHECK_(nullHandle));
+          index, CHECK);
         break;
       }
-      case JVM_CONSTANT_Utf8 :
+      case JVM_CONSTANT_NameAndType: {
+        if (!_need_verify) break;
+        const int name_ref_index = cp->name_ref_index_at(index);
+        const int signature_ref_index = cp->signature_ref_index_at(index);
+        check_property(valid_symbol_at(name_ref_index),
+          "Invalid constant pool index %u in class file %s",
+          name_ref_index, CHECK);
+        check_property(valid_symbol_at(signature_ref_index),
+          "Invalid constant pool index %u in class file %s",
+          signature_ref_index, CHECK);
         break;
-      case JVM_CONSTANT_UnresolvedClass :         // fall-through
-      case JVM_CONSTANT_UnresolvedClassInError:
+      }
+      case JVM_CONSTANT_Utf8:
+        break;
+      case JVM_CONSTANT_UnresolvedClass:         // fall-through
+      case JVM_CONSTANT_UnresolvedClassInError: {
         ShouldNotReachHere();     // Only JVM_CONSTANT_ClassIndex should be present
         break;
-      case JVM_CONSTANT_ClassIndex :
-        {
-          int class_index = cp->klass_index_at(index);
-          check_property(valid_symbol_at(class_index),
-                 "Invalid constant pool index %u in class file %s",
-                 class_index, CHECK_(nullHandle));
-          cp->unresolved_klass_at_put(index, cp->symbol_at(class_index));
-        }
+      }
+      case JVM_CONSTANT_ClassIndex: {
+        const int class_index = cp->klass_index_at(index);
+        check_property(valid_symbol_at(class_index),
+          "Invalid constant pool index %u in class file %s",
+          class_index, CHECK);
+        cp->unresolved_klass_at_put(index, cp->symbol_at(class_index));
         break;
-      case JVM_CONSTANT_StringIndex :
-        {
-          int string_index = cp->string_index_at(index);
-          check_property(valid_symbol_at(string_index),
-                 "Invalid constant pool index %u in class file %s",
-                 string_index, CHECK_(nullHandle));
-          Symbol* sym = cp->symbol_at(string_index);
-          cp->unresolved_string_at_put(index, sym);
-        }
+      }
+      case JVM_CONSTANT_StringIndex: {
+        const int string_index = cp->string_index_at(index);
+        check_property(valid_symbol_at(string_index),
+          "Invalid constant pool index %u in class file %s",
+          string_index, CHECK);
+        Symbol* const sym = cp->symbol_at(string_index);
+        cp->unresolved_string_at_put(index, sym);
         break;
-      case JVM_CONSTANT_MethodHandle :
-        {
-          int ref_index = cp->method_handle_index_at(index);
-          check_property(
-            valid_cp_range(ref_index, length),
-              "Invalid constant pool index %u in class file %s",
-              ref_index, CHECK_(nullHandle));
-          constantTag tag = cp->tag_at(ref_index);
-          int ref_kind  = cp->method_handle_ref_kind_at(index);
-          switch (ref_kind) {
+      }
+      case JVM_CONSTANT_MethodHandle: {
+        const int ref_index = cp->method_handle_index_at(index);
+        check_property(valid_cp_range(ref_index, length),
+          "Invalid constant pool index %u in class file %s",
+          ref_index, CHECK);
+        const constantTag tag = cp->tag_at(ref_index);
+        const int ref_kind = cp->method_handle_ref_kind_at(index);
+
+        switch (ref_kind) {
           case JVM_REF_getField:
           case JVM_REF_getStatic:
           case JVM_REF_putField:
-          case JVM_REF_putStatic:
+          case JVM_REF_putStatic: {
             check_property(
               tag.is_field(),
               "Invalid constant pool index %u in class file %s (not a field)",
-              ref_index, CHECK_(nullHandle));
+              ref_index, CHECK);
             break;
+          }
           case JVM_REF_invokeVirtual:
-          case JVM_REF_newInvokeSpecial:
+          case JVM_REF_newInvokeSpecial: {
             check_property(
               tag.is_method(),
               "Invalid constant pool index %u in class file %s (not a method)",
-              ref_index, CHECK_(nullHandle));
+              ref_index, CHECK);
             break;
+          }
           case JVM_REF_invokeStatic:
-          case JVM_REF_invokeSpecial:
-            check_property(tag.is_method() ||
-                           ((_major_version >= JAVA_8_VERSION) && tag.is_interface_method()),
-               "Invalid constant pool index %u in class file %s (not a method)",
-               ref_index, CHECK_(nullHandle));
-             break;
-          case JVM_REF_invokeInterface:
+          case JVM_REF_invokeSpecial: {
+            check_property(
+              tag.is_method() ||
+              ((_major_version >= JAVA_8_VERSION) && tag.is_interface_method()),
+              "Invalid constant pool index %u in class file %s (not a method)",
+              ref_index, CHECK);
+            break;
+          }
+          case JVM_REF_invokeInterface: {
             check_property(
               tag.is_interface_method(),
               "Invalid constant pool index %u in class file %s (not an interface method)",
-              ref_index, CHECK_(nullHandle));
+              ref_index, CHECK);
             break;
-          default:
+          }
+          default: {
             classfile_parse_error(
               "Bad method handle kind at constant pool index %u in class file %s",
-              index, CHECK_(nullHandle));
+              index, CHECK);
           }
-          // Keep the ref_index unchanged.  It will be indirected at link-time.
-        }
+        } // switch(refkind)
+        // Keep the ref_index unchanged.  It will be indirected at link-time.
         break;
-      case JVM_CONSTANT_MethodType :
-        {
-          int ref_index = cp->method_type_index_at(index);
-          check_property(valid_symbol_at(ref_index),
-                 "Invalid constant pool index %u in class file %s",
-                 ref_index, CHECK_(nullHandle));
-        }
+      } // case MethodHandle
+      case JVM_CONSTANT_MethodType: {
+        const int ref_index = cp->method_type_index_at(index);
+        check_property(valid_symbol_at(ref_index),
+          "Invalid constant pool index %u in class file %s",
+          ref_index, CHECK);
         break;
-      case JVM_CONSTANT_InvokeDynamic :
-        {
-          int name_and_type_ref_index = cp->invoke_dynamic_name_and_type_ref_index_at(index);
-          check_property(valid_cp_range(name_and_type_ref_index, length) &&
-                         cp->tag_at(name_and_type_ref_index).is_name_and_type(),
-                         "Invalid constant pool index %u in class file %s",
-                         name_and_type_ref_index,
-                         CHECK_(nullHandle));
-          // bootstrap specifier index must be checked later, when BootstrapMethods attr is available
-          break;
-        }
-      default:
+      }
+      case JVM_CONSTANT_InvokeDynamic: {
+        const int name_and_type_ref_index =
+          cp->invoke_dynamic_name_and_type_ref_index_at(index);
+
+        check_property(valid_cp_range(name_and_type_ref_index, length) &&
+          cp->tag_at(name_and_type_ref_index).is_name_and_type(),
+          "Invalid constant pool index %u in class file %s",
+          name_and_type_ref_index, CHECK);
+        // bootstrap specifier index must be checked later,
+        // when BootstrapMethods attr is available
+        break;
+      }
+      default: {
         fatal("bad constant pool tag value %u", cp->tag_at(index).value());
         ShouldNotReachHere();
         break;
-    } // end of switch
+      }
+    } // switch(tag)
   } // end of for
 
   if (_cp_patches != NULL) {
     // need to treat this_class specially...
     int this_class_index;
     {
-      cfs->guarantee_more(8, CHECK_(nullHandle));  // flags, this_class, super_class, infs_len
-      u1* mark = cfs->current();
-      u2 flags         = cfs->get_u2_fast();
-      this_class_index = cfs->get_u2_fast();
-      cfs->set_current(mark);  // revert to mark
+      stream->guarantee_more(8, CHECK);  // flags, this_class, super_class, infs_len
+      const u1* const mark = stream->current();
+      stream->skip_u2_fast(1); // skip flags
+      this_class_index = stream->get_u2_fast();
+      stream->set_current(mark);  // revert to mark
     }
 
     for (index = 1; index < length; index++) {          // Index 0 is unused
       if (has_cp_patch_at(index)) {
         guarantee_property(index != this_class_index,
-                           "Illegal constant pool patch to self at %d in class file %s",
-                           index, CHECK_(nullHandle));
-        patch_constant_pool(cp, index, cp_patch_at(index), CHECK_(nullHandle));
+          "Illegal constant pool patch to self at %d in class file %s",
+          index, CHECK);
+        patch_constant_pool(cp, index, cp_patch_at(index), CHECK);
       }
     }
   }
 
   if (!_need_verify) {
-    return cp;
+    return;
   }
 
   // second verification pass - checks the strings are of the right format.
   // but not yet to the other entries
   for (index = 1; index < length; index++) {
-    jbyte tag = cp->tag_at(index).value();
+    const jbyte tag = cp->tag_at(index).value();
     switch (tag) {
       case JVM_CONSTANT_UnresolvedClass: {
-        Symbol*  class_name = cp->klass_name_at(index);
+        const Symbol* const class_name = cp->klass_name_at(index);
         // check the name, even if _cp_patches will overwrite it
-        verify_legal_class_name(class_name, CHECK_(nullHandle));
+        verify_legal_class_name(class_name, CHECK);
         break;
       }
       case JVM_CONSTANT_NameAndType: {
         if (_need_verify && _major_version >= JAVA_7_VERSION) {
-          int sig_index = cp->signature_ref_index_at(index);
-          int name_index = cp->name_ref_index_at(index);
-          Symbol*  name = cp->symbol_at(name_index);
-          Symbol*  sig = cp->symbol_at(sig_index);
+          const int sig_index = cp->signature_ref_index_at(index);
+          const int name_index = cp->name_ref_index_at(index);
+          const Symbol* const name = cp->symbol_at(name_index);
+          const Symbol* const sig = cp->symbol_at(sig_index);
           if (sig->byte_at(0) == JVM_SIGNATURE_FUNC) {
-            verify_legal_method_signature(name, sig, CHECK_(nullHandle));
+            verify_legal_method_signature(name, sig, CHECK);
           } else {
-            verify_legal_field_signature(name, sig, CHECK_(nullHandle));
+            verify_legal_field_signature(name, sig, CHECK);
           }
         }
         break;
@@ -555,47 +579,50 @@
       case JVM_CONSTANT_Fieldref:
       case JVM_CONSTANT_Methodref:
       case JVM_CONSTANT_InterfaceMethodref: {
-        int name_and_type_ref_index = cp->name_and_type_ref_index_at(index);
+        const int name_and_type_ref_index =
+          cp->name_and_type_ref_index_at(index);
         // already verified to be utf8
-        int name_ref_index = cp->name_ref_index_at(name_and_type_ref_index);
+        const int name_ref_index =
+          cp->name_ref_index_at(name_and_type_ref_index);
         // already verified to be utf8
-        int signature_ref_index = cp->signature_ref_index_at(name_and_type_ref_index);
-        Symbol*  name = cp->symbol_at(name_ref_index);
-        Symbol*  signature = cp->symbol_at(signature_ref_index);
+        const int signature_ref_index =
+          cp->signature_ref_index_at(name_and_type_ref_index);
+        const Symbol* const name = cp->symbol_at(name_ref_index);
+        const Symbol* const signature = cp->symbol_at(signature_ref_index);
         if (tag == JVM_CONSTANT_Fieldref) {
-          verify_legal_field_name(name, CHECK_(nullHandle));
+          verify_legal_field_name(name, CHECK);
           if (_need_verify && _major_version >= JAVA_7_VERSION) {
             // Signature is verified above, when iterating NameAndType_info.
             // Need only to be sure it's the right type.
             if (signature->byte_at(0) == JVM_SIGNATURE_FUNC) {
               throwIllegalSignature(
-                  "Field", name, signature, CHECK_(nullHandle));
+                "Field", name, signature, CHECK);
             }
           } else {
-            verify_legal_field_signature(name, signature, CHECK_(nullHandle));
+            verify_legal_field_signature(name, signature, CHECK);
           }
         } else {
-          verify_legal_method_name(name, CHECK_(nullHandle));
+          verify_legal_method_name(name, CHECK);
           if (_need_verify && _major_version >= JAVA_7_VERSION) {
             // Signature is verified above, when iterating NameAndType_info.
             // Need only to be sure it's the right type.
             if (signature->byte_at(0) != JVM_SIGNATURE_FUNC) {
               throwIllegalSignature(
-                  "Method", name, signature, CHECK_(nullHandle));
+                "Method", name, signature, CHECK);
             }
           } else {
-            verify_legal_method_signature(name, signature, CHECK_(nullHandle));
+            verify_legal_method_signature(name, signature, CHECK);
           }
           if (tag == JVM_CONSTANT_Methodref) {
             // 4509014: If a class method name begins with '<', it must be "<init>".
             assert(name != NULL, "method name in constant pool is null");
-            unsigned int name_len = name->utf8_length();
+            const unsigned int name_len = name->utf8_length();
             assert(name_len > 0, "bad method name");  // already verified as legal name
             if (name->byte_at(0) == '<') {
               if (name != vmSymbols::object_initializer_name()) {
                 classfile_parse_error(
                   "Bad method name at constant pool index %u in class file %s",
-                  name_ref_index, CHECK_(nullHandle));
+                  name_ref_index, CHECK);
               }
             }
           }
@@ -603,84 +630,88 @@
         break;
       }
       case JVM_CONSTANT_MethodHandle: {
-        int ref_index = cp->method_handle_index_at(index);
-        int ref_kind  = cp->method_handle_ref_kind_at(index);
+        const int ref_index = cp->method_handle_index_at(index);
+        const int ref_kind = cp->method_handle_ref_kind_at(index);
         switch (ref_kind) {
-        case JVM_REF_invokeVirtual:
-        case JVM_REF_invokeStatic:
-        case JVM_REF_invokeSpecial:
-        case JVM_REF_newInvokeSpecial:
-          {
-            int name_and_type_ref_index = cp->name_and_type_ref_index_at(ref_index);
-            int name_ref_index = cp->name_ref_index_at(name_and_type_ref_index);
-            Symbol*  name = cp->symbol_at(name_ref_index);
+          case JVM_REF_invokeVirtual:
+          case JVM_REF_invokeStatic:
+          case JVM_REF_invokeSpecial:
+          case JVM_REF_newInvokeSpecial: {
+            const int name_and_type_ref_index =
+              cp->name_and_type_ref_index_at(ref_index);
+            const int name_ref_index =
+              cp->name_ref_index_at(name_and_type_ref_index);
+            const Symbol* const name = cp->symbol_at(name_ref_index);
             if (ref_kind == JVM_REF_newInvokeSpecial) {
               if (name != vmSymbols::object_initializer_name()) {
                 classfile_parse_error(
                   "Bad constructor name at constant pool index %u in class file %s",
-                  name_ref_index, CHECK_(nullHandle));
+                    name_ref_index, CHECK);
               }
             } else {
               if (name == vmSymbols::object_initializer_name()) {
                 classfile_parse_error(
                   "Bad method name at constant pool index %u in class file %s",
-                  name_ref_index, CHECK_(nullHandle));
+                  name_ref_index, CHECK);
               }
             }
+            break;
           }
-          break;
           // Other ref_kinds are already fully checked in previous pass.
-        }
+        } // switch(ref_kind)
         break;
       }
       case JVM_CONSTANT_MethodType: {
-        Symbol* no_name = vmSymbols::type_name(); // place holder
-        Symbol*  signature = cp->method_type_signature_at(index);
-        verify_legal_method_signature(no_name, signature, CHECK_(nullHandle));
+        const Symbol* const no_name = vmSymbols::type_name(); // place holder
+        const Symbol* const signature = cp->method_type_signature_at(index);
+        verify_legal_method_signature(no_name, signature, CHECK);
         break;
       }
       case JVM_CONSTANT_Utf8: {
         assert(cp->symbol_at(index)->refcount() != 0, "count corrupted");
       }
-    }  // end of switch
+    }  // switch(tag)
   }  // end of for
-
-  return cp;
 }
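
One subtlety the first pass enforces above: a CONSTANT_Long or CONSTANT_Double entry occupies two constant-pool slots, and the slot after the entry must carry the invalid (padding) tag. A small self-contained illustration; tag values 3 and 5 are the class-file values for CONSTANT_Integer and CONSTANT_Long, and the tags array is a made-up pool:

#include <cassert>

int main() {
  enum { kInvalid = 0, kInteger = 3, kLong = 5 };  // class-file tag values
  // slots: [0 unused, Integer, Long, <padding>, Integer]
  const int tags[] = { kInvalid, kInteger, kLong, kInvalid, kInteger };
  const int length = sizeof(tags) / sizeof(tags[0]);
  for (int index = 1; index < length; index++) {
    if (tags[index] == kLong) {
      index++;  // a long/double entry consumes the next slot too
      assert(index < length && tags[index] == kInvalid);
    }
  }
  return 0;
}
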
 
-
-void ClassFileParser::patch_constant_pool(const constantPoolHandle& cp, int index, Handle patch, TRAPS) {
+void ClassFileParser::patch_constant_pool(ConstantPool* cp,
+                                          int index,
+                                          Handle patch,
+                                          TRAPS) {
+  assert(cp != NULL, "invariant");
+
   BasicType patch_type = T_VOID;
 
   switch (cp->tag_at(index).value()) {
 
-  case JVM_CONSTANT_UnresolvedClass :
-    // Patching a class means pre-resolving it.
-    // The name in the constant pool is ignored.
-    if (java_lang_Class::is_instance(patch())) {
-      guarantee_property(!java_lang_Class::is_primitive(patch()),
-                         "Illegal class patch at %d in class file %s",
-                         index, CHECK);
-      cp->klass_at_put(index, java_lang_Class::as_Klass(patch()));
-    } else {
-      guarantee_property(java_lang_String::is_instance(patch()),
-                         "Illegal class patch at %d in class file %s",
-                         index, CHECK);
-      Symbol* name = java_lang_String::as_symbol(patch(), CHECK);
-      cp->unresolved_klass_at_put(index, name);
+    case JVM_CONSTANT_UnresolvedClass: {
+      // Patching a class means pre-resolving it.
+      // The name in the constant pool is ignored.
+      if (java_lang_Class::is_instance(patch())) {
+        guarantee_property(!java_lang_Class::is_primitive(patch()),
+                           "Illegal class patch at %d in class file %s",
+                           index, CHECK);
+        cp->klass_at_put(index, java_lang_Class::as_Klass(patch()));
+      } else {
+        guarantee_property(java_lang_String::is_instance(patch()),
+                           "Illegal class patch at %d in class file %s",
+                           index, CHECK);
+        Symbol* const name = java_lang_String::as_symbol(patch(), CHECK);
+        cp->unresolved_klass_at_put(index, name);
+      }
+      break;
     }
-    break;
-
-  case JVM_CONSTANT_String :
-    // skip this patch and don't clear it.  Needs the oop array for resolved
-    // references to be created first.
-    return;
-
-  case JVM_CONSTANT_Integer : patch_type = T_INT;    goto patch_prim;
-  case JVM_CONSTANT_Float :   patch_type = T_FLOAT;  goto patch_prim;
-  case JVM_CONSTANT_Long :    patch_type = T_LONG;   goto patch_prim;
-  case JVM_CONSTANT_Double :  patch_type = T_DOUBLE; goto patch_prim;
-  patch_prim:
+
+    case JVM_CONSTANT_String: {
+      // skip this patch and don't clear it.  Needs the oop array for resolved
+      // references to be created first.
+      return;
+    }
+    case JVM_CONSTANT_Integer: patch_type = T_INT;    goto patch_prim;
+    case JVM_CONSTANT_Float:   patch_type = T_FLOAT;  goto patch_prim;
+    case JVM_CONSTANT_Long:    patch_type = T_LONG;   goto patch_prim;
+    case JVM_CONSTANT_Double:  patch_type = T_DOUBLE; goto patch_prim;
+    patch_prim:
     {
       jvalue value;
       BasicType value_type = java_lang_boxing_object::get_value(patch(), &value);
@@ -688,39 +719,37 @@
                          "Illegal primitive patch at %d in class file %s",
                          index, CHECK);
       switch (value_type) {
-      case T_INT:    cp->int_at_put(index,   value.i); break;
-      case T_FLOAT:  cp->float_at_put(index, value.f); break;
-      case T_LONG:   cp->long_at_put(index,  value.j); break;
-      case T_DOUBLE: cp->double_at_put(index, value.d); break;
-      default:       assert(false, "");
+        case T_INT:    cp->int_at_put(index,   value.i); break;
+        case T_FLOAT:  cp->float_at_put(index, value.f); break;
+        case T_LONG:   cp->long_at_put(index,  value.j); break;
+        case T_DOUBLE: cp->double_at_put(index, value.d); break;
+        default:       assert(false, "unexpected primitive patch type");
       }
-    }
+    } // end patch_prim label
     break;
 
-  default:
-    // %%% TODO: put method handles into CONSTANT_InterfaceMethodref, etc.
-    guarantee_property(!has_cp_patch_at(index),
-                       "Illegal unexpected patch at %d in class file %s",
-                       index, CHECK);
-    return;
-  }
+    default: {
+      // %%% TODO: put method handles into CONSTANT_InterfaceMethodref, etc.
+      guarantee_property(!has_cp_patch_at(index),
+                         "Illegal unexpected patch at %d in class file %s",
+                         index, CHECK);
+      return;
+    }
+  } // end of switch(tag)
 
   // On fall-through, mark the patch as used.
   clear_cp_patch_at(index);
 }
-
-
 class NameSigHash: public ResourceObj {
  public:
-  Symbol*       _name;       // name
-  Symbol*       _sig;        // signature
-  NameSigHash*  _next;       // Next entry in hash table
+  const Symbol*       _name;       // name
+  const Symbol*       _sig;        // signature
+  NameSigHash*        _next;       // Next entry in hash table
 };
 
-
-#define HASH_ROW_SIZE 256
-
-unsigned int hash(Symbol* name, Symbol* sig) {
+static const int HASH_ROW_SIZE = 256;
+
+static unsigned int hash(const Symbol* name, const Symbol* sig) {
   unsigned int raw_hash = 0;
   raw_hash += ((unsigned int)(uintptr_t)name) >> (LogHeapWordSize + 2);
   raw_hash += ((unsigned int)(uintptr_t)sig) >> LogHeapWordSize;
@@ -729,16 +758,15 @@
 }
 
 
-void initialize_hashtable(NameSigHash** table) {
+static void initialize_hashtable(NameSigHash** table) {
   memset((void*)table, 0, sizeof(NameSigHash*) * HASH_ROW_SIZE);
 }
-
 // Return false if the name/sig combination is found in table.
 // Return true if no duplicate is found; name/sig is then added as a new entry in the table.
 // The old format checker uses heap sort to find duplicates.
 // NOTE: caller should guarantee that GC doesn't happen during the life cycle
 // of table since we don't expect Symbol*'s to move.
-bool put_after_lookup(Symbol* name, Symbol* sig, NameSigHash** table) {
+static bool put_after_lookup(const Symbol* name, const Symbol* sig, NameSigHash** table) {
   assert(name != NULL, "name in constant pool is NULL");
 
   // First lookup for duplicates
@@ -763,69 +791,78 @@
   return true;
 }
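
A usage sketch for this detector, shaped like the interface duplicate check in parse_interfaces() below; `syms` is a hypothetical array of interned Symbol*, and the table lives only for the current ResourceMark:

static bool has_duplicate_names(const Symbol* const* syms, int n, TRAPS) {
  ResourceMark rm(THREAD);
  NameSigHash** table =
    NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, NameSigHash*, HASH_ROW_SIZE);
  initialize_hashtable(table);
  debug_only(No_Safepoint_Verifier nsv;)  // Symbol*'s must not move (see NOTE)
  for (int i = 0; i < n; i++) {
    if (!put_after_lookup(syms[i], NULL, table)) {
      return true;                        // (name, NULL) was already present
    }
  }
  return false;
}
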
 
-
-Array<Klass*>* ClassFileParser::parse_interfaces(int length,
-                                                 Handle protection_domain,
-                                                 Symbol* class_name,
-                                                 bool* has_default_methods,
-                                                 TRAPS) {
-  if (length == 0) {
+// Side-effects: populates the _local_interfaces field
+void ClassFileParser::parse_interfaces(const ClassFileStream* const stream,
+                                       const int itfs_len,
+                                       ConstantPool* const cp,
+                                       bool* const has_default_methods,
+                                       TRAPS) {
+  assert(stream != NULL, "invariant");
+  assert(cp != NULL, "invariant");
+  assert(has_default_methods != NULL, "invariant");
+
+  if (itfs_len == 0) {
     _local_interfaces = Universe::the_empty_klass_array();
   } else {
-    ClassFileStream* cfs = stream();
-    assert(length > 0, "only called for length>0");
-    _local_interfaces = MetadataFactory::new_array<Klass*>(_loader_data, length, NULL, CHECK_NULL);
+    assert(itfs_len > 0, "only called for len>0");
+    _local_interfaces = MetadataFactory::new_array<Klass*>(_loader_data, itfs_len, NULL, CHECK);
 
     int index;
-    for (index = 0; index < length; index++) {
-      u2 interface_index = cfs->get_u2(CHECK_NULL);
+    for (index = 0; index < itfs_len; index++) {
+      const u2 interface_index = stream->get_u2(CHECK);
       KlassHandle interf;
       check_property(
         valid_klass_reference_at(interface_index),
         "Interface name has bad constant pool index %u in class file %s",
-        interface_index, CHECK_NULL);
-      if (_cp->tag_at(interface_index).is_klass()) {
-        interf = KlassHandle(THREAD, _cp->resolved_klass_at(interface_index));
+        interface_index, CHECK);
+      if (cp->tag_at(interface_index).is_klass()) {
+        interf = KlassHandle(THREAD, cp->resolved_klass_at(interface_index));
       } else {
-        Symbol*  unresolved_klass  = _cp->klass_name_at(interface_index);
+        Symbol* const unresolved_klass  = cp->klass_name_at(interface_index);
 
         // Don't need to check legal name because it's checked when parsing constant pool.
         // But need to make sure it's not an array type.
         guarantee_property(unresolved_klass->byte_at(0) != JVM_SIGNATURE_ARRAY,
-                           "Bad interface name in class file %s", CHECK_NULL);
-        Handle class_loader(THREAD, _loader_data->class_loader());
+                           "Bad interface name in class file %s", CHECK);
 
         // Call resolve_super so classcircularity is checked
-        Klass* k = SystemDictionary::resolve_super_or_fail(class_name,
-                      unresolved_klass, class_loader, protection_domain,
-                      false, CHECK_NULL);
+        const Klass* const k =
+          SystemDictionary::resolve_super_or_fail(_class_name,
+                                                  unresolved_klass,
+                                                  _loader_data->class_loader(),
+                                                  _protection_domain,
+                                                  false,
+                                                  CHECK);
         interf = KlassHandle(THREAD, k);
       }
 
       if (!interf()->is_interface()) {
-        THROW_MSG_(vmSymbols::java_lang_IncompatibleClassChangeError(), "Implementing class", NULL);
+        THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(),
+                   "Implementing class");
       }
+
       if (InstanceKlass::cast(interf())->has_default_methods()) {
         *has_default_methods = true;
       }
       _local_interfaces->at_put(index, interf());
     }
 
-    if (!_need_verify || length <= 1) {
-      return _local_interfaces;
+    if (!_need_verify || itfs_len <= 1) {
+      return;
     }
 
     // Check if there's any duplicates in interfaces
     ResourceMark rm(THREAD);
-    NameSigHash** interface_names = NEW_RESOURCE_ARRAY_IN_THREAD(
-      THREAD, NameSigHash*, HASH_ROW_SIZE);
+    NameSigHash** interface_names = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD,
+                                                                 NameSigHash*,
+                                                                 HASH_ROW_SIZE);
     initialize_hashtable(interface_names);
     bool dup = false;
     {
       debug_only(No_Safepoint_Verifier nsv;)
-      for (index = 0; index < length; index++) {
-        Klass* k = _local_interfaces->at(index);
-        Symbol* name = k->name();
+      for (index = 0; index < itfs_len; index++) {
+        const Klass* const k = _local_interfaces->at(index);
+        const Symbol* const name = InstanceKlass::cast(k)->name();
         // If no duplicates, add (name, NULL) in hashtable interface_names.
         if (!put_after_lookup(name, NULL, interface_names)) {
           dup = true;
@@ -834,79 +871,340 @@
       }
     }
     if (dup) {
-      classfile_parse_error("Duplicate interface name in class file %s", CHECK_NULL);
+      classfile_parse_error("Duplicate interface name in class file %s", CHECK);
     }
   }
-  return _local_interfaces;
 }
 
-
-void ClassFileParser::verify_constantvalue(int constantvalue_index, int signature_index, TRAPS) {
+void ClassFileParser::verify_constantvalue(const ConstantPool* const cp,
+                                           int constantvalue_index,
+                                           int signature_index,
+                                           TRAPS) const {
   // Make sure the constant pool entry is of a type appropriate to this field
   guarantee_property(
     (constantvalue_index > 0 &&
-      constantvalue_index < _cp->length()),
+      constantvalue_index < cp->length()),
     "Bad initial value index %u in ConstantValue attribute in class file %s",
     constantvalue_index, CHECK);
-  constantTag value_type = _cp->tag_at(constantvalue_index);
-  switch ( _cp->basic_type_for_signature_at(signature_index) ) {
-    case T_LONG:
-      guarantee_property(value_type.is_long(), "Inconsistent constant value type in class file %s", CHECK);
+
+  const constantTag value_type = cp->tag_at(constantvalue_index);
+  switch (cp->basic_type_for_signature_at(signature_index)) {
+    case T_LONG: {
+      guarantee_property(value_type.is_long(),
+                         "Inconsistent constant value type in class file %s",
+                         CHECK);
+      break;
+    }
+    case T_FLOAT: {
+      guarantee_property(value_type.is_float(),
+                         "Inconsistent constant value type in class file %s",
+                         CHECK);
       break;
-    case T_FLOAT:
-      guarantee_property(value_type.is_float(), "Inconsistent constant value type in class file %s", CHECK);
+    }
+    case T_DOUBLE: {
+      guarantee_property(value_type.is_double(),
+                         "Inconsistent constant value type in class file %s",
+                         CHECK);
+      break;
+    }
+    case T_BYTE:
+    case T_CHAR:
+    case T_SHORT:
+    case T_BOOLEAN:
+    case T_INT: {
+      guarantee_property(value_type.is_int(),
+                         "Inconsistent constant value type in class file %s",
+                         CHECK);
+      break;
+    }
+    case T_OBJECT: {
+      guarantee_property((cp->symbol_at(signature_index)->equals("Ljava/lang/String;")
+                         && value_type.is_string()),
+                         "Bad string initial value in class file %s",
+                         CHECK);
       break;
-    case T_DOUBLE:
-      guarantee_property(value_type.is_double(), "Inconsistent constant value type in class file %s", CHECK);
+    }
+    default: {
+      classfile_parse_error("Unable to set initial value %u in class file %s",
+                             constantvalue_index,
+                             CHECK);
+    }
+  }
+}
+
+class AnnotationCollector : public ResourceObj {
+public:
+  enum Location { _in_field, _in_method, _in_class };
+  enum ID {
+    _unknown = 0,
+    _method_CallerSensitive,
+    _method_ForceInline,
+    _method_DontInline,
+    _method_InjectedProfile,
+    _method_LambdaForm_Compiled,
+    _method_LambdaForm_Hidden,
+    _method_HotSpotIntrinsicCandidate,
+    _jdk_internal_vm_annotation_Contended,
+    _field_Stable,
+    _jdk_internal_vm_annotation_ReservedStackAccess,
+    _annotation_LIMIT
+  };
+  const Location _location;
+  int _annotations_present;
+  u2 _contended_group;
+
+  AnnotationCollector(Location location)
+    : _location(location), _annotations_present(0)
+  {
+    assert((int)_annotation_LIMIT <= (int)sizeof(_annotations_present) * BitsPerByte, "");
+  }
+  // If this annotation name has an ID, report it (or _unknown).
+  ID annotation_index(const ClassLoaderData* loader_data, const Symbol* name);
+  // Set the annotation name:
+  void set_annotation(ID id) {
+    assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
+    _annotations_present |= nth_bit((int)id);
+  }
+
+  void remove_annotation(ID id) {
+    assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
+    _annotations_present &= ~nth_bit((int)id);
+  }
+
+  // Report if the annotation is present.
+  bool has_any_annotations() const { return _annotations_present != 0; }
+  bool has_annotation(ID id) const { return (nth_bit((int)id) & _annotations_present) != 0; }
+
+  void set_contended_group(u2 group) { _contended_group = group; }
+  u2 contended_group() const { return _contended_group; }
+
+  bool is_contended() const { return has_annotation(_jdk_internal_vm_annotation_Contended); }
+
+  void set_stable(bool stable) { set_annotation(_field_Stable); }
+  bool is_stable() const { return has_annotation(_field_Stable); }
+};
+
+// This class also doubles as a holder for metadata cleanup.
+class ClassFileParser::FieldAnnotationCollector : public AnnotationCollector {
+private:
+  ClassLoaderData* _loader_data;
+  AnnotationArray* _field_annotations;
+  AnnotationArray* _field_type_annotations;
+public:
+  FieldAnnotationCollector(ClassLoaderData* loader_data) :
+    AnnotationCollector(_in_field),
+    _loader_data(loader_data),
+    _field_annotations(NULL),
+    _field_type_annotations(NULL) {}
+  ~FieldAnnotationCollector();
+  void apply_to(FieldInfo* f);
+  AnnotationArray* field_annotations()      { return _field_annotations; }
+  AnnotationArray* field_type_annotations() { return _field_type_annotations; }
+
+  void set_field_annotations(AnnotationArray* a)      { _field_annotations = a; }
+  void set_field_type_annotations(AnnotationArray* a) { _field_type_annotations = a; }
+};
+
+class MethodAnnotationCollector : public AnnotationCollector {
+public:
+  MethodAnnotationCollector() : AnnotationCollector(_in_method) { }
+  void apply_to(methodHandle m);
+};
+
+class ClassFileParser::ClassAnnotationCollector : public AnnotationCollector {
+public:
+  ClassAnnotationCollector() : AnnotationCollector(_in_class) { }
+  void apply_to(InstanceKlass* ik);
+};
+
+
+static int skip_annotation_value(const u1*, int, int); // fwd decl
+
+// Skip an annotation.  Return >=limit if there is any problem.
+static int skip_annotation(const u1* buffer, int limit, int index) {
+  assert(buffer != NULL, "invariant");
+  // annotation := atype:u2 do(nmem:u2) {member:u2 value}
+  // value := switch (tag:u1) { ... }
+  index += 2;  // skip atype
+  if ((index += 2) >= limit)  return limit;  // read nmem
+  int nmem = Bytes::get_Java_u2((address)buffer + index - 2);
+  while (--nmem >= 0 && index < limit) {
+    index += 2; // skip member
+    index = skip_annotation_value(buffer, limit, index);
+  }
+  return index;
+}
+
+// Skip an annotation value.  Return >=limit if there is any problem.
+static int skip_annotation_value(const u1* buffer, int limit, int index) {
+  assert(buffer != NULL, "invariant");
+
+  // value := switch (tag:u1) {
+  //   case B, C, I, S, Z, D, F, J, c: con:u2;
+  //   case e: e_class:u2 e_name:u2;
+  //   case s: s_con:u2;
+  //   case [: do(nval:u2) {value};
+  //   case @: annotation;
+  // }
+  if ((index += 1) >= limit)  return limit;  // read tag
+  const u1 tag = buffer[index - 1];
+  switch (tag) {
+    case 'B':
+    case 'C':
+    case 'I':
+    case 'S':
+    case 'Z':
+    case 'D':
+    case 'F':
+    case 'J':
+    case 'c':
+    case 's':
+      index += 2;  // skip con or s_con
       break;
-    case T_BYTE: case T_CHAR: case T_SHORT: case T_BOOLEAN: case T_INT:
-      guarantee_property(value_type.is_int(), "Inconsistent constant value type in class file %s", CHECK);
+    case 'e':
+      index += 4;  // skip e_class, e_name
       break;
-    case T_OBJECT:
-      guarantee_property((_cp->symbol_at(signature_index)->equals("Ljava/lang/String;")
-                         && value_type.is_string()),
-                         "Bad string initial value in class file %s", CHECK);
+    case '[': {
+      if ((index += 2) >= limit)  return limit;  // read nval
+      int nval = Bytes::get_Java_u2((address)buffer + index - 2);
+      while (--nval >= 0 && index < limit) {
+        index = skip_annotation_value(buffer, limit, index);
+      }
+      break;
+    }
+    case '@':
+      index = skip_annotation(buffer, limit, index);
       break;
     default:
-      classfile_parse_error(
-        "Unable to set initial value %u in class file %s",
-        constantvalue_index, CHECK);
+      return limit;  //  bad tag byte
+  }
+  return index;
+}
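
As a concrete walk over this grammar, the sketch below hand-assembles the bytes of one enum-valued annotation, such as @Retention(RetentionPolicy.RUNTIME), and skips it the same way. The constant-pool indices #12..#15 are arbitrary placeholders, and reads are big-endian like Bytes::get_Java_u2():

#include <cassert>
#include <cstdint>

static int read_u2(const uint8_t* p) { return (p[0] << 8) | p[1]; }

// Mirrors skip_annotation_value() for the 'e' tag only; other tags elided.
static int skip_value(const uint8_t* buf, int limit, int index) {
  if ((index += 1) >= limit) return limit;    // read tag
  return (buf[index - 1] == 'e') ? index + 4  // skip e_class, e_name
                                 : limit;     // not modeled in this sketch
}

int main() {
  // annotation := atype:u2 nmem:u2 { member:u2 value }
  // here: atype=#12, nmem=1, member=#13 ('value'), tag='e',
  //       e_class=#14 (RetentionPolicy), e_name=#15 ('RUNTIME')
  const uint8_t ann[] = { 0,12,  0,1,  0,13,  'e',  0,14,  0,15 };
  const int limit = (int)sizeof(ann);
  int index = 2;                              // past atype
  int nmem = read_u2(ann + index); index += 2;
  while (--nmem >= 0 && index < limit) {
    index += 2;                               // skip member name index
    index = skip_value(ann, limit, index);
  }
  assert(index == limit);                     // consumed exactly one annotation
  return 0;
}
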
+
+// Sift through annotations, looking for those significant to the VM:
+static void parse_annotations(const ConstantPool* const cp,
+                              const u1* buffer, int limit,
+                              AnnotationCollector* coll,
+                              ClassLoaderData* loader_data,
+                              TRAPS) {
+
+  assert(cp != NULL, "invariant");
+  assert(buffer != NULL, "invariant");
+  assert(coll != NULL, "invariant");
+  assert(loader_data != NULL, "invariant");
+
+  // annotations := do(nann:u2) {annotation}
+  int index = 0;
+  if ((index += 2) >= limit)  return;  // read nann
+  int nann = Bytes::get_Java_u2((address)buffer + index - 2);
+  enum {  // initial annotation layout
+    atype_off = 0,      // utf8 such as 'Ljava/lang/annotation/Retention;'
+    count_off = 2,      // u2   such as 1 (one value)
+    member_off = 4,     // utf8 such as 'value'
+    tag_off = 6,        // u1   such as 'c' (type) or 'e' (enum)
+    e_tag_val = 'e',
+    e_type_off = 7,   // utf8 such as 'Ljava/lang/annotation/RetentionPolicy;'
+    e_con_off = 9,    // utf8 payload, such as 'SOURCE', 'CLASS', 'RUNTIME'
+    e_size = 11,     // end of 'e' annotation
+    c_tag_val = 'c',    // payload is type
+    c_con_off = 7,    // utf8 payload, such as 'I'
+    c_size = 9,       // end of 'c' annotation
+    s_tag_val = 's',    // payload is String
+    s_con_off = 7,    // utf8 payload, such as 'Ljava/lang/String;'
+    s_size = 9,
+    min_size = 6        // smallest possible size (zero members)
+  };
+  while ((--nann) >= 0 && (index - 2 + min_size <= limit)) {
+    int index0 = index;
+    index = skip_annotation(buffer, limit, index);
+    const u1* const abase = buffer + index0;
+    const int atype = Bytes::get_Java_u2((address)abase + atype_off);
+    const int count = Bytes::get_Java_u2((address)abase + count_off);
+    const Symbol* const aname = check_symbol_at(cp, atype);
+    if (aname == NULL)  break;  // invalid annotation name
+    const Symbol* member = NULL;
+    if (count >= 1) {
+      const int member_index = Bytes::get_Java_u2((address)abase + member_off);
+      member = check_symbol_at(cp, member_index);
+      if (member == NULL)  break;  // invalid member name
+    }
+
+    // Here is where parsing particular annotations will take place.
+    AnnotationCollector::ID id = coll->annotation_index(loader_data, aname);
+    if (AnnotationCollector::_unknown == id)  continue;
+    coll->set_annotation(id);
+
+    if (AnnotationCollector::_jdk_internal_vm_annotation_Contended == id) {
+      // @Contended can optionally specify the contention group.
+      //
+      // Contended group defines the equivalence class over the fields:
+      // the fields within the same contended group are not treated as distinct.
+      // The only exception is default group, which does not incur the
+      // equivalence. Naturally, contention group for classes is meaningless.
+      //
+      // While the contention group is specified as String, annotation
+      // values are already interned, and we might as well use the constant
+      // pool index as the group tag.
+      //
+      u2 group_index = 0; // default contended group
+      if (count == 1
+        && s_size == (index - index0)  // match size
+        && s_tag_val == *(abase + tag_off)
+        && member == vmSymbols::value_name()) {
+        group_index = Bytes::get_Java_u2((address)abase + s_con_off);
+        if (cp->symbol_at(group_index)->utf8_length() == 0) {
+          group_index = 0; // default contended group
+        }
+      }
+      coll->set_contended_group(group_index);
+    }
   }
 }
 
 
 // Parse attributes for a field.
-void ClassFileParser::parse_field_attributes(u2 attributes_count,
+void ClassFileParser::parse_field_attributes(const ClassFileStream* const cfs,
+                                             u2 attributes_count,
                                              bool is_static, u2 signature_index,
-                                             u2* constantvalue_index_addr,
-                                             bool* is_synthetic_addr,
-                                             u2* generic_signature_index_addr,
+                                             u2* const constantvalue_index_addr,
+                                             bool* const is_synthetic_addr,
+                                             u2* const generic_signature_index_addr,
                                              ClassFileParser::FieldAnnotationCollector* parsed_annotations,
                                              TRAPS) {
-  ClassFileStream* cfs = stream();
-  assert(attributes_count > 0, "length should be greater than 0");
+  assert(cfs != NULL, "invariant");
+  assert(constantvalue_index_addr != NULL, "invariant");
+  assert(is_synthetic_addr != NULL, "invariant");
+  assert(generic_signature_index_addr != NULL, "invariant");
+  assert(parsed_annotations != NULL, "invariant");
+  assert(attributes_count > 0, "attributes_count should be greater than 0");
+
   u2 constantvalue_index = 0;
   u2 generic_signature_index = 0;
   bool is_synthetic = false;
-  u1* runtime_visible_annotations = NULL;
+  const u1* runtime_visible_annotations = NULL;
   int runtime_visible_annotations_length = 0;
-  u1* runtime_invisible_annotations = NULL;
+  const u1* runtime_invisible_annotations = NULL;
   int runtime_invisible_annotations_length = 0;
-  u1* runtime_visible_type_annotations = NULL;
+  const u1* runtime_visible_type_annotations = NULL;
   int runtime_visible_type_annotations_length = 0;
-  u1* runtime_invisible_type_annotations = NULL;
+  const u1* runtime_invisible_type_annotations = NULL;
   int runtime_invisible_type_annotations_length = 0;
   bool runtime_invisible_annotations_exists = false;
   bool runtime_invisible_type_annotations_exists = false;
+  const ConstantPool* const cp = _cp;
+
   while (attributes_count--) {
     cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
-    u2 attribute_name_index = cfs->get_u2_fast();
-    u4 attribute_length = cfs->get_u4_fast();
+    const u2 attribute_name_index = cfs->get_u2_fast();
+    const u4 attribute_length = cfs->get_u4_fast();
     check_property(valid_symbol_at(attribute_name_index),
                    "Invalid field attribute index %u in class file %s",
                    attribute_name_index,
                    CHECK);
-    Symbol* attribute_name = _cp->symbol_at(attribute_name_index);
+
+    const Symbol* const attribute_name = cp->symbol_at(attribute_name_index);
     if (is_static && attribute_name == vmSymbols::tag_constant_value()) {
       // ignore if non-static
       if (constantvalue_index != 0) {
@@ -916,9 +1214,10 @@
         attribute_length == 2,
         "Invalid ConstantValue field attribute length %u in class file %s",
         attribute_length, CHECK);
+
       constantvalue_index = cfs->get_u2(CHECK);
       if (_need_verify) {
-        verify_constantvalue(constantvalue_index, signature_index, CHECK);
+        verify_constantvalue(cp, constantvalue_index, signature_index, CHECK);
       }
     } else if (attribute_name == vmSymbols::tag_synthetic()) {
       if (attribute_length != 0) {
@@ -940,7 +1239,7 @@
             "Wrong size %u for field's Signature attribute in class file %s",
             attribute_length, CHECK);
         }
-        generic_signature_index = parse_generic_signature_attribute(CHECK);
+        generic_signature_index = parse_generic_signature_attribute(cfs, CHECK);
       } else if (attribute_name == vmSymbols::tag_runtime_visible_annotations()) {
         if (runtime_visible_annotations != NULL) {
           classfile_parse_error(
@@ -949,9 +1248,12 @@
         runtime_visible_annotations_length = attribute_length;
         runtime_visible_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_annotations != NULL, "null visible annotations");
-        parse_annotations(runtime_visible_annotations,
+        parse_annotations(cp,
+                          runtime_visible_annotations,
                           runtime_visible_annotations_length,
-                          parsed_annotations);
+                          parsed_annotations,
+                          _loader_data,
+                          CHECK);
         cfs->skip_u1(runtime_visible_annotations_length, CHECK);
       } else if (attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
         if (runtime_invisible_annotations_exists) {
@@ -1081,7 +1383,7 @@
   return result;
 }
 
-class FieldAllocationCount: public ResourceObj {
+class ClassFileParser::FieldAllocationCount : public ResourceObj {
  public:
   u2 count[MAX_FIELD_ALLOCATION_TYPE];
 
@@ -1100,18 +1402,33 @@
   }
 };
 
-Array<u2>* ClassFileParser::parse_fields(Symbol* class_name,
-                                         bool is_interface,
-                                         FieldAllocationCount *fac,
-                                         u2* java_fields_count_ptr, TRAPS) {
-  ClassFileStream* cfs = stream();
-  cfs->guarantee_more(2, CHECK_NULL);  // length
-  u2 length = cfs->get_u2_fast();
+// Side-effects: populates the _fields, _fields_annotations,
+// _fields_type_annotations fields
+void ClassFileParser::parse_fields(const ClassFileStream* const cfs,
+                                   bool is_interface,
+                                   FieldAllocationCount* const fac,
+                                   ConstantPool* cp,
+                                   const int cp_size,
+                                   u2* const java_fields_count_ptr,
+                                   TRAPS) {
+
+  assert(cfs != NULL, "invariant");
+  assert(fac != NULL, "invariant");
+  assert(cp != NULL, "invariant");
+  assert(java_fields_count_ptr != NULL, "invariant");
+
+  assert(NULL == _fields, "invariant");
+  assert(NULL == _fields_annotations, "invariant");
+  assert(NULL == _fields_type_annotations, "invariant");
+
+  cfs->guarantee_more(2, CHECK);  // length
+  const u2 length = cfs->get_u2_fast();
   *java_fields_count_ptr = length;
 
   int num_injected = 0;
-  InjectedField* injected = JavaClasses::get_injected(class_name, &num_injected);
-  int total_fields = length + num_injected;
+  const InjectedField* const injected = JavaClasses::get_injected(_class_name,
+                                                                  &num_injected);
+  const int total_fields = length + num_injected;
 
   // The field array starts with tuples of shorts
   // [access, name index, sig index, initial value index, byte offset].
@@ -1134,62 +1451,70 @@
   // index. After parsing all fields, the data are copied to a permanent
   // array and any unused slots will be discarded.
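  // Sketch of the staging layout just described (kFieldSlots stands in for
  // FieldInfo::field_slots; the real tuple packs the byte offset as two u2s):
  //
  //   fa: [ f0.access, f0.name, f0.sig, f0.initval, f0.offset...,  // tuple 0
  //         f1.access, ...                                         // tuple 1
  //         ...
  //         gsig_0, gsig_1, ... ]  // generic-signature slots, after all tuples
  //
  //   inline u2* nth_field(u2* fa, int n) { return fa + n * kFieldSlots; }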
   ResourceMark rm(THREAD);
-  u2* fa = NEW_RESOURCE_ARRAY_IN_THREAD(
-             THREAD, u2, total_fields * (FieldInfo::field_slots + 1));
+  u2* const fa = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD,
+                                              u2,
+                                              total_fields * (FieldInfo::field_slots + 1));
 
   // The generic signature slots start after all other fields' data.
   int generic_signature_slot = total_fields * FieldInfo::field_slots;
   int num_generic_signature = 0;
   for (int n = 0; n < length; n++) {
-    cfs->guarantee_more(8, CHECK_NULL);  // access_flags, name_index, descriptor_index, attributes_count
+    // access_flags, name_index, descriptor_index, attributes_count
+    cfs->guarantee_more(8, CHECK);
 
     AccessFlags access_flags;
-    jint flags = cfs->get_u2_fast() & JVM_RECOGNIZED_FIELD_MODIFIERS;
-    verify_legal_field_modifiers(flags, is_interface, CHECK_NULL);
+    const jint flags = cfs->get_u2_fast() & JVM_RECOGNIZED_FIELD_MODIFIERS;
+    verify_legal_field_modifiers(flags, is_interface, CHECK);
     access_flags.set_flags(flags);
 
-    u2 name_index = cfs->get_u2_fast();
-    int cp_size = _cp->length();
+    const u2 name_index = cfs->get_u2_fast();
     check_property(valid_symbol_at(name_index),
       "Invalid constant pool index %u for field name in class file %s",
-      name_index,
-      CHECK_NULL);
-    Symbol*  name = _cp->symbol_at(name_index);
-    verify_legal_field_name(name, CHECK_NULL);
-
-    u2 signature_index = cfs->get_u2_fast();
+      name_index, CHECK);
+    const Symbol* const name = cp->symbol_at(name_index);
+    verify_legal_field_name(name, CHECK);
+
+    const u2 signature_index = cfs->get_u2_fast();
     check_property(valid_symbol_at(signature_index),
       "Invalid constant pool index %u for field signature in class file %s",
-      signature_index, CHECK_NULL);
-    Symbol*  sig = _cp->symbol_at(signature_index);
-    verify_legal_field_signature(name, sig, CHECK_NULL);
+      signature_index, CHECK);
+    const Symbol* const sig = cp->symbol_at(signature_index);
+    verify_legal_field_signature(name, sig, CHECK);
 
     u2 constantvalue_index = 0;
     bool is_synthetic = false;
     u2 generic_signature_index = 0;
-    bool is_static = access_flags.is_static();
+    const bool is_static = access_flags.is_static();
     FieldAnnotationCollector parsed_annotations(_loader_data);
 
-    u2 attributes_count = cfs->get_u2_fast();
+    const u2 attributes_count = cfs->get_u2_fast();
     if (attributes_count > 0) {
-      parse_field_attributes(attributes_count, is_static, signature_index,
-                             &constantvalue_index, &is_synthetic,
-                             &generic_signature_index, &parsed_annotations,
-                             CHECK_NULL);
+      parse_field_attributes(cfs,
+                             attributes_count,
+                             is_static,
+                             signature_index,
+                             &constantvalue_index,
+                             &is_synthetic,
+                             &generic_signature_index,
+                             &parsed_annotations,
+                             CHECK);
+
       if (parsed_annotations.field_annotations() != NULL) {
         if (_fields_annotations == NULL) {
           _fields_annotations = MetadataFactory::new_array<AnnotationArray*>(
                                              _loader_data, length, NULL,
-                                             CHECK_NULL);
+                                             CHECK);
         }
         _fields_annotations->at_put(n, parsed_annotations.field_annotations());
         parsed_annotations.set_field_annotations(NULL);
       }
       if (parsed_annotations.field_type_annotations() != NULL) {
         if (_fields_type_annotations == NULL) {
-          _fields_type_annotations = MetadataFactory::new_array<AnnotationArray*>(
-                                                  _loader_data, length, NULL,
-                                                  CHECK_NULL);
+          _fields_type_annotations =
+            MetadataFactory::new_array<AnnotationArray*>(_loader_data,
+                                                         length,
+                                                         NULL,
+                                                         CHECK);
         }
         _fields_type_annotations->at_put(n, parsed_annotations.field_type_annotations());
         parsed_annotations.set_field_type_annotations(NULL);
@@ -1206,15 +1531,15 @@
       }
     }
 
-    FieldInfo* field = FieldInfo::from_field_array(fa, n);
+    FieldInfo* const field = FieldInfo::from_field_array(fa, n);
     field->initialize(access_flags.as_short(),
                       name_index,
                       signature_index,
                       constantvalue_index);
-    BasicType type = _cp->basic_type_for_signature_at(signature_index);
+    const BasicType type = cp->basic_type_for_signature_at(signature_index);
 
     // Remember how many oops we encountered and compute allocation type
-    FieldAllocationType atype = fac->update(is_static, type);
+    const FieldAllocationType atype = fac->update(is_static, type);
     field->set_allocation_type(atype);
 
     // After field is initialized with type, we can augment it with aux info
@@ -1227,13 +1552,13 @@
     for (int n = 0; n < num_injected; n++) {
       // Check for duplicates
       if (injected[n].may_be_java) {
-        Symbol* name      = injected[n].name();
-        Symbol* signature = injected[n].signature();
+        const Symbol* const name      = injected[n].name();
+        const Symbol* const signature = injected[n].signature();
         bool duplicate = false;
         for (int i = 0; i < length; i++) {
-          FieldInfo* f = FieldInfo::from_field_array(fa, i);
-          if (name      == _cp->symbol_at(f->name_index()) &&
-              signature == _cp->symbol_at(f->signature_index())) {
+          const FieldInfo* const f = FieldInfo::from_field_array(fa, i);
+          if (name      == cp->symbol_at(f->name_index()) &&
+              signature == cp->symbol_at(f->signature_index())) {
             // Symbol is declared in Java so skip this one
             duplicate = true;
             break;
@@ -1246,40 +1571,41 @@
       }
 
       // Injected field
-      FieldInfo* field = FieldInfo::from_field_array(fa, index);
+      FieldInfo* const field = FieldInfo::from_field_array(fa, index);
       field->initialize(JVM_ACC_FIELD_INTERNAL,
                         injected[n].name_index,
                         injected[n].signature_index,
                         0);
 
-      BasicType type = FieldType::basic_type(injected[n].signature());
+      const BasicType type = FieldType::basic_type(injected[n].signature());
 
       // Remember how many oops we encountered and compute allocation type
-      FieldAllocationType atype = fac->update(false, type);
+      const FieldAllocationType atype = fac->update(false, type);
       field->set_allocation_type(atype);
       index++;
     }
   }
 
-  // Now copy the fields' data from the temporary resource array.
+  assert(NULL == _fields, "invariant");
+
+  _fields =
+    MetadataFactory::new_array<u2>(_loader_data,
+                                   index * FieldInfo::field_slots + num_generic_signature,
+                                   CHECK);
   // Sometimes injected fields already exist in the Java source so
   // the fields array could be too long.  In that case the
   // fields array is trimmed. Also unused slots that were reserved
   // for generic signature indexes are discarded.
-  Array<u2>* fields = MetadataFactory::new_array<u2>(
-          _loader_data, index * FieldInfo::field_slots + num_generic_signature,
-          CHECK_NULL);
-  _fields = fields; // save in case of error
   {
     int i = 0;
     for (; i < index * FieldInfo::field_slots; i++) {
-      fields->at_put(i, fa[i]);
+      _fields->at_put(i, fa[i]);
     }
     for (int j = total_fields * FieldInfo::field_slots;
          j < generic_signature_slot; j++) {
-      fields->at_put(i++, fa[j]);
+      _fields->at_put(i++, fa[j]);
     }
-    assert(i == fields->length(), "");
+    assert(_fields->length() == i, "");
   }
 
   if (_need_verify && length > 1) {
@@ -1291,9 +1617,9 @@
     bool dup = false;
     {
       debug_only(No_Safepoint_Verifier nsv;)
-      for (AllFieldStream fs(fields, _cp); !fs.done(); fs.next()) {
-        Symbol* name = fs.name();
-        Symbol* sig = fs.signature();
+      for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
+        const Symbol* const name = fs.name();
+        const Symbol* const sig = fs.signature();
         // If no duplicates, add the name/signature pair to the hashtable names_and_sigs.
         if (!put_after_lookup(name, sig, names_and_sigs)) {
           dup = true;
@@ -1303,36 +1629,39 @@
     }
     if (dup) {
       classfile_parse_error("Duplicate field name&signature in class file %s",
-                            CHECK_NULL);
+                            CHECK);
     }
   }
-
-  return fields;
 }
 
 
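For orientation: the temporary array above reserves FieldInfo::field_slots u2 entries per field up front and packs generic-signature indices at the tail, and the copy loop then trims both regions into a right-sized array. A minimal standalone sketch of that trimming step (std::vector standing in for the resource/metadata arrays, the field_slots value is illustrative):

    #include <cstdint>
    #include <vector>

    typedef uint16_t u2;

    static const int field_slots = 6; // illustrative; HotSpot defines FieldInfo::field_slots

    // Trim the oversized temporary array down to the entries actually used:
    // first the per-field records, then the generic-signature indices that
    // were appended starting at total_fields * field_slots.
    static std::vector<u2> trim_fields(const std::vector<u2>& fa,
                                       int index,          // fields kept (declared + injected)
                                       int total_fields,   // records reserved
                                       int generic_signature_slot) {
      // Entries past total_fields * field_slots are the generic-signature tail.
      const int num_generic = generic_signature_slot - total_fields * field_slots;
      std::vector<u2> fields(index * field_slots + num_generic);
      int i = 0;
      for (; i < index * field_slots; i++) {
        fields[i] = fa[i];
      }
      for (int j = total_fields * field_slots; j < generic_signature_slot; j++) {
        fields[i++] = fa[j];
      }
      return fields;
    }
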
-static void copy_u2_with_conversion(u2* dest, u2* src, int length) {
+static void copy_u2_with_conversion(u2* dest, const u2* src, int length) {
   while (length-- > 0) {
     *dest++ = Bytes::get_Java_u2((u1*) (src++));
   }
 }
 
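copy_u2_with_conversion leans on Bytes::get_Java_u2, which reads a 16-bit value in class-file (big-endian) byte order regardless of host endianness (JVMS 4.1); a freestanding equivalent, for reference:

    #include <cstdint>

    typedef uint8_t  u1;
    typedef uint16_t u2;

    // Class files store multi-byte items big-endian, so the first byte is
    // the high byte no matter what the host byte order is.
    static inline u2 get_java_u2(const u1* p) {
      return (u2)(((u2)p[0] << 8) | p[1]);
    }
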
-
-u2* ClassFileParser::parse_exception_table(u4 code_length,
-                                           u4 exception_table_length,
-                                           TRAPS) {
-  ClassFileStream* cfs = stream();
-
-  u2* exception_table_start = cfs->get_u2_buffer();
+const u2* ClassFileParser::parse_exception_table(const ClassFileStream* const cfs,
+                                                 u4 code_length,
+                                                 u4 exception_table_length,
+                                                 TRAPS) {
+  assert(cfs != NULL, "invariant");
+
+  const u2* const exception_table_start = cfs->get_u2_buffer();
   assert(exception_table_start != NULL, "null exception table");
-  cfs->guarantee_more(8 * exception_table_length, CHECK_NULL); // start_pc, end_pc, handler_pc, catch_type_index
+
+  cfs->guarantee_more(8 * exception_table_length, CHECK_NULL); // start_pc,
+                                                               // end_pc,
+                                                               // handler_pc,
+                                                               // catch_type_index
+
   // Will check legal target after parsing code array in verifier.
   if (_need_verify) {
     for (unsigned int i = 0; i < exception_table_length; i++) {
-      u2 start_pc = cfs->get_u2_fast();
-      u2 end_pc = cfs->get_u2_fast();
-      u2 handler_pc = cfs->get_u2_fast();
-      u2 catch_type_index = cfs->get_u2_fast();
+      const u2 start_pc = cfs->get_u2_fast();
+      const u2 end_pc = cfs->get_u2_fast();
+      const u2 handler_pc = cfs->get_u2_fast();
+      const u2 catch_type_index = cfs->get_u2_fast();
       guarantee_property((start_pc < end_pc) && (end_pc <= code_length),
                          "Illegal exception table range in class file %s",
                          CHECK_NULL);
@@ -1350,14 +1679,16 @@
   return exception_table_start;
 }
 
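Each exception_table entry is four u2 values, which is why the code above calls guarantee_more(8 * exception_table_length, ...). A sketch of the record layout and the kind of range checks the verifying path enforces (the catch_type_index constant-pool check is elided):

    #include <cstdint>

    typedef uint16_t u2;
    typedef uint32_t u4;

    // One exception_table entry as laid out in the Code attribute (JVMS 4.7.3).
    struct ExceptionTableEntry {
      u2 start_pc;          // inclusive start of the protected range
      u2 end_pc;            // exclusive end of the protected range
      u2 handler_pc;        // handler entry point
      u2 catch_type_index;  // CONSTANT_Class index, or 0 for catch-all
    };

    static bool entry_in_range(const ExceptionTableEntry& e, u4 code_length) {
      return (e.start_pc < e.end_pc) && (e.end_pc <= code_length) &&
             (e.handler_pc < code_length);
    }
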
-void ClassFileParser::parse_linenumber_table(
-    u4 code_attribute_length, u4 code_length,
-    CompressedLineNumberWriteStream** write_stream, TRAPS) {
-  ClassFileStream* cfs = stream();
+void ClassFileParser::parse_linenumber_table(u4 code_attribute_length,
+                                             u4 code_length,
+                                             CompressedLineNumberWriteStream**const write_stream,
+                                             TRAPS) {
+
+  const ClassFileStream* const cfs = _stream;
   unsigned int num_entries = cfs->get_u2(CHECK);
 
   // Each entry is a u2 start_pc and a u2 line_number
-  unsigned int length_in_bytes = num_entries * (sizeof(u2) + sizeof(u2));
+  const unsigned int length_in_bytes = num_entries * (sizeof(u2) * 2);
 
   // Verify line number attribute and table length
   check_property(
@@ -1371,13 +1702,13 @@
       (*write_stream) = new CompressedLineNumberWriteStream(length_in_bytes);
     } else {
       (*write_stream) = new CompressedLineNumberWriteStream(
-        linenumbertable_buffer, fixed_buffer_size);
+        _linenumbertable_buffer, fixed_buffer_size);
     }
   }
 
   while (num_entries-- > 0) {
-    u2 bci  = cfs->get_u2_fast(); // start_pc
-    u2 line = cfs->get_u2_fast(); // line_number
+    const u2 bci  = cfs->get_u2_fast(); // start_pc
+    const u2 line = cfs->get_u2_fast(); // line_number
     guarantee_property(bci < code_length,
         "Invalid pc in LineNumberTable in class file %s", CHECK);
     (*write_stream)->write_pair(bci, line);
@@ -1422,7 +1753,8 @@
   u2 slot;
 };
 
-void copy_lvt_element(Classfile_LVT_Element *src, LocalVariableTableElement *lvt) {
+static void copy_lvt_element(const Classfile_LVT_Element* const src,
+                             LocalVariableTableElement* const lvt) {
   lvt->start_bci           = Bytes::get_Java_u2((u1*) &src->start_bci);
   lvt->length              = Bytes::get_Java_u2((u1*) &src->length);
   lvt->name_cp_index       = Bytes::get_Java_u2((u1*) &src->name_cp_index);
@@ -1432,36 +1764,41 @@
 }
 
 // Function is used to parse both attributes:
-//       LocalVariableTable (LVT) and LocalVariableTypeTable (LVTT)
-u2* ClassFileParser::parse_localvariable_table(u4 code_length,
-                                               u2 max_locals,
-                                               u4 code_attribute_length,
-                                               u2* localvariable_table_length,
-                                               bool isLVTT,
-                                               TRAPS) {
-  ClassFileStream* cfs = stream();
-  const char * tbl_name = (isLVTT) ? "LocalVariableTypeTable" : "LocalVariableTable";
+// LocalVariableTable (LVT) and LocalVariableTypeTable (LVTT)
+const u2* ClassFileParser::parse_localvariable_table(const ClassFileStream* cfs,
+                                                     u4 code_length,
+                                                     u2 max_locals,
+                                                     u4 code_attribute_length,
+                                                     u2* const localvariable_table_length,
+                                                     bool isLVTT,
+                                                     TRAPS) {
+  const char* const tbl_name = (isLVTT) ? "LocalVariableTypeTable" : "LocalVariableTable";
   *localvariable_table_length = cfs->get_u2(CHECK_NULL);
-  unsigned int size = (*localvariable_table_length) * sizeof(Classfile_LVT_Element) / sizeof(u2);
+  const unsigned int size =
+    (*localvariable_table_length) * sizeof(Classfile_LVT_Element) / sizeof(u2);
+
+  const ConstantPool* const cp = _cp;
+
   // Verify local variable table attribute has right length
   if (_need_verify) {
     guarantee_property(code_attribute_length == (sizeof(*localvariable_table_length) + size * sizeof(u2)),
                        "%s has wrong length in class file %s", tbl_name, CHECK_NULL);
   }
-  u2* localvariable_table_start = cfs->get_u2_buffer();
+
+  const u2* const localvariable_table_start = cfs->get_u2_buffer();
   assert(localvariable_table_start != NULL, "null local variable table");
   if (!_need_verify) {
     cfs->skip_u2_fast(size);
   } else {
     cfs->guarantee_more(size * 2, CHECK_NULL);
     for(int i = 0; i < (*localvariable_table_length); i++) {
-      u2 start_pc = cfs->get_u2_fast();
-      u2 length = cfs->get_u2_fast();
-      u2 name_index = cfs->get_u2_fast();
-      u2 descriptor_index = cfs->get_u2_fast();
-      u2 index = cfs->get_u2_fast();
+      const u2 start_pc = cfs->get_u2_fast();
+      const u2 length = cfs->get_u2_fast();
+      const u2 name_index = cfs->get_u2_fast();
+      const u2 descriptor_index = cfs->get_u2_fast();
+      const u2 index = cfs->get_u2_fast();
       // Assign to a u4 to avoid overflow
-      u4 end_pc = (u4)start_pc + (u4)length;
+      const u4 end_pc = (u4)start_pc + (u4)length;
 
       if (start_pc >= code_length) {
         classfile_parse_error(
@@ -1473,7 +1810,7 @@
           "Invalid length %u in %s in class file %s",
           length, tbl_name, CHECK_NULL);
       }
-      int cp_size = _cp->length();
+      const int cp_size = cp->length();
       guarantee_property(valid_symbol_at(name_index),
         "Name index %u in %s has bad constant type in class file %s",
         name_index, tbl_name, CHECK_NULL);
@@ -1481,8 +1818,8 @@
         "Signature index %u in %s has bad constant type in class file %s",
         descriptor_index, tbl_name, CHECK_NULL);
 
-      Symbol*  name = _cp->symbol_at(name_index);
-      Symbol*  sig = _cp->symbol_at(descriptor_index);
+      const Symbol* const name = cp->symbol_at(name_index);
+      const Symbol* const sig = cp->symbol_at(descriptor_index);
       verify_legal_field_name(name, CHECK_NULL);
       u2 extra_slot = 0;
       if (!isLVTT) {
@@ -1503,24 +1840,29 @@
 }
 
 
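The size arithmetic above counts in u2 units: each LVT/LVTT record is five u2 fields, and the attribute's declared length must equal the two-byte count prefix plus the table body. A standalone restatement of that length check (field names mirror Classfile_LVT_Element loosely):

    #include <cstddef>
    #include <cstdint>

    typedef uint16_t u2;
    typedef uint32_t u4;

    // Five u2 fields per LocalVariableTable record (JVMS 4.7.13).
    struct LVTElement {
      u2 start_pc, length, name_cp_index, descriptor_cp_index, slot;
    };

    // The attribute body is <count:u2> followed by count records, so its
    // declared length must be exactly sizeof(u2) + count * sizeof(record).
    static bool lvt_length_matches(u4 code_attribute_length, u2 count) {
      const size_t size_in_u2 = count * sizeof(LVTElement) / sizeof(u2);
      return code_attribute_length == sizeof(u2) + size_in_u2 * sizeof(u2);
    }
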
-void ClassFileParser::parse_type_array(u2 array_length, u4 code_length, u4* u1_index, u4* u2_index,
-                                      u1* u1_array, u2* u2_array, TRAPS) {
-  ClassFileStream* cfs = stream();
+void ClassFileParser::parse_type_array(u2 array_length,
+                                       u4 code_length,
+                                       u4* const u1_index,
+                                       u4* const u2_index,
+                                       u1* const u1_array,
+                                       u2* const u2_array,
+                                       TRAPS) {
+  const ClassFileStream* const cfs = _stream;
   u2 index = 0; // index in the array with long/double occupying two slots
   u4 i1 = *u1_index;
   u4 i2 = *u2_index + 1;
   for(int i = 0; i < array_length; i++) {
-    u1 tag = u1_array[i1++] = cfs->get_u1(CHECK);
+    const u1 tag = u1_array[i1++] = cfs->get_u1(CHECK);
     index++;
     if (tag == ITEM_Long || tag == ITEM_Double) {
       index++;
     } else if (tag == ITEM_Object) {
-      u2 class_index = u2_array[i2++] = cfs->get_u2(CHECK);
+      const u2 class_index = u2_array[i2++] = cfs->get_u2(CHECK);
       guarantee_property(valid_klass_reference_at(class_index),
                          "Bad class index %u in StackMap in class file %s",
                          class_index, CHECK);
     } else if (tag == ITEM_Uninitialized) {
-      u2 offset = u2_array[i2++] = cfs->get_u2(CHECK);
+      const u2 offset = u2_array[i2++] = cfs->get_u2(CHECK);
       guarantee_property(
         offset < code_length,
         "Bad uninitialized type offset %u in StackMap in class file %s",
@@ -1537,39 +1879,47 @@
   *u2_index = i2;
 }
 
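parse_type_array above walks verification_type_info entries (JVMS 4.7.4): every tag is one u1, ITEM_Object and ITEM_Uninitialized carry a trailing u2 payload, and category-2 types occupy two slots in the counted array. A compact sketch of those per-tag costs:

    #include <cstdint>

    typedef uint8_t u1;

    // verification_type_info tags (JVMS 4.7.4).
    enum {
      ITEM_Top = 0, ITEM_Integer, ITEM_Float, ITEM_Double, ITEM_Long,
      ITEM_Null, ITEM_UninitializedThis, ITEM_Object, ITEM_Uninitialized
    };

    // Category-2 types (long, double) take two slots; everything else one.
    static int slots_for(u1 tag) {
      return (tag == ITEM_Long || tag == ITEM_Double) ? 2 : 1;
    }

    // Object carries a class index, Uninitialized a bytecode offset;
    // both add a u2 payload to the one-byte tag.
    static int payload_bytes_for(u1 tag) {
      return (tag == ITEM_Object || tag == ITEM_Uninitialized) ? 2 : 0;
    }
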
-u1* ClassFileParser::parse_stackmap_table(
-    u4 code_attribute_length, TRAPS) {
-  if (code_attribute_length == 0)
+static const u1* parse_stackmap_table(const ClassFileStream* const cfs,
+                                      u4 code_attribute_length,
+                                      bool need_verify,
+                                      TRAPS) {
+  assert(cfs != NULL, "invariant");
+
+  if (0 == code_attribute_length) {
     return NULL;
-
-  ClassFileStream* cfs = stream();
-  u1* stackmap_table_start = cfs->get_u1_buffer();
+  }
+
+  const u1* const stackmap_table_start = cfs->get_u1_buffer();
   assert(stackmap_table_start != NULL, "null stackmap table");
 
   // check code_attribute_length first
-  stream()->skip_u1(code_attribute_length, CHECK_NULL);
-
-  if (!_need_verify && !DumpSharedSpaces) {
+  cfs->skip_u1(code_attribute_length, CHECK_NULL);
+
+  if (!need_verify && !DumpSharedSpaces) {
     return NULL;
   }
   return stackmap_table_start;
 }
 
-u2* ClassFileParser::parse_checked_exceptions(u2* checked_exceptions_length,
-                                              u4 method_attribute_length,
-                                              TRAPS) {
-  ClassFileStream* cfs = stream();
+const u2* ClassFileParser::parse_checked_exceptions(const ClassFileStream* const cfs,
+                                                    u2* const checked_exceptions_length,
+                                                    u4 method_attribute_length,
+                                                    TRAPS) {
+  assert(cfs != NULL, "invariant");
+  assert(checked_exceptions_length != NULL, "invariant");
+
   cfs->guarantee_more(2, CHECK_NULL);  // checked_exceptions_length
   *checked_exceptions_length = cfs->get_u2_fast();
-  unsigned int size = (*checked_exceptions_length) * sizeof(CheckedExceptionElement) / sizeof(u2);
-  u2* checked_exceptions_start = cfs->get_u2_buffer();
+  const unsigned int size =
+    (*checked_exceptions_length) * sizeof(CheckedExceptionElement) / sizeof(u2);
+  const u2* const checked_exceptions_start = cfs->get_u2_buffer();
   assert(checked_exceptions_start != NULL, "null checked exceptions");
   if (!_need_verify) {
     cfs->skip_u2_fast(size);
   } else {
     // Verify each value in the checked exception table
     u2 checked_exception;
-    u2 len = *checked_exceptions_length;
+    const u2 len = *checked_exceptions_length;
     cfs->guarantee_more(2 * len, CHECK_NULL);
     for (int i = 0; i < len; i++) {
       checked_exception = cfs->get_u2_fast();
@@ -1588,8 +1938,13 @@
   return checked_exceptions_start;
 }
 
-void ClassFileParser::throwIllegalSignature(
-    const char* type, Symbol* name, Symbol* sig, TRAPS) {
+void ClassFileParser::throwIllegalSignature(const char* type,
+                                            const Symbol* name,
+                                            const Symbol* sig,
+                                            TRAPS) const {
+  assert(name != NULL, "invariant");
+  assert(sig != NULL, "invariant");
+
   ResourceMark rm(THREAD);
   Exceptions::fthrow(THREAD_AND_LOCATION,
       vmSymbols::java_lang_ClassFormatError(),
@@ -1597,181 +1952,79 @@
       name->as_C_string(), _class_name->as_C_string(), sig->as_C_string());
 }
 
-// Skip an annotation.  Return >=limit if there is any problem.
-int ClassFileParser::skip_annotation(u1* buffer, int limit, int index) {
-  // annotation := atype:u2 do(nmem:u2) {member:u2 value}
-  // value := switch (tag:u1) { ... }
-  index += 2;  // skip atype
-  if ((index += 2) >= limit)  return limit;  // read nmem
-  int nmem = Bytes::get_Java_u2(buffer+index-2);
-  while (--nmem >= 0 && index < limit) {
-    index += 2; // skip member
-    index = skip_annotation_value(buffer, limit, index);
-  }
-  return index;
-}
-
-// Skip an annotation value.  Return >=limit if there is any problem.
-int ClassFileParser::skip_annotation_value(u1* buffer, int limit, int index) {
-  // value := switch (tag:u1) {
-  //   case B, C, I, S, Z, D, F, J, c: con:u2;
-  //   case e: e_class:u2 e_name:u2;
-  //   case s: s_con:u2;
-  //   case [: do(nval:u2) {value};
-  //   case @: annotation;
-  //   case s: s_con:u2;
-  // }
-  if ((index += 1) >= limit)  return limit;  // read tag
-  u1 tag = buffer[index-1];
-  switch (tag) {
-  case 'B': case 'C': case 'I': case 'S': case 'Z':
-  case 'D': case 'F': case 'J': case 'c': case 's':
-    index += 2;  // skip con or s_con
-    break;
-  case 'e':
-    index += 4;  // skip e_class, e_name
-    break;
-  case '[':
-    {
-      if ((index += 2) >= limit)  return limit;  // read nval
-      int nval = Bytes::get_Java_u2(buffer+index-2);
-      while (--nval >= 0 && index < limit) {
-        index = skip_annotation_value(buffer, limit, index);
-      }
-    }
-    break;
-  case '@':
-    index = skip_annotation(buffer, limit, index);
-    break;
-  default:
-    return limit;  //  bad tag byte
-  }
-  return index;
-}
-
-// Sift through annotations, looking for those significant to the VM:
-void ClassFileParser::parse_annotations(u1* buffer, int limit,
-                                        ClassFileParser::AnnotationCollector* coll) {
-  // annotations := do(nann:u2) {annotation}
-  int index = 0;
-  if ((index += 2) >= limit)  return;  // read nann
-  int nann = Bytes::get_Java_u2(buffer+index-2);
-  enum {  // initial annotation layout
-    atype_off = 0,      // utf8 such as 'Ljava/lang/annotation/Retention;'
-    count_off = 2,      // u2   such as 1 (one value)
-    member_off = 4,     // utf8 such as 'value'
-    tag_off = 6,        // u1   such as 'c' (type) or 'e' (enum)
-    e_tag_val = 'e',
-      e_type_off = 7,   // utf8 such as 'Ljava/lang/annotation/RetentionPolicy;'
-      e_con_off = 9,    // utf8 payload, such as 'SOURCE', 'CLASS', 'RUNTIME'
-      e_size = 11,     // end of 'e' annotation
-    c_tag_val = 'c',    // payload is type
-      c_con_off = 7,    // utf8 payload, such as 'I'
-      c_size = 9,       // end of 'c' annotation
-    s_tag_val = 's',    // payload is String
-      s_con_off = 7,    // utf8 payload, such as 'Ljava/lang/String;'
-      s_size = 9,
-    min_size = 6        // smallest possible size (zero members)
-  };
-  while ((--nann) >= 0 && (index-2 + min_size <= limit)) {
-    int index0 = index;
-    index = skip_annotation(buffer, limit, index);
-    u1* abase = buffer + index0;
-    int atype = Bytes::get_Java_u2(abase + atype_off);
-    int count = Bytes::get_Java_u2(abase + count_off);
-    Symbol* aname = check_symbol_at(_cp, atype);
-    if (aname == NULL)  break;  // invalid annotation name
-    Symbol* member = NULL;
-    if (count >= 1) {
-      int member_index = Bytes::get_Java_u2(abase + member_off);
-      member = check_symbol_at(_cp, member_index);
-      if (member == NULL)  break;  // invalid member name
-    }
-
-    // Here is where parsing particular annotations will take place.
-    AnnotationCollector::ID id = coll->annotation_index(_loader_data, aname);
-    if (id == AnnotationCollector::_unknown)  continue;
-    coll->set_annotation(id);
-
-    if (id == AnnotationCollector::_jdk_internal_vm_annotation_Contended) {
-      // @Contended can optionally specify the contention group.
-      //
-      // Contended group defines the equivalence class over the fields:
-      // the fields within the same contended group are not treated distinct.
-      // The only exception is default group, which does not incur the
-      // equivalence. Naturally, contention group for classes is meaningless.
-      //
-      // While the contention group is specified as String, annotation
-      // values are already interned, and we might as well use the constant
-      // pool index as the group tag.
-      //
-      u2 group_index = 0; // default contended group
-      if (count == 1
-          && s_size == (index - index0)  // match size
-          && s_tag_val == *(abase + tag_off)
-          && member == vmSymbols::value_name()) {
-        group_index = Bytes::get_Java_u2(abase + s_con_off);
-        if (_cp->symbol_at(group_index)->utf8_length() == 0) {
-          group_index = 0; // default contended group
-        }
-      }
-      coll->set_contended_group(group_index);
-    }
-  }
-}
-
-ClassFileParser::AnnotationCollector::ID
-ClassFileParser::AnnotationCollector::annotation_index(ClassLoaderData* loader_data,
-                                                                Symbol* name) {
-  vmSymbols::SID sid = vmSymbols::find_sid(name);
+AnnotationCollector::ID
+AnnotationCollector::annotation_index(const ClassLoaderData* loader_data,
+                                      const Symbol* name) {
+  const vmSymbols::SID sid = vmSymbols::find_sid(name);
   // Privileged code can use all annotations.  Other code silently drops some.
   const bool privileged = loader_data->is_the_null_class_loader_data() ||
                           loader_data->is_ext_class_loader_data() ||
                           loader_data->is_anonymous();
   switch (sid) {
-  case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_reflect_CallerSensitive_signature):
-    if (_location != _in_method)  break;  // only allow for methods
-    if (!privileged)              break;  // only allow in privileged code
-    return _method_CallerSensitive;
-  case vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_ForceInline_signature):
-    if (_location != _in_method)  break;  // only allow for methods
-    if (!privileged)              break;  // only allow in privileged code
-    return _method_ForceInline;
-  case vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_DontInline_signature):
-    if (_location != _in_method)  break;  // only allow for methods
-    if (!privileged)              break;  // only allow in privileged code
-    return _method_DontInline;
-  case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_InjectedProfile_signature):
-    if (_location != _in_method)  break;  // only allow for methods
-    if (!privileged)              break;  // only allow in privileged code
-    return _method_InjectedProfile;
-  case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Compiled_signature):
-    if (_location != _in_method)  break;  // only allow for methods
-    if (!privileged)              break;  // only allow in privileged code
-    return _method_LambdaForm_Compiled;
-  case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Hidden_signature):
-    if (_location != _in_method)  break;  // only allow for methods
-    if (!privileged)              break;  // only allow in privileged code
-    return _method_LambdaForm_Hidden;
-  case vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_HotSpotIntrinsicCandidate_signature):
-    if (_location != _in_method)  break;  // only allow for methods
-    if (!privileged)              break;  // only allow in privileged code
-    return _method_HotSpotIntrinsicCandidate;
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_reflect_CallerSensitive_signature): {
+      if (_location != _in_method)  break;  // only allow for methods
+      if (!privileged)              break;  // only allow in privileged code
+      return _method_CallerSensitive;
+    }
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_ForceInline_signature): {
+      if (_location != _in_method)  break;  // only allow for methods
+      if (!privileged)              break;  // only allow in privileged code
+      return _method_ForceInline;
+    }
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_DontInline_signature): {
+      if (_location != _in_method)  break;  // only allow for methods
+      if (!privileged)              break;  // only allow in privileged code
+      return _method_DontInline;
+    }
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_InjectedProfile_signature): {
+      if (_location != _in_method)  break;  // only allow for methods
+      if (!privileged)              break;  // only allow in privileged code
+      return _method_InjectedProfile;
+    }
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Compiled_signature): {
+      if (_location != _in_method)  break;  // only allow for methods
+      if (!privileged)              break;  // only allow in privileged code
+      return _method_LambdaForm_Compiled;
+    }
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Hidden_signature): {
+      if (_location != _in_method)  break;  // only allow for methods
+      if (!privileged)              break;  // only allow in privileged code
+      return _method_LambdaForm_Hidden;
+    }
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_HotSpotIntrinsicCandidate_signature): {
+      if (_location != _in_method)  break;  // only allow for methods
+      if (!privileged)              break;  // only allow in privileged code
+      return _method_HotSpotIntrinsicCandidate;
+    }
 #if INCLUDE_JVMCI
-  case vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_vm_ci_hotspot_Stable_signature):
-    if (_location != _in_field)   break;  // only allow for fields
-    if (!privileged)              break;  // only allow in privileged code
-    return _field_Stable;
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_vm_ci_hotspot_Stable_signature): {
+      if (_location != _in_field)   break;  // only allow for fields
+      if (!privileged)              break;  // only allow in privileged code
+      return _field_Stable;
+    }
 #endif
-  case vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Stable_signature):
-    if (_location != _in_field)   break;  // only allow for fields
-    if (!privileged)              break;  // only allow in privileged code
-    return _field_Stable;
-  case vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Contended_signature):
-    if (_location != _in_field && _location != _in_class)          break;  // only allow for fields and classes
-    if (!EnableContended || (RestrictContended && !privileged))    break;  // honor privileges
-    return _jdk_internal_vm_annotation_Contended;
-  default: break;
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Stable_signature): {
+      if (_location != _in_field)   break;  // only allow for fields
+      if (!privileged)              break;  // only allow in privileged code
+      return _field_Stable;
+    }
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_Contended_signature): {
+      if (_location != _in_field && _location != _in_class) {
+        break;  // only allow for fields and classes
+      }
+      if (!EnableContended || (RestrictContended && !privileged)) {
+        break;  // honor privileges
+      }
+      return _jdk_internal_vm_annotation_Contended;
+    }
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_ReservedStackAccess_signature): {
+      if (_location != _in_method)  break;  // only allow for methods
+      if (RestrictReservedStack && !privileged) break; // honor privileges
+      return _jdk_internal_vm_annotation_ReservedStackAccess;
+    }
+    default: {
+      break;
+    }
   }
   return AnnotationCollector::_unknown;
 }
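
Every case above follows the same gate: the annotation must appear at the right location, and most are honored only for privileged loaders (boot, ext, or anonymous); the new @ReservedStackAccess case additionally consults RestrictReservedStack. A distilled sketch of that decision (names hypothetical):

    enum Location { in_class, in_field, in_method };

    // Honor a method-level, privilege-restricted annotation only when it
    // appears on a method and either the restricting flag is off or the
    // defining loader is privileged.
    static bool honor_reserved_stack_access(Location loc,
                                            bool privileged,
                                            bool restrict_reserved_stack) {
      if (loc != in_method) return false;                        // methods only
      if (restrict_reserved_stack && !privileged) return false;  // honor privileges
      return true;
    }
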
@@ -1789,7 +2042,7 @@
   MetadataFactory::free_array<u1>(_loader_data, _field_type_annotations);
 }
 
-void ClassFileParser::MethodAnnotationCollector::apply_to(methodHandle m) {
+void MethodAnnotationCollector::apply_to(methodHandle m) {
   if (has_annotation(_method_CallerSensitive))
     m->set_caller_sensitive(true);
   if (has_annotation(_method_ForceInline))
@@ -1804,13 +2057,15 @@
     m->set_hidden(true);
   if (has_annotation(_method_HotSpotIntrinsicCandidate) && !m->is_synthetic())
     m->set_intrinsic_candidate(true);
+  if (has_annotation(_jdk_internal_vm_annotation_ReservedStackAccess))
+    m->set_has_reserved_stack_access(true);
 }
 
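apply_to above replays annotation bits collected during parsing onto the freshly built method; a minimal standalone version of that accumulate-then-apply pattern (MethodFlags and the ID values are hypothetical stand-ins):

    #include <cstdint>

    struct MethodFlags {            // hypothetical stand-in for Method*
      bool force_inline = false;
      bool reserved_stack_access = false;
    };

    class Collector {
      uint32_t _bits = 0;
     public:
      enum ID { ForceInline, ReservedStackAccess };
      void set(ID id)       { _bits |= (1u << id); }
      bool has(ID id) const { return (_bits & (1u << id)) != 0; }

      // Replay the collected bits onto the method, as apply_to does above.
      void apply_to(MethodFlags* m) const {
        if (has(ForceInline))         m->force_inline = true;
        if (has(ReservedStackAccess)) m->reserved_stack_access = true;
      }
    };
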
-void ClassFileParser::ClassAnnotationCollector::apply_to(instanceKlassHandle k) {
-  k->set_is_contended(is_contended());
+void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) {
+  assert(ik != NULL, "invariant");
+  ik->set_is_contended(is_contended());
 }
 
-
 #define MAX_ARGS_SIZE 255
 #define MAX_CODE_SIZE 65535
 #define INITIAL_MAX_LVT_NUMBER 256
@@ -1828,13 +2083,13 @@
  *     Each LVTT entry has to match some LVT entry.
  *   - HotSpot internal LVT keeps natural ordering of class file LVT entries.
  */
-void ClassFileParser::copy_localvariable_table(ConstMethod* cm,
+void ClassFileParser::copy_localvariable_table(const ConstMethod* cm,
                                                int lvt_cnt,
-                                               u2* localvariable_table_length,
-                                               u2** localvariable_table_start,
+                                               u2* const localvariable_table_length,
+                                               const u2**const localvariable_table_start,
                                                int lvtt_cnt,
-                                               u2* localvariable_type_table_length,
-                                               u2** localvariable_type_table_start,
+                                               u2* const localvariable_type_table_length,
+                                               const u2**const localvariable_type_table_start,
                                                TRAPS) {
 
   ResourceMark rm(THREAD);
@@ -1842,10 +2097,10 @@
   typedef ResourceHashtable<LocalVariableTableElement, LocalVariableTableElement*,
                             &LVT_Hash::hash, &LVT_Hash::equals> LVT_HashTable;
 
-  LVT_HashTable* table = new LVT_HashTable();
+  LVT_HashTable* const table = new LVT_HashTable();
 
   // To fill LocalVariableTable in
-  Classfile_LVT_Element*  cf_lvt;
+  const Classfile_LVT_Element* cf_lvt;
   LocalVariableTableElement* lvt = cm->localvariable_table_start();
 
   for (int tbl_no = 0; tbl_no < lvt_cnt; tbl_no++) {
@@ -1865,7 +2120,7 @@
   }
 
   // To merge LocalVariableTable and LocalVariableTypeTable
-  Classfile_LVT_Element* cf_lvtt;
+  const Classfile_LVT_Element* cf_lvtt;
   LocalVariableTableElement lvtt_elem;
 
   for (int tbl_no = 0; tbl_no < lvtt_cnt; tbl_no++) {
@@ -1895,19 +2150,19 @@
 
 
 void ClassFileParser::copy_method_annotations(ConstMethod* cm,
-                                       u1* runtime_visible_annotations,
+                                       const u1* runtime_visible_annotations,
                                        int runtime_visible_annotations_length,
-                                       u1* runtime_invisible_annotations,
+                                       const u1* runtime_invisible_annotations,
                                        int runtime_invisible_annotations_length,
-                                       u1* runtime_visible_parameter_annotations,
+                                       const u1* runtime_visible_parameter_annotations,
                                        int runtime_visible_parameter_annotations_length,
-                                       u1* runtime_invisible_parameter_annotations,
+                                       const u1* runtime_invisible_parameter_annotations,
                                        int runtime_invisible_parameter_annotations_length,
-                                       u1* runtime_visible_type_annotations,
+                                       const u1* runtime_visible_type_annotations,
                                        int runtime_visible_type_annotations_length,
-                                       u1* runtime_invisible_type_annotations,
+                                       const u1* runtime_invisible_type_annotations,
                                        int runtime_invisible_type_annotations_length,
-                                       u1* annotation_default,
+                                       const u1* annotation_default,
                                        int annotation_default_length,
                                        TRAPS) {
 
@@ -1963,33 +2218,37 @@
 // from the method back up to the containing klass. These flag values
 // are added to klass's access_flags.
 
-methodHandle ClassFileParser::parse_method(bool is_interface,
-                                           AccessFlags *promoted_flags,
-                                           TRAPS) {
-  ClassFileStream* cfs = stream();
-  methodHandle nullHandle;
+Method* ClassFileParser::parse_method(const ClassFileStream* const cfs,
+                                      bool is_interface,
+                                      const ConstantPool* cp,
+                                      AccessFlags* const promoted_flags,
+                                      TRAPS) {
+  assert(cfs != NULL, "invariant");
+  assert(cp != NULL, "invariant");
+  assert(promoted_flags != NULL, "invariant");
+
   ResourceMark rm(THREAD);
-  // Parse fixed parts
-  cfs->guarantee_more(8, CHECK_(nullHandle)); // access_flags, name_index, descriptor_index, attributes_count
+  // Parse fixed parts:
+  // access_flags, name_index, descriptor_index, attributes_count
+  cfs->guarantee_more(8, CHECK_NULL);
 
   int flags = cfs->get_u2_fast();
-  u2 name_index = cfs->get_u2_fast();
-  int cp_size = _cp->length();
+  const u2 name_index = cfs->get_u2_fast();
+  const int cp_size = cp->length();
   check_property(
     valid_symbol_at(name_index),
     "Illegal constant pool index %u for method name in class file %s",
-    name_index, CHECK_(nullHandle));
-  Symbol*  name = _cp->symbol_at(name_index);
-  verify_legal_method_name(name, CHECK_(nullHandle));
-
-  u2 signature_index = cfs->get_u2_fast();
+    name_index, CHECK_NULL);
+  const Symbol* const name = cp->symbol_at(name_index);
+  verify_legal_method_name(name, CHECK_NULL);
+
+  const u2 signature_index = cfs->get_u2_fast();
   guarantee_property(
     valid_symbol_at(signature_index),
     "Illegal constant pool index %u for method signature in class file %s",
-    signature_index, CHECK_(nullHandle));
-  Symbol*  signature = _cp->symbol_at(signature_index);
-
-  AccessFlags access_flags;
+    signature_index, CHECK_NULL);
+  const Symbol* const signature = cp->symbol_at(signature_index);
+
   if (name == vmSymbols::class_initializer_name()) {
     // We ignore the other access flags for a valid class initializer.
     // (JVM Spec 2nd ed., chapter 4.6)
@@ -1998,37 +2257,37 @@
     } else if ((flags & JVM_ACC_STATIC) == JVM_ACC_STATIC) {
       flags &= JVM_ACC_STATIC | JVM_ACC_STRICT;
     } else {
-      classfile_parse_error("Method <clinit> is not static in class file %s", CHECK_(nullHandle));
+      classfile_parse_error("Method <clinit> is not static in class file %s", CHECK_NULL);
     }
   } else {
-    verify_legal_method_modifiers(flags, is_interface, name, CHECK_(nullHandle));
+    verify_legal_method_modifiers(flags, is_interface, name, CHECK_NULL);
   }
 
   if (name == vmSymbols::object_initializer_name() && is_interface) {
-    classfile_parse_error("Interface cannot have a method named <init>, class file %s", CHECK_(nullHandle));
+    classfile_parse_error("Interface cannot have a method named <init>, class file %s", CHECK_NULL);
   }
 
   int args_size = -1;  // only used when _need_verify is true
   if (_need_verify) {
     args_size = ((flags & JVM_ACC_STATIC) ? 0 : 1) +
-                 verify_legal_method_signature(name, signature, CHECK_(nullHandle));
+                 verify_legal_method_signature(name, signature, CHECK_NULL);
     if (args_size > MAX_ARGS_SIZE) {
-      classfile_parse_error("Too many arguments in method signature in class file %s", CHECK_(nullHandle));
+      classfile_parse_error("Too many arguments in method signature in class file %s", CHECK_NULL);
     }
   }
 
-  access_flags.set_flags(flags & JVM_RECOGNIZED_METHOD_MODIFIERS);
+  AccessFlags access_flags(flags & JVM_RECOGNIZED_METHOD_MODIFIERS);
 
   // Default values for code and exceptions attribute elements
   u2 max_stack = 0;
   u2 max_locals = 0;
   u4 code_length = 0;
-  u1* code_start = 0;
+  const u1* code_start = 0;
   u2 exception_table_length = 0;
-  u2* exception_table_start = NULL;
+  const u2* exception_table_start = NULL;
   Array<int>* exception_handlers = Universe::the_empty_int_array();
   u2 checked_exceptions_length = 0;
-  u2* checked_exceptions_start = NULL;
+  const u2* checked_exceptions_start = NULL;
   CompressedLineNumberWriteStream* linenumber_table = NULL;
   int linenumber_table_length = 0;
   int total_lvt_length = 0;
@@ -2038,98 +2297,102 @@
   u2 max_lvt_cnt = INITIAL_MAX_LVT_NUMBER;
   u2 max_lvtt_cnt = INITIAL_MAX_LVT_NUMBER;
   u2* localvariable_table_length = NULL;
-  u2** localvariable_table_start = NULL;
+  const u2** localvariable_table_start = NULL;
   u2* localvariable_type_table_length = NULL;
-  u2** localvariable_type_table_start = NULL;
+  const u2** localvariable_type_table_start = NULL;
   int method_parameters_length = -1;
-  u1* method_parameters_data = NULL;
+  const u1* method_parameters_data = NULL;
   bool method_parameters_seen = false;
   bool parsed_code_attribute = false;
   bool parsed_checked_exceptions_attribute = false;
   bool parsed_stackmap_attribute = false;
   // stackmap attribute - JDK1.5
-  u1* stackmap_data = NULL;
+  const u1* stackmap_data = NULL;
   int stackmap_data_length = 0;
   u2 generic_signature_index = 0;
   MethodAnnotationCollector parsed_annotations;
-  u1* runtime_visible_annotations = NULL;
+  const u1* runtime_visible_annotations = NULL;
   int runtime_visible_annotations_length = 0;
-  u1* runtime_invisible_annotations = NULL;
+  const u1* runtime_invisible_annotations = NULL;
   int runtime_invisible_annotations_length = 0;
-  u1* runtime_visible_parameter_annotations = NULL;
+  const u1* runtime_visible_parameter_annotations = NULL;
   int runtime_visible_parameter_annotations_length = 0;
-  u1* runtime_invisible_parameter_annotations = NULL;
+  const u1* runtime_invisible_parameter_annotations = NULL;
   int runtime_invisible_parameter_annotations_length = 0;
-  u1* runtime_visible_type_annotations = NULL;
+  const u1* runtime_visible_type_annotations = NULL;
   int runtime_visible_type_annotations_length = 0;
-  u1* runtime_invisible_type_annotations = NULL;
+  const u1* runtime_invisible_type_annotations = NULL;
   int runtime_invisible_type_annotations_length = 0;
   bool runtime_invisible_annotations_exists = false;
   bool runtime_invisible_type_annotations_exists = false;
   bool runtime_invisible_parameter_annotations_exists = false;
-  u1* annotation_default = NULL;
+  const u1* annotation_default = NULL;
   int annotation_default_length = 0;
 
   // Parse code and exceptions attribute
   u2 method_attributes_count = cfs->get_u2_fast();
   while (method_attributes_count--) {
-    cfs->guarantee_more(6, CHECK_(nullHandle));  // method_attribute_name_index, method_attribute_length
-    u2 method_attribute_name_index = cfs->get_u2_fast();
-    u4 method_attribute_length = cfs->get_u4_fast();
+    cfs->guarantee_more(6, CHECK_NULL);  // method_attribute_name_index, method_attribute_length
+    const u2 method_attribute_name_index = cfs->get_u2_fast();
+    const u4 method_attribute_length = cfs->get_u4_fast();
     check_property(
       valid_symbol_at(method_attribute_name_index),
       "Invalid method attribute name index %u in class file %s",
-      method_attribute_name_index, CHECK_(nullHandle));
-
-    Symbol* method_attribute_name = _cp->symbol_at(method_attribute_name_index);
+      method_attribute_name_index, CHECK_NULL);
+
+    const Symbol* const method_attribute_name = cp->symbol_at(method_attribute_name_index);
     if (method_attribute_name == vmSymbols::tag_code()) {
       // Parse Code attribute
       if (_need_verify) {
         guarantee_property(
             !access_flags.is_native() && !access_flags.is_abstract(),
                         "Code attribute in native or abstract methods in class file %s",
-                         CHECK_(nullHandle));
+                         CHECK_NULL);
       }
       if (parsed_code_attribute) {
-        classfile_parse_error("Multiple Code attributes in class file %s", CHECK_(nullHandle));
+        classfile_parse_error("Multiple Code attributes in class file %s",
+                              CHECK_NULL);
       }
       parsed_code_attribute = true;
 
       // Stack size, locals size, and code size
       if (_major_version == 45 && _minor_version <= 2) {
-        cfs->guarantee_more(4, CHECK_(nullHandle));
+        cfs->guarantee_more(4, CHECK_NULL);
         max_stack = cfs->get_u1_fast();
         max_locals = cfs->get_u1_fast();
         code_length = cfs->get_u2_fast();
       } else {
-        cfs->guarantee_more(8, CHECK_(nullHandle));
+        cfs->guarantee_more(8, CHECK_NULL);
         max_stack = cfs->get_u2_fast();
         max_locals = cfs->get_u2_fast();
         code_length = cfs->get_u4_fast();
       }
       if (_need_verify) {
         guarantee_property(args_size <= max_locals,
-                           "Arguments can't fit into locals in class file %s", CHECK_(nullHandle));
+                           "Arguments can't fit into locals in class file %s",
+                           CHECK_NULL);
         guarantee_property(code_length > 0 && code_length <= MAX_CODE_SIZE,
                            "Invalid method Code length %u in class file %s",
-                           code_length, CHECK_(nullHandle));
+                           code_length, CHECK_NULL);
       }
       // Code pointer
       code_start = cfs->get_u1_buffer();
       assert(code_start != NULL, "null code start");
-      cfs->guarantee_more(code_length, CHECK_(nullHandle));
+      cfs->guarantee_more(code_length, CHECK_NULL);
       cfs->skip_u1_fast(code_length);
 
       // Exception handler table
-      cfs->guarantee_more(2, CHECK_(nullHandle));  // exception_table_length
+      cfs->guarantee_more(2, CHECK_NULL);  // exception_table_length
       exception_table_length = cfs->get_u2_fast();
       if (exception_table_length > 0) {
-        exception_table_start =
-              parse_exception_table(code_length, exception_table_length, CHECK_(nullHandle));
+        exception_table_start = parse_exception_table(cfs,
+                                                      code_length,
+                                                      exception_table_length,
+                                                      CHECK_NULL);
       }
 
       // Parse additional attributes in code attribute
-      cfs->guarantee_more(2, CHECK_(nullHandle));  // code_attributes_count
+      cfs->guarantee_more(2, CHECK_NULL);  // code_attributes_count
       u2 code_attributes_count = cfs->get_u2_fast();
 
       unsigned int calculated_attribute_length = 0;
@@ -2152,111 +2415,119 @@
               sizeof(u2) );  // catch_type_index
 
       while (code_attributes_count--) {
-        cfs->guarantee_more(6, CHECK_(nullHandle));  // code_attribute_name_index, code_attribute_length
-        u2 code_attribute_name_index = cfs->get_u2_fast();
-        u4 code_attribute_length = cfs->get_u4_fast();
+        cfs->guarantee_more(6, CHECK_NULL);  // code_attribute_name_index, code_attribute_length
+        const u2 code_attribute_name_index = cfs->get_u2_fast();
+        const u4 code_attribute_length = cfs->get_u4_fast();
         calculated_attribute_length += code_attribute_length +
                                        sizeof(code_attribute_name_index) +
                                        sizeof(code_attribute_length);
         check_property(valid_symbol_at(code_attribute_name_index),
                        "Invalid code attribute name index %u in class file %s",
                        code_attribute_name_index,
-                       CHECK_(nullHandle));
+                       CHECK_NULL);
         if (LoadLineNumberTables &&
-            _cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_line_number_table()) {
+            cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_line_number_table()) {
           // Parse and compress line number table
-          parse_linenumber_table(code_attribute_length, code_length,
-            &linenumber_table, CHECK_(nullHandle));
+          parse_linenumber_table(code_attribute_length,
+                                 code_length,
+                                 &linenumber_table,
+                                 CHECK_NULL);
 
         } else if (LoadLocalVariableTables &&
-                   _cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_table()) {
+                   cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_table()) {
           // Parse local variable table
           if (!lvt_allocated) {
             localvariable_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
               THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
             localvariable_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2*, INITIAL_MAX_LVT_NUMBER);
+              THREAD, const u2*, INITIAL_MAX_LVT_NUMBER);
             localvariable_type_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
               THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
             localvariable_type_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2*, INITIAL_MAX_LVT_NUMBER);
+              THREAD, const u2*, INITIAL_MAX_LVT_NUMBER);
             lvt_allocated = true;
           }
           if (lvt_cnt == max_lvt_cnt) {
             max_lvt_cnt <<= 1;
             localvariable_table_length = REALLOC_RESOURCE_ARRAY(u2, localvariable_table_length, lvt_cnt, max_lvt_cnt);
-            localvariable_table_start  = REALLOC_RESOURCE_ARRAY(u2*, localvariable_table_start, lvt_cnt, max_lvt_cnt);
+            localvariable_table_start  = REALLOC_RESOURCE_ARRAY(const u2*, localvariable_table_start, lvt_cnt, max_lvt_cnt);
           }
           localvariable_table_start[lvt_cnt] =
-            parse_localvariable_table(code_length,
+            parse_localvariable_table(cfs,
+                                      code_length,
                                       max_locals,
                                       code_attribute_length,
                                       &localvariable_table_length[lvt_cnt],
                                       false,    // is not LVTT
-                                      CHECK_(nullHandle));
+                                      CHECK_NULL);
           total_lvt_length += localvariable_table_length[lvt_cnt];
           lvt_cnt++;
         } else if (LoadLocalVariableTypeTables &&
                    _major_version >= JAVA_1_5_VERSION &&
-                   _cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_type_table()) {
+                   cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_type_table()) {
           if (!lvt_allocated) {
             localvariable_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
               THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
             localvariable_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2*, INITIAL_MAX_LVT_NUMBER);
+              THREAD, const u2*, INITIAL_MAX_LVT_NUMBER);
             localvariable_type_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
               THREAD, u2,  INITIAL_MAX_LVT_NUMBER);
             localvariable_type_table_start = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2*, INITIAL_MAX_LVT_NUMBER);
+              THREAD, const u2*, INITIAL_MAX_LVT_NUMBER);
             lvt_allocated = true;
           }
           // Parse local variable type table
           if (lvtt_cnt == max_lvtt_cnt) {
             max_lvtt_cnt <<= 1;
             localvariable_type_table_length = REALLOC_RESOURCE_ARRAY(u2, localvariable_type_table_length, lvtt_cnt, max_lvtt_cnt);
-            localvariable_type_table_start  = REALLOC_RESOURCE_ARRAY(u2*, localvariable_type_table_start, lvtt_cnt, max_lvtt_cnt);
+            localvariable_type_table_start  = REALLOC_RESOURCE_ARRAY(const u2*, localvariable_type_table_start, lvtt_cnt, max_lvtt_cnt);
           }
           localvariable_type_table_start[lvtt_cnt] =
-            parse_localvariable_table(code_length,
+            parse_localvariable_table(cfs,
+                                      code_length,
                                       max_locals,
                                       code_attribute_length,
                                       &localvariable_type_table_length[lvtt_cnt],
                                       true,     // is LVTT
-                                      CHECK_(nullHandle));
+                                      CHECK_NULL);
           lvtt_cnt++;
         } else if (_major_version >= Verifier::STACKMAP_ATTRIBUTE_MAJOR_VERSION &&
-                   _cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_stack_map_table()) {
+                   cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_stack_map_table()) {
           // Stack map is only needed by the new verifier in JDK1.5.
           if (parsed_stackmap_attribute) {
-            classfile_parse_error("Multiple StackMapTable attributes in class file %s", CHECK_(nullHandle));
+            classfile_parse_error("Multiple StackMapTable attributes in class file %s", CHECK_NULL);
           }
-          stackmap_data = parse_stackmap_table(code_attribute_length, CHECK_(nullHandle));
+          stackmap_data = parse_stackmap_table(cfs, code_attribute_length, _need_verify, CHECK_NULL);
           stackmap_data_length = code_attribute_length;
           parsed_stackmap_attribute = true;
         } else {
           // Skip unknown attributes
-          cfs->skip_u1(code_attribute_length, CHECK_(nullHandle));
+          cfs->skip_u1(code_attribute_length, CHECK_NULL);
         }
       }
       // check method attribute length
       if (_need_verify) {
         guarantee_property(method_attribute_length == calculated_attribute_length,
-                           "Code segment has wrong length in class file %s", CHECK_(nullHandle));
+                           "Code segment has wrong length in class file %s",
+                           CHECK_NULL);
       }
     } else if (method_attribute_name == vmSymbols::tag_exceptions()) {
       // Parse Exceptions attribute
       if (parsed_checked_exceptions_attribute) {
-        classfile_parse_error("Multiple Exceptions attributes in class file %s", CHECK_(nullHandle));
+        classfile_parse_error("Multiple Exceptions attributes in class file %s",
+                              CHECK_NULL);
       }
       parsed_checked_exceptions_attribute = true;
       checked_exceptions_start =
-            parse_checked_exceptions(&checked_exceptions_length,
+            parse_checked_exceptions(cfs,
+                                     &checked_exceptions_length,
                                      method_attribute_length,
-                                     CHECK_(nullHandle));
+                                     CHECK_NULL);
     } else if (method_attribute_name == vmSymbols::tag_method_parameters()) {
       // reject multiple method parameters
       if (method_parameters_seen) {
-        classfile_parse_error("Multiple MethodParameters attributes in class file %s", CHECK_(nullHandle));
+        classfile_parse_error("Multiple MethodParameters attributes in class file %s",
+                              CHECK_NULL);
       }
       method_parameters_seen = true;
       method_parameters_length = cfs->get_u1_fast();
@@ -2264,7 +2535,7 @@
       if (method_attribute_length != real_length) {
         classfile_parse_error(
           "Invalid MethodParameters method attribute length %u in class file",
-          method_attribute_length, CHECK_(nullHandle));
+          method_attribute_length, CHECK_NULL);
       }
       method_parameters_data = cfs->get_u1_buffer();
       cfs->skip_u2_fast(method_parameters_length);
@@ -2276,7 +2547,7 @@
       if (method_attribute_length != 0) {
         classfile_parse_error(
           "Invalid Synthetic method attribute length %u in class file %s",
-          method_attribute_length, CHECK_(nullHandle));
+          method_attribute_length, CHECK_NULL);
       }
       // Should we check that there hasn't already been a synthetic attribute?
       access_flags.set_is_synthetic();
@@ -2284,31 +2555,37 @@
       if (method_attribute_length != 0) {
         classfile_parse_error(
           "Invalid Deprecated method attribute length %u in class file %s",
-          method_attribute_length, CHECK_(nullHandle));
+          method_attribute_length, CHECK_NULL);
       }
     } else if (_major_version >= JAVA_1_5_VERSION) {
       if (method_attribute_name == vmSymbols::tag_signature()) {
         if (method_attribute_length != 2) {
           classfile_parse_error(
             "Invalid Signature attribute length %u in class file %s",
-            method_attribute_length, CHECK_(nullHandle));
+            method_attribute_length, CHECK_NULL);
         }
-        generic_signature_index = parse_generic_signature_attribute(CHECK_(nullHandle));
+        generic_signature_index = parse_generic_signature_attribute(cfs, CHECK_NULL);
       } else if (method_attribute_name == vmSymbols::tag_runtime_visible_annotations()) {
         if (runtime_visible_annotations != NULL) {
           classfile_parse_error(
-            "Multiple RuntimeVisibleAnnotations attributes for method in class file %s", CHECK_(nullHandle));
+            "Multiple RuntimeVisibleAnnotations attributes for method in class file %s",
+            CHECK_NULL);
         }
         runtime_visible_annotations_length = method_attribute_length;
         runtime_visible_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_annotations != NULL, "null visible annotations");
-        parse_annotations(runtime_visible_annotations,
-            runtime_visible_annotations_length, &parsed_annotations);
-        cfs->skip_u1(runtime_visible_annotations_length, CHECK_(nullHandle));
+        parse_annotations(cp,
+                          runtime_visible_annotations,
+                          runtime_visible_annotations_length,
+                          &parsed_annotations,
+                          _loader_data,
+                          CHECK_NULL);
+        cfs->skip_u1(runtime_visible_annotations_length, CHECK_NULL);
       } else if (method_attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
         if (runtime_invisible_annotations_exists) {
           classfile_parse_error(
-            "Multiple RuntimeInvisibleAnnotations attributes for method in class file %s", CHECK_(nullHandle));
+            "Multiple RuntimeInvisibleAnnotations attributes for method in class file %s",
+            CHECK_NULL);
         }
         runtime_invisible_annotations_exists = true;
         if (PreserveAllAnnotations) {
@@ -2316,54 +2593,57 @@
           runtime_invisible_annotations = cfs->get_u1_buffer();
           assert(runtime_invisible_annotations != NULL, "null invisible annotations");
         }
-        cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
+        cfs->skip_u1(method_attribute_length, CHECK_NULL);
       } else if (method_attribute_name == vmSymbols::tag_runtime_visible_parameter_annotations()) {
         if (runtime_visible_parameter_annotations != NULL) {
           classfile_parse_error(
-            "Multiple RuntimeVisibleParameterAnnotations attributes for method in class file %s", CHECK_(nullHandle));
+            "Multiple RuntimeVisibleParameterAnnotations attributes for method in class file %s",
+            CHECK_NULL);
         }
         runtime_visible_parameter_annotations_length = method_attribute_length;
         runtime_visible_parameter_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_parameter_annotations != NULL, "null visible parameter annotations");
-        cfs->skip_u1(runtime_visible_parameter_annotations_length, CHECK_(nullHandle));
+        cfs->skip_u1(runtime_visible_parameter_annotations_length, CHECK_NULL);
       } else if (method_attribute_name == vmSymbols::tag_runtime_invisible_parameter_annotations()) {
         if (runtime_invisible_parameter_annotations_exists) {
           classfile_parse_error(
-            "Multiple RuntimeInvisibleParameterAnnotations attributes for method in class file %s", CHECK_(nullHandle));
+            "Multiple RuntimeInvisibleParameterAnnotations attributes for method in class file %s",
+            CHECK_NULL);
         }
         runtime_invisible_parameter_annotations_exists = true;
         if (PreserveAllAnnotations) {
           runtime_invisible_parameter_annotations_length = method_attribute_length;
           runtime_invisible_parameter_annotations = cfs->get_u1_buffer();
-          assert(runtime_invisible_parameter_annotations != NULL, "null invisible parameter annotations");
+          assert(runtime_invisible_parameter_annotations != NULL,
+            "null invisible parameter annotations");
         }
-        cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
+        cfs->skip_u1(method_attribute_length, CHECK_NULL);
       } else if (method_attribute_name == vmSymbols::tag_annotation_default()) {
         if (annotation_default != NULL) {
           classfile_parse_error(
             "Multiple AnnotationDefault attributes for method in class file %s",
-            CHECK_(nullHandle));
+            CHECK_NULL);
         }
         annotation_default_length = method_attribute_length;
         annotation_default = cfs->get_u1_buffer();
         assert(annotation_default != NULL, "null annotation default");
-        cfs->skip_u1(annotation_default_length, CHECK_(nullHandle));
+        cfs->skip_u1(annotation_default_length, CHECK_NULL);
       } else if (method_attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
         if (runtime_visible_type_annotations != NULL) {
           classfile_parse_error(
             "Multiple RuntimeVisibleTypeAnnotations attributes for method in class file %s",
-            CHECK_(nullHandle));
+            CHECK_NULL);
         }
         runtime_visible_type_annotations_length = method_attribute_length;
         runtime_visible_type_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
         // No need for the VM to parse Type annotations
-        cfs->skip_u1(runtime_visible_type_annotations_length, CHECK_(nullHandle));
+        cfs->skip_u1(runtime_visible_type_annotations_length, CHECK_NULL);
       } else if (method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
         if (runtime_invisible_type_annotations_exists) {
           classfile_parse_error(
             "Multiple RuntimeInvisibleTypeAnnotations attributes for method in class file %s",
-            CHECK_(nullHandle));
+            CHECK_NULL);
         } else {
           runtime_invisible_type_annotations_exists = true;
         }
@@ -2372,14 +2652,14 @@
           runtime_invisible_type_annotations = cfs->get_u1_buffer();
           assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
         }
-        cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
+        cfs->skip_u1(method_attribute_length, CHECK_NULL);
       } else {
         // Skip unknown attributes
-        cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
+        cfs->skip_u1(method_attribute_length, CHECK_NULL);
       }
     } else {
       // Skip unknown attributes
-      cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
+      cfs->skip_u1(method_attribute_length, CHECK_NULL);
     }
   }
 
@@ -2390,8 +2670,11 @@
 
   // Make sure there's at least one Code attribute in non-native/non-abstract method
   if (_need_verify) {
-    guarantee_property(access_flags.is_native() || access_flags.is_abstract() || parsed_code_attribute,
-                      "Absent Code attribute in method that is not native or abstract in class file %s", CHECK_(nullHandle));
+    guarantee_property(access_flags.is_native() ||
+                       access_flags.is_abstract() ||
+                       parsed_code_attribute,
+                       "Absent Code attribute in method that is not native or abstract in class file %s",
+                       CHECK_NULL);
   }
 
   // All sizing information for a Method* is finally available, now create it
@@ -2411,9 +2694,12 @@
       annotation_default_length,
       0);
 
-  Method* m = Method::allocate(
-      _loader_data, code_length, access_flags, &sizes,
-      ConstMethod::NORMAL, CHECK_(nullHandle));
+  Method* const m = Method::allocate(_loader_data,
+                                     code_length,
+                                     access_flags,
+                                     &sizes,
+                                     ConstMethod::NORMAL,
+                                     CHECK_NULL);
 
   ClassLoadingService::add_class_method_size(m->size()*HeapWordSize);
 
@@ -2423,7 +2709,7 @@
   m->set_signature_index(signature_index);
 #ifdef CC_INTERP
   // hmm is there a gc issue here??
-  ResultTypeFinder rtf(_cp->symbol_at(signature_index));
+  ResultTypeFinder rtf(cp->symbol_at(signature_index));
   m->set_result_index(rtf.type());
 #endif
 
@@ -2443,17 +2729,20 @@
   m->set_max_stack(max_stack);
   m->set_max_locals(max_locals);
   if (stackmap_data != NULL) {
-    m->constMethod()->copy_stackmap_data(_loader_data, stackmap_data,
-                                         stackmap_data_length, CHECK_NULL);
+    m->constMethod()->copy_stackmap_data(_loader_data,
+                                         (u1*)stackmap_data,
+                                         stackmap_data_length,
+                                         CHECK_NULL);
   }
 
   // Copy byte codes
-  m->set_code(code_start);
+  m->set_code((u1*)code_start);
 
   // Copy line number table
   if (linenumber_table != NULL) {
     memcpy(m->compressed_linenumber_table(),
-           linenumber_table->buffer(), linenumber_table_length);
+           linenumber_table->buffer(),
+           linenumber_table_length);
   }
 
   // Copy exception table
@@ -2461,35 +2750,40 @@
     int size =
       exception_table_length * sizeof(ExceptionTableElement) / sizeof(u2);
     copy_u2_with_conversion((u2*) m->exception_table_start(),
-                             exception_table_start, size);
+                            exception_table_start, size);
   }
 
   // Copy method parameters
   if (method_parameters_length > 0) {
     MethodParametersElement* elem = m->constMethod()->method_parameters_start();
     for (int i = 0; i < method_parameters_length; i++) {
-      elem[i].name_cp_index = Bytes::get_Java_u2(method_parameters_data);
+      elem[i].name_cp_index = Bytes::get_Java_u2((address)method_parameters_data);
       method_parameters_data += 2;
-      elem[i].flags = Bytes::get_Java_u2(method_parameters_data);
+      elem[i].flags = Bytes::get_Java_u2((address)method_parameters_data);
       method_parameters_data += 2;
     }
   }
 
   // Copy checked exceptions
   if (checked_exceptions_length > 0) {
-    int size = checked_exceptions_length * sizeof(CheckedExceptionElement) / sizeof(u2);
-    copy_u2_with_conversion((u2*) m->checked_exceptions_start(), checked_exceptions_start, size);
+    const int size =
+      checked_exceptions_length * sizeof(CheckedExceptionElement) / sizeof(u2);
+    copy_u2_with_conversion((u2*) m->checked_exceptions_start(),
+                            checked_exceptions_start,
+                            size);
   }
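
Editor's note: a minimal standalone sketch of the conversion copy used here and for the exception table above; uint16_t/uint8_t and the function name are stand-ins for HotSpot's u2/u1 typedefs and copy_u2_with_conversion(). Class file data is big-endian, so each u2 is decoded explicitly while copying.

#include <stddef.h>
#include <stdint.h>

static void copy_u2_with_conversion_sketch(uint16_t* dest,
                                           const uint8_t* src,
                                           size_t count) {
  for (size_t i = 0; i < count; i++) {
    // Equivalent of Bytes::get_Java_u2(): big-endian read on any host.
    dest[i] = (uint16_t)((src[2 * i] << 8) | src[2 * i + 1]);
  }
}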
 
   // Copy class file LVT's/LVTT's into the HotSpot internal LVT.
   if (total_lvt_length > 0) {
     promoted_flags->set_has_localvariable_table();
-    copy_localvariable_table(m->constMethod(), lvt_cnt,
+    copy_localvariable_table(m->constMethod(),
+                             lvt_cnt,
                              localvariable_table_length,
                              localvariable_table_start,
                              lvtt_cnt,
                              localvariable_type_table_length,
-                             localvariable_type_table_start, CHECK_NULL);
+                             localvariable_type_table_start,
+                             CHECK_NULL);
   }
 
   if (parsed_annotations.has_any_annotations())
@@ -2535,25 +2829,37 @@
 // The promoted_flags parameter is used to pass relevant access_flags
 // from the methods back up to the containing klass. These flag values
 // are added to klass's access_flags.
-
-Array<Method*>* ClassFileParser::parse_methods(bool is_interface,
-                                               AccessFlags* promoted_flags,
-                                               bool* has_final_method,
-                                               bool* declares_default_methods,
-                                               TRAPS) {
-  ClassFileStream* cfs = stream();
-  cfs->guarantee_more(2, CHECK_NULL);  // length
-  u2 length = cfs->get_u2_fast();
+// Side-effects: populates the _methods field in the parser
+void ClassFileParser::parse_methods(const ClassFileStream* const cfs,
+                                    bool is_interface,
+                                    AccessFlags* promoted_flags,
+                                    bool* has_final_method,
+                                    bool* declares_default_methods,
+                                    TRAPS) {
+  assert(cfs != NULL, "invariant");
+  assert(promoted_flags != NULL, "invariant");
+  assert(has_final_method != NULL, "invariant");
+  assert(declares_default_methods != NULL, "invariant");
+
+  assert(NULL == _methods, "invariant");
+
+  cfs->guarantee_more(2, CHECK);  // length
+  const u2 length = cfs->get_u2_fast();
   if (length == 0) {
     _methods = Universe::the_empty_method_array();
   } else {
-    _methods = MetadataFactory::new_array<Method*>(_loader_data, length, NULL, CHECK_NULL);
+    _methods = MetadataFactory::new_array<Method*>(_loader_data,
+                                                   length,
+                                                   NULL,
+                                                   CHECK);
 
     HandleMark hm(THREAD);
     for (int index = 0; index < length; index++) {
-      methodHandle method = parse_method(is_interface,
-                                         promoted_flags,
-                                         CHECK_NULL);
+      Method* method = parse_method(cfs,
+                                    is_interface,
+                                    _cp,
+                                    promoted_flags,
+                                    CHECK);
 
       if (method->is_final()) {
         *has_final_method = true;
@@ -2564,7 +2870,7 @@
         && !method->is_abstract() && !method->is_static()) {
         *declares_default_methods = true;
       }
-      _methods->at_put(index, method());
+      _methods->at_put(index, method);
     }
 
     if (_need_verify && length > 1) {
@@ -2577,7 +2883,7 @@
       {
         debug_only(No_Safepoint_Verifier nsv;)
         for (int i = 0; i < length; i++) {
-          Method* m = _methods->at(i);
+          const Method* const m = _methods->at(i);
           // If no duplicates, add name/signature in hashtable names_and_sigs.
           if (!put_after_lookup(m->name(), m->signature(), names_and_sigs)) {
             dup = true;
@@ -2587,16 +2893,14 @@
       }
       if (dup) {
         classfile_parse_error("Duplicate method name&signature in class file %s",
-                              CHECK_NULL);
+                              CHECK);
       }
     }
   }
-  return _methods;
 }
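
Editor's note: the duplicate check above keys on (name, signature) pairs; HotSpot walks its own hashtable under a No_Safepoint_Verifier, but the logic reduces to this hedged std::set model (all names here are hypothetical):

#include <set>
#include <string>
#include <utility>
#include <vector>

static bool has_duplicate_name_and_sig(
    const std::vector<std::pair<std::string, std::string> >& methods) {
  std::set<std::pair<std::string, std::string> > seen;
  for (size_t i = 0; i < methods.size(); i++) {
    if (!seen.insert(methods[i]).second) {
      return true;  // same name&signature seen twice -> classfile_parse_error
    }
  }
  return false;
}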
 
-
-intArray* ClassFileParser::sort_methods(Array<Method*>* methods) {
-  int length = methods->length();
+static const intArray* sort_methods(Array<Method*>* methods) {
+  const int length = methods->length();
   // If JVMTI original method ordering or sharing is enabled we have to
   // remember the original class file ordering.
   // We temporarily use the vtable_index field in the Method* to store the
@@ -2604,7 +2908,7 @@
   // Put the method ordering in the shared archive.
   if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) {
     for (int index = 0; index < length; index++) {
-      Method* m = methods->at(index);
+      Method* const m = methods->at(index);
       assert(!m->valid_vtable_index(), "vtable index should not be set");
       m->set_vtable_index(index);
     }
@@ -2619,8 +2923,8 @@
   if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) {
     method_ordering = new intArray(length);
     for (int index = 0; index < length; index++) {
-      Method* m = methods->at(index);
-      int old_index = m->vtable_index();
+      Method* const m = methods->at(index);
+      const int old_index = m->vtable_index();
       assert(old_index >= 0 && old_index < length, "invalid method index");
       method_ordering->at_put(index, old_index);
       m->set_vtable_index(Method::invalid_vtable_index);
@@ -2630,10 +2934,12 @@
 }
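
Editor's note: sort_methods() parks each method's original position in the otherwise-unused vtable_index slot, sorts by name, then harvests the stashed indices into method_ordering. A simplified sketch under those assumptions (MethodStub and the helper names are hypothetical stand-ins for Method* and the real sort):

#include <algorithm>
#include <string>
#include <vector>

struct MethodStub {
  std::string name;
  int vtable_index;   // temporarily holds the original position
};

static bool stub_name_less(const MethodStub& a, const MethodStub& b) {
  return a.name < b.name;
}

static std::vector<int> sort_and_capture_order(std::vector<MethodStub>& methods) {
  for (size_t i = 0; i < methods.size(); i++) {
    methods[i].vtable_index = (int)i;              // remember the old slot
  }
  std::sort(methods.begin(), methods.end(), stub_name_less);
  std::vector<int> ordering(methods.size());
  for (size_t i = 0; i < methods.size(); i++) {
    ordering[i] = methods[i].vtable_index;         // new index -> old index
  }
  return ordering;
}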
 
 // Parse generic_signature attribute for methods and fields
-u2 ClassFileParser::parse_generic_signature_attribute(TRAPS) {
-  ClassFileStream* cfs = stream();
+u2 ClassFileParser::parse_generic_signature_attribute(const ClassFileStream* const cfs,
+                                                      TRAPS) {
+  assert(cfs != NULL, "invariant");
+
   cfs->guarantee_more(2, CHECK_0);  // generic_signature_index
-  u2 generic_signature_index = cfs->get_u2_fast();
+  const u2 generic_signature_index = cfs->get_u2_fast();
   check_property(
     valid_symbol_at(generic_signature_index),
     "Invalid Signature attribute at constant pool index %u in class file %s",
@@ -2641,10 +2947,13 @@
   return generic_signature_index;
 }
 
-void ClassFileParser::parse_classfile_sourcefile_attribute(TRAPS) {
-  ClassFileStream* cfs = stream();
+void ClassFileParser::parse_classfile_sourcefile_attribute(const ClassFileStream* const cfs,
+                                                           TRAPS) {
+
+  assert(cfs != NULL, "invariant");
+
   cfs->guarantee_more(2, CHECK);  // sourcefile_index
-  u2 sourcefile_index = cfs->get_u2_fast();
+  const u2 sourcefile_index = cfs->get_u2_fast();
   check_property(
     valid_symbol_at(sourcefile_index),
     "Invalid SourceFile attribute at constant pool index %u in class file %s",
@@ -2652,22 +2961,23 @@
   set_class_sourcefile_index(sourcefile_index);
 }
 
-
-
-void ClassFileParser::parse_classfile_source_debug_extension_attribute(int length, TRAPS) {
-  ClassFileStream* cfs = stream();
-  u1* sde_buffer = cfs->get_u1_buffer();
+void ClassFileParser::parse_classfile_source_debug_extension_attribute(const ClassFileStream* const cfs,
+                                                                       int length,
+                                                                       TRAPS) {
+  assert(cfs != NULL, "invariant");
+
+  const u1* const sde_buffer = cfs->get_u1_buffer();
   assert(sde_buffer != NULL, "null sde buffer");
 
   // Don't bother storing it if there is no way to retrieve it
   if (JvmtiExport::can_get_source_debug_extension()) {
     assert((length+1) > length, "Overflow checking");
-    u1* sde = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, u1, length+1);
+    u1* const sde = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, u1, length+1);
     for (int i = 0; i < length; i++) {
       sde[i] = sde_buffer[i];
     }
     sde[length] = '\0';
-    set_class_sde_buffer((char*)sde, length);
+    set_class_sde_buffer((const char*)sde, length);
   }
   // Got utf8 string, set stream position forward
   cfs->skip_u1(length, CHECK);
@@ -2675,16 +2985,20 @@
 
 
 // Inner classes can be static, private or protected (classic VM does this)
-#define RECOGNIZED_INNER_CLASS_MODIFIERS (JVM_RECOGNIZED_CLASS_MODIFIERS | JVM_ACC_PRIVATE | JVM_ACC_PROTECTED | JVM_ACC_STATIC)
+#define RECOGNIZED_INNER_CLASS_MODIFIERS ( JVM_RECOGNIZED_CLASS_MODIFIERS | \
+                                           JVM_ACC_PRIVATE |                \
+                                           JVM_ACC_PROTECTED |              \
+                                           JVM_ACC_STATIC                   \
+                                         )
 
 // Return number of classes in the inner classes attribute table
-u2 ClassFileParser::parse_classfile_inner_classes_attribute(u1* inner_classes_attribute_start,
+u2 ClassFileParser::parse_classfile_inner_classes_attribute(const ClassFileStream* const cfs,
+                                                            const u1* const inner_classes_attribute_start,
                                                             bool parsed_enclosingmethod_attribute,
                                                             u2 enclosing_method_class_index,
                                                             u2 enclosing_method_method_index,
                                                             TRAPS) {
-  ClassFileStream* cfs = stream();
-  u1* current_mark = cfs->current();
+  const u1* const current_mark = cfs->current();
   u2 length = 0;
   if (inner_classes_attribute_start != NULL) {
     cfs->set_current(inner_classes_attribute_start);
@@ -2701,29 +3015,29 @@
   //    ...
   //    enclosing_method_class_index,
   //    enclosing_method_method_index]
-  int size = length * 4 + (parsed_enclosingmethod_attribute ? 2 : 0);
-  Array<u2>* inner_classes = MetadataFactory::new_array<u2>(_loader_data, size, CHECK_0);
+  const int size = length * 4 + (parsed_enclosingmethod_attribute ? 2 : 0);
+  Array<u2>* const inner_classes = MetadataFactory::new_array<u2>(_loader_data, size, CHECK_0);
   _inner_classes = inner_classes;
 
   int index = 0;
-  int cp_size = _cp->length();
+  const int cp_size = _cp->length();
   cfs->guarantee_more(8 * length, CHECK_0);  // 4-tuples of u2
   for (int n = 0; n < length; n++) {
     // Inner class index
-    u2 inner_class_info_index = cfs->get_u2_fast();
+    const u2 inner_class_info_index = cfs->get_u2_fast();
     check_property(
       valid_klass_reference_at(inner_class_info_index),
       "inner_class_info_index %u has bad constant type in class file %s",
       inner_class_info_index, CHECK_0);
     // Outer class index
-    u2 outer_class_info_index = cfs->get_u2_fast();
+    const u2 outer_class_info_index = cfs->get_u2_fast();
     check_property(
       outer_class_info_index == 0 ||
         valid_klass_reference_at(outer_class_info_index),
       "outer_class_info_index %u has bad constant type in class file %s",
       outer_class_info_index, CHECK_0);
     // Inner class name
-    u2 inner_name_index = cfs->get_u2_fast();
+    const u2 inner_name_index = cfs->get_u2_fast();
     check_property(
       inner_name_index == 0 || valid_symbol_at(inner_name_index),
       "inner_name_index %u has bad constant type in class file %s",
@@ -2733,14 +3047,13 @@
                          "Class is both outer and inner class in class file %s", CHECK_0);
     }
     // Access flags
-    AccessFlags inner_access_flags;
     jint flags = cfs->get_u2_fast() & RECOGNIZED_INNER_CLASS_MODIFIERS;
     if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) {
       // Set abstract bit for old class files for backward compatibility
       flags |= JVM_ACC_ABSTRACT;
     }
     verify_legal_class_modifiers(flags, CHECK_0);
-    inner_access_flags.set_flags(flags);
+    AccessFlags inner_access_flags(flags);
 
     inner_classes->at_put(index++, inner_class_info_index);
     inner_classes->at_put(index++, outer_class_info_index);
@@ -2779,9 +3092,10 @@
   set_class_synthetic_flag(true);
 }
 
-void ClassFileParser::parse_classfile_signature_attribute(TRAPS) {
-  ClassFileStream* cfs = stream();
-  u2 signature_index = cfs->get_u2(CHECK);
+void ClassFileParser::parse_classfile_signature_attribute(const ClassFileStream* const cfs, TRAPS) {
+  assert(cfs != NULL, "invariant");
+
+  const u2 signature_index = cfs->get_u2(CHECK);
   check_property(
     valid_symbol_at(signature_index),
     "Invalid constant pool index %u in Signature attribute in class file %s",
@@ -2789,9 +3103,14 @@
   set_class_generic_signature_index(signature_index);
 }
 
-void ClassFileParser::parse_classfile_bootstrap_methods_attribute(u4 attribute_byte_length, TRAPS) {
-  ClassFileStream* cfs = stream();
-  u1* current_start = cfs->current();
+void ClassFileParser::parse_classfile_bootstrap_methods_attribute(const ClassFileStream* const cfs,
+                                                                  ConstantPool* cp,
+                                                                  u4 attribute_byte_length,
+                                                                  TRAPS) {
+  assert(cfs != NULL, "invariant");
+  assert(cp != NULL, "invariant");
+
+  const u1* const current_start = cfs->current();
 
   guarantee_property(attribute_byte_length >= sizeof(u2),
                      "Invalid BootstrapMethods attribute length %u in class file %s",
@@ -2800,7 +3119,7 @@
 
   cfs->guarantee_more(attribute_byte_length, CHECK);
 
-  int attribute_array_length = cfs->get_u2_fast();
+  const int attribute_array_length = cfs->get_u2_fast();
 
   guarantee_property(_max_bootstrap_specifier_index < attribute_array_length,
                      "Short length on BootstrapMethods in class file %s",
@@ -2810,21 +3129,22 @@
   // The attribute contains a counted array of counted tuples of shorts,
   // representing bootstrap specifiers:
   //    length*{bootstrap_method_index, argument_count*{argument_index}}
-  int operand_count = (attribute_byte_length - sizeof(u2)) / sizeof(u2);
+  const int operand_count = (attribute_byte_length - sizeof(u2)) / sizeof(u2);
   // operand_count = number of shorts in attr, except for leading length
 
   // The attribute is copied into a short[] array.
   // The array begins with a series of short[2] pairs, one for each tuple.
-  int index_size = (attribute_array_length * 2);
-
-  Array<u2>* operands = MetadataFactory::new_array<u2>(_loader_data, index_size + operand_count, CHECK);
+  const int index_size = (attribute_array_length * 2);
+
+  Array<u2>* const operands =
+    MetadataFactory::new_array<u2>(_loader_data, index_size + operand_count, CHECK);
 
   // Eagerly assign operands so they will be deallocated with the constant
   // pool if there is an error.
-  _cp->set_operands(operands);
+  cp->set_operands(operands);
 
   int operand_fill_index = index_size;
-  int cp_size = _cp->length();
+  const int cp_size = cp->length();
 
   for (int n = 0; n < attribute_array_length; n++) {
     // Store a 32-bit offset into the header of the operand array.
@@ -2832,11 +3152,11 @@
 
     // Read a bootstrap specifier.
     cfs->guarantee_more(sizeof(u2) * 2, CHECK);  // bsm, argc
-    u2 bootstrap_method_index = cfs->get_u2_fast();
-    u2 argument_count = cfs->get_u2_fast();
+    const u2 bootstrap_method_index = cfs->get_u2_fast();
+    const u2 argument_count = cfs->get_u2_fast();
     check_property(
       valid_cp_range(bootstrap_method_index, cp_size) &&
-      _cp->tag_at(bootstrap_method_index).is_method_handle(),
+      cp->tag_at(bootstrap_method_index).is_method_handle(),
       "bootstrap_method_index %u has bad constant type in class file %s",
       bootstrap_method_index,
       CHECK);
@@ -2850,26 +3170,29 @@
 
     cfs->guarantee_more(sizeof(u2) * argument_count, CHECK);  // argv[argc]
     for (int j = 0; j < argument_count; j++) {
-      u2 argument_index = cfs->get_u2_fast();
+      const u2 argument_index = cfs->get_u2_fast();
       check_property(
         valid_cp_range(argument_index, cp_size) &&
-        _cp->tag_at(argument_index).is_loadable_constant(),
+        cp->tag_at(argument_index).is_loadable_constant(),
         "argument_index %u has bad constant type in class file %s",
         argument_index,
         CHECK);
       operands->at_put(operand_fill_index++, argument_index);
     }
   }
-
-  u1* current_end = cfs->current();
-  guarantee_property(current_end == current_start + attribute_byte_length,
+  guarantee_property(current_start + attribute_byte_length == cfs->current(),
                      "Bad length on BootstrapMethods in class file %s",
                      CHECK);
 }
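
Editor's note: a hedged sketch of how the counted tuples length*{bootstrap_method_index, argument_count*{argument_index}} are flattened into the single u2 array built above: a header of per-tuple offsets (index_size slots) followed by the tuples themselves. BSMEntry is invented for illustration, and the low/high split of the 32-bit offset is an assumption; the authoritative encoding lives in ConstantPool's operand accessors.

#include <stdint.h>
#include <vector>

struct BSMEntry {                 // hypothetical bootstrap specifier
  uint16_t bsm_index;             // CONSTANT_MethodHandle CP index
  std::vector<uint16_t> args;     // loadable-constant CP indices
};

static std::vector<uint16_t> pack_operands_sketch(const std::vector<BSMEntry>& specs) {
  std::vector<uint16_t> operands(specs.size() * 2, 0);   // index_size header
  for (size_t n = 0; n < specs.size(); n++) {
    const uint32_t offset = (uint32_t)operands.size();   // where tuple n starts
    operands[2 * n]     = (uint16_t)(offset & 0xFFFF);   // low half (assumed order)
    operands[2 * n + 1] = (uint16_t)(offset >> 16);      // high half
    operands.push_back(specs[n].bsm_index);
    operands.push_back((uint16_t)specs[n].args.size());
    operands.insert(operands.end(), specs[n].args.begin(), specs[n].args.end());
  }
  return operands;
}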
 
-void ClassFileParser::parse_classfile_attributes(ClassFileParser::ClassAnnotationCollector* parsed_annotations,
+void ClassFileParser::parse_classfile_attributes(const ClassFileStream* const cfs,
+                                                 ConstantPool* cp,
+                 ClassFileParser::ClassAnnotationCollector* parsed_annotations,
                                                  TRAPS) {
-  ClassFileStream* cfs = stream();
+  assert(cfs != NULL, "invariant");
+  assert(cp != NULL, "invariant");
+  assert(parsed_annotations != NULL, "invariant");
+
   // Set inner classes attribute to default sentinel
   _inner_classes = Universe::the_empty_short_array();
   cfs->guarantee_more(2, CHECK);  // attributes_count
@@ -2878,31 +3201,31 @@
   bool parsed_innerclasses_attribute = false;
   bool parsed_enclosingmethod_attribute = false;
   bool parsed_bootstrap_methods_attribute = false;
-  u1* runtime_visible_annotations = NULL;
+  const u1* runtime_visible_annotations = NULL;
   int runtime_visible_annotations_length = 0;
-  u1* runtime_invisible_annotations = NULL;
+  const u1* runtime_invisible_annotations = NULL;
   int runtime_invisible_annotations_length = 0;
-  u1* runtime_visible_type_annotations = NULL;
+  const u1* runtime_visible_type_annotations = NULL;
   int runtime_visible_type_annotations_length = 0;
-  u1* runtime_invisible_type_annotations = NULL;
+  const u1* runtime_invisible_type_annotations = NULL;
   int runtime_invisible_type_annotations_length = 0;
   bool runtime_invisible_type_annotations_exists = false;
   bool runtime_invisible_annotations_exists = false;
   bool parsed_source_debug_ext_annotations_exist = false;
-  u1* inner_classes_attribute_start = NULL;
+  const u1* inner_classes_attribute_start = NULL;
   u4  inner_classes_attribute_length = 0;
   u2  enclosing_method_class_index = 0;
   u2  enclosing_method_method_index = 0;
   // Iterate over attributes
   while (attributes_count--) {
     cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
-    u2 attribute_name_index = cfs->get_u2_fast();
-    u4 attribute_length = cfs->get_u4_fast();
+    const u2 attribute_name_index = cfs->get_u2_fast();
+    const u4 attribute_length = cfs->get_u4_fast();
     check_property(
       valid_symbol_at(attribute_name_index),
       "Attribute name has bad constant pool index %u in class file %s",
       attribute_name_index, CHECK);
-    Symbol* tag = _cp->symbol_at(attribute_name_index);
+    const Symbol* const tag = cp->symbol_at(attribute_name_index);
     if (tag == vmSymbols::tag_source_file()) {
       // Check for SourceFile tag
       if (_need_verify) {
@@ -2913,7 +3236,7 @@
       } else {
         parsed_sourcefile_attribute = true;
       }
-      parse_classfile_sourcefile_attribute(CHECK);
+      parse_classfile_sourcefile_attribute(cfs, CHECK);
     } else if (tag == vmSymbols::tag_source_debug_extension()) {
       // Check for SourceDebugExtension tag
       if (parsed_source_debug_ext_annotations_exist) {
@@ -2921,7 +3244,7 @@
             "Multiple SourceDebugExtension attributes in class file %s", CHECK);
       }
       parsed_source_debug_ext_annotations_exist = true;
-      parse_classfile_source_debug_extension_attribute((int)attribute_length, CHECK);
+      parse_classfile_source_debug_extension_attribute(cfs, (int)attribute_length, CHECK);
     } else if (tag == vmSymbols::tag_inner_classes()) {
       // Check for InnerClasses tag
       if (parsed_innerclasses_attribute) {
@@ -2955,7 +3278,7 @@
             "Wrong Signature attribute length %u in class file %s",
             attribute_length, CHECK);
         }
-        parse_classfile_signature_attribute(CHECK);
+        parse_classfile_signature_attribute(cfs, CHECK);
       } else if (tag == vmSymbols::tag_runtime_visible_annotations()) {
         if (runtime_visible_annotations != NULL) {
           classfile_parse_error(
@@ -2964,9 +3287,12 @@
         runtime_visible_annotations_length = attribute_length;
         runtime_visible_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_annotations != NULL, "null visible annotations");
-        parse_annotations(runtime_visible_annotations,
+        parse_annotations(cp,
+                          runtime_visible_annotations,
                           runtime_visible_annotations_length,
-                          parsed_annotations);
+                          parsed_annotations,
+                          _loader_data,
+                          CHECK);
         cfs->skip_u1(runtime_visible_annotations_length, CHECK);
       } else if (tag == vmSymbols::tag_runtime_invisible_annotations()) {
         if (runtime_invisible_annotations_exists) {
@@ -2999,8 +3325,8 @@
         check_property(valid_klass_reference_at(enclosing_method_class_index),
           "Invalid or out-of-bounds class index in EnclosingMethod attribute in class file %s", CHECK);
         if (enclosing_method_method_index != 0 &&
-            (!_cp->is_within_bounds(enclosing_method_method_index) ||
-             !_cp->tag_at(enclosing_method_method_index).is_name_and_type())) {
+            (!cp->is_within_bounds(enclosing_method_method_index) ||
+             !cp->tag_at(enclosing_method_method_index).is_name_and_type())) {
           classfile_parse_error("Invalid or out-of-bounds method index in EnclosingMethod attribute in class file %s", CHECK);
         }
       } else if (tag == vmSymbols::tag_bootstrap_methods() &&
@@ -3008,7 +3334,7 @@
         if (parsed_bootstrap_methods_attribute)
           classfile_parse_error("Multiple BootstrapMethods attributes in class file %s", CHECK);
         parsed_bootstrap_methods_attribute = true;
-        parse_classfile_bootstrap_methods_attribute(attribute_length, CHECK);
+        parse_classfile_bootstrap_methods_attribute(cfs, cp, attribute_length, CHECK);
       } else if (tag == vmSymbols::tag_runtime_visible_type_annotations()) {
         if (runtime_visible_type_annotations != NULL) {
           classfile_parse_error(
@@ -3053,7 +3379,8 @@
                                            CHECK);
 
   if (parsed_innerclasses_attribute || parsed_enclosingmethod_attribute) {
-    u2 num_of_classes = parse_classfile_inner_classes_attribute(
+    const u2 num_of_classes = parse_classfile_inner_classes_attribute(
+                            cfs,
                             inner_classes_attribute_start,
                             parsed_innerclasses_attribute,
                             enclosing_method_class_index,
@@ -3072,7 +3399,9 @@
   }
 }
 
-void ClassFileParser::apply_parsed_class_attributes(instanceKlassHandle k) {
+void ClassFileParser::apply_parsed_class_attributes(InstanceKlass* k) {
+  assert(k != NULL, "invariant");
+
   if (_synthetic_flag)
     k->set_is_synthetic();
   if (_sourcefile_index != 0) {
@@ -3097,7 +3426,7 @@
       return;
     }
 
-    Annotations* annotations = Annotations::allocate(_loader_data, CHECK);
+    Annotations* const annotations = Annotations::allocate(_loader_data, CHECK);
     annotations->set_class_annotations(_annotations);
     annotations->set_class_type_annotations(_type_annotations);
     annotations->set_fields_annotations(_fields_annotations);
@@ -3117,9 +3446,11 @@
 
 // Transfer ownership of metadata allocated to the InstanceKlass.
 void ClassFileParser::apply_parsed_class_metadata(
-                                            instanceKlassHandle this_klass,
+                                            InstanceKlass* this_klass,
                                             int java_fields_count, TRAPS) {
-  _cp->set_pool_holder(this_klass());
+  assert(this_klass != NULL, "invariant");
+
+  _cp->set_pool_holder(this_klass);
   this_klass->set_constants(_cp);
   this_klass->set_fields(_fields, java_fields_count);
   this_klass->set_methods(_methods);
@@ -3132,10 +3463,11 @@
   clear_class_metadata();
 }
 
-AnnotationArray* ClassFileParser::assemble_annotations(u1* runtime_visible_annotations,
+AnnotationArray* ClassFileParser::assemble_annotations(const u1* const runtime_visible_annotations,
                                                        int runtime_visible_annotations_length,
-                                                       u1* runtime_invisible_annotations,
-                                                       int runtime_invisible_annotations_length, TRAPS) {
+                                                       const u1* const runtime_invisible_annotations,
+                                                       int runtime_invisible_annotations_length,
+                                                       TRAPS) {
   AnnotationArray* annotations = NULL;
   if (runtime_visible_annotations != NULL ||
       runtime_invisible_annotations != NULL) {
@@ -3158,9 +3490,13 @@
   return annotations;
 }
 
-instanceKlassHandle ClassFileParser::parse_super_class(int super_class_index,
-                                                       TRAPS) {
-  instanceKlassHandle super_klass;
+const InstanceKlass* ClassFileParser::parse_super_class(ConstantPool* const cp,
+                                                        const int super_class_index,
+                                                        const bool need_verify,
+                                                        TRAPS) {
+  assert(cp != NULL, "invariant");
+  const InstanceKlass* super_klass = NULL;
+
   if (super_class_index == 0) {
     check_property(_class_name == vmSymbols::java_lang_Object(),
                    "Invalid superclass index %u in class file %s",
@@ -3174,15 +3510,14 @@
     // The class name should be legal because it is checked when parsing constant pool.
     // However, make sure it is not an array type.
     bool is_array = false;
-    if (_cp->tag_at(super_class_index).is_klass()) {
-      super_klass = instanceKlassHandle(THREAD, _cp->resolved_klass_at(super_class_index));
-      if (_need_verify) {
+    if (cp->tag_at(super_class_index).is_klass()) {
+      super_klass = InstanceKlass::cast(cp->resolved_klass_at(super_class_index));
+      if (need_verify)
         is_array = super_klass->is_array_klass();
-      }
-    } else if (_need_verify) {
-      is_array = (_cp->klass_name_at(super_class_index)->byte_at(0) == JVM_SIGNATURE_ARRAY);
+    } else if (need_verify) {
+      is_array = (cp->klass_name_at(super_class_index)->byte_at(0) == JVM_SIGNATURE_ARRAY);
     }
-    if (_need_verify) {
+    if (need_verify) {
       guarantee_property(!is_array,
                         "Bad superclass name in class file %s", CHECK_NULL);
     }
@@ -3190,9 +3525,78 @@
   return super_klass;
 }
 
+static unsigned int compute_oop_map_count(const InstanceKlass* super,
+                                          unsigned int nonstatic_oop_map_count,
+                                          int first_nonstatic_oop_offset) {
+
+  unsigned int map_count =
+    NULL == super ? 0 : super->nonstatic_oop_map_count();
+  if (nonstatic_oop_map_count > 0) {
+    // We have oops to add to map
+    if (map_count == 0) {
+      map_count = nonstatic_oop_map_count;
+    }
+    else {
+      // Check whether we should add a new map block or whether the last one can
+      // be extended
+      const OopMapBlock* const first_map = super->start_of_nonstatic_oop_maps();
+      const OopMapBlock* const last_map = first_map + map_count - 1;
+
+      const int next_offset = last_map->offset() + last_map->count() * heapOopSize;
+      if (next_offset == first_nonstatic_oop_offset) {
+        // There is no gap between superklass's last oop field and first
+        // local oop field, merge maps.
+        nonstatic_oop_map_count -= 1;
+      }
+      else {
+        // Superklass didn't end with a oop field, add extra maps
+        assert(next_offset < first_nonstatic_oop_offset, "just checking");
+      }
+      map_count += nonstatic_oop_map_count;
+    }
+  }
+  return map_count;
+}
+
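Editor's note: the merge rule in compute_oop_map_count() in arithmetic form, as a hedged standalone sketch (OopMapStub stands in for OopMapBlock). If the superclass's last map covers oops at offsets 16 and 24 with heapOopSize == 8, next_offset is 32; a subclass whose first oop also lands at 32 extends that map instead of opening a new one.

#include <assert.h>

struct OopMapStub { int offset; int count; };  // stand-in for OopMapBlock

static unsigned int merged_map_count(const OopMapStub& super_last,
                                     unsigned int super_count,
                                     unsigned int new_map_count,
                                     int first_new_oop_offset,
                                     int heap_oop_size) {
  if (new_map_count == 0) return super_count;
  if (super_count == 0) return new_map_count;
  const int next_offset = super_last.offset + super_last.count * heap_oop_size;
  assert(next_offset <= first_new_oop_offset);
  // Contiguous oops: the first new map merges into the inherited last map.
  return super_count + new_map_count - (next_offset == first_new_oop_offset ? 1 : 0);
}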
+#ifndef PRODUCT
+static void print_field_layout(const Symbol* name,
+                               Array<u2>* fields,
+                               constantPoolHandle cp,
+                               int instance_size,
+                               int instance_fields_start,
+                               int instance_fields_end,
+                               int static_fields_end) {
+
+  assert(name != NULL, "invariant");
+
+  tty->print("%s: field layout\n", name->as_klass_external_name());
+  tty->print("  @%3d %s\n", instance_fields_start, "--- instance fields start ---");
+  for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
+    if (!fs.access_flags().is_static()) {
+      tty->print("  @%3d \"%s\" %s\n",
+        fs.offset(),
+        fs.name()->as_klass_external_name(),
+        fs.signature()->as_klass_external_name());
+    }
+  }
+  tty->print("  @%3d %s\n", instance_fields_end, "--- instance fields end ---");
+  tty->print("  @%3d %s\n", instance_size * wordSize, "--- instance ends ---");
+  tty->print("  @%3d %s\n", InstanceMirrorKlass::offset_of_static_fields(), "--- static fields start ---");
+  for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
+    if (fs.access_flags().is_static()) {
+      tty->print("  @%3d \"%s\" %s\n",
+        fs.offset(),
+        fs.name()->as_klass_external_name(),
+        fs.signature()->as_klass_external_name());
+    }
+  }
+  tty->print("  @%3d %s\n", static_fields_end, "--- static fields end ---");
+  tty->print("\n");
+}
+#endif
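
Editor's note: given the format strings above, -XX:+PrintFieldLayout output looks roughly like the following for a hypothetical class with one instance int field; the class name, field name, and all offsets here are invented for illustration.

com.example.Foo: field layout
  @ 12 --- instance fields start ---
  @ 12 "count" I
  @ 16 --- instance fields end ---
  @ 16 --- instance ends ---
  @ 96 --- static fields start ---
  @ 96 --- static fields end ---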
 
 // Values needed for oopmap and InstanceKlass creation
-class FieldLayoutInfo : public StackObj {
+class ClassFileParser::FieldLayoutInfo : public ResourceObj {
  public:
   int*          nonstatic_oop_offsets;
   unsigned int* nonstatic_oop_counts;
@@ -3205,27 +3609,17 @@
 };
 
 // Layout fields and fill in FieldLayoutInfo.  Could use more refactoring!
-void ClassFileParser::layout_fields(Handle class_loader,
-                                    FieldAllocationCount* fac,
-                                    ClassAnnotationCollector* parsed_annotations,
+void ClassFileParser::layout_fields(ConstantPool* cp,
+                                    const FieldAllocationCount* fac,
+                                    const ClassAnnotationCollector* parsed_annotations,
                                     FieldLayoutInfo* info,
                                     TRAPS) {
 
+  assert(cp != NULL, "invariant");
+
   // Field size and offset computation
-  int nonstatic_field_size = _super_klass() == NULL ? 0 : _super_klass()->nonstatic_field_size();
-  int next_static_oop_offset = 0;
-  int next_static_double_offset = 0;
-  int next_static_word_offset = 0;
-  int next_static_short_offset = 0;
-  int next_static_byte_offset = 0;
-  int next_nonstatic_oop_offset = 0;
-  int next_nonstatic_double_offset = 0;
-  int next_nonstatic_word_offset = 0;
-  int next_nonstatic_short_offset = 0;
-  int next_nonstatic_byte_offset = 0;
-  int first_nonstatic_oop_offset = 0;
-  int next_nonstatic_field_offset = 0;
-  int next_nonstatic_padded_offset = 0;
+  int nonstatic_field_size = _super_klass == NULL ? 0 :
+                               _super_klass->nonstatic_field_size();
 
   // Count the contended fields by type.
   //
@@ -3233,7 +3627,7 @@
   // The layout code below will also ignore the static fields.
   int nonstatic_contended_count = 0;
   FieldAllocationCount fac_contended;
-  for (AllFieldStream fs(_fields, _cp); !fs.done(); fs.next()) {
+  for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
     FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
     if (fs.is_contended()) {
       fac_contended.count[atype]++;
@@ -3245,28 +3639,28 @@
 
 
   // Calculate the starting byte offsets
-  next_static_oop_offset      = InstanceMirrorKlass::offset_of_static_fields();
-  next_static_double_offset   = next_static_oop_offset +
-                                ((fac->count[STATIC_OOP]) * heapOopSize);
+  int next_static_oop_offset    = InstanceMirrorKlass::offset_of_static_fields();
+  int next_static_double_offset = next_static_oop_offset +
+                                      ((fac->count[STATIC_OOP]) * heapOopSize);
   if ( fac->count[STATIC_DOUBLE] &&
        (Universe::field_type_should_be_aligned(T_DOUBLE) ||
         Universe::field_type_should_be_aligned(T_LONG)) ) {
     next_static_double_offset = align_size_up(next_static_double_offset, BytesPerLong);
   }
 
-  next_static_word_offset     = next_static_double_offset +
-                                ((fac->count[STATIC_DOUBLE]) * BytesPerLong);
-  next_static_short_offset    = next_static_word_offset +
-                                ((fac->count[STATIC_WORD]) * BytesPerInt);
-  next_static_byte_offset     = next_static_short_offset +
-                                ((fac->count[STATIC_SHORT]) * BytesPerShort);
+  int next_static_word_offset   = next_static_double_offset +
+                                    ((fac->count[STATIC_DOUBLE]) * BytesPerLong);
+  int next_static_short_offset  = next_static_word_offset +
+                                    ((fac->count[STATIC_WORD]) * BytesPerInt);
+  int next_static_byte_offset   = next_static_short_offset +
+                                  ((fac->count[STATIC_SHORT]) * BytesPerShort);
 
   int nonstatic_fields_start  = instanceOopDesc::base_offset_in_bytes() +
                                 nonstatic_field_size * heapOopSize;
 
-  next_nonstatic_field_offset = nonstatic_fields_start;
-
-  bool is_contended_class     = parsed_annotations->is_contended();
+  int next_nonstatic_field_offset = nonstatic_fields_start;
+
+  const bool is_contended_class = parsed_annotations->is_contended();
 
   // Class is contended, pad before all the fields
   if (is_contended_class) {
@@ -3288,9 +3682,10 @@
                                         fac->count[NONSTATIC_SHORT] + fac->count[NONSTATIC_BYTE] +
                                         fac->count[NONSTATIC_OOP];
 
-  bool super_has_nonstatic_fields =
-          (_super_klass() != NULL && _super_klass->has_nonstatic_fields());
-  bool has_nonstatic_fields = super_has_nonstatic_fields || (nonstatic_fields_count != 0);
+  const bool super_has_nonstatic_fields =
+          (_super_klass != NULL && _super_klass->has_nonstatic_fields());
+  const bool has_nonstatic_fields =
+    super_has_nonstatic_fields || (nonstatic_fields_count != 0);
 
 
   // Prepare list of oops for oop map generation.
@@ -3303,20 +3698,18 @@
   //
   // TODO: We add +1 to always allocate non-zero resource arrays; we need
   // to figure out if we still need to do this.
-  int* nonstatic_oop_offsets;
-  unsigned int* nonstatic_oop_counts;
   unsigned int nonstatic_oop_map_count = 0;
   unsigned int max_nonstatic_oop_maps  = fac->count[NONSTATIC_OOP] + 1;
 
-  nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD(
+  int* nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD(
             THREAD, int, max_nonstatic_oop_maps);
-  nonstatic_oop_counts  = NEW_RESOURCE_ARRAY_IN_THREAD(
+  unsigned int* const nonstatic_oop_counts  = NEW_RESOURCE_ARRAY_IN_THREAD(
             THREAD, unsigned int, max_nonstatic_oop_maps);
 
-  first_nonstatic_oop_offset = 0; // will be set for first oop field
+  int first_nonstatic_oop_offset = 0; // will be set for first oop field
 
   bool compact_fields   = CompactFields;
-  int  allocation_style = FieldsAllocationStyle;
+  int allocation_style = FieldsAllocationStyle;
   if( allocation_style < 0 || allocation_style > 2 ) { // Out of range?
     assert(false, "0 <= FieldsAllocationStyle <= 2");
     allocation_style = 1; // Optimistic
@@ -3325,7 +3718,7 @@
   // The next classes have predefined hard-coded fields offsets
   // (see in JavaClasses::compute_hard_coded_offsets()).
   // Use default fields allocation order for them.
-  if( (allocation_style != 0 || compact_fields ) && class_loader.is_null() &&
+  if( (allocation_style != 0 || compact_fields ) && _loader_data->class_loader() == NULL &&
       (_class_name == vmSymbols::java_lang_AssertionStatusDirectives() ||
        _class_name == vmSymbols::java_lang_Class() ||
        _class_name == vmSymbols::java_lang_ClassLoader() ||
@@ -3346,6 +3739,9 @@
     compact_fields   = false; // Don't compact fields
   }
 
+  int next_nonstatic_oop_offset = 0;
+  int next_nonstatic_double_offset = 0;
+
   // Rearrange fields for a given allocation style
   if( allocation_style == 0 ) {
     // Fields order: oops, longs/doubles, ints, shorts/chars, bytes, padded fields
@@ -3357,12 +3753,12 @@
     next_nonstatic_double_offset = next_nonstatic_field_offset;
   } else if( allocation_style == 2 ) {
     // Fields allocation: oops fields in super and sub classes are together.
-    if( nonstatic_field_size > 0 && _super_klass() != NULL &&
+    if( nonstatic_field_size > 0 && _super_klass != NULL &&
         _super_klass->nonstatic_oop_map_size() > 0 ) {
-      unsigned int map_count = _super_klass->nonstatic_oop_map_count();
-      OopMapBlock* first_map = _super_klass->start_of_nonstatic_oop_maps();
-      OopMapBlock* last_map = first_map + map_count - 1;
-      int next_offset = last_map->offset() + (last_map->count() * heapOopSize);
+      const unsigned int map_count = _super_klass->nonstatic_oop_map_count();
+      const OopMapBlock* const first_map = _super_klass->start_of_nonstatic_oop_maps();
+      const OopMapBlock* const last_map = first_map + map_count - 1;
+      const int next_offset = last_map->offset() + (last_map->count() * heapOopSize);
       if (next_offset == next_nonstatic_field_offset) {
         allocation_style = 0;   // allocate oops first
         next_nonstatic_oop_offset    = next_nonstatic_field_offset;
@@ -3378,48 +3774,48 @@
     ShouldNotReachHere();
   }
 
-  int nonstatic_oop_space_count    = 0;
-  int nonstatic_word_space_count   = 0;
-  int nonstatic_short_space_count  = 0;
-  int nonstatic_byte_space_count   = 0;
-  int nonstatic_oop_space_offset   = 0;
-  int nonstatic_word_space_offset  = 0;
+  int nonstatic_oop_space_count   = 0;
+  int nonstatic_word_space_count  = 0;
+  int nonstatic_short_space_count = 0;
+  int nonstatic_byte_space_count  = 0;
+  int nonstatic_oop_space_offset = 0;
+  int nonstatic_word_space_offset = 0;
   int nonstatic_short_space_offset = 0;
-  int nonstatic_byte_space_offset  = 0;
+  int nonstatic_byte_space_offset = 0;
 
   // Try to squeeze some of the fields into the gaps due to
   // long/double alignment.
-  if( nonstatic_double_count > 0 ) {
+  if (nonstatic_double_count > 0) {
     int offset = next_nonstatic_double_offset;
     next_nonstatic_double_offset = align_size_up(offset, BytesPerLong);
-    if( compact_fields && offset != next_nonstatic_double_offset ) {
+    if (compact_fields && offset != next_nonstatic_double_offset) {
       // Allocate available fields into the gap before double field.
       int length = next_nonstatic_double_offset - offset;
       assert(length == BytesPerInt, "");
       nonstatic_word_space_offset = offset;
-      if( nonstatic_word_count > 0 ) {
+      if (nonstatic_word_count > 0) {
         nonstatic_word_count      -= 1;
         nonstatic_word_space_count = 1; // Only one will fit
         length -= BytesPerInt;
         offset += BytesPerInt;
       }
       nonstatic_short_space_offset = offset;
-      while( length >= BytesPerShort && nonstatic_short_count > 0 ) {
+      while (length >= BytesPerShort && nonstatic_short_count > 0) {
         nonstatic_short_count       -= 1;
         nonstatic_short_space_count += 1;
         length -= BytesPerShort;
         offset += BytesPerShort;
       }
       nonstatic_byte_space_offset = offset;
-      while( length > 0 && nonstatic_byte_count > 0 ) {
+      while (length > 0 && nonstatic_byte_count > 0) {
         nonstatic_byte_count       -= 1;
         nonstatic_byte_space_count += 1;
         length -= 1;
       }
       // Allocate oop field in the gap if there are no other fields for that.
       nonstatic_oop_space_offset = offset;
-      if( length >= heapOopSize && nonstatic_oop_count > 0 &&
-          allocation_style != 0 ) { // when oop fields not first
+      if (length >= heapOopSize && nonstatic_oop_count > 0 &&
+          allocation_style != 0) { // when oop fields not first
         nonstatic_oop_count      -= 1;
         nonstatic_oop_space_count = 1; // Only one will fit
         length -= heapOopSize;
@@ -3428,14 +3824,14 @@
     }
   }
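
Editor's note: the gap being filled above, in a hedged standalone sketch. With next_nonstatic_double_offset == 12 and 8-byte alignment for longs/doubles, align_size_up(12, BytesPerLong) yields 16, leaving a 4-byte hole at offset 12 that the loop above fills with one int, up to two shorts, up to four bytes, or (when oops are not allocated first) one compressed oop.

static int align_size_up_sketch(int size, int alignment) {
  // same rounding as HotSpot's align_size_up() for power-of-two alignments
  return (size + alignment - 1) & ~(alignment - 1);
}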
 
-  next_nonstatic_word_offset  = next_nonstatic_double_offset +
-                                (nonstatic_double_count * BytesPerLong);
-  next_nonstatic_short_offset = next_nonstatic_word_offset +
-                                (nonstatic_word_count * BytesPerInt);
-  next_nonstatic_byte_offset  = next_nonstatic_short_offset +
-                                (nonstatic_short_count * BytesPerShort);
-  next_nonstatic_padded_offset = next_nonstatic_byte_offset +
-                                nonstatic_byte_count;
+  int next_nonstatic_word_offset = next_nonstatic_double_offset +
+                                     (nonstatic_double_count * BytesPerLong);
+  int next_nonstatic_short_offset = next_nonstatic_word_offset +
+                                      (nonstatic_word_count * BytesPerInt);
+  int next_nonstatic_byte_offset = next_nonstatic_short_offset +
+                                     (nonstatic_short_count * BytesPerShort);
+  int next_nonstatic_padded_offset = next_nonstatic_byte_offset +
+                                       nonstatic_byte_count;
 
   // let oops jump before padding with this allocation style
   if( allocation_style == 1 ) {
@@ -3449,7 +3845,7 @@
   // Iterate over fields again and compute correct offsets.
   // The field allocation type was temporarily stored in the offset slot.
   // oop fields are located before non-oop fields (static and non-static).
-  for (AllFieldStream fs(_fields, _cp); !fs.done(); fs.next()) {
+  for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
 
     // skip already laid out fields
     if (fs.is_offset_set()) continue;
@@ -3458,7 +3854,7 @@
     if (fs.is_contended() && !fs.access_flags().is_static()) continue;
 
     int real_offset = 0;
-    FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
+    const FieldAllocationType atype = (const FieldAllocationType) fs.allocation_type();
 
     // pack the rest of the fields
     switch (atype) {
@@ -3567,8 +3963,8 @@
     next_nonstatic_padded_offset += ContendedPaddingWidth;
 
     // collect all contended groups
-    BitMap bm(_cp->size());
-    for (AllFieldStream fs(_fields, _cp); !fs.done(); fs.next()) {
+    BitMap bm(cp->size());
+    for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
       // skip already laid out fields
       if (fs.is_offset_set()) continue;
 
@@ -3580,7 +3976,7 @@
     int current_group = -1;
     while ((current_group = (int)bm.get_next_one_offset(current_group + 1)) != (int)bm.size()) {
 
-      for (AllFieldStream fs(_fields, _cp); !fs.done(); fs.next()) {
+      for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
 
         // skip already laid out fields
         if (fs.is_offset_set()) continue;
@@ -3714,7 +4110,7 @@
   if (PrintFieldLayout) {
     print_field_layout(_class_name,
           _fields,
-          _cp,
+          cp,
           instance_size,
           nonstatic_fields_start,
           nonstatic_fields_end,
@@ -3733,751 +4129,13 @@
   info->has_nonstatic_fields = has_nonstatic_fields;
 }
 
-
-instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
-                                                    ClassLoaderData* loader_data,
-                                                    Handle protection_domain,
-                                                    KlassHandle host_klass,
-                                                    GrowableArray<Handle>* cp_patches,
-                                                    TempNewSymbol& parsed_name,
-                                                    bool verify,
-                                                    TRAPS) {
-
-  // When a retransformable agent is attached, JVMTI caches the
-  // class bytes that existed before the first retransformation.
-  // If RedefineClasses() was used before the retransformable
-  // agent attached, then the cached class bytes may not be the
-  // original class bytes.
-  JvmtiCachedClassFileData *cached_class_file = NULL;
-  Handle class_loader(THREAD, loader_data->class_loader());
-  bool has_default_methods = false;
-  bool declares_default_methods = false;
-  ResourceMark rm(THREAD);
-
-  ClassFileStream* cfs = stream();
-  // Timing
-  assert(THREAD->is_Java_thread(), "must be a JavaThread");
-  JavaThread* jt = (JavaThread*) THREAD;
-
-  PerfClassTraceTime ctimer(ClassLoader::perf_class_parse_time(),
-                            ClassLoader::perf_class_parse_selftime(),
-                            NULL,
-                            jt->get_thread_stat()->perf_recursion_counts_addr(),
-                            jt->get_thread_stat()->perf_timers_addr(),
-                            PerfClassTraceTime::PARSE_CLASS);
-
-  init_parsed_class_attributes(loader_data);
-
-  if (JvmtiExport::should_post_class_file_load_hook()) {
-    // Get the cached class file bytes (if any) from the class that
-    // is being redefined or retransformed. We use jvmti_thread_state()
-    // instead of JvmtiThreadState::state_for(jt) so we don't allocate
-    // a JvmtiThreadState any earlier than necessary. This will help
-    // avoid the bug described by 7126851.
-    JvmtiThreadState *state = jt->jvmti_thread_state();
-    if (state != NULL) {
-      KlassHandle *h_class_being_redefined =
-                     state->get_class_being_redefined();
-      if (h_class_being_redefined != NULL) {
-        instanceKlassHandle ikh_class_being_redefined =
-          instanceKlassHandle(THREAD, (*h_class_being_redefined)());
-        cached_class_file = ikh_class_being_redefined->get_cached_class_file();
-      }
-    }
-
-    unsigned char* ptr = cfs->buffer();
-    unsigned char* end_ptr = cfs->buffer() + cfs->length();
-
-    JvmtiExport::post_class_file_load_hook(name, class_loader(), protection_domain,
-                                           &ptr, &end_ptr, &cached_class_file);
-
-    if (ptr != cfs->buffer()) {
-      // JVMTI agent has modified class file data.
-      // Set new class file stream using JVMTI agent modified
-      // class file data.
-      cfs = new ClassFileStream(ptr, end_ptr - ptr, cfs->source());
-      set_stream(cfs);
-    }
-  }
-
-  _host_klass = host_klass;
-  _cp_patches = cp_patches;
-
-  instanceKlassHandle nullHandle;
-
-  // Figure out whether we can skip format checking (matching classic VM behavior)
-  if (DumpSharedSpaces) {
-    // verify == true means it's a 'remote' class (i.e., non-boot class)
-    // Verification decision is based on BytecodeVerificationRemote flag
-    // for those classes.
-    _need_verify = (verify) ? BytecodeVerificationRemote :
-                              BytecodeVerificationLocal;
-  } else {
-    _need_verify = Verifier::should_verify_for(class_loader(), verify);
-  }
-
-  // Set the verify flag in stream
-  cfs->set_verify(_need_verify);
-
-  // Save the class file name for easier error message printing.
-  _class_name = (name != NULL) ? name : vmSymbols::unknown_class_name();
-
-  cfs->guarantee_more(8, CHECK_(nullHandle));  // magic, major, minor
-  // Magic value
-  u4 magic = cfs->get_u4_fast();
-  guarantee_property(magic == JAVA_CLASSFILE_MAGIC,
-                     "Incompatible magic value %u in class file %s",
-                     magic, CHECK_(nullHandle));
-
-  // Version numbers
-  u2 minor_version = cfs->get_u2_fast();
-  u2 major_version = cfs->get_u2_fast();
-
-  if (DumpSharedSpaces && major_version < JAVA_1_5_VERSION) {
-    ResourceMark rm;
-    warning("Pre JDK 1.5 class not supported by CDS: %u.%u %s",
-            major_version,  minor_version, name->as_C_string());
-    Exceptions::fthrow(
-      THREAD_AND_LOCATION,
-      vmSymbols::java_lang_UnsupportedClassVersionError(),
-      "Unsupported major.minor version for dump time %u.%u",
-      major_version,
-      minor_version);
-  }
-
-  // Check version numbers - we check this even with verifier off
-  if (!is_supported_version(major_version, minor_version)) {
-    if (name == NULL) {
-      Exceptions::fthrow(
-        THREAD_AND_LOCATION,
-        vmSymbols::java_lang_UnsupportedClassVersionError(),
-        "Unsupported class file version %u.%u, "
-        "this version of the Java Runtime only recognizes class file versions up to %u.%u",
-        major_version,
-        minor_version,
-        JAVA_MAX_SUPPORTED_VERSION,
-        JAVA_MAX_SUPPORTED_MINOR_VERSION);
-    } else {
-      ResourceMark rm(THREAD);
-      Exceptions::fthrow(
-        THREAD_AND_LOCATION,
-        vmSymbols::java_lang_UnsupportedClassVersionError(),
-        "%s has been compiled by a more recent version of the Java Runtime (class file version %u.%u), "
-        "this version of the Java Runtime only recognizes class file versions up to %u.%u",
-        name->as_C_string(),
-        major_version,
-        minor_version,
-        JAVA_MAX_SUPPORTED_VERSION,
-        JAVA_MAX_SUPPORTED_MINOR_VERSION);
-    }
-    return nullHandle;
-  }
-
-  _major_version = major_version;
-  _minor_version = minor_version;
-
-
-  // Check if verification needs to be relaxed for this class file
-  // Do not restrict it to jdk1.0 or jdk1.1 to maintain backward compatibility (4982376)
-  _relax_verify = Verifier::relax_verify_for(class_loader());
-
-  // Constant pool
-  constantPoolHandle cp = parse_constant_pool(CHECK_(nullHandle));
-
-  int cp_size = cp->length();
-
-  cfs->guarantee_more(8, CHECK_(nullHandle));  // flags, this_class, super_class, infs_len
-
-  // Access flags
-  AccessFlags access_flags;
-  jint flags = cfs->get_u2_fast() & JVM_RECOGNIZED_CLASS_MODIFIERS;
-
-  if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) {
-    // Set abstract bit for old class files for backward compatibility
-    flags |= JVM_ACC_ABSTRACT;
-  }
-  verify_legal_class_modifiers(flags, CHECK_(nullHandle));
-  access_flags.set_flags(flags);
-
-  // This class and superclass
-  u2 this_class_index = cfs->get_u2_fast();
-  check_property(
-    valid_cp_range(this_class_index, cp_size) &&
-      cp->tag_at(this_class_index).is_unresolved_klass(),
-    "Invalid this class index %u in constant pool in class file %s",
-    this_class_index, CHECK_(nullHandle));
-
-  Symbol*  class_name  = cp->klass_name_at(this_class_index);
-  assert(class_name != NULL, "class_name can't be null");
-
-  // It's important to set parsed_name *before* resolving the super class.
-  // (it's used for cleanup by the caller if parsing fails)
-  parsed_name = class_name;
-  // parsed_name is returned and can be used if there's an error, so add to
-  // its reference count.  Caller will decrement the refcount.
-  parsed_name->increment_refcount();
-
-  // Update _class_name which could be null previously to be class_name
-  _class_name = class_name;
-
-  // Don't need to check whether this class name is legal or not.
-  // It has been checked when constant pool is parsed.
-  // However, make sure it is not an array type.
-  if (_need_verify) {
-    guarantee_property(class_name->byte_at(0) != JVM_SIGNATURE_ARRAY,
-                       "Bad class name in class file %s",
-                       CHECK_(nullHandle));
-  }
-
-  Klass* preserve_this_klass;   // for storing result across HandleMark
-
-  // release all handles when parsing is done
-  { HandleMark hm(THREAD);
-
-    // Checks if name in class file matches requested name
-    if (name != NULL && class_name != name) {
-      ResourceMark rm(THREAD);
-      Exceptions::fthrow(
-        THREAD_AND_LOCATION,
-        vmSymbols::java_lang_NoClassDefFoundError(),
-        "%s (wrong name: %s)",
-        name->as_C_string(),
-        class_name->as_C_string()
-      );
-      return nullHandle;
-    }
-
-    if (TraceClassLoadingPreorder) {
-      tty->print("[Loading %s", (name != NULL) ? name->as_klass_external_name() : "NoName");
-      if (cfs->source() != NULL) tty->print(" from %s", cfs->source());
-      tty->print_cr("]");
-    }
-#if INCLUDE_CDS
-    if (DumpLoadedClassList != NULL && cfs->source() != NULL && classlist_file->is_open()) {
-      // Only dump the classes that can be stored into CDS archive
-      if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
-        if (name != NULL) {
-          ResourceMark rm(THREAD);
-          classlist_file->print_cr("%s", name->as_C_string());
-          classlist_file->flush();
-        }
-      }
-    }
-#endif
-
-    u2 super_class_index = cfs->get_u2_fast();
-    instanceKlassHandle super_klass = parse_super_class(super_class_index,
-                                                        CHECK_NULL);
-
-    // Interfaces
-    u2 itfs_len = cfs->get_u2_fast();
-    Array<Klass*>* local_interfaces =
-      parse_interfaces(itfs_len, protection_domain, _class_name,
-                       &has_default_methods, CHECK_(nullHandle));
-
-    u2 java_fields_count = 0;
-    // Fields (offsets are filled in later)
-    FieldAllocationCount fac;
-    Array<u2>* fields = parse_fields(class_name,
-                                     access_flags.is_interface(),
-                                     &fac, &java_fields_count,
-                                     CHECK_(nullHandle));
-    // Methods
-    bool has_final_method = false;
-    AccessFlags promoted_flags;
-    promoted_flags.set_flags(0);
-    Array<Method*>* methods = parse_methods(access_flags.is_interface(),
-                                            &promoted_flags,
-                                            &has_final_method,
-                                            &declares_default_methods,
-                                            CHECK_(nullHandle));
-
-    if (declares_default_methods) {
-      has_default_methods = true;
-    }
-
-    // Additional attributes
-    ClassAnnotationCollector parsed_annotations;
-    parse_classfile_attributes(&parsed_annotations, CHECK_(nullHandle));
-
-    // Finalize the Annotations metadata object,
-    // now that all annotation arrays have been created.
-    create_combined_annotations(CHECK_(nullHandle));
-
-    // Make sure this is the end of class file stream
-    guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle));
-
-    // We check super class after class file is parsed and format is checked
-    if (super_class_index > 0 && super_klass.is_null()) {
-      Symbol*  sk  = cp->klass_name_at(super_class_index);
-      if (access_flags.is_interface()) {
-        // Before attempting to resolve the superclass, check for class format
-        // errors not checked yet.
-        guarantee_property(sk == vmSymbols::java_lang_Object(),
-                           "Interfaces must have java.lang.Object as superclass in class file %s",
-                           CHECK_(nullHandle));
-      }
-      Klass* k = SystemDictionary::resolve_super_or_fail(class_name, sk,
-                                                         class_loader,
-                                                         protection_domain,
-                                                         true,
-                                                         CHECK_(nullHandle));
-
-      KlassHandle kh (THREAD, k);
-      super_klass = instanceKlassHandle(THREAD, kh());
-    }
-    if (super_klass.not_null()) {
-
-      if (super_klass->has_default_methods()) {
-        has_default_methods = true;
-      }
-
-      if (super_klass->is_interface()) {
-        ResourceMark rm(THREAD);
-        Exceptions::fthrow(
-          THREAD_AND_LOCATION,
-          vmSymbols::java_lang_IncompatibleClassChangeError(),
-          "class %s has interface %s as super class",
-          class_name->as_klass_external_name(),
-          super_klass->external_name()
-        );
-        return nullHandle;
-      }
-      // Make sure super class is not final
-      if (super_klass->is_final()) {
-        THROW_MSG_(vmSymbols::java_lang_VerifyError(), "Cannot inherit from final class", nullHandle);
-      }
-    }
-
-    // save super klass for error handling.
-    _super_klass = super_klass;
-
-    // Compute the transitive list of all unique interfaces implemented by this class
-    _transitive_interfaces =
-          compute_transitive_interfaces(super_klass, local_interfaces, CHECK_(nullHandle));
-
-    // sort methods
-    intArray* method_ordering = sort_methods(methods);
-
-    // promote flags from parse_methods() to the klass' flags
-    access_flags.add_promoted_flags(promoted_flags.as_int());
-
-    // Size of Java vtable (in words)
-    int vtable_size = 0;
-    int itable_size = 0;
-    int num_miranda_methods = 0;
-
-    GrowableArray<Method*> all_mirandas(20);
-
-    klassVtable::compute_vtable_size_and_num_mirandas(
-        &vtable_size, &num_miranda_methods, &all_mirandas, super_klass(), methods,
-        access_flags, class_loader, class_name, local_interfaces,
-                                                      CHECK_(nullHandle));
-
-    // Size of Java itable (in words)
-    itable_size = access_flags.is_interface() ? 0 : klassItable::compute_itable_size(_transitive_interfaces);
-
-    FieldLayoutInfo info;
-    layout_fields(class_loader, &fac, &parsed_annotations, &info, CHECK_NULL);
-
-    int total_oop_map_size2 =
-          InstanceKlass::nonstatic_oop_map_size(info.total_oop_map_count);
-
-    // Compute reference type
-    ReferenceType rt;
-    if (super_klass() == NULL) {
-      rt = REF_NONE;
-    } else {
-      rt = super_klass->reference_type();
-    }
-
-    // We can now create the basic Klass* for this klass
-    _klass = InstanceKlass::allocate_instance_klass(loader_data,
-                                                    vtable_size,
-                                                    itable_size,
-                                                    info.static_field_size,
-                                                    total_oop_map_size2,
-                                                    rt,
-                                                    access_flags,
-                                                    name,
-                                                    super_klass(),
-                                                    !host_klass.is_null(),
-                                                    CHECK_(nullHandle));
-    instanceKlassHandle this_klass (THREAD, _klass);
-
-    assert(this_klass->static_field_size() == info.static_field_size, "sanity");
-    assert(this_klass->nonstatic_oop_map_count() == info.total_oop_map_count,
-           "sanity");
-
-    // Fill in information already parsed
-    this_klass->set_should_verify_class(verify);
-    jint lh = Klass::instance_layout_helper(info.instance_size, false);
-    this_klass->set_layout_helper(lh);
-    assert(this_klass->is_instance_klass(), "layout is correct");
-    assert(this_klass->size_helper() == info.instance_size, "correct size_helper");
-    // Not yet: supers are done below to support the new subtype-checking fields
-    //this_klass->set_super(super_klass());
-    this_klass->set_class_loader_data(loader_data);
-    this_klass->set_nonstatic_field_size(info.nonstatic_field_size);
-    this_klass->set_has_nonstatic_fields(info.has_nonstatic_fields);
-    this_klass->set_static_oop_field_count(fac.count[STATIC_OOP]);
-
-    apply_parsed_class_metadata(this_klass, java_fields_count, CHECK_NULL);
-
-    if (has_final_method) {
-      this_klass->set_has_final_method();
-    }
-    this_klass->copy_method_ordering(method_ordering, CHECK_NULL);
-    // The InstanceKlass::_methods_jmethod_ids cache
-    // is managed on the assumption that the initial cache
-    // size is equal to the number of methods in the class. If
-    // that changes, then InstanceKlass::idnum_can_increment()
-    // has to be changed accordingly.
-    this_klass->set_initial_method_idnum(methods->length());
-    this_klass->set_name(cp->klass_name_at(this_class_index));
-    if (is_anonymous())  // I am well known to myself
-      cp->klass_at_put(this_class_index, this_klass()); // eagerly resolve
-
-    this_klass->set_minor_version(minor_version);
-    this_klass->set_major_version(major_version);
-    this_klass->set_has_default_methods(has_default_methods);
-    this_klass->set_declares_default_methods(declares_default_methods);
-
-    if (!host_klass.is_null()) {
-      assert (this_klass->is_anonymous(), "should be the same");
-      this_klass->set_host_klass(host_klass());
-    }
-
-    // Set up Method*::intrinsic_id as soon as we know the names of methods.
-    // (We used to do this lazily, but now we query it in Rewriter,
-    // which is eagerly done for every method, so we might as well do it now,
-    // when everything is fresh in memory.)
-    vmSymbols::SID klass_id = Method::klass_id_for_intrinsics(this_klass());
-    if (klass_id != vmSymbols::NO_SID) {
-      for (int j = 0; j < methods->length(); j++) {
-        Method* method = methods->at(j);
-        method->init_intrinsic_id();
-
-        if (CheckIntrinsics) {
-          // Check if an intrinsic is defined for method 'method',
-          // but the method is not annotated with @HotSpotIntrinsicCandidate.
-          if (method->intrinsic_id() != vmIntrinsics::_none &&
-              !method->intrinsic_candidate()) {
-            tty->print("Compiler intrinsic is defined for method [%s], "
-                       "but the method is not annotated with @HotSpotIntrinsicCandidate.%s",
-                       method->name_and_sig_as_C_string(),
-                       NOT_DEBUG(" Method will not be inlined.") DEBUG_ONLY(" Exiting.")
-                       );
-            tty->cr();
-            DEBUG_ONLY(vm_exit(1));
-          }
-          // Check if the method 'method' is annotated with @HotSpotIntrinsicCandidate,
-          // but there is no intrinsic available for it.
-          if (method->intrinsic_candidate() &&
-              method->intrinsic_id() == vmIntrinsics::_none) {
-            tty->print("Method [%s] is annotated with @HotSpotIntrinsicCandidate, "
-                       "but no compiler intrinsic is defined for the method.%s",
-                       method->name_and_sig_as_C_string(),
-                       NOT_DEBUG("") DEBUG_ONLY(" Exiting.")
-                       );
-            tty->cr();
-            DEBUG_ONLY(vm_exit(1));
-          }
-        }
-      }
-
-#ifdef ASSERT
-      if (CheckIntrinsics) {
-        // Check for orphan methods in the current class. A method m
-        // of a class C is orphan if an intrinsic is defined for method m,
-        // but class C does not declare m.
-        // The check is potentially expensive, therefore it is available
-        // only in debug builds.
-
-        for (int id = vmIntrinsics::FIRST_ID; id < (int)vmIntrinsics::ID_LIMIT; id++) {
-          if (id == vmIntrinsics::_compiledLambdaForm) {
-            // The _compiledLambdaForm intrinsic is a special marker for bytecode
-            // generated for the JVM from a LambdaForm and therefore no method
-            // is defined for it.
-            continue;
-          }
-
-          if (vmIntrinsics::class_for(vmIntrinsics::ID_from(id)) == klass_id) {
-            // Check if the current class contains a method with the same
-            // name, flags, signature.
-            bool match = false;
-            for (int j = 0; j < methods->length(); j++) {
-              Method* method = methods->at(j);
-              if (id == method->intrinsic_id()) {
-                match = true;
-                break;
-              }
-            }
-
-            if (!match) {
-              char buf[1000];
-              tty->print("Compiler intrinsic is defined for method [%s], "
-                         "but the method is not available in class [%s].%s",
-                         vmIntrinsics::short_name_as_C_string(vmIntrinsics::ID_from(id), buf, sizeof(buf)),
-                         this_klass->name()->as_C_string(),
-                         NOT_DEBUG("") DEBUG_ONLY(" Exiting.")
-                         );
-              tty->cr();
-              DEBUG_ONLY(vm_exit(1));
-            }
-          }
-        }
-      }
-#endif // ASSERT
-    }
-
-
-    if (cached_class_file != NULL) {
-      // JVMTI: we have an InstanceKlass now, tell it about the cached bytes
-      this_klass->set_cached_class_file(cached_class_file);
-    }
-
-    // Fill in field values obtained by parse_classfile_attributes
-    if (parsed_annotations.has_any_annotations())
-      parsed_annotations.apply_to(this_klass);
-    apply_parsed_class_attributes(this_klass);
-
-    // Miranda methods
-    if ((num_miranda_methods > 0) ||
-        // if this class introduced new miranda methods or
-        (super_klass.not_null() && (super_klass->has_miranda_methods()))
-        // super class exists and this class inherited miranda methods
-        ) {
-      this_klass->set_has_miranda_methods(); // then set a flag
-    }
-
-    // Fill in information needed to compute superclasses.
-    this_klass->initialize_supers(super_klass(), CHECK_(nullHandle));
-
-    // Initialize itable offset tables
-    klassItable::setup_itable_offset_table(this_klass);
-
-    // Compute transitive closure of interfaces this class implements
-    // Do final class setup
-    fill_oop_maps(this_klass, info.nonstatic_oop_map_count, info.nonstatic_oop_offsets, info.nonstatic_oop_counts);
-
-    // Fill in has_finalizer, has_vanilla_constructor, and layout_helper
-    set_precomputed_flags(this_klass);
-
-    // reinitialize modifiers, using the InnerClasses attribute
-    int computed_modifiers = this_klass->compute_modifier_flags(CHECK_(nullHandle));
-    this_klass->set_modifier_flags(computed_modifiers);
-
-    // check if this class can access its super class
-    check_super_class_access(this_klass, CHECK_(nullHandle));
-
-    // check if this class can access its superinterfaces
-    check_super_interface_access(this_klass, CHECK_(nullHandle));
-
-    // check if this class overrides any final method
-    check_final_method_override(this_klass, CHECK_(nullHandle));
-
-    // check that if this class is an interface then it doesn't have static methods
-    if (this_klass->is_interface()) {
-      /* An interface in a JAVA 8 classfile can be static */
-      if (_major_version < JAVA_8_VERSION) {
-        check_illegal_static_method(this_klass, CHECK_(nullHandle));
-      }
-    }
-
-    // Allocate mirror and initialize static fields
-    java_lang_Class::create_mirror(this_klass, class_loader, protection_domain,
-                                   CHECK_(nullHandle));
-
-    // Generate any default methods - default methods are interface methods
-    // that have a default implementation.  This is new with Lambda project.
-    if (has_default_methods ) {
-      DefaultMethods::generate_default_methods(
-          this_klass(), &all_mirandas, CHECK_(nullHandle));
-    }
-
-    // Update the loader_data graph.
-    record_defined_class_dependencies(this_klass, CHECK_NULL);
-
-    ClassLoadingService::notify_class_loaded(InstanceKlass::cast(this_klass()),
-                                             false /* not shared class */);
-
-    if (TraceClassLoading) {
-      ResourceMark rm;
-      // print in a single call to reduce interleaving of output
-      if (cfs->source() != NULL) {
-        tty->print("[Loaded %s from %s]\n", this_klass->external_name(),
-                   cfs->source());
-      } else if (class_loader.is_null()) {
-        Klass* caller =
-            THREAD->is_Java_thread()
-                ? ((JavaThread*)THREAD)->security_get_caller_class(1)
-                : NULL;
-        // caller can be NULL, for example, during a JVMTI VM_Init hook
-        if (caller != NULL) {
-          tty->print("[Loaded %s by instance of %s]\n",
-                     this_klass->external_name(),
-                     caller->external_name());
-        } else {
-          tty->print("[Loaded %s]\n", this_klass->external_name());
-        }
-      } else {
-        tty->print("[Loaded %s from %s]\n", this_klass->external_name(),
-                   class_loader->klass()->external_name());
-      }
-    }
-
-    if (TraceClassResolution) {
-      ResourceMark rm;
-      // print out the superclass.
-      const char * from = this_klass()->external_name();
-      if (this_klass->java_super() != NULL) {
-        tty->print("RESOLVE %s %s (super)\n", from, this_klass->java_super()->external_name());
-      }
-      // print out each of the interface classes referred to by this class.
-      Array<Klass*>* local_interfaces = this_klass->local_interfaces();
-      if (local_interfaces != NULL) {
-        int length = local_interfaces->length();
-        for (int i = 0; i < length; i++) {
-          Klass* k = local_interfaces->at(i);
-          const char * to = k->external_name();
-          tty->print("RESOLVE %s %s (interface)\n", from, to);
-        }
-      }
-    }
-
-    // preserve result across HandleMark
-    preserve_this_klass = this_klass();
-  }
-
-  // Create new handle outside HandleMark (might be needed for
-  // Extended Class Redefinition)
-  instanceKlassHandle this_klass (THREAD, preserve_this_klass);
-  debug_only(this_klass->verify();)
-
-  // Clear class if no error has occurred so destructor doesn't deallocate it
-  _klass = NULL;
-  return this_klass;
-}
-
-// Destructor to clean up if there's an error
-ClassFileParser::~ClassFileParser() {
-  MetadataFactory::free_metadata(_loader_data, _cp);
-  MetadataFactory::free_array<u2>(_loader_data, _fields);
-
-  // Free methods
-  InstanceKlass::deallocate_methods(_loader_data, _methods);
-
-  // beware of the Universe::empty_blah_array!!
-  if (_inner_classes != Universe::the_empty_short_array()) {
-    MetadataFactory::free_array<u2>(_loader_data, _inner_classes);
-  }
-
-  // Free interfaces
-  InstanceKlass::deallocate_interfaces(_loader_data, _super_klass(),
-                                       _local_interfaces, _transitive_interfaces);
-
-  if (_combined_annotations != NULL) {
-    // After all annotations arrays have been created, they are installed into the
-    // Annotations object that will be assigned to the InstanceKlass being created.
-
-    // Deallocate the Annotations object and the installed annotations arrays.
-    _combined_annotations->deallocate_contents(_loader_data);
-
-    // If the _combined_annotations pointer is non-NULL,
-    // then the other annotations fields should have been cleared.
-    assert(_annotations             == NULL, "Should have been cleared");
-    assert(_type_annotations        == NULL, "Should have been cleared");
-    assert(_fields_annotations      == NULL, "Should have been cleared");
-    assert(_fields_type_annotations == NULL, "Should have been cleared");
-  } else {
-    // If the annotations arrays were not installed into the Annotations object,
-    // then they have to be deallocated explicitly.
-    MetadataFactory::free_array<u1>(_loader_data, _annotations);
-    MetadataFactory::free_array<u1>(_loader_data, _type_annotations);
-    Annotations::free_contents(_loader_data, _fields_annotations);
-    Annotations::free_contents(_loader_data, _fields_type_annotations);
-  }
-
-  clear_class_metadata();
-
-  // deallocate the klass if already created.  Don't directly deallocate, but add
-  // to the deallocate list so that the klass is removed from the CLD::_klasses list
-  // at a safepoint.
-  if (_klass != NULL) {
-    _loader_data->add_to_deallocate_list(_klass);
-  }
-  _klass = NULL;
-}
-
-void ClassFileParser::print_field_layout(Symbol* name,
-                                         Array<u2>* fields,
-                                         const constantPoolHandle& cp,
-                                         int instance_size,
-                                         int instance_fields_start,
-                                         int instance_fields_end,
-                                         int static_fields_end) {
-  tty->print("%s: field layout\n", name->as_klass_external_name());
-  tty->print("  @%3d %s\n", instance_fields_start, "--- instance fields start ---");
-  for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
-    if (!fs.access_flags().is_static()) {
-      tty->print("  @%3d \"%s\" %s\n",
-          fs.offset(),
-          fs.name()->as_klass_external_name(),
-          fs.signature()->as_klass_external_name());
-    }
-  }
-  tty->print("  @%3d %s\n", instance_fields_end, "--- instance fields end ---");
-  tty->print("  @%3d %s\n", instance_size * wordSize, "--- instance ends ---");
-  tty->print("  @%3d %s\n", InstanceMirrorKlass::offset_of_static_fields(), "--- static fields start ---");
-  for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
-    if (fs.access_flags().is_static()) {
-      tty->print("  @%3d \"%s\" %s\n",
-          fs.offset(),
-          fs.name()->as_klass_external_name(),
-          fs.signature()->as_klass_external_name());
-    }
-  }
-  tty->print("  @%3d %s\n", static_fields_end, "--- static fields end ---");
-  tty->print("\n");
-}
-
-unsigned int
-ClassFileParser::compute_oop_map_count(instanceKlassHandle super,
-                                       unsigned int nonstatic_oop_map_count,
-                                       int first_nonstatic_oop_offset) {
-  unsigned int map_count =
-    super.is_null() ? 0 : super->nonstatic_oop_map_count();
-  if (nonstatic_oop_map_count > 0) {
-    // We have oops to add to map
-    if (map_count == 0) {
-      map_count = nonstatic_oop_map_count;
-    } else {
-      // Check whether we should add a new map block or whether the last one can
-      // be extended
-      OopMapBlock* const first_map = super->start_of_nonstatic_oop_maps();
-      OopMapBlock* const last_map = first_map + map_count - 1;
-
-      int next_offset = last_map->offset() + last_map->count() * heapOopSize;
-      if (next_offset == first_nonstatic_oop_offset) {
-        // There is no gap between superklass's last oop field and first
-        // local oop field, merge maps.
-        nonstatic_oop_map_count -= 1;
-      } else {
-        // Superklass didn't end with an oop field, add extra maps
-        assert(next_offset < first_nonstatic_oop_offset, "just checking");
-      }
-      map_count += nonstatic_oop_map_count;
-    }
-  }
-  return map_count;
-}
-
-
-void ClassFileParser::fill_oop_maps(instanceKlassHandle k,
-                                    unsigned int nonstatic_oop_map_count,
-                                    int* nonstatic_oop_offsets,
-                                    unsigned int* nonstatic_oop_counts) {
+static void fill_oop_maps(const InstanceKlass* k,
+                          unsigned int nonstatic_oop_map_count,
+                          const int* nonstatic_oop_offsets,
+                          const unsigned int* nonstatic_oop_counts) {
+
+  assert(k != NULL, "invariant");
+
   OopMapBlock* this_oop_map = k->start_of_nonstatic_oop_maps();
   const InstanceKlass* const super = k->superklass();
   const unsigned int super_count = super ? super->nonstatic_oop_map_count() : 0;
@@ -4513,22 +4171,24 @@
 }
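
The merge rule that sizes these maps comes from the deleted
compute_oop_map_count() above: when the superclass's last oop map ends exactly
where this class's first oop field begins, that map is extended instead of a
new block being opened. A hedged arithmetic sketch (OopMapBlockSketch and
merged_map_count are illustrative names, not HotSpot API):

    #include <cassert>
    #include <cstdio>

    struct OopMapBlockSketch { int offset; unsigned count; };  // byte offset, number of oops

    static unsigned merged_map_count(const OopMapBlockSketch* super_last,  // super's last map, if any
                                     unsigned super_count,
                                     unsigned local_count,
                                     int first_local_oop_offset,
                                     int heap_oop_size) {
      if (local_count == 0) return super_count;
      if (super_count == 0) return local_count;
      const int next_offset = super_last->offset + (int)super_last->count * heap_oop_size;
      assert(next_offset <= first_local_oop_offset && "local oops start after super's");
      if (next_offset == first_local_oop_offset) {
        local_count -= 1;  // no gap: extend the superclass's last map
      }
      return super_count + local_count;
    }

    int main() {
      OopMapBlockSketch last = { 16, 2 };                    // oops at offsets 16 and 20 (4-byte oops)
      printf("%u\n", merged_map_count(&last, 1, 1, 24, 4));  // contiguous -> 1 map
      printf("%u\n", merged_map_count(&last, 1, 1, 32, 4));  // gap -> 2 maps
      return 0;
    }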
 
 
-void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) {
-  Klass* super = k->super();
+void ClassFileParser::set_precomputed_flags(InstanceKlass* ik) {
+  assert(ik != NULL, "invariant");
+
+  const Klass* const super = ik->super();
 
   // Check if this klass has an empty finalize method (i.e. one with return bytecode only),
   // in which case we don't have to register objects as finalizable
   if (!_has_empty_finalizer) {
     if (_has_finalizer ||
         (super != NULL && super->has_finalizer())) {
-      k->set_has_finalizer();
+      ik->set_has_finalizer();
     }
   }
 
 #ifdef ASSERT
   bool f = false;
-  Method* m = k->lookup_method(vmSymbols::finalize_method_name(),
-                                 vmSymbols::void_method_signature());
+  const Method* const m = ik->lookup_method(vmSymbols::finalize_method_name(),
+                                            vmSymbols::void_method_signature());
   if (m != NULL && !m->is_empty_method()) {
       f = true;
   }
@@ -4536,70 +4196,74 @@
   // The spec doesn't prevent an agent from redefining an empty finalizer.
   // Even though that is generally a bad idea and the redefined finalizer
   // will not work as expected, we shouldn't abort the VM in this case.
-  if (!k->has_redefined_this_or_super()) {
-    assert(f == k->has_finalizer(), "inconsistent has_finalizer");
+  if (!ik->has_redefined_this_or_super()) {
+    assert(ik->has_finalizer() == f, "inconsistent has_finalizer");
   }
 #endif
 
   // Check if this klass supports the java.lang.Cloneable interface
   if (SystemDictionary::Cloneable_klass_loaded()) {
-    if (k->is_subtype_of(SystemDictionary::Cloneable_klass())) {
-      k->set_is_cloneable();
+    if (ik->is_subtype_of(SystemDictionary::Cloneable_klass())) {
+      ik->set_is_cloneable();
     }
   }
 
   // Check if this klass has a vanilla default constructor
   if (super == NULL) {
     // java.lang.Object has empty default constructor
-    k->set_has_vanilla_constructor();
+    ik->set_has_vanilla_constructor();
   } else {
     if (super->has_vanilla_constructor() &&
         _has_vanilla_constructor) {
-      k->set_has_vanilla_constructor();
+      ik->set_has_vanilla_constructor();
     }
 #ifdef ASSERT
     bool v = false;
     if (super->has_vanilla_constructor()) {
-      Method* constructor = k->find_method(vmSymbols::object_initializer_name(
-), vmSymbols::void_method_signature());
+      const Method* const constructor =
+        ik->find_method(vmSymbols::object_initializer_name(),
+                        vmSymbols::void_method_signature());
       if (constructor != NULL && constructor->is_vanilla_constructor()) {
         v = true;
       }
     }
-    assert(v == k->has_vanilla_constructor(), "inconsistent has_vanilla_constructor");
+    assert(v == ik->has_vanilla_constructor(), "inconsistent has_vanilla_constructor");
 #endif
   }
 
   // If it cannot be fast-path allocated, set a bit in the layout helper.
   // See documentation of InstanceKlass::can_be_fastpath_allocated().
-  assert(k->size_helper() > 0, "layout_helper is initialized");
-  if ((!RegisterFinalizersAtInit && k->has_finalizer())
-      || k->is_abstract() || k->is_interface()
-      || (k->name() == vmSymbols::java_lang_Class() && k->class_loader() == NULL)
-      || k->size_helper() >= FastAllocateSizeLimit) {
+  assert(ik->size_helper() > 0, "layout_helper is initialized");
+  if ((!RegisterFinalizersAtInit && ik->has_finalizer())
+      || ik->is_abstract() || ik->is_interface()
+      || (ik->name() == vmSymbols::java_lang_Class() && ik->class_loader() == NULL)
+      || ik->size_helper() >= FastAllocateSizeLimit) {
     // Forbid fast-path allocation.
-    jint lh = Klass::instance_layout_helper(k->size_helper(), true);
-    k->set_layout_helper(lh);
+    const jint lh = Klass::instance_layout_helper(ik->size_helper(), true);
+    ik->set_layout_helper(lh);
   }
 }
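
The four disjuncts above are the only conditions that forbid fast-path
allocation. Restated as a standalone predicate, as a sketch only: KlassSketch
and its fields are illustrative stand-ins for the corresponding InstanceKlass
queries, not HotSpot types.

    struct KlassSketch {
      bool has_finalizer;
      bool is_abstract;
      bool is_interface;
      bool is_boot_java_lang_Class;  // name == java.lang.Class, loaded by the null loader
      int  size_helper_words;
    };

    static bool forbid_fastpath_allocation(const KlassSketch& k,
                                           bool register_finalizers_at_init,
                                           int fast_allocate_size_limit) {
      return (!register_finalizers_at_init && k.has_finalizer)
          || k.is_abstract || k.is_interface
          || k.is_boot_java_lang_Class
          || k.size_helper_words >= fast_allocate_size_limit;
    }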
 
 // Attach super classes and interface classes to class loader data
-void ClassFileParser::record_defined_class_dependencies(instanceKlassHandle defined_klass, TRAPS) {
-  ClassLoaderData * defining_loader_data = defined_klass->class_loader_data();
+static void record_defined_class_dependencies(const InstanceKlass* defined_klass,
+                                              TRAPS) {
+  assert(defined_klass != NULL, "invariant");
+
+  ClassLoaderData* const defining_loader_data = defined_klass->class_loader_data();
   if (defining_loader_data->is_the_null_class_loader_data()) {
       // Dependencies to null class loader data are implicit.
       return;
   } else {
     // add super class dependency
-    Klass* super = defined_klass->super();
+    Klass* const super = defined_klass->super();
     if (super != NULL) {
       defining_loader_data->record_dependency(super, CHECK);
     }
 
     // add super interface dependencies
-    Array<Klass*>* local_interfaces = defined_klass->local_interfaces();
+    const Array<Klass*>* const local_interfaces = defined_klass->local_interfaces();
     if (local_interfaces != NULL) {
-      int length = local_interfaces->length();
+      const int length = local_interfaces->length();
       for (int i = 0; i < length; i++) {
         defining_loader_data->record_dependency(local_interfaces->at(i), CHECK);
       }
@@ -4609,31 +4273,36 @@
 
 // utility methods for appending an array with check for duplicates
 
-void append_interfaces(GrowableArray<Klass*>* result, Array<Klass*>* ifs) {
+static void append_interfaces(GrowableArray<Klass*>* result,
+                              const Array<Klass*>* const ifs) {
   // iterate over new interfaces
   for (int i = 0; i < ifs->length(); i++) {
-    Klass* e = ifs->at(i);
+    Klass* const e = ifs->at(i);
     assert(e->is_klass() && InstanceKlass::cast(e)->is_interface(), "just checking");
     // add new interface
     result->append_if_missing(e);
   }
 }
 
-Array<Klass*>* ClassFileParser::compute_transitive_interfaces(
-                                        instanceKlassHandle super,
-                                        Array<Klass*>* local_ifs, TRAPS) {
+static Array<Klass*>* compute_transitive_interfaces(const InstanceKlass* super,
+                                                    Array<Klass*>* local_ifs,
+                                                    ClassLoaderData* loader_data,
+                                                    TRAPS) {
+  assert(local_ifs != NULL, "invariant");
+  assert(loader_data != NULL, "invariant");
+
   // Compute maximum size for transitive interfaces
   int max_transitive_size = 0;
   int super_size = 0;
   // Add superclass transitive interfaces size
-  if (super.not_null()) {
+  if (super != NULL) {
     super_size = super->transitive_interfaces()->length();
     max_transitive_size += super_size;
   }
   // Add local interfaces' super interfaces
-  int local_size = local_ifs->length();
+  const int local_size = local_ifs->length();
   for (int i = 0; i < local_size; i++) {
-    Klass* l = local_ifs->at(i);
+    Klass* const l = local_ifs->at(i);
     max_transitive_size += InstanceKlass::cast(l)->transitive_interfaces()->length();
   }
   // Finally add local interfaces
@@ -4650,38 +4319,40 @@
     return local_ifs;
   } else {
     ResourceMark rm;
-    GrowableArray<Klass*>* result = new GrowableArray<Klass*>(max_transitive_size);
+    GrowableArray<Klass*>* const result = new GrowableArray<Klass*>(max_transitive_size);
 
     // Copy down from superclass
-    if (super.not_null()) {
+    if (super != NULL) {
       append_interfaces(result, super->transitive_interfaces());
     }
 
     // Copy down from local interfaces' superinterfaces
-    for (int i = 0; i < local_ifs->length(); i++) {
-      Klass* l = local_ifs->at(i);
+    for (int i = 0; i < local_size; i++) {
+      Klass* const l = local_ifs->at(i);
       append_interfaces(result, InstanceKlass::cast(l)->transitive_interfaces());
     }
     // Finally add local interfaces
     append_interfaces(result, local_ifs);
 
     // length will be less than the max_transitive_size if duplicates were removed
-    int length = result->length();
+    const int length = result->length();
     assert(length <= max_transitive_size, "just checking");
-    Array<Klass*>* new_result = MetadataFactory::new_array<Klass*>(_loader_data, length, CHECK_NULL);
+    Array<Klass*>* const new_result =
+      MetadataFactory::new_array<Klass*>(loader_data, length, CHECK_NULL);
     for (int i = 0; i < length; i++) {
-      Klass* e = result->at(i);
-        assert(e != NULL, "just checking");
+      Klass* const e = result->at(i);
+      assert(e != NULL, "just checking");
       new_result->at_put(i, e);
     }
     return new_result;
   }
 }
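
Order matters here: the closure is the super's transitive interfaces, then
each local interface's closure, then the locals themselves, with duplicates
dropped on first sight. A small standalone sketch of the same union, assuming
std::vector<std::string> in place of GrowableArray<Klass*>:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Stand-in for GrowableArray::append_if_missing(): first-seen order, no duplicates.
    static void append_if_missing(std::vector<std::string>& result, const std::string& e) {
      for (const std::string& x : result) {
        if (x == e) return;
      }
      result.push_back(e);
    }

    int main() {
      // Hypothetical hierarchy: the super implements {A, B}; the one local
      // interface C extends A, so C's own transitive closure is {A}.
      const std::vector<std::string> super_closure = { "A", "B" };
      const std::vector<std::string> c_closure     = { "A" };
      const std::vector<std::string> locals        = { "C" };

      std::vector<std::string> result;
      for (const auto& e : super_closure) append_if_missing(result, e);  // copy down from super
      for (const auto& e : c_closure)     append_if_missing(result, e);  // locals' superinterfaces
      for (const auto& e : locals)        append_if_missing(result, e);  // finally the locals

      for (const auto& e : result) printf("%s ", e.c_str());             // prints: A B C
      printf("\n");
      return 0;
    }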
 
-void ClassFileParser::check_super_class_access(instanceKlassHandle this_klass, TRAPS) {
-  Klass* super = this_klass->super();
+static void check_super_class_access(const InstanceKlass* this_klass, TRAPS) {
+  assert(this_klass != NULL, "invariant");
+  const Klass* const super = this_klass->super();
   if ((super != NULL) &&
-      (!Reflection::verify_class_access(this_klass(), super, false))) {
+      (!Reflection::verify_class_access(this_klass, super, false))) {
     ResourceMark rm(THREAD);
     Exceptions::fthrow(
       THREAD_AND_LOCATION,
@@ -4695,13 +4366,14 @@
 }
 
 
-void ClassFileParser::check_super_interface_access(instanceKlassHandle this_klass, TRAPS) {
-  Array<Klass*>* local_interfaces = this_klass->local_interfaces();
-  int lng = local_interfaces->length();
+static void check_super_interface_access(const InstanceKlass* this_klass, TRAPS) {
+  assert(this_klass != NULL, "invariant");
+  const Array<Klass*>* const local_interfaces = this_klass->local_interfaces();
+  const int lng = local_interfaces->length();
   for (int i = lng - 1; i >= 0; i--) {
-    Klass* k = local_interfaces->at(i);
+    Klass* const k = local_interfaces->at(i);
     assert (k != NULL && k->is_interface(), "invalid interface");
-    if (!Reflection::verify_class_access(this_klass(), k, false)) {
+    if (!Reflection::verify_class_access(this_klass, k, false)) {
       ResourceMark rm(THREAD);
       Exceptions::fthrow(
         THREAD_AND_LOCATION,
@@ -4716,22 +4388,23 @@
 }
 
 
-void ClassFileParser::check_final_method_override(instanceKlassHandle this_klass, TRAPS) {
-  Array<Method*>* methods = this_klass->methods();
-  int num_methods = methods->length();
+static void check_final_method_override(const InstanceKlass* this_klass, TRAPS) {
+  assert(this_klass != NULL, "invariant");
+  const Array<Method*>* const methods = this_klass->methods();
+  const int num_methods = methods->length();
 
   // go through each method and check if it overrides a final method
   for (int index = 0; index < num_methods; index++) {
-    Method* m = methods->at(index);
+    const Method* const m = methods->at(index);
 
     // skip private, static, and <init> methods
     if ((!m->is_private() && !m->is_static()) &&
         (m->name() != vmSymbols::object_initializer_name())) {
 
-      Symbol* name = m->name();
-      Symbol* signature = m->signature();
-      Klass* k = this_klass->super();
-      Method* super_m = NULL;
+      const Symbol* const name = m->name();
+      const Symbol* const signature = m->signature();
+      const Klass* k = this_klass->super();
+      const Method* super_m = NULL;
       while (k != NULL) {
         // skip supers that don't have final methods.
         if (k->has_final_method()) {
@@ -4743,7 +4416,7 @@
 
           if (super_m->is_final() && !super_m->is_static() &&
               // matching method in super is final, and not static
-              (Reflection::verify_field_access(this_klass(),
+              (Reflection::verify_field_access(this_klass,
                                                super_m->method_holder(),
                                                super_m->method_holder(),
                                                super_m->access_flags(), false))
@@ -4775,13 +4448,14 @@
 
 
 // assumes that this_klass is an interface
-void ClassFileParser::check_illegal_static_method(instanceKlassHandle this_klass, TRAPS) {
+static void check_illegal_static_method(const InstanceKlass* this_klass, TRAPS) {
+  assert(this_klass != NULL, "invariant");
   assert(this_klass->is_interface(), "not an interface");
-  Array<Method*>* methods = this_klass->methods();
-  int num_methods = methods->length();
+  const Array<Method*>* const methods = this_klass->methods();
+  const int num_methods = methods->length();
 
   for (int index = 0; index < num_methods; index++) {
-    Method* m = methods->at(index);
+    const Method* const m = methods->at(index);
     // if m is static and not the class initializer <clinit>, throw a verify error
     if ((m->is_static()) && (m->name() != vmSymbols::class_initializer_name())) {
       ResourceMark rm(THREAD);
@@ -4799,7 +4473,7 @@
 
 // utility methods for format checking
 
-void ClassFileParser::verify_legal_class_modifiers(jint flags, TRAPS) {
+void ClassFileParser::verify_legal_class_modifiers(jint flags, TRAPS) const {
   if (!_need_verify) { return; }
 
   const bool is_interface  = (flags & JVM_ACC_INTERFACE)  != 0;
@@ -4825,7 +4499,7 @@
   }
 }
 
-bool ClassFileParser::has_illegal_visibility(jint flags) {
+static bool has_illegal_visibility(jint flags) {
   const bool is_public    = (flags & JVM_ACC_PUBLIC)    != 0;
   const bool is_protected = (flags & JVM_ACC_PROTECTED) != 0;
   const bool is_private   = (flags & JVM_ACC_PRIVATE)   != 0;
@@ -4835,16 +4509,17 @@
           (is_protected && is_private));
 }
 
-bool ClassFileParser::is_supported_version(u2 major, u2 minor) {
-  u2 max_version = JAVA_MAX_SUPPORTED_VERSION;
+static bool is_supported_version(u2 major, u2 minor) {
+  const u2 max_version = JAVA_MAX_SUPPORTED_VERSION;
   return (major >= JAVA_MIN_SUPPORTED_VERSION) &&
          (major <= max_version) &&
          ((major != max_version) ||
           (minor <= JAVA_MAX_SUPPORTED_MINOR_VERSION));
 }
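
The minor version only constrains class files at the maximum major version;
anything between the bounds is accepted outright. A worked sketch with
hypothetical bounds (the real constants are defined elsewhere in this file):

    typedef unsigned short u2;

    static const u2 MIN_MAJOR = 45;  // stand-in for JAVA_MIN_SUPPORTED_VERSION
    static const u2 MAX_MAJOR = 52;  // stand-in for JAVA_MAX_SUPPORTED_VERSION
    static const u2 MAX_MINOR = 0;   // stand-in for JAVA_MAX_SUPPORTED_MINOR_VERSION

    static bool is_supported_version_sketch(u2 major, u2 minor) {
      return (major >= MIN_MAJOR) &&
             (major <= MAX_MAJOR) &&
             ((major != MAX_MAJOR) || (minor <= MAX_MINOR));
    }

    // is_supported_version_sketch(52, 0)  -> true   (at the cap)
    // is_supported_version_sketch(52, 1)  -> false  (minor too high at the cap)
    // is_supported_version_sketch(50, 99) -> true   (minor unconstrained below the cap)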
 
-void ClassFileParser::verify_legal_field_modifiers(
-    jint flags, bool is_interface, TRAPS) {
+void ClassFileParser::verify_legal_field_modifiers(jint flags,
+                                                   bool is_interface,
+                                                   TRAPS) const {
   if (!_need_verify) { return; }
 
   const bool is_public    = (flags & JVM_ACC_PUBLIC)    != 0;
@@ -4882,8 +4557,10 @@
   }
 }
 
-void ClassFileParser::verify_legal_method_modifiers(
-    jint flags, bool is_interface, Symbol* name, TRAPS) {
+void ClassFileParser::verify_legal_method_modifiers(jint flags,
+                                                    bool is_interface,
+                                                    const Symbol* name,
+                                                    TRAPS) const {
   if (!_need_verify) { return; }
 
   const bool is_public       = (flags & JVM_ACC_PUBLIC)       != 0;
@@ -4962,10 +4639,12 @@
   }
 }
 
-void ClassFileParser::verify_legal_utf8(const unsigned char* buffer, int length, TRAPS) {
+void ClassFileParser::verify_legal_utf8(const unsigned char* buffer,
+                                        int length,
+                                        TRAPS) const {
   assert(_need_verify, "only called when _need_verify is true");
   int i = 0;
-  int count = length >> 2;
+  const int count = length >> 2;
   for (int k=0; k<count; k++) {
     unsigned char b0 = buffer[i];
     unsigned char b1 = buffer[i+1];
@@ -4974,10 +4653,10 @@
     // For an unsigned char v,
     // (v | v - 1) is < 128 (highest bit 0) for 0 < v < 128;
     // (v | v - 1) is >= 128 (highest bit 1) for v == 0 or v >= 128.
-    unsigned char res = b0 | b0 - 1 |
-                        b1 | b1 - 1 |
-                        b2 | b2 - 1 |
-                        b3 | b3 - 1;
+    const unsigned char res = b0 | b0 - 1 |
+                              b1 | b1 - 1 |
+                              b2 | b2 - 1 |
+                              b3 | b3 - 1;
     if (res >= 128) break;
     i += 4;
   }
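
The fast path relies on the bit trick spelled out in the comment above: for an
unsigned byte v, (v | (v - 1)) has its top bit set exactly when v == 0 or
v >= 128, so OR-ing four such terms vets four bytes for "nonzero ASCII" at
once (a zero byte is illegal because class-file modified UTF-8 encodes NUL as
0xC0 0x80). A quick exhaustive check of the claim:

    #include <cassert>

    int main() {
      for (unsigned v = 0; v < 256; ++v) {
        // v - 1 wraps to all ones when v == 0, forcing the top bit on.
        const unsigned char res = (unsigned char)(v | (v - 1));
        const bool top_bit_set = (res & 0x80) != 0;
        assert(top_bit_set == (v == 0 || v >= 128));
      }
      return 0;
    }
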
@@ -5025,8 +4704,193 @@
   } // end of for
 }
 
+// Unqualified names may not contain the characters '.', ';', '[', or '/'.
+// Method names also may not contain the characters '<' or '>', unless <init>
+// or <clinit>.  Note that method names may not be <init> or <clinit> in this
+// method, because these names have been checked as special cases before
+// calling this method in verify_legal_method_name.
+static bool verify_unqualified_name(const char* name,
+                                    unsigned int length,
+                                    int type) {
+  for (const char* p = name; p != name + length;) {
+    jchar ch = *p;
+    if (ch < 128) {
+      p++;
+      if (ch == '.' || ch == ';' || ch == '[') {
+        return false;   // do not permit '.', ';', or '['
+      }
+      if (type != LegalClass && ch == '/') {
+        return false;   // do not permit '/' unless it's a class name
+      }
+      if (type == LegalMethod && (ch == '<' || ch == '>')) {
+        return false;   // do not permit '<' or '>' in method names
+      }
+    } else {
+      char* tmp_p = UTF8::next(p, &ch);
+      p = tmp_p;
+    }
+  }
+  return true;
+}
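
A few concrete outcomes of the function above (LegalClass, LegalField and
LegalMethod are the existing ClassFileParser discriminators):

    // verify_unqualified_name("value",     5, LegalField)  -> true
    // verify_unqualified_name("java/util", 9, LegalClass)  -> true   ('/' only in class names)
    // verify_unqualified_name("java/util", 9, LegalField)  -> false
    // verify_unqualified_name("compareTo", 9, LegalMethod) -> true
    // verify_unqualified_name("<init>",    6, LegalMethod) -> false  (callers special-case it first)
    // verify_unqualified_name("bad;name",  8, LegalField)  -> false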
+
+// Take pointer to a string. Skip over the longest part of the string that could
+// be taken as a fieldname. Allow '/' if slash_ok is true.
+// Return a pointer to just past the fieldname.
+// Return NULL if no fieldname at all was found, or in the case of slash_ok
+// being true, we saw consecutive slashes (meaning we were looking for a
+// qualified path but found something that was badly-formed).
+static const char* skip_over_field_name(const char* name,
+                                        bool slash_ok,
+                                        unsigned int length) {
+  const char* p;
+  jboolean last_is_slash = false;
+  jboolean not_first_ch = false;
+
+  for (p = name; p != name + length; not_first_ch = true) {
+    const char* old_p = p;
+    jchar ch = *p;
+    if (ch < 128) {
+      p++;
+      // quick check for ascii
+      if ((ch >= 'a' && ch <= 'z') ||
+        (ch >= 'A' && ch <= 'Z') ||
+        (ch == '_' || ch == '$') ||
+        (not_first_ch && ch >= '0' && ch <= '9')) {
+        last_is_slash = false;
+        continue;
+      }
+      if (slash_ok && ch == '/') {
+        if (last_is_slash) {
+          return NULL;  // Don't permit consecutive slashes
+        }
+        last_is_slash = true;
+        continue;
+      }
+    } else {
+      jint unicode_ch;
+      char* tmp_p = UTF8::next_character(p, &unicode_ch);
+      p = tmp_p;
+      last_is_slash = false;
+      // Check if ch is Java identifier start or is Java identifier part
+      // 4672820: call java.lang.Character methods directly without generating separate tables.
+      EXCEPTION_MARK;
+
+      // return value
+      JavaValue result(T_BOOLEAN);
+      // Set up the arguments to isJavaIdentifierStart and isJavaIdentifierPart
+      JavaCallArguments args;
+      args.push_int(unicode_ch);
+
+      // public static boolean isJavaIdentifierStart(char ch);
+      JavaCalls::call_static(&result,
+        SystemDictionary::Character_klass(),
+        vmSymbols::isJavaIdentifierStart_name(),
+        vmSymbols::int_bool_signature(),
+        &args,
+        THREAD);
+
+      if (HAS_PENDING_EXCEPTION) {
+        CLEAR_PENDING_EXCEPTION;
+        return 0;
+      }
+      if (result.get_jboolean()) {
+        continue;
+      }
+
+      if (not_first_ch) {
+        // public static boolean isJavaIdentifierPart(char ch);
+        JavaCalls::call_static(&result,
+          SystemDictionary::Character_klass(),
+          vmSymbols::isJavaIdentifierPart_name(),
+          vmSymbols::int_bool_signature(),
+          &args,
+          THREAD);
+
+        if (HAS_PENDING_EXCEPTION) {
+          CLEAR_PENDING_EXCEPTION;
+          return 0;
+        }
+
+        if (result.get_jboolean()) {
+          continue;
+        }
+      }
+    }
+    return (not_first_ch) ? old_p : NULL;
+  }
+  return (not_first_ch) ? p : NULL;
+}
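
Worked outcomes for the skipper above, shown as the prefix consumed:

    // skip_over_field_name("foo9$_",  false, 6) -> points past the whole name
    // skip_over_field_name("foo.bar", false, 7) -> points at '.', having consumed "foo"
    // skip_over_field_name("9pins",   false, 5) -> NULL ('9' cannot start an identifier)
    // skip_over_field_name("a//b",    true,  4) -> NULL (consecutive slashes)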
+
+// Take pointer to a string. Skip over the longest part of the string that could
+// be taken as a field signature. Allow "void" if void_ok.
+// Return a pointer to just past the signature.
+// Return NULL if no legal signature is found.
+const char* ClassFileParser::skip_over_field_signature(const char* signature,
+                                                       bool void_ok,
+                                                       unsigned int length,
+                                                       TRAPS) const {
+  unsigned int array_dim = 0;
+  while (length > 0) {
+    switch (signature[0]) {
+    case JVM_SIGNATURE_VOID: if (!void_ok) { return NULL; }
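+      // fall through: a legal 'V' shares the "return signature + 1" below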
+    case JVM_SIGNATURE_BOOLEAN:
+    case JVM_SIGNATURE_BYTE:
+    case JVM_SIGNATURE_CHAR:
+    case JVM_SIGNATURE_SHORT:
+    case JVM_SIGNATURE_INT:
+    case JVM_SIGNATURE_FLOAT:
+    case JVM_SIGNATURE_LONG:
+    case JVM_SIGNATURE_DOUBLE:
+      return signature + 1;
+    case JVM_SIGNATURE_CLASS: {
+      if (_major_version < JAVA_1_5_VERSION) {
+        // Skip over the class name if one is there
+        const char* const p = skip_over_field_name(signature + 1, true, --length);
+
+        // The next character better be a semicolon
+        if (p && (p - signature) > 1 && p[0] == ';') {
+          return p + 1;
+        }
+      } else {
+        // 4900761: For class version > 48, any unicode is allowed in class name.
+        length--;
+        signature++;
+        while (length > 0 && signature[0] != ';') {
+          if (signature[0] == '.') {
+            classfile_parse_error("Class name contains illegal character '.' in descriptor in class file %s", CHECK_0);
+          }
+          length--;
+          signature++;
+        }
+        if (signature[0] == ';') { return signature + 1; }
+      }
+
+      return NULL;
+    }
+    case JVM_SIGNATURE_ARRAY:
+      array_dim++;
+      if (array_dim > 255) {
+        // 4277370: array descriptor is valid only if it represents 255 or fewer dimensions.
+        classfile_parse_error("Array type descriptor has more than 255 dimensions in class file %s", CHECK_0);
+      }
+      // The rest of what's there better be a legal signature
+      signature++;
+      length--;
+      void_ok = false;
+      break;
+
+    default:
+      return NULL;
+    }
+  }
+  return NULL;
+}
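
The descriptor grammar accepted above, in worked examples (taking the
post-1.5 branch for class entries):

    // "I"                   -> primitive: returns signature + 1
    // "Ljava/lang/String;"  -> class: scans to ';', returns just past it
    // "[[J"                 -> two array dimensions, then a legal element type
    // "V"                   -> legal only while void_ok (a method return type)
    // 256 leading '['       -> ClassFormatError: more than 255 dimensions
    // "Ljava.lang.String;"  -> ClassFormatError: '.' in a descriptor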
+
 // Checks if name is a legal class name.
-void ClassFileParser::verify_legal_class_name(Symbol* name, TRAPS) {
+void ClassFileParser::verify_legal_class_name(const Symbol* name, TRAPS) const {
   if (!_need_verify || _relax_verify) { return; }
 
   char buf[fixed_buffer_size];
@@ -5035,7 +4899,7 @@
   bool legal = false;
 
   if (length > 0) {
-    char* p;
+    const char* p;
     if (bytes[0] == JVM_SIGNATURE_ARRAY) {
       p = skip_over_field_signature(bytes, false, length, CHECK);
       legal = (p != NULL) && ((p - bytes) == (int)length);
@@ -5054,6 +4918,7 @@
   }
   if (!legal) {
     ResourceMark rm(THREAD);
+    assert(_class_name != NULL, "invariant");
     Exceptions::fthrow(
       THREAD_AND_LOCATION,
       vmSymbols::java_lang_ClassFormatError(),
@@ -5065,7 +4930,7 @@
 }
 
 // Checks if name is a legal field name.
-void ClassFileParser::verify_legal_field_name(Symbol* name, TRAPS) {
+void ClassFileParser::verify_legal_field_name(const Symbol* name, TRAPS) const {
   if (!_need_verify || _relax_verify) { return; }
 
   char buf[fixed_buffer_size];
@@ -5076,7 +4941,7 @@
   if (length > 0) {
     if (_major_version < JAVA_1_5_VERSION) {
       if (bytes[0] != '<') {
-        char* p = skip_over_field_name(bytes, false, length);
+        const char* p = skip_over_field_name(bytes, false, length);
         legal = (p != NULL) && ((p - bytes) == (int)length);
       }
     } else {
@@ -5087,6 +4952,7 @@
 
   if (!legal) {
     ResourceMark rm(THREAD);
+    assert(_class_name != NULL, "invariant");
     Exceptions::fthrow(
       THREAD_AND_LOCATION,
       vmSymbols::java_lang_ClassFormatError(),
@@ -5098,7 +4964,7 @@
 }
 
 // Checks if name is a legal method name.
-void ClassFileParser::verify_legal_method_name(Symbol* name, TRAPS) {
+void ClassFileParser::verify_legal_method_name(const Symbol* name, TRAPS) const {
   if (!_need_verify || _relax_verify) { return; }
 
   assert(name != NULL, "method name is null");
@@ -5113,7 +4979,7 @@
         legal = true;
       }
     } else if (_major_version < JAVA_1_5_VERSION) {
-      char* p;
+      const char* p;
       p = skip_over_field_name(bytes, false, length);
       legal = (p != NULL) && ((p - bytes) == (int)length);
     } else {
@@ -5124,6 +4990,7 @@
 
   if (!legal) {
     ResourceMark rm(THREAD);
+    assert(_class_name != NULL, "invariant");
     Exceptions::fthrow(
       THREAD_AND_LOCATION,
       vmSymbols::java_lang_ClassFormatError(),
@@ -5136,13 +5003,15 @@
 
 
 // Checks if signature is a legal field signature.
-void ClassFileParser::verify_legal_field_signature(Symbol* name, Symbol* signature, TRAPS) {
+void ClassFileParser::verify_legal_field_signature(const Symbol* name,
+                                                   const Symbol* signature,
+                                                   TRAPS) const {
   if (!_need_verify) { return; }
 
   char buf[fixed_buffer_size];
-  char* bytes = signature->as_utf8_flexible_buffer(THREAD, buf, fixed_buffer_size);
-  unsigned int length = signature->utf8_length();
-  char* p = skip_over_field_signature(bytes, false, length, CHECK);
+  const char* const bytes = signature->as_utf8_flexible_buffer(THREAD, buf, fixed_buffer_size);
+  const unsigned int length = signature->utf8_length();
+  const char* const p = skip_over_field_signature(bytes, false, length, CHECK);
 
   if (p == NULL || (p - bytes) != (int)length) {
     throwIllegalSignature("Field", name, signature, CHECK);
@@ -5151,7 +5020,9 @@
 
 // Checks if signature is a legal method signature.
 // Returns number of parameters
-int ClassFileParser::verify_legal_method_signature(Symbol* name, Symbol* signature, TRAPS) {
+int ClassFileParser::verify_legal_method_signature(const Symbol* name,
+                                                   const Symbol* signature,
+                                                   TRAPS) const {
   if (!_need_verify) {
     // make sure caller's args_size will be less than 0 even for non-static
     // method so it will be recomputed in compute_size_of_parameters().
@@ -5168,9 +5039,9 @@
 
   unsigned int args_size = 0;
   char buf[fixed_buffer_size];
-  char* p = signature->as_utf8_flexible_buffer(THREAD, buf, fixed_buffer_size);
+  const char* p = signature->as_utf8_flexible_buffer(THREAD, buf, fixed_buffer_size);
   unsigned int length = signature->utf8_length();
-  char* nextp;
+  const char* nextp;
 
   // The first character must be a '('
   if ((length > 0) && (*p++ == JVM_SIGNATURE_FUNC)) {
@@ -5208,188 +5079,823 @@
   return 0;
 }
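
The elided body walks "( FieldType* ) ReturnType" with skip_over_field_signature
and, assuming the usual JVM convention, accumulates args_size in slots: one per
parameter, two for long and double. Worked examples:

    // "(IJLjava/lang/String;)V" -> 3 parameters, args_size = 1 + 2 + 1 = 4 slots
    // "()D"                     -> 0 parameters, args_size = 0
    // "(I"                      -> rejected (no closing ')' is reached)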
 
-
-// Unqualified names may not contain the characters '.', ';', '[', or '/'.
-// Method names also may not contain the characters '<' or '>', unless <init>
-// or <clinit>.  Note that method names may not be <init> or <clinit> in this
-// method.  Because these names have been checked as special cases before
-// calling this method in verify_legal_method_name.
-bool ClassFileParser::verify_unqualified_name(
-    char* name, unsigned int length, int type) {
-  jchar ch;
-
-  for (char* p = name; p != name + length; ) {
-    ch = *p;
-    if (ch < 128) {
-      p++;
-      if (ch == '.' || ch == ';' || ch == '[' ) {
-        return false;   // do not permit '.', ';', or '['
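+// Accessors for the field layout computed by post_process_parsed_stream();
+// they are valid only once _field_info has been created.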
+int ClassFileParser::static_field_size() const {
+  assert(_field_info != NULL, "invariant");
+  return _field_info->static_field_size;
+}
+
+int ClassFileParser::total_oop_map_count() const {
+  assert(_field_info != NULL, "invariant");
+  return _field_info->total_oop_map_count;
+}
+
+jint ClassFileParser::layout_size() const {
+  assert(_field_info != NULL, "invariant");
+  return _field_info->instance_size;
+}
+
+static void check_methods_for_intrinsics(const InstanceKlass* ik,
+                                         const Array<Method*>* methods) {
+  assert(ik != NULL, "invariant");
+  assert(methods != NULL, "invariant");
+
+  // Set up Method*::intrinsic_id as soon as we know the names of methods.
+  // (We used to do this lazily, but now we query it in Rewriter,
+  // which is eagerly done for every method, so we might as well do it now,
+  // when everything is fresh in memory.)
+  const vmSymbols::SID klass_id = Method::klass_id_for_intrinsics(ik);
+
+  if (klass_id != vmSymbols::NO_SID) {
+    for (int j = 0; j < methods->length(); ++j) {
+      Method* method = methods->at(j);
+      method->init_intrinsic_id();
+
+      if (CheckIntrinsics) {
+        // Check if an intrinsic is defined for method 'method',
+        // but the method is not annotated with @HotSpotIntrinsicCandidate.
+        if (method->intrinsic_id() != vmIntrinsics::_none &&
+            !method->intrinsic_candidate()) {
+          tty->print("Compiler intrinsic is defined for method [%s], "
+                     "but the method is not annotated with @HotSpotIntrinsicCandidate.%s",
+                     method->name_and_sig_as_C_string(),
+                     NOT_DEBUG(" Method will not be inlined.") DEBUG_ONLY(" Exiting."));
+          tty->cr();
+          DEBUG_ONLY(vm_exit(1));
+        }
+        // Check if the method 'method' is annotated with @HotSpotIntrinsicCandidate,
+        // but there is no intrinsic available for it.
+        if (method->intrinsic_candidate() &&
+            method->intrinsic_id() == vmIntrinsics::_none) {
+          tty->print("Method [%s] is annotated with @HotSpotIntrinsicCandidate, "
+                     "but no compiler intrinsic is defined for the method.%s",
+                     method->name_and_sig_as_C_string(),
+                     NOT_DEBUG("") DEBUG_ONLY(" Exiting."));
+          tty->cr();
+          DEBUG_ONLY(vm_exit(1));
+        }
       }
-      if (type != LegalClass && ch == '/') {
-        return false;   // do not permit '/' unless it's class name
-      }
-      if (type == LegalMethod && (ch == '<' || ch == '>')) {
-        return false;   // do not permit '<' or '>' in method names
-      }
-    } else {
-      char* tmp_p = UTF8::next(p, &ch);
-      p = tmp_p;
+    } // end for
+
+#ifdef ASSERT
+    if (CheckIntrinsics) {
+      // Check for orphan methods in the current class. A method m
+      // of a class C is an orphan if an intrinsic is defined for method m,
+      // but class C does not declare m.
+      // The check is potentially expensive, therefore it is available
+      // only in debug builds.
+
+      for (int id = vmIntrinsics::FIRST_ID; id < (int)vmIntrinsics::ID_LIMIT; ++id) {
+        if (vmIntrinsics::_compiledLambdaForm == id) {
+          // The _compiledLambdaForm intrinsic is a special marker for bytecode
+          // generated for the JVM from a LambdaForm and therefore no method
+          // is defined for it.
+          continue;
+        }
+
+        if (vmIntrinsics::class_for(vmIntrinsics::ID_from(id)) == klass_id) {
+          // Check if the current class contains a method with the same
+          // name, flags, signature.
+          bool match = false;
+          for (int j = 0; j < methods->length(); ++j) {
+            const Method* method = methods->at(j);
+            if (method->intrinsic_id() == id) {
+              match = true;
+              break;
+            }
+          }
+
+          if (!match) {
+            char buf[1000];
+            tty->print("Compiler intrinsic is defined for method [%s], "
+                       "but the method is not available in class [%s].%s",
+                        vmIntrinsics::short_name_as_C_string(vmIntrinsics::ID_from(id),
+                                                             buf, sizeof(buf)),
+                        ik->name()->as_C_string(),
+                        NOT_DEBUG("") DEBUG_ONLY(" Exiting.")
+            );
+            tty->cr();
+            DEBUG_ONLY(vm_exit(1));
+          }
+        }
+      } // end for
+    } // CheckIntrinsics
+#endif // ASSERT
+  }
+}
+
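+// Entry point once parsing has succeeded: allocates the InstanceKlass and
+// hands the parsed metadata over to it (see fill_instance_klass() below).
+// A second call simply returns the already-created klass.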
+InstanceKlass* ClassFileParser::create_instance_klass(TRAPS) {
+  if (_klass != NULL) {
+    return _klass;
+  }
+
+  InstanceKlass* const ik =
+    InstanceKlass::allocate_instance_klass(*this, CHECK_NULL);
+
+  fill_instance_klass(ik, CHECK_NULL);
+
+  assert(_klass == ik, "invariant");
+
+  return ik;
+}
+
+void ClassFileParser::fill_instance_klass(InstanceKlass* ik, TRAPS) {
+  assert(ik != NULL, "invariant");
+
+  set_klass_to_deallocate(ik);
+
+  assert(_field_info != NULL, "invariant");
+  assert(ik->static_field_size() == _field_info->static_field_size, "sanity");
+  assert(ik->nonstatic_oop_map_count() == _field_info->total_oop_map_count,
+    "sanity");
+
+  assert(ik->is_instance_klass(), "sanity");
+  assert(ik->size_helper() == _field_info->instance_size, "sanity");
+
+  // Fill in information already parsed
+  ik->set_should_verify_class(_need_verify);
+
+  // Not yet: supers are done below to support the new subtype-checking fields
+  ik->set_class_loader_data(_loader_data);
+  ik->set_nonstatic_field_size(_field_info->nonstatic_field_size);
+  ik->set_has_nonstatic_fields(_field_info->has_nonstatic_fields);
+  assert(_fac != NULL, "invariant");
+  ik->set_static_oop_field_count(_fac->count[STATIC_OOP]);
+
+  // this transfers ownership of a lot of arrays from
+  // the parser onto the InstanceKlass*
+  apply_parsed_class_metadata(ik, _java_fields_count, CHECK);
+
+  // note that it is not safe to use the fields in the parser from this point on
+  assert(NULL == _cp, "invariant");
+  assert(NULL == _fields, "invariant");
+  assert(NULL == _methods, "invariant");
+  assert(NULL == _inner_classes, "invariant");
+  assert(NULL == _local_interfaces, "invariant");
+  assert(NULL == _transitive_interfaces, "invariant");
+  assert(NULL == _combined_annotations, "invariant");
+
+  if (_has_final_method) {
+    ik->set_has_final_method();
+  }
+
+  ik->copy_method_ordering(_method_ordering, CHECK);
+  // The InstanceKlass::_methods_jmethod_ids cache
+  // is managed on the assumption that the initial cache
+  // size is equal to the number of methods in the class. If
+  // that changes, then InstanceKlass::idnum_can_increment()
+  // has to be changed accordingly.
+  ik->set_initial_method_idnum(ik->methods()->length());
+
+  ik->set_name(_class_name);
+
+  if (is_anonymous()) {
+    // I am well known to myself
+    ik->constants()->klass_at_put(_this_class_index, ik); // eagerly resolve
+  }
+
+  ik->set_minor_version(_minor_version);
+  ik->set_major_version(_major_version);
+  ik->set_has_default_methods(_has_default_methods);
+  ik->set_declares_default_methods(_declares_default_methods);
+
+  if (_host_klass != NULL) {
+    assert(ik->is_anonymous(), "should be the same");
+    ik->set_host_klass(_host_klass);
+  }
+
+  const Array<Method*>* const methods = ik->methods();
+  assert(methods != NULL, "invariant");
+  const int methods_len = methods->length();
+
+  check_methods_for_intrinsics(ik, methods);
+
+  // Fill in field values obtained by parse_classfile_attributes
+  if (_parsed_annotations->has_any_annotations()) {
+    _parsed_annotations->apply_to(ik);
+  }
+
+  apply_parsed_class_attributes(ik);
+
+  // Miranda methods: set the flag if this class introduced new miranda
+  // methods, or if a super class exists that has miranda methods.
+  if ((_num_miranda_methods > 0) ||
+      (_super_klass != NULL && _super_klass->has_miranda_methods())) {
+    ik->set_has_miranda_methods();
+  }
+
+  // Fill in information needed to compute superclasses.
+  ik->initialize_supers(const_cast<InstanceKlass*>(_super_klass), CHECK);
+
+  // Initialize itable offset tables
+  klassItable::setup_itable_offset_table(ik);
+
+  // Compute transitive closure of interfaces this class implements
+  // Do final class setup
+  fill_oop_maps(ik,
+                _field_info->nonstatic_oop_map_count,
+                _field_info->nonstatic_oop_offsets,
+                _field_info->nonstatic_oop_counts);
+
+  // Fill in has_finalizer, has_vanilla_constructor, and layout_helper
+  set_precomputed_flags(ik);
+
+  // check if this class can access its super class
+  check_super_class_access(ik, CHECK);
+
+  // check if this class can access its superinterfaces
+  check_super_interface_access(ik, CHECK);
+
+  // check if this class overrides any final method
+  check_final_method_override(ik, CHECK);
+
+  // Check that an interface does not declare static methods; static methods
+  // in interfaces are legal only in Java 8 (and later) class files.
+  if (ik->is_interface()) {
+    if (_major_version < JAVA_8_VERSION) {
+      check_illegal_static_method(ik, CHECK);
     }
   }
-  return true;
-}
-
-
-// Take pointer to a string. Skip over the longest part of the string that could
-// be taken as a fieldname. Allow '/' if slash_ok is true.
-// Return a pointer to just past the fieldname.
-// Return NULL if no fieldname at all was found, or in the case of slash_ok
-// being true, we saw consecutive slashes (meaning we were looking for a
-// qualified path but found something that was badly-formed).
-char* ClassFileParser::skip_over_field_name(char* name, bool slash_ok, unsigned int length) {
-  char* p;
-  jchar ch;
-  jboolean last_is_slash = false;
-  jboolean not_first_ch = false;
-
-  for (p = name; p != name + length; not_first_ch = true) {
-    char* old_p = p;
-    ch = *p;
-    if (ch < 128) {
-      p++;
-      // quick check for ascii
-      if ((ch >= 'a' && ch <= 'z') ||
-          (ch >= 'A' && ch <= 'Z') ||
-          (ch == '_' || ch == '$') ||
-          (not_first_ch && ch >= '0' && ch <= '9')) {
-        last_is_slash = false;
-        continue;
-      }
-      if (slash_ok && ch == '/') {
-        if (last_is_slash) {
-          return NULL;  // Don't permit consecutive slashes
+
+  // Allocate mirror and initialize static fields
+  // The create_mirror() call will also call compute_modifiers()
+  java_lang_Class::create_mirror(ik,
+                                 _loader_data->class_loader(),
+                                 _protection_domain,
+                                 CHECK);
+
+  assert(_all_mirandas != NULL, "invariant");
+
+  // Generate any default methods - default methods are interface methods
+  // with a default implementation, introduced with the Lambda project (Java 8).
+  if (_has_default_methods) {
+    DefaultMethods::generate_default_methods(ik,
+                                             _all_mirandas,
+                                             CHECK);
+  }
+
+  // Update the loader_data graph.
+  record_defined_class_dependencies(ik, CHECK);
+
+  ClassLoadingService::notify_class_loaded(ik, false /* not shared class */);
+
+  if (!is_internal()) {
+    if (TraceClassLoading) {
+      ResourceMark rm;
+      // print in a single call to reduce interleaving of output
+      if (_stream->source() != NULL) {
+        tty->print("[Loaded %s from %s]\n",
+                   ik->external_name(),
+                   _stream->source());
+      } else if (_loader_data->class_loader() == NULL) {
+        const Klass* const caller =
+          THREAD->is_Java_thread()
+                ? ((JavaThread*)THREAD)->security_get_caller_class(1)
+                : NULL;
+        // caller can be NULL, for example, during a JVMTI VM_Init hook
+        if (caller != NULL) {
+          tty->print("[Loaded %s by instance of %s]\n",
+                     ik->external_name(),
+                     caller->external_name());
+        } else {
+          tty->print("[Loaded %s]\n", ik->external_name());
         }
-        last_is_slash = true;
-        continue;
+      } else {
+        tty->print("[Loaded %s from %s]\n", ik->external_name(),
+                   _loader_data->class_loader()->klass()->external_name());
       }
-    } else {
-      jint unicode_ch;
-      char* tmp_p = UTF8::next_character(p, &unicode_ch);
-      p = tmp_p;
-      last_is_slash = false;
-      // Check if ch is Java identifier start or is Java identifier part
-      // 4672820: call java.lang.Character methods directly without generating separate tables.
-      EXCEPTION_MARK;
-      instanceKlassHandle klass (THREAD, SystemDictionary::Character_klass());
-
-      // return value
-      JavaValue result(T_BOOLEAN);
-      // Set up the arguments to isJavaIdentifierStart and isJavaIdentifierPart
-      JavaCallArguments args;
-      args.push_int(unicode_ch);
-
-      // public static boolean isJavaIdentifierStart(char ch);
-      JavaCalls::call_static(&result,
-                             klass,
-                             vmSymbols::isJavaIdentifierStart_name(),
-                             vmSymbols::int_bool_signature(),
-                             &args,
-                             THREAD);
-
-      if (HAS_PENDING_EXCEPTION) {
-        CLEAR_PENDING_EXCEPTION;
-        return 0;
+    }
+
+    if (TraceClassResolution) {
+      ResourceMark rm;
+      // print out the superclass.
+      const char * from = ik->external_name();
+      if (ik->java_super() != NULL) {
+        tty->print("RESOLVE %s %s (super)\n",
+                   from,
+                   ik->java_super()->external_name());
       }
-      if (result.get_jboolean()) {
-        continue;
-      }
-
-      if (not_first_ch) {
-        // public static boolean isJavaIdentifierPart(char ch);
-        JavaCalls::call_static(&result,
-                               klass,
-                               vmSymbols::isJavaIdentifierPart_name(),
-                               vmSymbols::int_bool_signature(),
-                               &args,
-                               THREAD);
-
-        if (HAS_PENDING_EXCEPTION) {
-          CLEAR_PENDING_EXCEPTION;
-          return 0;
-        }
-
-        if (result.get_jboolean()) {
-          continue;
+      // print out each of the interface classes referred to by this class.
+      const Array<Klass*>* const local_interfaces = ik->local_interfaces();
+      if (local_interfaces != NULL) {
+        const int length = local_interfaces->length();
+        for (int i = 0; i < length; i++) {
+          const Klass* const k = local_interfaces->at(i);
+          const char * to = k->external_name();
+          tty->print("RESOLVE %s %s (interface)\n", from, to);
         }
       }
     }
-    return (not_first_ch) ? old_p : NULL;
   }
-  return (not_first_ch) ? p : NULL;
+
+  TRACE_INIT_ID(ik);
+
+  // If we reach here, all is well.
+  // Now remove the InstanceKlass* from the _klass_to_deallocate field
+  // in order for it to not be destroyed in the ClassFileParser destructor.
+  set_klass_to_deallocate(NULL);
+
+  // it's official
+  set_klass(ik);
+
+  debug_only(ik->verify();)
+}
+
+ClassFileParser::ClassFileParser(ClassFileStream* stream,
+                                 Symbol* name,
+                                 ClassLoaderData* loader_data,
+                                 Handle protection_domain,
+                                 TempNewSymbol* parsed_name,
+                                 const Klass* host_klass,
+                                 GrowableArray<Handle>* cp_patches,
+                                 Publicity pub_level,
+                                 TRAPS) :
+  _stream(stream),
+  _requested_name(name),
+  _loader_data(loader_data),
+  _host_klass(host_klass),
+  _cp_patches(cp_patches),
+  _parsed_name(parsed_name),
+  _super_klass(),
+  _cp(NULL),
+  _fields(NULL),
+  _methods(NULL),
+  _inner_classes(NULL),
+  _local_interfaces(NULL),
+  _transitive_interfaces(NULL),
+  _combined_annotations(NULL),
+  _annotations(NULL),
+  _type_annotations(NULL),
+  _fields_annotations(NULL),
+  _fields_type_annotations(NULL),
+  _klass(NULL),
+  _klass_to_deallocate(NULL),
+  _parsed_annotations(NULL),
+  _fac(NULL),
+  _field_info(NULL),
+  _method_ordering(NULL),
+  _all_mirandas(NULL),
+  _vtable_size(0),
+  _itable_size(0),
+  _num_miranda_methods(0),
+  _rt(REF_NONE),
+  _protection_domain(protection_domain),
+  _access_flags(),
+  _pub_level(pub_level),
+  _synthetic_flag(false),
+  _sde_length(0),
+  _sde_buffer(NULL),
+  _sourcefile_index(0),
+  _generic_signature_index(0),
+  _major_version(0),
+  _minor_version(0),
+  _this_class_index(0),
+  _super_class_index(0),
+  _itfs_len(0),
+  _java_fields_count(0),
+  _need_verify(false),
+  _relax_verify(false),
+  _has_default_methods(false),
+  _declares_default_methods(false),
+  _has_final_method(false),
+  _has_finalizer(false),
+  _has_empty_finalizer(false),
+  _has_vanilla_constructor(false),
+  _max_bootstrap_specifier_index(-1) {
+
+  _class_name = name != NULL ? name : vmSymbols::unknown_class_name();
+
+  assert(THREAD->is_Java_thread(), "invariant");
+  assert(_loader_data != NULL, "invariant");
+  assert(stream != NULL, "invariant");
+  assert(_stream != NULL, "invariant");
+  assert(_stream->buffer() == _stream->current(), "invariant");
+  assert(_class_name != NULL, "invariant");
+  assert(0 == _access_flags.as_int(), "invariant");
+
+  // Figure out whether we can skip format checking (matching classic VM behavior)
+  if (DumpSharedSpaces) {
+    // verify == true means it's a 'remote' class (i.e., non-boot class)
+    // Verification decision is based on BytecodeVerificationRemote flag
+    // for those classes.
+    _need_verify = (stream->need_verify()) ? BytecodeVerificationRemote :
+                                              BytecodeVerificationLocal;
+  } else {
+    _need_verify = Verifier::should_verify_for(_loader_data->class_loader(),
+                                               stream->need_verify());
+  }
+
+  // synch back verification state to stream
+  stream->set_verify(_need_verify);
+
+  // Check if verification needs to be relaxed for this class file
+  // Do not restrict it to jdk1.0 or jdk1.1 to maintain backward compatibility (4982376)
+  _relax_verify = Verifier::relax_verify_for(_loader_data->class_loader());
+
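+  // Parsing proceeds in two phases: parse_stream() reads and format-checks
+  // the raw class file bytes; post_process_parsed_stream() then resolves the
+  // super class, computes vtable/itable sizes and lays out the fields.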
+  parse_stream(stream, CHECK);
+
+  post_process_parsed_stream(stream, _cp, CHECK);
+}
+
+void ClassFileParser::clear_class_metadata() {
+  // Metadata created before the instance klass is created must be
+  // deallocated if classfile parsing returns an error.
+  _cp = NULL;
+  _fields = NULL;
+  _methods = NULL;
+  _inner_classes = NULL;
+  _local_interfaces = NULL;
+  _transitive_interfaces = NULL;
+  _combined_annotations = NULL;
+  _annotations = _type_annotations = NULL;
+  _fields_annotations = _fields_type_annotations = NULL;
+}
+
+// Destructor to clean up
+ClassFileParser::~ClassFileParser() {
+  if (_cp != NULL) {
+    MetadataFactory::free_metadata(_loader_data, _cp);
+  }
+  if (_fields != NULL) {
+    MetadataFactory::free_array<u2>(_loader_data, _fields);
+  }
+
+  if (_methods != NULL) {
+    // Free methods
+    InstanceKlass::deallocate_methods(_loader_data, _methods);
+  }
+
+  // beware of the shared empty arrays in Universe (e.g. the_empty_short_array) - never free them!
+  if (_inner_classes != NULL && _inner_classes != Universe::the_empty_short_array()) {
+    MetadataFactory::free_array<u2>(_loader_data, _inner_classes);
+  }
+
+  // Free interfaces
+  InstanceKlass::deallocate_interfaces(_loader_data, _super_klass,
+                                       _local_interfaces, _transitive_interfaces);
+
+  if (_combined_annotations != NULL) {
+    // After all annotation arrays have been created, they are installed into
+    // the Annotations object that will be assigned to the InstanceKlass being created.
+
+    // Deallocate the Annotations object and the installed annotations arrays.
+    _combined_annotations->deallocate_contents(_loader_data);
+
+    // If the _combined_annotations pointer is non-NULL,
+    // then the other annotations fields should have been cleared.
+    assert(_annotations             == NULL, "Should have been cleared");
+    assert(_type_annotations        == NULL, "Should have been cleared");
+    assert(_fields_annotations      == NULL, "Should have been cleared");
+    assert(_fields_type_annotations == NULL, "Should have been cleared");
+  } else {
+    // If the annotations arrays were not installed into the Annotations object,
+    // then they have to be deallocated explicitly.
+    MetadataFactory::free_array<u1>(_loader_data, _annotations);
+    MetadataFactory::free_array<u1>(_loader_data, _type_annotations);
+    Annotations::free_contents(_loader_data, _fields_annotations);
+    Annotations::free_contents(_loader_data, _fields_type_annotations);
+  }
+
+  clear_class_metadata();
+
+  // deallocate the klass if already created.  Don't directly deallocate, but add
+  // to the deallocate list so that the klass is removed from the CLD::_klasses list
+  // at a safepoint.
+  if (_klass_to_deallocate != NULL) {
+    _loader_data->add_to_deallocate_list(_klass_to_deallocate);
+  }
 }
 
-
-// Take pointer to a string. Skip over the longest part of the string that could
-// be taken as a field signature. Allow "void" if void_ok.
-// Return a pointer to just past the signature.
-// Return NULL if no legal signature is found.
-char* ClassFileParser::skip_over_field_signature(char* signature,
-                                                 bool void_ok,
-                                                 unsigned int length,
+void ClassFileParser::parse_stream(const ClassFileStream* const stream,
+                                   TRAPS) {
+
+  assert(stream != NULL, "invariant");
+  assert(_class_name != NULL, "invariant");
+
+  // BEGIN STREAM PARSING
+  stream->guarantee_more(8, CHECK);  // magic, major, minor
+  // Magic value
+  const u4 magic = stream->get_u4_fast();
+  guarantee_property(magic == JAVA_CLASSFILE_MAGIC,
+                     "Incompatible magic value %u in class file %s",
+                     magic, CHECK);
+
+  // Version numbers
+  _minor_version = stream->get_u2_fast();
+  _major_version = stream->get_u2_fast();
+
+  if (DumpSharedSpaces && _major_version < JAVA_1_5_VERSION) {
+    ResourceMark rm;
+    warning("Pre JDK 1.5 class not supported by CDS: %u.%u %s",
+            _major_version,  _minor_version, _class_name->as_C_string());
+    Exceptions::fthrow(
+      THREAD_AND_LOCATION,
+      vmSymbols::java_lang_UnsupportedClassVersionError(),
+      "Unsupported major.minor version for dump time %u.%u",
+      _major_version,
+      _minor_version);
+  }
+
+  // Check version numbers - we check this even with verifier off
+  if (!is_supported_version(_major_version, _minor_version)) {
+    ResourceMark rm(THREAD);
+    Exceptions::fthrow(
+      THREAD_AND_LOCATION,
+      vmSymbols::java_lang_UnsupportedClassVersionError(),
+      "%s has been compiled by a more recent version of the Java Runtime (class file version %u.%u), "
+      "this version of the Java Runtime only recognizes class file versions up to %u.%u",
+      _class_name->as_C_string(),
+      _major_version,
+      _minor_version,
+      JAVA_MAX_SUPPORTED_VERSION,
+      JAVA_MAX_SUPPORTED_MINOR_VERSION);
+    return;
+  }
+
+  stream->guarantee_more(3, CHECK); // length, first cp tag
+  const u2 cp_size = stream->get_u2_fast();
+
+  guarantee_property(
+    cp_size >= 1, "Illegal constant pool size %u in class file %s",
+    cp_size, CHECK);
+
+  _cp = ConstantPool::allocate(_loader_data,
+                               cp_size,
+                               CHECK);
+
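+  // Keep a local alias; _cp itself is transferred to the InstanceKlass later.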
+  ConstantPool* const cp = _cp;
+
+  parse_constant_pool(stream, cp, cp_size, CHECK);
+
+  assert(cp_size == (u2)cp->length(), "invariant");
+
+  // ACCESS FLAGS
+  stream->guarantee_more(8, CHECK);  // flags, this_class, super_class, infs_len
+
+  // Access flags
+  jint flags = stream->get_u2_fast() & JVM_RECOGNIZED_CLASS_MODIFIERS;
+
+  if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) {
+    // Set abstract bit for old class files for backward compatibility
+    flags |= JVM_ACC_ABSTRACT;
+  }
+
+  _access_flags.set_flags(flags);
+
+  verify_legal_class_modifiers((jint)_access_flags.as_int(), CHECK);
+
+  // This class and superclass
+  _this_class_index = stream->get_u2_fast();
+  check_property(
+    valid_cp_range(_this_class_index, cp_size) &&
+      cp->tag_at(_this_class_index).is_unresolved_klass(),
+    "Invalid this class index %u in constant pool in class file %s",
+    _this_class_index, CHECK);
+
+  Symbol* const class_name_in_cp = cp->klass_name_at(_this_class_index);
+  assert(class_name_in_cp != NULL, "class_name can't be null");
+
+  if (_parsed_name != NULL) {
+    // It's important to set parsed_name *before* resolving the super class.
+    // (it's used for cleanup by the caller if parsing fails)
+    *_parsed_name = class_name_in_cp;
+    // parsed_name is returned and can be used if there's an error, so add to
+    // its reference count.  Caller will decrement the refcount.
+    (*_parsed_name)->increment_refcount();
+  }
+
+  // Update _class_name (which may still be the placeholder set in the
+  // constructor) to reflect the name in the constant pool
+  _class_name = class_name_in_cp;
+
+  // There is no need to check whether this class name is legal:
+  // it was already checked when the constant pool was parsed.
+  // However, make sure it is not an array type.
+  if (_need_verify) {
+    guarantee_property(_class_name->byte_at(0) != JVM_SIGNATURE_ARRAY,
+                       "Bad class name in class file %s",
+                       CHECK);
+  }
+
+  // Check that the name in the class file matches the requested name
+  if (_requested_name != NULL && _requested_name != _class_name) {
+    ResourceMark rm(THREAD);
+    Exceptions::fthrow(
+      THREAD_AND_LOCATION,
+      vmSymbols::java_lang_NoClassDefFoundError(),
+      "%s (wrong name: %s)",
+      _class_name->as_C_string(),
+      _requested_name->as_C_string() // guaranteed non-NULL by the enclosing check
+    );
+    return;
+  }
+
+  if (!is_internal()) {
+    if (TraceClassLoadingPreorder) {
+      tty->print("[Loading %s",
+        _class_name->as_klass_external_name());
+
+      if (stream->source() != NULL) {
+        tty->print(" from %s", stream->source());
+      }
+      tty->print_cr("]");
+    }
+#if INCLUDE_CDS
+    if (DumpLoadedClassList != NULL && stream->source() != NULL && classlist_file->is_open()) {
+      // Only dump the classes that can be stored into CDS archive
+      if (SystemDictionaryShared::is_sharing_possible(_loader_data)) {
+        ResourceMark rm(THREAD);
+        classlist_file->print_cr("%s", _class_name->as_C_string());
+        classlist_file->flush();
+      }
+    }
+#endif
+  }
+
+  // SUPERKLASS
+  _super_class_index = stream->get_u2_fast();
+  _super_klass = parse_super_class(cp,
+                                   _super_class_index,
+                                   _need_verify,
+                                   CHECK);
+
+  // Interfaces
+  _itfs_len = stream->get_u2_fast();
+  parse_interfaces(stream,
+                   _itfs_len,
+                   cp,
+                   &_has_default_methods,
+                   CHECK);
+
+  assert(_local_interfaces != NULL, "invariant");
+
+  // Fields (offsets are filled in later)
+  _fac = new FieldAllocationCount();
+  parse_fields(stream,
+               _access_flags.is_interface(),
+               _fac,
+               cp,
+               cp_size,
+               &_java_fields_count,
+               CHECK);
+
+  assert(_fields != NULL, "invariant");
+
+  // Methods
+  AccessFlags promoted_flags;
+  parse_methods(stream,
+                _access_flags.is_interface(),
+                &promoted_flags,
+                &_has_final_method,
+                &_declares_default_methods,
+                CHECK);
+
+  assert(_methods != NULL, "invariant");
+
+  // promote flags from parse_methods() to the klass' flags
+  _access_flags.add_promoted_flags(promoted_flags.as_int());
+
+  if (_declares_default_methods) {
+    _has_default_methods = true;
+  }
+
+  // Additional attributes/annotations
+  _parsed_annotations = new ClassAnnotationCollector();
+  parse_classfile_attributes(stream, cp, _parsed_annotations, CHECK);
+
+  assert(_inner_classes != NULL, "invariant");
+
+  // Finalize the Annotations metadata object,
+  // now that all annotation arrays have been created.
+  create_combined_annotations(CHECK);
+
+  // Make sure this is the end of the class file stream
+  guarantee_property(stream->at_eos(),
+                     "Extra bytes at the end of class file %s",
+                     CHECK);
+
+  // all bytes in stream read and parsed
+}
+
+void ClassFileParser::post_process_parsed_stream(const ClassFileStream* const stream,
+                                                 ConstantPool* cp,
                                                  TRAPS) {
-  unsigned int array_dim = 0;
-  while (length > 0) {
-    switch (signature[0]) {
-      case JVM_SIGNATURE_VOID: if (!void_ok) { return NULL; }
-      case JVM_SIGNATURE_BOOLEAN:
-      case JVM_SIGNATURE_BYTE:
-      case JVM_SIGNATURE_CHAR:
-      case JVM_SIGNATURE_SHORT:
-      case JVM_SIGNATURE_INT:
-      case JVM_SIGNATURE_FLOAT:
-      case JVM_SIGNATURE_LONG:
-      case JVM_SIGNATURE_DOUBLE:
-        return signature + 1;
-      case JVM_SIGNATURE_CLASS: {
-        if (_major_version < JAVA_1_5_VERSION) {
-          // Skip over the class name if one is there
-          char* p = skip_over_field_name(signature + 1, true, --length);
-
-          // The next character better be a semicolon
-          if (p && (p - signature) > 1 && p[0] == ';') {
-            return p + 1;
-          }
-        } else {
-          // 4900761: For class version > 48, any unicode is allowed in class name.
-          length--;
-          signature++;
-          while (length > 0 && signature[0] != ';') {
-            if (signature[0] == '.') {
-              classfile_parse_error("Class name contains illegal character '.' in descriptor in class file %s", CHECK_0);
-            }
-            length--;
-            signature++;
-          }
-          if (signature[0] == ';') { return signature + 1; }
-        }
-
-        return NULL;
+  assert(stream != NULL, "invariant");
+  assert(stream->at_eos(), "invariant");
+  assert(cp != NULL, "invariant");
+  assert(_loader_data != NULL, "invariant");
+
+  // We check the super class after the class file has been parsed and format checked
+  if (_super_class_index > 0 && NULL == _super_klass) {
+    Symbol* const super_class_name = cp->klass_name_at(_super_class_index);
+    if (_access_flags.is_interface()) {
+      // Before attempting to resolve the superclass, check for class format
+      // errors not checked yet.
+      guarantee_property(super_class_name == vmSymbols::java_lang_Object(),
+        "Interfaces must have java.lang.Object as superclass in class file %s",
+        CHECK);
       }
-      case JVM_SIGNATURE_ARRAY:
-        array_dim++;
-        if (array_dim > 255) {
-          // 4277370: array descriptor is valid only if it represents 255 or fewer dimensions.
-          classfile_parse_error("Array type descriptor has more than 255 dimensions in class file %s", CHECK_0);
-        }
-        // The rest of what's there better be a legal signature
-        signature++;
-        length--;
-        void_ok = false;
-        break;
-
-      default:
-        return NULL;
+      _super_klass = (const InstanceKlass*)
+                       SystemDictionary::resolve_super_or_fail(_class_name,
+                                                               super_class_name,
+                                                               _loader_data->class_loader(),
+                                                               _protection_domain,
+                                                               true,
+                                                               CHECK);
+  }
+
+  if (_super_klass != NULL) {
+    if (_super_klass->has_default_methods()) {
+      _has_default_methods = true;
+    }
+
+    if (_super_klass->is_interface()) {
+      ResourceMark rm(THREAD);
+      Exceptions::fthrow(
+        THREAD_AND_LOCATION,
+        vmSymbols::java_lang_IncompatibleClassChangeError(),
+        "class %s has interface %s as super class",
+        _class_name->as_klass_external_name(),
+        _super_klass->external_name()
+      );
+      return;
+    }
+    // Make sure super class is not final
+    if (_super_klass->is_final()) {
+      THROW_MSG(vmSymbols::java_lang_VerifyError(), "Cannot inherit from final class");
     }
   }
-  return NULL;
+
+  // Compute the transitive list of all unique interfaces implemented by this class
+  _transitive_interfaces =
+    compute_transitive_interfaces(_super_klass,
+                                  _local_interfaces,
+                                  _loader_data,
+                                  CHECK);
+
+  assert(_transitive_interfaces != NULL, "invariant");
+
+  // sort methods
+  _method_ordering = sort_methods(_methods);
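+  // (the returned array records the original method order; it is copied
+  // onto the InstanceKlass by copy_method_ordering() in fill_instance_klass)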
+
+  _all_mirandas = new GrowableArray<Method*>(20);
+
+  klassVtable::compute_vtable_size_and_num_mirandas(&_vtable_size,
+                                                    &_num_miranda_methods,
+                                                    _all_mirandas,
+                                                    _super_klass,
+                                                    _methods,
+                                                    _access_flags,
+                                                    _loader_data->class_loader(),
+                                                    _class_name,
+                                                    _local_interfaces,
+                                                    CHECK);
+
+  // Size of Java itable (in words)
+  _itable_size = _access_flags.is_interface() ? 0 :
+    klassItable::compute_itable_size(_transitive_interfaces);
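+  // (an interface does not use an itable of its own, hence size 0 above)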
+
+  assert(_fac != NULL, "invariant");
+  assert(_parsed_annotations != NULL, "invariant");
+
+  _field_info = new FieldLayoutInfo();
+  layout_fields(cp, _fac, _parsed_annotations, _field_info, CHECK);
+
+  // Compute the reference type: it is inherited from the super class
+  // (REF_NONE when there is no super class).
+  _rt = (NULL == _super_klass) ? REF_NONE : _super_klass->reference_type();
+
 }
+
+void ClassFileParser::set_klass(InstanceKlass* klass) {
+
+#ifdef ASSERT
+  if (klass != NULL) {
+    assert(NULL == _klass, "leaking?");
+  }
+#endif
+
+  _klass = klass;
+}
+
+void ClassFileParser::set_klass_to_deallocate(InstanceKlass* klass) {
+
+#ifdef ASSERT
+  if (klass != NULL) {
+    assert(NULL == _klass_to_deallocate, "leaking?");
+  }
+#endif
+
+  _klass_to_deallocate = klass;
+}
+
+// Caller responsible for ResourceMark
+// clone stream with rewound position
+const ClassFileStream* ClassFileParser::clone_stream() const {
+  assert(_stream != NULL, "invariant");
+
+  return _stream->clone();
+}
--- a/src/share/vm/classfile/classFileParser.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/classFileParser.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -25,33 +25,123 @@
 #ifndef SHARE_VM_CLASSFILE_CLASSFILEPARSER_HPP
 #define SHARE_VM_CLASSFILE_CLASSFILEPARSER_HPP
 
-#include "classfile/classFileStream.hpp"
-#include "classfile/symbolTable.hpp"
-#include "oops/annotations.hpp"
+#include "memory/referenceType.hpp"
+#include "runtime/handles.inline.hpp"
 #include "oops/constantPool.hpp"
 #include "oops/typeArrayOop.hpp"
 #include "utilities/accessFlags.hpp"
 
+class Annotations;
+template <typename T>
+class Array;
+class ClassFileStream;
+class ClassLoaderData;
 class CompressedLineNumberWriteStream;
-class FieldAllocationCount;
+class ConstMethod;
 class FieldInfo;
-class FieldLayoutInfo;
-
+template <typename T>
+class GrowableArray;
+class InstanceKlass;
+class intArray;
+class Symbol;
+class TempNewSymbol;
 
 // Parser for .class files
 //
 // The bytes describing the class file structure are read from a Stream object
 
 class ClassFileParser VALUE_OBJ_CLASS_SPEC {
+
+ class ClassAnnotationCollector;
+ class FieldAllocationCount;
+ class FieldAnnotationCollector;
+ class FieldLayoutInfo;
+
+ public:
+  // The ClassFileParser has an associated "publicity" level
+  // It is used to control which subsystems (if any)
+  // will observe the parsing (logging, events, tracing).
+  // Default level is "BROADCAST", which is equivalent to
+  // a "public" parsing attempt.
+  //
+  // "INTERNAL" level should be entirely private to the
+  // caller - this allows for internal reuse of ClassFileParser
+  //
+  enum Publicity {
+    INTERNAL,
+    BROADCAST,
+    NOF_PUBLICITY_LEVELS
+  };
+
  private:
+  const ClassFileStream* _stream; // Actual input stream
+  const Symbol* _requested_name;
+  Symbol* _class_name;
+  mutable ClassLoaderData* _loader_data;
+  const Klass* _host_klass;
+  GrowableArray<Handle>* _cp_patches; // overrides for CP entries
+  TempNewSymbol* _parsed_name;
+
+  // Metadata created before the instance klass is created.  Must be deallocated
+  // if not transferred to the InstanceKlass upon successful class loading
+  // in which case these pointers have been set to NULL.
+  const InstanceKlass* _super_klass;
+  ConstantPool* _cp;
+  Array<u2>* _fields;
+  Array<Method*>* _methods;
+  Array<u2>* _inner_classes;
+  Array<Klass*>* _local_interfaces;
+  Array<Klass*>* _transitive_interfaces;
+  Annotations* _combined_annotations;
+  AnnotationArray* _annotations;
+  AnnotationArray* _type_annotations;
+  Array<AnnotationArray*>* _fields_annotations;
+  Array<AnnotationArray*>* _fields_type_annotations;
+  InstanceKlass* _klass;  // InstanceKlass* once created.
+  InstanceKlass* _klass_to_deallocate; // an InstanceKlass* to be destroyed
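+                                       // (enqueued on the loader's deallocate
+                                       // list by the destructor if parsing fails)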
+
+  ClassAnnotationCollector* _parsed_annotations;
+  FieldAllocationCount* _fac;
+  FieldLayoutInfo* _field_info;
+  const intArray* _method_ordering;
+  GrowableArray<Method*>* _all_mirandas;
+
+  enum { fixed_buffer_size = 128 };
+  u_char _linenumbertable_buffer[fixed_buffer_size];
+
+  // Size of Java vtable (in words)
+  int _vtable_size;
+  int _itable_size;
+
+  int _num_miranda_methods;
+
+  ReferenceType _rt;
+  Handle _protection_domain;
+  AccessFlags _access_flags;
+
+  // for tracing and notifications
+  Publicity _pub_level;
+
+  // class attributes parsed before the instance klass is created:
+  bool _synthetic_flag;
+  int _sde_length;
+  const char* _sde_buffer;
+  u2 _sourcefile_index;
+  u2 _generic_signature_index;
+
+  u2 _major_version;
+  u2 _minor_version;
+  u2 _this_class_index;
+  u2 _super_class_index;
+  u2 _itfs_len;
+  u2 _java_fields_count;
+
   bool _need_verify;
   bool _relax_verify;
-  u2   _major_version;
-  u2   _minor_version;
-  Symbol* _class_name;
-  ClassLoaderData* _loader_data;
-  KlassHandle _host_klass;
-  GrowableArray<Handle>* _cp_patches; // overrides for CP entries
+
+  bool _has_default_methods;
+  bool _declares_default_methods;
+  bool _has_final_method;
 
   // precomputed flags
   bool _has_finalizer;
@@ -59,270 +149,164 @@
   bool _has_vanilla_constructor;
   int _max_bootstrap_specifier_index;  // detects BSS values
 
-  // class attributes parsed before the instance klass is created:
-  bool       _synthetic_flag;
-  int        _sde_length;
-  char*      _sde_buffer;
-  u2         _sourcefile_index;
-  u2         _generic_signature_index;
+  void parse_stream(const ClassFileStream* const stream, TRAPS);
 
-  // Metadata created before the instance klass is created.  Must be deallocated
-  // if not transferred to the InstanceKlass upon successful class loading
-  // in which case these pointers have been set to NULL.
-  instanceKlassHandle _super_klass;
-  ConstantPool*    _cp;
-  Array<u2>*       _fields;
-  Array<Method*>*  _methods;
-  Array<u2>*       _inner_classes;
-  Array<Klass*>*   _local_interfaces;
-  Array<Klass*>*   _transitive_interfaces;
-  Annotations*     _combined_annotations;
-  AnnotationArray* _annotations;
-  AnnotationArray* _type_annotations;
-  Array<AnnotationArray*>* _fields_annotations;
-  Array<AnnotationArray*>* _fields_type_annotations;
-  InstanceKlass*   _klass;  // InstanceKlass once created.
+  void post_process_parsed_stream(const ClassFileStream* const stream,
+                                  ConstantPool* cp,
+                                  TRAPS);
+
+  void fill_instance_klass(InstanceKlass* ik, TRAPS);
+  void set_klass(InstanceKlass* instance);
 
   void set_class_synthetic_flag(bool x)        { _synthetic_flag = x; }
   void set_class_sourcefile_index(u2 x)        { _sourcefile_index = x; }
   void set_class_generic_signature_index(u2 x) { _generic_signature_index = x; }
-  void set_class_sde_buffer(char* x, int len)  { _sde_buffer = x; _sde_length = len; }
+  void set_class_sde_buffer(const char* x, int len)  { _sde_buffer = x; _sde_length = len; }
 
   void create_combined_annotations(TRAPS);
-
-  void init_parsed_class_attributes(ClassLoaderData* loader_data) {
-    _loader_data = loader_data;
-    _synthetic_flag = false;
-    _sourcefile_index = 0;
-    _generic_signature_index = 0;
-    _sde_buffer = NULL;
-    _sde_length = 0;
-    // initialize the other flags too:
-    _has_finalizer = _has_empty_finalizer = _has_vanilla_constructor = false;
-    _max_bootstrap_specifier_index = -1;
-    clear_class_metadata();
-    _klass = NULL;
-  }
-  void apply_parsed_class_attributes(instanceKlassHandle k);  // update k
-  void apply_parsed_class_metadata(instanceKlassHandle k, int fields_count, TRAPS);
-  void clear_class_metadata() {
-    // metadata created before the instance klass is created.  Must be
-    // deallocated if classfile parsing returns an error.
-    _cp = NULL;
-    _fields = NULL;
-    _methods = NULL;
-    _inner_classes = NULL;
-    _local_interfaces = NULL;
-    _transitive_interfaces = NULL;
-    _combined_annotations = NULL;
-    _annotations = _type_annotations = NULL;
-    _fields_annotations = _fields_type_annotations = NULL;
-  }
-
-  class AnnotationCollector {
-  public:
-    enum Location { _in_field, _in_method, _in_class };
-    enum ID {
-      _unknown = 0,
-      _method_CallerSensitive,
-      _method_ForceInline,
-      _method_DontInline,
-      _method_InjectedProfile,
-      _method_LambdaForm_Compiled,
-      _method_LambdaForm_Hidden,
-      _method_HotSpotIntrinsicCandidate,
-      _jdk_internal_vm_annotation_Contended,
-      _field_Stable,
-      _annotation_LIMIT
-    };
-    const Location _location;
-    int _annotations_present;
-    u2 _contended_group;
-
-    AnnotationCollector(Location location)
-    : _location(location), _annotations_present(0)
-    {
-      assert((int)_annotation_LIMIT <= (int)sizeof(_annotations_present) * BitsPerByte, "");
-    }
-    // If this annotation name has an ID, report it (or _none).
-    ID annotation_index(ClassLoaderData* loader_data, Symbol* name);
-    // Set the annotation name:
-    void set_annotation(ID id) {
-      assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
-      _annotations_present |= nth_bit((int)id);
-    }
-
-    void remove_annotation(ID id) {
-      assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
-      _annotations_present &= ~nth_bit((int)id);
-    }
-
-    // Report if the annotation is present.
-    bool has_any_annotations() const { return _annotations_present != 0; }
-    bool has_annotation(ID id) const { return (nth_bit((int)id) & _annotations_present) != 0; }
-
-    void set_contended_group(u2 group) { _contended_group = group; }
-    u2 contended_group() const { return _contended_group; }
-
-    bool is_contended() const { return has_annotation(_jdk_internal_vm_annotation_Contended); }
-
-    void set_stable(bool stable) { set_annotation(_field_Stable); }
-    bool is_stable() const { return has_annotation(_field_Stable); }
-  };
-
-  // This class also doubles as a holder for metadata cleanup.
-  class FieldAnnotationCollector: public AnnotationCollector {
-    ClassLoaderData* _loader_data;
-    AnnotationArray* _field_annotations;
-    AnnotationArray* _field_type_annotations;
-  public:
-    FieldAnnotationCollector(ClassLoaderData* loader_data) :
-                                 AnnotationCollector(_in_field),
-                                 _loader_data(loader_data),
-                                 _field_annotations(NULL),
-                                 _field_type_annotations(NULL) {}
-    void apply_to(FieldInfo* f);
-    ~FieldAnnotationCollector();
-    AnnotationArray* field_annotations()      { return _field_annotations; }
-    AnnotationArray* field_type_annotations() { return _field_type_annotations; }
-
-    void set_field_annotations(AnnotationArray* a)      { _field_annotations = a; }
-    void set_field_type_annotations(AnnotationArray* a) { _field_type_annotations = a; }
-  };
-
-  class MethodAnnotationCollector: public AnnotationCollector {
-  public:
-    MethodAnnotationCollector() : AnnotationCollector(_in_method) { }
-    void apply_to(methodHandle m);
-  };
-  class ClassAnnotationCollector: public AnnotationCollector {
-  public:
-    ClassAnnotationCollector() : AnnotationCollector(_in_class) { }
-    void apply_to(instanceKlassHandle k);
-  };
-
-  enum { fixed_buffer_size = 128 };
-  u_char linenumbertable_buffer[fixed_buffer_size];
-
-  ClassFileStream* _stream;              // Actual input stream
-
-  enum { LegalClass, LegalField, LegalMethod }; // used to verify unqualified names
-
-  // Accessors
-  ClassFileStream* stream()                        { return _stream; }
-  void set_stream(ClassFileStream* st)             { _stream = st; }
+  void apply_parsed_class_attributes(InstanceKlass* k);  // update k
+  void apply_parsed_class_metadata(InstanceKlass* k, int fields_count, TRAPS);
+  void clear_class_metadata();
 
   // Constant pool parsing
-  void parse_constant_pool_entries(int length, TRAPS);
+  void parse_constant_pool_entries(const ClassFileStream* const stream,
+                                   ConstantPool* cp,
+                                   const int length,
+                                   TRAPS);
 
-  constantPoolHandle parse_constant_pool(TRAPS);
+  void parse_constant_pool(const ClassFileStream* const cfs,
+                           ConstantPool* const cp,
+                           const int length,
+                           TRAPS);
 
   // Interface parsing
-  Array<Klass*>* parse_interfaces(int length,
-                                  Handle protection_domain,
-                                  Symbol* class_name,
-                                  bool* has_default_methods,
-                                  TRAPS);
-  void record_defined_class_dependencies(instanceKlassHandle defined_klass, TRAPS);
+  void parse_interfaces(const ClassFileStream* const stream,
+                        const int itfs_len,
+                        ConstantPool* const cp,
+                        bool* has_default_methods,
+                        TRAPS);
 
-  instanceKlassHandle parse_super_class(int super_class_index, TRAPS);
+  const InstanceKlass* parse_super_class(ConstantPool* const cp,
+                                         const int super_class_index,
+                                         const bool need_verify,
+                                         TRAPS);
+
   // Field parsing
-  void parse_field_attributes(u2 attributes_count,
-                              bool is_static, u2 signature_index,
-                              u2* constantvalue_index_addr,
-                              bool* is_synthetic_addr,
-                              u2* generic_signature_index_addr,
+  void parse_field_attributes(const ClassFileStream* const cfs,
+                              u2 attributes_count,
+                              bool is_static,
+                              u2 signature_index,
+                              u2* const constantvalue_index_addr,
+                              bool* const is_synthetic_addr,
+                              u2* const generic_signature_index_addr,
                               FieldAnnotationCollector* parsed_annotations,
                               TRAPS);
-  Array<u2>* parse_fields(Symbol* class_name,
-                          bool is_interface,
-                          FieldAllocationCount *fac,
-                          u2* java_fields_count_ptr, TRAPS);
 
-  void print_field_layout(Symbol* name,
-                          Array<u2>* fields,
-                          const constantPoolHandle& cp,
-                          int instance_size,
-                          int instance_fields_start,
-                          int instance_fields_end,
-                          int static_fields_end);
+  void parse_fields(const ClassFileStream* const cfs,
+                    bool is_interface,
+                    FieldAllocationCount* const fac,
+                    ConstantPool* cp,
+                    const int cp_size,
+                    u2* const java_fields_count_ptr,
+                    TRAPS);
 
   // Method parsing
-  methodHandle parse_method(bool is_interface,
-                            AccessFlags* promoted_flags,
-                            TRAPS);
-  Array<Method*>* parse_methods(bool is_interface,
-                                AccessFlags* promoted_flags,
-                                bool* has_final_method,
-                                bool* declares_default_methods,
-                                TRAPS);
-  intArray* sort_methods(Array<Method*>* methods);
+  Method* parse_method(const ClassFileStream* const cfs,
+                       bool is_interface,
+                       const ConstantPool* cp,
+                       AccessFlags* const promoted_flags,
+                       TRAPS);
+
+  void parse_methods(const ClassFileStream* const cfs,
+                     bool is_interface,
+                     AccessFlags* const promoted_flags,
+                     bool* const has_final_method,
+                     bool* const declares_default_methods,
+                     TRAPS);
+
+  const u2* parse_exception_table(const ClassFileStream* const stream,
+                                  u4 code_length,
+                                  u4 exception_table_length,
+                                  TRAPS);
 
-  u2* parse_exception_table(u4 code_length, u4 exception_table_length,
-                            TRAPS);
-  void parse_linenumber_table(
-      u4 code_attribute_length, u4 code_length,
-      CompressedLineNumberWriteStream** write_stream, TRAPS);
-  u2* parse_localvariable_table(u4 code_length, u2 max_locals, u4 code_attribute_length,
-                                u2* localvariable_table_length,
-                                bool isLVTT, TRAPS);
-  u2* parse_checked_exceptions(u2* checked_exceptions_length, u4 method_attribute_length,
-                               TRAPS);
-  void parse_type_array(u2 array_length, u4 code_length, u4* u1_index, u4* u2_index,
-                        u1* u1_array, u2* u2_array, TRAPS);
-  u1* parse_stackmap_table(u4 code_attribute_length, TRAPS);
+  void parse_linenumber_table(u4 code_attribute_length,
+                              u4 code_length,
+                              CompressedLineNumberWriteStream**const write_stream,
+                              TRAPS);
+
+  const u2* parse_localvariable_table(const ClassFileStream* const cfs,
+                                      u4 code_length,
+                                      u2 max_locals,
+                                      u4 code_attribute_length,
+                                      u2* const localvariable_table_length,
+                                      bool isLVTT,
+                                      TRAPS);
+
+  const u2* parse_checked_exceptions(const ClassFileStream* const cfs,
+                                     u2* const checked_exceptions_length,
+                                     u4 method_attribute_length,
+                                     TRAPS);
+
+  void parse_type_array(u2 array_length,
+                        u4 code_length,
+                        u4* const u1_index,
+                        u4* const u2_index,
+                        u1* const u1_array,
+                        u2* const u2_array,
+                        TRAPS);
 
   // Classfile attribute parsing
-  u2 parse_generic_signature_attribute(TRAPS);
-  void parse_classfile_sourcefile_attribute(TRAPS);
-  void parse_classfile_source_debug_extension_attribute(int length, TRAPS);
-  u2   parse_classfile_inner_classes_attribute(u1* inner_classes_attribute_start,
+  u2 parse_generic_signature_attribute(const ClassFileStream* const cfs, TRAPS);
+  void parse_classfile_sourcefile_attribute(const ClassFileStream* const cfs, TRAPS);
+  void parse_classfile_source_debug_extension_attribute(const ClassFileStream* const cfs,
+                                                        int length,
+                                                        TRAPS);
+
+  u2   parse_classfile_inner_classes_attribute(const ClassFileStream* const cfs,
+                                               const u1* const inner_classes_attribute_start,
                                                bool parsed_enclosingmethod_attribute,
                                                u2 enclosing_method_class_index,
                                                u2 enclosing_method_method_index,
                                                TRAPS);
-  void parse_classfile_attributes(ClassAnnotationCollector* parsed_annotations,
+
+  void parse_classfile_attributes(const ClassFileStream* const cfs,
+                                  ConstantPool* cp,
+                                  ClassAnnotationCollector* parsed_annotations,
                                   TRAPS);
+
   void parse_classfile_synthetic_attribute(TRAPS);
-  void parse_classfile_signature_attribute(TRAPS);
-  void parse_classfile_bootstrap_methods_attribute(u4 attribute_length, TRAPS);
+  void parse_classfile_signature_attribute(const ClassFileStream* const cfs, TRAPS);
+  void parse_classfile_bootstrap_methods_attribute(const ClassFileStream* const cfs,
+                                                   ConstantPool* cp,
+                                                   u4 attribute_length,
+                                                   TRAPS);
 
   // Annotations handling
-  AnnotationArray* assemble_annotations(u1* runtime_visible_annotations,
+  AnnotationArray* assemble_annotations(const u1* const runtime_visible_annotations,
                                         int runtime_visible_annotations_length,
-                                        u1* runtime_invisible_annotations,
-                                        int runtime_invisible_annotations_length, TRAPS);
-  int skip_annotation(u1* buffer, int limit, int index);
-  int skip_annotation_value(u1* buffer, int limit, int index);
-  void parse_annotations(u1* buffer, int limit,
-                         /* Results (currently, only one result is supported): */
-                         AnnotationCollector* result);
+                                        const u1* const runtime_invisible_annotations,
+                                        int runtime_invisible_annotations_length,
+                                        TRAPS);
 
-  // Final setup
-  unsigned int compute_oop_map_count(instanceKlassHandle super,
-                                     unsigned int nonstatic_oop_count,
-                                     int first_nonstatic_oop_offset);
-  void fill_oop_maps(instanceKlassHandle k,
-                     unsigned int nonstatic_oop_map_count,
-                     int* nonstatic_oop_offsets,
-                     unsigned int* nonstatic_oop_counts);
-  void set_precomputed_flags(instanceKlassHandle k);
-  Array<Klass*>* compute_transitive_interfaces(instanceKlassHandle super,
-                                               Array<Klass*>* local_ifs, TRAPS);
+  void set_precomputed_flags(InstanceKlass* k);
 
   // Format checker methods
-  void classfile_parse_error(const char* msg, TRAPS);
-  void classfile_parse_error(const char* msg, int index, TRAPS);
-  void classfile_parse_error(const char* msg, const char *name, TRAPS);
-  void classfile_parse_error(const char* msg, int index, const char *name, TRAPS);
-  inline void guarantee_property(bool b, const char* msg, TRAPS) {
+  void classfile_parse_error(const char* msg, TRAPS) const;
+  void classfile_parse_error(const char* msg, int index, TRAPS) const;
+  void classfile_parse_error(const char* msg, const char *name, TRAPS) const;
+  void classfile_parse_error(const char* msg,
+                             int index,
+                             const char *name,
+                             TRAPS) const;
+
+  inline void guarantee_property(bool b, const char* msg, TRAPS) const {
     if (!b) { classfile_parse_error(msg, CHECK); }
   }
 
-  void report_assert_property_failure(const char* msg, TRAPS) PRODUCT_RETURN;
-  void report_assert_property_failure(const char* msg, int index, TRAPS) PRODUCT_RETURN;
+  void report_assert_property_failure(const char* msg, TRAPS) const PRODUCT_RETURN;
+  void report_assert_property_failure(const char* msg, int index, TRAPS) const PRODUCT_RETURN;
 
-  inline void assert_property(bool b, const char* msg, TRAPS) {
+  inline void assert_property(bool b, const char* msg, TRAPS) const {
 #ifdef ASSERT
     if (!b) {
       report_assert_property_failure(msg, THREAD);
@@ -330,7 +314,7 @@
 #endif
   }
 
-  inline void assert_property(bool b, const char* msg, int index, TRAPS) {
+  inline void assert_property(bool b, const char* msg, int index, TRAPS) const {
 #ifdef ASSERT
     if (!b) {
       report_assert_property_failure(msg, index, THREAD);
@@ -338,7 +322,10 @@
 #endif
   }
 
-  inline void check_property(bool property, const char* msg, int index, TRAPS) {
+  inline void check_property(bool property,
+                             const char* msg,
+                             int index,
+                             TRAPS) const {
     if (_need_verify) {
       guarantee_property(property, msg, index, CHECK);
     } else {
@@ -346,7 +333,7 @@
     }
   }
 
-  inline void check_property(bool property, const char* msg, TRAPS) {
+  inline void check_property(bool property, const char* msg, TRAPS) const {
     if (_need_verify) {
       guarantee_property(property, msg, CHECK);
     } else {
@@ -354,136 +341,177 @@
     }
   }
 
-  inline void guarantee_property(bool b, const char* msg, int index, TRAPS) {
+  inline void guarantee_property(bool b,
+                                 const char* msg,
+                                 int index,
+                                 TRAPS) const {
     if (!b) { classfile_parse_error(msg, index, CHECK); }
   }
-  inline void guarantee_property(bool b, const char* msg, const char *name, TRAPS) {
+
+  inline void guarantee_property(bool b,
+                                 const char* msg,
+                                 const char *name,
+                                 TRAPS) const {
     if (!b) { classfile_parse_error(msg, name, CHECK); }
   }
-  inline void guarantee_property(bool b, const char* msg, int index, const char *name, TRAPS) {
+
+  inline void guarantee_property(bool b,
+                                 const char* msg,
+                                 int index,
+                                 const char *name,
+                                 TRAPS) const {
     if (!b) { classfile_parse_error(msg, index, name, CHECK); }
   }
 
-  void throwIllegalSignature(
-      const char* type, Symbol* name, Symbol* sig, TRAPS);
+  void throwIllegalSignature(const char* type,
+                             const Symbol* name,
+                             const Symbol* sig,
+                             TRAPS) const;
 
-  bool is_supported_version(u2 major, u2 minor);
-  bool has_illegal_visibility(jint flags);
+  void verify_constantvalue(const ConstantPool* const cp,
+                            int constantvalue_index,
+                            int signature_index,
+                            TRAPS) const;
+
+  void verify_legal_utf8(const unsigned char* buffer, int length, TRAPS) const;
+  void verify_legal_class_name(const Symbol* name, TRAPS) const;
+  void verify_legal_field_name(const Symbol* name, TRAPS) const;
+  void verify_legal_method_name(const Symbol* name, TRAPS) const;
 
-  void verify_constantvalue(int constantvalue_index, int signature_index, TRAPS);
-  void verify_legal_utf8(const unsigned char* buffer, int length, TRAPS);
-  void verify_legal_class_name(Symbol* name, TRAPS);
-  void verify_legal_field_name(Symbol* name, TRAPS);
-  void verify_legal_method_name(Symbol* name, TRAPS);
-  void verify_legal_field_signature(Symbol* fieldname, Symbol* signature, TRAPS);
-  int  verify_legal_method_signature(Symbol* methodname, Symbol* signature, TRAPS);
-  void verify_legal_class_modifiers(jint flags, TRAPS);
-  void verify_legal_field_modifiers(jint flags, bool is_interface, TRAPS);
-  void verify_legal_method_modifiers(jint flags, bool is_interface, Symbol* name, TRAPS);
-  bool verify_unqualified_name(char* name, unsigned int length, int type);
-  char* skip_over_field_name(char* name, bool slash_ok, unsigned int length);
-  char* skip_over_field_signature(char* signature, bool void_ok, unsigned int length, TRAPS);
+  void verify_legal_field_signature(const Symbol* fieldname,
+                                    const Symbol* signature,
+                                    TRAPS) const;
+  int  verify_legal_method_signature(const Symbol* methodname,
+                                     const Symbol* signature,
+                                     TRAPS) const;
 
-  bool is_anonymous() {
-    return _host_klass.not_null();
-  }
-  bool has_cp_patch_at(int index) {
+  void verify_legal_class_modifiers(jint flags, TRAPS) const;
+  void verify_legal_field_modifiers(jint flags, bool is_interface, TRAPS) const;
+  void verify_legal_method_modifiers(jint flags,
+                                     bool is_interface,
+                                     const Symbol* name,
+                                     TRAPS) const;
+
+  const char* skip_over_field_signature(const char* signature,
+                                        bool void_ok,
+                                        unsigned int length,
+                                        TRAPS) const;
+
+  bool has_cp_patch_at(int index) const {
     assert(index >= 0, "oob");
     return (_cp_patches != NULL
             && index < _cp_patches->length()
             && _cp_patches->adr_at(index)->not_null());
   }
-  Handle cp_patch_at(int index) {
+
+  Handle cp_patch_at(int index) const {
     assert(has_cp_patch_at(index), "oob");
     return _cp_patches->at(index);
   }
+
   Handle clear_cp_patch_at(int index) {
     Handle patch = cp_patch_at(index);
     _cp_patches->at_put(index, Handle());
     assert(!has_cp_patch_at(index), "");
     return patch;
   }
-  void patch_constant_pool(const constantPoolHandle& cp, int index, Handle patch, TRAPS);
+
+  void patch_constant_pool(ConstantPool* cp,
+                           int index,
+                           Handle patch,
+                           TRAPS);
 
   // Wrapper for constantTag.is_klass_[or_]reference.
   // In older versions of the VM, Klass*s cannot sneak into early phases of
   // constant pool construction, but in later versions they can.
   // %%% Let's phase out the old is_klass_reference.
-  bool valid_klass_reference_at(int index) {
-    return _cp->is_within_bounds(index) && _cp->tag_at(index).is_klass_or_reference();
+  bool valid_klass_reference_at(int index) const {
+    return _cp->is_within_bounds(index) &&
+             _cp->tag_at(index).is_klass_or_reference();
   }
 
   // Checks that the cpool index is in range and is a utf8
-  bool valid_symbol_at(int cpool_index) {
-    return (_cp->is_within_bounds(cpool_index) &&
-            _cp->tag_at(cpool_index).is_utf8());
+  bool valid_symbol_at(int cpool_index) const {
+    return _cp->is_within_bounds(cpool_index) &&
+             _cp->tag_at(cpool_index).is_utf8();
   }
 
-  void copy_localvariable_table(ConstMethod* cm, int lvt_cnt,
-                                u2* localvariable_table_length,
-                                u2** localvariable_table_start,
+  void copy_localvariable_table(const ConstMethod* cm,
+                                int lvt_cnt,
+                                u2* const localvariable_table_length,
+                                const u2**const localvariable_table_start,
                                 int lvtt_cnt,
-                                u2* localvariable_type_table_length,
-                                u2** localvariable_type_table_start,
+                                u2* const localvariable_type_table_length,
+                                const u2** const localvariable_type_table_start,
                                 TRAPS);
 
   void copy_method_annotations(ConstMethod* cm,
-                               u1* runtime_visible_annotations,
+                               const u1* runtime_visible_annotations,
                                int runtime_visible_annotations_length,
-                               u1* runtime_invisible_annotations,
+                               const u1* runtime_invisible_annotations,
                                int runtime_invisible_annotations_length,
-                               u1* runtime_visible_parameter_annotations,
+                               const u1* runtime_visible_parameter_annotations,
                                int runtime_visible_parameter_annotations_length,
-                               u1* runtime_invisible_parameter_annotations,
+                               const u1* runtime_invisible_parameter_annotations,
                                int runtime_invisible_parameter_annotations_length,
-                               u1* runtime_visible_type_annotations,
+                               const u1* runtime_visible_type_annotations,
                                int runtime_visible_type_annotations_length,
-                               u1* runtime_invisible_type_annotations,
+                               const u1* runtime_invisible_type_annotations,
                                int runtime_invisible_type_annotations_length,
-                               u1* annotation_default,
+                               const u1* annotation_default,
                                int annotation_default_length,
                                TRAPS);
 
   // lays out fields in class and returns the total oopmap count
-  void layout_fields(Handle class_loader, FieldAllocationCount* fac,
-                     ClassAnnotationCollector* parsed_annotations,
-                     FieldLayoutInfo* info, TRAPS);
+  void layout_fields(ConstantPool* cp,
+                     const FieldAllocationCount* fac,
+                     const ClassAnnotationCollector* parsed_annotations,
+                     FieldLayoutInfo* info,
+                     TRAPS);
 
  public:
-  // Constructor
-  ClassFileParser(ClassFileStream* st) { set_stream(st); }
+  ClassFileParser(ClassFileStream* stream,
+                  Symbol* name,
+                  ClassLoaderData* loader_data,
+                  Handle protection_domain,
+                  TempNewSymbol* parsed_name,
+                  const Klass* host_klass,
+                  GrowableArray<Handle>* cp_patches,
+                  Publicity pub_level,
+                  TRAPS);
+
   ~ClassFileParser();
 
-  // Parse .class file and return new Klass*. The Klass* is not hooked up
-  // to the system dictionary or any other structures, so a .class file can
-  // be loaded several times if desired.
-  // The system dictionary hookup is done by the caller.
-  //
-  // "parsed_name" is updated by this method, and is the name found
-  // while parsing the stream.
-  instanceKlassHandle parseClassFile(Symbol* name,
-                                     ClassLoaderData* loader_data,
-                                     Handle protection_domain,
-                                     TempNewSymbol& parsed_name,
-                                     bool verify,
-                                     TRAPS) {
-    KlassHandle no_host_klass;
-    return parseClassFile(name, loader_data, protection_domain, no_host_klass, NULL, parsed_name, verify, THREAD);
-  }
-  instanceKlassHandle parseClassFile(Symbol* name,
-                                     ClassLoaderData* loader_data,
-                                     Handle protection_domain,
-                                     KlassHandle host_klass,
-                                     GrowableArray<Handle>* cp_patches,
-                                     TempNewSymbol& parsed_name,
-                                     bool verify,
-                                     TRAPS);
+  InstanceKlass* create_instance_klass(TRAPS);
+
+  const ClassFileStream* clone_stream() const;
+
+  void set_klass_to_deallocate(InstanceKlass* klass);
+
+  int static_field_size() const;
+  int total_oop_map_count() const;
+  jint layout_size() const;
+
+  int vtable_size() const { return _vtable_size; }
+  int itable_size() const { return _itable_size; }
 
-  // Verifier checks
-  static void check_super_class_access(instanceKlassHandle this_klass, TRAPS);
-  static void check_super_interface_access(instanceKlassHandle this_klass, TRAPS);
-  static void check_final_method_override(instanceKlassHandle this_klass, TRAPS);
-  static void check_illegal_static_method(instanceKlassHandle this_klass, TRAPS);
+  u2 this_class_index() const { return _this_class_index; }
+  u2 super_class_index() const { return _super_class_index; }
+
+  bool is_anonymous() const { return _host_klass != NULL; }
+  bool is_interface() const { return _access_flags.is_interface(); }
+
+  const Klass* host_klass() const { return _host_klass; }
+  const GrowableArray<Handle>* cp_patches() const { return _cp_patches; }
+  ClassLoaderData* loader_data() const { return _loader_data; }
+  const Symbol* class_name() const { return _class_name; }
+  const Klass* super_klass() const { return _super_klass; }
+
+  ReferenceType reference_type() const { return _rt; }
+  AccessFlags access_flags() const { return _access_flags; }
+
+  bool is_internal() const { return INTERNAL == _pub_level; }
+
 };
 
 #endif // SHARE_VM_CLASSFILE_CLASSFILEPARSER_HPP
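
A minimal sketch of the reworked parser lifecycle, assuming the declarations
above: callers now construct a ClassFileParser up front and ask it for the
klass, instead of calling the removed parseClassFile() entry points. Local
names are placeholders and error paths are elided; the argument order follows
the constructor declared above.

    ClassFileParser parser(stream,               // ClassFileStream*
                           class_name,           // Symbol*
                           loader_data,          // ClassLoaderData*
                           protection_domain,    // Handle
                           &parsed_name,         // TempNewSymbol*
                           NULL,                 // host_klass (not anonymous)
                           NULL,                 // cp_patches
                           ClassFileParser::BROADCAST,
                           CHECK_NULL);
    InstanceKlass* ik = parser.create_instance_klass(CHECK_NULL);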
--- a/src/share/vm/classfile/classFileStream.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/classFileStream.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,19 +26,51 @@
 #include "classfile/classFileStream.hpp"
 #include "classfile/vmSymbols.hpp"
 
-void ClassFileStream::truncated_file_error(TRAPS) {
+const bool ClassFileStream::verify = true;
+const bool ClassFileStream::no_verification = false;
+
+void ClassFileStream::truncated_file_error(TRAPS) const {
   THROW_MSG(vmSymbols::java_lang_ClassFormatError(), "Truncated class file");
 }
 
-ClassFileStream::ClassFileStream(u1* buffer, int length, const char* source) {
-  _buffer_start = buffer;
-  _buffer_end   = buffer + length;
-  _current      = buffer;
-  _source       = source;
-  _need_verify  = false;
+ClassFileStream::ClassFileStream(const u1* buffer,
+                                 int length,
+                                 const char* source,
+                                 bool verify_stream) :
+  _buffer_start(buffer),
+  _buffer_end(buffer + length),
+  _current(buffer),
+  _source(source),
+  _need_verify(verify_stream) {}
+
+const u1* ClassFileStream::clone_buffer() const {
+  u1* const new_buffer_start = NEW_RESOURCE_ARRAY(u1, length());
+  memcpy(new_buffer_start, _buffer_start, length());
+  return new_buffer_start;
 }
 
-u1 ClassFileStream::get_u1(TRAPS) {
+const char* const ClassFileStream::clone_source() const {
+  const char* const src = source();
+  char* source_copy = NULL;
+  if (src != NULL) {
+    size_t source_len = strlen(src);
+    source_copy = NEW_RESOURCE_ARRAY(char, source_len + 1);
+    strncpy(source_copy, src, source_len + 1);
+  }
+  return source_copy;
+}
+
+// Caller responsible for ResourceMark
+// clone stream with a rewound position
+const ClassFileStream* ClassFileStream::clone() const {
+  const u1* const new_buffer_start = clone_buffer();
+  return new ClassFileStream(new_buffer_start,
+                             length(),
+                             clone_source(),
+                             need_verify());
+}
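
A minimal usage sketch for clone(), assuming the functions above; `stream` is
a placeholder for an existing ClassFileStream*. Both the copied buffer and the
copied source string are resource allocated, so the clone must not outlive the
caller's ResourceMark.

    {
      ResourceMark rm(THREAD);
      const ClassFileStream* rewound = stream->clone();
      assert(rewound->current_offset() == 0, "clone starts at the beginning");
      // ... re-read from `rewound` without disturbing `stream` ...
    }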
+
+u1 ClassFileStream::get_u1(TRAPS) const {
   if (_need_verify) {
     guarantee_more(1, CHECK_0);
   } else {
@@ -47,54 +79,54 @@
   return *_current++;
 }
 
-u2 ClassFileStream::get_u2(TRAPS) {
+u2 ClassFileStream::get_u2(TRAPS) const {
   if (_need_verify) {
     guarantee_more(2, CHECK_0);
   } else {
     assert(2 <= _buffer_end - _current, "buffer overflow");
   }
-  u1* tmp = _current;
+  const u1* tmp = _current;
   _current += 2;
-  return Bytes::get_Java_u2(tmp);
+  return Bytes::get_Java_u2((address)tmp);
 }
 
-u4 ClassFileStream::get_u4(TRAPS) {
+u4 ClassFileStream::get_u4(TRAPS) const {
   if (_need_verify) {
     guarantee_more(4, CHECK_0);
   } else {
     assert(4 <= _buffer_end - _current, "buffer overflow");
   }
-  u1* tmp = _current;
+  const u1* tmp = _current;
   _current += 4;
-  return Bytes::get_Java_u4(tmp);
+  return Bytes::get_Java_u4((address)tmp);
 }
 
-u8 ClassFileStream::get_u8(TRAPS) {
+u8 ClassFileStream::get_u8(TRAPS) const {
   if (_need_verify) {
     guarantee_more(8, CHECK_0);
   } else {
     assert(8 <= _buffer_end - _current, "buffer overflow");
   }
-  u1* tmp = _current;
+  const u1* tmp = _current;
   _current += 8;
-  return Bytes::get_Java_u8(tmp);
+  return Bytes::get_Java_u8((address)tmp);
 }
 
-void ClassFileStream::skip_u1(int length, TRAPS) {
+void ClassFileStream::skip_u1(int length, TRAPS) const {
   if (_need_verify) {
     guarantee_more(length, CHECK);
   }
   _current += length;
 }
 
-void ClassFileStream::skip_u2(int length, TRAPS) {
+void ClassFileStream::skip_u2(int length, TRAPS) const {
   if (_need_verify) {
     guarantee_more(length * 2, CHECK);
   }
   _current += length * 2;
 }
 
-void ClassFileStream::skip_u4(int length, TRAPS) {
+void ClassFileStream::skip_u4(int length, TRAPS) const {
   if (_need_verify) {
     guarantee_more(length * 4, CHECK);
   }
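
The (address) casts above feed Bytes::get_Java_u2/u4/u8, which decode the
class file's big-endian byte order (JVMS 4.1) regardless of host endianness.
A standalone sketch of that decoding; read_u2_be/read_u4_be are illustrative
names, not HotSpot APIs:

    #include <stdint.h>

    // Combine bytes most-significant first, independent of host order.
    static inline uint16_t read_u2_be(const uint8_t* p) {
      return (uint16_t)((p[0] << 8) | p[1]);
    }
    static inline uint32_t read_u4_be(const uint8_t* p) {
      return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
             ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }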
--- a/src/share/vm/classfile/classFileStream.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/classFileStream.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,65 +34,88 @@
 // The caller is responsible for deallocating the buffer and for using
 // ResourceMarks appropriately when constructing streams.
 
+class ClassPathEntry;
+
 class ClassFileStream: public ResourceObj {
  private:
-  u1*   _buffer_start; // Buffer bottom
-  u1*   _buffer_end;   // Buffer top (one past last element)
-  u1*   _current;      // Current buffer position
-  const char* _source; // Source of stream (directory name, ZIP/JAR archive name)
-  bool  _need_verify;  // True if verification is on for the class file
+  const u1* const _buffer_start; // Buffer bottom
+  const u1* const _buffer_end;   // Buffer top (one past last element)
+  mutable const u1* _current;    // Current buffer position
+  const char* const _source;     // Source of stream (directory name, ZIP/JAR archive name)
+  bool _need_verify;             // True if verification is on for the class file
+
+  void truncated_file_error(TRAPS) const;
 
-  void truncated_file_error(TRAPS);
+ protected:
+  const u1* clone_buffer() const;
+  const char* const clone_source() const;
+
  public:
-  // Constructor
-  ClassFileStream(u1* buffer, int length, const char* source);
+  static const bool no_verification;
+  static const bool verify;
+
+  ClassFileStream(const u1* buffer,
+                  int length,
+                  const char* source,
+                  bool verify_stream = verify); // to be verified by default
+
+  virtual const ClassFileStream* clone() const;
 
   // Buffer access
-  u1* buffer() const           { return _buffer_start; }
-  int length() const           { return _buffer_end - _buffer_start; }
-  u1* current() const          { return _current; }
-  void set_current(u1* pos)    { _current = pos; }
-  const char* source() const   { return _source; }
-  void set_verify(bool flag)   { _need_verify = flag; }
+  const u1* buffer() const { return _buffer_start; }
+  int length() const { return _buffer_end - _buffer_start; }
+  const u1* current() const { return _current; }
+  void set_current(const u1* pos) const {
+    assert(pos >= _buffer_start && pos <= _buffer_end, "invariant");
+    _current = pos;
+  }
 
-  void check_truncated_file(bool b, TRAPS) {
+  // for relative positioning
+  juint current_offset() const {
+    return (juint)(_current - _buffer_start);
+  }
+  const char* source() const { return _source; }
+  bool need_verify() const { return _need_verify; }
+  void set_verify(bool flag) { _need_verify = flag; }
+
+  void check_truncated_file(bool b, TRAPS) const {
     if (b) {
       truncated_file_error(THREAD);
     }
   }
 
-  void guarantee_more(int size, TRAPS) {
+  void guarantee_more(int size, TRAPS) const {
     size_t remaining = (size_t)(_buffer_end - _current);
     unsigned int usize = (unsigned int)size;
     check_truncated_file(usize > remaining, CHECK);
   }
 
   // Read u1 from stream
-  u1 get_u1(TRAPS);
-  u1 get_u1_fast() {
+  u1 get_u1(TRAPS) const;
+  u1 get_u1_fast() const {
     return *_current++;
   }
 
   // Read u2 from stream
-  u2 get_u2(TRAPS);
-  u2 get_u2_fast() {
-    u2 res = Bytes::get_Java_u2(_current);
+  u2 get_u2(TRAPS) const;
+  u2 get_u2_fast() const {
+    u2 res = Bytes::get_Java_u2((address)_current);
     _current += 2;
     return res;
   }
 
   // Read u4 from stream
-  u4 get_u4(TRAPS);
-  u4 get_u4_fast() {
-    u4 res = Bytes::get_Java_u4(_current);
+  u4 get_u4(TRAPS) const;
+  u4 get_u4_fast() const {
+    u4 res = Bytes::get_Java_u4((address)_current);
     _current += 4;
     return res;
   }
 
   // Read u8 from stream
-  u8 get_u8(TRAPS);
-  u8 get_u8_fast() {
-    u8 res = Bytes::get_Java_u8(_current);
+  u8 get_u8(TRAPS) const;
+  u8 get_u8_fast() const {
+    u8 res = Bytes::get_Java_u8((address)_current);
     _current += 8;
     return res;
   }
@@ -100,32 +123,32 @@
   // Get direct pointer into stream at current position.
   // Returns NULL if fewer than length elements remain. The caller is
   // responsible for calling skip below if the buffer contents are used.
-  u1* get_u1_buffer() {
+  const u1* get_u1_buffer() const {
     return _current;
   }
 
-  u2* get_u2_buffer() {
-    return (u2*) _current;
+  const u2* get_u2_buffer() const {
+    return (const u2*) _current;
   }
 
   // Skip length u1 or u2 elements from stream
-  void skip_u1(int length, TRAPS);
-  void skip_u1_fast(int length) {
+  void skip_u1(int length, TRAPS) const;
+  void skip_u1_fast(int length) const {
     _current += length;
   }
 
-  void skip_u2(int length, TRAPS);
-  void skip_u2_fast(int length) {
+  void skip_u2(int length, TRAPS) const;
+  void skip_u2_fast(int length) const {
     _current += 2 * length;
   }
 
-  void skip_u4(int length, TRAPS);
-  void skip_u4_fast(int length) {
+  void skip_u4(int length, TRAPS) const;
+  void skip_u4_fast(int length) const {
     _current += 4 * length;
   }
 
   // Tells whether eos is reached
-  bool at_eos() const          { return _current == _buffer_end; }
+  bool at_eos() const { return _current == _buffer_end; }
 };
 
 #endif // SHARE_VM_CLASSFILE_CLASSFILESTREAM_HPP
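
Sketch of the intended read pattern for this interface, assuming the header
above and a surrounding TRAPS method; `cfs` is a placeholder. One checked
guarantee_more() covers a fixed-size group, after which the unchecked _fast
readers are safe:

    cfs->guarantee_more(8, CHECK);        // magic (u4) + minor/major (2 x u2)
    const u4 magic         = cfs->get_u4_fast();
    const u2 minor_version = cfs->get_u2_fast();
    const u2 major_version = cfs->get_u2_fast();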
--- a/src/share/vm/classfile/classLoader.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/classLoader.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -23,13 +23,13 @@
  */
 
 #include "precompiled.hpp"
-#include "classfile/classFileParser.hpp"
 #include "classfile/classFileStream.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/classLoaderExt.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/jimage.hpp"
+#include "classfile/klassFactory.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
@@ -170,17 +170,13 @@
 }
 
 
-ClassPathEntry::ClassPathEntry() {
-  set_next(NULL);
-}
-
-
 ClassPathDirEntry::ClassPathDirEntry(const char* dir) : ClassPathEntry() {
   char* copy = NEW_C_HEAP_ARRAY(char, strlen(dir)+1, mtClass);
   strcpy(copy, dir);
   _dir = copy;
 }
 
+
 ClassFileStream* ClassPathDirEntry::open_stream(const char* name, TRAPS) {
   // construct full path name
   char path[JVM_MAXPATHLEN];
@@ -211,14 +207,17 @@
         if (UsePerfData) {
           ClassLoader::perf_sys_classfile_bytes_read()->inc(num_read);
         }
-        return new ClassFileStream(buffer, st.st_size, _dir);    // Resource allocated
+        // Resource allocated
+        return new ClassFileStream(buffer,
+                                   st.st_size,
+                                   _dir,
+                                   ClassFileStream::verify);
       }
     }
   }
   return NULL;
 }
 
-
 ClassPathZipEntry::ClassPathZipEntry(jzfile* zip, const char* zip_name) : ClassPathEntry() {
   _zip = zip;
   char *copy = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1, mtClass);
@@ -269,14 +268,18 @@
 
 ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
   jint filesize;
-  u1* buffer = open_entry(name, &filesize, false, CHECK_NULL);
+  const u1* buffer = open_entry(name, &filesize, false, CHECK_NULL);
   if (buffer == NULL) {
     return NULL;
   }
   if (UsePerfData) {
     ClassLoader::perf_sys_classfile_bytes_read()->inc(filesize);
   }
-  return new ClassFileStream(buffer, filesize, _zip_name); // Resource allocated
+  // Resource allocated
+  return new ClassFileStream(buffer,
+                             filesize,
+                             _zip_name,
+                             ClassFileStream::verify);
 }
 
 // invoke function for each entry in the zip file
@@ -366,7 +369,11 @@
     }
     char* data = NEW_RESOURCE_ARRAY(char, size);
     (*JImageGetResource)(_jimage, location, data, size);
-    return new ClassFileStream((u1*)data, (int)size, _name);  // Resource allocated
+    // Resource allocated
+    return new ClassFileStream((u1*)data,
+                               (int)size,
+                               _name,
+                               ClassFileStream::verify);
   }
 
   return NULL;
@@ -996,74 +1003,94 @@
   return result();
 }
 
+// caller needs ResourceMark
+const char* ClassLoader::file_name_for_class_name(const char* class_name,
+                                                  int class_name_len) {
+  assert(class_name != NULL, "invariant");
+  assert((int)strlen(class_name) == class_name_len, "invariant");
 
-instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
-  ResourceMark rm(THREAD);
-  const char* class_name = h_name->as_C_string();
+  static const char class_suffix[] = ".class";
+
+  char* const file_name = NEW_RESOURCE_ARRAY(char,
+                                             class_name_len +
+                                             sizeof(class_suffix)); // includes term NULL
+
+  strncpy(file_name, class_name, class_name_len);
+  strncpy(&file_name[class_name_len], class_suffix, sizeof(class_suffix));
+
+  return file_name;
+}
+
+instanceKlassHandle ClassLoader::load_class(Symbol* name, TRAPS) {
+
+  assert(name != NULL, "invariant");
+  assert(THREAD->is_Java_thread(), "must be a JavaThread");
+
+  ResourceMark rm;
+  HandleMark hm;
+
+  const char* const class_name = name->as_C_string();
+
   EventMark m("loading class %s", class_name);
   ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
 
-  stringStream st;
-  // st.print() uses too much stack space while handling a StackOverflowError
-  // st.print("%s.class", h_name->as_utf8());
-  st.print_raw(h_name->as_utf8());
-  st.print_raw(".class");
-  const char* file_name = st.as_string();
+  const char* const file_name = file_name_for_class_name(class_name,
+                                                         name->utf8_length());
+  assert(file_name != NULL, "invariant");
+
   ClassLoaderExt::Context context(class_name, file_name, THREAD);
 
-  // Lookup stream for parsing .class file
+  // Lookup stream
   ClassFileStream* stream = NULL;
   int classpath_index = 0;
-  ClassPathEntry* e = NULL;
-  instanceKlassHandle h;
+  ClassPathEntry* e = _first_entry;
   {
     PerfClassTraceTime vmtimer(perf_sys_class_lookup_time(),
-                               ((JavaThread*) THREAD)->get_thread_stat()->perf_timers_addr(),
-                               PerfClassTraceTime::CLASS_LOAD);
-    e = _first_entry;
-    while (e != NULL) {
+      ((JavaThread*)THREAD)->get_thread_stat()->perf_timers_addr(),
+      PerfClassTraceTime::CLASS_LOAD);
+
+    for (; e != NULL; e = e->next(), ++classpath_index) {
       stream = e->open_stream(file_name, CHECK_NULL);
+      if (NULL == stream) {
+        continue;
+      }
       if (!context.check(stream, classpath_index)) {
-        return h; // NULL
+        return NULL;
       }
-      if (stream != NULL) {
-        break;
-      }
-      e = e->next();
-      ++classpath_index;
+      break;
     }
   }
 
-  if (stream != NULL) {
-    // class file found, parse it
-    ClassFileParser parser(stream);
-    ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
-    Handle protection_domain;
-    TempNewSymbol parsed_name = NULL;
-    instanceKlassHandle result = parser.parseClassFile(h_name,
-                                                       loader_data,
-                                                       protection_domain,
-                                                       parsed_name,
-                                                       context.should_verify(classpath_index),
-                                                       THREAD);
-    if (HAS_PENDING_EXCEPTION) {
-      ResourceMark rm;
-      if (DumpSharedSpaces) {
-        tty->print_cr("Preload Error: Failed to load %s", class_name);
-      }
-      return h;
-    }
-    h = context.record_result(classpath_index, e, result, THREAD);
-  } else {
+  if (NULL == stream) {
     if (DumpSharedSpaces) {
       tty->print_cr("Preload Warning: Cannot find %s", class_name);
     }
+    return NULL;
   }
 
-  return h;
+  stream->set_verify(context.should_verify(classpath_index));
+
+  ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+  Handle protection_domain;
+
+  instanceKlassHandle result = KlassFactory::create_from_stream(stream,
+                                                                name,
+                                                                loader_data,
+                                                                protection_domain,
+                                                                NULL, // host_klass
+                                                                NULL, // cp_patches
+                                                                NULL, // parsed_name
+                                                                THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    if (DumpSharedSpaces) {
+      tty->print_cr("Preload Error: Failed to load %s", class_name);
+    }
+    return NULL;
+  }
+
+  return context.record_result(classpath_index, e, result, THREAD);
 }
 
-
 void ClassLoader::create_package_info_table(HashtableBucket<mtClass> *t, int length,
                                             int number_of_entries) {
   assert(_package_hash_table == NULL, "One package info table allowed.");
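
A standalone sketch of the mapping file_name_for_class_name() performs: the
class name plus a ".class" suffix, built with plain copies in a single
allocation rather than the previous stringStream (which used too much stack
while handling a StackOverflowError). file_name_for() is illustrative only:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char* file_name_for(const char* class_name) {
      static const char class_suffix[] = ".class";
      size_t len = strlen(class_name);
      char* file_name = (char*)malloc(len + sizeof(class_suffix));
      memcpy(file_name, class_name, len);
      memcpy(file_name + len, class_suffix, sizeof(class_suffix)); // incl. NUL
      return file_name;
    }

    int main(void) {
      char* n = file_name_for("java/lang/String");
      printf("%s\n", n);   // prints: java/lang/String.class
      free(n);
      return 0;
    }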
--- a/src/share/vm/classfile/classLoader.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/classLoader.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -25,8 +25,9 @@
 #ifndef SHARE_VM_CLASSFILE_CLASSLOADER_HPP
 #define SHARE_VM_CLASSFILE_CLASSLOADER_HPP
 
-#include "classfile/classFileParser.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/perfData.hpp"
+#include "utilities/exceptions.hpp"
 #include "utilities/macros.hpp"
 
 // The VM class loader.
@@ -35,41 +36,39 @@
 // Name of boot module image
 #define  BOOT_IMAGE_NAME "bootmodules.jimage"
 
-// Class path entry (directory or zip file)
-
 class JImageFile;
+class ClassFileStream;
 
-class ClassPathEntry: public CHeapObj<mtClass> {
- private:
+class ClassPathEntry : public CHeapObj<mtClass> {
+private:
   ClassPathEntry* _next;
- public:
+public:
   // Next entry in class path
-  ClassPathEntry* next()              { return _next; }
+  ClassPathEntry* next() const { return _next; }
   void set_next(ClassPathEntry* next) {
     // may have unlocked readers, so write atomically.
     OrderAccess::release_store_ptr(&_next, next);
   }
-  virtual bool is_jar_file() = 0;
-  virtual const char* name() = 0;
-  virtual JImageFile* jimage() = 0;
+  virtual bool is_jar_file() const = 0;
+  virtual const char* name() const = 0;
+  virtual JImageFile* jimage() const = 0;
   // Constructor
-  ClassPathEntry();
+  ClassPathEntry() : _next(NULL) {}
   // Attempt to locate file_name through this class path entry.
   // Returns a class file parsing stream if successful.
   virtual ClassFileStream* open_stream(const char* name, TRAPS) = 0;
   // Debugging
   NOT_PRODUCT(virtual void compile_the_world(Handle loader, TRAPS) = 0;)
-  NOT_PRODUCT(virtual bool is_jrt() = 0;)
+  NOT_PRODUCT(virtual bool is_jrt() = 0;)
 };
 
-
 class ClassPathDirEntry: public ClassPathEntry {
  private:
   const char* _dir;           // Name of directory
  public:
-  bool is_jar_file()       { return false;  }
-  const char* name()       { return _dir; }
-  JImageFile* jimage()     { return NULL; }
+  bool is_jar_file() const { return false;  }
+  const char* name() const { return _dir; }
+  JImageFile* jimage() const { return NULL; }
   ClassPathDirEntry(const char* dir);
   ClassFileStream* open_stream(const char* name, TRAPS);
   // Debugging
@@ -97,9 +96,9 @@
   jzfile* _zip;              // The zip archive
   const char*   _zip_name;   // Name of zip archive
  public:
-  bool is_jar_file()       { return true;  }
-  const char* name()       { return _zip_name; }
-  JImageFile* jimage()     { return NULL; }
+  bool is_jar_file() const { return true;  }
+  const char* name() const { return _zip_name; }
+  JImageFile* jimage() const { return NULL; }
   ClassPathZipEntry(jzfile* zip, const char* zip_name);
   ~ClassPathZipEntry();
   u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
@@ -117,10 +116,10 @@
   JImageFile* _jimage;
   const char* _name;
 public:
-  bool is_jar_file()  { return false;  }
-  bool is_open()  { return _jimage != NULL; }
-  const char* name() { return _name == NULL ? "" : _name; }
-  JImageFile* jimage() { return _jimage; }
+  bool is_jar_file() const { return false; }
+  bool is_open() const { return _jimage != NULL; }
+  const char* name() const { return _name == NULL ? "" : _name; }
+  JImageFile* jimage() const { return _jimage; }
   ClassPathImageEntry(JImageFile* jimage, const char* name);
   ~ClassPathImageEntry();
   static void name_to_package(const char* name, char* buffer, int length);
@@ -212,6 +211,10 @@
   // Canonicalizes path names, so strcmp will work properly. This is mainly
   // to avoid confusing the zip library
   static bool get_canonical_path(const char* orig, char* out, int len);
+
+  static const char* file_name_for_class_name(const char* class_name,
+                                              int class_name_len);
+
  public:
   static jboolean decompress(void *in, u8 inSize, void *out, u8 outSize, char **pmsg);
   static int crc32(int crc, const char* buf, int len);
@@ -282,7 +285,7 @@
   }
 
   // Load individual .class file
-  static instanceKlassHandle load_classfile(Symbol* h_name, TRAPS);
+  static instanceKlassHandle load_class(Symbol* class_name, TRAPS);
 
   // If the specified package has been loaded by the system, then returns
   // the name of the directory or ZIP file that the package was loaded from.
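
With the accessors now const-qualified, subclasses must match the new
signatures exactly or they silently stop overriding. A minimal sketch of a
conforming subclass; ClassPathMemoryEntry is hypothetical and the debug-only
NOT_PRODUCT virtuals are elided:

    class ClassPathMemoryEntry : public ClassPathEntry {
     private:
      const u1* _bytes;
      int       _length;
     public:
      ClassPathMemoryEntry(const u1* bytes, int length)
        : _bytes(bytes), _length(length) {}
      bool is_jar_file() const   { return false; }
      const char* name() const   { return "<memory>"; }
      JImageFile* jimage() const { return NULL; }
      ClassFileStream* open_stream(const char* name, TRAPS) {
        return new ClassFileStream(_bytes, _length, "<memory>",
                                   ClassFileStream::verify);
      }
    };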
--- a/src/share/vm/classfile/classLoaderData.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/classLoaderData.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -166,7 +166,9 @@
   }
 }
 
-void ClassLoaderData::record_dependency(Klass* k, TRAPS) {
+void ClassLoaderData::record_dependency(const Klass* k, TRAPS) {
+  assert(k != NULL, "invariant");
+
   ClassLoaderData * const from_cld = this;
   ClassLoaderData * const to_cld = k->class_loader_data();
 
@@ -273,16 +275,18 @@
   }
 }
 
-void ClassLoaderData::add_class(Klass* k) {
-  MutexLockerEx ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
-  Klass* old_value = _klasses;
-  k->set_next_link(old_value);
-  // Make sure linked class is stable, since the class list is walked without a lock
-  OrderAccess::storestore();
-  // link the new item into the list
-  _klasses = k;
+void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
+  {
+    MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
+    Klass* old_value = _klasses;
+    k->set_next_link(old_value);
+    // Make sure linked class is stable, since the class list is walked without a lock
+    OrderAccess::storestore();
+    // link the new item into the list
+    _klasses = k;
+  }
 
-  if (TraceClassLoaderData && Verbose && k->class_loader_data() != NULL) {
+  if (publicize && TraceClassLoaderData && Verbose && k->class_loader_data() != NULL) {
     ResourceMark rm;
     tty->print_cr("[TraceClassLoaderData] Adding k: " PTR_FORMAT " %s to CLD: "
                   PTR_FORMAT " loader: " PTR_FORMAT " %s",
--- a/src/share/vm/classfile/classLoaderData.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/classLoaderData.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -275,7 +275,7 @@
   // Used to make sure that this CLD is not unloaded.
   void set_keep_alive(bool value) { _keep_alive = value; }
 
-  unsigned int identity_hash() {
+  unsigned int identity_hash() const {
     return _class_loader == NULL ? 0 : _class_loader->identity_hash();
   }
 
@@ -294,10 +294,10 @@
   const char* loader_name();
 
   jobject add_handle(Handle h);
-  void add_class(Klass* k);
+  void add_class(Klass* k, bool publicize = true);
   void remove_class(Klass* k);
   bool contains_klass(Klass* k);
-  void record_dependency(Klass* to, TRAPS);
+  void record_dependency(const Klass* to, TRAPS);
   void init_dependencies(TRAPS);
 
   void add_to_deallocate_list(Metadata* m);
@@ -312,7 +312,7 @@
   Metaspace* rw_metaspace();
   void initialize_shared_metaspaces();
 
-  int shared_class_loader_id() {
+  int shared_class_loader_id() const {
     return _shared_class_loader_id;
   }
   void set_shared_class_loader_id(int id) {
--- a/src/share/vm/classfile/classLoaderExt.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/classLoaderExt.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -41,7 +41,7 @@
       _file_name = file_name;
     }
 
-    bool check(ClassFileStream* stream, const int classpath_index) {
+    bool check(const ClassFileStream* stream, const int classpath_index) {
       return true;
     }
 
@@ -50,7 +50,8 @@
     }
 
     instanceKlassHandle record_result(const int classpath_index,
-                                      ClassPathEntry* e, instanceKlassHandle result, TRAPS) {
+                                      const ClassPathEntry* e,
+                                      instanceKlassHandle result, TRAPS) {
       if (ClassLoader::add_package(_file_name, classpath_index, THREAD)) {
         if (DumpSharedSpaces) {
           result->set_shared_classpath_index(classpath_index);
--- a/src/share/vm/classfile/compactHashtable.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/compactHashtable.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/compactHashtable.inline.hpp"
 #include "classfile/javaClasses.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "prims/jvm.h"
--- a/src/share/vm/classfile/compactHashtable.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/compactHashtable.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,8 +27,6 @@
 
 #include "classfile/stringTable.hpp"
 #include "classfile/symbolTable.hpp"
-#include "memory/allocation.inline.hpp"
-#include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
 #include "services/diagnosticCommand.hpp"
 #include "utilities/hashtable.hpp"
@@ -117,13 +115,8 @@
     return _required_bytes;
   }
 
-  void add(unsigned int hash, Symbol* symbol) {
-    add(hash, new Entry(hash, symbol));
-  }
-
-  void add(unsigned int hash, oop string) {
-    add(hash, new Entry(hash, string));
-  }
+  inline void add(unsigned int hash, Symbol* symbol);
+  inline void add(unsigned int hash, oop string);
 
 private:
   void add(unsigned int hash, Entry* entry);
@@ -219,27 +212,10 @@
   juint* _buckets;
 
   inline Symbol* lookup_entry(CompactHashtable<Symbol*, char>* const t,
-                              juint* addr, const char* name, int len) {
-    Symbol* sym = (Symbol*)((void*)(_base_address + *addr));
-    if (sym->equals(name, len)) {
-      assert(sym->refcount() == -1, "must be shared");
-      return sym;
-    }
-
-    return NULL;
-  }
+                              juint* addr, const char* name, int len);
 
   inline oop lookup_entry(CompactHashtable<oop, char>* const t,
-                        juint* addr, const char* name, int len) {
-    narrowOop obj = (narrowOop)(*addr);
-    oop string = oopDesc::decode_heap_oop(obj);
-    if (java_lang_String::equals(string, (jchar*)name, len)) {
-      return string;
-    }
-
-    return NULL;
-  }
-
+                          juint* addr, const char* name, int len);
 public:
   CompactHashtable() {
     _entry_count = 0;
@@ -257,41 +233,7 @@
   }
 
   // Lookup an entry from the compact table
-  inline T lookup(const N* name, unsigned int hash, int len) {
-    if (_entry_count > 0) {
-      assert(!DumpSharedSpaces, "run-time only");
-      int index = hash % _bucket_count;
-      juint bucket_info = _buckets[index];
-      juint bucket_offset = BUCKET_OFFSET(bucket_info);
-      int   bucket_type = BUCKET_TYPE(bucket_info);
-      juint* bucket = _buckets + bucket_offset;
-      juint* bucket_end = _buckets;
-
-      if (bucket_type == COMPACT_BUCKET_TYPE) {
-        // the compact bucket has one entry with entry offset only
-        T res = lookup_entry(this, &bucket[0], name, len);
-        if (res != NULL) {
-          return res;
-        }
-      } else {
-        // This is a regular bucket, which has more than one
-        // entries. Each entry is a pair of entry (hash, offset).
-        // Seek until the end of the bucket.
-        bucket_end += BUCKET_OFFSET(_buckets[index + 1]);
-        while (bucket < bucket_end) {
-          unsigned int h = (unsigned int)(bucket[0]);
-          if (h == hash) {
-            T res = lookup_entry(this, &bucket[1], name, len);
-            if (res != NULL) {
-              return res;
-            }
-          }
-          bucket += 2;
-        }
-      }
-    }
-    return NULL;
-  }
+  inline T lookup(const N* name, unsigned int hash, int len);
 
   // iterate over symbols
   void symbols_do(SymbolClosure *cl);
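
The template member definitions above now live out of line in
compactHashtable.inline.hpp, and only translation units that instantiate them
include that file (as stringTable.cpp and symbolTable.cpp do below). A
generic, compilable sketch of the split; Foo and its members are hypothetical:

    // foo.hpp: declaration only, cheap to include everywhere
    template <class T>
    class Foo {
     public:
      T lookup(int i) const;     // defined in foo.inline.hpp
     private:
      T _data[16];
    };

    // foo.inline.hpp: definition plus any heavyweight includes it alone needs
    template <class T>
    inline T Foo<T>::lookup(int i) const {
      return _data[i & 15];      // trivial body for illustration
    }

    // user.cpp: an instantiating TU includes foo.inline.hpp, not foo.hpp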
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/compactHashtable.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_COMPACTHASHTABLE_INLINE_HPP
+#define SHARE_VM_CLASSFILE_COMPACTHASHTABLE_INLINE_HPP
+
+#include "classfile/compactHashtable.hpp"
+#include "memory/allocation.inline.hpp"
+#include "oops/oop.inline.hpp"
+
+template <class T, class N>
+inline Symbol* CompactHashtable<T, N>::lookup_entry(CompactHashtable<Symbol*, char>* const t,
+                                             juint* addr, const char* name, int len) {
+  Symbol* sym = (Symbol*)((void*)(_base_address + *addr));
+  if (sym->equals(name, len)) {
+    assert(sym->refcount() == -1, "must be shared");
+    return sym;
+  }
+
+  return NULL;
+}
+
+template <class T, class N>
+inline oop CompactHashtable<T, N>::lookup_entry(CompactHashtable<oop, char>* const t,
+                                                juint* addr, const char* name, int len) {
+  narrowOop obj = (narrowOop)(*addr);
+  oop string = oopDesc::decode_heap_oop(obj);
+  if (java_lang_String::equals(string, (jchar*)name, len)) {
+    return string;
+  }
+
+  return NULL;
+}
+
+template <class T, class N>
+inline T CompactHashtable<T,N>::lookup(const N* name, unsigned int hash, int len) {
+  if (_entry_count > 0) {
+    assert(!DumpSharedSpaces, "run-time only");
+    int index = hash % _bucket_count;
+    juint bucket_info = _buckets[index];
+    juint bucket_offset = BUCKET_OFFSET(bucket_info);
+    int   bucket_type = BUCKET_TYPE(bucket_info);
+    juint* bucket = _buckets + bucket_offset;
+    juint* bucket_end = _buckets;
+
+    if (bucket_type == COMPACT_BUCKET_TYPE) {
+      // the compact bucket has one entry with entry offset only
+      T res = lookup_entry(this, &bucket[0], name, len);
+      if (res != NULL) {
+        return res;
+      }
+    } else {
+      // This is a regular bucket, which has more than one
+      // entries. Each entry is a pair of entry (hash, offset).
+      // Seek until the end of the bucket.
+      bucket_end += BUCKET_OFFSET(_buckets[index + 1]);
+      while (bucket < bucket_end) {
+        unsigned int h = (unsigned int)(bucket[0]);
+        if (h == hash) {
+          T res = lookup_entry(this, &bucket[1], name, len);
+          if (res != NULL) {
+            return res;
+          }
+        }
+        bucket += 2;
+      }
+    }
+  }
+  return NULL;
+}
+
+inline void CompactHashtableWriter::add(unsigned int hash, Symbol* symbol) {
+  add(hash, new Entry(hash, symbol));
+}
+
+inline void CompactHashtableWriter::add(unsigned int hash, oop string) {
+  add(hash, new Entry(hash, string));
+}
+
+
+#endif // SHARE_VM_CLASSFILE_COMPACTHASHTABLE_INLINE_HPP
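
A standalone toy of the bucket walk in lookup() above. The real
BUCKET_OFFSET/BUCKET_TYPE bit layout is not shown in this change, so the split
below (low two bits = type) is assumed purely for illustration:

    #include <stdint.h>

    #define TOY_BUCKET_TYPE(info)   ((int)((info) & 0x3))
    #define TOY_BUCKET_OFFSET(info) ((uint32_t)((info) >> 2))
    #define TOY_COMPACT_BUCKET_TYPE 1

    static uint32_t toy_lookup(const uint32_t* buckets, int bucket_count,
                               uint32_t hash) {
      int index = (int)(hash % (uint32_t)bucket_count);
      uint32_t info = buckets[index];
      const uint32_t* bucket = buckets + TOY_BUCKET_OFFSET(info);
      if (TOY_BUCKET_TYPE(info) == TOY_COMPACT_BUCKET_TYPE) {
        return bucket[0];                 // single entry: offset only
      }
      // regular bucket: (hash, offset) pairs, bounded by the next bucket
      const uint32_t* end = buckets + TOY_BUCKET_OFFSET(buckets[index + 1]);
      for (; bucket < end; bucket += 2) {
        if (bucket[0] == hash) {
          return bucket[1];               // offset of the matching entry
        }
      }
      return 0;                           // not found
    }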
--- a/src/share/vm/classfile/defaultMethods.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/defaultMethods.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -30,6 +30,7 @@
 #include "memory/allocation.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/resourceArea.hpp"
+#include "runtime/handles.inline.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/thread.hpp"
 #include "oops/instanceKlass.hpp"
@@ -606,7 +607,7 @@
 }
 
 static GrowableArray<EmptyVtableSlot*>* find_empty_vtable_slots(
-    InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {
+    InstanceKlass* klass, const GrowableArray<Method*>* mirandas, TRAPS) {
 
   assert(klass != NULL, "Must be valid class");
 
@@ -777,7 +778,8 @@
 // candidate).  These methods are then added to the class's method list.
 // The JVM does not create bridges nor handle generic signatures here.
 void DefaultMethods::generate_default_methods(
-    InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {
+    InstanceKlass* klass, const GrowableArray<Method*>* mirandas, TRAPS) {
+  assert(klass != NULL, "invariant");
 
   // This resource mark is the bound for all memory allocation that takes
   // place during default method processing.  After this goes out of scope,
@@ -787,6 +789,7 @@
   ResourceMark rm(THREAD);
 
   // Keep entire hierarchy alive for the duration of the computation
+  constantPoolHandle cp(THREAD, klass->constants());
   KeepAliveRegistrar keepAlive(THREAD);
   KeepAliveVisitor loadKeepAlive(&keepAlive);
   loadKeepAlive.run(klass);
--- a/src/share/vm/classfile/defaultMethods.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/defaultMethods.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -43,6 +43,6 @@
   // default method.  Overpass methods are added to the methods lists for
   // the class.
   static void generate_default_methods(
-      InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS);
+      InstanceKlass* klass, const GrowableArray<Method*>* mirandas, TRAPS);
 };
 #endif // SHARE_VM_CLASSFILE_DEFAULTMETHODS_HPP
--- a/src/share/vm/classfile/dictionary.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/dictionary.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -54,7 +54,7 @@
                              Symbol* name, ClassLoaderData* loader_data);
 
 protected:
-  DictionaryEntry* bucket(int i) {
+  DictionaryEntry* bucket(int i) const {
     return (DictionaryEntry*)Hashtable<Klass*, mtClass>::bucket(i);
   }
 
@@ -323,7 +323,7 @@
     }
   }
 
-  bool equals(Symbol* class_name, ClassLoaderData* loader_data) const {
+  bool equals(const Symbol* class_name, ClassLoaderData* loader_data) const {
     Klass* klass = (Klass*)literal();
     return (klass->name() == class_name && _loader_data == loader_data);
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/klassFactory.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classFileParser.hpp"
+#include "classfile/classFileStream.hpp"
+#include "classfile/classLoaderData.hpp"
+#include "classfile/klassFactory.hpp"
+#include "memory/resourceArea.hpp"
+#include "prims/jvmtiEnvBase.hpp"
+
+static ClassFileStream* prologue(ClassFileStream* stream,
+                                 Symbol* name,
+                                 ClassLoaderData* loader_data,
+                                 Handle protection_domain,
+                                 JvmtiCachedClassFileData** cached_class_file,
+                                 TRAPS) {
+
+  assert(stream != NULL, "invariant");
+
+  if (JvmtiExport::should_post_class_file_load_hook()) {
+    assert(THREAD->is_Java_thread(), "must be a JavaThread");
+    const JavaThread* jt = (JavaThread*)THREAD;
+
+    Handle class_loader(THREAD, loader_data->class_loader());
+
+    // Get the cached class file bytes (if any) from the class that
+    // is being redefined or retransformed. We use jvmti_thread_state()
+    // instead of JvmtiThreadState::state_for(jt) so we don't allocate
+    // a JvmtiThreadState any earlier than necessary. This will help
+    // avoid the bug described by 7126851.
+
+    JvmtiThreadState* state = jt->jvmti_thread_state();
+
+    if (state != NULL) {
+      KlassHandle* h_class_being_redefined =
+        state->get_class_being_redefined();
+
+      if (h_class_being_redefined != NULL) {
+        instanceKlassHandle ikh_class_being_redefined =
+          instanceKlassHandle(THREAD, (*h_class_being_redefined)());
+
+        *cached_class_file = ikh_class_being_redefined->get_cached_class_file();
+      }
+    }
+
+    unsigned char* ptr = const_cast<unsigned char*>(stream->buffer());
+    unsigned char* end_ptr = ptr + stream->length();
+
+    JvmtiExport::post_class_file_load_hook(name,
+                                           class_loader,
+                                           protection_domain,
+                                           &ptr,
+                                           &end_ptr,
+                                           cached_class_file);
+
+    if (ptr != stream->buffer()) {
+      // JVMTI agent has modified class file data.
+      // Set new class file stream using JVMTI agent modified class file data.
+      stream = new ClassFileStream(ptr,
+                                   end_ptr - ptr,
+                                   stream->source(),
+                                   stream->need_verify());
+    }
+  }
+
+  return stream;
+}
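
prologue() above is the VM side of the JVMTI ClassFileLoadHook contract; when
an agent returns a replacement buffer, ptr no longer equals stream->buffer()
and the stream is swapped. A sketch of the agent side, using only the standard
JVMTI API (not part of this change):

    #include <jvmti.h>
    #include <string.h>

    static void JNICALL
    OnClassFileLoadHook(jvmtiEnv* jvmti, JNIEnv* jni,
                        jclass class_being_redefined, jobject loader,
                        const char* name, jobject protection_domain,
                        jint class_data_len, const unsigned char* class_data,
                        jint* new_class_data_len,
                        unsigned char** new_class_data) {
      unsigned char* copy = NULL;
      if (jvmti->Allocate(class_data_len, &copy) == JVMTI_ERROR_NONE) {
        memcpy(copy, class_data, (size_t)class_data_len);
        // ... rewrite `copy` in place ...
        *new_class_data_len = class_data_len;
        *new_class_data = copy;           // the VM now parses these bytes
      }
    }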
+
+
+instanceKlassHandle KlassFactory::create_from_stream(ClassFileStream* stream,
+                                                     Symbol* name,
+                                                     ClassLoaderData* loader_data,
+                                                     Handle protection_domain,
+                                                     const Klass* host_klass,
+                                                     GrowableArray<Handle>* cp_patches,
+                                                     TempNewSymbol* parsed_name,
+                                                     TRAPS) {
+
+  assert(stream != NULL, "invariant");
+  assert(loader_data != NULL, "invariant");
+  assert(THREAD->is_Java_thread(), "must be a JavaThread");
+
+  ResourceMark rm;
+  HandleMark hm;
+
+  JvmtiCachedClassFileData* cached_class_file = NULL;
+
+  stream = prologue(stream,
+                    name,
+                    loader_data,
+                    protection_domain,
+                    &cached_class_file,
+                    CHECK_NULL);
+
+  ClassFileParser parser(stream,
+                         name,
+                         loader_data,
+                         protection_domain,
+                         parsed_name,
+                         host_klass,
+                         cp_patches,
+                         ClassFileParser::BROADCAST, // publicity level
+                         CHECK_NULL);
+
+  instanceKlassHandle result = parser.create_instance_klass(CHECK_NULL);
+  assert(result == parser.create_instance_klass(THREAD), "invariant");
+
+  if (result.is_null()) {
+    return NULL;
+  }
+
+  if (cached_class_file != NULL) {
+    // JVMTI: we have an InstanceKlass now, tell it about the cached bytes
+    result->set_cached_class_file(cached_class_file);
+  }
+
+  return result;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/klassFactory.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_KLASSFACTORY_HPP
+#define SHARE_VM_CLASSFILE_KLASSFACTORY_HPP
+
+#include "memory/allocation.inline.hpp"
+#include "runtime/handles.hpp"
+
+class ClassFileStream;
+class ClassLoaderData;
+template <typename>
+class GrowableArray;
+class Klass;
+class Symbol;
+class TempNewSymbol;
+
+/*
+ * KlassFactory is an interface to implementations of the following mapping/function:
+ *
+ * Summary: create a VM internal runtime representation ("Klass")
+ *          from a bytestream (classfile).
+ *
+ * Input:  a named bytestream in the Java class file format (see JVMS, chapter 4).
+ * Output: a VM runtime representation of a Java class
+ *
+ * Pre-conditions:
+ *   a non-NULL ClassFileStream* // the classfile bytestream
+ *   a non-NULL Symbol*          // the name of the class
+ *   a non-NULL ClassLoaderData* // the metaspace allocator
+ *   (no pending exceptions)
+ *
+ * Returns:
+ *   if the returned value is non-NULL, that value is an indirection (pointer/handle)
+ *   to a Klass. The caller will not have a pending exception.
+ *
+ *   On broken invariants and/or runtime errors the returned value will be
+ *   NULL (or a NULL handle) and the caller *might* now have a pending exception.
+ *
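+ * Illustrative call shape (the NULL arguments mirror the common,
+ * non-anonymous case; see the friend classes below for the real callers):
+ *
+ *   instanceKlassHandle ik =
+ *       KlassFactory::create_from_stream(stream, name, loader_data,
+ *                                        protection_domain,
+ *                                        NULL,  // host_klass
+ *                                        NULL,  // cp_patches
+ *                                        NULL,  // parsed_name
+ *                                        CHECK_NULL);
+ *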
+ */
+
+class KlassFactory : AllStatic {
+
+  // approved clients
+  friend class ClassLoader;
+  friend class ClassLoaderExt;
+  friend class SystemDictionary;
+
+ private:
+  static instanceKlassHandle create_from_stream(ClassFileStream* stream,
+                                                Symbol* name,
+                                                ClassLoaderData* loader_data,
+                                                Handle protection_domain,
+                                                const Klass* host_klass,
+                                                GrowableArray<Handle>* cp_patches,
+                                                TempNewSymbol* parsed_name,
+                                                TRAPS);
+};
+
+#endif // SHARE_VM_CLASSFILE_KLASSFACTORY_HPP
--- a/src/share/vm/classfile/stringTable.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/stringTable.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -24,7 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/altHashing.hpp"
-#include "classfile/compactHashtable.hpp"
+#include "classfile/compactHashtable.inline.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
--- a/src/share/vm/classfile/symbolTable.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/symbolTable.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -24,7 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/altHashing.hpp"
-#include "classfile/compactHashtable.hpp"
+#include "classfile/compactHashtable.inline.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
--- a/src/share/vm/classfile/systemDictionary.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/systemDictionary.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -23,9 +23,14 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/classFileParser.hpp"
+#include "classfile/classFileStream.hpp"
+#include "classfile/classLoader.hpp"
 #include "classfile/classLoaderData.inline.hpp"
+#include "classfile/classLoaderExt.hpp"
 #include "classfile/dictionary.hpp"
 #include "classfile/javaClasses.inline.hpp"
+#include "classfile/klassFactory.hpp"
 #include "classfile/loaderConstraints.hpp"
 #include "classfile/placeholders.hpp"
 #include "classfile/resolutionErrors.hpp"
@@ -616,6 +621,25 @@
   return (nh);
 }
 
+// utility function for class load event
+static void post_class_load_event(const Ticks& start_time,
+                                  instanceKlassHandle k,
+                                  Handle initiating_loader) {
+#if INCLUDE_TRACE
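+  // UNTIMED: the event's start time is supplied explicitly below from the
+  // caller-provided Ticks rather than being captured at construction.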
+  EventClassLoad event(UNTIMED);
+  if (event.should_commit()) {
+    event.set_starttime(start_time);
+    event.set_loadedClass(k());
+    oop defining_class_loader = k->class_loader();
+    event.set_definingClassLoader(defining_class_loader != NULL ?
+      defining_class_loader->klass() : (Klass*)NULL);
+    oop class_loader = initiating_loader.is_null() ? (oop)NULL : initiating_loader();
+    event.set_initiatingClassLoader(class_loader != NULL ?
+      class_loader->klass() : (Klass*)NULL);
+    event.commit();
+  }
+#endif // INCLUDE_TRACE
+}
 
 Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
                                                         Handle class_loader,
@@ -984,42 +1008,42 @@
                                       Handle class_loader,
                                       Handle protection_domain,
                                       ClassFileStream* st,
-                                      KlassHandle host_klass,
+                                      const Klass* host_klass,
                                       GrowableArray<Handle>* cp_patches,
                                       TRAPS) {
-  TempNewSymbol parsed_name = NULL;
 
   Ticks class_load_start_time = Ticks::now();
 
   ClassLoaderData* loader_data;
-  if (host_klass.not_null()) {
+  if (host_klass != NULL) {
     // Create a new CLD for anonymous class, that uses the same class loader
     // as the host_klass
     guarantee(host_klass->class_loader() == class_loader(), "should be the same");
     guarantee(!DumpSharedSpaces, "must not create anonymous classes when dumping");
     loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader(), CHECK_NULL);
-    loader_data->record_dependency(host_klass(), CHECK_NULL);
+    loader_data->record_dependency(host_klass, CHECK_NULL);
   } else {
     loader_data = ClassLoaderData::class_loader_data(class_loader());
   }
 
-  // Parse the stream. Note that we do this even though this klass might
+  assert(st != NULL, "invariant");
+  assert(st->need_verify(), "invariant");
+
+  // Parse stream and create a klass.
+  // Note that we do this even though this klass might
   // already be present in the SystemDictionary, otherwise we would not
   // throw potential ClassFormatErrors.
-  //
-  // Note: "name" is updated.
 
-  instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name,
-                                                             loader_data,
-                                                             protection_domain,
-                                                             host_klass,
-                                                             cp_patches,
-                                                             parsed_name,
-                                                             true,
-                                                             THREAD);
+  instanceKlassHandle k = KlassFactory::create_from_stream(st,
+                                                           class_name,
+                                                           loader_data,
+                                                           protection_domain,
+                                                           host_klass,
+                                                           cp_patches,
+                                                           NULL, // parsed_name
+                                                           THREAD);
 
-
-  if (host_klass.not_null() && k.not_null()) {
+  if (host_klass != NULL && k.not_null()) {
     // If it's anonymous, initialize it now, since nobody else will.
 
     {
@@ -1050,7 +1074,7 @@
 
     post_class_load_event(class_load_start_time, k, class_loader);
   }
-  assert(host_klass.not_null() || cp_patches == NULL,
+  assert(host_klass != NULL || cp_patches == NULL,
          "cp_patches only found with host_klass");
 
   return k();
@@ -1065,7 +1089,6 @@
                                              Handle class_loader,
                                              Handle protection_domain,
                                              ClassFileStream* st,
-                                             bool verify,
                                              TRAPS) {
 
   // Classloaders that support parallelism, e.g. bootstrap classloader,
@@ -1082,22 +1105,23 @@
   check_loader_lock_contention(lockObject, THREAD);
   ObjectLocker ol(lockObject, THREAD, DoObjectLock);
 
-  TempNewSymbol parsed_name = NULL;
+  assert(st != NULL, "invariant");
 
-  // Parse the stream. Note that we do this even though this klass might
+  // Parse the stream and create a klass.
+  // Note that we do this even though this klass might
   // already be present in the SystemDictionary, otherwise we would not
   // throw potential ClassFormatErrors.
   //
-  // Note: "name" is updated.
+  // Note: "parsed_name" is updated.
+  TempNewSymbol parsed_name = NULL;
 
   instanceKlassHandle k;
 
 #if INCLUDE_CDS
   k = SystemDictionaryShared::lookup_from_stream(class_name,
                                                  class_loader,
                                                  protection_domain,
                                                  st,
-                                                 verify,
                                                  CHECK_NULL);
 #endif
 
@@ -1107,12 +1131,14 @@
     if (st->buffer() == NULL) {
       return NULL;
     }
-    k = ClassFileParser(st).parseClassFile(class_name,
-                                           loader_data,
-                                           protection_domain,
-                                           parsed_name,
-                                           verify,
-                                           THREAD);
+    k = KlassFactory::create_from_stream(st,
+                                         class_name,
+                                         loader_data,
+                                         protection_domain,
+                                         NULL, // host_klass
+                                         NULL, // cp_patches
+                                         &parsed_name,
+                                         THREAD);
   }
 
   const char* pkg = "java/";
@@ -1319,7 +1345,7 @@
     if (k.is_null()) {
       // Use VM class loader
       PerfTraceTime vmtimer(ClassLoader::perf_sys_classload_time());
-      k = ClassLoader::load_classfile(class_name, CHECK_(nh));
+      k = ClassLoader::load_class(class_name, CHECK_(nh));
     }
 
     // find_or_define_instance_class may return a different InstanceKlass
@@ -2704,23 +2730,14 @@
   constraints()->verify(dictionary(), placeholders());
 }
 
-// utility function for class load event
-void SystemDictionary::post_class_load_event(const Ticks& start_time,
-                                             instanceKlassHandle k,
-                                             Handle initiating_loader) {
-#if INCLUDE_TRACE
-  EventClassLoad event(UNTIMED);
-  if (event.should_commit()) {
-    event.set_starttime(start_time);
-    event.set_loadedClass(k());
-    oop defining_class_loader = k->class_loader();
-    event.set_definingClassLoader(defining_class_loader !=  NULL ?
-                                    defining_class_loader->klass() : (Klass*)NULL);
-    oop class_loader = initiating_loader.is_null() ? (oop)NULL : initiating_loader();
-    event.set_initiatingClassLoader(class_loader != NULL ?
-                                      class_loader->klass() : (Klass*)NULL);
-    event.commit();
-  }
-#endif // INCLUDE_TRACE
+// caller needs ResourceMark
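+// (as_C_string() allocates the returned buffer in the resource area)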
+const char* SystemDictionary::loader_name(const oop loader) {
+  return (loader == NULL ? "<bootloader>" :
+    InstanceKlass::cast(loader->klass())->name()->as_C_string());
 }
 
+// caller needs ResourceMark
+const char* SystemDictionary::loader_name(const ClassLoaderData* loader_data) {
+  return (loader_data->class_loader() == NULL ? "<bootloader>" :
+    InstanceKlass::cast(loader_data->class_loader()->klass())->name()->as_C_string());
+}
--- a/src/share/vm/classfile/systemDictionary.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/systemDictionary.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -25,17 +25,15 @@
 #ifndef SHARE_VM_CLASSFILE_SYSTEMDICTIONARY_HPP
 #define SHARE_VM_CLASSFILE_SYSTEMDICTIONARY_HPP
 
-#include "classfile/classFileStream.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary_ext.hpp"
+#include "jvmci/systemDictionary_jvmci.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/symbol.hpp"
 #include "runtime/java.hpp"
 #include "runtime/reflectionUtils.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/hashtable.inline.hpp"
-#include "jvmci/systemDictionary_jvmci.hpp"
-
 
 // The system dictionary stores all loaded classes and maps:
 //
@@ -73,13 +71,13 @@
 // of placeholders must hold the SystemDictionary_lock.
 //
 
+class ClassFileStream;
 class Dictionary;
 class PlaceholderTable;
 class LoaderConstraintTable;
 template <MEMFLAGS F> class HashtableBucket;
 class ResolutionErrorTable;
 class SymbolPropertyTable;
-class Ticks;
 
 // Certain classes are preloaded, such as java.lang.Object and java.lang.String.
 // They are all "well-known", in the sense that no class loader is allowed
@@ -272,34 +270,41 @@
   // parse_interfaces, resolve_instance_class_or_null, load_shared_class
   // "child_name" is the class whose super class or interface is being resolved.
   static Klass* resolve_super_or_fail(Symbol* child_name,
-                                        Symbol* class_name,
-                                        Handle class_loader,
-                                        Handle protection_domain,
-                                        bool is_superclass,
-                                        TRAPS);
+                                      Symbol* class_name,
+                                      Handle class_loader,
+                                      Handle protection_domain,
+                                      bool is_superclass,
+                                      TRAPS);
 
   // Parse new stream. This won't update the system dictionary or
   // class hierarchy, simply parse the stream. Used by JVMTI RedefineClasses.
   static Klass* parse_stream(Symbol* class_name,
-                               Handle class_loader,
-                               Handle protection_domain,
-                               ClassFileStream* st,
-                               TRAPS) {
-    KlassHandle nullHandle;
-    return parse_stream(class_name, class_loader, protection_domain, st, nullHandle, NULL, THREAD);
+                             Handle class_loader,
+                             Handle protection_domain,
+                             ClassFileStream* st,
+                             TRAPS) {
+    return parse_stream(class_name,
+                        class_loader,
+                        protection_domain,
+                        st,
+                        NULL, // host klass
+                        NULL, // cp_patches
+                        THREAD);
   }
   static Klass* parse_stream(Symbol* class_name,
-                               Handle class_loader,
-                               Handle protection_domain,
-                               ClassFileStream* st,
-                               KlassHandle host_klass,
-                               GrowableArray<Handle>* cp_patches,
-                               TRAPS);
+                             Handle class_loader,
+                             Handle protection_domain,
+                             ClassFileStream* st,
+                             const Klass* host_klass,
+                             GrowableArray<Handle>* cp_patches,
+                             TRAPS);
 
   // Resolve from stream (called by jni_DefineClass and JVM_DefineClass)
-  static Klass* resolve_from_stream(Symbol* class_name, Handle class_loader,
-                                      Handle protection_domain,
-                                      ClassFileStream* st, bool verify, TRAPS);
+  static Klass* resolve_from_stream(Symbol* class_name,
+                                    Handle class_loader,
+                                    Handle protection_domain,
+                                    ClassFileStream* st,
+                                    TRAPS);
 
   // Lookup an already loaded class. If not found NULL is returned.
   static Klass* find(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS);
@@ -546,14 +551,8 @@
                                                      TRAPS);
 
   // Utility for printing loader "name" as part of tracing constraints
-  static const char* loader_name(oop loader) {
-    return ((loader) == NULL ? "<bootloader>" :
-            InstanceKlass::cast((loader)->klass())->name()->as_C_string() );
-  }
-  static const char* loader_name(ClassLoaderData* loader_data) {
-    return (loader_data->class_loader() == NULL ? "<bootloader>" :
-            InstanceKlass::cast((loader_data->class_loader())->klass())->name()->as_C_string() );
-  }
+  static const char* loader_name(const oop loader);
+  static const char* loader_name(const ClassLoaderData* loader_data);
 
   // Record the error when the first attempt to resolve a reference from a constant
   // pool entry to a class fails.
@@ -663,9 +662,6 @@
   // Setup link to hierarchy
   static void add_to_hierarchy(instanceKlassHandle k, TRAPS);
 
-  // event based tracing
-  static void post_class_load_event(const Ticks& start_time, instanceKlassHandle k,
-                                    Handle initiating_loader);
   // We pass in the hashtable index so we can calculate it outside of
   // the SystemDictionary_lock.
 
--- a/src/share/vm/classfile/systemDictionaryShared.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/systemDictionaryShared.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -63,8 +63,7 @@
   static InstanceKlass* lookup_from_stream(Symbol* class_name,
                                            Handle class_loader,
                                            Handle protection_domain,
-                                           ClassFileStream* st,
-                                           bool verify,
+                                           const ClassFileStream* st,
                                            TRAPS) {
     return NULL;
   }
--- a/src/share/vm/classfile/verifier.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/verifier.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -48,6 +48,7 @@
 #include "runtime/thread.hpp"
 #include "services/threadService.hpp"
 #include "utilities/bytes.hpp"
+#include "logging/log.hpp"
 
 #define NOFAILOVER_MAJOR_VERSION                       51
 #define NONZERO_PADDING_BYTES_IN_SWITCH_MAJOR_VERSION  51
@@ -111,6 +112,18 @@
   }
 }
 
+// Prints the end-verification message to the appropriate output.
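+// Note: TRAPS is used only to inspect a pending exception; nothing is thrown here.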
+void Verifier::log_end_verification(outputStream* st, const char* klassName, Symbol* exception_name, TRAPS) {
+  if (HAS_PENDING_EXCEPTION) {
+    st->print("Verification for %s has", klassName);
+    st->print_cr(" exception pending %s ",
+                 PENDING_EXCEPTION->klass()->external_name());
+  } else if (exception_name != NULL) {
+    st->print_cr("Verification for %s failed", klassName);
+  }
+  st->print_cr("End class verification for: %s", klassName);
+}
+
 bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool should_verify_class, TRAPS) {
   HandleMark hm;
   ResourceMark rm(THREAD);
@@ -155,9 +168,7 @@
   bool can_failover = FailOverToOldVerifier &&
      klass->major_version() < NOFAILOVER_MAJOR_VERSION;
 
-  if (TraceClassInitialization) {
-    tty->print_cr("Start class verification for: %s", klassName);
-  }
+  log_info(classinit)("Start class verification for: %s", klassName);
   if (klass->major_version() >= STACKMAP_ATTRIBUTE_MAJOR_VERSION) {
     ClassVerifier split_verifier(klass, THREAD);
     split_verifier.verify_class(THREAD);
@@ -165,10 +176,10 @@
     if (can_failover && !HAS_PENDING_EXCEPTION &&
         (exception_name == vmSymbols::java_lang_VerifyError() ||
          exception_name == vmSymbols::java_lang_ClassFormatError())) {
-      if (TraceClassInitialization || VerboseVerification) {
-        tty->print_cr(
-          "Fail over class verification to old verifier for: %s", klassName);
+      if (VerboseVerification) {
+        tty->print_cr("Fail over class verification to old verifier for: %s", klassName);
       }
+      log_info(classinit)("Fail over class verification to old verifier for: %s", klassName);
       exception_name = inference_verify(
         klass, message_buffer, message_buffer_len, THREAD);
     }
@@ -180,15 +191,11 @@
         klass, message_buffer, message_buffer_len, THREAD);
   }
 
-  if (TraceClassInitialization || VerboseVerification) {
-    if (HAS_PENDING_EXCEPTION) {
-      tty->print("Verification for %s has", klassName);
-      tty->print_cr(" exception pending %s ",
-        PENDING_EXCEPTION->klass()->external_name());
-    } else if (exception_name != NULL) {
-      tty->print_cr("Verification for %s failed", klassName);
-    }
-    tty->print_cr("End class verification for: %s", klassName);
+  if (log_is_enabled(Info, classinit)) {
+    log_end_verification(LogHandle(classinit)::info_stream(), klassName, exception_name, THREAD);
+  }
+  if (VerboseVerification) {
+    log_end_verification(tty, klassName, exception_name, THREAD);
   }
 
   if (HAS_PENDING_EXCEPTION) {
@@ -598,10 +605,13 @@
     verify_method(methodHandle(THREAD, m), CHECK_VERIFY(this));
   }
 
-  if (VerboseVerification || TraceClassInitialization) {
-    if (was_recursively_verified())
+  if (was_recursively_verified()) {
+    if (VerboseVerification) {
       tty->print_cr("Recursive verification detected for: %s",
-          _klass->external_name());
+                    _klass->external_name());
+    }
+    log_info(classinit)("Recursive verification detected for: %s",
+                        _klass->external_name());
   }
 }
 
--- a/src/share/vm/classfile/verifier.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/verifier.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -50,6 +50,7 @@
    * Otherwise, no exception is thrown and the return indicates the
    * error.
    */
+  static void log_end_verification(outputStream* st, const char* klassName, Symbol* exception_name, TRAPS);
   static bool verify(instanceKlassHandle klass, Mode mode, bool should_verify_class, TRAPS);
 
   // Return false if the class is loaded by the bootstrap loader,
--- a/src/share/vm/classfile/vmSymbols.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/vmSymbols.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -34,7 +34,7 @@
 
 Symbol* vmSymbols::_type_signatures[T_VOID+1] = { NULL /*, NULL...*/ };
 
-inline int compare_symbol(Symbol* a, Symbol* b) {
+inline int compare_symbol(const Symbol* a, const Symbol* b) {
   if (a == b)  return 0;
   // follow the natural address order:
   return (address)a > (address)b ? +1 : -1;
@@ -43,8 +43,8 @@
 static vmSymbols::SID vm_symbol_index[vmSymbols::SID_LIMIT];
 extern "C" {
   static int compare_vmsymbol_sid(const void* void_a, const void* void_b) {
-    Symbol* a = vmSymbols::symbol_at(*((vmSymbols::SID*) void_a));
-    Symbol* b = vmSymbols::symbol_at(*((vmSymbols::SID*) void_b));
+    const Symbol* a = vmSymbols::symbol_at(*((vmSymbols::SID*) void_a));
+    const Symbol* b = vmSymbols::symbol_at(*((vmSymbols::SID*) void_b));
     return compare_symbol(a, b);
   }
 }
@@ -188,7 +188,7 @@
 }
 
 
-BasicType vmSymbols::signature_type(Symbol* s) {
+BasicType vmSymbols::signature_type(const Symbol* s) {
   assert(s != NULL, "checking");
   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
     if (s == _type_signatures[i]) {
@@ -206,7 +206,7 @@
 // (Typical counts are calls=7000 and probes=17000.)
 #endif
 
-vmSymbols::SID vmSymbols::find_sid(Symbol* symbol) {
+vmSymbols::SID vmSymbols::find_sid(const Symbol* symbol) {
   // Handle the majority of misses by a bounds check.
   // Then, use a binary search over the index.
   // Expected trip count is less than log2_SID_LIMIT, about eight.
--- a/src/share/vm/classfile/vmSymbols.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/classfile/vmSymbols.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -212,6 +212,7 @@
   template(java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$LockedUpdater") \
   template(java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl,     "java/util/concurrent/atomic/AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl") \
   template(jdk_internal_vm_annotation_Contended_signature,                   "Ljdk/internal/vm/annotation/Contended;")    \
+  template(jdk_internal_vm_annotation_ReservedStackAccess_signature,         "Ljdk/internal/vm/annotation/ReservedStackAccess;") \
                                                                                                   \
   /* class symbols needed by intrinsics */                                                        \
   VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, template, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \
@@ -1378,7 +1379,7 @@
     return _type_signatures[t];
   }
   // inverse of type_signature; returns T_OBJECT if s is not recognized
-  static BasicType signature_type(Symbol* s);
+  static BasicType signature_type(const Symbol* s);
 
   static Symbol* symbol_at(SID id) {
     assert(id >= FIRST_SID && id < SID_LIMIT, "oob");
@@ -1387,7 +1388,7 @@
   }
 
   // Returns symbol's SID if one is assigned, else NO_SID.
-  static SID find_sid(Symbol* symbol);
+  static SID find_sid(const Symbol* symbol);
   static SID find_sid(const char* symbol_name);
 
 #ifndef PRODUCT
--- a/src/share/vm/code/nmethod.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/code/nmethod.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -2633,7 +2633,7 @@
   int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
 #ifdef ASSERT
   if (cont_offset == 0) {
-    Thread* thread = ThreadLocalStorage::get_thread_slow();
+    Thread* thread = Thread::current();
     ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
     HandleMark hm(thread);
     ResourceMark rm(thread);
--- a/src/share/vm/compiler/compileBroker.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/compiler/compileBroker.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
--- a/src/share/vm/compiler/compilerDirectives.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/compiler/compilerDirectives.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,6 @@
 #include "ci/ciUtilities.hpp"
 #include "compiler/methodMatcher.hpp"
 #include "compiler/compilerOracle.hpp"
-#include "oops/oop.inline.hpp"
 #include "utilities/exceptions.hpp"
 
   //      Directives flag name,    type, default value, compile command name
--- a/src/share/vm/gc/cms/allocationStats.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/cms/allocationStats.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_CMS_ALLOCATIONSTATS_HPP
 
 #include "gc/shared/gcUtil.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
@@ -119,11 +120,9 @@
       ssize_t old_desired = _desired;
       float delta_ise = (CMSExtrapolateSweep ? intra_sweep_estimate : 0.0);
       _desired = (ssize_t)(new_rate * (inter_sweep_estimate + delta_ise));
-      if (PrintFLSStatistics > 1) {
-        gclog_or_tty->print_cr("demand: " SSIZE_FORMAT ", old_rate: %f, current_rate: %f, "
-                               "new_rate: %f, old_desired: " SSIZE_FORMAT ", new_desired: " SSIZE_FORMAT,
-                                demand, old_rate, rate, new_rate, old_desired, _desired);
-      }
+      log_trace(gc, freelist)("demand: " SSIZE_FORMAT ", old_rate: %f, current_rate: %f, "
+                              "new_rate: %f, old_desired: " SSIZE_FORMAT ", new_desired: " SSIZE_FORMAT,
+                              demand, old_rate, rate, new_rate, old_desired, _desired);
     }
   }
 
--- a/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -400,17 +400,16 @@
 
 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 const {
-  reportIndexedFreeListStatistics();
-  gclog_or_tty->print_cr("Layout of Indexed Freelists");
-  gclog_or_tty->print_cr("---------------------------");
+  reportIndexedFreeListStatistics(st);
+  st->print_cr("Layout of Indexed Freelists");
+  st->print_cr("---------------------------");
   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    _indexedFreeList[i].print_on(gclog_or_tty);
-    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
-         fc = fc->next()) {
-      gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
-                          p2i(fc), p2i((HeapWord*)fc + i),
-                          fc->cantCoalesce() ? "\t CC" : "");
+    _indexedFreeList[i].print_on(st);
+    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
+      st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
+                   p2i(fc), p2i((HeapWord*)fc + i),
+                   fc->cantCoalesce() ? "\t CC" : "");
     }
   }
 }
@@ -422,7 +421,7 @@
 
 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 const {
-  _dictionary->report_statistics();
+  _dictionary->report_statistics(st);
   st->print_cr("Layout of Freelists in Tree");
   st->print_cr("---------------------------");
   _dictionary->print_free_lists(st);
@@ -472,54 +471,58 @@
   return sz;
 }
 
-void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
-  outputStream* st) {
-  st->print_cr("\n=========================");
+void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
+  st->print_cr("=========================");
   st->print_cr("Block layout in CMS Heap:");
   st->print_cr("=========================");
   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
   blk_iterate(&bpcl);
 
-  st->print_cr("\n=======================================");
+  st->print_cr("=======================================");
   st->print_cr("Order & Layout of Promotion Info Blocks");
   st->print_cr("=======================================");
   print_promo_info_blocks(st);
 
-  st->print_cr("\n===========================");
+  st->print_cr("===========================");
   st->print_cr("Order of Indexed Free Lists");
   st->print_cr("=========================");
   print_indexed_free_lists(st);
 
-  st->print_cr("\n=================================");
+  st->print_cr("=================================");
   st->print_cr("Order of Free Lists in Dictionary");
   st->print_cr("=================================");
   print_dictionary_free_lists(st);
 }
 
 
-void CompactibleFreeListSpace::reportFreeListStatistics() const {
+void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
   assert_lock_strong(&_freelistLock);
-  assert(PrintFLSStatistics != 0, "Reporting error");
-  _dictionary->report_statistics();
-  if (PrintFLSStatistics > 1) {
-    reportIndexedFreeListStatistics();
+  LogHandle(gc, freelist, stats) log;
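+  // Nothing to do unless gc+freelist+stats logging is enabled at debug level.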
+  if (!log.is_debug()) {
+    return;
+  }
+  log.debug("%s", title);
+  _dictionary->report_statistics(log.debug_stream());
+  if (log.is_trace()) {
+    ResourceMark rm;
+    reportIndexedFreeListStatistics(log.trace_stream());
     size_t total_size = totalSizeInIndexedFreeLists() +
                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
-    gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
+    log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
   }
 }
 
-void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
+void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
   assert_lock_strong(&_freelistLock);
-  gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
-                      "--------------------------------\n");
+  st->print_cr("Statistics for IndexedFreeLists:");
+  st->print_cr("--------------------------------");
   size_t total_size = totalSizeInIndexedFreeLists();
-  size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
-  gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
-  gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
-  gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
+  size_t free_blocks = numFreeBlocksInIndexedFreeLists();
+  st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
+  st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
+  st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
   if (free_blocks != 0) {
-    gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
+    st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
   }
 }
 
@@ -1824,10 +1827,7 @@
 void
 CompactibleFreeListSpace::gc_prologue() {
   assert_locked();
-  if (PrintFLSStatistics != 0) {
-    gclog_or_tty->print("Before GC:\n");
-    reportFreeListStatistics();
-  }
+  reportFreeListStatistics("Before GC:");
   refillLinearAllocBlocksIfNeeded();
 }
 
@@ -1837,11 +1837,7 @@
   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
   _promoInfo.stopTrackingPromotions();
   repairLinearAllocationBlocks();
-  // Print Space's stats
-  if (PrintFLSStatistics != 0) {
-    gclog_or_tty->print("After GC:\n");
-    reportFreeListStatistics();
-  }
+  reportFreeListStatistics("After GC:");
 }
 
 // Iteration support, mostly delegated from a CMS generation
@@ -2014,9 +2010,7 @@
   size_t i;
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
-    if (PrintFLSStatistics > 1) {
-      gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
-    }
+    log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
     fl->set_before_sweep(fl->count());
@@ -2065,16 +2059,10 @@
 }
 
 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
-  if (PrintFLSStatistics > 0) {
-    HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
-    gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
-                           p2i(largestAddr));
-  }
+  log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
   setFLSurplus();
   setFLHints();
-  if (PrintGC && PrintFLSCensus > 0) {
-    printFLCensus(sweep_count);
-  }
+  printFLCensus(sweep_count);
   clearFLCensus();
   assert_locked();
   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
@@ -2213,14 +2201,15 @@
       }
     }
     if (res == 0) {
-      gclog_or_tty->print_cr("Livelock: no rank reduction!");
-      gclog_or_tty->print_cr(
-        " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
-        " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
+      LogHandle(gc, verify) log;
+      log.info("Livelock: no rank reduction!");
+      log.info(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
+               " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
-      _sp->print_on(gclog_or_tty);
-      guarantee(false, "Seppuku!");
+      ResourceMark rm;
+      _sp->print_on(log.info_stream());
+      guarantee(false, "Verification failed.");
     }
     _last_addr = addr;
     _last_size = res;
@@ -2386,17 +2375,23 @@
 
 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
   assert_lock_strong(&_freelistLock);
+  LogHandle(gc, freelist, census) log;
+  if (!log.is_debug()) {
+    return;
+  }
   AdaptiveFreeList<FreeChunk> total;
-  gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
-  AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
+  log.debug("end sweep# " SIZE_FORMAT, sweep_count);
+  ResourceMark rm;
+  outputStream* out = log.debug_stream();
+  AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
   size_t total_free = 0;
   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
     total_free += fl->count() * fl->size();
     if (i % (40*IndexSetStride) == 0) {
-      AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
+      AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
     }
-    fl->print_on(gclog_or_tty);
+    fl->print_on(out);
     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
     total.set_surplus(    total.surplus()     + fl->surplus()    );
     total.set_desired(    total.desired()     + fl->desired()    );
@@ -2408,14 +2403,13 @@
     total.set_split_births(total.split_births() + fl->split_births());
     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
   }
-  total.print_on(gclog_or_tty, "TOTAL");
-  gclog_or_tty->print_cr("Total free in indexed lists "
-                         SIZE_FORMAT " words", total_free);
-  gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
-    (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
-            (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
-    (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
-  _dictionary->print_dict_census();
+  total.print_on(out, "TOTAL");
+  log.debug("Total free in indexed lists " SIZE_FORMAT " words", total_free);
+  log.debug("growth: %8.5f  deficit: %8.5f",
+            (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
+                    (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
+            (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
+  _dictionary->print_dict_census(out);
 }
 
 ///////////////////////////////////////////////////////////////////////////
@@ -2544,10 +2538,7 @@
       // Reset counters for next round
       _global_num_workers[i] = 0;
       _global_num_blocks[i] = 0;
-      if (PrintOldPLAB) {
-        gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT,
-                               i, (size_t)_blocks_to_claim[i].average());
-      }
+      log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
     }
   }
 }
@@ -2584,10 +2575,8 @@
           _indexedFreeList[i].set_size(i);
         }
       }
-      if (PrintOldPLAB) {
-        gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
-                               tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
-      }
+      log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
+                          tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
       // Reset stats for next round
       _num_blocks[i]         = 0;
     }
--- a/src/share/vm/gc/cms/compactibleFreeListSpace.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/cms/compactibleFreeListSpace.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -29,6 +29,7 @@
 #include "gc/cms/promotionInfo.hpp"
 #include "gc/shared/blockOffsetTable.hpp"
 #include "gc/shared/space.hpp"
+#include "logging/log.hpp"
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/freeList.hpp"
 
@@ -275,8 +276,8 @@
   void       verify_objects_initialized() const;
 
   // Statistics reporting helper functions
-  void       reportFreeListStatistics() const;
-  void       reportIndexedFreeListStatistics() const;
+  void       reportFreeListStatistics(const char* title) const;
+  void       reportIndexedFreeListStatistics(outputStream* st) const;
   size_t     maxChunkSizeInIndexedFreeLists() const;
   size_t     numFreeBlocksInIndexedFreeLists() const;
   // Accessor
@@ -450,11 +451,9 @@
   void save_sweep_limit() {
     _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                    unallocated_block() : end();
-    if (CMSTraceSweeper) {
-      gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
-                             "  for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
-                             p2i(_sweep_limit), p2i(bottom()), p2i(end()));
-    }
+    log_develop_trace(gc, sweep)(">>>>> Saving sweep limit " PTR_FORMAT
+                                 "  for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
+                                 p2i(_sweep_limit), p2i(bottom()), p2i(end()));
   }
   NOT_PRODUCT(
     void clear_sweep_limit() { _sweep_limit = NULL; }
--- a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/classLoaderData.hpp"
 #include "classfile/stringTable.hpp"
+#include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc/cms/cmsCollectorPolicy.hpp"
@@ -46,13 +47,14 @@
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/iterator.inline.hpp"
 #include "memory/padded.hpp"
@@ -64,6 +66,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/orderAccess.inline.hpp"
+#include "runtime/timer.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/memoryService.hpp"
 #include "services/runtimeService.hpp"
@@ -366,13 +369,9 @@
     cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
     cms_free_dbl = cms_free_dbl * cms_adjustment;
 
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
-        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
-        cms_free, expected_promotion);
-      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
-        cms_free_dbl, cms_consumption_rate() + 1.0);
-    }
+    log_trace(gc)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
+                  cms_free, expected_promotion);
+    log_trace(gc)("  cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
     // Add 1 in case the consumption rate goes to zero.
     return cms_free_dbl / (cms_consumption_rate() + 1.0);
   }
@@ -401,12 +400,8 @@
   // If a concurrent mode failure occurred recently, we want to be
   // more conservative and halve our expected time_until_cms_gen_full()
   if (work > deadline) {
-    if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print(
-        " CMSCollector: collect because of anticipated promotion "
-        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
-        gc0_period(), time_until_cms_gen_full());
-    }
+    log_develop_trace(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
+                          cms_duration(), gc0_period(), time_until_cms_gen_full());
     return 0.0;
   }
   return work - deadline;
@@ -668,31 +663,6 @@
 }
 #endif
 
-void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  if (PrintGCDetails) {
-    // I didn't want to change the logging when removing the level concept,
-    // but I guess this logging could say "old" or something instead of "1".
-    assert(gch->is_old_gen(this),
-           "The CMS generation should be the old generation");
-    uint level = 1;
-    if (Verbose) {
-      gclog_or_tty->print("[%u %s-%s: " SIZE_FORMAT "(" SIZE_FORMAT ")]",
-        level, short_name(), s, used(), capacity());
-    } else {
-      gclog_or_tty->print("[%u %s-%s: " SIZE_FORMAT "K(" SIZE_FORMAT "K)]",
-        level, short_name(), s, used() / K, capacity() / K);
-    }
-  }
-  if (Verbose) {
-    gclog_or_tty->print(" " SIZE_FORMAT "(" SIZE_FORMAT ")",
-              gch->used(), gch->capacity());
-  } else {
-    gclog_or_tty->print(" " SIZE_FORMAT "K(" SIZE_FORMAT "K)",
-              gch->used() / K, gch->capacity() / K);
-  }
-}
-
 size_t
 ConcurrentMarkSweepGeneration::contiguous_available() const {
   // dld proposes an improvement in precision here. If the committed
@@ -716,21 +686,18 @@
   size_t available = max_available();
   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
-  if (Verbose && PrintGCDetails) {
-    gclog_or_tty->print_cr(
-      "CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "),"
-      "max_promo(" SIZE_FORMAT ")",
-      res? "":" not", available, res? ">=":"<",
-      av_promo, max_promotion_in_bytes);
-  }
+  log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
+                           res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
   return res;
 }
 
 // At a promotion failure dump information on block layout in heap
 // (cms old generation).
 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
-  if (CMSDumpAtPromotionFailure) {
-    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
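+  // The block-layout dump is now gated on gc+promotion trace logging
+  // instead of the CMSDumpAtPromotionFailure flag.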
+  LogHandle(gc, promotion) log;
+  if (log.is_trace()) {
+    ResourceMark rm;
+    cmsSpace()->dump_at_safepoint_with_locks(collector(), log.trace_stream());
   }
 }
 
@@ -786,27 +753,26 @@
     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
     assert(desired_capacity >= capacity(), "invalid expansion size");
     size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
-    if (PrintGCDetails && Verbose) {
+    LogHandle(gc) log;
+    if (log.is_trace()) {
       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
-      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
-      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
-      gclog_or_tty->print_cr("  Desired free fraction %f", desired_free_percentage);
-      gclog_or_tty->print_cr("  Maximum free fraction %f", maximum_free_percentage);
-      gclog_or_tty->print_cr("  Capacity " SIZE_FORMAT, capacity() / 1000);
-      gclog_or_tty->print_cr("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
+      log.trace("From compute_new_size: ");
+      log.trace("  Free fraction %f", free_percentage);
+      log.trace("  Desired free fraction %f", desired_free_percentage);
+      log.trace("  Maximum free fraction %f", maximum_free_percentage);
+      log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
+      log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
       GenCollectedHeap* gch = GenCollectedHeap::heap();
       assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
       size_t young_size = gch->young_gen()->capacity();
-      gclog_or_tty->print_cr("  Young gen size " SIZE_FORMAT, young_size / 1000);
-      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
-      gclog_or_tty->print_cr("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
-      gclog_or_tty->print_cr("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
+      log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
+      log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
+      log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
+      log.trace("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
     }
     // safe if expansion fails
     expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("  Expanded free fraction %f", ((double) free()) / capacity());
-    }
+    log.trace("  Expanded free fraction %f", ((double) free()) / capacity());
   } else {
     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
     assert(desired_capacity <= capacity(), "invalid expansion size");
@@ -1144,10 +1110,7 @@
 
 bool CMSCollector::shouldConcurrentCollect() {
   if (_full_gc_requested) {
-    if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
-                             " gc request (or gc_locker)");
-    }
+    log_trace(gc)("CMSCollector: collect because of explicit  gc request (or gc_locker)");
     return true;
   }
 
@@ -1155,24 +1118,21 @@
   // ------------------------------------------------------------------
   // Print out lots of information which affects the initiation of
   // a collection.
-  if (PrintCMSInitiationStatistics && stats().valid()) {
-    gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
-    gclog_or_tty->stamp();
-    gclog_or_tty->cr();
-    stats().print_on(gclog_or_tty);
-    gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
-      stats().time_until_cms_gen_full());
-    gclog_or_tty->print_cr("free=" SIZE_FORMAT, _cmsGen->free());
-    gclog_or_tty->print_cr("contiguous_available=" SIZE_FORMAT,
-                           _cmsGen->contiguous_available());
-    gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
-    gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
-    gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
-    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
-    gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
-    gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
-    gclog_or_tty->print_cr("metadata initialized %d",
-      MetaspaceGC::should_concurrent_collect());
+  LogHandle(gc) log;
+  if (log.is_trace() && stats().valid()) {
+    log.trace("CMSCollector shouldConcurrentCollect:");
+    ResourceMark rm;
+    stats().print_on(log.trace_stream());
+    log.trace("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
+    log.trace("free=" SIZE_FORMAT, _cmsGen->free());
+    log.trace("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
+    log.trace("promotion_rate=%g", stats().promotion_rate());
+    log.trace("cms_allocation_rate=%g", stats().cms_allocation_rate());
+    log.trace("occupancy=%3.7f", _cmsGen->occupancy());
+    log.trace("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
+    log.trace("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
+    log.trace("cms_time_since_end=%3.7f", stats().cms_time_since_end());
+    log.trace("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
   }
   // ------------------------------------------------------------------
 
@@ -1190,12 +1150,8 @@
       // this branch will not fire after the first successful CMS
       // collection because the stats should then be valid.
       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
-        if (Verbose && PrintGCDetails) {
-          gclog_or_tty->print_cr(
-            " CMSCollector: collect for bootstrapping statistics:"
-            " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
-            _bootstrap_occupancy);
-        }
+        log_trace(gc)(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
+                      _cmsGen->occupancy(), _bootstrap_occupancy);
         return true;
       }
     }
@@ -1207,9 +1163,7 @@
   // XXX We need to make sure that the gen expansion
   // criterion dovetails well with this. XXX NEED TO FIX THIS
   if (_cmsGen->should_concurrent_collect()) {
-    if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print_cr("CMS old gen initiated");
-    }
+    log_trace(gc)("CMS old gen initiated");
     return true;
   }
 
@@ -1220,16 +1174,12 @@
   assert(gch->collector_policy()->is_generation_policy(),
          "You may want to check the correctness of the following");
   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
-    if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
-    }
+    log_trace(gc)("CMSCollector: collect because incremental collection will fail ");
     return true;
   }
 
   if (MetaspaceGC::should_concurrent_collect()) {
-    if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
-    }
+    log_trace(gc)("CMSCollector: collect for metadata allocation ");
     return true;
   }
 
@@ -1243,13 +1193,11 @@
     // Check the CMS time since begin (we do not check the stats validity
     // as we want to be able to trigger the first CMS cycle as well)
     if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
-      if (Verbose && PrintGCDetails) {
-        if (stats().valid()) {
-          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
-                                 stats().cms_time_since_begin());
-        } else {
-          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
-        }
+      if (stats().valid()) {
+        log_trace(gc)("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
+                      stats().cms_time_since_begin());
+      } else {
+        log_trace(gc)("CMSCollector: collect because of trigger interval (first collection)");
       }
       return true;
     }
@@ -1292,20 +1240,15 @@
 
   assert_lock_strong(freelistLock());
   if (occupancy() > initiating_occupancy()) {
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
-        short_name(), occupancy(), initiating_occupancy());
-    }
+    log_trace(gc)(" %s: collect because of occupancy %f / %f  ",
+                  short_name(), occupancy(), initiating_occupancy());
     return true;
   }
   if (UseCMSInitiatingOccupancyOnly) {
     return false;
   }
   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print(" %s: collect because expanded for allocation ",
-        short_name());
-    }
+    log_trace(gc)(" %s: collect because expanded for allocation ", short_name());
     return true;
   }
   return false;
@@ -1362,13 +1305,9 @@
 
 void CMSCollector::report_concurrent_mode_interruption() {
   if (is_external_interruption()) {
-    if (PrintGCDetails) {
-      gclog_or_tty->print(" (concurrent mode interrupted)");
-    }
+    log_debug(gc)("Concurrent mode interrupted");
   } else {
-    if (PrintGCDetails) {
-      gclog_or_tty->print(" (concurrent mode failure)");
-    }
+    log_debug(gc)("Concurrent mode failure");
     _gc_tracer_cm->report_concurrent_mode_failure();
   }
 }
@@ -1502,11 +1441,9 @@
          "VM thread should have CMS token");
   getFreelistLocks();
   bitMapLock()->lock_without_safepoint_check();
-  if (TraceCMSState) {
-    gclog_or_tty->print_cr("CMS foreground collector has asked for control "
-      INTPTR_FORMAT " with first state %d", p2i(Thread::current()), first_state);
-    gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
-  }
+  log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d",
+                       p2i(Thread::current()), first_state);
+  log_debug(gc, state)("    gets control with state %d", _collectorState);
 
   // Inform cms gen if this was due to partial collection failing.
   // The CMS gen may use this fact to determine its expansion policy.
@@ -1580,7 +1517,7 @@
   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
 
-  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
+  GCTraceTime(Trace, gc) t("CMS:MSC");
 
   // Temporarily widen the span of the weak reference processing to
   // the entire heap.
@@ -1665,33 +1602,34 @@
 }
 
 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
+  LogHandle(gc, heap) log;
+  if (!log.is_trace()) {
+    return;
+  }
+
   ContiguousSpace* eden_space = _young_gen->eden();
   ContiguousSpace* from_space = _young_gen->from();
   ContiguousSpace* to_space   = _young_gen->to();
   // Eden
   if (_eden_chunk_array != NULL) {
-    gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
-                           p2i(eden_space->bottom()), p2i(eden_space->top()),
-                           p2i(eden_space->end()), eden_space->capacity());
-    gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
-                           "_eden_chunk_capacity=" SIZE_FORMAT,
-                           _eden_chunk_index, _eden_chunk_capacity);
+    log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
+              p2i(eden_space->bottom()), p2i(eden_space->top()),
+              p2i(eden_space->end()), eden_space->capacity());
+    log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT,
+              _eden_chunk_index, _eden_chunk_capacity);
     for (size_t i = 0; i < _eden_chunk_index; i++) {
-      gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
-                             i, p2i(_eden_chunk_array[i]));
+      log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i]));
     }
   }
   // Survivor
   if (_survivor_chunk_array != NULL) {
-    gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
-                           p2i(from_space->bottom()), p2i(from_space->top()),
-                           p2i(from_space->end()), from_space->capacity());
-    gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
-                           "_survivor_chunk_capacity=" SIZE_FORMAT,
-                           _survivor_chunk_index, _survivor_chunk_capacity);
+    log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
+              p2i(from_space->bottom()), p2i(from_space->top()),
+              p2i(from_space->end()), from_space->capacity());
+    log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT,
+              _survivor_chunk_index, _survivor_chunk_capacity);
     for (size_t i = 0; i < _survivor_chunk_index; i++) {
-      gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
-                             i, p2i(_survivor_chunk_array[i]));
+      log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i]));
     }
   }
 }
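
For multi-line dumps like the chunk arrays above, the conversion takes a LogHandle once, returns early when the level is off, and reuses the handle for every line, so none of the formatting arguments are evaluated on the fast path. A condensed sketch of that shape, reusing names from the hunk:

  LogHandle(gc, heap) log;
  if (!log.is_trace()) {
    return;  // gc+heap=trace disabled: skip all formatting work
  }
  log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT,
            _eden_chunk_index, _eden_chunk_capacity);
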
@@ -1780,11 +1718,7 @@
     _collection_count_start = gch->total_full_collections();
   }
 
-  // Used for PrintGC
-  size_t prev_used = 0;
-  if (PrintGC && Verbose) {
-    prev_used = _cmsGen->used();
-  }
+  size_t prev_used = _cmsGen->used();
 
   // The change of the collection state is normally done at this level;
   // the exceptions are phases that are executed while the world is
@@ -1795,10 +1729,8 @@
   // while the world is stopped because the foreground collector already
   // has the world stopped and would deadlock.
   while (_collectorState != Idling) {
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
-        p2i(Thread::current()), _collectorState);
-    }
+    log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",
+                         p2i(Thread::current()), _collectorState);
     // The foreground collector
     //   holds the Heap_lock throughout its collection.
     //   holds the CMS token (but not the lock)
@@ -1828,11 +1760,8 @@
         // done this round.
         assert(_foregroundGCShouldWait == false, "We set it to false in "
                "waitForForegroundGC()");
-        if (TraceCMSState) {
-          gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
-            " exiting collection CMS state %d",
-            p2i(Thread::current()), _collectorState);
-        }
+        log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
+                             p2i(Thread::current()), _collectorState);
         return;
       } else {
         // The background collector can run but check to see if the
@@ -1936,10 +1865,8 @@
         ShouldNotReachHere();
         break;
     }
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
-        p2i(Thread::current()), _collectorState);
-    }
+    log_debug(gc, state)("  Thread " INTPTR_FORMAT " done - next CMS state %d",
+                         p2i(Thread::current()), _collectorState);
     assert(_foregroundGCShouldWait, "block post-condition");
   }
 
@@ -1958,14 +1885,10 @@
     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
            "Possible deadlock");
   }
-  if (TraceCMSState) {
-    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
-      " exiting collection CMS state %d",
-      p2i(Thread::current()), _collectorState);
-  }
-  if (PrintGC && Verbose) {
-    _cmsGen->print_heap_change(prev_used);
-  }
+  log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
+                       p2i(Thread::current()), _collectorState);
+  log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
+                     prev_used / K, _cmsGen->used()/K, _cmsGen->capacity() /K);
 }
 
 void CMSCollector::register_gc_start(GCCause::Cause cause) {
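
Note the level and tag split in the hunk above: collector state transitions log at debug on the (gc, state) tag set, while the heap-change summary that replaces print_heap_change() is info on (gc, heap). Assuming the -Xlog selection syntax that accompanies Unified Logging, the two streams would be enabled roughly like this:

  java -Xlog:gc+state=debug ...   // CMS thread state transitions
  java -Xlog:gc+heap=info ...     // Old: <used>K-><used>K(<capacity>K) summaries
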
@@ -2017,10 +1940,8 @@
       ConcurrentMarkSweepThread::CMS_cms_wants_token);
     // Get a possibly blocked foreground thread going
     CGC_lock->notify();
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
-        p2i(Thread::current()), _collectorState);
-    }
+    log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
+                         p2i(Thread::current()), _collectorState);
     while (_foregroundGCIsActive) {
       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
     }
@@ -2029,10 +1950,8 @@
     ConcurrentMarkSweepThread::clear_CMS_flag(
       ConcurrentMarkSweepThread::CMS_cms_wants_token);
   }
-  if (TraceCMSState) {
-    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
-      p2i(Thread::current()), _collectorState);
-  }
+  log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
+                       p2i(Thread::current()), _collectorState);
   return res;
 }
 
@@ -2129,11 +2048,8 @@
   NOT_PRODUCT(
     assert(_numObjectsPromoted == 0, "check");
     assert(_numWordsPromoted   == 0, "check");
-    if (Verbose && PrintGC) {
-      gclog_or_tty->print("Allocated " SIZE_FORMAT " objects, "
-                          SIZE_FORMAT " bytes concurrently",
-      _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
-    }
+    log_develop_trace(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently",
+                                 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
     _numObjectsAllocated = 0;
     _numWordsAllocated   = 0;
   )
@@ -2210,21 +2126,15 @@
   NOT_PRODUCT(
     assert(_numObjectsAllocated == 0, "check");
     assert(_numWordsAllocated == 0, "check");
-    if (Verbose && PrintGC) {
-      gclog_or_tty->print("Promoted " SIZE_FORMAT " objects, "
-                          SIZE_FORMAT " bytes",
-                 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
-    }
+    log_develop_trace(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
+                                     _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
     _numObjectsPromoted = 0;
     _numWordsPromoted   = 0;
   )
 
-  if (PrintGC && Verbose) {
-    // Call down the chain in contiguous_available needs the freelistLock
-    // so print this out before releasing the freeListLock.
-    gclog_or_tty->print(" Contiguous available " SIZE_FORMAT " bytes ",
-                        contiguous_available());
-  }
+  // Call down the chain in contiguous_available needs the freelistLock
+  // so print this out before releasing the freeListLock.
+  log_develop_trace(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available());
 }
 
 #ifndef PRODUCT
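
The NOT_PRODUCT counters above now log through log_develop_trace, the develop-only variant: in product builds the macro compiles to nothing, so the block needs no PrintGC/Verbose test. Shape of the idiom, condensed from the promotion hunk:

  NOT_PRODUCT(
    // Compiled out of product builds; in a debug build this emits only
    // when gc+promotion is enabled at trace level.
    log_develop_trace(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
                                     _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
  )
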
@@ -2308,8 +2218,10 @@
   bool do_bit(size_t offset) {
     HeapWord* addr = _marks->offsetToHeapWord(offset);
     if (!_marks->isMarked(addr)) {
-      oop(addr)->print_on(gclog_or_tty);
-      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
+      LogHandle(gc, verify) log;
+      ResourceMark rm;
+      oop(addr)->print_on(log.info_stream());
+      log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
       _failed = true;
     }
     return true;
@@ -2318,8 +2230,8 @@
   bool failed() { return _failed; }
 };
 
-bool CMSCollector::verify_after_remark(bool silent) {
-  if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
+bool CMSCollector::verify_after_remark() {
+  GCTraceTime(Info, gc, verify) tm("Verifying CMS Marking.");
   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
   static bool init = false;
 
@@ -2382,7 +2294,6 @@
     warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
             CMSRemarkVerifyVariant);
   }
-  if (!silent) gclog_or_tty->print(" done] ");
   return true;
 }
 
@@ -2434,8 +2345,10 @@
   VerifyMarkedClosure vcl(markBitMap());
   verification_mark_bm()->iterate(&vcl);
   if (vcl.failed()) {
-    gclog_or_tty->print("Verification failed");
-    gch->print_on(gclog_or_tty);
+    LogHandle(gc, verify) log;
+    log.info("Verification failed");
+    ResourceMark rm;
+    gch->print_on(log.info_stream());
     fatal("CMS: failed marking verification after remark");
   }
 }
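
Where the old code handed gclog_or_tty to print_on(), the replacement obtains an outputStream from the log handle instead. The ResourceMark matters here: the stream returned by info_stream() is resource-area allocated. Condensed from the verification-failure paths above:

  LogHandle(gc, verify) log;
  ResourceMark rm;                         // info_stream() allocates in the resource area
  oop(addr)->print_on(log.info_stream());  // object dump goes to the same gc+verify sink
  log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
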
@@ -2728,10 +2641,7 @@
   // a new CMS cycle.
   if (success) {
     set_expansion_cause(cause);
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("Expanded CMS gen for %s",
-        CMSExpansionCause::to_string(cause));
-    }
+    log_trace(gc)("Expanded CMS gen for %s",  CMSExpansionCause::to_string(cause));
   }
 }
 
@@ -2799,9 +2709,7 @@
 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
   assert_locked_or_safepoint(Heap_lock);
   assert_lock_strong(freelistLock());
-  if (PrintGCDetails && Verbose) {
-    warning("Shrinking of CMS not yet implemented");
-  }
+  log_trace(gc)("Shrinking of CMS not yet implemented");
   return;
 }
 
@@ -2811,63 +2719,35 @@
 class CMSPhaseAccounting: public StackObj {
  public:
   CMSPhaseAccounting(CMSCollector *collector,
-                     const char *phase,
-                     bool print_cr = true);
+                     const char *title);
   ~CMSPhaseAccounting();
 
  private:
   CMSCollector *_collector;
-  const char *_phase;
-  elapsedTimer _wallclock;
-  bool _print_cr;
+  const char *_title;
+  GCTraceConcTime(Info, gc) _trace_time;
 
  public:
   // Not MT-safe; so do not pass around these StackObj's
   // where they may be accessed by other threads.
   jlong wallclock_millis() {
-    assert(_wallclock.is_active(), "Wall clock should not stop");
-    _wallclock.stop();  // to record time
-    jlong ret = _wallclock.milliseconds();
-    _wallclock.start(); // restart
-    return ret;
+    return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time());
   }
 };
 
 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
-                                       const char *phase,
-                                       bool print_cr) :
-  _collector(collector), _phase(phase), _print_cr(print_cr) {
-
-  if (PrintCMSStatistics != 0) {
-    _collector->resetYields();
-  }
-  if (PrintGCDetails) {
-    gclog_or_tty->gclog_stamp();
-    gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
-      _collector->cmsGen()->short_name(), _phase);
-  }
+                                       const char *title) :
+  _collector(collector), _title(title), _trace_time(title) {
+
+  _collector->resetYields();
   _collector->resetTimer();
-  _wallclock.start();
   _collector->startTimer();
 }
 
 CMSPhaseAccounting::~CMSPhaseAccounting() {
-  assert(_wallclock.is_active(), "Wall clock should not have stopped");
   _collector->stopTimer();
-  _wallclock.stop();
-  if (PrintGCDetails) {
-    gclog_or_tty->gclog_stamp();
-    gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
-                 _collector->cmsGen()->short_name(),
-                 _phase, _collector->timerValue(), _wallclock.seconds());
-    if (_print_cr) {
-      gclog_or_tty->cr();
-    }
-    if (PrintCMSStatistics != 0) {
-      gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
-                    _collector->yields());
-    }
-  }
+  log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_seconds(_collector->timerTicks()));
+  log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
 }
 
 // CMS work
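
CMSPhaseAccounting is slimmed down above: the start/stop banner moves into an embedded GCTraceConcTime(Info, gc) member, and wall-clock time is derived from that member's start counter instead of a second, stop-and-restart elapsedTimer. A typical concurrent phase after this change is wrapped as in the sketch below (the same pairing appears in the mark, preclean, sweep and reset phases later in this file):

  GCTraceCPUTime tcpu;                                 // user/sys/real CPU summary for the phase
  CMSPhaseAccounting pa(this, "Concurrent Preclean");  // concurrent start/end banner + CMS timers
  // ... phase work; pa.wallclock_millis() stays available for abort
  // checks, as in the abortable preclean loop below.
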
@@ -2934,8 +2814,7 @@
   // CMS collection cycle.
   setup_cms_unloading_and_verification_state();
 
-  NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
-    PrintGCDetails && Verbose, true, _gc_timer_cm);)
+  GCTraceTime(Trace, gc) ts("checkpointRootsInitialWork", _gc_timer_cm);
 
   // Reset all the PLAB chunk arrays if necessary.
   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
@@ -2966,9 +2845,7 @@
   // the klasses. The claimed marks need to be cleared before marking starts.
   ClassLoaderDataGraph::clear_claimed_marks();
 
-  if (CMSPrintEdenSurvivorChunks) {
-    print_eden_and_survivor_chunk_arrays();
-  }
+  print_eden_and_survivor_chunk_arrays();
 
   {
 #if defined(COMPILER2) || INCLUDE_JVMCI
@@ -3039,17 +2916,15 @@
   // weak ref discovery by the young generation collector.
 
   CMSTokenSyncWithLocks ts(true, bitMapLock());
-  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-  CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
+  GCTraceCPUTime tcpu;
+  CMSPhaseAccounting pa(this, "Concrurrent Mark");
   bool res = markFromRootsWork();
   if (res) {
     _collectorState = Precleaning;
   } else { // We failed and a foreground collection wants to take over
     assert(_foregroundGCIsActive, "internal state inconsistency");
     assert(_restart_addr == NULL,  "foreground will restart from scratch");
-    if (PrintGCDetails) {
-      gclog_or_tty->print_cr("bailing out to foreground collection");
-    }
+    log_debug(gc)("bailing out to foreground collection");
   }
   verify_overflow_empty();
   return res;
@@ -3254,22 +3129,14 @@
   _timer.start();
   do_scan_and_mark(worker_id, _cms_space);
   _timer.stop();
-  if (PrintCMSStatistics != 0) {
-    gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
-      worker_id, _timer.seconds());
-      // XXX: need xxx/xxx type of notation, two timers
-  }
+  log_trace(gc, task)("Finished cms space scanning in %dth thread: %3.3f sec", worker_id, _timer.seconds());
 
   // ... do work stealing
   _timer.reset();
   _timer.start();
   do_work_steal(worker_id);
   _timer.stop();
-  if (PrintCMSStatistics != 0) {
-    gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
-      worker_id, _timer.seconds());
-      // XXX: need xxx/xxx type of notation, two timers
-  }
+  log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
   // Note that under the current task protocol, the
@@ -3484,10 +3351,7 @@
       if (simulate_overflow ||
           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
         // stack overflow
-        if (PrintCMSStatistics != 0) {
-          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
-                                 SIZE_FORMAT, _overflow_stack->capacity());
-        }
+        log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
         // We cannot assert that the overflow stack is full because
         // it may have been emptied since.
         assert(simulate_overflow ||
@@ -3572,9 +3436,7 @@
   _bit_map_lock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
   _collector->stopTimer();
-  if (PrintCMSStatistics != 0) {
-    _collector->incrementYields();
-  }
+  _collector->incrementYields();
 
   // It is possible for whichever thread initiated the yield request
   // not to get a chance to wake up and take the bitmap lock between
@@ -3736,8 +3598,8 @@
     } else {
       _start_sampling = false;
     }
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
+    GCTraceCPUTime tcpu;
+    CMSPhaseAccounting pa(this, "Concurrent Preclean");
     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
   }
   CMSTokenSync x(true); // is cms thread
@@ -3765,8 +3627,8 @@
   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
   // we will never do an actual abortable preclean cycle.
   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
+    GCTraceCPUTime tcpu;
+    CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean");
     // We need more smarts in the abortable preclean
     // loop below to deal with cases where allocation
     // in young gen is very very slow, and our precleaning
@@ -3788,15 +3650,11 @@
       // been at it for too long.
       if ((CMSMaxAbortablePrecleanLoops != 0) &&
           loops >= CMSMaxAbortablePrecleanLoops) {
-        if (PrintGCDetails) {
-          gclog_or_tty->print(" CMS: abort preclean due to loops ");
-        }
+        log_debug(gc)(" CMS: abort preclean due to loops ");
         break;
       }
       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
-        if (PrintGCDetails) {
-          gclog_or_tty->print(" CMS: abort preclean due to time ");
-        }
+        log_debug(gc)(" CMS: abort preclean due to time ");
         break;
       }
       // If we are doing little work each iteration, we should
@@ -3809,10 +3667,8 @@
         waited++;
       }
     }
-    if (PrintCMSStatistics > 0) {
-      gclog_or_tty->print(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards)] ",
-                          loops, waited, cumworkdone);
-    }
+    log_trace(gc)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards)] ",
+                               loops, waited, cumworkdone);
   }
   CMSTokenSync x(true); // is cms thread
   if (_collectorState != Idling) {
@@ -3956,9 +3812,7 @@
        numIter < CMSPrecleanIter;
        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
-    if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
-    }
+    log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
     // Either there are very few dirty cards, so re-mark
     // pause will be small anyway, or our pre-cleaning isn't
     // that much faster than the rate at which cards are being
@@ -3978,10 +3832,8 @@
 
   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
   cumNumCards += curNumCards;
-  if (PrintGCDetails && PrintCMSStatistics != 0) {
-    gclog_or_tty->print_cr(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
-                  curNumCards, cumNumCards, numIter);
-  }
+  log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
+                             curNumCards, cumNumCards, numIter);
   return cumNumCards;   // as a measure of useful work done
 }
 
@@ -4235,19 +4087,17 @@
   verify_work_stacks_empty();
   verify_overflow_empty();
 
-  if (PrintGCDetails) {
-    gclog_or_tty->print("[YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)]",
-                        _young_gen->used() / K,
-                        _young_gen->capacity() / K);
-  }
+  log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
+                _young_gen->used() / K, _young_gen->capacity() / K);
   {
     if (CMSScavengeBeforeRemark) {
       GenCollectedHeap* gch = GenCollectedHeap::heap();
       // Temporarily set flag to false, GCH->do_collection will
       // expect it to be false and set to true
       FlagSetting fl(gch->_is_gc_active, false);
-      NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
-        PrintGCDetails && Verbose, true, _gc_timer_cm);)
+
+      GCTraceTime(Trace, gc) tm("Pause Scavenge Before Remark", _gc_timer_cm);
+
       gch->do_collection(true,                      // full (i.e. force, see below)
                          false,                     // !clear_all_soft_refs
                          0,                         // size
@@ -4265,7 +4115,7 @@
 }
 
 void CMSCollector::checkpointRootsFinalWork() {
-  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
+  GCTraceTime(Trace, gc) tm("checkpointRootsFinalWork", _gc_timer_cm);
 
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
@@ -4297,9 +4147,7 @@
   // Update the saved marks which may affect the root scans.
   gch->save_marks();
 
-  if (CMSPrintEdenSurvivorChunks) {
-    print_eden_and_survivor_chunk_arrays();
-  }
+  print_eden_and_survivor_chunk_arrays();
 
   {
 #if defined(COMPILER2) || INCLUDE_JVMCI
@@ -4317,10 +4165,10 @@
     // the most recent young generation GC, minus those cleaned up by the
     // concurrent precleaning.
     if (CMSParallelRemarkEnabled) {
-      GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
+      GCTraceTime(Debug, gc) t("Rescan (parallel)", _gc_timer_cm);
       do_remark_parallel();
     } else {
-      GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, _gc_timer_cm);
+      GCTraceTime(Debug, gc) t("Rescan (non-parallel)", _gc_timer_cm);
       do_remark_non_parallel();
     }
   }
@@ -4328,7 +4176,7 @@
   verify_overflow_empty();
 
   {
-    NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
+    GCTraceTime(Trace, gc) ts("refProcessingWork", _gc_timer_cm);
     refProcessingWork();
   }
   verify_work_stacks_empty();
@@ -4347,13 +4195,8 @@
   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
   if (ser_ovflw > 0) {
-    if (PrintCMSStatistics != 0) {
-      gclog_or_tty->print_cr("Marking stack overflow (benign) "
-        "(pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT
-        ", kac_preclean=" SIZE_FORMAT ")",
-        _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
-        _ser_kac_ovflw, _ser_kac_preclean_ovflw);
-    }
+    log_trace(gc)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")",
+                         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw);
     _markStack.expand();
     _ser_pmc_remark_ovflw = 0;
     _ser_pmc_preclean_ovflw = 0;
@@ -4361,26 +4204,19 @@
     _ser_kac_ovflw = 0;
   }
   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
-    if (PrintCMSStatistics != 0) {
-      gclog_or_tty->print_cr("Work queue overflow (benign) "
-        "(pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
-        _par_pmc_remark_ovflw, _par_kac_ovflw);
-    }
-    _par_pmc_remark_ovflw = 0;
+     log_trace(gc)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
+                          _par_pmc_remark_ovflw, _par_kac_ovflw);
+     _par_pmc_remark_ovflw = 0;
     _par_kac_ovflw = 0;
   }
-  if (PrintCMSStatistics != 0) {
-     if (_markStack._hit_limit > 0) {
-       gclog_or_tty->print_cr(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
-                              _markStack._hit_limit);
-     }
-     if (_markStack._failed_double > 0) {
-       gclog_or_tty->print_cr(" (benign) Failed stack doubling (" SIZE_FORMAT "),"
-                              " current capacity " SIZE_FORMAT,
-                              _markStack._failed_double,
-                              _markStack.capacity());
-     }
-  }
+  if (_markStack._hit_limit > 0) {
+    log_trace(gc)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
+                  _markStack._hit_limit);
+  }
+  if (_markStack._failed_double > 0) {
+    log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT,
+                  _markStack._failed_double, _markStack.capacity());
+  }
   _markStack._hit_limit = 0;
   _markStack._failed_double = 0;
 
@@ -4414,11 +4250,7 @@
   {
     work_on_young_gen_roots(worker_id, &par_mri_cl);
     _timer.stop();
-    if (PrintCMSStatistics != 0) {
-      gclog_or_tty->print_cr(
-        "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
-        worker_id, _timer.seconds());
-    }
+    log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
   }
 
   // ---------- remaining roots --------------
@@ -4439,11 +4271,7 @@
          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   _timer.stop();
-  if (PrintCMSStatistics != 0) {
-    gclog_or_tty->print_cr(
-      "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
-      worker_id, _timer.seconds());
-  }
+  log_trace(gc, task)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
 }
 
 // Parallel remark task
@@ -4556,11 +4384,7 @@
   {
     work_on_young_gen_roots(worker_id, &par_mrias_cl);
     _timer.stop();
-    if (PrintCMSStatistics != 0) {
-      gclog_or_tty->print_cr(
-        "Finished young gen rescan work in %dth thread: %3.3f sec",
-        worker_id, _timer.seconds());
-    }
+    log_trace(gc, task)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
   }
 
   // ---------- remaining roots --------------
@@ -4579,11 +4403,7 @@
          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   _timer.stop();
-  if (PrintCMSStatistics != 0) {
-    gclog_or_tty->print_cr(
-      "Finished remaining root rescan work in %dth thread: %3.3f sec",
-      worker_id, _timer.seconds());
-  }
+  log_trace(gc, task)("Finished remaining root rescan work in %dth thread: %3.3f sec",  worker_id, _timer.seconds());
 
   // ---------- unhandled CLD scanning ----------
   if (worker_id == 0) { // Single threaded at the moment.
@@ -4602,11 +4422,7 @@
     ClassLoaderDataGraph::remember_new_clds(false);
 
     _timer.stop();
-    if (PrintCMSStatistics != 0) {
-      gclog_or_tty->print_cr(
-          "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
-          worker_id, _timer.seconds());
-    }
+    log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
   }
 
   // ---------- dirty klass scanning ----------
@@ -4619,11 +4435,7 @@
     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
 
     _timer.stop();
-    if (PrintCMSStatistics != 0) {
-      gclog_or_tty->print_cr(
-          "Finished dirty klass scanning work in %dth thread: %3.3f sec",
-          worker_id, _timer.seconds());
-    }
+    log_trace(gc, task)("Finished dirty klass scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
   }
 
   // We might have added oops to ClassLoaderData::_handles during the
@@ -4641,11 +4453,7 @@
   // "worker_id" is passed to select the task_queue for "worker_id"
   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
   _timer.stop();
-  if (PrintCMSStatistics != 0) {
-    gclog_or_tty->print_cr(
-      "Finished dirty card rescan work in %dth thread: %3.3f sec",
-      worker_id, _timer.seconds());
-  }
+  log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
 
   // ---------- steal work from other threads ...
   // ---------- ... and drain overflow list.
@@ -4653,11 +4461,7 @@
   _timer.start();
   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
   _timer.stop();
-  if (PrintCMSStatistics != 0) {
-    gclog_or_tty->print_cr(
-      "Finished work stealing in %dth thread: %3.3f sec",
-      worker_id, _timer.seconds());
-  }
+  log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
 }
 
 // Note that parameter "i" is not used.
@@ -4851,11 +4655,7 @@
         break;  // nirvana from the infinite cycle
     }
   }
-  NOT_PRODUCT(
-    if (PrintCMSStatistics != 0) {
-      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
-    }
-  )
+  log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
          "Else our work is not yet done");
 }
@@ -4952,9 +4752,7 @@
   }
   // We are all done; record the size of the _survivor_chunk_array
   _survivor_chunk_index = i; // exclusive: [0, i)
-  if (PrintCMSStatistics > 0) {
-    gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i);
-  }
+  log_trace(gc, survivor)(" (Survivor:" SIZE_FORMAT "chunks) ", i);
   // Verify that we used up all the recorded entries
   #ifdef ASSERT
     size_t total = 0;
@@ -4966,10 +4764,8 @@
     // Check that the merged array is in sorted order
     if (total > 0) {
       for (size_t i = 0; i < total - 1; i++) {
-        if (PrintCMSStatistics > 0) {
-          gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
-                              i, p2i(_survivor_chunk_array[i]));
-        }
+        log_develop_trace(gc, survivor)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
+                                     i, p2i(_survivor_chunk_array[i]));
         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
                "Not sorted");
       }
@@ -5103,7 +4899,7 @@
                               NULL,  // space is set further below
                               &_markBitMap, &_markStack, &mrias_cl);
   {
-    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime(Trace, gc) t("Grey Object Rescan", _gc_timer_cm);
     // Iterate over the dirty cards, setting the corresponding bits in the
     // mod union table.
     {
@@ -5128,10 +4924,7 @@
       _modUnionTable.dirty_range_iterate_clear(cms_span,
                                                &markFromDirtyCardsClosure);
       verify_work_stacks_empty();
-      if (PrintCMSStatistics != 0) {
-        gclog_or_tty->print(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ",
-          markFromDirtyCardsClosure.num_dirty_cards());
-      }
+      log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards());
     }
   }
   if (VerifyDuringGC &&
@@ -5140,7 +4933,7 @@
     Universe::verify();
   }
   {
-    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime(Trace, gc) t("Root Rescan", _gc_timer_cm);
 
     verify_work_stacks_empty();
 
@@ -5162,7 +4955,7 @@
   }
 
   {
-    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime(Trace, gc) t("Visit Unhandled CLDs", _gc_timer_cm);
 
     verify_work_stacks_empty();
 
@@ -5181,7 +4974,7 @@
   }
 
   {
-    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime(Trace, gc) t("Dirty Klass Scan", _gc_timer_cm);
 
     verify_work_stacks_empty();
 
@@ -5343,11 +5136,7 @@
       break;  // nirvana from the infinite cycle
     }
   }
-  NOT_PRODUCT(
-    if (PrintCMSStatistics != 0) {
-      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
-    }
-  )
+  log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
 }
 
 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
@@ -5389,7 +5178,7 @@
                                 _span, &_markBitMap, &_markStack,
                                 &cmsKeepAliveClosure, false /* !preclean */);
   {
-    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime(Debug, gc) t("Weak Refs Processing", _gc_timer_cm);
 
     ReferenceProcessorStats stats;
     if (rp->processing_is_mt()) {
@@ -5431,7 +5220,7 @@
 
   if (should_unload_classes()) {
     {
-      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
+      GCTraceTime(Debug, gc) t("Class Unloading", _gc_timer_cm);
 
       // Unload classes and purge the SystemDictionary.
       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
@@ -5444,13 +5233,13 @@
     }
 
     {
-      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
+      GCTraceTime(Debug, gc) t("Scrub Symbol Table", _gc_timer_cm);
       // Clean up unreferenced symbols in symbol table.
       SymbolTable::unlink();
     }
 
     {
-      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
+      GCTraceTime(Debug, gc) t("Scrub String Table", _gc_timer_cm);
       // Delete entries for dead interned strings.
       StringTable::unlink(&_is_alive_closure);
     }
@@ -5517,8 +5306,8 @@
   _intra_sweep_timer.reset();
   _intra_sweep_timer.start();
   {
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
+    GCTraceCPUTime tcpu;
+    CMSPhaseAccounting pa(this, "Concurrent Sweep");
     // First sweep the old gen
     {
       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
@@ -5601,13 +5390,8 @@
   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
   size_t nearLargestOffset =
     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
-  if (PrintFLSStatistics != 0) {
-    gclog_or_tty->print_cr(
-      "CMS: Large Block: " PTR_FORMAT ";"
-      " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
-      p2i(largestAddr),
-      p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
-  }
+  log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
+                          p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
 }
 
@@ -5701,8 +5485,8 @@
 
   // Clear the mark bitmap (no grey objects to start with)
   // for the next cycle.
-  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-  CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
+  GCTraceCPUTime tcpu;
+  CMSPhaseAccounting cmspa(this, "Concurrent Reset");
 
   HeapWord* curAddr = _markBitMap.startWord();
   while (curAddr < _markBitMap.endWord()) {
@@ -5718,9 +5502,7 @@
       bitMapLock()->unlock();
       ConcurrentMarkSweepThread::desynchronize(true);
       stopTimer();
-      if (PrintCMSStatistics != 0) {
-        incrementYields();
-      }
+      incrementYields();
 
       // See the comment in coordinator_yield()
       for (unsigned i = 0; i < CMSYieldSleepCount &&
@@ -5757,25 +5539,20 @@
 }
 
 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
-  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
+  GCTraceCPUTime tcpu;
   TraceCollectorStats tcs(counters());
 
   switch (op) {
     case CMS_op_checkpointRootsInitial: {
+      GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
       SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsInitial();
-      if (PrintGC) {
-        _cmsGen->printOccupancy("initial-mark");
-      }
       break;
     }
     case CMS_op_checkpointRootsFinal: {
+      GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
       SvcGCMarker sgcm(SvcGCMarker::OTHER);
       checkpointRootsFinal();
-      if (PrintGC) {
-        _cmsGen->printOccupancy("remark");
-      }
       break;
     }
     default:
@@ -5988,9 +5765,9 @@
 void CMSMarkStack::expand() {
   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
   if (_capacity == MarkStackSizeMax) {
-    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
+    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) {
       // We print a warning message only once per CMS cycle.
-      gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
+      log_debug(gc)(" (benign) Hit CMSMarkStack max size limit");
     }
     return;
   }
@@ -6010,12 +5787,11 @@
     _base = (oop*)(_virtual_space.low());
     _index = 0;
     _capacity = new_capacity;
-  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
+  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) {
     // Failed to double capacity, continue;
     // we print a detail message only once per CMS cycle.
-    gclog_or_tty->print(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to "
-            SIZE_FORMAT "K",
-            _capacity / K, new_capacity / K);
+    log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
+                        _capacity / K, new_capacity / K);
   }
 }
 
@@ -6092,8 +5868,10 @@
   if (_span.contains(addr)) {
     _verification_bm->mark(addr);
     if (!_cms_bm->isMarked(addr)) {
-      oop(addr)->print();
-      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
+      LogHandle(gc, verify) log;
+      ResourceMark rm;
+      oop(addr)->print_on(log.info_stream());
+      log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
       fatal("... aborting");
     }
   }
@@ -6189,9 +5967,7 @@
   _freelistLock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
   _collector->stopTimer();
-  if (PrintCMSStatistics != 0) {
-    _collector->incrementYields();
-  }
+  _collector->incrementYields();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0;
@@ -6347,9 +6123,7 @@
   _freelistLock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
   _collector->stopTimer();
-  if (PrintCMSStatistics != 0) {
-    _collector->incrementYields();
-  }
+  _collector->incrementYields();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
@@ -6416,9 +6190,7 @@
   _bit_map->lock()->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
   _collector->stopTimer();
-  if (PrintCMSStatistics != 0) {
-    _collector->incrementYields();
-  }
+  _collector->incrementYields();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
@@ -6571,9 +6343,7 @@
   _bitMap->lock()->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
   _collector->stopTimer();
-  if (PrintCMSStatistics != 0) {
-    _collector->incrementYields();
-  }
+  _collector->incrementYields();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
@@ -6879,17 +6649,15 @@
     // Oop lies in _span and isn't yet grey or black
     _verification_bm->mark(addr);            // now grey
     if (!_cms_bm->isMarked(addr)) {
-      oop(addr)->print();
-      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
-                             p2i(addr));
+      LogHandle(gc, verify) log;
+      ResourceMark rm;
+      oop(addr)->print_on(log.info_stream());
+      log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
       fatal("... aborting");
     }
 
     if (!_mark_stack->push(obj)) { // stack overflow
-      if (PrintCMSStatistics != 0) {
-        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
-                               SIZE_FORMAT, _mark_stack->capacity());
-      }
+      log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
       assert(_mark_stack->isFull(), "Else push should have succeeded");
       handle_stack_overflow(addr);
     }
@@ -6989,10 +6757,7 @@
         }
       )
       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
-        if (PrintCMSStatistics != 0) {
-          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
-                                 SIZE_FORMAT, _markStack->capacity());
-        }
+        log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity());
         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
         handle_stack_overflow(addr);
       }
@@ -7041,10 +6806,7 @@
     if (simulate_overflow ||
         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
       // stack overflow
-      if (PrintCMSStatistics != 0) {
-        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
-                               SIZE_FORMAT, _overflow_stack->capacity());
-      }
+      log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
       // We cannot assert that the overflow stack is full because
       // it may have been emptied since.
       assert(simulate_overflow ||
@@ -7206,9 +6968,7 @@
   ConcurrentMarkSweepThread::desynchronize(true);
 
   _collector->stopTimer();
-  if (PrintCMSStatistics != 0) {
-    _collector->incrementYields();
-  }
+  _collector->incrementYields();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
@@ -7239,10 +6999,7 @@
   // However, that would be too strong in one case -- the last
   // partition ends at _unallocated_block which, in general, can be
   // an arbitrary boundary, not necessarily card aligned.
-  if (PrintCMSStatistics != 0) {
-    _num_dirty_cards +=
-         mr.word_size()/CardTableModRefBS::card_size_in_words;
-  }
+  _num_dirty_cards += mr.word_size()/CardTableModRefBS::card_size_in_words;
   _space->object_iterate_mem(mr, &_scan_cl);
 }
 
@@ -7275,10 +7032,8 @@
   )
   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
          "sweep _limit out of bounds");
-  if (CMSTraceSweeper) {
-    gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
-                        p2i(_limit));
-  }
+  log_develop_trace(gc, sweep)("====================");
+  log_develop_trace(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit));
 }
 
 void SweepClosure::print_on(outputStream* st) const {
@@ -7305,42 +7060,32 @@
     print();
     ShouldNotReachHere();
   }
-  if (Verbose && PrintGC) {
-    gclog_or_tty->print("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
-                        _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
-    gclog_or_tty->print_cr("\nLive " SIZE_FORMAT " objects,  "
-                           SIZE_FORMAT " bytes  "
-      "Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
-      _numObjectsLive, _numWordsLive*sizeof(HeapWord),
-      _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
-    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
-                        * sizeof(HeapWord);
-    gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
-
-    if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
-      size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
-      size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
-      size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
-      gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returned_bytes);
-      gclog_or_tty->print("   Indexed List Returned " SIZE_FORMAT " bytes",
-        indexListReturnedBytes);
-      gclog_or_tty->print_cr("        Dictionary Returned " SIZE_FORMAT " bytes",
-        dict_returned_bytes);
-    }
-  }
-  if (CMSTraceSweeper) {
-    gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
-                           p2i(_limit));
-  }
+
+  if (log_is_enabled(Debug, gc, sweep)) {
+    log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
+                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
+    log_debug(gc, sweep)("Live " SIZE_FORMAT " objects,  " SIZE_FORMAT " bytes  Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
+                         _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
+    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
+    log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
+  }
+
+  if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) {
+    size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
+    size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
+    size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
+    log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes   Indexed List Returned " SIZE_FORMAT " bytes        Dictionary Returned " SIZE_FORMAT " bytes",
+                         returned_bytes, indexListReturnedBytes, dict_returned_bytes);
+  }
+  log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit));
+  log_develop_trace(gc, sweep)("================");
 }
 #endif  // PRODUCT
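
When several related lines share one level, the sweep summary above guards the whole group with log_is_enabled(...) rather than taking a handle, which also keeps the totalBytes computation off the default path. Shape of the guard, taken from the hunk:

  if (log_is_enabled(Debug, gc, sweep)) {
    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
    log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
  }
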
 
 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
     bool freeRangeInFreeLists) {
-  if (CMSTraceSweeper) {
-    gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n",
-               p2i(freeFinger), freeRangeInFreeLists);
-  }
+  log_develop_trace(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)",
+                               p2i(freeFinger), freeRangeInFreeLists);
   assert(!inFreeRange(), "Trampling existing free range");
   set_inFreeRange(true);
   set_lastFreeRangeCoalesced(false);
@@ -7406,13 +7151,9 @@
              "freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger()));
       flush_cur_free_chunk(freeFinger(),
                            pointer_delta(addr, freeFinger()));
-      if (CMSTraceSweeper) {
-        gclog_or_tty->print("Sweep: last chunk: ");
-        gclog_or_tty->print("put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") "
-                   "[coalesced:%d]\n",
-                   p2i(freeFinger()), pointer_delta(addr, freeFinger()),
-                   lastFreeRangeCoalesced() ? 1 : 0);
-      }
+      log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]",
+                                   p2i(freeFinger()), pointer_delta(addr, freeFinger()),
+                                   lastFreeRangeCoalesced() ? 1 : 0);
     }
 
     // help the iterator loop finish
@@ -7623,9 +7364,7 @@
     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
   }
 
-  if (CMSTraceSweeper) {
-    gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
-  }
+  log_develop_trace(gc, sweep)("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
 
   HeapWord* const fc_addr = (HeapWord*) fc;
 
@@ -7726,16 +7465,12 @@
          p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
   if (eob >= _limit) {
     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
-    if (CMSTraceSweeper) {
-      gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
-                             "[" PTR_FORMAT "," PTR_FORMAT ") in space "
-                             "[" PTR_FORMAT "," PTR_FORMAT ")",
-                             p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
-    }
+    log_develop_trace(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block "
+                                 "[" PTR_FORMAT "," PTR_FORMAT ") in space "
+                                 "[" PTR_FORMAT "," PTR_FORMAT ")",
+                                 p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
     // Return the storage we are tracking back into the free lists.
-    if (CMSTraceSweeper) {
-      gclog_or_tty->print_cr("Flushing ... ");
-    }
+    log_develop_trace(gc, sweep)("Flushing ... ");
     assert(freeFinger() < eob, "Error");
     flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
   }
@@ -7752,10 +7487,7 @@
       assert(!_sp->verify_chunk_in_free_list(fc),
              "chunk should not be in free lists yet");
     }
-    if (CMSTraceSweeper) {
-      gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
-                    p2i(chunk), size);
-    }
+    log_develop_trace(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", p2i(chunk), size);
     // A new free range is going to be starting.  The current
     // free range has not been added to the free lists yet or
     // was removed so add it back.
@@ -7766,8 +7498,8 @@
     }
     _sp->addChunkAndRepairOffsetTable(chunk, size,
             lastFreeRangeCoalesced());
-  } else if (CMSTraceSweeper) {
-    gclog_or_tty->print_cr("Already in free list: nothing to flush");
+  } else {
+    log_develop_trace(gc, sweep)("Already in free list: nothing to flush");
   }
   set_inFreeRange(false);
   set_freeRangeInFreeLists(false);
@@ -7798,9 +7530,7 @@
   _freelistLock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
   _collector->stopTimer();
-  if (PrintCMSStatistics != 0) {
-    _collector->incrementYields();
-  }
+  _collector->incrementYields();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
@@ -7825,10 +7555,8 @@
 #endif
 
 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
-  if (CMSTraceSweeper) {
-    gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
-                           p2i(fc), fc->size());
-  }
+  log_develop_trace(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
+                               p2i(fc), fc->size());
 }
 
 // CMSIsAliveClosure
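
Taken together, the .cpp changes retire the old ad-hoc switches in favor of -Xlog tag sets. A rough mapping, as applied in this file (illustrative, not exhaustive):

  // Verbose && PrintGCDetails   -> log_trace(gc)(...)
  // PrintGCDetails              -> log_debug(gc)(...)
  // TraceCMSState               -> log_debug(gc, state)(...)
  // PrintCMSStatistics != 0     -> log_trace(gc)(...), log_trace(gc, task)(...), etc.
  // CMSTraceSweeper             -> log_develop_trace(gc, sweep)(...)
  // PrintGC && Verbose          -> log_info(gc, heap)(...) for heap-change summaries
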
--- a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -35,6 +35,7 @@
 #include "gc/shared/generationCounters.hpp"
 #include "gc/shared/space.hpp"
 #include "gc/shared/taskqueue.hpp"
+#include "logging/log.hpp"
 #include "memory/freeBlockDictionary.hpp"
 #include "memory/iterator.hpp"
 #include "memory/virtualspace.hpp"
@@ -308,9 +309,8 @@
 
   void reset() {
     _index = 0;
-    if (_overflows > 0 && PrintCMSStatistics > 1) {
-      warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
-              _capacity, _overflows);
+    if (_overflows > 0) {
+      log_trace(gc)("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times", _capacity, _overflows);
     }
     _overflows = 0;
   }
@@ -451,7 +451,7 @@
 
   // Debugging.
   void print_on(outputStream* st) const PRODUCT_RETURN;
-  void print() const { print_on(gclog_or_tty); }
+  void print() const { print_on(tty); }
 };
 
 // A closure related to weak references processing which
@@ -935,7 +935,7 @@
   void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
   void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
   void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
-  double  timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }
+  jlong   timerTicks() { assert(!_timer.is_active(), "Error"); return _timer.ticks(); }
 
   int  yields()          { return _numYields; }
   void resetYields()     { _numYields = 0;    }
@@ -961,7 +961,7 @@
 
   // Debugging
   void verify();
-  bool verify_after_remark(bool silent = VerifySilently);
+  bool verify_after_remark();
   void verify_ok_to_terminate() const PRODUCT_RETURN;
   void verify_work_stacks_empty() const PRODUCT_RETURN;
   void verify_overflow_empty() const PRODUCT_RETURN;
@@ -1234,7 +1234,6 @@
   const char* name() const;
   virtual const char* short_name() const { return "CMS"; }
   void        print() const;
-  void printOccupancy(const char* s);
 
   // Resize the generation after a compacting GC.  The
   // generation can be treated as a contiguous space
--- a/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -144,9 +144,6 @@
     _cmst = NULL;
     Terminator_lock->notify();
   }
-
-  // Thread destructor usually does this..
-  ThreadLocalStorage::set_thread(NULL);
 }
 
 #ifndef PRODUCT
--- a/src/share/vm/gc/cms/parNewGeneration.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/cms/parNewGeneration.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -34,7 +34,7 @@
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/generation.hpp"
@@ -45,6 +45,7 @@
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "gc/shared/workgroup.hpp"
+#include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
@@ -270,9 +271,9 @@
 }
 
 void ParScanThreadState::print_promotion_failure_size() {
-  if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
-    gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
-                        _thread_num, _promotion_failed_info.first_size());
+  if (_promotion_failed_info.has_failed()) {
+    log_trace(gc, promotion)(" (%d: promotion failure size = " SIZE_FORMAT ") ",
+                             _thread_num, _promotion_failed_info.first_size());
   }
 }
 
@@ -298,11 +299,11 @@
 
   #if TASKQUEUE_STATS
   static void
-    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
-  void print_termination_stats(outputStream* const st = gclog_or_tty);
+    print_termination_stats_hdr(outputStream* const st);
+  void print_termination_stats();
   static void
-    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
-  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
+    print_taskqueue_stats_hdr(outputStream* const st);
+  void print_taskqueue_stats();
   void reset_stats();
   #endif // TASKQUEUE_STATS
 
@@ -383,7 +384,15 @@
   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
 }
 
-void ParScanThreadStateSet::print_termination_stats(outputStream* const st) {
+void ParScanThreadStateSet::print_termination_stats() {
+  LogHandle(gc, task, stats) log;
+  if (!log.is_debug()) {
+    return;
+  }
+
+  ResourceMark rm;
+  outputStream* st = log.debug_stream();
+
   print_termination_stats_hdr(st);
 
   for (int i = 0; i < length(); ++i) {
@@ -404,7 +413,13 @@
   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
 }
 
-void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) {
+void ParScanThreadStateSet::print_taskqueue_stats() {
+  if (!develop_log_is_enabled(Trace, gc, task, stats)) {
+    return;
+  }
+  LogHandle(gc, task, stats) log;
+  ResourceMark rm;
+  outputStream* st = log.trace_stream();
   print_taskqueue_stats_hdr(st);
 
   TaskQueueStats totals;
@@ -823,9 +838,7 @@
   _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
   remove_forwarding_pointers();
-  if (PrintGCDetails) {
-    gclog_or_tty->print(" (promotion failed)");
-  }
+  log_info(gc, promotion)("Promotion failed");
   // All the spaces are in play for mark-sweep.
   swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
   from()->set_next_compaction_space(to());
@@ -882,9 +895,7 @@
     size_policy->minor_collection_begin();
   }
 
-  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
-  // Capture heap used before collection (for printing).
-  size_t gch_prev_used = gch->used();
+  GCTraceTime(Trace, gc) t1("ParNew", NULL, gch->gc_cause());
 
   age_table()->clear();
   to()->clear(SpaceDecorator::Mangle);
@@ -990,12 +1001,8 @@
     plab_stats()->adjust_desired_plab_sz();
   }
 
-  if (PrintGC && !PrintGCDetails) {
-    gch->print_heap_change(gch_prev_used);
-  }
-
-  TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
-  TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());
+  TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
+  TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
 
   if (UseAdaptiveSizePolicy) {
     size_policy->minor_collection_end(gch->gc_cause());
@@ -1150,11 +1157,9 @@
 
   // This code must come after the CAS test, or it will print incorrect
   // information.
-  if (TraceScavenge) {
-    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
-       is_in_reserved(new_obj) ? "copying" : "tenuring",
-       new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());
-  }
+  log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
+                                  is_in_reserved(new_obj) ? "copying" : "tenuring",
+                                  new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());
 
   if (forward_ptr == NULL) {
     oop obj_to_push = new_obj;
@@ -1176,9 +1181,7 @@
     )
     if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
       // Add stats for overflow pushes.
-      if (Verbose && PrintGCDetails) {
-        gclog_or_tty->print("queue overflow!\n");
-      }
+      log_develop_trace(gc)("Queue Overflow");
       push_on_overflow_list(old, par_scan_state);
       TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
     }
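Note how print_termination_stats() and print_taskqueue_stats() lose their outputStream parameters above: the enablement test and the stream now live inside the callee, so call sites like the TASKQUEUE_STATS_ONLY lines no longer test PrintTerminationStats or PrintTaskqueue. A hedged sketch of the guard-then-stream shape, using the LogHandle API exactly as it appears in the hunks (the per-thread rows are elided):

    void ParScanThreadStateSet::print_termination_stats() {
      LogHandle(gc, task, stats) log;     // handle bound to the gc+task+stats tag set
      if (!log.is_debug()) {
        return;                           // cheap exit when debug level is off
      }
      ResourceMark rm;                    // debug_stream() allocates in the resource area
      outputStream* st = log.debug_stream();
      print_termination_stats_hdr(st);    // header and rows share one stream
      // ... per-thread rows as in the hunk above ...
    }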
--- a/src/share/vm/gc/cms/parOopClosures.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/cms/parOopClosures.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -30,6 +30,7 @@
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
+#include "logging/log.hpp"
 
 template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
   assert (!oopDesc::is_null(*p), "null weak reference?");
@@ -108,11 +109,9 @@
       if (m->is_marked()) { // Contains forwarding pointer.
         new_obj = ParNewGeneration::real_forwardee(obj);
         oopDesc::encode_store_heap_oop_not_null(p, new_obj);
-        if (TraceScavenge) {
-          gclog_or_tty->print_cr("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
-             "forwarded ",
-             new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size());
-        }
+        log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
+                                        "forwarded ",
+                                        new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size());
       } else {
         size_t obj_sz = obj->size_given_klass(objK);
         new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
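The scavenge tracing above uses log_develop_trace, the develop-build variant of the macros: per the develop_log_is_enabled guard seen earlier in this merge, the intent is that product builds compile such calls away entirely. A hedged illustration (the exact product-build expansion in logging/log.hpp is simplified here):

    // In a non-product build this behaves like log_trace(gc, scavenge); in a
    // product build the macro is a no-op, so neither the format string nor
    // the p2i(...) argument expressions survive compilation.
    log_develop_trace(gc, scavenge)("{forwarded " PTR_FORMAT " -> " PTR_FORMAT "}",
                                    p2i((void*)obj), p2i((void*)new_obj));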
--- a/src/share/vm/gc/cms/promotionInfo.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/cms/promotionInfo.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -132,7 +132,7 @@
   }
 
   void print_on(outputStream* st) const;
-  void print() const { print_on(gclog_or_tty); }
+  void print() const { print_on(tty); }
 };
 
 class PromotionInfo VALUE_OBJ_CLASS_SPEC {
--- a/src/share/vm/gc/cms/vmCMSOperations.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/cms/vmCMSOperations.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -28,7 +28,7 @@
 #include "gc/cms/vmCMSOperations.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/os.hpp"
@@ -58,7 +58,7 @@
 void VM_CMS_Operation::verify_before_gc() {
   if (VerifyBeforeGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm);
+    GCTraceTime(Info, gc, verify) tm("Verify Before", _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@@ -70,7 +70,7 @@
 void VM_CMS_Operation::verify_after_gc() {
   if (VerifyAfterGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm);
+    GCTraceTime(Info, gc, verify) tm("Verify After", _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
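GCTraceTime turns from a plain class into a macro parameterized by level and tag set, which is why these files now include gcTraceTime.inline.hpp instead of gcTraceTime.hpp. A hedged usage sketch following the two calls above (the verify call is a stand-in for the guarded body):

    {
      // Times the scope and logs the phase name with its elapsed time at
      // Info level on the gc+verify tags; the phase is also reported to
      // the passed GCTimer for event tracing.
      GCTraceTime(Info, gc, verify) tm("Verify Before", _collector->_gc_timer_cm);
      Universe::verify();   // stand-in for the work being timed
    }   // destructor emits the timing line when the scope closes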
--- a/src/share/vm/gc/g1/collectionSetChooser.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/collectionSetChooser.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -26,7 +26,6 @@
 #include "gc/g1/collectionSetChooser.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
-#include "gc/g1/g1ErgoVerbose.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "runtime/atomic.inline.hpp"
 
@@ -136,8 +135,8 @@
     assert(regions_at(i) != NULL, "Should be true by sorting!");
   }
 #endif // ASSERT
-  if (G1PrintRegionLivenessInfo) {
-    G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
+  if (log_is_enabled(Trace, gc, liveness)) {
+    G1PrintRegionLivenessInfoClosure cl("Post-Sorting");
     for (uint i = 0; i < _end; ++i) {
       HeapRegion* r = regions_at(i);
       cl.doHeapRegion(r);
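Where the old code tested the G1PrintRegionLivenessInfo flag, the new code asks the log system directly before building the printing closure. That matters because the closure is RAII-style output: its constructor prints the table header and its destructor the footer. A minimal sketch of the guarded use, as in the hunk above:

    // Skipped entirely unless gc+liveness is enabled at trace level
    // (e.g. -Xlog:gc+liveness=trace on the command line).
    if (log_is_enabled(Trace, gc, liveness)) {
      G1PrintRegionLivenessInfoClosure cl("Post-Sorting");  // ctor prints header
      for (uint i = 0; i < _end; ++i) {
        cl.doHeapRegion(regions_at(i));                     // one row per region
      }
    }                                                       // dtor prints footer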
--- a/src/share/vm/gc/g1/concurrentG1RefineThread.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/concurrentG1RefineThread.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -28,6 +28,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
+#include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -88,11 +89,8 @@
 void ConcurrentG1RefineThread::activate() {
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
   if (!is_primary()) {
-    if (G1TraceConcRefinement) {
-      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-      gclog_or_tty->print_cr("G1-Refine-activated worker %d, on threshold %d, current %d",
-                             _worker_id, _threshold, (int)dcqs.completed_buffers_num());
-    }
+    log_debug(gc, refine)("G1-Refine-activated worker %d, on threshold %d, current %d",
+                          _worker_id, _threshold, JavaThread::dirty_card_queue_set().completed_buffers_num());
     set_active(true);
   } else {
     DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
@@ -104,11 +102,8 @@
 void ConcurrentG1RefineThread::deactivate() {
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
   if (!is_primary()) {
-    if (G1TraceConcRefinement) {
-      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-      gclog_or_tty->print_cr("G1-Refine-deactivated worker %d, off threshold %d, current %d",
-                             _worker_id, _deactivation_threshold, (int)dcqs.completed_buffers_num());
-    }
+    log_debug(gc, refine)("G1-Refine-deactivated worker %d, off threshold %d, current %d",
+                          _worker_id, _deactivation_threshold, JavaThread::dirty_card_queue_set().completed_buffers_num());
     set_active(false);
   } else {
     DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
@@ -174,9 +169,7 @@
     }
   }
 
-  if (G1TraceConcRefinement) {
-    gclog_or_tty->print_cr("G1-Refine-stop");
-  }
+  log_debug(gc, refine)("G1-Refine-stop");
 }
 
 void ConcurrentG1RefineThread::stop() {
@@ -199,4 +192,4 @@
 void ConcurrentG1RefineThread::stop_service() {
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
   _monitor->notify();
-}
\ No newline at end of file
+}
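The refinement thread's chatter moves from the G1TraceConcRefinement flag to the gc+refine tag set at debug level, so each GC subsystem can now be selected independently at run time. Illustrative selections (syntax as in jdk9's -Xlog; treat the exact spellings as assumptions):

    // -Xlog:gc+refine=debug     -> the activate/deactivate/stop messages above
    // -Xlog:gc+liveness=trace   -> region liveness tables
    // -Xlog:gc*=trace           -> every tag set containing 'gc', at trace
    log_debug(gc, refine)("G1-Refine-activated worker %d, on threshold %d, current %d",
                          _worker_id, _threshold,
                          JavaThread::dirty_card_queue_set().completed_buffers_num());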
--- a/src/share/vm/gc/g1/concurrentMark.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/concurrentMark.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -31,8 +31,6 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
-#include "gc/g1/g1ErgoVerbose.hpp"
-#include "gc/g1/g1Log.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
 #include "gc/g1/g1RemSet.hpp"
 #include "gc/g1/g1StringDedup.hpp"
@@ -44,12 +42,13 @@
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "gc/shared/vmGCOperations.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
@@ -232,9 +231,7 @@
   // Clear expansion flag
   _should_expand = false;
   if (_capacity == (jint) MarkStackSizeMax) {
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
-    }
+    log_trace(gc)("(benign) Can't expand marking stack capacity, at max size limit");
     return;
   }
   // Double capacity if possible
@@ -254,12 +251,9 @@
     _index = 0;
     _capacity = new_capacity;
   } else {
-    if (PrintGCDetails && Verbose) {
-      // Failed to double capacity, continue;
-      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
-                          SIZE_FORMAT "K to " SIZE_FORMAT "K",
-                          _capacity / K, new_capacity / K);
-    }
+    // Failed to double capacity; continue with the current capacity.
+    log_trace(gc)("(benign) Failed to expand marking stack capacity from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
+                  _capacity / K, new_capacity / K);
   }
 }
 
@@ -848,10 +842,7 @@
       // marking.
       reset_marking_state(true /* clear_overflow */);
 
-      if (G1Log::fine()) {
-        gclog_or_tty->gclog_stamp();
-        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
-      }
+      log_info(gc)("Concurrent Mark reset for overflow");
     }
   }
 
@@ -987,8 +978,6 @@
 };
 
 void ConcurrentMark::scanRootRegions() {
-  double scan_start = os::elapsedTime();
-
   // Start of concurrent marking.
   ClassLoaderDataGraph::clear_claimed_marks();
 
@@ -996,10 +985,7 @@
   // at least one root region to scan. So, if it's false, we
   // should not attempt to do any further work.
   if (root_regions()->scan_in_progress()) {
-    if (G1Log::fine()) {
-      gclog_or_tty->gclog_stamp();
-      gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
-    }
+    GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan");
 
     _parallel_marking_threads = calc_parallel_marking_threads();
     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
@@ -1010,11 +996,6 @@
     _parallel_workers->set_active_workers(active_workers);
     _parallel_workers->run_task(&task);
 
-    if (G1Log::fine()) {
-      gclog_or_tty->gclog_stamp();
-      gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
-    }
-
     // It's possible that has_aborted() is true here without actually
     // aborting the survivor scan earlier. This is OK as it's
     // mainly used for sanity checking.
@@ -1049,22 +1030,6 @@
   print_stats();
 }
 
-// Helper class to get rid of some boilerplate code.
-class G1CMTraceTime : public StackObj {
-  GCTraceTimeImpl _gc_trace_time;
-  static bool doit_and_prepend(bool doit) {
-    if (doit) {
-      gclog_or_tty->put(' ');
-    }
-    return doit;
-  }
-
- public:
-  G1CMTraceTime(const char* title, bool doit)
-    : _gc_trace_time(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm()) {
-  }
-};
-
 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
@@ -1083,8 +1048,7 @@
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     g1h->prepare_for_verify();
-    Universe::verify(VerifyOption_G1UsePrevMarking,
-                     " VerifyDuringGC:(before)");
+    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
   }
   g1h->check_bitmaps("Remark Start");
 
@@ -1102,16 +1066,13 @@
   if (has_overflown()) {
     // Oops.  We overflowed.  Restart concurrent marking.
     _restart_for_overflow = true;
-    if (G1TraceMarkStackOverflow) {
-      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
-    }
+    log_develop_trace(gc)("Remark led to restart for overflow.");
 
     // Verify the heap w.r.t. the previous marking bitmap.
     if (VerifyDuringGC) {
       HandleMark hm;  // handle scope
       g1h->prepare_for_verify();
-      Universe::verify(VerifyOption_G1UsePrevMarking,
-                       " VerifyDuringGC:(overflow)");
+      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
     }
 
     // Clear the marking state because we will be restarting
@@ -1119,7 +1080,7 @@
     reset_marking_state();
   } else {
     {
-      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());
+      GCTraceTime(Debug, gc) trace("GC Aggregate Data", g1h->gc_timer_cm());
 
       // Aggregate the per-task counting data that we have accumulated
       // while marking.
@@ -1136,8 +1097,7 @@
     if (VerifyDuringGC) {
       HandleMark hm;  // handle scope
       g1h->prepare_for_verify();
-      Universe::verify(VerifyOption_G1UseNextMarking,
-                       " VerifyDuringGC:(after)");
+      Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
     }
     g1h->check_bitmaps("Remark End");
     assert(!restart_for_overflow(), "sanity");
@@ -1523,8 +1483,8 @@
   G1CollectedHeap* _g1;
   size_t _freed_bytes;
   FreeRegionList* _local_cleanup_list;
-  HeapRegionSetCount _old_regions_removed;
-  HeapRegionSetCount _humongous_regions_removed;
+  uint _old_regions_removed;
+  uint _humongous_regions_removed;
   HRRSCleanupTask* _hrrs_cleanup_task;
 
 public:
@@ -1534,13 +1494,13 @@
     _g1(g1),
     _freed_bytes(0),
     _local_cleanup_list(local_cleanup_list),
-    _old_regions_removed(),
-    _humongous_regions_removed(),
+    _old_regions_removed(0),
+    _humongous_regions_removed(0),
     _hrrs_cleanup_task(hrrs_cleanup_task) { }
 
   size_t freed_bytes() { return _freed_bytes; }
-  const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
-  const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
+  uint old_regions_removed() { return _old_regions_removed; }
+  uint humongous_regions_removed() { return _humongous_regions_removed; }
 
   bool doHeapRegion(HeapRegion *hr) {
     if (hr->is_archive()) {
@@ -1555,10 +1515,10 @@
       _freed_bytes += hr->used();
       hr->set_containing_set(NULL);
       if (hr->is_humongous()) {
-        _humongous_regions_removed.increment(1u, hr->capacity());
+        _humongous_regions_removed++;
         _g1->free_humongous_region(hr, _local_cleanup_list, true);
       } else {
-        _old_regions_removed.increment(1u, hr->capacity());
+        _old_regions_removed++;
         _g1->free_region(hr, _local_cleanup_list, true);
       }
     } else {
@@ -1656,8 +1616,7 @@
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     g1h->prepare_for_verify();
-    Universe::verify(VerifyOption_G1UsePrevMarking,
-                     " VerifyDuringGC:(before)");
+    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
   }
   g1h->check_bitmaps("Cleanup Start");
 
@@ -1699,8 +1658,8 @@
   double this_final_counting_time = (count_end - start);
   _total_counting_time += this_final_counting_time;
 
-  if (G1PrintRegionLivenessInfo) {
-    G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
+  if (log_is_enabled(Trace, gc, liveness)) {
+    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
     _g1h->heap_region_iterate(&cl);
   }
 
@@ -1743,10 +1702,6 @@
   double end = os::elapsedTime();
   _cleanup_times.add((end - start) * 1000.0);
 
-  if (G1Log::fine()) {
-    g1h->g1_policy()->print_heap_transition(start_used_bytes);
-  }
-
   // Clean up will have freed any regions completely full of garbage.
   // Update the soft reference policy with the new heap occupancy.
   Universe::update_heap_info_at_gc();
@@ -1754,8 +1709,7 @@
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     g1h->prepare_for_verify();
-    Universe::verify(VerifyOption_G1UsePrevMarking,
-                     " VerifyDuringGC:(after)");
+    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
   }
 
   g1h->check_bitmaps("Cleanup End");
@@ -1788,11 +1742,9 @@
   _cleanup_list.verify_optional();
   FreeRegionList tmp_free_list("Tmp Free List");
 
-  if (G1ConcRegionFreeingVerbose) {
-    gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
-                           "cleanup list has %u entries",
-                           _cleanup_list.length());
-  }
+  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
+                                  "cleanup list has %u entries",
+                                  _cleanup_list.length());
 
   // No one else should be accessing the _cleanup_list at this point,
   // so it is not necessary to take any locks
@@ -1810,13 +1762,11 @@
     // region from the _cleanup_list).
     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
         _cleanup_list.is_empty()) {
-      if (G1ConcRegionFreeingVerbose) {
-        gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
-                               "appending %u entries to the secondary_free_list, "
-                               "cleanup list still has %u entries",
-                               tmp_free_list.length(),
-                               _cleanup_list.length());
-      }
+      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
+                                      "appending %u entries to the secondary_free_list, "
+                                      "cleanup list still has %u entries",
+                                      tmp_free_list.length(),
+                                      _cleanup_list.length());
 
       {
         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
@@ -2073,7 +2023,7 @@
   // Inner scope to exclude the cleaning of the string and symbol
   // tables from the displayed time.
   {
-    G1CMTraceTime t("GC ref-proc", G1Log::finer());
+    GCTraceTime(Debug, gc) trace("GC Ref Proc", g1h->gc_timer_cm());
 
     ReferenceProcessor* rp = g1h->ref_processor_cm();
 
@@ -2163,24 +2113,24 @@
 
   // Unload Klasses, String, Symbols, Code Cache, etc.
   {
-    G1CMTraceTime trace("Unloading", G1Log::finer());
+    GCTraceTime(Debug, gc) trace("Unloading", g1h->gc_timer_cm());
 
     if (ClassUnloadingWithConcurrentMark) {
       bool purged_classes;
 
       {
-        G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
+        GCTraceTime(Trace, gc) trace("System Dictionary Unloading", g1h->gc_timer_cm());
         purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
       }
 
       {
-        G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
+        GCTraceTime(Trace, gc) trace("Parallel Unloading", g1h->gc_timer_cm());
         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
       }
     }
 
     if (G1StringDedup::is_enabled()) {
-      G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
+      GCTraceTime(Trace, gc) trace("String Deduplication Unlink", g1h->gc_timer_cm());
       G1StringDedup::unlink(&g1_is_alive);
     }
   }
@@ -2301,7 +2251,7 @@
   HandleMark   hm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-  G1CMTraceTime trace("Finalize Marking", G1Log::finer());
+  GCTraceTime(Debug, gc) trace("Finalize Marking", g1h->gc_timer_cm());
 
   g1h->ensure_parsability(false);
 
@@ -2614,12 +2564,13 @@
 }
 
 void ConcurrentMark::print_stats() {
-  if (G1MarkingVerboseLevel > 0) {
-    gclog_or_tty->print_cr("---------------------------------------------------------------------");
-    for (size_t i = 0; i < _active_tasks; ++i) {
-      _tasks[i]->print_stats();
-      gclog_or_tty->print_cr("---------------------------------------------------------------------");
-    }
+  if (!log_is_enabled(Debug, gc, stats)) {
+    return;
+  }
+  log_debug(gc, stats)("---------------------------------------------------------------------");
+  for (size_t i = 0; i < _active_tasks; ++i) {
+    _tasks[i]->print_stats();
+    log_debug(gc, stats)("---------------------------------------------------------------------");
   }
 }
 
@@ -2663,16 +2614,21 @@
 
 static void print_ms_time_info(const char* prefix, const char* name,
                                NumberSeq& ns) {
-  gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
+  log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
   if (ns.num() > 0) {
-    gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
+    log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
                            prefix, ns.sd(), ns.maximum());
   }
 }
 
 void ConcurrentMark::print_summary_info() {
-  gclog_or_tty->print_cr(" Concurrent marking:");
+  LogHandle(gc, marking) log;
+  if (!log.is_trace()) {
+    return;
+  }
+
+  log.trace(" Concurrent marking:");
   print_ms_time_info("  ", "init marks", _init_times);
   print_ms_time_info("  ", "remarks", _remark_times);
   {
@@ -2681,25 +2637,16 @@
 
   }
   print_ms_time_info("  ", "cleanups", _cleanup_times);
-  gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
-                         _total_counting_time,
-                         (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
-                          (double)_cleanup_times.num()
-                         : 0.0));
+  log.trace("    Final counting total time = %8.2f s (avg = %8.2f ms).",
+            _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
   if (G1ScrubRemSets) {
-    gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
-                           _total_rs_scrub_time,
-                           (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
-                            (double)_cleanup_times.num()
-                           : 0.0));
+    log.trace("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
+              _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
   }
-  gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
-                         (_init_times.sum() + _remark_times.sum() +
-                          _cleanup_times.sum())/1000.0);
-  gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
-                "(%8.2f s marking).",
-                cmThread()->vtime_accum(),
-                cmThread()->vtime_mark_accum());
+  log.trace("  Total stop_world time = %8.2f s.",
+            (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
+  log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
+            cmThread()->vtime_accum(), cmThread()->vtime_mark_accum());
 }
 
 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
@@ -3079,15 +3026,15 @@
 }
 
 void CMTask::print_stats() {
-  gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
-                         _worker_id, _calls);
-  gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
-                         _elapsed_time_ms, _termination_time_ms);
-  gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
-                         _step_times_ms.num(), _step_times_ms.avg(),
-                         _step_times_ms.sd());
-  gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
-                         _step_times_ms.maximum(), _step_times_ms.sum());
+  log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
+                       _worker_id, _calls);
+  log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
+                       _elapsed_time_ms, _termination_time_ms);
+  log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
+                       _step_times_ms.num(), _step_times_ms.avg(),
+                       _step_times_ms.sd());
+  log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
+                       _step_times_ms.maximum(), _step_times_ms.sum());
 }
 
 bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
@@ -3587,9 +3534,8 @@
 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
 
 G1PrintRegionLivenessInfoClosure::
-G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
-  : _out(out),
-    _total_used_bytes(0), _total_capacity_bytes(0),
+G1PrintRegionLivenessInfoClosure(const char* phase_name)
+  : _total_used_bytes(0), _total_capacity_bytes(0),
     _total_prev_live_bytes(0), _total_next_live_bytes(0),
     _hum_used_bytes(0), _hum_capacity_bytes(0),
     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
@@ -3599,38 +3545,37 @@
   double now = os::elapsedTime();
 
   // Print the header of the output.
-  _out->cr();
-  _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
-  _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
-                 G1PPRL_SUM_ADDR_FORMAT("reserved")
-                 G1PPRL_SUM_BYTE_FORMAT("region-size"),
-                 p2i(g1_reserved.start()), p2i(g1_reserved.end()),
-                 HeapRegion::GrainBytes);
-  _out->print_cr(G1PPRL_LINE_PREFIX);
-  _out->print_cr(G1PPRL_LINE_PREFIX
-                G1PPRL_TYPE_H_FORMAT
-                G1PPRL_ADDR_BASE_H_FORMAT
-                G1PPRL_BYTE_H_FORMAT
-                G1PPRL_BYTE_H_FORMAT
-                G1PPRL_BYTE_H_FORMAT
-                G1PPRL_DOUBLE_H_FORMAT
-                G1PPRL_BYTE_H_FORMAT
-                G1PPRL_BYTE_H_FORMAT,
-                "type", "address-range",
-                "used", "prev-live", "next-live", "gc-eff",
-                "remset", "code-roots");
-  _out->print_cr(G1PPRL_LINE_PREFIX
-                G1PPRL_TYPE_H_FORMAT
-                G1PPRL_ADDR_BASE_H_FORMAT
-                G1PPRL_BYTE_H_FORMAT
-                G1PPRL_BYTE_H_FORMAT
-                G1PPRL_BYTE_H_FORMAT
-                G1PPRL_DOUBLE_H_FORMAT
-                G1PPRL_BYTE_H_FORMAT
-                G1PPRL_BYTE_H_FORMAT,
-                "", "",
-                "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
-                "(bytes)", "(bytes)");
+  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
+  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
+                          G1PPRL_SUM_ADDR_FORMAT("reserved")
+                          G1PPRL_SUM_BYTE_FORMAT("region-size"),
+                          p2i(g1_reserved.start()), p2i(g1_reserved.end()),
+                          HeapRegion::GrainBytes);
+  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
+  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
+                          G1PPRL_TYPE_H_FORMAT
+                          G1PPRL_ADDR_BASE_H_FORMAT
+                          G1PPRL_BYTE_H_FORMAT
+                          G1PPRL_BYTE_H_FORMAT
+                          G1PPRL_BYTE_H_FORMAT
+                          G1PPRL_DOUBLE_H_FORMAT
+                          G1PPRL_BYTE_H_FORMAT
+                          G1PPRL_BYTE_H_FORMAT,
+                          "type", "address-range",
+                          "used", "prev-live", "next-live", "gc-eff",
+                          "remset", "code-roots");
+  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
+                          G1PPRL_TYPE_H_FORMAT
+                          G1PPRL_ADDR_BASE_H_FORMAT
+                          G1PPRL_BYTE_H_FORMAT
+                          G1PPRL_BYTE_H_FORMAT
+                          G1PPRL_BYTE_H_FORMAT
+                          G1PPRL_DOUBLE_H_FORMAT
+                          G1PPRL_BYTE_H_FORMAT
+                          G1PPRL_BYTE_H_FORMAT,
+                          "", "",
+                          "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
+                          "(bytes)", "(bytes)");
 }
 
 // It takes as a parameter a reference to one of the _hum_* fields, it
@@ -3701,18 +3646,18 @@
   _total_strong_code_roots_bytes += strong_code_roots_bytes;
 
   // Print a line for this particular region.
-  _out->print_cr(G1PPRL_LINE_PREFIX
-                 G1PPRL_TYPE_FORMAT
-                 G1PPRL_ADDR_BASE_FORMAT
-                 G1PPRL_BYTE_FORMAT
-                 G1PPRL_BYTE_FORMAT
-                 G1PPRL_BYTE_FORMAT
-                 G1PPRL_DOUBLE_FORMAT
-                 G1PPRL_BYTE_FORMAT
-                 G1PPRL_BYTE_FORMAT,
-                 type, p2i(bottom), p2i(end),
-                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
-                 remset_bytes, strong_code_roots_bytes);
+  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
+                          G1PPRL_TYPE_FORMAT
+                          G1PPRL_ADDR_BASE_FORMAT
+                          G1PPRL_BYTE_FORMAT
+                          G1PPRL_BYTE_FORMAT
+                          G1PPRL_BYTE_FORMAT
+                          G1PPRL_DOUBLE_FORMAT
+                          G1PPRL_BYTE_FORMAT
+                          G1PPRL_BYTE_FORMAT,
+                          type, p2i(bottom), p2i(end),
+                          used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
+                          remset_bytes, strong_code_roots_bytes);
 
   return false;
 }
@@ -3721,23 +3666,22 @@
   // add static memory usages to remembered set sizes
   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
   // Print the footer of the output.
-  _out->print_cr(G1PPRL_LINE_PREFIX);
-  _out->print_cr(G1PPRL_LINE_PREFIX
-                 " SUMMARY"
-                 G1PPRL_SUM_MB_FORMAT("capacity")
-                 G1PPRL_SUM_MB_PERC_FORMAT("used")
-                 G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
-                 G1PPRL_SUM_MB_PERC_FORMAT("next-live")
-                 G1PPRL_SUM_MB_FORMAT("remset")
-                 G1PPRL_SUM_MB_FORMAT("code-roots"),
-                 bytes_to_mb(_total_capacity_bytes),
-                 bytes_to_mb(_total_used_bytes),
-                 perc(_total_used_bytes, _total_capacity_bytes),
-                 bytes_to_mb(_total_prev_live_bytes),
-                 perc(_total_prev_live_bytes, _total_capacity_bytes),
-                 bytes_to_mb(_total_next_live_bytes),
-                 perc(_total_next_live_bytes, _total_capacity_bytes),
-                 bytes_to_mb(_total_remset_bytes),
-                 bytes_to_mb(_total_strong_code_roots_bytes));
-  _out->cr();
+  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
+  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
+                         " SUMMARY"
+                         G1PPRL_SUM_MB_FORMAT("capacity")
+                         G1PPRL_SUM_MB_PERC_FORMAT("used")
+                         G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
+                         G1PPRL_SUM_MB_PERC_FORMAT("next-live")
+                         G1PPRL_SUM_MB_FORMAT("remset")
+                         G1PPRL_SUM_MB_FORMAT("code-roots"),
+                         bytes_to_mb(_total_capacity_bytes),
+                         bytes_to_mb(_total_used_bytes),
+                         perc(_total_used_bytes, _total_capacity_bytes),
+                         bytes_to_mb(_total_prev_live_bytes),
+                         perc(_total_prev_live_bytes, _total_capacity_bytes),
+                         bytes_to_mb(_total_next_live_bytes),
+                         perc(_total_next_live_bytes, _total_capacity_bytes),
+                         bytes_to_mb(_total_remset_bytes),
+                         bytes_to_mb(_total_strong_code_roots_bytes));
 }
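For multi-line reports like print_summary_info(), concurrentMark.cpp uses the one-handle form: create a LogHandle once, test it once, and emit every line through it so the report appears (or is skipped) as a unit. A condensed sketch of that shape, names as in the hunks above:

    LogHandle(gc, marking) log;
    if (!log.is_trace()) {
      return;                         // one test gates the whole report
    }
    log.trace(" Concurrent marking:");
    // ... further log.trace(...) lines for the timing summary ...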
--- a/src/share/vm/gc/g1/concurrentMark.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/concurrentMark.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -978,8 +978,6 @@
 // after we sort the old regions at the end of the cleanup operation.
 class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
 private:
-  outputStream* _out;
-
   // Accumulators for these values.
   size_t _total_used_bytes;
   size_t _total_capacity_bytes;
@@ -1024,7 +1022,7 @@
 public:
   // The header and footer are printed in the constructor and
   // destructor respectively.
-  G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name);
+  G1PrintRegionLivenessInfoClosure(const char* phase_name);
   virtual bool doHeapRegion(HeapRegion* r);
   ~G1PrintRegionLivenessInfoClosure();
 };
--- a/src/share/vm/gc/g1/concurrentMarkThread.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/concurrentMarkThread.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -26,12 +26,13 @@
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
-#include "gc/g1/g1Log.hpp"
 #include "gc/g1/g1MMUTracker.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTrace.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/vmThread.hpp"
 
@@ -78,20 +79,6 @@
   }
 };
 
-// We want to avoid that the logging from the concurrent thread is mixed
-// with the logging from a STW GC. So, if necessary join the STS to ensure
-// that the logging is done either before or after the STW logging.
-void ConcurrentMarkThread::cm_log(bool doit, bool join_sts, const char* fmt, ...) {
-  if (doit) {
-    SuspendibleThreadSetJoiner sts_joiner(join_sts);
-    va_list args;
-    va_start(args, fmt);
-    gclog_or_tty->gclog_stamp();
-    gclog_or_tty->vprint_cr(fmt, args);
-    va_end(args);
-  }
-}
-
 // Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
 void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
   if (g1_policy->adaptive_young_list_length()) {
@@ -143,8 +130,11 @@
         _cm->scanRootRegions();
       }
 
-      double mark_start_sec = os::elapsedTime();
-      cm_log(G1Log::fine(), true, "[GC concurrent-mark-start]");
+      // It would be nice to use the GCTraceConcTime class here but
+      // the "end" logging is inside the loop and not at the end of
+      // a scope. Mimicking the same log output as GCTraceConcTime instead.
+      jlong mark_start = os::elapsed_counter();
+      log_info(gc)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start));
 
       int iter = 0;
       do {
@@ -154,20 +144,22 @@
         }
 
         double mark_end_time = os::elapsedVTime();
-        double mark_end_sec = os::elapsedTime();
+        jlong mark_end = os::elapsed_counter();
         _vtime_mark_accum += (mark_end_time - cycle_start);
         if (!cm()->has_aborted()) {
           delay_to_keep_mmu(g1_policy, true /* remark */);
-
-          cm_log(G1Log::fine(), true, "[GC concurrent-mark-end, %1.7lf secs]", mark_end_sec - mark_start_sec);
+          log_info(gc)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
+                       TimeHelper::counter_to_seconds(mark_start),
+                       TimeHelper::counter_to_seconds(mark_end),
+                       TimeHelper::counter_to_millis(mark_end - mark_start));
 
           CMCheckpointRootsFinalClosure final_cl(_cm);
-          VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
+          VM_CGC_Operation op(&final_cl, "Pause Remark", true /* needs_pll */);
           VMThread::execute(&op);
         }
         if (cm()->restart_for_overflow()) {
-          cm_log(G1TraceMarkStackOverflow, true, "Restarting conc marking because of MS overflow in remark (restart #%d).", iter);
-          cm_log(G1Log::fine(), true, "[GC concurrent-mark-restart-for-overflow]");
+          log_debug(gc)("Restarting conc marking because of MS overflow in remark (restart #%d).", iter);
+          log_info(gc)("Concurrent Mark restart for overflow");
         }
       } while (cm()->restart_for_overflow());
 
@@ -181,7 +173,7 @@
         delay_to_keep_mmu(g1_policy, false /* cleanup */);
 
         CMCleanUp cl_cl(_cm);
-        VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);
+        VM_CGC_Operation op(&cl_cl, "Pause Cleanup", false /* needs_pll */);
         VMThread::execute(&op);
       } else {
         // We don't want to update the marking status if a GC pause
@@ -201,8 +193,7 @@
         // place, it would wait for us to process the regions
         // reclaimed by cleanup.
 
-        double cleanup_start_sec = os::elapsedTime();
-        cm_log(G1Log::fine(), false, "[GC concurrent-cleanup-start]");
+        GCTraceConcTime(Info, gc) tt("Concurrent Cleanup");
 
         // Now do the concurrent cleanup operation.
         _cm->completeCleanup();
@@ -217,9 +208,6 @@
         // while it's trying to join the STS, which is conditional on
         // the GC workers finishing.
         g1h->reset_free_regions_coming();
-
-        double cleanup_end_sec = os::elapsedTime();
-        cm_log(G1Log::fine(), true, "[GC concurrent-cleanup-end, %1.7lf secs]", cleanup_end_sec - cleanup_start_sec);
       }
       guarantee(cm()->cleanup_list_is_empty(),
                 "at this point there should be no regions on the cleanup list");
@@ -253,7 +241,7 @@
         if (!cm()->has_aborted()) {
           g1_policy->record_concurrent_mark_cleanup_completed();
         } else {
-          cm_log(G1Log::fine(), false, "[GC concurrent-mark-abort]");
+          log_info(gc)("Concurrent Mark abort");
         }
       }
 
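The concurrent-mark loop above switches from os::elapsedTime() seconds to raw ticks from os::elapsed_counter(), converting to human units only at the log site. A small sketch of that timing idiom (do_concurrent_work() is a hypothetical stand-in for the marking step):

    jlong start = os::elapsed_counter();
    do_concurrent_work();                       // hypothetical marking step
    jlong end = os::elapsed_counter();
    log_info(gc)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
                 TimeHelper::counter_to_seconds(start),
                 TimeHelper::counter_to_seconds(end),
                 TimeHelper::counter_to_millis(end - start));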
--- a/src/share/vm/gc/g1/concurrentMarkThread.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/concurrentMarkThread.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -40,7 +40,6 @@
   double _vtime_accum;  // Accumulated virtual time.
 
   double _vtime_mark_accum;
-  void cm_log(bool doit, bool join_sts, const char* fmt, ...) ATTRIBUTE_PRINTF(4, 5);
 
  public:
   virtual void run();
--- a/src/share/vm/gc/g1/dirtyCardQueue.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/dirtyCardQueue.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -112,7 +112,7 @@
                           fl_owner);
   set_buffer_size(G1UpdateBufferSize);
   _shared_dirty_card_queue.set_lock(lock);
-  _free_ids = new FreeIdSet((int) num_par_ids(), _cbl_mon);
+  _free_ids = new FreeIdSet(num_par_ids(), _cbl_mon);
 }
 
 void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
--- a/src/share/vm/gc/g1/g1Allocator.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1Allocator.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -353,7 +353,7 @@
   assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
   hr->set_archive();
   _g1h->old_set_add(hr);
-  _g1h->hr_printer()->alloc(hr, G1HRPrinter::Archive);
+  _g1h->hr_printer()->alloc(hr);
   _allocated_regions.append(hr);
   _allocation_region = hr;
 
--- a/src/share/vm/gc/g1/g1BlockOffsetTable.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1BlockOffsetTable.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,6 +27,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/space.hpp"
+#include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "services/memTracker.hpp"
@@ -50,14 +51,9 @@
 
   storage->set_mapping_changed_listener(&_listener);
 
-  if (TraceBlockOffsetTable) {
-    gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
-    gclog_or_tty->print_cr("  "
-                  "  rs.base(): " PTR_FORMAT
-                  "  rs.size(): " SIZE_FORMAT
-                  "  rs end(): " PTR_FORMAT,
-                  p2i(bot_reserved.start()), bot_reserved.byte_size(), p2i(bot_reserved.end()));
-  }
+  log_trace(gc, bot)("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
+  log_trace(gc, bot)("    rs.base(): " PTR_FORMAT "  rs.size(): " SIZE_FORMAT "  rs end(): " PTR_FORMAT,
+                     p2i(bot_reserved.start()), bot_reserved.byte_size(), p2i(bot_reserved.end()));
 }
 
 bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
--- a/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1CollectedHeap.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/metadataOnStackMark.hpp"
 #include "classfile/stringTable.hpp"
+#include "classfile/symbolTable.hpp"
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "gc/g1/bufferingOopClosure.hpp"
@@ -35,10 +36,8 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
-#include "gc/g1/g1ErgoVerbose.hpp"
 #include "gc/g1/g1EvacStats.inline.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
-#include "gc/g1/g1Log.hpp"
 #include "gc/g1/g1MarkSweep.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
 #include "gc/g1/g1ParScanThreadState.inline.hpp"
@@ -58,11 +57,12 @@
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
@@ -112,18 +112,37 @@
 
 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
  private:
-  size_t _num_processed;
+  size_t _num_dirtied;
+  G1CollectedHeap* _g1h;
+  G1SATBCardTableLoggingModRefBS* _g1_bs;
+
+  HeapRegion* region_for_card(jbyte* card_ptr) const {
+    return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
+  }
+
+  bool will_become_free(HeapRegion* hr) const {
+    // A region will be freed by free_collection_set if the region is in the
+    // collection set and has not had an evacuation failure.
+    return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
+  }
 
  public:
-  RedirtyLoggedCardTableEntryClosure() : CardTableEntryClosure(), _num_processed(0) { }
+  RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
+    _num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }
 
   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
-    *card_ptr = CardTableModRefBS::dirty_card_val();
-    _num_processed++;
+    HeapRegion* hr = region_for_card(card_ptr);
+
+    // Should only dirty cards in regions that won't be freed.
+    if (!will_become_free(hr)) {
+      *card_ptr = CardTableModRefBS::dirty_card_val();
+      _num_dirtied++;
+    }
+
     return true;
   }
 
-  size_t num_processed() const { return _num_processed; }
+  size_t num_dirtied()   const { return _num_dirtied; }
 };
 
 
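Beyond renaming _num_processed, the closure above gains a filter: cards in regions that free_collection_set is about to free are skipped, since redirtying them would only feed useless work to refinement. An annotated restatement of the predicate path, with members exactly as declared in the hunk:

    bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
      HeapRegion* hr = region_for_card(card_ptr);  // region backing this card
      // will_become_free(hr): in the collection set with no evacuation failure,
      // so free_collection_set will reclaim it and the card is not worth dirtying.
      if (!will_become_free(hr)) {
        *card_ptr = CardTableModRefBS::dirty_card_val();
        _num_dirtied++;
      }
      return true;                                 // keep iterating the buffer
    }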
@@ -204,11 +223,9 @@
   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
     if (!_secondary_free_list.is_empty()) {
-      if (G1ConcRegionFreeingVerbose) {
-        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
-                               "secondary_free_list has %u entries",
-                               _secondary_free_list.length());
-      }
+      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
+                                      "secondary_free_list has %u entries",
+                                      _secondary_free_list.length());
       // It looks as if there are free regions available on the
       // secondary_free_list. Let's move them to the free_list and try
       // again to allocate from it.
@@ -217,11 +234,9 @@
       assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
              "empty we should have moved at least one entry to the free_list");
       HeapRegion* res = _hrm.allocate_free_region(is_old);
-      if (G1ConcRegionFreeingVerbose) {
-        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
-                               "allocated " HR_FORMAT " from secondary_free_list",
-                               HR_FORMAT_PARAMS(res));
-      }
+      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
+                                      "allocated " HR_FORMAT " from secondary_free_list",
+                                      HR_FORMAT_PARAMS(res));
       return res;
     }
 
@@ -231,10 +246,8 @@
     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
   }
 
-  if (G1ConcRegionFreeingVerbose) {
-    gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
-                           "could not allocate from secondary_free_list");
-  }
+  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
+                                  "could not allocate from secondary_free_list");
   return NULL;
 }
 
@@ -246,10 +259,8 @@
   HeapRegion* res;
   if (G1StressConcRegionFreeing) {
     if (!_secondary_free_list.is_empty()) {
-      if (G1ConcRegionFreeingVerbose) {
-        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
-                               "forced to look at the secondary_free_list");
-      }
+      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
+                                      "forced to look at the secondary_free_list");
       res = new_region_try_secondary_free_list(is_old);
       if (res != NULL) {
         return res;
@@ -260,10 +271,8 @@
   res = _hrm.allocate_free_region(is_old);
 
   if (res == NULL) {
-    if (G1ConcRegionFreeingVerbose) {
-      gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
-                             "res == NULL, trying the secondary_free_list");
-    }
+    log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
+                                    "res == NULL, trying the secondary_free_list");
     res = new_region_try_secondary_free_list(is_old);
   }
   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
@@ -273,11 +282,9 @@
     // reconsider the use of _expand_heap_after_alloc_failure.
     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 
-    ergo_verbose1(ErgoHeapSizing,
-                  "attempt heap expansion",
-                  ergo_format_reason("region allocation request failed")
-                  ergo_format_byte("allocation request"),
-                  word_size * HeapWordSize);
+    log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
+                              word_size * HeapWordSize);
+
     if (expand(word_size * HeapWordSize)) {
       // Given that expand() succeeded in expanding the heap, and we
       // always expand the heap by an amount aligned to the heap
@@ -403,11 +410,7 @@
   for (uint i = first; i <= last; ++i) {
     hr = region_at(i);
     _humongous_set.add(hr);
-    if (i == first) {
-      _hr_printer.alloc(G1HRPrinter::StartsHumongous, hr, hr->top());
-    } else {
-      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->top());
-    }
+    _hr_printer.alloc(hr);
   }
 
   return new_obj;
@@ -465,11 +468,9 @@
     if (first != G1_NO_HRM_INDEX) {
       // We found something. Make sure these regions are committed, i.e. expand
       // the heap. Alternatively we could do a defragmentation GC.
-      ergo_verbose1(ErgoHeapSizing,
-                    "attempt heap expansion",
-                    ergo_format_reason("humongous allocation request failed")
-                    ergo_format_byte("allocation request"),
-                    word_size * HeapWordSize);
+      log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
+                                word_size * HeapWordSize);
+
 
       _hrm.expand_at(first, obj_regions);
       g1_policy()->record_new_heap_size(num_regions());
@@ -788,11 +789,9 @@
     }
     increase_used(word_size * HeapWordSize);
     if (commits != 0) {
-      ergo_verbose1(ErgoHeapSizing,
-                    "attempt heap expansion",
-                    ergo_format_reason("allocate archive regions")
-                    ergo_format_byte("total size"),
-                    HeapRegion::GrainWords * HeapWordSize * commits);
+      log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
+                                HeapRegion::GrainWords * HeapWordSize * commits);
+
     }
 
     // Mark each G1 region touched by the range as archive, add it to the old set,
@@ -804,9 +803,9 @@
     while (curr_region != NULL) {
       assert(curr_region->is_empty() && !curr_region->is_pinned(),
              "Region already in use (index %u)", curr_region->hrm_index());
-      _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
       curr_region->set_allocation_context(AllocationContext::system());
       curr_region->set_archive();
+      _hr_printer.alloc(curr_region);
       _old_set.add(curr_region);
       if (curr_region != last_region) {
         curr_region->set_top(curr_region->end());
@@ -973,11 +972,8 @@
   }
 
   if (uncommitted_regions != 0) {
-    ergo_verbose1(ErgoHeapSizing,
-                  "attempt heap shrinking",
-                  ergo_format_reason("uncommitted archive regions")
-                  ergo_format_byte("total size"),
-                  HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
+    log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
+                              HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
   }
   decrease_used(size_used);
 }
@@ -1195,19 +1191,7 @@
 public:
   bool doHeapRegion(HeapRegion* hr) {
     assert(!hr->is_young(), "not expecting to find young regions");
-    if (hr->is_free()) {
-      // We only generate output for non-empty regions.
-    } else if (hr->is_starts_humongous()) {
-      _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
-    } else if (hr->is_continues_humongous()) {
-      _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
-    } else if (hr->is_archive()) {
-      _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
-    } else if (hr->is_old()) {
-      _hr_printer->post_compaction(hr, G1HRPrinter::Old);
-    } else {
-      ShouldNotReachHere();
-    }
+    _hr_printer->post_compaction(hr);
     return false;
   }
 
@@ -1216,8 +1200,11 @@
 };
 
 void G1CollectedHeap::print_hrm_post_compaction() {
-  PostCompactionPrinterClosure cl(hr_printer());
-  heap_region_iterate(&cl);
+  if (_hr_printer.is_active()) {
+    PostCompactionPrinterClosure cl(hr_printer());
+    heap_region_iterate(&cl);
+  }
+
 }
 
 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
@@ -1238,7 +1225,6 @@
   SvcGCMarker sgcm(SvcGCMarker::FULL);
   ResourceMark rm;
 
-  G1Log::update_level();
   print_heap_before_gc();
   trace_heap_before_gc(gc_tracer);
 
@@ -1256,10 +1242,10 @@
 
     // Timing
     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
-    TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
+    GCTraceCPUTime tcpu;
 
     {
-      GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
+      GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
       TraceCollectorStats tcs(g1mm()->full_collection_counters());
       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
 
@@ -1310,11 +1296,6 @@
       _allocator->abandon_gc_alloc_regions();
       g1_rem_set()->cleanupHRRS();
 
-      // We should call this after we retire any currently active alloc
-      // regions so that all the ALLOC / RETIRE events are generated
-      // before the start GC event.
-      _hr_printer.start_gc(true /* full */, (size_t) total_collections());
-
       // We may have added regions to the current incremental collection
       // set between the last GC or pause and now. We need to clear the
       // incremental collection set and then start rebuilding it afresh
@@ -1381,14 +1362,10 @@
 
       resize_if_necessary_after_full_collection();
 
-      if (_hr_printer.is_active()) {
-        // We should do this after we potentially resize the heap so
-        // that all the COMMIT / UNCOMMIT events are generated before
-        // the end GC event.
-
-        print_hrm_post_compaction();
-        _hr_printer.end_gc(true /* full */, (size_t) total_collections());
-      }
+      // We should do this after we potentially resize the heap so
+      // that all the COMMIT / UNCOMMIT events are generated before
+      // the compaction events.
+      print_hrm_post_compaction();
 
       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
       if (hot_card_cache->use_cache()) {
@@ -1457,10 +1434,6 @@
 
       g1_policy()->record_full_collection_end();
 
-      if (G1Log::fine()) {
-        g1_policy()->print_heap_transition();
-      }
-
       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
@@ -1470,9 +1443,7 @@
       gc_epilogue(true);
     }
 
-    if (G1Log::finer()) {
-      g1_policy()->print_detailed_heap_transition(true /* full */);
-    }
+    g1_policy()->print_detailed_heap_transition();
 
     print_heap_after_gc();
     trace_heap_after_gc(gc_tracer);
@@ -1550,30 +1521,22 @@
   if (capacity_after_gc < minimum_desired_capacity) {
     // Don't expand unless it's significant
     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
-    ergo_verbose4(ErgoHeapSizing,
-                  "attempt heap expansion",
-                  ergo_format_reason("capacity lower than "
-                                     "min desired capacity after Full GC")
-                  ergo_format_byte("capacity")
-                  ergo_format_byte("occupancy")
-                  ergo_format_byte_perc("min desired capacity"),
-                  capacity_after_gc, used_after_gc,
-                  minimum_desired_capacity, (double) MinHeapFreeRatio);
+
+    log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
+                              "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
+                              capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
+
     expand(expand_bytes);
 
     // No expansion, now see if we want to shrink
   } else if (capacity_after_gc > maximum_desired_capacity) {
     // Capacity too large, compute shrinking size
     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
-    ergo_verbose4(ErgoHeapSizing,
-                  "attempt heap shrinking",
-                  ergo_format_reason("capacity higher than "
-                                     "max desired capacity after Full GC")
-                  ergo_format_byte("capacity")
-                  ergo_format_byte("occupancy")
-                  ergo_format_byte_perc("max desired capacity"),
-                  capacity_after_gc, used_after_gc,
-                  maximum_desired_capacity, (double) MaxHeapFreeRatio);
+
+    log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
+                              "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B max_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
+                              capacity_after_gc, used_after_gc, maximum_desired_capacity, MaxHeapFreeRatio);
+
     shrink(shrink_bytes);
   }
 }
@@ -1679,11 +1642,10 @@
   verify_region_sets_optional();
 
   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
-  ergo_verbose1(ErgoHeapSizing,
-                "attempt heap expansion",
-                ergo_format_reason("allocation request failed")
-                ergo_format_byte("allocation request"),
-                word_size * HeapWordSize);
+  log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
+                            word_size * HeapWordSize);
+
   if (expand(expand_bytes)) {
     _hrm.verify_optional();
     verify_region_sets_optional();
@@ -1698,16 +1660,12 @@
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                        HeapRegion::GrainBytes);
-  ergo_verbose2(ErgoHeapSizing,
-                "expand the heap",
-                ergo_format_byte("requested expansion amount")
-                ergo_format_byte("attempted expansion amount"),
-                expand_bytes, aligned_expand_bytes);
+
+  log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount:" SIZE_FORMAT "B expansion amount:" SIZE_FORMAT "B",
+                            expand_bytes, aligned_expand_bytes);
 
   if (is_maximal_no_gc()) {
-    ergo_verbose0(ErgoHeapSizing,
-                      "did not expand the heap",
-                      ergo_format_reason("heap already fully expanded"));
+    log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
     return false;
   }
 
@@ -1725,9 +1683,8 @@
     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
     g1_policy()->record_new_heap_size(num_regions());
   } else {
-    ergo_verbose0(ErgoHeapSizing,
-                  "did not expand the heap",
-                  ergo_format_reason("heap expansion operation failed"));
+    log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
+
     // The expansion of the virtual storage space was unsuccessful.
     // Let's see if it was because we ran out of swap.
     if (G1ExitOnExpansionFailure &&
@@ -1749,18 +1706,13 @@
   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
 
-  ergo_verbose3(ErgoHeapSizing,
-                "shrink the heap",
-                ergo_format_byte("requested shrinking amount")
-                ergo_format_byte("aligned shrinking amount")
-                ergo_format_byte("attempted shrinking amount"),
-                shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
+  log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
+                            shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
   if (num_regions_removed > 0) {
     g1_policy()->record_new_heap_size(num_regions());
   } else {
-    ergo_verbose0(ErgoHeapSizing,
-                  "did not shrink the heap",
-                  ergo_format_reason("heap shrinking operation failed"));
+    log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
   }
 }
 
@@ -1872,8 +1824,8 @@
                                          translation_factor,
                                          mtGC);
   if (TracePageSizes) {
-    gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
-                           description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
+    tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
+                  description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
   }
   return result;
 }
@@ -1882,16 +1834,10 @@
   CollectedHeap::pre_initialize();
   os::enable_vtime();
 
-  G1Log::init();
-
   // Necessary to satisfy locking discipline assertions.
 
   MutexLocker x(Heap_lock);
 
-  // We have to initialize the printer before committing the heap, as
-  // it will be used then.
-  _hr_printer.set_active(G1PrintHeapRegions);
-
   // While there are no constraints in the GC code that HeapWordSize
   // be any particular value, there are multiple other areas in the
   // system which believe this to be true (e.g. oop->object_size in some
@@ -2084,7 +2030,7 @@
 
 void G1CollectedHeap::stop() {
   // Stop all concurrent threads. We do this to make sure these threads
-  // do not continue to execute and access resources (e.g. gclog_or_tty)
+  // do not continue to execute and access resources (e.g. logging)
   // that are destroyed during shutdown.
   _cg1r->stop();
   _cmThread->stop();
@@ -2201,9 +2147,8 @@
   virtual bool doHeapRegion(HeapRegion* hr) {
     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
     if (_gc_time_stamp != region_gc_time_stamp) {
-      gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %d, "
-                             "expected %d", HR_FORMAT_PARAMS(hr),
-                             region_gc_time_stamp, _gc_time_stamp);
+      log_info(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),
+                           region_gc_time_stamp, _gc_time_stamp);
       _failures = true;
     }
     return false;
@@ -2268,15 +2213,21 @@
   return blk.result();
 }
 
+bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
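+  // These causes represent explicit requests for a concurrent cycle made by the
+  // user or by tooling (System.gc()/jcmd with ExplicitGCInvokesConcurrent,
+  // allocation context stats updates, and WhiteBox-triggered concurrent marks).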
+  switch (cause) {
+    case GCCause::_java_lang_system_gc:                 return ExplicitGCInvokesConcurrent;
+    case GCCause::_dcmd_gc_run:                         return ExplicitGCInvokesConcurrent;
+    case GCCause::_update_allocation_context_stats_inc: return true;
+    case GCCause::_wb_conc_mark:                        return true;
+    default:                                            return false;
+  }
+}
+
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   switch (cause) {
     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
-    case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
-    case GCCause::_dcmd_gc_run:             return ExplicitGCInvokesConcurrent;
     case GCCause::_g1_humongous_allocation: return true;
-    case GCCause::_update_allocation_context_stats_inc: return true;
-    case GCCause::_wb_conc_mark:            return true;
-    default:                                return false;
+    default:                                return is_user_requested_concurrent_full_gc(cause);
   }
 }
 
@@ -2790,12 +2741,13 @@
     if (!oopDesc::is_null(heap_oop)) {
       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
       if (_g1h->is_obj_dead_cond(obj, _vo)) {
-        gclog_or_tty->print_cr("Root location " PTR_FORMAT " "
-                               "points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
+        LogHandle(gc, verify) log;
+        log.info("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
         if (_vo == VerifyOption_G1UseMarkWord) {
-          gclog_or_tty->print_cr("  Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark());
+          log.info("  Mark word: " PTR_FORMAT, p2i(obj->mark()));
         }
-        obj->print_on(gclog_or_tty);
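+        // A ResourceMark is needed here: the stream obtained from the log
+        // handle below is allocated in the resource area.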
+        ResourceMark rm;
+        obj->print_on(log.info_stream());
         _failures = true;
       }
     }
@@ -2840,10 +2792,10 @@
       // Verify that the strong code root list for this region
       // contains the nmethod
       if (!hrrs->strong_code_roots_list_contains(_nm)) {
-        gclog_or_tty->print_cr("Code root location " PTR_FORMAT " "
-                               "from nmethod " PTR_FORMAT " not in strong "
-                               "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
-                               p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
+        log_info(gc, verify)("Code root location " PTR_FORMAT " "
+                             "from nmethod " PTR_FORMAT " not in strong "
+                             "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
+                             p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
         _failures = true;
       }
     }
@@ -3021,12 +2973,8 @@
         r->object_iterate(&not_dead_yet_cl);
         if (_vo != VerifyOption_G1UseNextMarking) {
           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
-            gclog_or_tty->print_cr("[" PTR_FORMAT "," PTR_FORMAT "] "
-                                   "max_live_bytes " SIZE_FORMAT " "
-                                   "< calculated " SIZE_FORMAT,
-                                   p2i(r->bottom()), p2i(r->end()),
-                                   r->max_live_bytes(),
-                                 not_dead_yet_cl.live_bytes());
+            log_info(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
+                                 p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
             _failures = true;
           }
         } else {
@@ -3074,85 +3022,75 @@
   }
 };
 
-void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
-  if (SafepointSynchronize::is_at_safepoint()) {
-    assert(Thread::current()->is_VM_thread(),
-           "Expected to be executed serially by the VM thread at this point");
-
-    if (!silent) { gclog_or_tty->print("Roots "); }
-    VerifyRootsClosure rootsCl(vo);
-    VerifyKlassClosure klassCl(this, &rootsCl);
-    CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
-
-    // We apply the relevant closures to all the oops in the
-    // system dictionary, class loader data graph, the string table
-    // and the nmethods in the code cache.
-    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
-    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
-
-    {
-      G1RootProcessor root_processor(this, 1);
-      root_processor.process_all_roots(&rootsCl,
-                                       &cldCl,
-                                       &blobsCl);
-    }
-
-    bool failures = rootsCl.failures() || codeRootsCl.failures();
-
-    if (vo != VerifyOption_G1UseMarkWord) {
-      // If we're verifying during a full GC then the region sets
-      // will have been torn down at the start of the GC. Therefore
-      // verifying the region sets will fail. So we only verify
-      // the region sets when not in a full GC.
-      if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
-      verify_region_sets();
+void G1CollectedHeap::verify(VerifyOption vo) {
+  if (!SafepointSynchronize::is_at_safepoint()) {
+    log_info(gc, verify)("Skipping verification. Not at safepoint.");
+    return;
+  }
+
+  assert(Thread::current()->is_VM_thread(),
+         "Expected to be executed serially by the VM thread at this point");
+
+  log_debug(gc, verify)("Roots");
+  VerifyRootsClosure rootsCl(vo);
+  VerifyKlassClosure klassCl(this, &rootsCl);
+  CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
+
+  // We apply the relevant closures to all the oops in the
+  // system dictionary, class loader data graph, the string table
+  // and the nmethods in the code cache.
+  G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
+  G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
+
+  {
+    G1RootProcessor root_processor(this, 1);
+    root_processor.process_all_roots(&rootsCl,
+                                     &cldCl,
+                                     &blobsCl);
+  }
+
+  bool failures = rootsCl.failures() || codeRootsCl.failures();
+
+  if (vo != VerifyOption_G1UseMarkWord) {
+    // If we're verifying during a full GC then the region sets
+    // will have been torn down at the start of the GC. Therefore
+    // verifying the region sets will fail. So we only verify
+    // the region sets when not in a full GC.
+    log_debug(gc, verify)("HeapRegionSets");
+    verify_region_sets();
+  }
+
+  log_debug(gc, verify)("HeapRegions");
+  if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
+
+    G1ParVerifyTask task(this, vo);
+    workers()->run_task(&task);
+    if (task.failures()) {
+      failures = true;
     }
 
-    if (!silent) { gclog_or_tty->print("HeapRegions "); }
-    if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
-
-      G1ParVerifyTask task(this, vo);
-      workers()->run_task(&task);
-      if (task.failures()) {
-        failures = true;
-      }
-
-    } else {
-      VerifyRegionClosure blk(false, vo);
-      heap_region_iterate(&blk);
-      if (blk.failures()) {
-        failures = true;
-      }
-    }
-
-    if (G1StringDedup::is_enabled()) {
-      if (!silent) gclog_or_tty->print("StrDedup ");
-      G1StringDedup::verify();
+  } else {
+    VerifyRegionClosure blk(false, vo);
+    heap_region_iterate(&blk);
+    if (blk.failures()) {
+      failures = true;
     }
-
-    if (failures) {
-      gclog_or_tty->print_cr("Heap:");
-      // It helps to have the per-region information in the output to
-      // help us track down what went wrong. This is why we call
-      // print_extended_on() instead of print_on().
-      print_extended_on(gclog_or_tty);
-      gclog_or_tty->cr();
-      gclog_or_tty->flush();
-    }
-    guarantee(!failures, "there should not have been any failures");
-  } else {
-    if (!silent) {
-      gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet");
-      if (G1StringDedup::is_enabled()) {
-        gclog_or_tty->print(", StrDedup");
-      }
-      gclog_or_tty->print(") ");
-    }
-  }
-}
-
-void G1CollectedHeap::verify(bool silent) {
-  verify(silent, VerifyOption_G1UsePrevMarking);
+  }
+
+  if (G1StringDedup::is_enabled()) {
+    log_debug(gc, verify)("StrDedup");
+    G1StringDedup::verify();
+  }
+
+  if (failures) {
+    log_info(gc, verify)("Heap after failed verification:");
+    // It helps to have the per-region information in the output to
+    // help us track down what went wrong. This is why we call
+    // print_extended_on() instead of print_on().
+    LogHandle(gc, verify) log;
+    ResourceMark rm;
+    print_extended_on(log.info_stream());
+  }
+  guarantee(!failures, "there should not have been any failures");
 }
 
 double G1CollectedHeap::verify(bool guard, const char* msg) {
@@ -3170,12 +3108,12 @@
 }
 
 void G1CollectedHeap::verify_before_gc() {
-  double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
+  double verify_time_ms = verify(VerifyBeforeGC, "Before GC");
   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
 }
 
 void G1CollectedHeap::verify_after_gc() {
-  double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
+  double verify_time_ms = verify(VerifyAfterGC, "After GC");
   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
 }
 
@@ -3240,11 +3178,11 @@
 
   // Print the per-region information.
   st->cr();
-  st->print_cr("Heap Regions: (E=young(eden), S=young(survivor), O=old, "
+  st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
                "HS=humongous(starts), HC=humongous(continues), "
                "CS=collection set, F=free, A=archive, TS=gc time stamp, "
-               "PTAMS=previous top-at-mark-start, "
-               "NTAMS=next top-at-mark-start)");
+               "AC=allocation context, "
+               "TAMS=top-at-mark-start (previous, next)");
   PrintRegionClosure blk(st);
   heap_region_iterate(&blk);
 }
@@ -3285,12 +3223,8 @@
     // to that.
     g1_policy()->print_tracing_info();
   }
-  if (G1SummarizeRSetStats) {
-    g1_rem_set()->print_summary_info();
-  }
-  if (G1SummarizeConcMark) {
-    concurrent_mark()->print_summary_info();
-  }
+  g1_rem_set()->print_summary_info();
+  concurrent_mark()->print_summary_info();
   g1_policy()->print_yg_surv_rate_info();
 }
 
@@ -3308,28 +3242,27 @@
     size_t occupied = hrrs->occupied();
     _occupied_sum += occupied;
 
-    gclog_or_tty->print_cr("Printing RSet for region " HR_FORMAT,
-                           HR_FORMAT_PARAMS(r));
+    tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));
     if (occupied == 0) {
-      gclog_or_tty->print_cr("  RSet is empty");
+      tty->print_cr("  RSet is empty");
     } else {
       hrrs->print();
     }
-    gclog_or_tty->print_cr("----------");
+    tty->print_cr("----------");
     return false;
   }
 
   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
-    gclog_or_tty->cr();
-    gclog_or_tty->print_cr("========================================");
-    gclog_or_tty->print_cr("%s", msg);
-    gclog_or_tty->cr();
+    tty->cr();
+    tty->print_cr("========================================");
+    tty->print_cr("%s", msg);
+    tty->cr();
   }
 
   ~PrintRSetsClosure() {
-    gclog_or_tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
-    gclog_or_tty->print_cr("========================================");
-    gclog_or_tty->cr();
+    tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
+    tty->print_cr("========================================");
+    tty->cr();
   }
 };
 
@@ -3387,20 +3320,12 @@
   accumulate_statistics_all_tlabs();
   ensure_parsability(true);
 
-  if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
-      (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
-    g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
-  }
+  g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
 }
 
 void G1CollectedHeap::gc_epilogue(bool full) {
-
-  if (G1SummarizeRSetStats &&
-      (G1SummarizeRSetStatsPeriod > 0) &&
-      // we are at the end of the GC. Total collections has already been increased.
-      ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
-    g1_rem_set()->print_periodic_summary_info("After GC RS summary");
-  }
+  // We are at the end of the GC, so the total collection count has already been increased.
+  g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
 
   // FIXME: what is this about?
   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
@@ -3646,7 +3571,14 @@
   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
 }
 
-void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
+void G1CollectedHeap::print_taskqueue_stats() const {
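+  // Replaces the old PrintTaskqueue flag: the statistics are only assembled
+  // and printed when (gc, task, stats) trace logging is enabled.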
+  if (!develop_log_is_enabled(Trace, gc, task, stats)) {
+    return;
+  }
+  LogHandle(gc, task, stats) log;
+  ResourceMark rm;
+  outputStream* st = log.trace_stream();
+
   print_taskqueue_stats_hdr(st);
 
   TaskQueueStats totals;
@@ -3668,41 +3600,17 @@
 }
 #endif // TASKQUEUE_STATS
 
-void G1CollectedHeap::log_gc_header() {
-  if (!G1Log::fine()) {
-    return;
-  }
-
-  gclog_or_tty->gclog_stamp();
-
-  GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
-    .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
-    .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
-
-  gclog_or_tty->print("[%s", (const char*)gc_cause_str);
-}
-
-void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
-  if (!G1Log::fine()) {
-    return;
-  }
-
-  if (G1Log::finer()) {
-    if (evacuation_failed()) {
-      gclog_or_tty->print(" (to-space exhausted)");
-    }
-    gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
-    g1_policy()->print_phases(pause_time_sec);
-    g1_policy()->print_detailed_heap_transition();
-  } else {
-    if (evacuation_failed()) {
-      gclog_or_tty->print("--");
-    }
-    g1_policy()->print_heap_transition();
-    gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
-  }
-  gclog_or_tty->flush();
-}
+void G1CollectedHeap::log_gc_footer(double pause_time_counter) {
+  if (evacuation_failed()) {
+    log_info(gc)("To-space exhausted");
+  }
+
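+  // The pause time arrives as an os::elapsed_counter() delta and is converted
+  // to seconds only here, when it is reported.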
+  double pause_time_sec = TimeHelper::counter_to_seconds(pause_time_counter);
+  g1_policy()->print_phases(pause_time_sec);
+
+  g1_policy()->print_detailed_heap_transition();
+}
 
 void G1CollectedHeap::wait_for_root_region_scanning() {
   double scan_wait_start = os::elapsedTime();
@@ -3738,7 +3646,6 @@
 
   wait_for_root_region_scanning();
 
-  G1Log::update_level();
   print_heap_before_gc();
   trace_heap_before_gc(_gc_tracer_stw);
 
@@ -3775,16 +3682,25 @@
 
     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
 
-    TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
+    GCTraceCPUTime tcpu;
 
     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                                                   workers()->active_workers(),
                                                                   Threads::number_of_non_daemon_threads());
     workers()->set_active_workers(active_workers);
+    FormatBuffer<> gc_string("Pause ");
+    if (collector_state()->during_initial_mark_pause()) {
+      gc_string.append("Initial Mark");
+    } else if (collector_state()->gcs_are_young()) {
+      gc_string.append("Young");
+    } else {
+      gc_string.append("Mixed");
+    }
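+    // The chosen pause kind becomes part of the log message, e.g. "Pause Young".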
+    GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
 
     double pause_start_sec = os::elapsedTime();
+    double pause_start_counter = os::elapsed_counter();
     g1_policy()->note_gc_start(active_workers);
-    log_gc_header();
 
     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
@@ -3842,11 +3758,6 @@
         // of the collection set!).
         _allocator->release_mutator_alloc_region();
 
-        // We should call this after we retire the mutator alloc
-        // region(s) so that all the ALLOC / RETIRE events are generated
-        // before the start GC event.
-        _hr_printer.start_gc(false /* full */, (size_t) total_collections());
-
         // This timing is only used by the ergonomics to handle our pause target.
         // It is unclear why this should not include the full pause. We will
         // investigate this in CR 7178365.
@@ -3970,7 +3881,7 @@
           size_t expand_bytes = g1_policy()->expansion_amount();
           if (expand_bytes > 0) {
             size_t bytes_before = capacity();
-            // No need for an ergo verbose message here,
+            // No need for ergo logging here,
             // expansion_amount() does this when it returns a value > 0.
             double expand_ms;
             if (!expand(expand_bytes, &expand_ms)) {
@@ -4030,12 +3941,6 @@
         // CM reference discovery will be re-enabled if necessary.
       }
 
-      // We should do this after we potentially expand the heap so
-      // that all the COMMIT events are generated before the end GC
-      // event, and after we retire the GC alloc regions so that all
-      // RETIRE events are generated before the end GC event.
-      _hr_printer.end_gc(false /* full */, (size_t) total_collections());
-
 #ifdef TRACESPINNING
       ParallelTaskTerminator::print_termination_counts();
 #endif
@@ -4044,7 +3949,7 @@
     }
 
     // Print the remainder of the GC log output.
-    log_gc_footer(os::elapsedTime() - pause_start_sec);
+    log_gc_footer(os::elapsed_counter() - pause_start_counter);
 
     // It is not yet to safe to tell the concurrent mark to
     // start as we have some optional output below. We don't want the
@@ -4054,7 +3959,7 @@
     _hrm.verify_optional();
     verify_region_sets_optional();
 
-    TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
+    TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
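+    // The log-level check now happens inside print_taskqueue_stats() itself.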
     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
 
     print_heap_after_gc();
@@ -4209,13 +4114,12 @@
 
       assert(pss->queue_is_empty(), "should be empty");
 
-      if (PrintTerminationStats) {
+      if (log_is_enabled(Debug, gc, task, stats)) {
         MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
         size_t lab_waste;
         size_t lab_undo_waste;
         pss->waste(lab_waste, lab_undo_waste);
-        _g1h->print_termination_stats(gclog_or_tty,
-                                      worker_id,
+        _g1h->print_termination_stats(worker_id,
                                       (os::elapsedTime() - start_sec) * 1000.0,   /* elapsed time */
                                       strong_roots_sec * 1000.0,                  /* strong roots time */
                                       term_sec * 1000.0,                          /* evac term time */
@@ -4233,22 +4137,22 @@
   }
 };
 
-void G1CollectedHeap::print_termination_stats_hdr(outputStream* const st) {
-  st->print_raw_cr("GC Termination Stats");
-  st->print_raw_cr("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
-  st->print_raw_cr("thr     ms        ms      %        ms      %    attempts  total   alloc    undo");
-  st->print_raw_cr("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
-}
-
-void G1CollectedHeap::print_termination_stats(outputStream* const st,
-                                              uint worker_id,
+void G1CollectedHeap::print_termination_stats_hdr() {
+  log_debug(gc, task, stats)("GC Termination Stats");
+  log_debug(gc, task, stats)("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
+  log_debug(gc, task, stats)("thr     ms        ms      %%        ms      %%    attempts  total   alloc    undo");
+  log_debug(gc, task, stats)("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
+}
+
+void G1CollectedHeap::print_termination_stats(uint worker_id,
                                               double elapsed_ms,
                                               double strong_roots_ms,
                                               double term_ms,
                                               size_t term_attempts,
                                               size_t alloc_buffer_waste,
                                               size_t undo_waste) const {
-  st->print_cr("%3d %9.2f %9.2f %6.2f "
+  log_debug(gc, task, stats)
+              ("%3d %9.2f %9.2f %6.2f "
                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
                worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
@@ -4297,13 +4201,11 @@
               "claim value %d after unlink less than initial symbol table size %d",
               SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
 
-    if (G1TraceStringSymbolTableScrubbing) {
-      gclog_or_tty->print_cr("Cleaned string and symbol table, "
-                             "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
-                             "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
-                             strings_processed(), strings_removed(),
-                             symbols_processed(), symbols_removed());
-    }
+    log_debug(gc, stringdedup)("Cleaned string and symbol table, "
+                               "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
+                               "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
+                               strings_processed(), strings_removed(),
+                               symbols_processed(), symbols_removed());
   }
 
   void work(uint worker_id) {
@@ -4619,24 +4521,26 @@
 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
  private:
   DirtyCardQueueSet* _queue;
+  G1CollectedHeap* _g1h;
  public:
-  G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
+  G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
+    _queue(queue), _g1h(g1h) { }
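+  // The heap is captured once at construction and handed to the per-worker closure.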
 
   virtual void work(uint worker_id) {
-    G1GCPhaseTimes* phase_times = G1CollectedHeap::heap()->g1_policy()->phase_times();
+    G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
 
-    RedirtyLoggedCardTableEntryClosure cl;
+    RedirtyLoggedCardTableEntryClosure cl(_g1h);
     _queue->par_apply_closure_to_all_completed_buffers(&cl);
 
-    phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_processed());
+    phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
   }
 };
 
 void G1CollectedHeap::redirty_logged_cards() {
   double redirty_logged_cards_start = os::elapsedTime();
 
-  G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
+  G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
   dirty_card_queue_set().reset_for_par_iteration();
   workers()->run_task(&redirty_task);
 
@@ -5141,10 +5045,7 @@
       ClassLoaderDataGraph::clear_claimed_marks();
     }
 
-    // The individual threads will set their evac-failure closures.
-    if (PrintTerminationStats) {
-      print_termination_stats_hdr(gclog_or_tty);
-    }
+    print_termination_stats_hdr();
 
     workers()->run_task(&g1_par_task);
     end_par_time_sec = os::elapsedTime();
@@ -5278,9 +5179,9 @@
   free_region(hr, free_list, par);
 }
 
-void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
-                                           const HeapRegionSetCount& humongous_regions_removed) {
-  if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
+void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
+                                           const uint humongous_regions_removed) {
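+  // Plain region counts suffice now that the region sets no longer track capacity.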
+  if (old_regions_removed > 0 || humongous_regions_removed > 0) {
     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
     _old_set.bulk_remove(old_regions_removed);
     _humongous_set.bulk_remove(humongous_regions_removed);
@@ -5383,11 +5284,8 @@
             "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
   HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
   if (result < end) {
-    gclog_or_tty->cr();
-    gclog_or_tty->print_cr("## wrong marked address on %s bitmap: " PTR_FORMAT,
-                           bitmap_name, p2i(result));
-    gclog_or_tty->print_cr("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT,
-                           bitmap_name, p2i(tams), p2i(end));
+    log_info(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
+    log_info(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
     return false;
   }
   return true;
@@ -5412,9 +5310,8 @@
     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
   }
   if (!res_p || !res_n) {
-    gclog_or_tty->print_cr("#### Bitmap verification failed for " HR_FORMAT,
-                           HR_FORMAT_PARAMS(hr));
-    gclog_or_tty->print_cr("#### Caller: %s", caller);
+    log_info(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
+    log_info(gc, verify)("#### Caller: %s", caller);
     return false;
   }
   return true;
@@ -5466,42 +5363,42 @@
     InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
     if (hr->is_humongous()) {
       if (hr->in_collection_set()) {
-        gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
+        log_info(gc, verify)("## humongous region %u in CSet", i);
         _failures = true;
         return true;
       }
       if (cset_state.is_in_cset()) {
-        gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
+        log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
         _failures = true;
         return true;
       }
       if (hr->is_continues_humongous() && cset_state.is_humongous()) {
-        gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
+        log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
         _failures = true;
         return true;
       }
     } else {
       if (cset_state.is_humongous()) {
-        gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
+        log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
         _failures = true;
         return true;
       }
       if (hr->in_collection_set() != cset_state.is_in_cset()) {
-        gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
-                               hr->in_collection_set(), cset_state.value(), i);
+        log_info(gc, verify)("## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
+                             hr->in_collection_set(), cset_state.value(), i);
         _failures = true;
         return true;
       }
       if (cset_state.is_in_cset()) {
         if (hr->is_young() != (cset_state.is_young())) {
-          gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
-                                 hr->is_young(), cset_state.value(), i);
+          log_info(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
+                               hr->is_young(), cset_state.value(), i);
           _failures = true;
           return true;
         }
         if (hr->is_old() != (cset_state.is_old())) {
-          gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
-                                 hr->is_old(), cset_state.value(), i);
+          log_info(gc, verify)("## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
+                               hr->is_old(), cset_state.value(), i);
           _failures = true;
           return true;
         }
@@ -5669,12 +5566,12 @@
  private:
   FreeRegionList* _free_region_list;
   HeapRegionSet* _proxy_set;
-  HeapRegionSetCount _humongous_regions_removed;
+  uint _humongous_regions_removed;
   size_t _freed_bytes;
  public:
 
   G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
-    _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) {
+    _free_region_list(free_region_list), _humongous_regions_removed(0), _freed_bytes(0) {
   }
 
   virtual bool doHeapRegion(HeapRegion* r) {
@@ -5718,9 +5615,7 @@
     uint region_idx = r->hrm_index();
     if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
         !r->rem_set()->is_empty()) {
-
-      if (G1TraceEagerReclaimHumongousObjects) {
-        gclog_or_tty->print_cr("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT "  with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
+      log_debug(gc, humongous)("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT "  with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                                region_idx,
                                (size_t)obj->size() * HeapWordSize,
                                p2i(r->bottom()),
@@ -5730,8 +5625,6 @@
                                g1h->is_humongous_reclaim_candidate(region_idx),
                                obj->is_typeArray()
                               );
-      }
-
       return false;
     }
 
@@ -5739,8 +5632,7 @@
               "Only eagerly reclaiming type arrays is supported, but the object "
               PTR_FORMAT " is not.", p2i(r->bottom()));
 
-    if (G1TraceEagerReclaimHumongousObjects) {
-      gclog_or_tty->print_cr("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
+    log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                              region_idx,
                              (size_t)obj->size() * HeapWordSize,
                              p2i(r->bottom()),
@@ -5750,7 +5642,7 @@
                              g1h->is_humongous_reclaim_candidate(region_idx),
                              obj->is_typeArray()
                             );
-    }
+
     // Need to clear mark bit of the humongous object if already set.
     if (next_bitmap->isMarked(r->bottom())) {
       next_bitmap->clear(r->bottom());
@@ -5759,7 +5651,7 @@
       HeapRegion* next = g1h->next_region_in_humongous(r);
       _freed_bytes += r->used();
       r->set_containing_set(NULL);
-      _humongous_regions_removed.increment(1u, r->capacity());
+      _humongous_regions_removed++;
       g1h->free_humongous_region(r, _free_region_list, false);
       r = next;
     } while (r != NULL);
@@ -5767,24 +5659,20 @@
     return false;
   }
 
-  HeapRegionSetCount& humongous_free_count() {
+  uint humongous_free_count() {
     return _humongous_regions_removed;
   }
 
   size_t bytes_freed() const {
     return _freed_bytes;
   }
-
-  size_t humongous_reclaimed() const {
-    return _humongous_regions_removed.length();
-  }
 };
 
 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
   assert_at_safepoint(true);
 
   if (!G1EagerReclaimHumongousObjects ||
-      (!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) {
+      (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
     return;
   }
@@ -5796,8 +5684,7 @@
   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
   heap_region_iterate(&cl);
 
-  HeapRegionSetCount empty_set;
-  remove_from_old_sets(empty_set, cl.humongous_free_count());
+  remove_from_old_sets(0, cl.humongous_free_count());
 
   G1HRPrinter* hrp = hr_printer();
   if (hrp->is_active()) {
@@ -5812,7 +5699,7 @@
   decrement_summary_bytes(cl.bytes_freed());
 
   g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
-                                                                    cl.humongous_reclaimed());
+                                                                    cl.humongous_free_count());
 }
 
 // This routine is similar to the above but does not record
@@ -5837,10 +5724,7 @@
 }
 
 void G1CollectedHeap::set_free_regions_coming() {
-  if (G1ConcRegionFreeingVerbose) {
-    gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
-                           "setting free regions coming");
-  }
+  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");
 
   assert(!free_regions_coming(), "pre-condition");
   _free_regions_coming = true;
@@ -5855,10 +5739,7 @@
     SecondaryFreeList_lock->notify_all();
   }
 
-  if (G1ConcRegionFreeingVerbose) {
-    gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
-                           "reset free regions coming");
-  }
+  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : reset free regions coming");
 }
 
 void G1CollectedHeap::wait_while_free_regions_coming() {
@@ -5868,10 +5749,7 @@
     return;
   }
 
-  if (G1ConcRegionFreeingVerbose) {
-    gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
-                           "waiting for free regions");
-  }
+  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : waiting for free regions");
 
   {
     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
@@ -5880,10 +5758,7 @@
     }
   }
 
-  if (G1ConcRegionFreeingVerbose) {
-    gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
-                           "done waiting for free regions");
-  }
+  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : done waiting for free regions");
 }
 
 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
@@ -5901,8 +5776,8 @@
   NoYoungRegionsClosure() : _success(true) { }
   bool doHeapRegion(HeapRegion* r) {
     if (r->is_young()) {
-      gclog_or_tty->print_cr("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
-                             p2i(r->bottom()), p2i(r->end()));
+      log_info(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
+                           p2i(r->bottom()), p2i(r->end()));
       _success = false;
     }
     return false;
@@ -6076,7 +5951,7 @@
                                               false /* do_expand */);
     if (new_alloc_region != NULL) {
       set_region_short_lived_locked(new_alloc_region);
-      _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
+      _hr_printer.alloc(new_alloc_region, young_list_full);
       check_bitmaps("Mutator Region Allocation", new_alloc_region);
       return new_alloc_region;
     }
@@ -6117,13 +5992,12 @@
       new_alloc_region->record_timestamp();
       if (is_survivor) {
         new_alloc_region->set_survivor();
-        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
         check_bitmaps("Survivor Region Allocation", new_alloc_region);
       } else {
         new_alloc_region->set_old();
-        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
         check_bitmaps("Old Region Allocation", new_alloc_region);
       }
+      _hr_printer.alloc(new_alloc_region);
       bool during_im = collector_state()->during_initial_mark_pause();
       new_alloc_region->note_start_of_copying(during_im);
       return new_alloc_region;
@@ -6152,11 +6026,8 @@
 
   if (index != G1_NO_HRM_INDEX) {
     if (expanded) {
-      ergo_verbose1(ErgoHeapSizing,
-                    "attempt heap expansion",
-                    ergo_format_reason("requested address range outside heap bounds")
-                    ergo_format_byte("region size"),
-                    HeapRegion::GrainWords * HeapWordSize);
+      log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
+                                HeapRegion::GrainWords * HeapWordSize);
     }
     _hrm.allocate_free_regions_starting_at(index, 1);
     return region_at(index);
@@ -6173,9 +6044,9 @@
   HeapRegionManager*   _hrm;
 
 public:
-  HeapRegionSetCount _old_count;
-  HeapRegionSetCount _humongous_count;
-  HeapRegionSetCount _free_count;
+  uint _old_count;
+  uint _humongous_count;
+  uint _free_count;
 
   VerifyRegionListsClosure(HeapRegionSet* old_set,
                            HeapRegionSet* humongous_set,
@@ -6188,13 +6059,13 @@
       // TODO
     } else if (hr->is_humongous()) {
       assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
-      _humongous_count.increment(1u, hr->capacity());
+      _humongous_count++;
     } else if (hr->is_empty()) {
       assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
-      _free_count.increment(1u, hr->capacity());
+      _free_count++;
     } else if (hr->is_old()) {
       assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
-      _old_count.increment(1u, hr->capacity());
+      _old_count++;
     } else {
       // There are no other valid region types. Check for one invalid
       // one we can identify: pinned without old or humongous set.
@@ -6205,17 +6076,9 @@
   }
 
   void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
-    guarantee(old_set->length() == _old_count.length(), "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length());
-    guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), "Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
-              old_set->total_capacity_bytes(), _old_count.capacity());
-
-    guarantee(humongous_set->length() == _humongous_count.length(), "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length());
-    guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), "Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
-              humongous_set->total_capacity_bytes(), _humongous_count.capacity());
-
-    guarantee(free_list->num_free_regions() == _free_count.length(), "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length());
-    guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), "Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
-              free_list->total_capacity_bytes(), _free_count.capacity());
+    guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
+    guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
+    guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
   }
 };
 
--- a/src/share/vm/gc/g1/g1CollectedHeap.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1CollectedHeap.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -245,9 +245,11 @@
   // instead of doing a STW GC. Currently, a concurrent cycle is
   // explicitly started if:
   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
-  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
-  // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
-  // (d) cause == _g1_humongous_allocation
+  // (b) cause == _g1_humongous_allocation
+  // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
+  // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
+  // (e) cause == _update_allocation_context_stats_inc
+  // (f) cause == _wb_conc_mark
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
   // indicates whether we are in young or mixed GC mode
@@ -288,8 +290,7 @@
   void verify_before_gc();
   void verify_after_gc();
 
-  void log_gc_header();
-  void log_gc_footer(double pause_time_sec);
+  void log_gc_footer(double pause_time_counter);
 
   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 
@@ -571,6 +572,9 @@
   void register_old_region_with_cset(HeapRegion* r) {
     _in_cset_fast_test.set_in_old(r->hrm_index());
   }
+  inline void register_ext_region_with_cset(HeapRegion* r) {
+    _in_cset_fast_test.set_ext(r->hrm_index());
+  }
   void clear_in_cset(const HeapRegion* hr) {
     _in_cset_fast_test.clear(hr);
   }
@@ -579,6 +583,8 @@
     _in_cset_fast_test.clear();
   }
 
+  bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
+
   // This is called at the start of either a concurrent cycle or a Full
   // GC to update the number of old marking cycles started.
   void increment_old_marking_cycles_started();
@@ -697,8 +703,8 @@
   void shrink_helper(size_t expand_bytes);
 
   #if TASKQUEUE_STATS
-  static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
-  void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
+  static void print_taskqueue_stats_hdr(outputStream* const st);
+  void print_taskqueue_stats() const;
   void reset_taskqueue_stats();
   #endif // TASKQUEUE_STATS
 
@@ -731,10 +737,9 @@
   void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 
   // Print the header for the per-thread termination statistics.
-  static void print_termination_stats_hdr(outputStream* const st);
+  static void print_termination_stats_hdr();
   // Print actual per-thread termination statistics.
-  void print_termination_stats(outputStream* const st,
-                               uint worker_id,
+  void print_termination_stats(uint worker_id,
                                double elapsed_ms,
                                double strong_roots_ms,
                                double term_ms,
@@ -961,6 +966,10 @@
     return CollectedHeap::G1CollectedHeap;
   }
 
+  virtual const char* name() const {
+    return "G1";
+  }
+
   const G1CollectorState* collector_state() const { return &_collector_state; }
   G1CollectorState* collector_state() { return &_collector_state; }
 
@@ -1123,7 +1132,7 @@
   inline void old_set_remove(HeapRegion* hr);
 
   size_t non_young_capacity_bytes() {
-    return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
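+    // All heap regions have the same capacity (HeapRegion::GrainBytes), so the
+    // region counts fully determine the total capacity.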
+    return (_old_set.length() + _humongous_set.length()) * HeapRegion::GrainBytes;
   }
 
   void set_free_regions_coming();
@@ -1148,7 +1157,7 @@
   // True iff an evacuation has failed in the most-recent collection.
   bool evacuation_failed() { return _evacuation_failed; }
 
-  void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed);
+  void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
   void prepend_to_freelist(FreeRegionList* list);
   void decrement_summary_bytes(size_t bytes);
 
@@ -1358,6 +1367,10 @@
 
   YoungList* young_list() const { return _young_list; }
 
+  uint old_regions_count() const { return _old_set.length(); }
+
+  uint humongous_regions_count() const { return _humongous_set.length(); }
+
   // debugging
   bool check_young_list_well_formed() {
     return _young_list->check_list_well_formed();
@@ -1475,10 +1488,7 @@
   // Currently there is only one place where this is called with
   // vo == UseMarkWord, which is to verify the marking during a
   // full GC.
-  void verify(bool silent, VerifyOption vo);
-
-  // Override; it uses the "prev" marking information
-  virtual void verify(bool silent);
+  void verify(VerifyOption vo);
 
   // The methods below are here for convenience and dispatch the
   // appropriate method depending on value of the given VerifyOption
--- a/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -29,9 +29,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1IHOPControl.hpp"
-#include "gc/g1/g1ErgoVerbose.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
-#include "gc/g1/g1Log.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
@@ -121,6 +119,8 @@
 
   _eden_used_bytes_before_gc(0),
   _survivor_used_bytes_before_gc(0),
+  _old_used_bytes_before_gc(0),
+  _humongous_used_bytes_before_gc(0),
   _heap_used_bytes_before_gc(0),
   _metaspace_used_bytes_before_gc(0),
   _eden_capacity_bytes_before_gc(0),
@@ -177,20 +177,9 @@
   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
   HeapRegionRemSet::setup_remset_size();
 
-  G1ErgoVerbose::initialize();
-  if (PrintAdaptiveSizePolicy) {
-    // Currently, we only use a single switch for all the heuristics.
-    G1ErgoVerbose::set_enabled(true);
-    // Given that we don't currently have a verboseness level
-    // parameter, we'll hardcode this to high. This can be easily
-    // changed in the future.
-    G1ErgoVerbose::set_level(ErgoHigh);
-  } else {
-    G1ErgoVerbose::set_enabled(false);
-  }
-
   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
+  clear_ratio_check_data();
 
   _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);
 
@@ -291,7 +280,7 @@
   // for the first time during initialization.
   _reserve_regions = 0;
 
-  _collectionSetChooser = new CollectionSetChooser();
+  _cset_chooser = new CollectionSetChooser();
 }
 
 G1CollectorPolicy::~G1CollectorPolicy() {
@@ -790,7 +779,7 @@
        curr = curr->get_next_young_region()) {
     SurvRateGroup* group = curr->surv_rate_group();
     if (group == NULL && !curr->is_survivor()) {
-      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
+      log_info(gc, verify)("## %s: encountered NULL surv_rate_group", name);
       ret = false;
     }
 
@@ -798,13 +787,12 @@
       int age = curr->age_in_surv_rate_group();
 
       if (age < 0) {
-        gclog_or_tty->print_cr("## %s: encountered negative age", name);
+        log_info(gc, verify)("## %s: encountered negative age", name);
         ret = false;
       }
 
       if (age <= prev_age) {
-        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
-                               "(%d, %d)", name, age, prev_age);
+        log_info(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
         ret = false;
       }
       prev_age = age;
@@ -854,7 +842,7 @@
   _survivor_surv_rate_group->reset();
   update_young_list_max_and_target_length();
   update_rs_lengths_prediction();
-  _collectionSetChooser->clear();
+  cset_chooser()->clear();
 
   _bytes_allocated_in_old_since_last_gc = 0;
 
@@ -901,7 +889,6 @@
   collector_state()->set_during_marking(true);
   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
   collector_state()->set_during_initial_mark_pause(false);
-  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 }
 
 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
@@ -913,7 +900,6 @@
   double end_time_sec = os::elapsedTime();
   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
-  _cur_mark_stop_world_time_ms += elapsed_time_ms;
   _prev_collection_pause_end_ms += elapsed_time_ms;
 
   record_pause(Remark, _mark_remark_start_sec, end_time_sec);
@@ -983,38 +969,15 @@
   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
   size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
 
+  bool result = false;
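+  // Initiation is only requested while still doing young-only collections;
+  // during mixed collections the threshold crossing is logged but not acted on.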
   if (marking_request_bytes > marking_initiating_used_threshold) {
-    if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
-      ergo_verbose5(ErgoConcCycles,
-        "request concurrent cycle initiation",
-        ergo_format_reason("occupancy higher than threshold")
-        ergo_format_byte("occupancy")
-        ergo_format_byte("allocation request")
-        ergo_format_byte_perc("threshold")
-        ergo_format_str("source"),
-        cur_used_bytes,
-        alloc_byte_size,
-        marking_initiating_used_threshold,
-        (double) marking_initiating_used_threshold / _g1->capacity() * 100,
-        source);
-      return true;
-    } else {
-      ergo_verbose5(ErgoConcCycles,
-        "do not request concurrent cycle initiation",
-        ergo_format_reason("still doing mixed collections")
-        ergo_format_byte("occupancy")
-        ergo_format_byte("allocation request")
-        ergo_format_byte_perc("threshold")
-        ergo_format_str("source"),
-        cur_used_bytes,
-        alloc_byte_size,
-        marking_initiating_used_threshold,
-        (double) InitiatingHeapOccupancyPercent,
-        source);
-    }
+    result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
+    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
+                              result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
+                              cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
   }
 
-  return false;
+  return result;
 }
 
 // Anything below that is considered to be zero
@@ -1028,13 +991,7 @@
   bool last_pause_included_initial_mark = false;
   bool update_stats = !_g1->evacuation_failed();
 
-#ifndef PRODUCT
-  if (G1YoungSurvRateVerbose) {
-    gclog_or_tty->cr();
-    _short_lived_surv_rate_group->print();
-    // do that for any other surv rate groups too
-  }
-#endif // PRODUCT
+  NOT_PRODUCT(_short_lived_surv_rate_group->print());
 
   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
 
@@ -1082,6 +1039,14 @@
         _recent_avg_pause_time_ratio = 1.0;
       }
     }
+
+    // Compute the ratio of just this last pause time to the entire time range stored
+    // in the vectors. Comparing this pause to the entire range, rather than only the
+    // most recent interval, has the effect of smoothing over a possible transient 'burst'
+    // of more frequent pauses that don't really reflect a change in heap occupancy.
+    // This reduces the likelihood of a needless heap expansion being triggered.
+    _last_pause_time_ratio =
+      (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
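+    // For example, a 50ms pause with 10 recorded end times spanning a 5000ms
+    // interval gives a last-pause ratio of 50 * 10 / 5000 = 0.1.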
   }
 
   bool new_in_marking_window = collector_state()->in_marking_window();
@@ -1221,13 +1186,9 @@
   double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
 
   if (update_rs_time_goal_ms < scan_hcc_time_ms) {
-    ergo_verbose2(ErgoTiming,
-                  "adjust concurrent refinement thresholds",
-                  ergo_format_reason("Scanning the HCC expected to take longer than Update RS time goal")
-                  ergo_format_ms("Update RS time goal")
-                  ergo_format_ms("Scan HCC time"),
-                  update_rs_time_goal_ms,
-                  scan_hcc_time_ms);
+    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
+                                "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
+                                update_rs_time_goal_ms, scan_hcc_time_ms);
 
     update_rs_time_goal_ms = 0;
   } else {
@@ -1237,7 +1198,7 @@
                                phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
                                update_rs_time_goal_ms);
 
-  _collectionSetChooser->verify();
+  cset_chooser()->verify();
 }
 
 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
@@ -1305,65 +1266,37 @@
   _eden_used_bytes_before_gc = young_list->eden_used_bytes();
   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
   _heap_capacity_bytes_before_gc = _g1->capacity();
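+  // Old and humongous occupancy are approximated as whole regions
+  // (region count times the region size).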
+  _old_used_bytes_before_gc = _g1->old_regions_count() * HeapRegion::GrainBytes;
+  _humongous_used_bytes_before_gc = _g1->humongous_regions_count() * HeapRegion::GrainBytes;
   _heap_used_bytes_before_gc = _g1->used();
-
-  _eden_capacity_bytes_before_gc =
-         (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
-
-  if (full) {
-    _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
-  }
+  _eden_capacity_bytes_before_gc = (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
+  _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
 }
 
-void G1CollectorPolicy::print_heap_transition(size_t bytes_before) const {
-  size_t bytes_after = _g1->used();
-  size_t capacity = _g1->capacity();
-
-  gclog_or_tty->print(" " SIZE_FORMAT "%s->" SIZE_FORMAT "%s(" SIZE_FORMAT "%s)",
-      byte_size_in_proper_unit(bytes_before),
-      proper_unit_for_byte_size(bytes_before),
-      byte_size_in_proper_unit(bytes_after),
-      proper_unit_for_byte_size(bytes_after),
-      byte_size_in_proper_unit(capacity),
-      proper_unit_for_byte_size(capacity));
-}
-
-void G1CollectorPolicy::print_heap_transition() const {
-  print_heap_transition(_heap_used_bytes_before_gc);
-}
-
-void G1CollectorPolicy::print_detailed_heap_transition(bool full) const {
+void G1CollectorPolicy::print_detailed_heap_transition() const {
   YoungList* young_list = _g1->young_list();
 
   size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
   size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
   size_t heap_used_bytes_after_gc = _g1->used();
+  size_t old_used_bytes_after_gc = _g1->old_regions_count() * HeapRegion::GrainBytes;
+  size_t humongous_used_bytes_after_gc = _g1->humongous_regions_count() * HeapRegion::GrainBytes;
 
   size_t heap_capacity_bytes_after_gc = _g1->capacity();
   size_t eden_capacity_bytes_after_gc =
     (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
+  size_t survivor_capacity_bytes_after_gc = _max_survivor_regions * HeapRegion::GrainBytes;
 
-  gclog_or_tty->print(
-    "   [Eden: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->" EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ") "
-    "Survivors: " EXT_SIZE_FORMAT "->" EXT_SIZE_FORMAT " "
-    "Heap: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->"
-    EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")]",
-    EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
-    EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
-    EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
-    EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
-    EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
-    EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
-    EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
-    EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
-    EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
-    EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));
+  log_info(gc, heap)("Eden: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
+                     _eden_used_bytes_before_gc / K, eden_used_bytes_after_gc /K, eden_capacity_bytes_after_gc /K);
+  log_info(gc, heap)("Survivor: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
+                     _survivor_used_bytes_before_gc / K, survivor_used_bytes_after_gc /K, survivor_capacity_bytes_after_gc /K);
+  log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
+                     _old_used_bytes_before_gc / K, old_used_bytes_after_gc /K);
+  log_info(gc, heap)("Humongous: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
+                     _humongous_used_bytes_before_gc / K, humongous_used_bytes_after_gc /K);
 
-  if (full) {
-    MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
-  }
-
-  gclog_or_tty->cr();
+  MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
 }
 
 void G1CollectorPolicy::print_phases(double pause_time_sec) {
@@ -1599,41 +1532,116 @@
   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
 }
 
-size_t G1CollectorPolicy::expansion_amount() const {
+void G1CollectorPolicy::clear_ratio_check_data() {
+  _ratio_over_threshold_count = 0;
+  _ratio_over_threshold_sum = 0.0;
+  _pauses_since_start = 0;
+}
+
+size_t G1CollectorPolicy::expansion_amount() {
   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
+  double last_gc_overhead = _last_pause_time_ratio * 100.0;
   double threshold = _gc_overhead_perc;
-  if (recent_gc_overhead > threshold) {
-    // We will double the existing space, or take
-    // G1ExpandByPercentOfAvailable % of the available expansion
-    // space, whichever is smaller, bounded below by a minimum
-    // expansion (unless that's all that's left.)
-    const size_t min_expand_bytes = 1*M;
+  size_t expand_bytes = 0;
+
+  // If the heap is at less than half its maximum size, scale the threshold down
+  // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
+  // though the scaling code will likely keep the increase small.
+  if (_g1->capacity() <= _g1->max_capacity() / 2) {
+    threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
+    threshold = MAX2(threshold, 1.0);
+  }
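+  // E.g. with a 10% threshold, a heap at a quarter of its maximum size uses a
+  // scaled threshold of 10 * (0.25 / 0.5) = 5%.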
+
+  // If the last GC time ratio is over the threshold, increment the count of
+  // times it has been exceeded, and add this ratio to the sum of exceeded
+  // ratios.
+  if (last_gc_overhead > threshold) {
+    _ratio_over_threshold_count++;
+    _ratio_over_threshold_sum += last_gc_overhead;
+  }
+
+  // Check if we've had enough GC time ratio checks that were over the
+  // threshold to trigger an expansion. We'll also expand if we've
+  // reached the end of the history buffer and the average of all entries
+  // is still over the threshold. This covers the case where only a few, but
+  // unusually long, pauses pushed the average over the threshold.
+  bool filled_history_buffer = _pauses_since_start == NumPrevPausesForHeuristics;
+  if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
+      (filled_history_buffer && (recent_gc_overhead > threshold))) {
+    size_t min_expand_bytes = HeapRegion::GrainBytes;
     size_t reserved_bytes = _g1->max_capacity();
     size_t committed_bytes = _g1->capacity();
     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
-    size_t expand_bytes;
     size_t expand_bytes_via_pct =
       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
-    expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
+    double scale_factor = 1.0;
+
+    // If the current size is less than 1/4 of the Initial heap size, expand
+    // by half of the delta between the current and Initial sizes, i.e. grow
+    // back quickly.
+    //
+    // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
+    // the available expansion space, whichever is smaller, as the base
+    // expansion size. Then possibly scale this size according to how much the
+    // threshold has (on average) been exceeded by. If the delta is small
+    // (less than the StartScaleDownAt value), scale the size down linearly, but
+    // not by less than MinScaleDownFactor. If the delta is large (greater than
+    // the StartScaleUpAt value), scale up, capping the result at MaxScaleUpFactor
+    // times the base size. The scaling will be linear in the range from
+    // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
+    // ScaleUpRange sets the rate of scaling up.
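+    // For example, with _gc_overhead_perc == 10 the breakpoints are 10
+    // (StartScaleDownAt), 15 (StartScaleUpAt) and 20 (ScaleUpRange); an average
+    // delta of 25 then yields a scale factor of 1 + (25 - 15) / 20 = 1.5.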
+    if (committed_bytes < InitialHeapSize / 4) {
+      expand_bytes = (InitialHeapSize - committed_bytes) / 2;
+    } else {
+      double const MinScaleDownFactor = 0.2;
+      double const MaxScaleUpFactor = 2;
+      double const StartScaleDownAt = _gc_overhead_perc;
+      double const StartScaleUpAt = _gc_overhead_perc * 1.5;
+      double const ScaleUpRange = _gc_overhead_perc * 2.0;
+
+      double ratio_delta;
+      if (filled_history_buffer) {
+        ratio_delta = recent_gc_overhead - threshold;
+      } else {
+        ratio_delta = (_ratio_over_threshold_sum / _ratio_over_threshold_count) - threshold;
+      }
+
+      expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
+      if (ratio_delta < StartScaleDownAt) {
+        scale_factor = ratio_delta / StartScaleDownAt;
+        scale_factor = MAX2(scale_factor, MinScaleDownFactor);
+      } else if (ratio_delta > StartScaleUpAt) {
+        scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
+        scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
+      }
+    }
+
+    log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
+                              "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
+                              recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);
+
+    expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);
+
+    // Ensure the expansion size is at least the minimum growth amount
+    // and at most the remaining uncommitted byte size.
     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
 
-    ergo_verbose5(ErgoHeapSizing,
-                  "attempt heap expansion",
-                  ergo_format_reason("recent GC overhead higher than "
-                                     "threshold after GC")
-                  ergo_format_perc("recent GC overhead")
-                  ergo_format_perc("threshold")
-                  ergo_format_byte("uncommitted")
-                  ergo_format_byte_perc("calculated expansion amount"),
-                  recent_gc_overhead, threshold,
-                  uncommitted_bytes,
-                  expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
+    clear_ratio_check_data();
+  } else {
+    // An expansion was not triggered. If we've started counting, increment
+    // the number of checks we've made in the current window.  If we've
+    // reached the end of the window without resizing, clear the counters to
+    // start again the next time we see a ratio above the threshold.
+    if (_ratio_over_threshold_count > 0) {
+      _pauses_since_start++;
+      if (_pauses_since_start > NumPrevPausesForHeuristics) {
+        clear_ratio_check_data();
+      }
+    }
+  }
 
-    return expand_bytes;
-  } else {
-    return 0;
-  }
+  return expand_bytes;
 }
 
 void G1CollectorPolicy::print_tracing_info() const {
@@ -1693,23 +1701,20 @@
   // even while we are still in the process of reclaiming memory.
   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
   if (!during_cycle) {
-    ergo_verbose1(ErgoConcCycles,
-                  "request concurrent cycle initiation",
-                  ergo_format_reason("requested by GC cause")
-                  ergo_format_str("GC cause"),
-                  GCCause::to_string(gc_cause));
+    log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
     collector_state()->set_initiate_conc_mark_if_possible(true);
     return true;
   } else {
-    ergo_verbose1(ErgoConcCycles,
-                  "do not request concurrent cycle initiation",
-                  ergo_format_reason("concurrent cycle already in progress")
-                  ergo_format_str("GC cause"),
-                  GCCause::to_string(gc_cause));
+    log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
     return false;
   }
 }
 
+void G1CollectorPolicy::initiate_conc_mark() {
+  collector_state()->set_during_initial_mark_pause(true);
+  collector_state()->set_initiate_conc_mark_if_possible(false);
+}
+
 void G1CollectorPolicy::decide_on_conc_mark_initiation() {
   // We are about to decide on whether this pause will be an
   // initial-mark pause.
@@ -1726,17 +1731,18 @@
     // concurrent marking cycle. So we might initiate one.
 
     if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
-      // Initiate a new initial mark only if there is no marking or reclamation going
-      // on.
+      // Initiate a new initial mark if there is no marking or reclamation going on.
+      initiate_conc_mark();
+      log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
+    } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
+      // Initiate a user-requested initial mark. An initial mark must be a
+      // young-only GC, so the collector state must be updated to reflect this.
+      collector_state()->set_gcs_are_young(true);
+      collector_state()->set_last_young_gc(false);
 
-      collector_state()->set_during_initial_mark_pause(true);
-      // And we can now clear initiate_conc_mark_if_possible() as
-      // we've already acted on it.
-      collector_state()->set_initiate_conc_mark_if_possible(false);
-
-      ergo_verbose0(ErgoConcCycles,
-                  "initiate concurrent cycle",
-                  ergo_format_reason("concurrent cycle initiation requested"));
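+      // A user-requested cycle is not occupancy-driven, so keep it out of the
+      // initial-mark-to-mixed timing statistics.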
+      abort_time_to_mixed_tracking();
+      initiate_conc_mark();
+      log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
     } else {
       // The concurrent marking thread is still finishing up the
       // previous cycle. If we start one right now the two cycles
@@ -1750,9 +1756,7 @@
       // and, if it's in a yield point, it's waiting for us to
       // finish. So, at this point we will not start a cycle and we'll
       // let the concurrent marking thread complete the last one.
-      ergo_verbose0(ErgoConcCycles,
-                    "do not initiate concurrent cycle",
-                    ergo_format_reason("concurrent cycle already in progress"));
+      log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
     }
   }
 }
@@ -1807,23 +1811,22 @@
 }
 
 void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
-  _collectionSetChooser->clear();
+  cset_chooser()->clear();
 
   WorkGang* workers = _g1->workers();
   uint n_workers = workers->active_workers();
 
   uint n_regions = _g1->num_regions();
   uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
-  _collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
-  ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
+  cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
+  ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers);
   workers->run_task(&par_known_garbage_task);
 
-  _collectionSetChooser->sort_regions();
+  cset_chooser()->sort_regions();
 
   double end_sec = os::elapsedTime();
   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
-  _cur_mark_stop_world_time_ms += elapsed_time_ms;
   _prev_collection_pause_end_ms += elapsed_time_ms;
 
   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
@@ -2097,40 +2100,22 @@
 
 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                 const char* false_action_str) const {
-  CollectionSetChooser* cset_chooser = _collectionSetChooser;
-  if (cset_chooser->is_empty()) {
-    ergo_verbose0(ErgoMixedGCs,
-                  false_action_str,
-                  ergo_format_reason("candidate old regions not available"));
+  if (cset_chooser()->is_empty()) {
+    log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
     return false;
   }
 
   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
-  size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
+  size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
   double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
   double threshold = (double) G1HeapWastePercent;
   if (reclaimable_perc <= threshold) {
-    ergo_verbose4(ErgoMixedGCs,
-              false_action_str,
-              ergo_format_reason("reclaimable percentage not over threshold")
-              ergo_format_region("candidate old regions")
-              ergo_format_byte_perc("reclaimable")
-              ergo_format_perc("threshold"),
-              cset_chooser->remaining_regions(),
-              reclaimable_bytes,
-              reclaimable_perc, threshold);
+    log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
+                        false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
     return false;
   }
-
-  ergo_verbose4(ErgoMixedGCs,
-                true_action_str,
-                ergo_format_reason("candidate old regions available")
-                ergo_format_region("candidate old regions")
-                ergo_format_byte_perc("reclaimable")
-                ergo_format_perc("threshold"),
-                cset_chooser->remaining_regions(),
-                reclaimable_bytes,
-                reclaimable_perc, threshold);
+  log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
+                      true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
   return true;
 }
 
@@ -2145,7 +2130,7 @@
   // to the CSet chooser in the first place, not how many remain, so
   // that the result is the same during all mixed GCs that follow a cycle.
 
-  const size_t region_num = (size_t) _collectionSetChooser->length();
+  const size_t region_num = (size_t) cset_chooser()->length();
   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
   size_t result = region_num / gc_num;
   // emulate ceiling
@@ -2186,13 +2171,8 @@
   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
   double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
 
-  ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
-                "start choosing CSet",
-                ergo_format_size("_pending_cards")
-                ergo_format_ms("predicted base time")
-                ergo_format_ms("remaining time")
-                ergo_format_ms("target pause time"),
-                _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
+  log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
+                            _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
 
   collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
 
@@ -2228,15 +2208,8 @@
   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
   time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
 
-  ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
-                "add young regions to CSet",
-                ergo_format_region("eden")
-                ergo_format_region("survivors")
-                ergo_format_ms("predicted young region time")
-                ergo_format_ms("target pause time"),
-                eden_region_length, survivor_region_length,
-                _inc_cset_predicted_elapsed_time_ms,
-                target_pause_time_ms);
+  log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms",
+                            eden_region_length, survivor_region_length, _inc_cset_predicted_elapsed_time_ms, target_pause_time_ms);
 
   // The number of recorded young regions is the incremental
   // collection set's current size
@@ -2254,48 +2227,35 @@
 
 
   if (!collector_state()->gcs_are_young()) {
-    CollectionSetChooser* cset_chooser = _collectionSetChooser;
-    cset_chooser->verify();
+    cset_chooser()->verify();
     const uint min_old_cset_length = calc_min_old_cset_length();
     const uint max_old_cset_length = calc_max_old_cset_length();
 
     uint expensive_region_num = 0;
     bool check_time_remaining = adaptive_young_list_length();
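+    // With adaptive young list sizing, old region selection is also bounded by
+    // the remaining pause-time budget; otherwise only the region counts apply.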
 
-    HeapRegion* hr = cset_chooser->peek();
+    HeapRegion* hr = cset_chooser()->peek();
     while (hr != NULL) {
       if (old_cset_region_length() >= max_old_cset_length) {
         // Added maximum number of old regions to the CSet.
-        ergo_verbose2(ErgoCSetConstruction,
-                      "finish adding old regions to CSet",
-                      ergo_format_reason("old CSet region num reached max")
-                      ergo_format_region("old")
-                      ergo_format_region("max"),
-                      old_cset_region_length(), max_old_cset_length);
+        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions",
+                                  old_cset_region_length(), max_old_cset_length);
         break;
       }
 
 
       // Stop adding regions if the remaining reclaimable space is
       // not above G1HeapWastePercent.
-      size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
+      size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
       double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
       double threshold = (double) G1HeapWastePercent;
       if (reclaimable_perc <= threshold) {
         // We've added enough old regions that the amount of uncollected
         // reclaimable space is at or below the waste threshold. Stop
         // adding old regions to the CSet.
-        ergo_verbose5(ErgoCSetConstruction,
-                      "finish adding old regions to CSet",
-                      ergo_format_reason("reclaimable percentage not over threshold")
-                      ergo_format_region("old")
-                      ergo_format_region("max")
-                      ergo_format_byte_perc("reclaimable")
-                      ergo_format_perc("threshold"),
-                      old_cset_region_length(),
-                      max_old_cset_length,
-                      reclaimable_bytes,
-                      reclaimable_perc, threshold);
+        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
+                                  "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
+                                  old_cset_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
         break;
       }
 
@@ -2307,15 +2267,9 @@
           if (old_cset_region_length() >= min_old_cset_length) {
             // We have added the minimum number of old regions to the CSet,
             // we are done with this CSet.
-            ergo_verbose4(ErgoCSetConstruction,
-                          "finish adding old regions to CSet",
-                          ergo_format_reason("predicted time is too high")
-                          ergo_format_ms("predicted time")
-                          ergo_format_ms("remaining time")
-                          ergo_format_region("old")
-                          ergo_format_region("min"),
-                          predicted_time_ms, time_remaining_ms,
-                          old_cset_region_length(), min_old_cset_length);
+            log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). "
+                                      "predicted time: %1.2fms, remaining time: %1.2fms, old %u regions, min %u regions",
+                                      predicted_time_ms, time_remaining_ms, old_cset_region_length(), min_old_cset_length);
             break;
           }
 
@@ -2327,12 +2281,9 @@
         if (old_cset_region_length() >= min_old_cset_length) {
           // In the non-auto-tuning case, we'll finish adding regions
           // to the CSet if we reach the minimum.
-          ergo_verbose2(ErgoCSetConstruction,
-                        "finish adding old regions to CSet",
-                        ergo_format_reason("old CSet region num reached min")
-                        ergo_format_region("old")
-                        ergo_format_region("min"),
-                        old_cset_region_length(), min_old_cset_length);
+          log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions",
+                                    old_cset_region_length(), min_old_cset_length);
           break;
         }
       }
@@ -2340,47 +2291,32 @@
       // We will add this region to the CSet.
       time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
       predicted_old_time_ms += predicted_time_ms;
-      cset_chooser->pop(); // already have region via peek()
+      cset_chooser()->pop(); // already have region via peek()
       _g1->old_set_remove(hr);
       add_old_region_to_cset(hr);
 
-      hr = cset_chooser->peek();
+      hr = cset_chooser()->peek();
     }
     if (hr == NULL) {
-      ergo_verbose0(ErgoCSetConstruction,
-                    "finish adding old regions to CSet",
-                    ergo_format_reason("candidate old regions not available"));
+      log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
     }
 
     if (expensive_region_num > 0) {
       // We print the information once here at the end, predicated on
       // whether we added any apparently expensive regions or not, to
       // avoid generating output per region.
-      ergo_verbose4(ErgoCSetConstruction,
-                    "added expensive regions to CSet",
-                    ergo_format_reason("old CSet region num not reached min")
-                    ergo_format_region("old")
-                    ergo_format_region("expensive")
-                    ergo_format_region("min")
-                    ergo_format_ms("remaining time"),
-                    old_cset_region_length(),
-                    expensive_region_num,
-                    min_old_cset_length,
-                    time_remaining_ms);
+      log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
+                                "old %u regions, expensive: %u regions, min %u regions, remaining time: %1.2fms",
+                                old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
     }
 
-    cset_chooser->verify();
+    cset_chooser()->verify();
   }
 
   stop_incremental_cset_building();
 
-  ergo_verbose3(ErgoCSetConstruction,
-                "finish choosing CSet",
-                ergo_format_region("old")
-                ergo_format_ms("predicted old region time")
-                ergo_format_ms("time remaining"),
-                old_cset_region_length(),
-                predicted_old_time_ms, time_remaining_ms);
+  log_debug(gc, ergo, cset)("Finish choosing CSet. old %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
+                            old_cset_region_length(), predicted_old_time_ms, time_remaining_ms);
 
   double non_young_end_time_sec = os::elapsedTime();
   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
@@ -2439,14 +2375,14 @@
 void TraceYoungGenTimeData::print_summary(const char* str,
                                           const NumberSeq* seq) const {
   double sum = seq->sum();
-  gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
+  tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
                 str, sum / 1000.0, seq->avg());
 }
 
 void TraceYoungGenTimeData::print_summary_sd(const char* str,
                                              const NumberSeq* seq) const {
   print_summary(str, seq);
-  gclog_or_tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
+  tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
                 "(num", seq->num(), seq->sd(), seq->maximum());
 }
 
@@ -2455,18 +2391,18 @@
     return;
   }
 
-  gclog_or_tty->print_cr("ALL PAUSES");
+  tty->print_cr("ALL PAUSES");
   print_summary_sd("   Total", &_total);
-  gclog_or_tty->cr();
-  gclog_or_tty->cr();
-  gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
-  gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
-  gclog_or_tty->cr();
+  tty->cr();
+  tty->cr();
+  tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
+  tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
+  tty->cr();
 
-  gclog_or_tty->print_cr("EVACUATION PAUSES");
+  tty->print_cr("EVACUATION PAUSES");
 
   if (_young_pause_num == 0 && _mixed_pause_num == 0) {
-    gclog_or_tty->print_cr("none");
+    tty->print_cr("none");
   } else {
     print_summary_sd("   Evacuation Pauses", &_total);
     print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
@@ -2481,9 +2417,9 @@
     print_summary("      Clear CT", &_clear_ct);
     print_summary("      Other", &_other);
   }
-  gclog_or_tty->cr();
+  tty->cr();
 
-  gclog_or_tty->print_cr("MISC");
+  tty->print_cr("MISC");
   print_summary_sd("   Stop World", &_all_stop_world_times_ms);
   print_summary_sd("   Yields", &_all_yield_times_ms);
 }
@@ -2500,11 +2436,11 @@
   }
 
   if (_all_full_gc_times.num() > 0) {
-    gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
+    tty->print("\n%4d full_gcs: total time = %8.2f s",
       _all_full_gc_times.num(),
       _all_full_gc_times.sum() / 1000.0);
-    gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
-    gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
+    tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
+    tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
       _all_full_gc_times.sd(),
       _all_full_gc_times.maximum());
   }
--- a/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -159,6 +159,7 @@
   uint max_desired_young_length() {
     return _max_desired_young_length;
   }
+
   bool adaptive_young_list_length() const {
     return _adaptive_size;
   }
@@ -191,7 +192,7 @@
   void initialize_alignments();
   void initialize_flags();
 
-  CollectionSetChooser* _collectionSetChooser;
+  CollectionSetChooser* _cset_chooser;
 
   double _full_collection_start_sec;
 
@@ -201,6 +202,11 @@
   TruncatedSeq* _concurrent_mark_remark_times_ms;
   TruncatedSeq* _concurrent_mark_cleanup_times_ms;
 
+  // Ratio check data for determining if heap growth is necessary.
+  uint _ratio_over_threshold_count;
+  double _ratio_over_threshold_sum;
+  uint _pauses_since_start;
+
   TraceYoungGenTimeData _trace_young_gen_time_data;
   TraceOldGenTimeData   _trace_old_gen_time_data;
 
@@ -224,7 +230,11 @@
 
   enum PredictionConstants {
     TruncatedSeqLength = 10,
-    NumPrevPausesForHeuristics = 10
+    NumPrevPausesForHeuristics = 10,
+    // MinOverThresholdForGrowth is the minimum number of pause time ratios
+    // that must exceed GCTimeRatio before a heap expansion is triggered. It
+    // must be less than NumPrevPausesForHeuristics.
+    MinOverThresholdForGrowth = 4
   };
 
   TruncatedSeq* _alloc_rate_ms_seq;
@@ -405,6 +415,10 @@
   double non_young_other_time_ms() const;
   double constant_other_time_ms(double pause_time_ms) const;
 
+  CollectionSetChooser* cset_chooser() const {
+    return _cset_chooser;
+  }
+
 private:
   // Statistics kept per GC stoppage, pause or full.
   TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
@@ -479,8 +493,10 @@
 
   G1GCPhaseTimes* _phase_times;
 
-  // The ratio of gc time to elapsed time, computed over recent pauses.
+  // The ratio of gc time to elapsed time, computed over recent pauses,
+  // and the ratio for just the last pause.
   double _recent_avg_pause_time_ratio;
+  double _last_pause_time_ratio;
 
   double recent_avg_pause_time_ratio() const {
     return _recent_avg_pause_time_ratio;
@@ -488,7 +504,6 @@
 
   // This set of variables tracks the collector efficiency, in order to
   // determine whether we should initiate a new marking.
-  double _cur_mark_stop_world_time_ms;
   double _mark_remark_start_sec;
   double _mark_cleanup_start_sec;
 
@@ -644,9 +659,7 @@
 
   // Print heap sizing transition (with less and more detail).
 
-  void print_heap_transition(size_t bytes_before) const;
-  void print_heap_transition() const;
-  void print_detailed_heap_transition(bool full = false) const;
+  void print_detailed_heap_transition() const;
 
   virtual void print_phases(double pause_time_sec);
 
@@ -725,6 +738,11 @@
   // (should not be called directly).
   void add_region_to_incremental_cset_common(HeapRegion* hr);
 
+  // Set the state to start a concurrent marking cycle and clear
+  // _initiate_conc_mark_if_possible because it has now been
+  // acted on.
+  void initiate_conc_mark();
+
 public:
   // Add hr to the LHS of the incremental collection set.
   void add_region_to_incremental_cset_lhs(HeapRegion* hr);
@@ -752,7 +770,10 @@
 
   // If an expansion would be appropriate, because recent GC overhead had
   // exceeded the desired limit, return an amount to expand by.
-  virtual size_t expansion_amount() const;
+  virtual size_t expansion_amount();
+
+  // Clear ratio tracking data used by expansion_amount().
+  void clear_ratio_check_data();
 
   // Print tracing information.
   void print_tracing_info() const;
@@ -805,6 +826,8 @@
 
   size_t _eden_used_bytes_before_gc;         // Eden occupancy before GC
   size_t _survivor_used_bytes_before_gc;     // Survivor occupancy before GC
+  size_t _old_used_bytes_before_gc;          // Old occupancy before GC
+  size_t _humongous_used_bytes_before_gc;    // Humongous occupancy before GC
   size_t _heap_used_bytes_before_gc;         // Heap occupancy before GC
   size_t _metaspace_used_bytes_before_gc;    // Metaspace occupancy before GC
 
--- a/src/share/vm/gc/g1/g1ErgoVerbose.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1ErgoVerbose.hpp"
-#include "utilities/ostream.hpp"
-
-ErgoLevel G1ErgoVerbose::_level;
-bool G1ErgoVerbose::_enabled[ErgoHeuristicNum];
-
-void G1ErgoVerbose::initialize() {
-  set_level(ErgoLow);
-  set_enabled(false);
-}
-
-void G1ErgoVerbose::set_level(ErgoLevel level) {
-  _level = level;
-}
-
-void G1ErgoVerbose::set_enabled(ErgoHeuristic n, bool enabled) {
-  assert(0 <= n && n < ErgoHeuristicNum, "pre-condition");
-  _enabled[n] = enabled;
-}
-
-void G1ErgoVerbose::set_enabled(bool enabled) {
-  for (int n = 0; n < ErgoHeuristicNum; n += 1) {
-    set_enabled((ErgoHeuristic) n, enabled);
-  }
-}
-
-const char* G1ErgoVerbose::to_string(int tag) {
-  ErgoHeuristic n = extract_heuristic(tag);
-  switch (n) {
-  case ErgoHeapSizing:        return "Heap Sizing";
-  case ErgoCSetConstruction:  return "CSet Construction";
-  case ErgoConcCycles:        return "Concurrent Cycles";
-  case ErgoMixedGCs:          return "Mixed GCs";
-  case ErgoTiming:            return "Timing";
-  case ErgoIHOP:              return "IHOP";
-  default:
-    ShouldNotReachHere();
-    // Keep the Windows compiler happy
-    return NULL;
-  }
-}
--- a/src/share/vm/gc/g1/g1ErgoVerbose.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,204 +0,0 @@
-/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1ERGOVERBOSE_HPP
-#define SHARE_VM_GC_G1_G1ERGOVERBOSE_HPP
-
-#include "memory/allocation.hpp"
-#include "utilities/debug.hpp"
-
-// The log of G1's heuristic decisions comprises of a series of
-// records which have a similar format in order to maintain
-// consistency across records and ultimately easier parsing of the
-// output, if we ever choose to do that. Each record consists of:
-// * A time stamp to be able to easily correlate each record with
-// other events.
-// * A unique string to allow us to easily identify such records.
-// * The name of the heuristic the record corresponds to.
-// * An action string which describes the action that G1 did or is
-// about to do.
-// * An optional reason string which describes the reason for the
-// action.
-// * An optional number of name/value pairs which contributed to the
-// decision to take the action described in the record.
-//
-// Each record is associated with a "tag" which is the combination of
-// the heuristic the record corresponds to, as well as the min level
-// of verboseness at which the record should be printed. The tag is
-// checked against the current settings to determine whether the record
-// should be printed or not.
-
-// The available verboseness levels.
-typedef enum {
-  // Determine which part of the tag is occupied by the level.
-  ErgoLevelShift = 8,
-  ErgoLevelMask = ~((1 << ErgoLevelShift) - 1),
-
-  // ErgoLow is 0 so that we don't have to explicitly or a heuristic
-  // id with ErgoLow to keep its use simpler.
-  ErgoLow = 0,
-  ErgoHigh = 1 << ErgoLevelShift
-} ErgoLevel;
-
-// The available heuristics.
-typedef enum {
-  // Determines which part of the tag is occupied by the heuristic id.
-  ErgoHeuristicMask = ~ErgoLevelMask,
-
-  ErgoHeapSizing = 0,
-  ErgoCSetConstruction,
-  ErgoConcCycles,
-  ErgoMixedGCs,
-  ErgoTiming,
-  ErgoIHOP,
-
-  ErgoHeuristicNum
-} ErgoHeuristic;
-
-class G1ErgoVerbose : AllStatic {
-private:
-  // Determines the minimum verboseness level at which records will be
-  // printed.
-  static ErgoLevel _level;
-  // Determines which heuristics are currently enabled.
-  static bool _enabled[ErgoHeuristicNum];
-
-  static ErgoLevel extract_level(int tag) {
-    return (ErgoLevel) (tag & ErgoLevelMask);
-  }
-
-  static ErgoHeuristic extract_heuristic(int tag) {
-    return (ErgoHeuristic) (tag & ErgoHeuristicMask);
-  }
-
-public:
-  // Needs to be explicitly called at GC initialization.
-  static void initialize();
-
-  static void set_level(ErgoLevel level);
-  static void set_enabled(ErgoHeuristic h, bool enabled);
-  // It is applied to all heuristics.
-  static void set_enabled(bool enabled);
-
-  static bool enabled(int tag) {
-    ErgoLevel level = extract_level(tag);
-    ErgoHeuristic n = extract_heuristic(tag);
-    return level <= _level && _enabled[n];
-  }
-
-  // Extract the heuristic id from the tag and return a string with
-  // its name.
-  static const char* to_string(int tag);
-};
-
-// The macros below generate the format string for values of different
-// types and/or metrics.
-
-// The reason for the action is optional and is handled specially: the
-// reason string is concatenated here so it's not necessary to pass it
-// as a parameter.
-#define ergo_format_reason(_reason_) ", reason: " _reason_
-
-// Single parameter format strings
-#define ergo_format_str(_name_)      ", " _name_ ": %s"
-#define ergo_format_region(_name_)   ", " _name_ ": %u regions"
-#define ergo_format_byte(_name_)     ", " _name_ ": " SIZE_FORMAT " bytes"
-#define ergo_format_double(_name_)   ", " _name_ ": %1.2f"
-#define ergo_format_perc(_name_)     ", " _name_ ": %1.2f %%"
-#define ergo_format_ms(_name_)       ", " _name_ ": %1.2f ms"
-#define ergo_format_size(_name_)     ", " _name_ ": " SIZE_FORMAT
-
-// Double parameter format strings
-#define ergo_format_byte_perc(_name_)                                   \
-                             ", " _name_ ": " SIZE_FORMAT " bytes (%1.2f %%)"
-
-// Generates the format string
-#define ergo_format(_extra_format_)                           \
-  " %1.3f: [G1Ergonomics (%s) %s" _extra_format_ "]"
-
-// Conditionally, prints an ergonomic decision record. _extra_format_
-// is the format string for the optional items we'd like to print
-// (i.e., the decision's reason and any associated values). This
-// string should be built up using the ergo_*_format macros (see
-// above) to ensure consistency.
-//
-// Since we cannot rely on the compiler supporting variable argument
-// macros, this macro accepts a fixed number of arguments and passes
-// them to the print method. For convenience, we have wrapper macros
-// below which take a specific number of arguments and set the rest to
-// a default value.
-#define ergo_verbose_common(_tag_, _action_, _extra_format_,                \
-                            _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_) \
-  do {                                                                      \
-    if (G1ErgoVerbose::enabled((_tag_))) {                                  \
-      gclog_or_tty->print_cr(ergo_format(_extra_format_),                   \
-                             os::elapsedTime(),                             \
-                             G1ErgoVerbose::to_string((_tag_)),             \
-                             (_action_),                                    \
-                             (_arg0_), (_arg1_), (_arg2_),                  \
-                             (_arg3_), (_arg4_), (_arg5_));                 \
-    }                                                                       \
-  } while (0)
-
-
-#define ergo_verbose6(_tag_, _action_, _extra_format_,                  \
-                      _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_)   \
-  ergo_verbose_common(_tag_, _action_, _extra_format_,                  \
-                      _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_)
-
-#define ergo_verbose5(_tag_, _action_, _extra_format_,                  \
-                      _arg0_, _arg1_, _arg2_, _arg3_, _arg4_)           \
-  ergo_verbose6(_tag_, _action_, _extra_format_ "%s",                   \
-                _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, "")
-
-#define ergo_verbose4(_tag_, _action_, _extra_format_,                  \
-                      _arg0_, _arg1_, _arg2_, _arg3_)                   \
-  ergo_verbose5(_tag_, _action_, _extra_format_ "%s",                   \
-                _arg0_, _arg1_, _arg2_, _arg3_, "")
-
-#define ergo_verbose3(_tag_, _action_, _extra_format_,                  \
-                      _arg0_, _arg1_, _arg2_)                           \
-  ergo_verbose4(_tag_, _action_, _extra_format_ "%s",                   \
-                _arg0_, _arg1_, _arg2_, "")
-
-#define ergo_verbose2(_tag_, _action_, _extra_format_,                  \
-                      _arg0_, _arg1_)                                   \
-  ergo_verbose3(_tag_, _action_, _extra_format_ "%s",                   \
-                _arg0_, _arg1_, "")
-
-#define ergo_verbose1(_tag_, _action_, _extra_format_,                  \
-                      _arg0_)                                           \
-  ergo_verbose2(_tag_, _action_, _extra_format_ "%s",                   \
-                _arg0_, "")
-
-
-#define ergo_verbose0(_tag_, _action_, _extra_format_)                  \
-  ergo_verbose1(_tag_, _action_, _extra_format_ "%s",                   \
-                "")
-
-#define ergo_verbose(_tag_, _action_)                                   \
-  ergo_verbose0(_tag_, _action_, "")
-
-
-#endif // SHARE_VM_GC_G1_G1ERGOVERBOSE_HPP
--- a/src/share/vm/gc/g1/g1EvacFailure.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1EvacFailure.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -47,8 +47,9 @@
   virtual void do_oop(      oop* p) { do_oop_work(p); }
   template <class T> void do_oop_work(T* p) {
     assert(_from->is_in_reserved(p), "paranoia");
-    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
-        !_from->is_survivor()) {
+    assert(!_from->is_survivor(), "Unexpected evac failure in survivor region");
+
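+    // Enqueue a deferred card mark only for references that point outside the
+    // region whose evacuation failed.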
+    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p))) {
       size_t card_index = _ct_bs->index_for(p);
       if (_ct_bs->mark_card_deferred(card_index)) {
         _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
--- a/src/share/vm/gc/g1/g1EvacStats.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1EvacStats.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -26,91 +26,97 @@
 #include "memory/allocation.inline.hpp"
 #include "gc/g1/g1EvacStats.hpp"
 #include "gc/shared/gcId.hpp"
+#include "logging/log.hpp"
 #include "trace/tracing.hpp"
 
 void G1EvacStats::adjust_desired_plab_sz() {
-  if (PrintPLAB) {
-    gclog_or_tty->print(" (allocated = " SIZE_FORMAT " wasted = " SIZE_FORMAT " "
+  if (!ResizePLAB) {
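+    // Not resizing the PLAB: just log the collected statistics and reset.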
+    log_debug(gc, plab)(" (allocated = " SIZE_FORMAT " wasted = " SIZE_FORMAT " "
                         "unused = " SIZE_FORMAT " used = " SIZE_FORMAT " "
                         "undo_waste = " SIZE_FORMAT " region_end_waste = " SIZE_FORMAT " "
                         "regions filled = %u direct_allocated = " SIZE_FORMAT " "
                         "failure_used = " SIZE_FORMAT " failure_waste = " SIZE_FORMAT ") ",
                         _allocated, _wasted, _unused, used(), _undo_wasted, _region_end_waste,
                         _regions_filled, _direct_allocated, _failure_used, _failure_waste);
+    // Clear accumulators for next round.
+    reset();
+    return;
   }
 
-  if (ResizePLAB) {
-
-    assert(is_object_aligned(max_size()) && min_size() <= max_size(),
-           "PLAB clipping computation may be incorrect");
+  assert(is_object_aligned(max_size()) && min_size() <= max_size(),
+         "PLAB clipping computation may be incorrect");
 
-    if (_allocated == 0) {
-      assert((_unused == 0),
-             "Inconsistency in PLAB stats: "
-             "_allocated: " SIZE_FORMAT ", "
-             "_wasted: " SIZE_FORMAT ", "
-             "_region_end_waste: " SIZE_FORMAT ", "
-             "_unused: " SIZE_FORMAT ", "
-             "_used  : " SIZE_FORMAT,
-             _allocated, _wasted, _region_end_waste, _unused, used());
-      _allocated = 1;
-    }
-    // The size of the PLAB caps the amount of space that can be wasted at the
-    // end of the collection. In the worst case the last PLAB could be completely
-    // empty.
-    // This allows us to calculate the new PLAB size to achieve the
-    // TargetPLABWastePct given the latest memory usage and that the last buffer
-    // will be G1LastPLABAverageOccupancy full.
-    //
-    // E.g. assume that if in the current GC 100 words were allocated and a
-    // TargetPLABWastePct of 10 had been set.
-    //
-    // So we could waste up to 10 words to meet that percentage. Given that we
-    // also assume that that buffer is typically half-full, the new desired PLAB
-    // size is set to 20 words.
-    //
-    // The amount of allocation performed should be independent of the number of
-    // threads, so should the maximum waste we can spend in total. So if
-    // we used n threads to allocate, each of them can spend maximum waste/n words in
-    // a first rough approximation. The number of threads only comes into play later
-    // when actually retrieving the actual desired PLAB size.
-    //
-    // After calculating this optimal PLAB size the algorithm applies the usual
-    // exponential decaying average over this value to guess the next PLAB size.
-    //
-    // We account region end waste fully to PLAB allocation (in the calculation of
-    // what we consider as "used_for_waste_calculation" below). This is not
-    // completely fair, but is a conservative assumption because PLABs may be sized
-    // flexibly while we cannot adjust inline allocations.
-    // Allocation during GC will try to minimize region end waste so this impact
-    // should be minimal.
-    //
-    // We need to cover overflow when calculating the amount of space actually used
-    // by objects in PLABs when subtracting the region end waste.
-    // Region end waste may be higher than actual allocation. This may occur if many
-    // threads do not allocate anything but a few rather large objects. In this
-    // degenerate case the PLAB size would simply quickly tend to minimum PLAB size,
-    // which is an okay reaction.
-    size_t const used_for_waste_calculation = used() > _region_end_waste ? used() - _region_end_waste : 0;
+  if (_allocated == 0) {
+    assert((_unused == 0),
+           "Inconsistency in PLAB stats: "
+           "_allocated: " SIZE_FORMAT ", "
+           "_wasted: " SIZE_FORMAT ", "
+           "_region_end_waste: " SIZE_FORMAT ", "
+           "_unused: " SIZE_FORMAT ", "
+           "_used  : " SIZE_FORMAT,
+           _allocated, _wasted, _region_end_waste, _unused, used());
+    _allocated = 1;
+  }
+  // The size of the PLAB caps the amount of space that can be wasted at the
+  // end of the collection. In the worst case the last PLAB could be completely
+  // empty.
+  // This allows us to calculate the new PLAB size to achieve the
+  // TargetPLABWastePct given the latest memory usage and that the last buffer
+  // will be G1LastPLABAverageOccupancy full.
+  //
+  // E.g. assume that in the current GC 100 words were allocated and a
+  // TargetPLABWastePct of 10 had been set.
+  //
+  // So we could waste up to 10 words to meet that percentage. Given that we
+  // also assume that the buffer is typically half-full, the new desired PLAB
+  // size is set to 20 words.
+  //
+  // The amount of allocation performed should be independent of the number of
+  // threads, so should the maximum waste we can spend in total. So if
+  // we used n threads to allocate, each of them can spend maximum waste/n words in
+  // a first rough approximation. The number of threads only comes into play later
+  // when actually retrieving the actual desired PLAB size.
+  //
+  // After calculating this optimal PLAB size the algorithm applies the usual
+  // exponential decaying average over this value to guess the next PLAB size.
+  //
+  // We account region end waste fully to PLAB allocation (in the calculation of
+  // what we consider as "used_for_waste_calculation" below). This is not
+  // completely fair, but is a conservative assumption because PLABs may be sized
+  // flexibly while we cannot adjust inline allocations.
+  // Allocation during GC will try to minimize region end waste so this impact
+  // should be minimal.
+  //
+  // We need to cover overflow when calculating the amount of space actually used
+  // by objects in PLABs when subtracting the region end waste.
+  // Region end waste may be higher than actual allocation. This may occur if many
+  // threads do not allocate anything but a few rather large objects. In this
+  // degenerate case the PLAB size would simply quickly tend to minimum PLAB size,
+  // which is an okay reaction.
+  size_t const used_for_waste_calculation = used() > _region_end_waste ? used() - _region_end_waste : 0;
 
-    size_t const total_waste_allowed = used_for_waste_calculation * TargetPLABWastePct;
-    size_t const cur_plab_sz = (size_t)((double)total_waste_allowed / G1LastPLABAverageOccupancy);
-    // Take historical weighted average
-    _filter.sample(cur_plab_sz);
-    // Clip from above and below, and align to object boundary
-    size_t plab_sz;
-    plab_sz = MAX2(min_size(), (size_t)_filter.average());
-    plab_sz = MIN2(max_size(), plab_sz);
-    plab_sz = align_object_size(plab_sz);
-    // Latch the result
-    _desired_net_plab_sz = plab_sz;
-    if (PrintPLAB) {
-      gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " desired_plab_sz = " SIZE_FORMAT ") ", cur_plab_sz, plab_sz);
-    }
-  }
-  if (PrintPLAB) {
-    gclog_or_tty->cr();
-  }
+  size_t const total_waste_allowed = used_for_waste_calculation * TargetPLABWastePct;
+  size_t const cur_plab_sz = (size_t)((double)total_waste_allowed / G1LastPLABAverageOccupancy);
+  // Take historical weighted average
+  _filter.sample(cur_plab_sz);
+  // Clip from above and below, and align to object boundary
+  size_t plab_sz;
+  plab_sz = MAX2(min_size(), (size_t)_filter.average());
+  plab_sz = MIN2(max_size(), plab_sz);
+  plab_sz = align_object_size(plab_sz);
+  // Latch the result
+  _desired_net_plab_sz = plab_sz;
+
+  log_debug(gc, plab)(" (allocated = " SIZE_FORMAT " wasted = " SIZE_FORMAT " "
+                      "unused = " SIZE_FORMAT " used = " SIZE_FORMAT " "
+                      "undo_waste = " SIZE_FORMAT " region_end_waste = " SIZE_FORMAT " "
+                      "regions filled = %u direct_allocated = " SIZE_FORMAT " "
+                      "failure_used = " SIZE_FORMAT " failure_waste = " SIZE_FORMAT ") "
+                      " (plab_sz = " SIZE_FORMAT " desired_plab_sz = " SIZE_FORMAT ")",
+                      _allocated, _wasted, _unused, used(), _undo_wasted, _region_end_waste,
+                      _regions_filled, _direct_allocated, _failure_used, _failure_waste,
+                      cur_plab_sz, plab_sz);
+
   // Clear accumulators for next round.
   reset();
 }
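
To make the sizing math above concrete, here is a minimal standalone sketch (plain C++, not HotSpot code) that mirrors the two statements computing cur_plab_sz, using the values from the comment's worked example:

    #include <cstdio>

    int main() {
      // Values from the worked example in the comment above.
      size_t const used_for_waste_calculation = 100;  // words allocated this GC
      size_t const TargetPLABWastePct         = 10;   // max waste, in percent
      double const G1LastPLABAverageOccupancy = 50.0; // expected last-buffer fill, in percent

      // Mirrors the two statements in the patch; the two percent units cancel.
      size_t const total_waste_allowed = used_for_waste_calculation * TargetPLABWastePct;
      size_t const cur_plab_sz = (size_t)((double)total_waste_allowed / G1LastPLABAverageOccupancy);

      printf("plab_sz = %zu words\n", cur_plab_sz);  // prints: plab_sz = 20 words
      return 0;
    }
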
--- a/src/share/vm/gc/g1/g1GCPhaseTimes.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1GCPhaseTimes.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -26,10 +26,10 @@
 #include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
-#include "gc/g1/g1Log.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/workerDataArray.inline.hpp"
 #include "memory/allocation.hpp"
+#include "logging/log.hpp"
 #include "runtime/os.hpp"
 
 // Helper class for avoiding interleaved logging
@@ -73,66 +73,60 @@
     va_end(ap);
   }
 
-  void print_cr() {
-    gclog_or_tty->print_cr("%s", _buffer);
+  const char* to_string() {
     _cur = _indent_level * INDENT_CHARS;
-  }
-
-  void append_and_print_cr(const char* format, ...)  ATTRIBUTE_PRINTF(2, 3) {
-    va_list ap;
-    va_start(ap, format);
-    vappend(format, ap);
-    va_end(ap);
-    print_cr();
+    return _buffer;
   }
 };
 
+static const char* Indents[4] = {"", "   ", "      ", "         "};
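
The reworked LineBuffer hands the finished string back to the caller instead of printing it itself, so each line reaches the log through exactly one call and cannot interleave with output from other threads; the Indents table supplies the per-level prefixes that used to be handled by LineBuffer's indent level. A simplified, self-contained sketch of the pattern (printf stands in for the single log call; names are illustrative):

    #include <cstdarg>
    #include <cstdio>

    class LineBufferSketch {
      char   _buffer[512];
      size_t _cur;
    public:
      LineBufferSketch() : _cur(0) { _buffer[0] = '\0'; }

      void append(const char* format, ...) {
        va_list ap;
        va_start(ap, format);
        int written = vsnprintf(_buffer + _cur, sizeof(_buffer) - _cur, format, ap);
        va_end(ap);
        if (written > 0) {
          _cur = (_cur + (size_t)written < sizeof(_buffer)) ? _cur + (size_t)written
                                                            : sizeof(_buffer) - 1;
        }
      }

      // The caller emits the whole line with a single log call.
      const char* to_string() { return _buffer; }
    };

    int main() {
      LineBufferSketch buf;
      for (int i = 0; i < 4; i++) {
        buf.append(" %4.1lf", 1.5 * i);  // per-worker values
      }
      printf("%s%-25s%s\n", "      ", "", buf.to_string());  // Indents[2]-style prefix
      return 0;
    }
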
+
 G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
   _max_gc_threads(max_gc_threads)
 {
   assert(max_gc_threads > 0, "Must have some GC threads");
 
-  _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms)", false, G1Log::LevelFiner, 2);
-  _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms)", true, G1Log::LevelFiner, 2);
+  _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start:", false, 2);
+  _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning:", true, 2);
 
   // Root scanning phases
-  _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots (ms)", true, G1Log::LevelFinest, 3);
-  _gc_par_phases[StringTableRoots] = new WorkerDataArray<double>(max_gc_threads, "StringTable Roots (ms)", true, G1Log::LevelFinest, 3);
-  _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms)", true, G1Log::LevelFinest, 3);
-  _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms)", true, G1Log::LevelFinest, 3);
-  _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms)", true, G1Log::LevelFinest, 3);
-  _gc_par_phases[FlatProfilerRoots] = new WorkerDataArray<double>(max_gc_threads, "FlatProfiler Roots (ms)", true, G1Log::LevelFinest, 3);
-  _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots (ms)", true, G1Log::LevelFinest, 3);
-  _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms)", true, G1Log::LevelFinest, 3);
-  _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms)", true, G1Log::LevelFinest, 3);
-  _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms)", true, G1Log::LevelFinest, 3);
-  _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms)", true, G1Log::LevelFinest, 3);
-  _gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD (ms)", true, G1Log::LevelFinest, 3);
-  _gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots (ms)", true, G1Log::LevelFinest, 3);
-  _gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering (ms)", true, G1Log::LevelFinest, 3);
+  _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots:", true, 3);
+  _gc_par_phases[StringTableRoots] = new WorkerDataArray<double>(max_gc_threads, "StringTable Roots:", true, 3);
+  _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots:", true, 3);
+  _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots:", true, 3);
+  _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots:", true, 3);
+  _gc_par_phases[FlatProfilerRoots] = new WorkerDataArray<double>(max_gc_threads, "FlatProfiler Roots:", true, 3);
+  _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots:", true, 3);
+  _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots:", true, 3);
+  _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots:", true, 3);
+  _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots:", true, 3);
+  _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots:", true, 3);
+  _gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD:", true, 3);
+  _gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots:", true, 3);
+  _gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering:", true, 3);
 
-  _gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms)", true, G1Log::LevelFiner, 2);
-  _gc_par_phases[ScanHCC] = new WorkerDataArray<double>(max_gc_threads, "Scan HCC (ms)", true, G1Log::LevelFiner, 3);
+  _gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS:", true, 2);
+  _gc_par_phases[ScanHCC] = new WorkerDataArray<double>(max_gc_threads, "Scan HCC:", true, 3);
   _gc_par_phases[ScanHCC]->set_enabled(ConcurrentG1Refine::hot_card_cache_enabled());
-  _gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms)", true, G1Log::LevelFiner, 2);
-  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms)", true, G1Log::LevelFiner, 2);
-  _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms)", true, G1Log::LevelFiner, 2);
-  _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms)", true, G1Log::LevelFiner, 2);
-  _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms)", true, G1Log::LevelFiner, 2);
-  _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms)", false, G1Log::LevelFiner, 2);
-  _gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms)", true, G1Log::LevelFiner, 2);
+  _gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS:", true, 2);
+  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning:", true, 2);
+  _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy:", true, 2);
+  _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination:", true, 2);
+  _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total:", true, 2);
+  _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End:", false, 2);
+  _gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other:", true, 2);
 
-  _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers", true, G1Log::LevelFiner, 3);
+  _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers:", true, 3);
   _gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_processed_buffers);
 
-  _termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts", true, G1Log::LevelFinest, 3);
+  _termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts:", true, 3);
   _gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);
 
-  _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms)", true, G1Log::LevelFiner, 2);
-  _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms)", true, G1Log::LevelFiner, 2);
+  _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup:", true, 2);
+  _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup:", true, 2);
 
-  _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty", true, G1Log::LevelFinest, 3);
-  _redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards", true, G1Log::LevelFinest, 3);
+  _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty", true, 3);
+  _redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards:", true, 3);
   _gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);
 }
 
@@ -141,6 +135,7 @@
   assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads");
   _active_gc_threads = active_gc_threads;
   _cur_expand_heap_time_ms = 0.0;
+  _external_accounted_time_ms = 0.0;
 
   for (int i = 0; i < GCParPhasesSentinel; i++) {
     _gc_par_phases[i]->reset();
@@ -172,22 +167,17 @@
   }
 }
 
-void G1GCPhaseTimes::print_stats(int level, const char* str, double value) {
-  LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);
-}
-
-void G1GCPhaseTimes::print_stats(int level, const char* str, size_t value) {
-  LineBuffer(level).append_and_print_cr("[%s: " SIZE_FORMAT "]", str, value);
-}
-
-void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint workers) {
-  LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %u]", str, value, workers);
+void G1GCPhaseTimes::print_stats(const char* indent, const char* str, double value) {
+  log_debug(gc, phases)("%s%s: %.1lf ms", indent, str, value);
 }
 
 double G1GCPhaseTimes::accounted_time_ms() {
+    // First subtract any externally accounted time
+    double misc_time_ms = _external_accounted_time_ms;
+
     // Subtract the root region scanning wait time. It's initialized to
     // zero at the start of the pause.
-    double misc_time_ms = _root_region_scan_wait_time_ms;
+    misc_time_ms += _root_region_scan_wait_time_ms;
 
     misc_time_ms += _cur_collection_par_time_ms;
 
@@ -280,10 +270,6 @@
   void print(G1GCPhaseTimes::GCParPhases phase_id) {
     WorkerDataArray<double>* phase = _phase_times->_gc_par_phases[phase_id];
 
-    if (phase->_log_level > G1Log::level() || !phase->_enabled) {
-      return;
-    }
-
     if (phase->_length == 1) {
       print_single_length(phase_id, phase);
     } else {
@@ -291,69 +277,71 @@
     }
   }
 
+
  private:
-
   void print_single_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
     // No need for min, max, average and sum for only one worker
-    LineBuffer buf(phase->_indent_level);
-    buf.append_and_print_cr("[%s:  %.1lf]", phase->_title, _phase_times->get_time_ms(phase_id, 0));
+    log_debug(gc, phases)("%s%s:  %.1lf", Indents[phase->_indent_level], phase->_title, _phase_times->get_time_ms(phase_id, 0));
 
-    if (phase->_thread_work_items != NULL) {
-      LineBuffer buf2(phase->_thread_work_items->_indent_level);
-      buf2.append_and_print_cr("[%s:  " SIZE_FORMAT "]", phase->_thread_work_items->_title, _phase_times->sum_thread_work_items(phase_id));
+    WorkerDataArray<size_t>* work_items = phase->_thread_work_items;
+    if (work_items != NULL) {
+      log_debug(gc, phases)("%s%s:  " SIZE_FORMAT, Indents[work_items->_indent_level], work_items->_title, _phase_times->sum_thread_work_items(phase_id));
     }
   }
 
-  void print_time_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
-    uint active_length = _phase_times->_active_gc_threads;
-    for (uint i = 0; i < active_length; ++i) {
-      buf.append("  %.1lf", _phase_times->get_time_ms(phase_id, i));
+  void print_time_values(const char* indent, G1GCPhaseTimes::GCParPhases phase_id) {
+    if (log_is_enabled(Trace, gc)) {
+      LineBuffer buf(0);
+      uint active_length = _phase_times->_active_gc_threads;
+      for (uint i = 0; i < active_length; ++i) {
+        buf.append(" %4.1lf", _phase_times->get_time_ms(phase_id, i));
+      }
+      const char* line = buf.to_string();
+      log_trace(gc, phases)("%s%-25s%s", indent, "", line);
     }
-    buf.print_cr();
   }
 
-  void print_count_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
-    uint active_length = _phase_times->_active_gc_threads;
-    for (uint i = 0; i < active_length; ++i) {
-      buf.append("  " SIZE_FORMAT, _phase_times->get_thread_work_item(phase_id, i));
+  void print_count_values(const char* indent, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
+    if (log_is_enabled(Trace, gc)) {
+      LineBuffer buf(0);
+      uint active_length = _phase_times->_active_gc_threads;
+      for (uint i = 0; i < active_length; ++i) {
+        buf.append("  " SIZE_FORMAT, _phase_times->get_thread_work_item(phase_id, i));
+      }
+      const char* line = buf.to_string();
+      log_trace(gc, phases)("%s%-25s%s", indent, "", line);
     }
-    buf.print_cr();
   }
 
   void print_thread_work_items(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
-    LineBuffer buf(thread_work_items->_indent_level);
-    buf.append("[%s:", thread_work_items->_title);
-
-    if (G1Log::finest()) {
-      print_count_values(buf, phase_id, thread_work_items);
-    }
+    const char* indent = Indents[thread_work_items->_indent_level];
 
     assert(thread_work_items->_print_sum, "%s does not have print sum true even though it is a count", thread_work_items->_title);
 
-    buf.append_and_print_cr(" Min: " SIZE_FORMAT ", Avg: %.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT ", Sum: " SIZE_FORMAT "]",
+    log_debug(gc, phases)("%s%-25s Min: " SIZE_FORMAT ", Avg: %4.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT ", Sum: " SIZE_FORMAT,
+        indent, thread_work_items->_title,
         _phase_times->min_thread_work_items(phase_id), _phase_times->average_thread_work_items(phase_id), _phase_times->max_thread_work_items(phase_id),
         _phase_times->max_thread_work_items(phase_id) - _phase_times->min_thread_work_items(phase_id), _phase_times->sum_thread_work_items(phase_id));
+
+    print_count_values(indent, phase_id, thread_work_items);
   }
 
   void print_multi_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
-    LineBuffer buf(phase->_indent_level);
-    buf.append("[%s:", phase->_title);
+    const char* indent = Indents[phase->_indent_level];
 
-    if (G1Log::finest()) {
-      print_time_values(buf, phase_id, phase);
+    if (phase->_print_sum) {
+      log_debug(gc, phases)("%s%-25s Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf, Diff: %4.1lf, Sum: %4.1lf",
+          indent, phase->_title,
+          _phase_times->min_time_ms(phase_id), _phase_times->average_time_ms(phase_id), _phase_times->max_time_ms(phase_id),
+          _phase_times->max_time_ms(phase_id) - _phase_times->min_time_ms(phase_id), _phase_times->sum_time_ms(phase_id));
+    } else {
+      log_debug(gc, phases)("%s%-25s Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf, Diff: %4.1lf",
+          indent, phase->_title,
+          _phase_times->min_time_ms(phase_id), _phase_times->average_time_ms(phase_id), _phase_times->max_time_ms(phase_id),
+          _phase_times->max_time_ms(phase_id) - _phase_times->min_time_ms(phase_id));
     }
 
-    buf.append(" Min: %.1lf, Avg: %.1lf, Max: %.1lf, Diff: %.1lf",
-        _phase_times->min_time_ms(phase_id), _phase_times->average_time_ms(phase_id), _phase_times->max_time_ms(phase_id),
-        _phase_times->max_time_ms(phase_id) - _phase_times->min_time_ms(phase_id));
-
-    if (phase->_print_sum) {
-      // for things like the start and end times the sum is not
-      // that relevant
-      buf.append(", Sum: %.1lf", _phase_times->sum_time_ms(phase_id));
-    }
-
-    buf.append_and_print_cr("]");
+    print_time_values(indent, phase_id);
 
     if (phase->_thread_work_items != NULL) {
       print_thread_work_items(phase_id, phase->_thread_work_items);
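
The Min/Avg/Max/Diff(/Sum) summary above is plain aggregation over the active workers' samples. A small sketch of the computation and the aligned output format (made-up sample values, not HotSpot code):

    #include <algorithm>
    #include <cstdio>

    int main() {
      double const times_ms[] = {1.2, 0.8, 1.5, 1.1};  // per-worker phase times
      size_t const n = sizeof(times_ms) / sizeof(times_ms[0]);

      double min = times_ms[0], max = times_ms[0], sum = 0.0;
      for (size_t i = 0; i < n; i++) {
        min = std::min(min, times_ms[i]);
        max = std::max(max, times_ms[i]);
        sum += times_ms[i];
      }
      // Same shape as the log_debug(gc, phases) line above.
      printf("   %-25s Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf, Diff: %4.1lf, Sum: %4.1lf\n",
             "Object Copy:", min, sum / n, max, max - min, sum);
      return 0;
    }
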
@@ -367,67 +355,59 @@
   G1GCParPhasePrinter par_phase_printer(this);
 
   if (_root_region_scan_wait_time_ms > 0.0) {
-    print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
+    print_stats(Indents[1], "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
   }
 
-  print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);
+  print_stats(Indents[1], "Parallel Time", _cur_collection_par_time_ms);
   for (int i = 0; i <= GCMainParPhasesLast; i++) {
     par_phase_printer.print((GCParPhases) i);
   }
 
-  print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
-  print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);
+  print_stats(Indents[1], "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
+  print_stats(Indents[1], "Code Root Purge", _cur_strong_code_root_purge_time_ms);
   if (G1StringDedup::is_enabled()) {
-    print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);
+    print_stats(Indents[1], "String Dedup Fixup", _cur_string_dedup_fixup_time_ms);
     for (int i = StringDedupPhasesFirst; i <= StringDedupPhasesLast; i++) {
       par_phase_printer.print((GCParPhases) i);
     }
   }
-  print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
-  print_stats(1, "Expand Heap After Collection", _cur_expand_heap_time_ms);
-
+  print_stats(Indents[1], "Clear CT", _cur_clear_ct_time_ms);
+  print_stats(Indents[1], "Expand Heap After Collection", _cur_expand_heap_time_ms);
   double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
-  print_stats(1, "Other", misc_time_ms);
+  print_stats(Indents[1], "Other", misc_time_ms);
   if (_cur_verify_before_time_ms > 0.0) {
-    print_stats(2, "Verify Before", _cur_verify_before_time_ms);
+    print_stats(Indents[2], "Verify Before", _cur_verify_before_time_ms);
   }
   if (G1CollectedHeap::heap()->evacuation_failed()) {
     double evac_fail_handling = _cur_evac_fail_recalc_used + _cur_evac_fail_remove_self_forwards +
       _cur_evac_fail_restore_remsets;
-    print_stats(2, "Evacuation Failure", evac_fail_handling);
-    if (G1Log::finest()) {
-      print_stats(3, "Recalculate Used", _cur_evac_fail_recalc_used);
-      print_stats(3, "Remove Self Forwards", _cur_evac_fail_remove_self_forwards);
-      print_stats(3, "Restore RemSet", _cur_evac_fail_restore_remsets);
-    }
+    print_stats(Indents[2], "Evacuation Failure", evac_fail_handling);
+    log_trace(gc, phases)("%sRecalculate Used: %.1lf ms", Indents[3], _cur_evac_fail_recalc_used);
+    log_trace(gc, phases)("%sRemove Self Forwards: %.1lf ms", Indents[3], _cur_evac_fail_remove_self_forwards);
+    log_trace(gc, phases)("%sRestore RemSet: %.1lf ms", Indents[3], _cur_evac_fail_restore_remsets);
   }
-  print_stats(2, "Choose CSet",
+  print_stats(Indents[2], "Choose CSet",
     (_recorded_young_cset_choice_time_ms +
     _recorded_non_young_cset_choice_time_ms));
-  print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
-  print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
-  print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
+  print_stats(Indents[2], "Ref Proc", _cur_ref_proc_time_ms);
+  print_stats(Indents[2], "Ref Enq", _cur_ref_enq_time_ms);
+  print_stats(Indents[2], "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
   par_phase_printer.print(RedirtyCards);
   if (G1EagerReclaimHumongousObjects) {
-    print_stats(2, "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
-    if (G1Log::finest()) {
-      print_stats(3, "Humongous Total", _cur_fast_reclaim_humongous_total);
-      print_stats(3, "Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
-    }
-    print_stats(2, "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
-    if (G1Log::finest()) {
-      print_stats(3, "Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
-    }
+    print_stats(Indents[2], "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
+
+    log_trace(gc, phases)("%sHumongous Total: " SIZE_FORMAT, Indents[3], _cur_fast_reclaim_humongous_total);
+    log_trace(gc, phases)("%sHumongous Candidate: " SIZE_FORMAT, Indents[3], _cur_fast_reclaim_humongous_candidates);
+    print_stats(Indents[2], "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
+    log_trace(gc, phases)("%sHumongous Reclaimed: " SIZE_FORMAT, Indents[3], _cur_fast_reclaim_humongous_reclaimed);
   }
-  print_stats(2, "Free CSet",
+  print_stats(Indents[2], "Free CSet",
     (_recorded_young_free_cset_time_ms +
     _recorded_non_young_free_cset_time_ms));
-  if (G1Log::finest()) {
-    print_stats(3, "Young Free CSet", _recorded_young_free_cset_time_ms);
-    print_stats(3, "Non-Young Free CSet", _recorded_non_young_free_cset_time_ms);
-  }
+  log_trace(gc, phases)("%sYoung Free CSet: %.1lf ms", Indents[3], _recorded_young_free_cset_time_ms);
+  log_trace(gc, phases)("%sNon-Young Free CSet: %.1lf ms", Indents[3], _recorded_non_young_free_cset_time_ms);
   if (_cur_verify_after_time_ms > 0.0) {
-    print_stats(2, "Verify After", _cur_verify_after_time_ms);
+    print_stats(Indents[2], "Verify After", _cur_verify_after_time_ms);
   }
 }
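
With the move to unified logging, the verbosity that the removed G1Log levels used to control is now selected by log level on the gc+phases tag set. Assuming the standard -Xlog syntax, roughly:

    java -Xlog:gc+phases=debug ...   # per-phase summaries (the log_debug lines above)
    java -Xlog:gc+phases=trace ...   # additionally per-worker values (the log_trace lines)
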
 
--- a/src/share/vm/gc/g1/g1GCPhaseTimes.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1GCPhaseTimes.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -99,6 +99,8 @@
   double _cur_collection_start_sec;
   double _root_region_scan_wait_time_ms;
 
+  double _external_accounted_time_ms;
+
   double _recorded_young_cset_choice_time_ms;
   double _recorded_non_young_cset_choice_time_ms;
 
@@ -117,9 +119,7 @@
   double _cur_verify_after_time_ms;
 
   // Helper methods for detailed logging
-  void print_stats(int level, const char* str, double value);
-  void print_stats(int level, const char* str, size_t value);
-  void print_stats(int level, const char* str, double value, uint workers);
+  void print_stats(const char*, const char* str, double value);
 
   void note_gc_end();
 
@@ -244,6 +244,10 @@
     _cur_verify_after_time_ms = time_ms;
   }
 
+  void inc_external_accounted_time_ms(double time_ms) {
+    _external_accounted_time_ms += time_ms;
+  }
+
   double accounted_time_ms();
 
   double cur_collection_start_sec() {
--- a/src/share/vm/gc/g1/g1HRPrinter.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1HRPrinter.hpp"
-#include "gc/g1/heapRegion.hpp"
-#include "utilities/ostream.hpp"
-
-const char* G1HRPrinter::action_name(ActionType action) {
-  switch(action) {
-    case Alloc:          return "ALLOC";
-    case AllocForce:     return "ALLOC-FORCE";
-    case Retire:         return "RETIRE";
-    case Reuse:          return "REUSE";
-    case CSet:           return "CSET";
-    case EvacFailure:    return "EVAC-FAILURE";
-    case Cleanup:        return "CLEANUP";
-    case PostCompaction: return "POST-COMPACTION";
-    case Commit:         return "COMMIT";
-    case Uncommit:       return "UNCOMMIT";
-    default:             ShouldNotReachHere();
-  }
-  // trying to keep the Windows compiler happy
-  return NULL;
-}
-
-const char* G1HRPrinter::region_type_name(RegionType type) {
-  switch (type) {
-    case Unset:              return NULL;
-    case Eden:               return "Eden";
-    case Survivor:           return "Survivor";
-    case Old:                return "Old";
-    case StartsHumongous:    return "StartsH";
-    case ContinuesHumongous: return "ContinuesH";
-    case Archive:            return "Archive";
-    default:                 ShouldNotReachHere();
-  }
-  // trying to keep the Windows compiler happy
-  return NULL;
-}
-
-const char* G1HRPrinter::phase_name(PhaseType phase) {
-  switch (phase) {
-    case StartGC:     return "StartGC";
-    case EndGC:       return "EndGC";
-    case StartFullGC: return "StartFullGC";
-    case EndFullGC:   return "EndFullGC";
-    default:          ShouldNotReachHere();
-  }
-  // trying to keep the Windows compiler happy
-  return NULL;
-}
-
-#define G1HR_PREFIX     " G1HR"
-
-void G1HRPrinter::print(ActionType action, RegionType type,
-                        HeapRegion* hr, HeapWord* top) {
-  const char* action_str = action_name(action);
-  const char* type_str   = region_type_name(type);
-  HeapWord* bottom = hr->bottom();
-
-  if (type_str != NULL) {
-    if (top != NULL) {
-      gclog_or_tty->print_cr(G1HR_PREFIX " %s(%s) " PTR_FORMAT " " PTR_FORMAT,
-                             action_str, type_str, p2i(bottom), p2i(top));
-    } else {
-      gclog_or_tty->print_cr(G1HR_PREFIX " %s(%s) " PTR_FORMAT,
-                             action_str, type_str, p2i(bottom));
-    }
-  } else {
-    if (top != NULL) {
-      gclog_or_tty->print_cr(G1HR_PREFIX " %s " PTR_FORMAT " " PTR_FORMAT,
-                             action_str, p2i(bottom), p2i(top));
-    } else {
-      gclog_or_tty->print_cr(G1HR_PREFIX " %s " PTR_FORMAT,
-                             action_str, p2i(bottom));
-    }
-  }
-}
-
-void G1HRPrinter::print(ActionType action, HeapWord* bottom, HeapWord* end) {
-  const char* action_str = action_name(action);
-
-  gclog_or_tty->print_cr(G1HR_PREFIX " %s [" PTR_FORMAT "," PTR_FORMAT "]",
-                         action_str, p2i(bottom), p2i(end));
-}
-
-void G1HRPrinter::print(PhaseType phase, size_t phase_num) {
-  const char* phase_str = phase_name(phase);
-  gclog_or_tty->print_cr(G1HR_PREFIX " #%s " SIZE_FORMAT, phase_str, phase_num);
-}
--- a/src/share/vm/gc/g1/g1HRPrinter.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1HRPrinter.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -26,157 +26,84 @@
 #define SHARE_VM_GC_G1_G1HRPRINTER_HPP
 
 #include "gc/g1/heapRegion.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 
 #define SKIP_RETIRED_FULL_REGIONS 1
 
 class G1HRPrinter VALUE_OBJ_CLASS_SPEC {
-public:
-  typedef enum {
-    Alloc,
-    AllocForce,
-    Retire,
-    Reuse,
-    CSet,
-    EvacFailure,
-    Cleanup,
-    PostCompaction,
-    Commit,
-    Uncommit
-  } ActionType;
-
-  typedef enum {
-    Unset,
-    Eden,
-    Survivor,
-    Old,
-    StartsHumongous,
-    ContinuesHumongous,
-    Archive
-  } RegionType;
-
-  typedef enum {
-    StartGC,
-    EndGC,
-    StartFullGC,
-    EndFullGC
-  } PhaseType;
 
 private:
-  bool _active;
 
-  static const char* action_name(ActionType action);
-  static const char* region_type_name(RegionType type);
-  static const char* phase_name(PhaseType phase);
-
-  // Print an action event. This version is used in most scenarios and
-  // only prints the region's bottom. The parameters type and top are
-  // optional (the "not set" values are Unset and NULL).
-  static void print(ActionType action, RegionType type,
-                    HeapRegion* hr, HeapWord* top);
-
-  // Print an action event. This version prints both the region's
-  // bottom and end. Used for Commit / Uncommit events.
-  static void print(ActionType action, HeapWord* bottom, HeapWord* end);
-
-  // Print a phase event.
-  static void print(PhaseType phase, size_t phase_num);
+  // Print an action event.
+  static void print(const char* action, HeapRegion* hr) {
+    log_trace(gc, region)("G1HR %s(%s) [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT "]",
+                          action, hr->get_type_str(), p2i(hr->bottom()), p2i(hr->top()), p2i(hr->end()));
+  }
 
 public:
   // In some places we iterate over a list in order to generate output
   // for the list's elements. By exposing this we can avoid this
   // iteration if the printer is not active.
-  const bool is_active() { return _active; }
+  const bool is_active() { return log_is_enabled(Trace, gc, region); }
 
-  // Have to set this explicitly as we have to do this during the
-  // heap's initialize() method, not in the constructor.
-  void set_active(bool active) { _active = active; }
-
-  // The methods below are convenient wrappers for the print() methods.
+  // The methods below are convenient wrappers for the print() method.
 
-  void alloc(HeapRegion* hr, RegionType type, bool force = false) {
+  void alloc(HeapRegion* hr, bool force = false) {
     if (is_active()) {
-      print((!force) ? Alloc : AllocForce, type, hr, NULL);
-    }
-  }
-
-  void alloc(RegionType type, HeapRegion* hr, HeapWord* top) {
-    if (is_active()) {
-      print(Alloc, type, hr, top);
+      print((force) ? "ALLOC-FORCE" : "ALLOC", hr);
     }
   }
 
   void retire(HeapRegion* hr) {
     if (is_active()) {
       if (!SKIP_RETIRED_FULL_REGIONS || hr->top() < hr->end()) {
-        print(Retire, Unset, hr, hr->top());
+        print("RETIRE", hr);
       }
     }
   }
 
   void reuse(HeapRegion* hr) {
     if (is_active()) {
-      print(Reuse, Unset, hr, NULL);
+      print("REUSE", hr);
     }
   }
 
   void cset(HeapRegion* hr) {
     if (is_active()) {
-      print(CSet, Unset, hr, NULL);
+      print("CSET", hr);
     }
   }
 
   void evac_failure(HeapRegion* hr) {
     if (is_active()) {
-      print(EvacFailure, Unset, hr, NULL);
+      print("EVAC-FAILURE", hr);
     }
   }
 
   void cleanup(HeapRegion* hr) {
     if (is_active()) {
-      print(Cleanup, Unset, hr, NULL);
-    }
-  }
-
-  void post_compaction(HeapRegion* hr, RegionType type) {
-    if (is_active()) {
-      print(PostCompaction, type, hr, hr->top());
+      print("CLEANUP", hr);
     }
   }
 
-  void commit(HeapWord* bottom, HeapWord* end) {
+  void post_compaction(HeapRegion* hr) {
     if (is_active()) {
-      print(Commit, bottom, end);
-    }
-  }
-
-  void uncommit(HeapWord* bottom, HeapWord* end) {
-    if (is_active()) {
-      print(Uncommit, bottom, end);
+      print("POST-COMPACTION", hr);
     }
   }
 
-  void start_gc(bool full, size_t gc_num) {
+  void commit(HeapRegion* hr) {
     if (is_active()) {
-      if (!full) {
-        print(StartGC, gc_num);
-      } else {
-        print(StartFullGC, gc_num);
-      }
+      print("COMMIT", hr);
     }
   }
 
-  void end_gc(bool full, size_t gc_num) {
+  void uncommit(HeapRegion* hr) {
     if (is_active()) {
-      if (!full) {
-        print(EndGC, gc_num);
-      } else {
-        print(EndFullGC, gc_num);
-      }
+      print("UNCOMMIT", hr);
     }
   }
-
-  G1HRPrinter() : _active(false) { }
 };
 
 #endif // SHARE_VM_GC_G1_G1HRPRINTER_HPP
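
The printer now derives is_active() from log_is_enabled(Trace, gc, region), which preserves the property the comment mentions: callers can skip whole iterations when region tracing is off. A minimal sketch of that guard pattern (a plain bool and printf stand in for the unified-logging query and the log_trace call):

    #include <cstdio>

    static bool region_trace_enabled = false;  // stand-in for log_is_enabled(Trace, gc, region)

    struct Region { const void* bottom; const void* top; const void* end; };

    static void print_region(const char* action, const Region& r) {
      printf("G1HR %s(%s) [%p, %p, %p]\n", action, "Eden", r.bottom, r.top, r.end);
    }

    int main() {
      Region regions[3] = {};
      if (region_trace_enabled) {   // is_active(): avoid the iteration entirely when off
        for (const Region& r : regions) {
          print_region("CSET", r);
        }
      }
      return 0;
    }
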
--- a/src/share/vm/gc/g1/g1HotCardCache.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1HotCardCache.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -123,7 +123,7 @@
   // Resets the hot card cache and discards the entries.
   void reset_hot_cache() {
     assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
-    assert(Thread::current_noinline()->is_VM_thread(), "Current thread should be the VMthread");
+    assert(Thread::current()->is_VM_thread(), "Current thread should be the VMthread");
     if (default_use_cache()) {
         reset_hot_cache_internal();
     }
--- a/src/share/vm/gc/g1/g1IHOPControl.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1IHOPControl.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -24,10 +24,10 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1ErgoVerbose.hpp"
 #include "gc/g1/g1IHOPControl.hpp"
 #include "gc/g1/g1Predictions.hpp"
 #include "gc/shared/gcTrace.hpp"
+#include "logging/log.hpp"
 
 G1IHOPControl::G1IHOPControl(double initial_ihop_percent, size_t target_occupancy) :
   _initial_ihop_percent(initial_ihop_percent),
@@ -47,20 +47,14 @@
 
 void G1IHOPControl::print() {
   size_t cur_conc_mark_start_threshold = get_conc_mark_start_threshold();
-  ergo_verbose6(ErgoIHOP,
-                "basic information",
-                ergo_format_reason("value update")
-                ergo_format_byte_perc("threshold")
-                ergo_format_byte("target occupancy")
-                ergo_format_byte("current occupancy")
-                ergo_format_double("recent old gen allocation rate")
-                ergo_format_double("recent marking phase length"),
-                cur_conc_mark_start_threshold,
-                cur_conc_mark_start_threshold * 100.0 / _target_occupancy,
-                _target_occupancy,
-                G1CollectedHeap::heap()->used(),
-                _last_allocation_time_s > 0.0 ? _last_allocated_bytes / _last_allocation_time_s : 0.0,
-                last_marking_length_s());
+  log_debug(gc, ihop)("Basic information (value update), threshold: " SIZE_FORMAT "B (%1.2f), target occupancy: " SIZE_FORMAT "B, current occupancy: " SIZE_FORMAT "B,"
+                      " recent old gen allocation rate: %1.2f, recent marking phase length: %1.2f",
+                      cur_conc_mark_start_threshold,
+                      cur_conc_mark_start_threshold * 100.0 / _target_occupancy,
+                      _target_occupancy,
+                      G1CollectedHeap::heap()->used(),
+                      _last_allocation_time_s > 0.0 ? _last_allocated_bytes / _last_allocation_time_s : 0.0,
+                      last_marking_length_s());
 }
 
 void G1IHOPControl::send_trace_event(G1NewTracer* tracer) {
@@ -192,21 +186,14 @@
 void G1AdaptiveIHOPControl::print() {
   G1IHOPControl::print();
   size_t actual_target = actual_target_threshold();
-  ergo_verbose6(ErgoIHOP,
-                "adaptive IHOP information",
-                ergo_format_reason("value update")
-                ergo_format_byte_perc("threshold")
-                ergo_format_byte("internal target occupancy")
-                ergo_format_double("predicted old gen allocation rate")
-                ergo_format_double("predicted marking phase length")
-                ergo_format_str("prediction active"),
-                get_conc_mark_start_threshold(),
-                percent_of(get_conc_mark_start_threshold(), actual_target),
-                actual_target,
-                _predictor->get_new_prediction(&_allocation_rate_s),
-                _predictor->get_new_prediction(&_marking_times_s),
-                have_enough_data_for_prediction() ? "true" : "false"
-                );
+  log_debug(gc, ihop)("Adaptive IHOP information (value update), threshold: " SIZE_FORMAT "B (%1.2f), internal target occupancy: " SIZE_FORMAT "B,"
+                      " predicted old gen allocation rate: %1.2f, predicted marking phase length: %1.2f, prediction active: %s",
+                      get_conc_mark_start_threshold(),
+                      percent_of(get_conc_mark_start_threshold(), actual_target),
+                      actual_target,
+                      _predictor->get_new_prediction(&_allocation_rate_s),
+                      _predictor->get_new_prediction(&_marking_times_s),
+                      have_enough_data_for_prediction() ? "true" : "false");
 }
 
 void G1AdaptiveIHOPControl::send_trace_event(G1NewTracer* tracer) {
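
The ergo_verbose output above becomes a single log_debug line on the (gc, ihop) tag set; assuming the usual unified-logging flag syntax, it would be enabled with:

    java -Xlog:gc+ihop=debug ...
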
--- a/src/share/vm/gc/g1/g1InCSetState.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1InCSetState.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -53,8 +53,12 @@
     // frequency of the checks.
     // The most common check is whether the region is in the collection set or not,
     // this encoding allows us to use a > 0 check.
-    // The other values are simply encoded in increasing generation order, which
-    // makes getting the next generation fast by a simple increment.
+    // The positive values are encoded in increasing generation order, which
+    // makes getting the next generation fast by a simple increment. They are also
+    // used to index into arrays.
+    // The negative values are used for objects requiring various special cases,
+    // for example eager reclamation of humongous objects.
+    Ext          = -2,    // Extension point
     Humongous    = -1,    // The region is humongous
     NotInCSet    =  0,    // The region is not in the collection set.
     Young        =  1,    // The region is in the collection set and a young region.
@@ -76,10 +80,11 @@
   bool is_humongous() const            { return _value == Humongous; }
   bool is_young() const                { return _value == Young; }
   bool is_old() const                  { return _value == Old; }
+  bool is_ext() const                  { return _value == Ext; }
 
 #ifdef ASSERT
-  bool is_default() const              { return !is_in_cset_or_humongous(); }
-  bool is_valid() const                { return (_value >= Humongous) && (_value < Num); }
+  bool is_default() const              { return _value == NotInCSet; }
+  bool is_valid() const                { return (_value >= Ext) && (_value < Num); }
   bool is_valid_gen() const            { return (_value >= Young && _value <= Old); }
 #endif
 };
@@ -105,6 +110,12 @@
     set_by_index(index, InCSetState::Humongous);
   }
 
+  void set_ext(uintptr_t index) {
+    assert(get_by_index(index).is_default(),
+           "State at index " INTPTR_FORMAT " should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value());
+    set_by_index(index, InCSetState::Ext);
+  }
+
   void clear_humongous(uintptr_t index) {
     set_by_index(index, InCSetState::NotInCSet);
   }
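
A standalone sketch of the encoding the comment describes: negative values mark special cases (now including the Ext extension point), zero means not in the collection set, and positive values are generations in increasing order, so the common membership test stays a single > 0 comparison and "next generation" is an increment (illustrative names, not the HotSpot types):

    #include <cassert>
    #include <cstdint>

    enum : int8_t { Ext = -2, Humongous = -1, NotInCSet = 0, Young = 1, Old = 2 };

    static bool is_in_cset(int8_t v)              { return v > 0; }          // common fast check
    static bool is_in_cset_or_humongous(int8_t v) { return v != NotInCSet; } // any non-zero state

    int main() {
      assert(!is_in_cset(Ext) && !is_in_cset(Humongous) && !is_in_cset(NotInCSet));
      assert(is_in_cset(Young) && is_in_cset(Old));
      assert(is_in_cset_or_humongous(Humongous));
      assert(Young + 1 == Old);  // the next generation is a simple increment
      return 0;
    }
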
--- a/src/share/vm/gc/g1/g1Log.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1Log.hpp"
-#include "gc/g1/g1_globals.hpp"
-#include "runtime/globals_extension.hpp"
-
-G1Log::LogLevel G1Log::_level = G1Log::LevelNone;
-
-
-// Updates _level based on PrintGC and PrintGCDetails values (unless
-// G1LogLevel is set explicitly)
-// - PrintGC maps to "fine".
-// - PrintGCDetails maps to "finer".
-void G1Log::update_level() {
-  if (FLAG_IS_DEFAULT(G1LogLevel)) {
-    _level = LevelNone;
-    if (PrintGCDetails) {
-      _level = LevelFiner;
-    } else if (PrintGC) {
-      _level = LevelFine;
-    }
-  }
-}
-
-
-// If G1LogLevel has not been set up we will use the values of PrintGC
-// and PrintGCDetails for the logging level.
-void G1Log::init() {
-  if (!FLAG_IS_DEFAULT(G1LogLevel)) {
-    // PrintGC flags change won't have any affect, because G1LogLevel
-    // is set explicitly
-    if (G1LogLevel[0] == '\0' || strncmp("none", G1LogLevel, 4) == 0 && G1LogLevel[4] == '\0') {
-      _level = LevelNone;
-    } else if (strncmp("fine", G1LogLevel, 4) == 0 && G1LogLevel[4] == '\0') {
-      _level = LevelFine;
-    } else if (strncmp("finer", G1LogLevel, 5) == 0 && G1LogLevel[5] == '\0') {
-      _level = LevelFiner;
-    } else if (strncmp("finest", G1LogLevel, 6) == 0 && G1LogLevel[6] == '\0') {
-      _level = LevelFinest;
-    } else {
-      warning("Unknown logging level '%s', should be one of 'fine', 'finer' or 'finest'.", G1LogLevel);
-    }
-  } else {
-    update_level();
-  }
-}
-
--- a/src/share/vm/gc/g1/g1Log.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1LOG_HPP
-#define SHARE_VM_GC_G1_G1LOG_HPP
-
-#include "memory/allocation.hpp"
-
-class G1Log : public AllStatic {
- public:
-  typedef enum {
-    LevelNone,
-    LevelFine,
-    LevelFiner,
-    LevelFinest
-  } LogLevel;
-
- private:
-  static LogLevel _level;
-
- public:
-  inline static bool fine() {
-    return _level >= LevelFine;
-  }
-
-  inline static bool finer() {
-    return _level >= LevelFiner;
-  }
-
-  inline static bool finest() {
-    return _level == LevelFinest;
-  }
-
-  static LogLevel level() {
-    return _level;
-  }
-
-  static void init();
-
-  // Update to log level to reflect runtime changes to manageable flags
-  static void update_level();
-};
-
-#endif // SHARE_VM_GC_G1_G1LOG_HPP
--- a/src/share/vm/gc/g1/g1MarkSweep.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1MarkSweep.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -29,7 +29,6 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
-#include "gc/g1/g1Log.hpp"
 #include "gc/g1/g1MarkSweep.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
 #include "gc/g1/g1StringDedup.hpp"
@@ -38,7 +37,7 @@
 #include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/modRefBarrierSet.hpp"
 #include "gc/shared/referencePolicy.hpp"
@@ -123,7 +122,7 @@
 void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                     bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer());
+  GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", gc_timer());
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
@@ -183,13 +182,8 @@
     // fail. At the end of the GC, the original mark word values
     // (including hash values) are restored to the appropriate
     // objects.
-    if (!VerifySilently) {
-      gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
-    }
-    g1h->verify(VerifySilently, VerifyOption_G1UseMarkWord);
-    if (!VerifySilently) {
-      gclog_or_tty->print_cr("]");
-    }
+    GCTraceTime(Info, gc, verify)("During GC (full)");
+    g1h->verify(VerifyOption_G1UseMarkWord);
   }
 
   gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
@@ -203,7 +197,7 @@
   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
   // tracking expects us to do so. See comment under phase4.
 
-  GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer());
+  GCTraceTime(Trace, gc) tm("Phase 2: Compute new object addresses", gc_timer());
 
   prepare_compaction();
 }
@@ -236,7 +230,7 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer());
+  GCTraceTime(Trace, gc) tm("Phase 3: Adjust pointers", gc_timer());
 
   // Need cleared claim bits for the roots processing
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -297,7 +291,7 @@
   // to use a higher index (saved from phase2) when verifying perm_gen.
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-  GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer());
+  GCTraceTime(Trace, gc) tm("Phase 4: Move objects", gc_timer());
 
   G1SpaceCompactClosure blk;
   g1h->heap_region_iterate(&blk);
@@ -335,7 +329,7 @@
   FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
 
   hr->set_containing_set(NULL);
-  _humongous_regions_removed.increment(1u, hr->capacity());
+  _humongous_regions_removed++;
 
   _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
   prepare_for_compaction(hr, end);
@@ -364,8 +358,7 @@
 void G1PrepareCompactClosure::update_sets() {
   // We'll recalculate total used bytes and recreate the free list
   // at the end of the GC, so no point in updating those values here.
-  HeapRegionSetCount empty_set;
-  _g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
+  _g1h->remove_from_old_sets(0, _humongous_regions_removed);
 }
 
 bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
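
The new GCTraceTime(Trace, gc) usage above is a scoped timer: it is constructed with a phase title and reports the elapsed time when the scope ends. A minimal RAII sketch of that idea (illustrative only; the real class also feeds the GC timer and the unified log):

    #include <chrono>
    #include <cstdio>

    class ScopedPhaseTimer {
      const char* _title;
      std::chrono::steady_clock::time_point _start;
    public:
      explicit ScopedPhaseTimer(const char* title)
        : _title(title), _start(std::chrono::steady_clock::now()) {}
      ~ScopedPhaseTimer() {
        double ms = std::chrono::duration<double, std::milli>(
                        std::chrono::steady_clock::now() - _start).count();
        printf("%s %.3fms\n", _title, ms);  // stands in for the log_trace(gc) output
      }
    };

    int main() {
      {
        ScopedPhaseTimer tm("Phase 1: Mark live objects");
        // ... the phase's work runs inside this scope ...
      }  // timer destructor logs here
      return 0;
    }
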
--- a/src/share/vm/gc/g1/g1MarkSweep.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1MarkSweep.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -92,7 +92,7 @@
   G1CollectedHeap* _g1h;
   ModRefBarrierSet* _mrbs;
   CompactPoint _cp;
-  HeapRegionSetCount _humongous_regions_removed;
+  uint _humongous_regions_removed;
 
   virtual void prepare_for_compaction(HeapRegion* hr, HeapWord* end);
   void prepare_for_compaction_work(CompactPoint* cp, HeapRegion* hr, HeapWord* end);
@@ -103,7 +103,7 @@
   G1PrepareCompactClosure() :
     _g1h(G1CollectedHeap::heap()),
     _mrbs(_g1h->g1_barrier_set()),
-    _humongous_regions_removed() { }
+    _humongous_regions_removed(0) { }
 
   void update_sets();
   bool doHeapRegion(HeapRegion* hr);
--- a/src/share/vm/gc/g1/g1OopClosures.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1OopClosures.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -31,7 +31,8 @@
 #include "utilities/stack.inline.hpp"
 
 G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1,  G1ParScanThreadState* par_scan_state) :
-  G1ParClosureSuper(g1, par_scan_state),
+  _g1(g1),
+  _par_scan_state(par_scan_state),
   _worker_id(par_scan_state->worker_id()),
   _scanned_klass(NULL),
   _cm(_g1->concurrent_mark())
--- a/src/share/vm/gc/g1/g1OopClosures.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1OopClosures.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -86,8 +86,10 @@
 };
 
 // Add back base class for metadata
-class G1ParCopyHelper : public G1ParClosureSuper {
+class G1ParCopyHelper : public OopClosure {
 protected:
+  G1CollectedHeap* _g1;
+  G1ParScanThreadState* _par_scan_state;
   uint _worker_id;              // Cache value from par_scan_state.
   Klass* _scanned_klass;
   ConcurrentMark* _cm;
@@ -121,17 +123,15 @@
   G1MarkPromotedFromRoot
 };
 
-template <G1Barrier barrier, G1Mark do_mark_object>
+template <G1Barrier barrier, G1Mark do_mark_object, bool use_ext>
 class G1ParCopyClosure : public G1ParCopyHelper {
 public:
   G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-      G1ParCopyHelper(g1, par_scan_state) {
-    assert(ref_processor() == NULL, "sanity");
-  }
+      G1ParCopyHelper(g1, par_scan_state) { }
 
-  template <class T> void do_oop_nv(T* p);
-  virtual void do_oop(oop* p)       { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+  template <class T> void do_oop_work(T* p);
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
 };
 
 class G1KlassScanClosure : public KlassClosure {
@@ -144,20 +144,15 @@
   void do_klass(Klass* klass);
 };
 
-class FilterIntoCSClosure: public ExtendedOopClosure {
+class FilterIntoCSClosure: public OopClosure {
   G1CollectedHeap* _g1;
   OopClosure* _oc;
-  DirtyCardToOopClosure* _dcto_cl;
 public:
-  FilterIntoCSClosure(  DirtyCardToOopClosure* dcto_cl,
-                        G1CollectedHeap* g1,
-                        OopClosure* oc) :
-    _dcto_cl(dcto_cl), _g1(g1), _oc(oc) { }
+  FilterIntoCSClosure(G1CollectedHeap* g1, OopClosure* oc) : _g1(g1), _oc(oc) { }
 
-  template <class T> void do_oop_nv(T* p);
-  virtual void do_oop(oop* p)        { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
-  bool apply_to_weak_ref_discovered_field() { return true; }
+  template <class T> void do_oop_work(T* p);
+  virtual void do_oop(oop* p)        { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p)  { do_oop_work(p); }
 };
 
 class FilterOutOfRegionClosure: public ExtendedOopClosure {
@@ -206,43 +201,43 @@
 // during an evacuation pause) to record cards containing
 // pointers into the collection set.
 
-class G1Mux2Closure : public ExtendedOopClosure {
+class G1Mux2Closure : public OopClosure {
   OopClosure* _c1;
   OopClosure* _c2;
 public:
   G1Mux2Closure(OopClosure *c1, OopClosure *c2);
-  template <class T> void do_oop_nv(T* p);
-  virtual void do_oop(oop* p)        { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
+  template <class T> void do_oop_work(T* p);
+  virtual void do_oop(oop* p)        { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p)  { do_oop_work(p); }
 };
 
 // A closure that returns true if it is actually applied
 // to a reference
 
-class G1TriggerClosure : public ExtendedOopClosure {
+class G1TriggerClosure : public OopClosure {
   bool _triggered;
 public:
   G1TriggerClosure();
   bool triggered() const { return _triggered; }
-  template <class T> void do_oop_nv(T* p);
-  virtual void do_oop(oop* p)        { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
+  template <class T> void do_oop_work(T* p);
+  virtual void do_oop(oop* p)        { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p)  { do_oop_work(p); }
 };
 
 // A closure which uses a triggering closure to determine
 // whether to apply an oop closure.
 
-class G1InvokeIfNotTriggeredClosure: public ExtendedOopClosure {
+class G1InvokeIfNotTriggeredClosure: public OopClosure {
   G1TriggerClosure* _trigger_cl;
   OopClosure* _oop_cl;
 public:
   G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t, OopClosure* oc);
-  template <class T> void do_oop_nv(T* p);
-  virtual void do_oop(oop* p)        { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
+  template <class T> void do_oop_work(T* p);
+  virtual void do_oop(oop* p)        { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p)  { do_oop_work(p); }
 };
 
-class G1UpdateRSOrPushRefOopClosure: public ExtendedOopClosure {
+class G1UpdateRSOrPushRefOopClosure: public OopClosure {
   G1CollectedHeap* _g1;
   G1RemSet* _g1_rem_set;
   HeapRegion* _from;
@@ -268,11 +263,9 @@
     return result;
   }
 
-  bool apply_to_weak_ref_discovered_field() { return true; }
-
-  template <class T> void do_oop_nv(T* p);
-  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
-  virtual void do_oop(oop* p)       { do_oop_nv(p); }
+  template <class T> void do_oop_work(T* p);
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
 };
 
 #endif // SHARE_VM_GC_G1_G1OOPCLOSURES_HPP
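
The extra use_ext template parameter lets the extension-point check compile away entirely in the common closure instantiations, since the condition is a compile-time constant. A minimal sketch of that compile-time switch (hypothetical names, not the HotSpot closures):

    #include <cstdio>

    template <bool use_ext>
    void process_ref(int state) {
      if (use_ext && state == -2) {   // dead code when use_ext is false,
        printf("extension hook\n");   // eliminated by the compiler entirely
        return;
      }
      printf("normal path\n");
    }

    int main() {
      process_ref<false>(-2);  // normal path: the ext check is compiled out
      process_ref<true>(-2);   // extension hook
      return 0;
    }
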
--- a/src/share/vm/gc/g1/g1OopClosures.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1OopClosures.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -42,7 +42,7 @@
  */
 
 template <class T>
-inline void FilterIntoCSClosure::do_oop_nv(T* p) {
+inline void FilterIntoCSClosure::do_oop_work(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop) &&
       _g1->is_in_cset_or_humongous(oopDesc::decode_heap_oop_not_null(heap_oop))) {
@@ -90,8 +90,10 @@
     } else {
       if (state.is_humongous()) {
         _g1->set_humongous_is_live(obj);
+      } else if (state.is_ext()) {
+        _par_scan_state->do_oop_ext(p);
       }
-      _par_scan_state->update_rs(_from, p);
+      _par_scan_state->update_rs(_from, p, obj);
     }
   }
 }
@@ -102,12 +104,15 @@
 
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (_g1->is_in_cset_or_humongous(obj)) {
+    const InCSetState state = _g1->in_cset_state(obj);
+    if (state.is_in_cset_or_humongous()) {
       Prefetch::write(obj->mark_addr(), 0);
       Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
 
       // Place on the references queue
       _par_scan_state->push_on_queue(p);
+    } else if (state.is_ext()) {
+      _par_scan_state->do_oop_ext(p);
     } else {
       assert(!_g1->obj_in_cs(obj), "checking");
     }
@@ -131,27 +136,27 @@
 }
 
 template <class T>
-inline void G1Mux2Closure::do_oop_nv(T* p) {
+inline void G1Mux2Closure::do_oop_work(T* p) {
   // Apply first closure; then apply the second.
   _c1->do_oop(p);
   _c2->do_oop(p);
 }
 
 template <class T>
-inline void G1TriggerClosure::do_oop_nv(T* p) {
+inline void G1TriggerClosure::do_oop_work(T* p) {
   // Record that this closure was actually applied (triggered).
   _triggered = true;
 }
 
 template <class T>
-inline void G1InvokeIfNotTriggeredClosure::do_oop_nv(T* p) {
+inline void G1InvokeIfNotTriggeredClosure::do_oop_work(T* p) {
   if (!_trigger_cl->triggered()) {
     _oop_cl->do_oop(p);
   }
 }
 
 template <class T>
-inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
+inline void G1UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
   oop obj = oopDesc::load_decode_heap_oop(p);
   if (obj == NULL) {
     return;
@@ -249,9 +254,9 @@
   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
 }
 
-template <G1Barrier barrier, G1Mark do_mark_object>
+template <G1Barrier barrier, G1Mark do_mark_object, bool use_ext>
 template <class T>
-void G1ParCopyClosure<barrier, do_mark_object>::do_oop_nv(T* p) {
+void G1ParCopyClosure<barrier, do_mark_object, use_ext>::do_oop_work(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
 
   if (oopDesc::is_null(heap_oop)) {
@@ -286,6 +291,10 @@
     if (state.is_humongous()) {
       _g1->set_humongous_is_live(obj);
     }
+
+    if (use_ext && state.is_ext()) {
+      _par_scan_state->do_oop_ext(p);
+    }
     // The object is not in collection set. If we're a root scanning
     // closure during an initial mark pause then attempt to mark the object.
     if (do_mark_object == G1MarkFromRoot) {
--- a/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -96,12 +96,13 @@
   bool verify_task(StarTask ref) const;
 #endif // ASSERT
 
+  template <class T> void do_oop_ext(T* ref);
   template <class T> void push_on_queue(T* ref);
 
-  template <class T> void update_rs(HeapRegion* from, T* p) {
+  template <class T> void update_rs(HeapRegion* from, T* p, oop o) {
     // If the new value of the field points into the same region, or the
     // field is in a young region, we don't need to include it in the Rset updates.
-    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
+    if (!HeapRegion::is_in_same_region(p, o) && !from->is_young()) {
       size_t card_index = ctbs()->index_for(p);
       // If the card hasn't been added to the buffer, do it.
       if (ctbs()->mark_card_deferred(card_index)) {
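
update_rs now receives the already decoded (and possibly forwarded) oop, so
the filter can use HeapRegion::is_in_same_region(p, o) instead of re-loading
the field, and the widened !from->is_young() test skips remembered set work
for all young regions, not only survivors. The same-region helper itself is
not shown in this hunk; for power-of-two sized, aligned regions it reduces to
an XOR and a shift. A minimal sketch, with LOG_REGION_BYTES standing in for
HotSpot's region-size logarithm:

  #include <stdint.h>

  static const int LOG_REGION_BYTES = 20;  // hypothetical 1 MB regions

  // Two addresses lie in the same region exactly when they agree on every
  // bit above the region size.
  inline bool is_in_same_region(const void* p, const void* o) {
    return (((uintptr_t)p ^ (uintptr_t)o) >> LOG_REGION_BYTES) == 0;
  }
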
--- a/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -40,23 +40,22 @@
   // processed multiple times. So redo this check.
   const InCSetState in_cset_state = _g1h->in_cset_state(obj);
   if (in_cset_state.is_in_cset()) {
-    oop forwardee;
     markOop m = obj->mark();
     if (m->is_marked()) {
-      forwardee = (oop) m->decode_pointer();
+      obj = (oop) m->decode_pointer();
     } else {
-      forwardee = copy_to_survivor_space(in_cset_state, obj, m);
+      obj = copy_to_survivor_space(in_cset_state, obj, m);
     }
-    oopDesc::encode_store_heap_oop(p, forwardee);
+    oopDesc::encode_store_heap_oop(p, obj);
   } else if (in_cset_state.is_humongous()) {
     _g1h->set_humongous_is_live(obj);
   } else {
-    assert(!in_cset_state.is_in_cset_or_humongous(),
-           "In_cset_state must be NotInCSet here, but is " CSETSTATE_FORMAT, in_cset_state.value());
+    assert(in_cset_state.is_default() || in_cset_state.is_ext(),
+           "In_cset_state must be NotInCSet or Ext here, but is " CSETSTATE_FORMAT, in_cset_state.value());
   }
 
   assert(obj != NULL, "Must be");
-  update_rs(from, p);
+  update_rs(from, p, obj);
 }
 
 template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
--- a/src/share/vm/gc/g1/g1ParScanThreadState_ext.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1ParScanThreadState_ext.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -29,3 +29,10 @@
 G1ParScanThreadState* G1ParScanThreadStateSet::new_par_scan_state(uint worker_id, size_t young_cset_length) {
   return new G1ParScanThreadState(_g1h, worker_id, young_cset_length);
 }
+
+template <typename T>
+void G1ParScanThreadState::do_oop_ext(T* ref) {
+}
+
+template void G1ParScanThreadState::do_oop_ext<oop>(oop* ref);
+template void G1ParScanThreadState::do_oop_ext<narrowOop>(narrowOop* ref);
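
The explicit instantiations above are what allow do_oop_ext to keep its empty,
extension-hook body in this _ext.cpp rather than a header: callers elsewhere
see only a declaration, and the linker resolves them against the two
instantiations emitted here. A generic sketch of the pattern, outside HotSpot:

  // hook.cpp: template body hidden in one translation unit
  template <typename T>
  void process_ref(T* ref) {
    // intentionally empty default; an extension build could do real work here
  }

  // Emit exactly the instantiations other translation units may link against.
  template void process_ref<int>(int* ref);
  template void process_ref<long>(long* ref);
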
--- a/src/share/vm/gc/g1/g1Predictions.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1Predictions.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_GC_G1_G1PREDICTIONS_HPP
 #define SHARE_VM_GC_G1_G1PREDICTIONS_HPP
 
-#include "memory/allocation.inline.hpp"
 #include "utilities/numberSeq.hpp"
 
 // Utility class containing various helper methods for prediction.
--- a/src/share/vm/gc/g1/g1RemSet.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1RemSet.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -52,7 +52,7 @@
   for (uint i = 0; i < n_workers(); i++) {
     _cset_rs_update_cl[i] = NULL;
   }
-  if (G1SummarizeRSetStats) {
+  if (log_is_enabled(Trace, gc, remset)) {
     _prev_period_summary.initialize(this);
   }
   // Initialize the card queue set used to hold cards containing
@@ -109,17 +109,6 @@
   }
 }
 
-void ScanRSClosure::printCard(HeapRegion* card_region, size_t card_index,
-    HeapWord* card_start) {
-  gclog_or_tty->print_cr("T %u Region [" PTR_FORMAT ", " PTR_FORMAT ") "
-      "RS names card " SIZE_FORMAT_HEX ": "
-      "[" PTR_FORMAT ", " PTR_FORMAT ")",
-      _worker_i,
-      p2i(card_region->bottom()), p2i(card_region->end()),
-      card_index,
-      p2i(card_start), p2i(card_start + G1BlockOffsetSharedArray::N_words));
-}
-
 void ScanRSClosure::scan_strong_code_roots(HeapRegion* r) {
   double scan_start = os::elapsedTime();
   r->strong_code_roots_do(_code_root_cl);
@@ -152,10 +141,6 @@
     }
     if (current_card < jump_to_card) continue;
     HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
-#if 0
-    gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
-        card_start, card_start + CardTableModRefBS::card_size_in_words);
-#endif
 
     HeapRegion* card_region = _g1h->heap_region_containing(card_start);
     _cards++;
@@ -463,7 +448,7 @@
   update_rs_oop_cl.set_from(r);
 
   G1TriggerClosure trigger_cl;
-  FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
+  FilterIntoCSClosure into_cs_cl(_g1, &trigger_cl);
   G1InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
   G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);
 
@@ -526,31 +511,36 @@
   return has_refs_into_cset;
 }
 
-void G1RemSet::print_periodic_summary_info(const char* header) {
-  G1RemSetSummary current;
-  current.initialize(this);
+void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
+  if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
+      (period_count % G1SummarizeRSetStatsPeriod == 0)) {
+
+    if (!_prev_period_summary.initialized()) {
+      _prev_period_summary.initialize(this);
+    }
 
-  _prev_period_summary.subtract_from(&current);
-  print_summary_info(&_prev_period_summary, header);
+    G1RemSetSummary current;
+    current.initialize(this);
+    _prev_period_summary.subtract_from(&current);
 
-  _prev_period_summary.set(&current);
+    LogHandle(gc, remset) log;
+    log.trace("%s", header);
+    ResourceMark rm;
+    _prev_period_summary.print_on(log.trace_stream());
+
+    _prev_period_summary.set(&current);
+  }
 }
 
 void G1RemSet::print_summary_info() {
-  G1RemSetSummary current;
-  current.initialize(this);
-
-  print_summary_info(&current, " Cumulative RS summary");
-}
-
-void G1RemSet::print_summary_info(G1RemSetSummary * summary, const char * header) {
-  assert(summary != NULL, "just checking");
-
-  if (header != NULL) {
-    gclog_or_tty->print_cr("%s", header);
+  LogHandle(gc, remset, exit) log;
+  if (log.is_trace()) {
+    log.trace(" Cumulative RS summary");
+    G1RemSetSummary current;
+    current.initialize(this);
+    ResourceMark rm;
+    current.print_on(log.trace_stream());
   }
-
-  summary->print_on(gclog_or_tty);
 }
 
 void G1RemSet::prepare_for_verify() {
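
With G1SummarizeRSetStats removed, the periodic summary above is driven
entirely by unified logging plus the diagnostic period flag, and the
cumulative summary moves under the gc+remset+exit tag. Hedged invocation
examples, assuming the JDK 9 -Xlog syntax:

  # periodic summaries every 10 GCs (the period flag is diagnostic, hence the unlock)
  java -Xlog:gc+remset=trace -XX:+UnlockDiagnosticVMOptions -XX:G1SummarizeRSetStatsPeriod=10 ...

  # cumulative remembered set summary at VM exit
  java -Xlog:gc+remset+exit=trace ...
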
--- a/src/share/vm/gc/g1/g1RemSet.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1RemSet.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -33,6 +33,7 @@
 class G1CollectedHeap;
 class ConcurrentG1Refine;
 class G1ParPushHeapRSClosure;
+class outputStream;
 
 // A G1RemSet in which each heap region has a rem set that records the
 // external heap references into it.  Uses a mod ref bs to track updates,
@@ -63,8 +64,6 @@
   // references into the collection set.
   G1ParPushHeapRSClosure** _cset_rs_update_cl;
 
-  // Print the given summary info
-  virtual void print_summary_info(G1RemSetSummary * summary, const char * header = NULL);
 public:
   // This is called to reset dual hash tables after the gc pause
   // is finished and the initial hash table is no longer being
@@ -135,7 +134,7 @@
   virtual void print_summary_info();
 
   // Print accumulated summary info from the last time called.
-  virtual void print_periodic_summary_info(const char* header);
+  virtual void print_periodic_summary_info(const char* header, uint period_count);
 
   // Prepare remembered set for verification.
   virtual void prepare_for_verify();
@@ -199,10 +198,6 @@
 
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(oop* p)       { do_oop_work(p); }
-
-  // Override: this closure is idempotent.
-  //  bool idempotent() { return true; }
-  bool apply_to_weak_ref_discovered_field() { return true; }
 };
 
 #endif // SHARE_VM_GC_G1_G1REMSET_HPP
--- a/src/share/vm/gc/g1/g1RemSetSummary.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1RemSetSummary.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -271,7 +271,7 @@
   void print_summary_on(outputStream* out) {
     RegionTypeCounter* counters[] = { &_young, &_humonguous, &_free, &_old, NULL };
 
-    out->print_cr("\n Current rem set statistics");
+    out->print_cr(" Current rem set statistics");
     out->print_cr("  Total per region rem sets sizes = " SIZE_FORMAT "K."
                   " Max = " SIZE_FORMAT "K.",
                   round_to_K(total_rs_mem_sz()), round_to_K(max_rs_mem_sz()));
@@ -323,7 +323,7 @@
 };
 
 void G1RemSetSummary::print_on(outputStream* out) {
-  out->print_cr("\n Recent concurrent refinement statistics");
+  out->print_cr(" Recent concurrent refinement statistics");
   out->print_cr("  Processed " SIZE_FORMAT " cards",
                 num_concurrent_refined_cards());
   out->print_cr("  Of " SIZE_FORMAT " completed buffers:", num_processed_buf_total());
--- a/src/share/vm/gc/g1/g1RemSetSummary.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1RemSetSummary.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -85,6 +85,7 @@
 
   // initialize and get the first sampling
   void initialize(G1RemSet* remset);
+  bool initialized() const { return _rs_threads_vtimes != NULL; }
 
   void print_on(outputStream* out);
 
--- a/src/share/vm/gc/g1/g1RootClosures.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1RootClosures.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -23,7 +23,38 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/g1/g1RootClosures.inline.hpp"
+#include "gc/g1/g1OopClosures.inline.hpp"
+#include "gc/g1/g1RootClosures.hpp"
+#include "gc/g1/g1SharedClosures.hpp"
+
+// Closures used for standard G1 evacuation.
+class G1EvacuationClosures : public G1EvacuationRootClosures {
+  G1SharedClosures<G1MarkNone> _closures;
+
+public:
+  G1EvacuationClosures(G1CollectedHeap* g1h,
+                       G1ParScanThreadState* pss,
+                       bool gcs_are_young) :
+      _closures(g1h, pss, gcs_are_young, /* must_claim_cld */ false) {}
+
+  OopClosure* weak_oops()   { return &_closures._buffered_oops; }
+  OopClosure* strong_oops() { return &_closures._buffered_oops; }
+
+  CLDClosure* weak_clds()             { return &_closures._clds; }
+  CLDClosure* strong_clds()           { return &_closures._clds; }
+  CLDClosure* thread_root_clds()      { return NULL; }
+  CLDClosure* second_pass_weak_clds() { return NULL; }
+
+  CodeBlobClosure* strong_codeblobs()      { return &_closures._codeblobs; }
+  CodeBlobClosure* weak_codeblobs()        { return &_closures._codeblobs; }
+
+  void flush()                 { _closures._buffered_oops.done(); }
+  double closure_app_seconds() { return _closures._buffered_oops.closure_app_seconds(); }
+
+  OopClosure* raw_strong_oops() { return &_closures._oops; }
+
+  bool trace_metadata()         { return false; }
+};
 
 // Closures used during initial mark.
 // The treatment of "weak" roots is selectable through the template parameter,
--- a/src/share/vm/gc/g1/g1RootClosures.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "gc/g1/bufferingOopClosure.hpp"
-#include "gc/g1/g1CodeBlobClosure.hpp"
-#include "gc/g1/g1CollectedHeap.hpp"
-#include "gc/g1/g1OopClosures.inline.hpp"
-#include "gc/g1/g1RootClosures.hpp"
-
-// Simple holder object for a complete set of closures used by the G1 evacuation code.
-template <G1Mark Mark>
-class G1SharedClosures VALUE_OBJ_CLASS_SPEC {
-public:
-  G1ParCopyClosure<G1BarrierNone,  Mark> _oops;
-  G1ParCopyClosure<G1BarrierKlass, Mark> _oop_in_klass;
-  G1KlassScanClosure                     _klass_in_cld_closure;
-  CLDToKlassAndOopClosure                _clds;
-  G1CodeBlobClosure                      _codeblobs;
-  BufferingOopClosure                    _buffered_oops;
-
-  G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty_klasses, bool must_claim_cld) :
-    _oops(g1h, pss),
-    _oop_in_klass(g1h, pss),
-    _klass_in_cld_closure(&_oop_in_klass, process_only_dirty_klasses),
-    _clds(&_klass_in_cld_closure, &_oops, must_claim_cld),
-    _codeblobs(&_oops),
-    _buffered_oops(&_oops) {}
-};
-
-class G1EvacuationClosures : public G1EvacuationRootClosures {
-  G1SharedClosures<G1MarkNone> _closures;
-
-public:
-  G1EvacuationClosures(G1CollectedHeap* g1h,
-                       G1ParScanThreadState* pss,
-                       bool gcs_are_young) :
-      _closures(g1h, pss, gcs_are_young, /* must_claim_cld */ false) {}
-
-  OopClosure* weak_oops()   { return &_closures._buffered_oops; }
-  OopClosure* strong_oops() { return &_closures._buffered_oops; }
-
-  CLDClosure* weak_clds()             { return &_closures._clds; }
-  CLDClosure* strong_clds()           { return &_closures._clds; }
-  CLDClosure* thread_root_clds()      { return NULL; }
-  CLDClosure* second_pass_weak_clds() { return NULL; }
-
-  CodeBlobClosure* strong_codeblobs()      { return &_closures._codeblobs; }
-  CodeBlobClosure* weak_codeblobs()        { return &_closures._codeblobs; }
-
-  void flush()                 { _closures._buffered_oops.done(); }
-  double closure_app_seconds() { return _closures._buffered_oops.closure_app_seconds(); }
-
-  OopClosure* raw_strong_oops() { return &_closures._oops; }
-
-  bool trace_metadata()         { return false; }
-};
--- a/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1SATBCardTableModRefBS.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -28,6 +28,7 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/satbMarkQueue.hpp"
 #include "gc/shared/memset_with_concurrent_readers.hpp"
+#include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -85,11 +86,6 @@
     return false;
   }
 
-  if  (val == g1_young_gen) {
-    // the card is for a young gen region. We don't need to keep track of all pointers into young
-    return false;
-  }
-
   // Cached bit can be installed either on a clean card or on a claimed card.
   jbyte new_val = val;
   if (val == clean_card_val()) {
@@ -152,17 +148,10 @@
   assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
   assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
 
-  if (TraceCardTableModRefBS) {
-    gclog_or_tty->print_cr("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
-    gclog_or_tty->print_cr("  "
-                  "  &_byte_map[0]: " INTPTR_FORMAT
-                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
-                  p2i(&_byte_map[0]),
-                  p2i(&_byte_map[_last_valid_index]));
-    gclog_or_tty->print_cr("  "
-                  "  byte_map_base: " INTPTR_FORMAT,
-                  p2i(byte_map_base));
-  }
+  log_trace(gc, barrier)("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
+  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
+                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
+  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT,  p2i(byte_map_base));
 }
 
 void
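
This file shows both unified-logging styles used throughout the changeset: the
one-shot log_trace(gc, barrier)(...) macro, which checks the level and formats
in a single statement, and the LogHandle pattern (as in g1RemSet.cpp and
g1StringDedupTable.cpp above), which binds a tag set once so a group of
related lines shares a single level check. A sketch reusing only calls that
appear in this changeset:

  // one-shot: fine for a single line
  log_trace(gc, barrier)("byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));

  // handle: check the level once for several lines
  LogHandle(gc, barrier) log;
  if (log.is_trace()) {
    log.trace("line one");
    log.trace("line two");
  }
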
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/g1/g1SharedClosures.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "gc/g1/bufferingOopClosure.hpp"
+#include "gc/g1/g1CodeBlobClosure.hpp"
+#include "gc/g1/g1OopClosures.hpp"
+#include "memory/iterator.hpp"
+
+class G1CollectedHeap;
+class G1ParScanThreadState;
+
+// Simple holder object for a complete set of closures used by the G1 evacuation code.
+template <G1Mark Mark, bool use_ext = false>
+class G1SharedClosures VALUE_OBJ_CLASS_SPEC {
+public:
+  G1ParCopyClosure<G1BarrierNone,  Mark, use_ext> _oops;
+  G1ParCopyClosure<G1BarrierKlass, Mark, use_ext> _oop_in_klass;
+  G1KlassScanClosure                              _klass_in_cld_closure;
+  CLDToKlassAndOopClosure                         _clds;
+  G1CodeBlobClosure                               _codeblobs;
+  BufferingOopClosure                             _buffered_oops;
+
+  G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty_klasses, bool must_claim_cld) :
+    _oops(g1h, pss),
+    _oop_in_klass(g1h, pss),
+    _klass_in_cld_closure(&_oop_in_klass, process_only_dirty_klasses),
+    _clds(&_klass_in_cld_closure, &_oops, must_claim_cld),
+    _codeblobs(&_oops),
+    _buffered_oops(&_oops) {}
+};
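
G1EvacuationClosures in g1RootClosures.cpp above instantiates this holder with
the use_ext parameter left at its default. A hypothetical extension build
could instead opt the copy closures into the do_oop_ext hook; for illustration
only:

  // as used above (use_ext defaults to false):
  G1SharedClosures<G1MarkNone> closures(g1h, pss, gcs_are_young, /* must_claim_cld */ false);

  // hypothetical extension variant routing ext-state oops through do_oop_ext:
  G1SharedClosures<G1MarkNone, /* use_ext */ true> ext_closures(g1h, pss, gcs_are_young, false);
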
--- a/src/share/vm/gc/g1/g1StringDedupQueue.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1StringDedupQueue.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -28,6 +28,7 @@
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1StringDedupQueue.hpp"
 #include "gc/shared/gcLocker.hpp"
+#include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -152,10 +153,9 @@
   }
 }
 
-void G1StringDedupQueue::print_statistics(outputStream* st) {
-  st->print_cr(
-    "   [Queue]\n"
-    "      [Dropped: " UINTX_FORMAT "]", _queue->_dropped);
+void G1StringDedupQueue::print_statistics() {
+  log_debug(gc, stringdedup)("   [Queue]");
+  log_debug(gc, stringdedup)("      [Dropped: " UINTX_FORMAT "]", _queue->_dropped);
 }
 
 void G1StringDedupQueue::verify() {
--- a/src/share/vm/gc/g1/g1StringDedupQueue.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1StringDedupQueue.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -94,7 +94,7 @@
 
   static void unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl);
 
-  static void print_statistics(outputStream* st);
+  static void print_statistics();
   static void verify();
 };
 
--- a/src/share/vm/gc/g1/g1StringDedupStat.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1StringDedupStat.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/g1StringDedupStat.hpp"
+#include "logging/log.hpp"
 
 G1StringDedupStat::G1StringDedupStat() :
   _inspected(0),
@@ -68,7 +69,7 @@
   _block_elapsed       += stat._block_elapsed;
 }
 
-void G1StringDedupStat::print_summary(outputStream* st, const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
+void G1StringDedupStat::print_summary(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
   double total_deduped_bytes_percent = 0.0;
 
   if (total_stat._new_bytes > 0) {
@@ -76,10 +77,8 @@
     total_deduped_bytes_percent = (double)total_stat._deduped_bytes / (double)total_stat._new_bytes * 100.0;
   }
 
-  st->date_stamp(PrintGCDateStamps);
-  st->stamp(PrintGCTimeStamps);
-  st->print_cr(
-    "[GC concurrent-string-deduplication, "
+  log_info(gc, stringdedup)(
+    "Concurrent String Deduplication "
     G1_STRDEDUP_BYTES_FORMAT_NS "->" G1_STRDEDUP_BYTES_FORMAT_NS "(" G1_STRDEDUP_BYTES_FORMAT_NS "), avg "
     G1_STRDEDUP_PERCENT_FORMAT_NS ", " G1_STRDEDUP_TIME_FORMAT "]",
     G1_STRDEDUP_BYTES_PARAM(last_stat._new_bytes),
@@ -89,7 +88,7 @@
     last_stat._exec_elapsed);
 }
 
-void G1StringDedupStat::print_statistics(outputStream* st, const G1StringDedupStat& stat, bool total) {
+void G1StringDedupStat::print_statistics(const G1StringDedupStat& stat, bool total) {
   double young_percent               = 0.0;
   double old_percent                 = 0.0;
   double skipped_percent             = 0.0;
@@ -134,29 +133,24 @@
   }
 
   if (total) {
-    st->print_cr(
+    log_debug(gc, stringdedup)(
       "   [Total Exec: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT ", Idle: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT ", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT "]",
       stat._exec, stat._exec_elapsed, stat._idle, stat._idle_elapsed, stat._block, stat._block_elapsed);
   } else {
-    st->print_cr(
+    log_debug(gc, stringdedup)(
       "   [Last Exec: " G1_STRDEDUP_TIME_FORMAT ", Idle: " G1_STRDEDUP_TIME_FORMAT ", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT "]",
       stat._exec_elapsed, stat._idle_elapsed, stat._block, stat._block_elapsed);
   }
-  st->print_cr(
-    "      [Inspected:    " G1_STRDEDUP_OBJECTS_FORMAT "]\n"
-    "         [Skipped:   " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]\n"
-    "         [Hashed:    " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]\n"
-    "         [Known:     " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]\n"
-    "         [New:       " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "]\n"
-    "      [Deduplicated: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]\n"
-    "         [Young:     " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]\n"
-    "         [Old:       " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]",
-    stat._inspected,
-    stat._skipped, skipped_percent,
-    stat._hashed, hashed_percent,
-    stat._known, known_percent,
-    stat._new, new_percent, G1_STRDEDUP_BYTES_PARAM(stat._new_bytes),
-    stat._deduped, deduped_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_bytes), deduped_bytes_percent,
-    stat._deduped_young, deduped_young_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_young_bytes), deduped_young_bytes_percent,
-    stat._deduped_old, deduped_old_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_old_bytes), deduped_old_bytes_percent);
+  log_debug(gc, stringdedup)("      [Inspected:    " G1_STRDEDUP_OBJECTS_FORMAT "]", stat._inspected);
+  log_debug(gc, stringdedup)("         [Skipped:   " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]", stat._skipped, skipped_percent);
+  log_debug(gc, stringdedup)("         [Hashed:    " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]", stat._hashed, hashed_percent);
+  log_debug(gc, stringdedup)("         [Known:     " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]", stat._known, known_percent);
+  log_debug(gc, stringdedup)("         [New:       " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "]",
+                             stat._new, new_percent, G1_STRDEDUP_BYTES_PARAM(stat._new_bytes));
+  log_debug(gc, stringdedup)("      [Deduplicated: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]",
+                             stat._deduped, deduped_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_bytes), deduped_bytes_percent);
+  log_debug(gc, stringdedup)("         [Young:     " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]",
+                             stat._deduped_young, deduped_young_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_young_bytes), deduped_young_bytes_percent);
+  log_debug(gc, stringdedup)("         [Old:       " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]",
+                             stat._deduped_old, deduped_old_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_old_bytes), deduped_old_bytes_percent);
 }
--- a/src/share/vm/gc/g1/g1StringDedupStat.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1StringDedupStat.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -135,8 +135,8 @@
 
   void add(const G1StringDedupStat& stat);
 
-  static void print_summary(outputStream* st, const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
-  static void print_statistics(outputStream* st, const G1StringDedupStat& stat, bool total);
+  static void print_summary(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
+  static void print_statistics(const G1StringDedupStat& stat, bool total);
 };
 
 #endif // SHARE_VM_GC_G1_G1STRINGDEDUPSTAT_HPP
--- a/src/share/vm/gc/g1/g1StringDedupTable.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1StringDedupTable.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -30,6 +30,7 @@
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1StringDedupTable.hpp"
 #include "gc/shared/gcLocker.hpp"
+#include "logging/log.hpp"
 #include "memory/padded.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/typeArrayOop.hpp"
@@ -568,19 +569,16 @@
   _entry_cache->trim(max_cache_size);
 }
 
-void G1StringDedupTable::print_statistics(outputStream* st) {
-  st->print_cr(
-    "   [Table]\n"
-    "      [Memory Usage: " G1_STRDEDUP_BYTES_FORMAT_NS "]\n"
-    "      [Size: " SIZE_FORMAT ", Min: " SIZE_FORMAT ", Max: " SIZE_FORMAT "]\n"
-    "      [Entries: " UINTX_FORMAT ", Load: " G1_STRDEDUP_PERCENT_FORMAT_NS ", Cached: " UINTX_FORMAT ", Added: " UINTX_FORMAT ", Removed: " UINTX_FORMAT "]\n"
-    "      [Resize Count: " UINTX_FORMAT ", Shrink Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS "), Grow Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS ")]\n"
-    "      [Rehash Count: " UINTX_FORMAT ", Rehash Threshold: " UINTX_FORMAT ", Hash Seed: 0x%x]\n"
-    "      [Age Threshold: " UINTX_FORMAT "]",
-    G1_STRDEDUP_BYTES_PARAM(_table->_size * sizeof(G1StringDedupEntry*) + (_table->_entries + _entry_cache->size()) * sizeof(G1StringDedupEntry)),
-    _table->_size, _min_size, _max_size,
-    _table->_entries, (double)_table->_entries / (double)_table->_size * 100.0, _entry_cache->size(), _entries_added, _entries_removed,
-    _resize_count, _table->_shrink_threshold, _shrink_load_factor * 100.0, _table->_grow_threshold, _grow_load_factor * 100.0,
-    _rehash_count, _rehash_threshold, _table->_hash_seed,
-    StringDeduplicationAgeThreshold);
+void G1StringDedupTable::print_statistics() {
+  LogHandle(gc, stringdedup) log;
+  log.debug("   [Table]");
+  log.debug("      [Memory Usage: " G1_STRDEDUP_BYTES_FORMAT_NS "]",
+            G1_STRDEDUP_BYTES_PARAM(_table->_size * sizeof(G1StringDedupEntry*) + (_table->_entries + _entry_cache->size()) * sizeof(G1StringDedupEntry)));
+  log.debug("      [Size: " SIZE_FORMAT ", Min: " SIZE_FORMAT ", Max: " SIZE_FORMAT "]", _table->_size, _min_size, _max_size);
+  log.debug("      [Entries: " UINTX_FORMAT ", Load: " G1_STRDEDUP_PERCENT_FORMAT_NS ", Cached: " UINTX_FORMAT ", Added: " UINTX_FORMAT ", Removed: " UINTX_FORMAT "]",
+            _table->_entries, (double)_table->_entries / (double)_table->_size * 100.0, _entry_cache->size(), _entries_added, _entries_removed);
+  log.debug("      [Resize Count: " UINTX_FORMAT ", Shrink Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS "), Grow Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS ")]",
+            _resize_count, _table->_shrink_threshold, _shrink_load_factor * 100.0, _table->_grow_threshold, _grow_load_factor * 100.0);
+  log.debug("      [Rehash Count: " UINTX_FORMAT ", Rehash Threshold: " UINTX_FORMAT ", Hash Seed: 0x%x]", _rehash_count, _rehash_threshold, _table->_hash_seed);
+  log.debug("      [Age Threshold: " UINTX_FORMAT "]", StringDeduplicationAgeThreshold);
 }
--- a/src/share/vm/gc/g1/g1StringDedupTable.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1StringDedupTable.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -234,7 +234,7 @@
 
   static void unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, uint worker_id);
 
-  static void print_statistics(outputStream* st);
+  static void print_statistics();
   static void verify();
 };
 
--- a/src/share/vm/gc/g1/g1StringDedupThread.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1StringDedupThread.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -24,12 +24,12 @@
 
 #include "precompiled.hpp"
 #include "classfile/stringTable.hpp"
-#include "gc/g1/g1Log.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1StringDedupQueue.hpp"
 #include "gc/g1/g1StringDedupTable.hpp"
 #include "gc/g1/g1StringDedupThread.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
+#include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
 
@@ -129,7 +129,7 @@
 
       // Print statistics
       total_stat.add(stat);
-      print(gclog_or_tty, stat, total_stat);
+      print(stat, total_stat);
     }
   }
 
@@ -152,14 +152,14 @@
   }
 }
 
-void G1StringDedupThread::print(outputStream* st, const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
-  if (G1Log::fine() || PrintStringDeduplicationStatistics) {
-    G1StringDedupStat::print_summary(st, last_stat, total_stat);
-    if (PrintStringDeduplicationStatistics) {
-      G1StringDedupStat::print_statistics(st, last_stat, false);
-      G1StringDedupStat::print_statistics(st, total_stat, true);
-      G1StringDedupTable::print_statistics(st);
-      G1StringDedupQueue::print_statistics(st);
+void G1StringDedupThread::print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
+  if (log_is_enabled(Info, gc, stringdedup)) {
+    G1StringDedupStat::print_summary(last_stat, total_stat);
+    if (log_is_enabled(Debug, gc, stringdedup)) {
+      G1StringDedupStat::print_statistics(last_stat, false);
+      G1StringDedupStat::print_statistics(total_stat, true);
+      G1StringDedupTable::print_statistics();
+      G1StringDedupQueue::print_statistics();
     }
   }
 }
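
The print path above maps the old switches onto log levels: the per-cycle
summary, formerly gated by G1Log::fine(), now appears at info for
gc+stringdedup, while the detailed queue and table statistics, formerly behind
-XX:+PrintStringDeduplicationStatistics, require debug. Hedged examples:

  java -XX:+UseStringDeduplication -Xlog:gc+stringdedup=info ...    # summaries only
  java -XX:+UseStringDeduplication -Xlog:gc+stringdedup=debug ...   # summaries plus statistics
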
--- a/src/share/vm/gc/g1/g1StringDedupThread.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1StringDedupThread.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -43,7 +43,7 @@
   G1StringDedupThread();
   ~G1StringDedupThread();
 
-  void print(outputStream* st, const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
+  void print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
 
 public:
   static void create();
--- a/src/share/vm/gc/g1/g1_globals.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1_globals.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -33,9 +33,11 @@
 
 #define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, range, constraint) \
                                                                             \
-  product(bool, G1UseAdaptiveIHOP, false,                                   \
-          "Adaptively adjust InitiatingHeapOccupancyPercent from the "      \
-          "initial value.")                                                 \
+  product(bool, G1UseAdaptiveIHOP, true,                                    \
+          "Adaptively adjust the initiating heap occupancy from the "       \
+          "initial value of InitiatingHeapOccupancyPercent. The policy "    \
+          "attempts to start marking in time based on application "         \
+          "behavior.")                                                      \
                                                                             \
   experimental(size_t, G1AdaptiveIHOPNumInitialSamples, 3,                  \
           "How many completed time periods from initial mark to first "     \
@@ -51,32 +53,14 @@
           "Overhead of concurrent marking")                                 \
           range(0, 100)                                                     \
                                                                             \
-  develop(intx, G1MarkingVerboseLevel, 0,                                   \
-          "Level (0-4) of verboseness of the marking code")                 \
-          range(0, 4)                                                       \
-                                                                            \
-  develop(bool, G1TraceMarkStackOverflow, false,                            \
-          "If true, extra debugging code for CM restart for ovflw.")        \
-                                                                            \
-  diagnostic(bool, G1SummarizeConcMark, false,                              \
-          "Summarize concurrent mark info")                                 \
-                                                                            \
-  diagnostic(bool, G1SummarizeRSetStats, false,                             \
-          "Summarize remembered set processing info")                       \
-                                                                            \
   diagnostic(intx, G1SummarizeRSetStatsPeriod, 0,                           \
           "The period (in number of GCs) at which we will generate "        \
           "update buffer processing info "                                  \
           "(0 means do not periodically generate this info); "              \
-          "it also requires -XX:+G1SummarizeRSetStats")                     \
+          "it also requires that logging is enabled on the trace"           \
+          "level for gc+remset")                                            \
           range(0, max_intx)                                                \
                                                                             \
-  diagnostic(bool, G1TraceConcRefinement, false,                            \
-          "Trace G1 concurrent refinement")                                 \
-                                                                            \
-  experimental(bool, G1TraceStringSymbolTableScrubbing, false,              \
-          "Trace information string and symbol table scrubbing.")           \
-                                                                            \
   product(double, G1ConcMarkStepDurationMillis, 10.0,                       \
           "Target duration of individual concurrent marking steps "         \
           "in milliseconds.")                                               \
@@ -119,10 +103,6 @@
   develop(bool, G1RSBarrierRegionFilter, true,                              \
           "If true, generate region filtering code in RS barrier")          \
                                                                             \
-  diagnostic(bool, G1PrintRegionLivenessInfo, false,                        \
-            "Prints the liveness information for all regions in the heap "  \
-            "at the end of a marking cycle.")                               \
-                                                                            \
   product(size_t, G1UpdateBufferSize, 256,                                  \
           "Size of an update buffer")                                       \
           range(1, NOT_LP64(32*M) LP64_ONLY(1*G))                           \
@@ -155,6 +135,7 @@
           "Each time the rset update queue increases by this amount "       \
           "activate the next refinement thread if available. "              \
           "Will be selected ergonomically by default.")                     \
+          range(0, max_jint)                                                \
                                                                             \
   product(intx, G1RSetUpdatingPauseTimePercent, 10,                         \
           "A target percentage of time that is allowed to be spend on "     \
@@ -202,12 +183,6 @@
   develop(bool, G1ScrubRemSets, true,                                       \
           "When true, do RS scrubbing after cleanup.")                      \
                                                                             \
-  develop(bool, G1RSScrubVerbose, false,                                    \
-          "When true, do RS scrubbing with verbose output.")                \
-                                                                            \
-  develop(bool, G1YoungSurvRateVerbose, false,                              \
-          "print out the survival rate of young regions according to age.") \
-                                                                            \
   develop(intx, G1YoungSurvRateNumRegionsSummary, 0,                        \
           "the number of regions for which we'll print a surv rate "        \
           "summary.")                                                       \
@@ -219,10 +194,6 @@
           "to minimize the probability of promotion failure.")              \
           range(0, 50)                                                      \
                                                                             \
-  diagnostic(bool, G1PrintHeapRegions, false,                               \
-          "If set G1 will print information on which regions are being "    \
-          "allocated and which are reclaimed.")                             \
-                                                                            \
   develop(bool, G1HRRSUseSparseTable, true,                                 \
           "When true, use sparse table to save space.")                     \
                                                                             \
@@ -251,9 +222,6 @@
           "The number of regions we will add to the secondary free list "   \
           "at every append operation")                                      \
                                                                             \
-  develop(bool, G1ConcRegionFreeingVerbose, false,                          \
-          "Enables verboseness during concurrent region freeing")           \
-                                                                            \
   develop(bool, G1StressConcRegionFreeing, false,                           \
           "It stresses the concurrent region freeing operation")            \
                                                                             \
@@ -298,6 +266,7 @@
                                                                             \
   product(uintx, G1MixedGCCountTarget, 8,                                   \
           "The target number of mixed GCs after a marking cycle.")          \
+          range(0, max_uintx)                                               \
                                                                             \
   experimental(bool, G1EagerReclaimHumongousObjects, true,                  \
           "Try to reclaim dead large objects at every young GC.")           \
@@ -306,18 +275,11 @@
           "Try to reclaim dead large objects that have a few stale "        \
           "references at every young GC.")                                  \
                                                                             \
-  experimental(bool, G1TraceEagerReclaimHumongousObjects, false,            \
-          "Print some information about large object liveness "             \
-          "at every young GC.")                                             \
-                                                                            \
   experimental(uintx, G1OldCSetRegionThresholdPercent, 10,                  \
           "An upper bound for the number of old CSet regions expressed "    \
           "as a percentage of the heap size.")                              \
           range(0, 100)                                                     \
                                                                             \
-  experimental(ccstr, G1LogLevel, NULL,                                     \
-          "Log level for G1 logging: fine, finer, finest")                  \
-                                                                            \
   notproduct(bool, G1EvacuationFailureALot, false,                          \
           "Force use of evacuation failure handling during certain "        \
           "evacuation pauses")                                              \
--- a/src/share/vm/gc/g1/g1_specialized_oop_closures.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/g1_specialized_oop_closures.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -35,27 +35,15 @@
 class G1ParScanClosure;
 class G1ParPushHeapRSClosure;
 
-class FilterIntoCSClosure;
 class FilterOutOfRegionClosure;
 class G1CMOopClosure;
 class G1RootRegionScanClosure;
 
-// Specialized oop closures from g1RemSet.cpp
-class G1Mux2Closure;
-class G1TriggerClosure;
-class G1InvokeIfNotTriggeredClosure;
-class G1UpdateRSOrPushRefOopClosure;
-
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f) \
       f(G1ParScanClosure,_nv)                      \
       f(G1ParPushHeapRSClosure,_nv)                \
-      f(FilterIntoCSClosure,_nv)                   \
       f(FilterOutOfRegionClosure,_nv)              \
       f(G1CMOopClosure,_nv)                        \
-      f(G1RootRegionScanClosure,_nv)               \
-      f(G1Mux2Closure,_nv)                         \
-      f(G1TriggerClosure,_nv)                      \
-      f(G1InvokeIfNotTriggeredClosure,_nv)         \
-      f(G1UpdateRSOrPushRefOopClosure,_nv)
+      f(G1RootRegionScanClosure,_nv)
 
 #endif // SHARE_VM_GC_G1_G1_SPECIALIZED_OOP_CLOSURES_HPP
--- a/src/share/vm/gc/g1/heapRegion.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/heapRegion.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -34,6 +34,7 @@
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/liveRange.hpp"
 #include "gc/shared/space.inline.hpp"
+#include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
@@ -479,10 +480,8 @@
        // Object is in the region. Check that it's less than top
         if (_hr->top() <= (HeapWord*)obj) {
           // Object is above top
-          gclog_or_tty->print_cr("Object " PTR_FORMAT " in region "
-                                 "[" PTR_FORMAT ", " PTR_FORMAT ") is above "
-                                 "top " PTR_FORMAT,
-                                 p2i(obj), p2i(_hr->bottom()), p2i(_hr->end()), p2i(_hr->top()));
+          log_info(gc, verify)("Object " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ") is above top " PTR_FORMAT,
+                               p2i(obj), p2i(_hr->bottom()), p2i(_hr->end()), p2i(_hr->top()));
           _failures = true;
           return;
         }
@@ -515,23 +514,19 @@
     if (nm != NULL) {
       // Verify that the nmethod is live
       if (!nm->is_alive()) {
-        gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod "
-                               PTR_FORMAT " in its strong code roots",
-                               p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
+        log_info(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod " PTR_FORMAT " in its strong code roots",
+                             p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
         _failures = true;
       } else {
         VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
         nm->oops_do(&oop_cl);
         if (!oop_cl.has_oops_in_region()) {
-          gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod "
-                                 PTR_FORMAT " in its strong code roots "
-                                 "with no pointers into region",
-                                 p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
+          log_info(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its strong code roots with no pointers into region",
+                               p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
           _failures = true;
         } else if (oop_cl.failures()) {
-          gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has other "
-                                 "failures for nmethod " PTR_FORMAT,
-                                 p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
+          log_info(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT,
+                               p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
           _failures = true;
         }
       }
@@ -564,9 +559,8 @@
   // on its strong code root list
   if (is_empty()) {
     if (strong_code_roots_length > 0) {
-      gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] is empty "
-                             "but has " SIZE_FORMAT " code root entries",
-                             p2i(bottom()), p2i(end()), strong_code_roots_length);
+      log_info(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] is empty but has " SIZE_FORMAT " code root entries",
+                           p2i(bottom()), p2i(end()), strong_code_roots_length);
       *failures = true;
     }
     return;
@@ -574,9 +568,8 @@
 
   if (is_continues_humongous()) {
     if (strong_code_roots_length > 0) {
-      gclog_or_tty->print_cr("region " HR_FORMAT " is a continuation of a humongous "
-                             "region but has " SIZE_FORMAT " code root entries",
-                             HR_FORMAT_PARAMS(this), strong_code_roots_length);
+      log_info(gc, verify)("region " HR_FORMAT " is a continuation of a humongous region but has " SIZE_FORMAT " code root entries",
+                           HR_FORMAT_PARAMS(this), strong_code_roots_length);
       *failures = true;
     }
     return;
@@ -590,19 +583,22 @@
   }
 }
 
-void HeapRegion::print() const { print_on(gclog_or_tty); }
+void HeapRegion::print() const { print_on(tty); }
 void HeapRegion::print_on(outputStream* st) const {
-  st->print("AC%4u", allocation_context());
-
-  st->print(" %2s", get_short_type_str());
-  if (in_collection_set())
-    st->print(" CS");
-  else
-    st->print("   ");
-  st->print(" TS %5d", _gc_time_stamp);
-  st->print(" PTAMS " PTR_FORMAT " NTAMS " PTR_FORMAT,
-            p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()));
-  G1OffsetTableContigSpace::print_on(st);
+  st->print("|%4u", this->_hrm_index);
+  st->print("|" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT,
+            p2i(bottom()), p2i(top()), p2i(end()));
+  st->print("|%3d%%", (int) ((double) used() * 100 / capacity()));
+  st->print("|%2s", get_short_type_str());
+  if (in_collection_set()) {
+    st->print("|CS");
+  } else {
+    st->print("|  ");
+  }
+  st->print("|TS%3u", _gc_time_stamp);
+  st->print("|AC%3u", allocation_context());
+  st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "|",
+               p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()));
 }
 
 class VerifyLiveClosure: public OopClosure {
@@ -648,6 +644,7 @@
     assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
            "Precondition");
     T heap_oop = oopDesc::load_heap_oop(p);
+    LogHandle(gc, verify) log;
     if (!oopDesc::is_null(heap_oop)) {
       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
       bool failed = false;
@@ -656,35 +653,26 @@
                         Mutex::_no_safepoint_check_flag);
 
         if (!_failures) {
-          gclog_or_tty->cr();
-          gclog_or_tty->print_cr("----------");
+          log.info("----------");
         }
+        ResourceMark rm;
         if (!_g1h->is_in_closed_subset(obj)) {
           HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
-          gclog_or_tty->print_cr("Field " PTR_FORMAT
-                                 " of live obj " PTR_FORMAT " in region "
-                                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
-                                 p2i(p), p2i(_containing_obj),
-                                 p2i(from->bottom()), p2i(from->end()));
-          print_object(gclog_or_tty, _containing_obj);
-          gclog_or_tty->print_cr("points to obj " PTR_FORMAT " not in the heap",
-                                 p2i(obj));
+          log.info("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
+                   p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
+          print_object(log.info_stream(), _containing_obj);
+          log.info("points to obj " PTR_FORMAT " not in the heap", p2i(obj));
         } else {
           HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
           HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
-          gclog_or_tty->print_cr("Field " PTR_FORMAT
-                                 " of live obj " PTR_FORMAT " in region "
-                                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
-                                 p2i(p), p2i(_containing_obj),
-                                 p2i(from->bottom()), p2i(from->end()));
-          print_object(gclog_or_tty, _containing_obj);
-          gclog_or_tty->print_cr("points to dead obj " PTR_FORMAT " in region "
-                                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
-                                 p2i(obj), p2i(to->bottom()), p2i(to->end()));
-          print_object(gclog_or_tty, obj);
+          log.info("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
+                   p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
+          print_object(log.info_stream(), _containing_obj);
+          log.info("points to dead obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
+                   p2i(obj), p2i(to->bottom()), p2i(to->end()));
+          print_object(log.info_stream(), obj);
         }
-        gclog_or_tty->print_cr("----------");
-        gclog_or_tty->flush();
+        log.info("----------");
         _failures = true;
         failed = true;
         _n_failures++;
@@ -711,25 +699,17 @@
                             Mutex::_no_safepoint_check_flag);
 
             if (!_failures) {
-              gclog_or_tty->cr();
-              gclog_or_tty->print_cr("----------");
+              log.info("----------");
             }
-            gclog_or_tty->print_cr("Missing rem set entry:");
-            gclog_or_tty->print_cr("Field " PTR_FORMAT " "
-                                   "of obj " PTR_FORMAT ", "
-                                   "in region " HR_FORMAT,
-                                   p2i(p), p2i(_containing_obj),
-                                   HR_FORMAT_PARAMS(from));
-            _containing_obj->print_on(gclog_or_tty);
-            gclog_or_tty->print_cr("points to obj " PTR_FORMAT " "
-                                   "in region " HR_FORMAT,
-                                   p2i(obj),
-                                   HR_FORMAT_PARAMS(to));
-            obj->print_on(gclog_or_tty);
-            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
-                          cv_obj, cv_field);
-            gclog_or_tty->print_cr("----------");
-            gclog_or_tty->flush();
+            log.info("Missing rem set entry:");
+            log.info("Field " PTR_FORMAT " of obj " PTR_FORMAT ", in region " HR_FORMAT,
+                     p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
+            ResourceMark rm;
+            _containing_obj->print_on(log.info_stream());
+            log.info("points to obj " PTR_FORMAT " in region " HR_FORMAT, p2i(obj), HR_FORMAT_PARAMS(to));
+            obj->print_on(log.info_stream());
+            log.info("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field);
+            log.info("----------");
             _failures = true;
             if (!failed) _n_failures++;
           }
@@ -763,13 +743,13 @@
                                    (vo == VerifyOption_G1UsePrevMarking &&
                                    ClassLoaderDataGraph::unload_list_contains(klass));
         if (!is_metaspace_object) {
-          gclog_or_tty->print_cr("klass " PTR_FORMAT " of object " PTR_FORMAT " "
-                                 "not metadata", p2i(klass), p2i(obj));
+          log_info(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
+                               "not metadata", p2i(klass), p2i(obj));
           *failures = true;
           return;
         } else if (!klass->is_klass()) {
-          gclog_or_tty->print_cr("klass " PTR_FORMAT " of object " PTR_FORMAT " "
-                                 "not a klass", p2i(klass), p2i(obj));
+          log_info(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
+                               "not a klass", p2i(klass), p2i(obj));
           *failures = true;
           return;
         } else {
@@ -784,7 +764,7 @@
           }
         }
       } else {
-        gclog_or_tty->print_cr(PTR_FORMAT " no an oop", p2i(obj));
+        log_info(gc, verify)(PTR_FORMAT " no an oop", p2i(obj));
         *failures = true;
         return;
       }
@@ -800,13 +780,13 @@
   if (is_region_humongous) {
     oop obj = oop(this->humongous_start_region()->bottom());
     if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
-      gclog_or_tty->print_cr("this humongous region is not part of its' humongous object " PTR_FORMAT, p2i(obj));
+      log_info(gc, verify)("this humongous region is not part of its' humongous object " PTR_FORMAT, p2i(obj));
     }
   }
 
   if (!is_region_humongous && p != top()) {
-    gclog_or_tty->print_cr("end of last object " PTR_FORMAT " "
-                           "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
+    log_info(gc, verify)("end of last object " PTR_FORMAT " "
+                         "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
     *failures = true;
     return;
   }
@@ -820,9 +800,9 @@
     HeapWord* addr_1 = p;
     HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
     if (b_start_1 != p) {
-      gclog_or_tty->print_cr("BOT look up for top: " PTR_FORMAT " "
-                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
-                             p2i(addr_1), p2i(b_start_1), p2i(p));
+      log_info(gc, verify)("BOT look up for top: " PTR_FORMAT " "
+                           " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
+                           p2i(addr_1), p2i(b_start_1), p2i(p));
       *failures = true;
       return;
     }
@@ -832,9 +812,9 @@
     if (addr_2 < the_end) {
       HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
       if (b_start_2 != p) {
-        gclog_or_tty->print_cr("BOT look up for top + 1: " PTR_FORMAT " "
-                               " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
-                               p2i(addr_2), p2i(b_start_2), p2i(p));
+        log_info(gc, verify)("BOT look up for top + 1: " PTR_FORMAT " "
+                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
+                             p2i(addr_2), p2i(b_start_2), p2i(p));
         *failures = true;
         return;
       }
@@ -846,9 +826,9 @@
     if (addr_3 < the_end) {
       HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
       if (b_start_3 != p) {
-        gclog_or_tty->print_cr("BOT look up for top + diff: " PTR_FORMAT " "
-                               " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
-                               p2i(addr_3), p2i(b_start_3), p2i(p));
+        log_info(gc, verify)("BOT look up for top + diff: " PTR_FORMAT " "
+                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
+                             p2i(addr_3), p2i(b_start_3), p2i(p));
         *failures = true;
         return;
       }
@@ -858,9 +838,9 @@
     HeapWord* addr_4 = the_end - 1;
     HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
     if (b_start_4 != p) {
-      gclog_or_tty->print_cr("BOT look up for end - 1: " PTR_FORMAT " "
-                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
-                             p2i(addr_4), p2i(b_start_4), p2i(p));
+      log_info(gc, verify)("BOT look up for end - 1: " PTR_FORMAT " "
+                           " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
+                           p2i(addr_4), p2i(b_start_4), p2i(p));
       *failures = true;
       return;
     }
@@ -911,7 +891,7 @@
 
 void G1OffsetTableContigSpace::print() const {
   print_short();
-  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
+  tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                 INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                 p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
 }
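
The verification code above replaces gclog_or_tty with a stack-allocated LogHandle: one-line messages go through log.info(), and multi-line object dumps stream through log.info_stream() under a ResourceMark. A minimal sketch of that pattern in isolation; dump_live_obj() and its parameter are hypothetical names, not part of the patch:

    // Hedged sketch of the LogHandle pattern used in the verify code above.
    void dump_live_obj(oop obj) {
      LogHandle(gc, verify) log;
      if (!log.is_info()) {
        return;                          // no formatting cost when gc+verify=info is off
      }
      log.info("live obj " PTR_FORMAT, p2i(obj));
      ResourceMark rm;                   // print_on() may allocate resource-area memory
      obj->print_on(log.info_stream());  // multi-line dump goes to the same destination
    }
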
--- a/src/share/vm/gc/g1/heapRegion.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/heapRegion.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -351,6 +351,15 @@
                                       ~((1 << (size_t) LogOfHRGrainBytes) - 1);
   }
 
+
+  // Returns whether a field is in the same region as the obj it points to.
+  template <typename T>
+  static bool is_in_same_region(T* p, oop obj) {
+    assert(p != NULL, "p can't be NULL");
+    assert(obj != NULL, "obj can't be NULL");
+    return (((uintptr_t) p ^ cast_from_oop<uintptr_t>(obj)) >> LogOfHRGrainBytes) == 0;
+  }
+
   static size_t max_region_size();
   static size_t min_region_size_in_words();
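
The new is_in_same_region() exploits the fact that heap regions are 2^LogOfHRGrainBytes bytes in size and aligned: the bits at or above LogOfHRGrainBytes identify the region, so XOR-ing the two addresses cancels those bits exactly when they match, and shifting out the offset bits leaves zero. A worked example with made-up addresses, assuming 1 MB regions (LogOfHRGrainBytes == 20):

    uintptr_t field  = 0x7f0000123456;  // address of the field p
    uintptr_t target = 0x7f00001ffff0;  // address of the object it references
    // Both addresses lie in the region starting at 0x7f0000100000, so the
    // XOR differs only in the low 20 offset bits and the shift yields zero.
    bool same = ((field ^ target) >> 20) == 0;  // true: same 1 MB region
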
 
--- a/src/share/vm/gc/g1/heapRegionManager.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/heapRegionManager.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -99,7 +99,7 @@
   if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
     for (uint i = start; i < start + num_regions; i++) {
       HeapRegion* hr = at(i);
-      G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end());
+      G1CollectedHeap::heap()->hr_printer()->uncommit(hr);
     }
   }
 
@@ -135,7 +135,7 @@
     assert(is_available(i), "Just made region %u available but is apparently not.", i);
     HeapRegion* hr = at(i);
     if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
-      G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end());
+      G1CollectedHeap::heap()->hr_printer()->commit(hr);
     }
     HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
     MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
--- a/src/share/vm/gc/g1/heapRegionRemSet.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/heapRegionRemSet.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -560,20 +560,13 @@
 void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                               BitMap* region_bm, BitMap* card_bm) {
  // First eliminate garbage regions from the coarse map.
-  if (G1RSScrubVerbose) {
-    gclog_or_tty->print_cr("Scrubbing region %u:", _hr->hrm_index());
-  }
+  log_develop_trace(gc, remset, scrub)("Scrubbing region %u:", _hr->hrm_index());
 
   assert(_coarse_map.size() == region_bm->size(), "Precondition");
-  if (G1RSScrubVerbose) {
-    gclog_or_tty->print("   Coarse map: before = " SIZE_FORMAT "...",
-                        _n_coarse_entries);
-  }
+  log_develop_trace(gc, remset, scrub)("   Coarse map: before = " SIZE_FORMAT "...", _n_coarse_entries);
   _coarse_map.set_intersection(*region_bm);
   _n_coarse_entries = _coarse_map.count_one_bits();
-  if (G1RSScrubVerbose) {
-    gclog_or_tty->print_cr("   after = " SIZE_FORMAT ".", _n_coarse_entries);
-  }
+  log_develop_trace(gc, remset, scrub)("   after = " SIZE_FORMAT ".", _n_coarse_entries);
 
   // Now do the fine-grained maps.
   for (size_t i = 0; i < _max_fine_entries; i++) {
@@ -582,28 +575,19 @@
     while (cur != NULL) {
       PerRegionTable* nxt = cur->collision_list_next();
       // If the entire region is dead, eliminate.
-      if (G1RSScrubVerbose) {
-        gclog_or_tty->print_cr("     For other region %u:",
-                               cur->hr()->hrm_index());
-      }
+      log_develop_trace(gc, remset, scrub)("     For other region %u:", cur->hr()->hrm_index());
       if (!region_bm->at((size_t) cur->hr()->hrm_index())) {
         *prev = nxt;
         cur->set_collision_list_next(NULL);
         _n_fine_entries--;
-        if (G1RSScrubVerbose) {
-          gclog_or_tty->print_cr("          deleted via region map.");
-        }
+        log_develop_trace(gc, remset, scrub)("          deleted via region map.");
         unlink_from_all(cur);
         PerRegionTable::free(cur);
       } else {
         // Do fine-grain elimination.
-        if (G1RSScrubVerbose) {
-          gclog_or_tty->print("          occ: before = %4d.", cur->occupied());
-        }
+        log_develop_trace(gc, remset, scrub)("          occ: before = %4d.", cur->occupied());
         cur->scrub(ctbs, card_bm);
-        if (G1RSScrubVerbose) {
-          gclog_or_tty->print_cr("          after = %4d.", cur->occupied());
-        }
+        log_develop_trace(gc, remset, scrub)("          after = %4d.", cur->occupied());
         // Did that empty the table completely?
         if (cur->occupied() == 0) {
           *prev = nxt;
@@ -799,15 +783,15 @@
   while (iter.has_next(card_index)) {
     HeapWord* card_start =
       G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
-    gclog_or_tty->print_cr("  Card " PTR_FORMAT, p2i(card_start));
+    tty->print_cr("  Card " PTR_FORMAT, p2i(card_start));
   }
   if (iter.n_yielded() != occupied()) {
-    gclog_or_tty->print_cr("Yielded disagrees with occupied:");
-    gclog_or_tty->print_cr("  " SIZE_FORMAT_W(6) " yielded (" SIZE_FORMAT_W(6)
+    tty->print_cr("Yielded disagrees with occupied:");
+    tty->print_cr("  " SIZE_FORMAT_W(6) " yielded (" SIZE_FORMAT_W(6)
                   " coarse, " SIZE_FORMAT_W(6) " fine).",
                   iter.n_yielded(),
                   iter.n_yielded_coarse(), iter.n_yielded_fine());
-    gclog_or_tty->print_cr("  " SIZE_FORMAT_W(6) " occ     (" SIZE_FORMAT_W(6)
+    tty->print_cr("  " SIZE_FORMAT_W(6) " occ     (" SIZE_FORMAT_W(6)
                            " coarse, " SIZE_FORMAT_W(6) " fine).",
                   occupied(), occ_coarse(), occ_fine());
   }
@@ -1020,24 +1004,6 @@
 }
 
 #ifndef PRODUCT
-void PerRegionTable::test_fl_mem_size() {
-  PerRegionTable* dummy = alloc(NULL);
-
-  size_t min_prt_size = sizeof(void*) + dummy->bm()->size_in_words() * HeapWordSize;
-  assert(dummy->mem_size() > min_prt_size,
-         "PerRegionTable memory usage is suspiciously small, only has " SIZE_FORMAT " bytes. "
-         "Should be at least " SIZE_FORMAT " bytes.", dummy->mem_size(), min_prt_size);
-  free(dummy);
-  guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
-  // try to reset the state
-  _free_list = NULL;
-  delete dummy;
-}
-
-void HeapRegionRemSet::test_prt() {
-  PerRegionTable::test_fl_mem_size();
-}
-
 void HeapRegionRemSet::test() {
   os::sleep(Thread::current(), (jlong)5000, false);
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -1089,7 +1055,7 @@
   while (iter.has_next(card_index)) {
     HeapWord* card_start =
       G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
-    gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", p2i(card_start));
+    tty->print_cr("  Card " PTR_FORMAT ".", p2i(card_start));
     sum++;
   }
   guarantee(sum == 11 - 3 + 2048, "Failure");
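
The G1RSScrubVerbose develop-flag checks above collapse into log_develop_trace(), which logs like log_trace() in non-product builds and is compiled out of product builds, so the explicit guard disappears. A small sketch of the idiom (the tag set matches the patch; the wrapper function is illustrative):

    // Enabled in a debug build with something like -Xlog:gc+remset+scrub=trace;
    // in a product build the macro expands to nothing.
    void scrub_one(uint region_index) {
      log_develop_trace(gc, remset, scrub)("Scrubbing region %u:", region_index);
    }
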
--- a/src/share/vm/gc/g1/heapRegionRemSet.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/heapRegionRemSet.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -86,7 +86,7 @@
 
   static void invalidate(uint start_idx, size_t num_regions);
 
-  static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN;
+  static void print(outputStream* out = tty) PRODUCT_RETURN;
 
   static size_t static_mem_size() {
     return _static_mem_size;
@@ -392,7 +392,6 @@
 
   // Run unit tests.
 #ifndef PRODUCT
-  static void test_prt();
   static void test();
 #endif
 };
--- a/src/share/vm/gc/g1/heapRegionSet.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/heapRegionSet.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -49,8 +49,8 @@
   // verification might fail and send us on a wild goose chase.
   check_mt_safety();
 
-  guarantee_heap_region_set(( is_empty() && length() == 0 && total_capacity_bytes() == 0) ||
-                            (!is_empty() && length() > 0  && total_capacity_bytes() > 0) ,
+  guarantee_heap_region_set(( is_empty() && length() == 0) ||
+                            (!is_empty() && length() > 0),
                             "invariant");
 }
 
@@ -81,14 +81,12 @@
   out->print_cr("    free              : %s", BOOL_TO_STR(regions_free()));
   out->print_cr("  Attributes");
   out->print_cr("    length            : %14u", length());
-  out->print_cr("    total capacity    : " SIZE_FORMAT_W(14) " bytes",
-                total_capacity_bytes());
 }
 
 HeapRegionSetBase::HeapRegionSetBase(const char* name, bool humongous, bool free, HRSMtSafeChecker* mt_safety_checker)
   : _name(name), _verify_in_progress(false),
     _is_humongous(humongous), _is_free(free), _mt_safety_checker(mt_safety_checker),
-    _count()
+    _length(0)
 { }
 
 void FreeRegionList::set_unrealistically_long_length(uint len) {
@@ -177,7 +175,7 @@
     }
   }
 
-  _count.increment(from_list->length(), from_list->total_capacity_bytes());
+  _length += from_list->length();
   from_list->clear();
 
   verify_optional();
@@ -255,30 +253,12 @@
 }
 
 void FreeRegionList::clear() {
-  _count = HeapRegionSetCount();
+  _length = 0;
   _head = NULL;
   _tail = NULL;
   _last = NULL;
 }
 
-void FreeRegionList::print_on(outputStream* out, bool print_contents) {
-  HeapRegionSetBase::print_on(out, print_contents);
-  out->print_cr("  Linking");
-  out->print_cr("    head              : " PTR_FORMAT, p2i(_head));
-  out->print_cr("    tail              : " PTR_FORMAT, p2i(_tail));
-
-  if (print_contents) {
-    out->print_cr("  Contents");
-    FreeRegionListIterator iter(this);
-    while (iter.more_available()) {
-      HeapRegion* hr = iter.get_next();
-      hr->print_on(out);
-    }
-  }
-
-  out->cr();
-}
-
 void FreeRegionList::verify_list() {
   HeapRegion* curr = _head;
   HeapRegion* prev1 = NULL;
@@ -312,8 +292,6 @@
   guarantee(_tail == prev0, "Expected %s to end with %u but it ended with %u.", name(), _tail->hrm_index(), prev0->hrm_index());
   guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next");
   guarantee(length() == count, "%s count mismatch. Expected %u, actual %u.", name(), length(), count);
-  guarantee(total_capacity_bytes() == capacity, "%s capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
-            name(), total_capacity_bytes(), capacity);
 }
 
 // Note on the check_mt_safety() methods below:
--- a/src/share/vm/gc/g1/heapRegionSet.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/heapRegionSet.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,22 +27,22 @@
 
 #include "gc/g1/heapRegion.hpp"
 
-#define assert_heap_region_set(p, message)                        \
-  do {                                                            \
-    assert((p), "[%s] %s ln: %u cy: " SIZE_FORMAT,                \
-           name(), message, length(), total_capacity_bytes());    \
+#define assert_heap_region_set(p, message) \
+  do {                                     \
+    assert((p), "[%s] %s ln: %u",          \
+           name(), message, length());     \
   } while (0)
 
-#define guarantee_heap_region_set(p, message)                     \
-  do {                                                            \
-    guarantee((p), "[%s] %s ln: %u cy: " SIZE_FORMAT,             \
-              name(), message, length(), total_capacity_bytes()); \
+#define guarantee_heap_region_set(p, message) \
+  do {                                        \
+    guarantee((p), "[%s] %s ln: %u",          \
+              name(), message, length());     \
   } while (0)
 
-#define assert_free_region_list(p, message)                                              \
-  do {                                                                                   \
-    assert((p), "[%s] %s ln: %u cy: " SIZE_FORMAT " hd: " PTR_FORMAT " tl: " PTR_FORMAT, \
-           name(), message, length(), total_capacity_bytes(), p2i(_head), p2i(_tail));   \
+#define assert_free_region_list(p, message)                          \
+  do {                                                               \
+    assert((p), "[%s] %s ln: %u hd: " PTR_FORMAT " tl: " PTR_FORMAT, \
+           name(), message, length(), p2i(_head), p2i(_tail));       \
   } while (0)
 
 
@@ -63,28 +63,6 @@
 class HumongousRegionSetMtSafeChecker      : public HRSMtSafeChecker { public: void check(); };
 class OldRegionSetMtSafeChecker            : public HRSMtSafeChecker { public: void check(); };
 
-class HeapRegionSetCount VALUE_OBJ_CLASS_SPEC {
-  friend class VMStructs;
-  uint   _length;
-  size_t _capacity;
-
-public:
-  HeapRegionSetCount() : _length(0), _capacity(0) { }
-
-  const uint   length()   const { return _length;   }
-  const size_t capacity() const { return _capacity; }
-
-  void increment(uint length_to_add, size_t capacity_to_add) {
-    _length += length_to_add;
-    _capacity += capacity_to_add;
-  }
-
-  void decrement(const uint length_to_remove, const size_t capacity_to_remove) {
-    _length -= length_to_remove;
-    _capacity -= capacity_to_remove;
-  }
-};
-
 // Base class for all the classes that represent heap region sets. It
 // contains the basic attributes that each set needs to maintain
 // (e.g., length, region num, used bytes sum) plus any shared
@@ -98,10 +76,8 @@
   HRSMtSafeChecker* _mt_safety_checker;
 
 protected:
-  // The number of regions added to the set. If the set contains
-  // only humongous regions, this reflects only 'starts humongous'
-  // regions and does not include 'continues humongous' ones.
-  HeapRegionSetCount _count;
+  // The number of regions in the set.
+  uint _length;
 
   const char* _name;
 
@@ -130,13 +106,9 @@
 public:
   const char* name() { return _name; }
 
-  uint length() const { return _count.length(); }
-
-  bool is_empty() { return _count.length() == 0; }
+  uint length() const { return _length; }
 
-  size_t total_capacity_bytes() {
-    return _count.capacity();
-  }
+  bool is_empty() { return _length == 0; }
 
   // It updates the fields of the set to reflect hr being added to
   // the set and tags the region appropriately.
@@ -181,8 +153,8 @@
   HeapRegionSet(const char* name, bool humongous, HRSMtSafeChecker* mt_safety_checker):
     HeapRegionSetBase(name, humongous, false /* free */, mt_safety_checker) { }
 
-  void bulk_remove(const HeapRegionSetCount& removed) {
-    _count.decrement(removed.length(), removed.capacity());
+  void bulk_remove(const uint removed) {
+    _length -= removed;
   }
 };
 
@@ -250,8 +222,6 @@
   void remove_starting_at(HeapRegion* first, uint num_regions);
 
   virtual void verify();
-
-  virtual void print_on(outputStream* out, bool print_contents = false);
 };
 
 // Iterator class that provides a convenient way to iterate over the
--- a/src/share/vm/gc/g1/heapRegionSet.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/heapRegionSet.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -33,7 +33,7 @@
   assert_heap_region_set(hr->next() == NULL, "should not already be linked");
   assert_heap_region_set(hr->prev() == NULL, "should not already be linked");
 
-  _count.increment(1u, hr->capacity());
+  _length++;
   hr->set_containing_set(this);
   verify_region(hr);
 }
@@ -45,8 +45,8 @@
   assert_heap_region_set(hr->prev() == NULL, "should already be unlinked");
 
   hr->set_containing_set(NULL);
-  assert_heap_region_set(_count.length() > 0, "pre-condition");
-  _count.decrement(1u, hr->capacity());
+  assert_heap_region_set(_length > 0, "pre-condition");
+  _length--;
 }
 
 inline void FreeRegionList::add_ordered(HeapRegion* hr) {
--- a/src/share/vm/gc/g1/satbMarkQueue.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/satbMarkQueue.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -199,9 +199,8 @@
 
 void SATBMarkQueue::print(const char* name,
                           void** buf, size_t index, size_t sz) {
-  gclog_or_tty->print_cr("  SATB BUFFER [%s] buf: " PTR_FORMAT " "
-                         "index: " SIZE_FORMAT " sz: " SIZE_FORMAT,
-                         name, p2i(buf), index, sz);
+  tty->print_cr("  SATB BUFFER [%s] buf: " PTR_FORMAT " index: " SIZE_FORMAT " sz: " SIZE_FORMAT,
+                name, p2i(buf), index, sz);
 }
 #endif // PRODUCT
 
@@ -222,16 +221,13 @@
 
 #ifdef ASSERT
 void SATBMarkQueueSet::dump_active_states(bool expected_active) {
-  gclog_or_tty->print_cr("Expected SATB active state: %s",
-                         expected_active ? "ACTIVE" : "INACTIVE");
-  gclog_or_tty->print_cr("Actual SATB active states:");
-  gclog_or_tty->print_cr("  Queue set: %s", is_active() ? "ACTIVE" : "INACTIVE");
+  log_info(gc, verify)("Expected SATB active state: %s", expected_active ? "ACTIVE" : "INACTIVE");
+  log_info(gc, verify)("Actual SATB active states:");
+  log_info(gc, verify)("  Queue set: %s", is_active() ? "ACTIVE" : "INACTIVE");
   for (JavaThread* t = Threads::first(); t; t = t->next()) {
-    gclog_or_tty->print_cr("  Thread \"%s\" queue: %s", t->name(),
-                           t->satb_mark_queue().is_active() ? "ACTIVE" : "INACTIVE");
+    log_info(gc, verify)("  Thread \"%s\" queue: %s", t->name(), t->satb_mark_queue().is_active() ? "ACTIVE" : "INACTIVE");
   }
-  gclog_or_tty->print_cr("  Shared queue: %s",
-                         shared_satb_queue()->is_active() ? "ACTIVE" : "INACTIVE");
+  log_info(gc, verify)("  Shared queue: %s", shared_satb_queue()->is_active() ? "ACTIVE" : "INACTIVE");
 }
 
 void SATBMarkQueueSet::verify_active_states(bool expected_active) {
@@ -318,8 +314,8 @@
   char buffer[SATB_PRINTER_BUFFER_SIZE];
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
 
-  gclog_or_tty->cr();
-  gclog_or_tty->print_cr("SATB BUFFERS [%s]", msg);
+  tty->cr();
+  tty->print_cr("SATB BUFFERS [%s]", msg);
 
   BufferNode* nd = _completed_buffers_head;
   int i = 0;
@@ -338,7 +334,7 @@
 
   shared_satb_queue()->print("Shared");
 
-  gclog_or_tty->cr();
+  tty->cr();
 }
 #endif // PRODUCT
 
--- a/src/share/vm/gc/g1/survRateGroup.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/survRateGroup.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,6 +27,7 @@
 #include "gc/g1/g1Predictions.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/survRateGroup.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 
 SurvRateGroup::SurvRateGroup(G1Predictions* predictor,
@@ -163,12 +164,11 @@
 
 #ifndef PRODUCT
 void SurvRateGroup::print() {
-  gclog_or_tty->print_cr("Surv Rate Group: %s (" SIZE_FORMAT " entries)",
-                _name, _region_num);
+  log_develop_trace(gc, survivor)("Surv Rate Group: %s (" SIZE_FORMAT " entries)", _name, _region_num);
   for (size_t i = 0; i < _region_num; ++i) {
-    gclog_or_tty->print_cr("    age " SIZE_FORMAT_W(4) "   surv rate %6.2lf %%   pred %6.2lf %%",
-                           i, _surv_rate[i] * 100.0,
-                           _predictor->get_new_prediction(_surv_rate_pred[i]) * 100.0);
+    log_develop_trace(gc, survivor)("    age " SIZE_FORMAT_W(4) "   surv rate %6.2lf %%   pred %6.2lf %%",
+                                    i, _surv_rate[i] * 100.0,
+                                    _predictor->get_new_prediction(_surv_rate_pred[i]) * 100.0);
   }
 }
 
@@ -178,22 +178,20 @@
   if (length == 0)
     return;
 
-  gclog_or_tty->cr();
-  gclog_or_tty->print_cr("%s Rate Summary (for up to age " SIZE_FORMAT ")", _name, length-1);
-  gclog_or_tty->print_cr("      age range     survival rate (avg)      samples (avg)");
-  gclog_or_tty->print_cr("  ---------------------------------------------------------");
+  log_trace(gc, survivor)("%s Rate Summary (for up to age " SIZE_FORMAT ")", _name, length-1);
+  log_trace(gc, survivor)("      age range     survival rate (avg)      samples (avg)");
+  log_trace(gc, survivor)("  ---------------------------------------------------------");
 
   size_t index = 0;
   size_t limit = MIN2((int) length, 10);
   while (index < limit) {
-    gclog_or_tty->print_cr("           " SIZE_FORMAT_W(4)
-                           "                 %6.2lf%%             %6.2lf",
-                           index, _summary_surv_rates[index]->avg() * 100.0,
-                           (double) _summary_surv_rates[index]->num());
+    log_trace(gc, survivor)("           " SIZE_FORMAT_W(4) "                 %6.2lf%%             %6.2lf",
+                            index, _summary_surv_rates[index]->avg() * 100.0,
+                            (double) _summary_surv_rates[index]->num());
     ++index;
   }
 
-  gclog_or_tty->print_cr("  ---------------------------------------------------------");
+  log_trace(gc, survivor)("  ---------------------------------------------------------");
 
   int num = 0;
   double sum = 0.0;
@@ -205,16 +203,15 @@
     ++index;
 
     if (index == length || num % 10 == 0) {
-      gclog_or_tty->print_cr("   " SIZE_FORMAT_W(4) " .. " SIZE_FORMAT_W(4)
-                             "                 %6.2lf%%             %6.2lf",
-                             (index-1) / 10 * 10, index-1, sum / (double) num,
-                             (double) samples / (double) num);
+      log_trace(gc, survivor)("   " SIZE_FORMAT_W(4) " .. " SIZE_FORMAT_W(4) "                 %6.2lf%%             %6.2lf",
+                              (index-1) / 10 * 10, index-1, sum / (double) num,
+                              (double) samples / (double) num);
       sum = 0.0;
       num = 0;
       samples = 0;
     }
   }
 
-  gclog_or_tty->print_cr("  ---------------------------------------------------------");
+  log_trace(gc, survivor)("  ---------------------------------------------------------");
 }
 #endif // PRODUCT
--- a/src/share/vm/gc/g1/vmStructs_g1.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/vmStructs_g1.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -59,10 +59,7 @@
   nonstatic_field(G1MonitoringSupport, _old_committed,      size_t)           \
   nonstatic_field(G1MonitoringSupport, _old_used,           size_t)           \
                                                                               \
-  nonstatic_field(HeapRegionSetBase,   _count,          HeapRegionSetCount)   \
-                                                                              \
-  nonstatic_field(HeapRegionSetCount,  _length,         uint)                 \
-  nonstatic_field(HeapRegionSetCount,  _capacity,       size_t)               \
+  nonstatic_field(HeapRegionSetBase,   _length,         uint)                 \
                                                                               \
   nonstatic_field(PtrQueue,            _active,         bool)                 \
   nonstatic_field(PtrQueue,            _buf,            void**)               \
@@ -82,7 +79,6 @@
   declare_type(HeapRegion, G1OffsetTableContigSpace)                          \
   declare_toplevel_type(HeapRegionManager)                                    \
   declare_toplevel_type(HeapRegionSetBase)                                    \
-  declare_toplevel_type(HeapRegionSetCount)                                   \
   declare_toplevel_type(G1MonitoringSupport)                                  \
   declare_toplevel_type(PtrQueue)                                             \
                                                                               \
--- a/src/share/vm/gc/g1/vm_operations_g1.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/vm_operations_g1.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,10 +27,9 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/g1/g1Log.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "runtime/interfaceSupport.hpp"
 
@@ -226,10 +225,10 @@
 }
 
 void VM_CGC_Operation::doit() {
-  TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
+  GCIdMark gc_id_mark(_gc_id);
+  GCTraceCPUTime tcpu;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  GCIdMark gc_id_mark(_gc_id);
-  GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm());
+  GCTraceTime(Info, gc) t(_printGCMessage, g1h->gc_timer_cm(), GCCause::_no_gc, true);
   IsGCActiveMark x;
   _cl->do_void();
 }
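
VM_CGC_Operation::doit() now measures the pause with GCTraceTime: the (Info, gc) macro arguments select the log level and tags, and the object reports the phase timing when it goes out of scope. A minimal sketch, assuming the timer, cause, and heap-usage constructor arguments are defaulted as the call above suggests; the phase title is made up:

    {
      GCTraceTime(Info, gc) t("Example Phase");
      // ... timed work ...
    }  // the destructor logs "Example Phase" and the elapsed time at info level
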
--- a/src/share/vm/gc/g1/workerDataArray.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/workerDataArray.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -30,13 +30,11 @@
   const uint length = 3;
   const char* title = "Test array";
   const bool print_sum = false;
-  const int log_level = 3;
   const uint indent_level = 2;
 
-  WorkerDataArray<size_t> array(length, title, print_sum, log_level, indent_level);
+  WorkerDataArray<size_t> array(length, title, print_sum, indent_level);
   assert(strncmp(array.title(), title, strlen(title)) == 0 , "Expected titles to match");
   assert(array.should_print_sum() == print_sum, "Expected should_print_sum to match print_sum");
-  assert(array.log_level() == log_level, "Expected log levels to match");
   assert(array.indentation() == indent_level, "Expected indentation to match");
 
   const size_t expected[length] = {5, 3, 7};
--- a/src/share/vm/gc/g1/workerDataArray.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/workerDataArray.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -32,7 +32,6 @@
   uint        _length;
   const char* _title;
   bool        _print_sum;
-  int         _log_level;
   uint        _indent_level;
   bool        _enabled;
 
@@ -46,7 +45,6 @@
   WorkerDataArray(uint length,
                   const char* title,
                   bool print_sum,
-                  int log_level,
                   uint indent_level);
 
   ~WorkerDataArray();
@@ -80,10 +78,6 @@
     return _print_sum;
   }
 
-  int log_level() const {
-    return _log_level;
-  }
-
   void clear();
   void set_enabled(bool enabled) {
     _enabled = enabled;
--- a/src/share/vm/gc/g1/workerDataArray.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/workerDataArray.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -29,12 +29,10 @@
 WorkerDataArray<T>::WorkerDataArray(uint length,
                                     const char* title,
                                     bool print_sum,
-                                    int log_level,
                                     uint indent_level) :
  _title(title),
  _length(0),
  _print_sum(print_sum),
- _log_level(log_level),
  _indent_level(indent_level),
  _thread_work_items(NULL),
  _enabled(true) {
--- a/src/share/vm/gc/g1/youngList.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/g1/youngList.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -29,6 +29,7 @@
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/youngList.hpp"
+#include "logging/log.hpp"
 #include "utilities/ostream.hpp"
 
 YoungList::YoungList(G1CollectedHeap* g1h) :
@@ -98,10 +99,10 @@
   HeapRegion* last = NULL;
   while (curr != NULL) {
     if (!curr->is_young()) {
-      gclog_or_tty->print_cr("### YOUNG REGION " PTR_FORMAT "-" PTR_FORMAT " "
-                             "incorrectly tagged (y: %d, surv: %d)",
-                             p2i(curr->bottom()), p2i(curr->end()),
-                             curr->is_young(), curr->is_survivor());
+      log_info(gc, verify)("### YOUNG REGION " PTR_FORMAT "-" PTR_FORMAT " "
+                           "incorrectly tagged (y: %d, surv: %d)",
+                           p2i(curr->bottom()), p2i(curr->end()),
+                           curr->is_young(), curr->is_survivor());
       ret = false;
     }
     ++length;
@@ -111,9 +112,8 @@
   ret = ret && (length == _length);
 
   if (!ret) {
-    gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
-    gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
-                           length, _length);
+    log_info(gc, verify)("### YOUNG LIST seems not well formed!");
+    log_info(gc, verify)("###   list has %u entries, _length is %u", length, _length);
   }
 
   return ret;
@@ -123,20 +123,19 @@
   bool ret = true;
 
   if (_length != 0) {
-    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
-                  _length);
+    log_info(gc, verify)("### YOUNG LIST should have 0 length, not %u", _length);
     ret = false;
   }
   if (check_sample && _last_sampled_rs_lengths != 0) {
-    gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
+    log_info(gc, verify)("### YOUNG LIST has non-zero last sampled RS lengths");
     ret = false;
   }
   if (_head != NULL) {
-    gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
+    log_info(gc, verify)("### YOUNG LIST does not have a NULL head");
     ret = false;
   }
   if (!ret) {
-    gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
+    log_info(gc, verify)("### YOUNG LIST does not seem empty");
   }
 
   return ret;
@@ -171,7 +170,6 @@
   _curr = _curr->get_next_young_region();
   if (_curr == NULL) {
     _last_sampled_rs_lengths = _sampled_rs_lengths;
-    // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
   }
 }
 
@@ -222,13 +220,13 @@
   const char* names[] = {"YOUNG", "SURVIVOR"};
 
   for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
-    gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
+    tty->print_cr("%s LIST CONTENTS", names[list]);
     HeapRegion *curr = lists[list];
     if (curr == NULL) {
-      gclog_or_tty->print_cr("  empty");
+      tty->print_cr("  empty");
     }
     while (curr != NULL) {
-      gclog_or_tty->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
+      tty->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
                              HR_FORMAT_PARAMS(curr),
                              p2i(curr->prev_top_at_mark_start()),
                              p2i(curr->next_top_at_mark_start()),
@@ -237,5 +235,5 @@
     }
   }
 
-  gclog_or_tty->cr();
+  tty->cr();
 }
--- a/src/share/vm/gc/parallel/adjoiningGenerations.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/adjoiningGenerations.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,6 +27,9 @@
 #include "gc/parallel/adjoiningVirtualSpaces.hpp"
 #include "gc/parallel/generationSizer.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
+#include "logging/log.hpp"
+#include "memory/resourceArea.hpp"
+#include "utilities/ostream.hpp"
 
 // If boundary moving is being used, create the young gen and old
 // gen with ASPSYoungGen and ASPSOldGen, respectively.  Revert to
@@ -116,6 +119,29 @@
   return virtual_spaces()->reserved_space().size();
 }
 
+void log_before_expansion(bool old, size_t expand_in_bytes, size_t change_in_bytes, size_t max_size) {
+  LogHandle(heap, ergo) log;
+  if (!log.is_debug()) {
+    return;
+  }
+  log.debug("Before expansion of %s gen with boundary move", old ? "old" : "young");
+  log.debug("  Requested change: " SIZE_FORMAT_HEX "  Attempted change: " SIZE_FORMAT_HEX,
+            expand_in_bytes, change_in_bytes);
+  ResourceMark rm;
+  ParallelScavengeHeap::heap()->print_on(log.debug_stream());
+  log.debug("  PS%sGen max size: " SIZE_FORMAT "K", old ? "Old" : "Young", max_size/K);
+}
+
+void log_after_expansion(bool old, size_t max_size) {
+  LogHandle(heap, ergo) log;
+  if (!log.is_debug()) {
+   return;
+  }
+  log.debug("After expansion of %s gen with boundary move", old ? "old" : "young");
+  ResourceMark rm;
+  ParallelScavengeHeap::heap()->print_on(log.debug_stream());
+  log.debug("  PS%sGen max size: " SIZE_FORMAT "K", old ? "Old" : "Young", max_size/K);
+}
 
 // Make checks on the current sizes of the generations and
 // the constraints on the sizes of the generations.  Push
@@ -141,17 +167,7 @@
     return;
   }
 
-  if (TraceAdaptiveGCBoundary) {
-    gclog_or_tty->print_cr("Before expansion of old gen with boundary move");
-    gclog_or_tty->print_cr("  Requested change: " SIZE_FORMAT_HEX
-                           "  Attempted change: " SIZE_FORMAT_HEX,
-      expand_in_bytes, change_in_bytes);
-    if (!PrintHeapAtGC) {
-      Universe::print_on(gclog_or_tty);
-    }
-    gclog_or_tty->print_cr("  PSOldGen max size: " SIZE_FORMAT "K",
-      old_gen()->max_gen_size()/K);
-  }
+  log_before_expansion(true, expand_in_bytes, change_in_bytes, old_gen()->max_gen_size());
 
   // Move the boundary between the generations up (smaller young gen).
   if (virtual_spaces()->adjust_boundary_up(change_in_bytes)) {
@@ -167,14 +183,7 @@
   young_gen()->space_invariants();
   old_gen()->space_invariants();
 
-  if (TraceAdaptiveGCBoundary) {
-    gclog_or_tty->print_cr("After expansion of old gen with boundary move");
-    if (!PrintHeapAtGC) {
-      Universe::print_on(gclog_or_tty);
-    }
-    gclog_or_tty->print_cr("  PSOldGen max size: " SIZE_FORMAT "K",
-      old_gen()->max_gen_size()/K);
-  }
+  log_after_expansion(true, old_gen()->max_gen_size());
 }
 
 // See comments on request_old_gen_expansion()
@@ -200,16 +209,7 @@
     return false;
   }
 
-  if (TraceAdaptiveGCBoundary) {
-    gclog_or_tty->print_cr("Before expansion of young gen with boundary move");
-    gclog_or_tty->print_cr("  Requested change: " SIZE_FORMAT_HEX "  Attempted change: " SIZE_FORMAT_HEX,
-      expand_in_bytes, change_in_bytes);
-    if (!PrintHeapAtGC) {
-      Universe::print_on(gclog_or_tty);
-    }
-    gclog_or_tty->print_cr("  PSYoungGen max size: " SIZE_FORMAT "K",
-      young_gen()->max_size()/K);
-  }
+  log_before_expansion(false, expand_in_bytes, change_in_bytes, young_gen()->max_size());
 
   // Move the boundary between the generations down (smaller old gen).
   MutexLocker x(ExpandHeap_lock);
@@ -227,14 +227,7 @@
   young_gen()->space_invariants();
   old_gen()->space_invariants();
 
-  if (TraceAdaptiveGCBoundary) {
-    gclog_or_tty->print_cr("After expansion of young gen with boundary move");
-    if (!PrintHeapAtGC) {
-      Universe::print_on(gclog_or_tty);
-    }
-    gclog_or_tty->print_cr("  PSYoungGen max size: " SIZE_FORMAT "K",
-      young_gen()->max_size()/K);
-  }
+  log_after_expansion(false, young_gen()->max_size());
 
   return result;
 }
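
Both expansion paths now share log_before_expansion() and log_after_expansion(), which test log.is_debug() before doing anything so the heap snapshot is never formatted when heap+ergo=debug is disabled. The same guard shape in isolation (log_heap_snapshot() is a hypothetical name):

    void log_heap_snapshot() {
      LogHandle(heap, ergo) log;
      if (!log.is_debug()) {
        return;                      // cheap bail-out when the level is off
      }
      ResourceMark rm;               // print_on() may allocate resource-area memory
      ParallelScavengeHeap::heap()->print_on(log.debug_stream());
    }
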
--- a/src/share/vm/gc/parallel/asPSOldGen.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/asPSOldGen.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -125,25 +125,21 @@
   size_t result = policy->promo_increment_aligned_down(max_contraction);
   // Also adjust for inter-generational alignment
   size_t result_aligned = align_size_down(result, gen_alignment);
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("\nASPSOldGen::available_for_contraction:"
-      " " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, result_aligned/K, result_aligned);
-    gclog_or_tty->print_cr(" reserved().byte_size() " SIZE_FORMAT " K / " SIZE_FORMAT_HEX,
-      reserved().byte_size()/K, reserved().byte_size());
+
+  LogHandle(gc, ergo) log;
+  if (log.is_trace()) {
     size_t working_promoted = (size_t) policy->avg_promoted()->padded_average();
-    gclog_or_tty->print_cr(" padded promoted " SIZE_FORMAT " K / " SIZE_FORMAT_HEX,
-      working_promoted/K, working_promoted);
-    gclog_or_tty->print_cr(" used " SIZE_FORMAT " K / " SIZE_FORMAT_HEX,
-      used_in_bytes()/K, used_in_bytes());
-    gclog_or_tty->print_cr(" min_gen_size() " SIZE_FORMAT " K / " SIZE_FORMAT_HEX,
-      min_gen_size()/K, min_gen_size());
-    gclog_or_tty->print_cr(" max_contraction " SIZE_FORMAT " K / " SIZE_FORMAT_HEX,
-      max_contraction/K, max_contraction);
-    gclog_or_tty->print_cr("    without alignment " SIZE_FORMAT " K / " SIZE_FORMAT_HEX,
-      policy->promo_increment(max_contraction)/K,
-      policy->promo_increment(max_contraction));
-    gclog_or_tty->print_cr(" alignment " SIZE_FORMAT_HEX, gen_alignment);
+    size_t promo_increment = policy->promo_increment(max_contraction);
+    log.trace("ASPSOldGen::available_for_contraction: " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, result_aligned/K, result_aligned);
+    log.trace(" reserved().byte_size() " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, reserved().byte_size()/K, reserved().byte_size());
+    log.trace(" padded promoted " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, working_promoted/K, working_promoted);
+    log.trace(" used " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, used_in_bytes()/K, used_in_bytes());
+    log.trace(" min_gen_size() " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, min_gen_size()/K, min_gen_size());
+    log.trace(" max_contraction " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, max_contraction/K, max_contraction);
+    log.trace("    without alignment " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, promo_increment/K, promo_increment);
+    log.trace(" alignment " SIZE_FORMAT_HEX, gen_alignment);
   }
+
   assert(result_aligned <= max_contraction, "arithmetic is wrong");
   return result_aligned;
 }
--- a/src/share/vm/gc/parallel/asPSYoungGen.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/asPSYoungGen.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -111,13 +111,12 @@
     PSAdaptiveSizePolicy* policy = heap->size_policy();
     size_t result = policy->eden_increment_aligned_down(max_contraction);
     size_t result_aligned = align_size_down(result, gen_alignment);
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: " SIZE_FORMAT " K",
-        result_aligned/K);
-      gclog_or_tty->print_cr("  max_contraction " SIZE_FORMAT " K", max_contraction/K);
-      gclog_or_tty->print_cr("  eden_avail " SIZE_FORMAT " K", eden_avail/K);
-      gclog_or_tty->print_cr("  gen_avail " SIZE_FORMAT " K", gen_avail/K);
-    }
+
+    log_trace(gc, ergo)("ASPSYoungGen::available_for_contraction: " SIZE_FORMAT " K", result_aligned/K);
+    log_trace(gc, ergo)("  max_contraction " SIZE_FORMAT " K", max_contraction/K);
+    log_trace(gc, ergo)("  eden_avail " SIZE_FORMAT " K", eden_avail/K);
+    log_trace(gc, ergo)("  gen_avail " SIZE_FORMAT " K", gen_avail/K);
+
     return result_aligned;
   }
 
@@ -199,25 +198,17 @@
     virtual_space()->shrink_by(change);
     size_changed = true;
   } else {
-    if (Verbose && PrintGC) {
-      if (orig_size == gen_size_limit()) {
-        gclog_or_tty->print_cr("ASPSYoung generation size at maximum: "
-          SIZE_FORMAT "K", orig_size/K);
-      } else if (orig_size == min_gen_size()) {
-        gclog_or_tty->print_cr("ASPSYoung generation size at minium: "
-          SIZE_FORMAT "K", orig_size/K);
-      }
+    if (orig_size == gen_size_limit()) {
+      log_trace(gc)("ASPSYoung generation size at maximum: " SIZE_FORMAT "K", orig_size/K);
+    } else if (orig_size == min_gen_size()) {
+      log_trace(gc)("ASPSYoung generation size at minium: " SIZE_FORMAT "K", orig_size/K);
     }
   }
 
   if (size_changed) {
     reset_after_change();
-    if (Verbose && PrintGC) {
-      size_t current_size  = virtual_space()->committed_size();
-      gclog_or_tty->print_cr("ASPSYoung generation size changed: "
-        SIZE_FORMAT "K->" SIZE_FORMAT "K",
-        orig_size/K, current_size/K);
-    }
+    log_trace(gc)("ASPSYoung generation size changed: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
+                  orig_size/K, virtual_space()->committed_size()/K);
   }
 
   guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
@@ -245,41 +236,31 @@
     return;
   }
 
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("PSYoungGen::resize_spaces(requested_eden_size: "
-                  SIZE_FORMAT
-                  ", requested_survivor_size: " SIZE_FORMAT ")",
-                  requested_eden_size, requested_survivor_size);
-    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
-                  SIZE_FORMAT,
-                  p2i(eden_space()->bottom()),
-                  p2i(eden_space()->end()),
-                  pointer_delta(eden_space()->end(),
-                                eden_space()->bottom(),
-                                sizeof(char)));
-    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") "
-                  SIZE_FORMAT,
-                  p2i(from_space()->bottom()),
-                  p2i(from_space()->end()),
-                  pointer_delta(from_space()->end(),
-                                from_space()->bottom(),
-                                sizeof(char)));
-    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") "
-                  SIZE_FORMAT,
-                  p2i(to_space()->bottom()),
-                  p2i(to_space()->end()),
-                  pointer_delta(  to_space()->end(),
-                                  to_space()->bottom(),
-                                  sizeof(char)));
-  }
+  log_trace(gc, ergo)("PSYoungGen::resize_spaces(requested_eden_size: "
+                      SIZE_FORMAT
+                      ", requested_survivor_size: " SIZE_FORMAT ")",
+                      requested_eden_size, requested_survivor_size);
+  log_trace(gc, ergo)("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
+                      SIZE_FORMAT,
+                      p2i(eden_space()->bottom()),
+                      p2i(eden_space()->end()),
+                      pointer_delta(eden_space()->end(), eden_space()->bottom(), sizeof(char)));
+  log_trace(gc, ergo)("    from: [" PTR_FORMAT ".." PTR_FORMAT ") "
+                      SIZE_FORMAT,
+                      p2i(from_space()->bottom()),
+                      p2i(from_space()->end()),
+                      pointer_delta(from_space()->end(), from_space()->bottom(), sizeof(char)));
+  log_trace(gc, ergo)("      to: [" PTR_FORMAT ".." PTR_FORMAT ") "
+                      SIZE_FORMAT,
+                      p2i(to_space()->bottom()),
+                      p2i(to_space()->end()),
+                      pointer_delta(  to_space()->end(), to_space()->bottom(), sizeof(char)));
 
   // There's nothing to do if the new sizes are the same as the current
   if (requested_survivor_size == to_space()->capacity_in_bytes() &&
       requested_survivor_size == from_space()->capacity_in_bytes() &&
       requested_eden_size == eden_space()->capacity_in_bytes()) {
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("    capacities are the right sizes, returning");
-    }
+    log_trace(gc, ergo)("    capacities are the right sizes, returning");
     return;
   }
 
@@ -302,9 +283,7 @@
   if (eden_from_to_order) {
     // Eden, from, to
 
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("  Eden, from, to:");
-    }
+    log_trace(gc, ergo)("  Eden, from, to:");
 
     // Set eden
     // "requested_eden_size" is a goal for the size of eden
@@ -368,28 +347,24 @@
 
     guarantee(to_start != to_end, "to space is zero sized");
 
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(eden_start),
-                    p2i(eden_end),
-                    pointer_delta(eden_end, eden_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [from_start .. from_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(from_start),
-                    p2i(from_end),
-                    pointer_delta(from_end, from_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(to_start),
-                    p2i(to_end),
-                    pointer_delta(  to_end,   to_start, sizeof(char)));
-    }
+    log_trace(gc, ergo)("    [eden_start .. eden_end): "
+                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
+                        p2i(eden_start),
+                        p2i(eden_end),
+                        pointer_delta(eden_end, eden_start, sizeof(char)));
+    log_trace(gc, ergo)("    [from_start .. from_end): "
+                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
+                        p2i(from_start),
+                        p2i(from_end),
+                        pointer_delta(from_end, from_start, sizeof(char)));
+    log_trace(gc, ergo)("    [  to_start ..   to_end): "
+                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
+                        p2i(to_start),
+                        p2i(to_end),
+                        pointer_delta(  to_end,   to_start, sizeof(char)));
   } else {
     // Eden, to, from
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("  Eden, to, from:");
-    }
+    log_trace(gc, ergo)("  Eden, to, from:");
 
     // To space gets priority over eden resizing. Note that we position
     // to space as if we were able to resize from space, even though from
@@ -422,23 +397,21 @@
     eden_end = MAX2(eden_end, eden_start + alignment);
     to_start = MAX2(to_start, eden_end);
 
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(eden_start),
-                    p2i(eden_end),
-                    pointer_delta(eden_end, eden_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(to_start),
-                    p2i(to_end),
-                    pointer_delta(  to_end,   to_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [from_start .. from_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(from_start),
-                    p2i(from_end),
-                    pointer_delta(from_end, from_start, sizeof(char)));
-    }
+    log_trace(gc, ergo)("    [eden_start .. eden_end): "
+                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
+                        p2i(eden_start),
+                        p2i(eden_end),
+                        pointer_delta(eden_end, eden_start, sizeof(char)));
+    log_trace(gc, ergo)("    [  to_start ..   to_end): "
+                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
+                        p2i(to_start),
+                        p2i(to_end),
+                        pointer_delta(  to_end,   to_start, sizeof(char)));
+    log_trace(gc, ergo)("    [from_start .. from_end): "
+                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
+                        p2i(from_start),
+                        p2i(from_end),
+                        pointer_delta(from_end, from_start, sizeof(char)));
   }
 
 
@@ -457,7 +430,7 @@
   // Let's make sure the call to initialize doesn't reset "top"!
   DEBUG_ONLY(HeapWord* old_from_top = from_space()->top();)
 
-  // For PrintAdaptiveSizePolicy block  below
+  // For logging block below
   size_t old_from = from_space()->capacity_in_bytes();
   size_t old_to   = to_space()->capacity_in_bytes();
 
@@ -506,19 +479,16 @@
 
   assert(from_space()->top() == old_from_top, "from top changed!");
 
-  if (PrintAdaptiveSizePolicy) {
-    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-    gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
-                  "collection: %d "
-                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
-                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
-                  heap->total_collections(),
-                  old_from, old_to,
-                  from_space()->capacity_in_bytes(),
-                  to_space()->capacity_in_bytes());
-    gclog_or_tty->cr();
-  }
-  space_invariants();
+  log_trace(gc, ergo)("AdaptiveSizePolicy::survivor space sizes: "
+                "collection: %d "
+                "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
+                "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
+                ParallelScavengeHeap::heap()->total_collections(),
+                old_from, old_to,
+                from_space()->capacity_in_bytes(),
+                to_space()->capacity_in_bytes());
+
+    space_invariants();
 }
 void ASPSYoungGen::reset_after_change() {
   assert_locked_or_safepoint(Heap_lock);
--- a/src/share/vm/gc/parallel/cardTableExtension.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/cardTableExtension.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -468,30 +468,17 @@
   // Update the covered region
   resize_update_covered_table(changed_region, new_region);
 
-  if (TraceCardTableModRefBS) {
-    int ind = changed_region;
-    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
-    gclog_or_tty->print_cr("  "
-                  "  _covered[%d].start(): " INTPTR_FORMAT
-                  "  _covered[%d].last(): " INTPTR_FORMAT,
-                  ind, p2i(_covered[ind].start()),
-                  ind, p2i(_covered[ind].last()));
-    gclog_or_tty->print_cr("  "
-                  "  _committed[%d].start(): " INTPTR_FORMAT
-                  "  _committed[%d].last(): " INTPTR_FORMAT,
-                  ind, p2i(_committed[ind].start()),
-                  ind, p2i(_committed[ind].last()));
-    gclog_or_tty->print_cr("  "
-                  "  byte_for(start): " INTPTR_FORMAT
-                  "  byte_for(last): " INTPTR_FORMAT,
-                  p2i(byte_for(_covered[ind].start())),
-                  p2i(byte_for(_covered[ind].last())));
-    gclog_or_tty->print_cr("  "
-                  "  addr_for(start): " INTPTR_FORMAT
-                  "  addr_for(last): " INTPTR_FORMAT,
-                  p2i(addr_for((jbyte*) _committed[ind].start())),
-                  p2i(addr_for((jbyte*) _committed[ind].last())));
-  }
+  int ind = changed_region;
+  log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
+  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT "  _covered[%d].last(): " INTPTR_FORMAT,
+                ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
+  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
+                ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
+  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
+                p2i(byte_for(_covered[ind].start())),  p2i(byte_for(_covered[ind].last())));
+  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
+                p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));
+
   debug_only(verify_guard();)
 }
 
--- a/src/share/vm/gc/parallel/gcTaskManager.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/gcTaskManager.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,6 +27,7 @@
 #include "gc/parallel/gcTaskThread.hpp"
 #include "gc/shared/adaptiveSizePolicy.hpp"
 #include "gc/shared/gcId.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/mutex.hpp"
@@ -465,13 +466,11 @@
          "all_workers_active() is  incorrect: "
          "active %d  ParallelGCThreads %u", active_workers(),
          ParallelGCThreads);
-  if (TraceDynamicGCThreads) {
-    gclog_or_tty->print_cr("GCTaskManager::set_active_gang(): "
-                           "all_workers_active()  %d  workers %d  "
-                           "active  %d  ParallelGCThreads %u",
-                           all_workers_active(), workers(),  active_workers(),
-                           ParallelGCThreads);
-  }
+  log_trace(gc, task)("GCTaskManager::set_active_gang(): "
+                      "all_workers_active()  %d  workers %d  "
+                      "active  %d  ParallelGCThreads %u",
+                      all_workers_active(), workers(),  active_workers(),
+                      ParallelGCThreads);
 }
 
 // Create IdleGCTasks for inactive workers.
@@ -502,15 +501,12 @@
         set_active_workers(reduced_active_workers);
         more_inactive_workers = 0;
       }
-      if (TraceDynamicGCThreads) {
-        gclog_or_tty->print_cr("JT: %d  workers %d  active  %d  "
-                                "idle %d  more %d",
-                                Threads::number_of_non_daemon_threads(),
-                                workers(),
-                                active_workers(),
-                                idle_workers(),
-                                more_inactive_workers);
-      }
+      log_trace(gc, task)("JT: %d  workers %d  active  %d  idle %d  more %d",
+                          Threads::number_of_non_daemon_threads(),
+                          workers(),
+                          active_workers(),
+                          idle_workers(),
+                          more_inactive_workers);
     }
     GCTaskQueue* q = GCTaskQueue::create();
     for(uint i = 0; i < (uint) more_inactive_workers; i++) {
@@ -536,6 +532,9 @@
 }
 
 void GCTaskManager::print_task_time_stamps() {
+  if (!log_is_enabled(Debug, gc, task, time)) {
+    return;
+  }
   for(uint i=0; i<ParallelGCThreads; i++) {
     GCTaskThread* t = thread(i);
     t->print_task_time_stamps();
@@ -828,38 +827,24 @@
 
 void IdleGCTask::do_it(GCTaskManager* manager, uint which) {
   WaitHelper* wait_helper = manager->wait_helper();
-  if (TraceGCTaskManager) {
-    tty->print_cr("[" INTPTR_FORMAT "]"
-                  " IdleGCTask:::do_it()"
-      "  should_wait: %s",
+  log_trace(gc, task)("[" INTPTR_FORMAT "] IdleGCTask:::do_it() should_wait: %s",
       p2i(this), wait_helper->should_wait() ? "true" : "false");
-  }
+
   MutexLockerEx ml(manager->monitor(), Mutex::_no_safepoint_check_flag);
-  if (TraceDynamicGCThreads) {
-    gclog_or_tty->print_cr("--- idle %d", which);
-  }
+  log_trace(gc, task)("--- idle %d", which);
   // Increment has to be done when the idle tasks are created.
   // manager->increment_idle_workers();
   manager->monitor()->notify_all();
   while (wait_helper->should_wait()) {
-    if (TraceGCTaskManager) {
-      tty->print_cr("[" INTPTR_FORMAT "]"
-                    " IdleGCTask::do_it()"
-        "  [" INTPTR_FORMAT "] (%s)->wait()",
-        p2i(this), p2i(manager->monitor()), manager->monitor()->name());
-    }
+    log_trace(gc, task)("[" INTPTR_FORMAT "] IdleGCTask::do_it()  [" INTPTR_FORMAT "] (%s)->wait()",
+      p2i(this), p2i(manager->monitor()), manager->monitor()->name());
     manager->monitor()->wait(Mutex::_no_safepoint_check_flag, 0);
   }
   manager->decrement_idle_workers();
-  if (TraceDynamicGCThreads) {
-    gclog_or_tty->print_cr("--- release %d", which);
-  }
-  if (TraceGCTaskManager) {
-    tty->print_cr("[" INTPTR_FORMAT "]"
-                  " IdleGCTask::do_it() returns"
-      "  should_wait: %s",
-      p2i(this), wait_helper->should_wait() ? "true" : "false");
-  }
+
+  log_trace(gc, task)("--- release %d", which);
+  log_trace(gc, task)("[" INTPTR_FORMAT "] IdleGCTask::do_it() returns should_wait: %s",
+    p2i(this), wait_helper->should_wait() ? "true" : "false");
   // Release monitor().
 }
 
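Two guard idioms recur in the gcTaskManager hunks above: an early return
that skips a whole diagnostic function, and a log_is_enabled() test around
work that only pays off when the output will actually be written. A hedged
sketch of the guarded form; Stats and collect_stats() are hypothetical
stand-ins for expensive preparation:

  // Pay a single branch on the common path; do the costly part only when
  // -Xlog:gc+task+time=debug (or finer) is active.
  if (log_is_enabled(Debug, gc, task, time)) {
    Stats s = collect_stats();   // hypothetical expensive helper
    log_debug(gc, task, time)("entries: %d", s.entries);
  }
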
--- a/src/share/vm/gc/parallel/gcTaskThread.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/gcTaskThread.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,4 +1,3 @@
-
 /*
  * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -27,9 +26,11 @@
 #include "gc/parallel/gcTaskManager.hpp"
 #include "gc/parallel/gcTaskThread.hpp"
 #include "gc/shared/gcId.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/os.hpp"
@@ -46,11 +47,6 @@
   if (!os::create_thread(this, os::pgc_thread))
     vm_exit_out_of_memory(0, OOM_MALLOC_ERROR, "Cannot create GC thread. Out of system resources.");
 
-  if (PrintGCTaskTimeStamps) {
-    _time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC);
-
-    guarantee(_time_stamps != NULL, "Sanity");
-  }
   set_id(which);
   set_name("ParGC Thread#%d", which);
 }
@@ -67,21 +63,30 @@
 
 GCTaskTimeStamp* GCTaskThread::time_stamp_at(uint index) {
   guarantee(index < GCTaskTimeStampEntries, "increase GCTaskTimeStampEntries");
+  if (_time_stamps == NULL) {
+    // We allocate the _time_stamps array lazily since logging can be enabled dynamically
+    GCTaskTimeStamp* time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC);
+    void* old = Atomic::cmpxchg_ptr(time_stamps, &_time_stamps, NULL);
+    if (old != NULL) {
+      // Someone already set up the time stamps
+      FREE_C_HEAP_ARRAY(GCTaskTimeStamp, time_stamps);
+    }
+  }
 
   return &(_time_stamps[index]);
 }
 
 void GCTaskThread::print_task_time_stamps() {
-  assert(PrintGCTaskTimeStamps, "Sanity");
-  assert(_time_stamps != NULL, "Sanity (Probably set PrintGCTaskTimeStamps late)");
+  assert(log_is_enabled(Debug, gc, task, time), "Sanity");
+  assert(_time_stamps != NULL, "Sanity");
 
-  tty->print_cr("GC-Thread %u entries: %d", id(), _time_stamp_index);
+  log_debug(gc, task, time)("GC-Thread %u entries: %d", id(), _time_stamp_index);
   for(uint i=0; i<_time_stamp_index; i++) {
     GCTaskTimeStamp* time_stamp = time_stamp_at(i);
-    tty->print_cr("\t[ %s " JLONG_FORMAT " " JLONG_FORMAT " ]",
-                  time_stamp->name(),
-                  time_stamp->entry_time(),
-                  time_stamp->exit_time());
+    log_debug(gc, task, time)("\t[ %s " JLONG_FORMAT " " JLONG_FORMAT " ]",
+                              time_stamp->name(),
+                              time_stamp->entry_time(),
+                              time_stamp->exit_time());
   }
 
   // Reset after dumping the data
@@ -96,7 +101,6 @@
 void GCTaskThread::run() {
   // Set up the thread for stack overflow support
   this->record_stack_base_and_size();
-  this->initialize_thread_local_storage();
   this->initialize_named_thread();
   // Bind yourself to your processor.
   if (processor_id() != GCTaskManager::sentinel_worker()) {
@@ -129,7 +133,7 @@
       // Record if this is an idle task for later use.
       bool is_idle_task = task->is_idle_task();
       // In case the update is costly
-      if (PrintGCTaskTimeStamps) {
+      if (log_is_enabled(Debug, gc, task, time)) {
         timer.update();
       }
 
@@ -145,10 +149,7 @@
       if (!is_idle_task) {
         manager()->note_completion(which());
 
-        if (PrintGCTaskTimeStamps) {
-          assert(_time_stamps != NULL,
-            "Sanity (PrintGCTaskTimeStamps set late?)");
-
+        if (log_is_enabled(Debug, gc, task, time)) {
           timer.update();
 
           GCTaskTimeStamp* time_stamp = time_stamp_at(_time_stamp_index++);
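
Because -Xlog settings can change while the VM runs, _time_stamps can no
longer be allocated once at thread creation behind a flag; the hunk above
allocates it on first use and arbitrates concurrent first callers with a
compare-and-swap. A hypothetical standalone rendering of the same idiom
with std::atomic (not the VM-internal Atomic API used above):

  #include <atomic>
  #include <cstddef>

  struct TimeStampTable {
    std::atomic<long*> slots{nullptr};

    long* get(std::size_t n) {
      long* cur = slots.load(std::memory_order_acquire);
      if (cur == nullptr) {
        long* fresh = new long[n]();          // allocate speculatively
        long* expected = nullptr;
        if (slots.compare_exchange_strong(expected, fresh,
                                          std::memory_order_acq_rel)) {
          cur = fresh;                        // this thread published it
        } else {
          delete[] fresh;                     // lost the race; take winner's
          cur = expected;
        }
      }
      return cur;
    }
  };
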
--- a/src/share/vm/gc/parallel/parallelScavengeHeap.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/parallelScavengeHeap.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -38,6 +38,7 @@
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcWhen.hpp"
+#include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
@@ -307,10 +308,7 @@
         if (limit_exceeded && softrefs_clear) {
           *gc_overhead_limit_was_exceeded = true;
           size_policy()->set_gc_overhead_limit_exceeded(false);
-          if (PrintGCDetails && Verbose) {
-            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
-              "return NULL because gc_overhead_limit_exceeded is set");
-          }
+          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
           if (op.result() != NULL) {
             CollectedHeap::fill_with_object(op.result(), size);
           }
@@ -584,35 +582,17 @@
 }
 
 
-void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
+void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
   // Why do we need the total_collections()-filter below?
   if (total_collections() > 0) {
-    if (!silent) {
-      gclog_or_tty->print("tenured ");
-    }
+    log_debug(gc, verify)("Tenured");
     old_gen()->verify();
 
-    if (!silent) {
-      gclog_or_tty->print("eden ");
-    }
+    log_debug(gc, verify)("Eden");
     young_gen()->verify();
   }
 }
 
-void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print(" "  SIZE_FORMAT
-                        "->" SIZE_FORMAT
-                        "("  SIZE_FORMAT ")",
-                        prev_used, used(), capacity());
-  } else {
-    gclog_or_tty->print(" "  SIZE_FORMAT "K"
-                        "->" SIZE_FORMAT "K"
-                        "("  SIZE_FORMAT "K)",
-                        prev_used / K, used() / K, capacity() / K);
-  }
-}
-
 void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
   const PSHeapSummary& heap_summary = create_ps_heap_summary();
   gc_tracer->report_gc_heap_summary(when, heap_summary);
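
Note that verify() above also loses its silent parameter: verbosity is no
longer threaded through the call chain as a boolean but decided at the
logging site. The general pattern, as a small sketch with a hypothetical
helper:

  // Callers never pass a verbosity flag; the banner appears only when
  // gc+verify=debug is enabled, while the verification itself always runs.
  static void verify_with_banner(PSOldGen* gen, const char* banner) {
    log_debug(gc, verify)("%s", banner);   // e.g. "Tenured"
    gen->verify();
  }
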
--- a/src/share/vm/gc/parallel/parallelScavengeHeap.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/parallelScavengeHeap.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -35,6 +35,7 @@
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/gcWhen.hpp"
 #include "gc/shared/strongRootsScope.hpp"
+#include "memory/metaspace.hpp"
 #include "utilities/ostream.hpp"
 
 class AdjoiningGenerations;
@@ -87,6 +88,10 @@
     return CollectedHeap::ParallelScavengeHeap;
   }
 
+  virtual const char* name() const {
+    return "Parallel";
+  }
+
   virtual CollectorPolicy* collector_policy() const { return _collector_policy; }
 
   static PSYoungGen* young_gen() { return _young_gen; }
@@ -215,9 +220,7 @@
   virtual void gc_threads_do(ThreadClosure* tc) const;
   virtual void print_tracing_info() const;
 
-  void verify(bool silent, VerifyOption option /* ignored */);
-
-  void print_heap_change(size_t prev_used);
+  void verify(VerifyOption option /* ignored */);
 
   // Resize the young generation.  The reserved space for the
   // generation may be expanded in preparation for the resize.
@@ -241,4 +244,26 @@
   };
 };
 
+// Simple class for storing info about the heap at the start of GC, to be used
+// after GC for comparison/printing.
+class PreGCValues {
+public:
+  PreGCValues(ParallelScavengeHeap* heap) :
+      _heap_used(heap->used()),
+      _young_gen_used(heap->young_gen()->used_in_bytes()),
+      _old_gen_used(heap->old_gen()->used_in_bytes()),
+      _metadata_used(MetaspaceAux::used_bytes()) { }
+
+  size_t heap_used() const      { return _heap_used; }
+  size_t young_gen_used() const { return _young_gen_used; }
+  size_t old_gen_used() const   { return _old_gen_used; }
+  size_t metadata_used() const  { return _metadata_used; }
+
+private:
+  size_t _heap_used;
+  size_t _young_gen_used;
+  size_t _old_gen_used;
+  size_t _metadata_used;
+};
+
 #endif // SHARE_VM_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP
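
The new PreGCValues snapshot replaces the removed print_heap_change():
callers capture the used sizes before the pause and log deltas afterwards.
A hedged usage sketch; do_full_collection() is a stand-in for the pause,
and the single log line condenses what the real per-generation printers
emit:

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PreGCValues pre_gc_values(heap);    // capture used sizes before the GC

  do_full_collection();               // hypothetical stand-in

  log_info(gc, heap)("Heap: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
                     pre_gc_values.heap_used() / K, heap->used() / K);
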
--- a/src/share/vm/gc/parallel/pcTasks.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/pcTasks.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -31,7 +31,8 @@
 #include "gc/parallel/psParallelCompact.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "logging/log.hpp"
 #include "memory/universe.hpp"
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -251,14 +252,6 @@
 
   cm->set_region_stack_index(which_stack_index);
   cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
-  if (TraceDynamicGCThreads) {
-    gclog_or_tty->print_cr("StealRegionCompactionTask::do_it "
-                           "region_stack_index %d region_stack = " PTR_FORMAT " "
-                           " empty (%d) use all workers %d",
-    which_stack_index, p2i(ParCompactionManager::region_list(which_stack_index)),
-    cm->region_stack()->is_empty(),
-    use_all_workers);
-  }
 
   // Has to drain stacks first because there may be regions
   // preloaded onto the stack and this thread may never have
@@ -323,14 +316,6 @@
   }
 
   cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
-  if (TraceDynamicGCThreads) {
-    gclog_or_tty->print_cr("DrainStacksCompactionTask::do_it which = %d "
-                           "which_stack_index = %d/empty(%d) "
-                           "use all workers %d",
-                           which, which_stack_index,
-                           cm->region_stack()->is_empty(),
-                           use_all_workers);
-  }
 
   cm->set_region_stack_index(which_stack_index);
 
@@ -346,13 +331,6 @@
            "region_stack and region_stack_index are inconsistent");
     ParCompactionManager::push_recycled_stack_index(cm->region_stack_index());
 
-    if (TraceDynamicGCThreads) {
-      void* old_region_stack = (void*) cm->region_stack();
-      int old_region_stack_index = cm->region_stack_index();
-      gclog_or_tty->print_cr("Pushing region stack " PTR_FORMAT "/%d",
-        p2i(old_region_stack), old_region_stack_index);
-    }
-
     cm->set_region_stack(NULL);
     cm->set_region_stack_index((uint)max_uintx);
   }
--- a/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -30,6 +30,7 @@
 #include "gc/shared/collectorPolicy.hpp"
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
+#include "logging/log.hpp"
 #include "runtime/timer.hpp"
 #include "utilities/top.hpp"
 
@@ -159,14 +160,10 @@
     _major_pause_young_estimator->update(eden_size_in_mbytes,
       major_pause_in_ms);
 
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print("psAdaptiveSizePolicy::major_collection_end: "
-        "major gc cost: %f  average: %f", collection_cost,
-        avg_major_gc_cost()->average());
-      gclog_or_tty->print_cr("  major pause: %f major period %f",
-        major_pause_in_ms,
-        _latest_major_mutator_interval_seconds * MILLIUNITS);
-    }
+    log_trace(gc, ergo)("psAdaptiveSizePolicy::major_collection_end: major gc cost: %f  average: %f",
+                      collection_cost, avg_major_gc_cost()->average());
+    log_trace(gc, ergo)("  major pause: %f major period %f",
+                        major_pause_in_ms, _latest_major_mutator_interval_seconds * MILLIUNITS);
 
     // Calculate variable used to estimate collection cost vs. gen sizes
     assert(collection_cost >= 0.0, "Expected to be non-negative");
@@ -197,19 +194,11 @@
   // A similar test is done in the scavenge's should_attempt_scavenge().  If
   // this is changed, decide if that test should also be changed.
   bool result = padded_average_promoted_in_bytes() > (float) old_free_in_bytes;
-  if (PrintGCDetails && Verbose) {
-    if (result) {
-      gclog_or_tty->print("  full after scavenge: ");
-    } else {
-      gclog_or_tty->print("  no full after scavenge: ");
-    }
-    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
-      " padded_average_promoted " SIZE_FORMAT
-      " free in old gen " SIZE_FORMAT,
-      (size_t) average_promoted_in_bytes(),
-      (size_t) padded_average_promoted_in_bytes(),
-      old_free_in_bytes);
-  }
+  log_trace(gc, ergo)("%s after scavenge average_promoted " SIZE_FORMAT " padded_average_promoted " SIZE_FORMAT " free in old gen " SIZE_FORMAT,
+                      result ? "Full" : "No full",
+                      (size_t) average_promoted_in_bytes(),
+                      (size_t) padded_average_promoted_in_bytes(),
+                      old_free_in_bytes);
   return result;
 }
 
@@ -361,26 +350,24 @@
 
   // Note we make the same tests as in the code block below;  the code
   // seems a little easier to read with the printing in another block.
-  if (PrintAdaptiveSizePolicy) {
-    if (desired_eden_size > eden_limit) {
-      gclog_or_tty->print_cr(
-            "PSAdaptiveSizePolicy::compute_eden_space_size limits:"
-            " desired_eden_size: " SIZE_FORMAT
-            " old_eden_size: " SIZE_FORMAT
-            " eden_limit: " SIZE_FORMAT
-            " cur_eden: " SIZE_FORMAT
-            " max_eden_size: " SIZE_FORMAT
-            " avg_young_live: " SIZE_FORMAT,
-            desired_eden_size, _eden_size, eden_limit, cur_eden,
-            max_eden_size, (size_t)avg_young_live()->average());
-    }
-    if (gc_cost() > gc_cost_limit) {
-      gclog_or_tty->print_cr(
-            "PSAdaptiveSizePolicy::compute_eden_space_size: gc time limit"
-            " gc_cost: %f "
-            " GCTimeLimit: " UINTX_FORMAT,
-            gc_cost(), GCTimeLimit);
-    }
+  if (desired_eden_size > eden_limit) {
+    log_debug(gc, ergo)(
+          "PSAdaptiveSizePolicy::compute_eden_space_size limits:"
+          " desired_eden_size: " SIZE_FORMAT
+          " old_eden_size: " SIZE_FORMAT
+          " eden_limit: " SIZE_FORMAT
+          " cur_eden: " SIZE_FORMAT
+          " max_eden_size: " SIZE_FORMAT
+          " avg_young_live: " SIZE_FORMAT,
+          desired_eden_size, _eden_size, eden_limit, cur_eden,
+          max_eden_size, (size_t)avg_young_live()->average());
+  }
+  if (gc_cost() > gc_cost_limit) {
+    log_debug(gc, ergo)(
+          "PSAdaptiveSizePolicy::compute_eden_space_size: gc time limit"
+          " gc_cost: %f "
+          " GCTimeLimit: " UINTX_FORMAT,
+          gc_cost(), GCTimeLimit);
   }
 
   // Align everything and make a final limit check
@@ -399,51 +386,26 @@
     desired_eden_size = MAX2(eden_limit, cur_eden);
   }
 
-  if (PrintAdaptiveSizePolicy) {
-    // Timing stats
-    gclog_or_tty->print(
-               "PSAdaptiveSizePolicy::compute_eden_space_size: costs"
-               " minor_time: %f"
-               " major_cost: %f"
-               " mutator_cost: %f"
-               " throughput_goal: %f",
-               minor_gc_cost(), major_gc_cost(), mutator_cost(),
-               _throughput_goal);
+  log_debug(gc, ergo)("PSAdaptiveSizePolicy::compute_eden_space_size: costs minor_time: %f major_cost: %f mutator_cost: %f throughput_goal: %f",
+             minor_gc_cost(), major_gc_cost(), mutator_cost(), _throughput_goal);
+
+  log_trace(gc, ergo)("Minor_pause: %f major_pause: %f minor_interval: %f major_interval: %fpause_goal: %f",
+                      _avg_minor_pause->padded_average(),
+                      _avg_major_pause->padded_average(),
+                      _avg_minor_interval->average(),
+                      _avg_major_interval->average(),
+                      gc_pause_goal_sec());
 
-    // We give more details if Verbose is set
-    if (Verbose) {
-      gclog_or_tty->print( " minor_pause: %f"
-                  " major_pause: %f"
-                  " minor_interval: %f"
-                  " major_interval: %f"
-                  " pause_goal: %f",
-                  _avg_minor_pause->padded_average(),
-                  _avg_major_pause->padded_average(),
-                  _avg_minor_interval->average(),
-                  _avg_major_interval->average(),
-                  gc_pause_goal_sec());
-    }
+  log_debug(gc, ergo)("Live_space: " SIZE_FORMAT " free_space: " SIZE_FORMAT,
+                      live_space(), free_space());
 
-    // Footprint stats
-    gclog_or_tty->print( " live_space: " SIZE_FORMAT
-                " free_space: " SIZE_FORMAT,
-                live_space(), free_space());
-    // More detail
-    if (Verbose) {
-      gclog_or_tty->print( " base_footprint: " SIZE_FORMAT
-                  " avg_young_live: " SIZE_FORMAT
-                  " avg_old_live: " SIZE_FORMAT,
-                  (size_t)_avg_base_footprint->average(),
-                  (size_t)avg_young_live()->average(),
-                  (size_t)avg_old_live()->average());
-    }
+  log_trace(gc, ergo)("Base_footprint: " SIZE_FORMAT " avg_young_live: " SIZE_FORMAT " avg_old_live: " SIZE_FORMAT,
+                      (size_t)_avg_base_footprint->average(),
+                      (size_t)avg_young_live()->average(),
+                      (size_t)avg_old_live()->average());
 
-    // And finally, our old and new sizes.
-    gclog_or_tty->print(" old_eden_size: " SIZE_FORMAT
-               " desired_eden_size: " SIZE_FORMAT,
-               _eden_size, desired_eden_size);
-    gclog_or_tty->cr();
-  }
+  log_debug(gc, ergo)("Old eden_size: " SIZE_FORMAT " desired_eden_size: " SIZE_FORMAT,
+                      _eden_size, desired_eden_size);
 
   set_eden_size(desired_eden_size);
 }
@@ -564,27 +526,25 @@
 
   // Note we make the same tests as in the code block below;  the code
   // seems a little easier to read with the printing in another block.
-  if (PrintAdaptiveSizePolicy) {
-    if (desired_promo_size > promo_limit)  {
-      // "free_in_old_gen" was the original value for used for promo_limit
-      size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
-      gclog_or_tty->print_cr(
-            "PSAdaptiveSizePolicy::compute_old_gen_free_space limits:"
-            " desired_promo_size: " SIZE_FORMAT
-            " promo_limit: " SIZE_FORMAT
-            " free_in_old_gen: " SIZE_FORMAT
-            " max_old_gen_size: " SIZE_FORMAT
-            " avg_old_live: " SIZE_FORMAT,
-            desired_promo_size, promo_limit, free_in_old_gen,
-            max_old_gen_size, (size_t) avg_old_live()->average());
-    }
-    if (gc_cost() > gc_cost_limit) {
-      gclog_or_tty->print_cr(
-            "PSAdaptiveSizePolicy::compute_old_gen_free_space: gc time limit"
-            " gc_cost: %f "
-            " GCTimeLimit: " UINTX_FORMAT,
-            gc_cost(), GCTimeLimit);
-    }
+  if (desired_promo_size > promo_limit)  {
+    // "free_in_old_gen" was the original value for used for promo_limit
+    size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
+    log_debug(gc, ergo)(
+          "PSAdaptiveSizePolicy::compute_old_gen_free_space limits:"
+          " desired_promo_size: " SIZE_FORMAT
+          " promo_limit: " SIZE_FORMAT
+          " free_in_old_gen: " SIZE_FORMAT
+          " max_old_gen_size: " SIZE_FORMAT
+          " avg_old_live: " SIZE_FORMAT,
+          desired_promo_size, promo_limit, free_in_old_gen,
+          max_old_gen_size, (size_t) avg_old_live()->average());
+  }
+  if (gc_cost() > gc_cost_limit) {
+    log_debug(gc, ergo)(
+          "PSAdaptiveSizePolicy::compute_old_gen_free_space: gc time limit"
+          " gc_cost: %f "
+          " GCTimeLimit: " UINTX_FORMAT,
+          gc_cost(), GCTimeLimit);
   }
 
   // Align everything and make a final limit check
@@ -596,51 +556,28 @@
   // And one last limit check, now that we've aligned things.
   desired_promo_size = MIN2(desired_promo_size, promo_limit);
 
-  if (PrintAdaptiveSizePolicy) {
-    // Timing stats
-    gclog_or_tty->print(
-               "PSAdaptiveSizePolicy::compute_old_gen_free_space: costs"
-               " minor_time: %f"
-               " major_cost: %f"
-               " mutator_cost: %f"
-               " throughput_goal: %f",
-               minor_gc_cost(), major_gc_cost(), mutator_cost(),
-               _throughput_goal);
+  // Timing stats
+  log_debug(gc, ergo)("PSAdaptiveSizePolicy::compute_old_gen_free_space: costs minor_time: %f major_cost: %f  mutator_cost: %f throughput_goal: %f",
+             minor_gc_cost(), major_gc_cost(), mutator_cost(), _throughput_goal);
+
+  log_trace(gc, ergo)("Minor_pause: %f major_pause: %f minor_interval: %f major_interval: %f pause_goal: %f",
+                      _avg_minor_pause->padded_average(),
+                      _avg_major_pause->padded_average(),
+                      _avg_minor_interval->average(),
+                      _avg_major_interval->average(),
+                      gc_pause_goal_sec());
 
-    // We give more details if Verbose is set
-    if (Verbose) {
-      gclog_or_tty->print( " minor_pause: %f"
-                  " major_pause: %f"
-                  " minor_interval: %f"
-                  " major_interval: %f"
-                  " pause_goal: %f",
-                  _avg_minor_pause->padded_average(),
-                  _avg_major_pause->padded_average(),
-                  _avg_minor_interval->average(),
-                  _avg_major_interval->average(),
-                  gc_pause_goal_sec());
-    }
+  // Footprint stats
+  log_debug(gc, ergo)("Live_space: " SIZE_FORMAT " free_space: " SIZE_FORMAT,
+                      live_space(), free_space());
 
-    // Footprint stats
-    gclog_or_tty->print( " live_space: " SIZE_FORMAT
-                " free_space: " SIZE_FORMAT,
-                live_space(), free_space());
-    // More detail
-    if (Verbose) {
-      gclog_or_tty->print( " base_footprint: " SIZE_FORMAT
-                  " avg_young_live: " SIZE_FORMAT
-                  " avg_old_live: " SIZE_FORMAT,
-                  (size_t)_avg_base_footprint->average(),
-                  (size_t)avg_young_live()->average(),
-                  (size_t)avg_old_live()->average());
-    }
+  log_trace(gc, ergo)("Base_footprint: " SIZE_FORMAT " avg_young_live: " SIZE_FORMAT " avg_old_live: " SIZE_FORMAT,
+                      (size_t)_avg_base_footprint->average(),
+                      (size_t)avg_young_live()->average(),
+                      (size_t)avg_old_live()->average());
 
-    // And finally, our old and new sizes.
-    gclog_or_tty->print(" old_promo_size: " SIZE_FORMAT
-               " desired_promo_size: " SIZE_FORMAT,
-               _promo_size, desired_promo_size);
-    gclog_or_tty->cr();
-  }
+  log_debug(gc, ergo)("Old promo_size: " SIZE_FORMAT " desired_promo_size: " SIZE_FORMAT,
+                      _promo_size, desired_promo_size);
 
   set_promo_size(desired_promo_size);
 }
@@ -719,14 +656,12 @@
     }
   }
 
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "PSAdaptiveSizePolicy::adjust_promo_for_pause_time "
-      "adjusting gen sizes for major pause (avg %f goal %f). "
-      "desired_promo_size " SIZE_FORMAT " promo delta " SIZE_FORMAT,
-      _avg_major_pause->average(), gc_pause_goal_sec(),
-      *desired_promo_size_ptr, promo_heap_delta);
-  }
+  log_trace(gc, ergo)(
+    "PSAdaptiveSizePolicy::adjust_promo_for_pause_time "
+    "adjusting gen sizes for major pause (avg %f goal %f). "
+    "desired_promo_size " SIZE_FORMAT " promo delta " SIZE_FORMAT,
+    _avg_major_pause->average(), gc_pause_goal_sec(),
+    *desired_promo_size_ptr, promo_heap_delta);
 }
 
 void PSAdaptiveSizePolicy::adjust_eden_for_pause_time(bool is_full_gc,
@@ -740,14 +675,12 @@
   if (_avg_minor_pause->padded_average() > _avg_major_pause->padded_average()) {
     adjust_eden_for_minor_pause_time(is_full_gc, desired_eden_size_ptr);
   }
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "PSAdaptiveSizePolicy::adjust_eden_for_pause_time "
-      "adjusting gen sizes for major pause (avg %f goal %f). "
-      "desired_eden_size " SIZE_FORMAT " eden delta " SIZE_FORMAT,
-      _avg_major_pause->average(), gc_pause_goal_sec(),
-      *desired_eden_size_ptr, eden_heap_delta);
-  }
+  log_trace(gc, ergo)(
+    "PSAdaptiveSizePolicy::adjust_eden_for_pause_time "
+    "adjusting gen sizes for major pause (avg %f goal %f). "
+    "desired_eden_size " SIZE_FORMAT " eden delta " SIZE_FORMAT,
+    _avg_major_pause->average(), gc_pause_goal_sec(),
+    *desired_eden_size_ptr, eden_heap_delta);
 }
 
 void PSAdaptiveSizePolicy::adjust_promo_for_throughput(bool is_full_gc,
@@ -761,13 +694,8 @@
     return;
   }
 
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print("\nPSAdaptiveSizePolicy::adjust_promo_for_throughput("
-      "is_full: %d, promo: " SIZE_FORMAT "): ",
-      is_full_gc, *desired_promo_size_ptr);
-    gclog_or_tty->print_cr("mutator_cost %f  major_gc_cost %f "
-      "minor_gc_cost %f", mutator_cost(), major_gc_cost(), minor_gc_cost());
-  }
+  log_trace(gc, ergo)("PSAdaptiveSizePolicy::adjust_promo_for_throughput(is_full: %d, promo: " SIZE_FORMAT "): mutator_cost %f  major_gc_cost %f minor_gc_cost %f",
+                      is_full_gc, *desired_promo_size_ptr, mutator_cost(), major_gc_cost(), minor_gc_cost());
 
   // Tenured generation
   if (is_full_gc) {
@@ -780,12 +708,8 @@
       double scale_by_ratio = major_gc_cost() / gc_cost();
       scaled_promo_heap_delta =
         (size_t) (scale_by_ratio * (double) promo_heap_delta);
-      if (PrintAdaptiveSizePolicy && Verbose) {
-        gclog_or_tty->print_cr(
-          "Scaled tenured increment: " SIZE_FORMAT " by %f down to "
-          SIZE_FORMAT,
-          promo_heap_delta, scale_by_ratio, scaled_promo_heap_delta);
-      }
+      log_trace(gc, ergo)("Scaled tenured increment: " SIZE_FORMAT " by %f down to " SIZE_FORMAT,
+                          promo_heap_delta, scale_by_ratio, scaled_promo_heap_delta);
     } else if (major_gc_cost() >= 0.0) {
       // Scaling is not going to work.  If the major gc time is the
       // larger, give it a full increment.
@@ -839,13 +763,10 @@
         _old_gen_change_for_major_throughput++;
     }
 
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr(
-          "adjusting tenured gen for throughput (avg %f goal %f). "
-          "desired_promo_size " SIZE_FORMAT " promo_delta " SIZE_FORMAT ,
-          mutator_cost(), _throughput_goal,
-          *desired_promo_size_ptr, scaled_promo_heap_delta);
-    }
+    log_trace(gc, ergo)("Adjusting tenured gen for throughput (avg %f goal %f). desired_promo_size " SIZE_FORMAT " promo_delta " SIZE_FORMAT ,
+                        mutator_cost(),
+                        _throughput_goal,
+                        *desired_promo_size_ptr, scaled_promo_heap_delta);
   }
 }
 
@@ -860,13 +781,8 @@
     return;
   }
 
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print("\nPSAdaptiveSizePolicy::adjust_eden_for_throughput("
-      "is_full: %d, cur_eden: " SIZE_FORMAT "): ",
-      is_full_gc, *desired_eden_size_ptr);
-    gclog_or_tty->print_cr("mutator_cost %f  major_gc_cost %f "
-      "minor_gc_cost %f", mutator_cost(), major_gc_cost(), minor_gc_cost());
-  }
+  log_trace(gc, ergo)("PSAdaptiveSizePolicy::adjust_eden_for_throughput(is_full: %d, cur_eden: " SIZE_FORMAT "): mutator_cost %f  major_gc_cost %f minor_gc_cost %f",
+                      is_full_gc, *desired_eden_size_ptr, mutator_cost(), major_gc_cost(), minor_gc_cost());
 
   // Young generation
   size_t scaled_eden_heap_delta = 0;
@@ -878,12 +794,8 @@
     assert(scale_by_ratio <= 1.0 && scale_by_ratio >= 0.0, "Scaling is wrong");
     scaled_eden_heap_delta =
       (size_t) (scale_by_ratio * (double) eden_heap_delta);
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr(
-        "Scaled eden increment: " SIZE_FORMAT " by %f down to "
-        SIZE_FORMAT,
-        eden_heap_delta, scale_by_ratio, scaled_eden_heap_delta);
-    }
+    log_trace(gc, ergo)("Scaled eden increment: " SIZE_FORMAT " by %f down to " SIZE_FORMAT,
+                        eden_heap_delta, scale_by_ratio, scaled_eden_heap_delta);
   } else if (minor_gc_cost() >= 0.0) {
     // Scaling is not going to work.  If the minor gc time is the
     // larger, give it a full increment.
@@ -936,13 +848,8 @@
       _young_gen_change_for_minor_throughput++;
   }
 
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-        "adjusting eden for throughput (avg %f goal %f). desired_eden_size "
-        SIZE_FORMAT " eden delta " SIZE_FORMAT "\n",
-      mutator_cost(), _throughput_goal,
-        *desired_eden_size_ptr, scaled_eden_heap_delta);
-  }
+    log_trace(gc, ergo)("Adjusting eden for throughput (avg %f goal %f). desired_eden_size " SIZE_FORMAT " eden delta " SIZE_FORMAT,
+                        mutator_cost(), _throughput_goal, *desired_eden_size_ptr, scaled_eden_heap_delta);
 }
 
 size_t PSAdaptiveSizePolicy::adjust_promo_for_footprint(
@@ -955,15 +862,13 @@
 
   size_t reduced_size = desired_promo_size - change;
 
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "AdaptiveSizePolicy::adjust_promo_for_footprint "
-      "adjusting tenured gen for footprint. "
-      "starting promo size " SIZE_FORMAT
-      " reduced promo size " SIZE_FORMAT
-      " promo delta " SIZE_FORMAT,
-      desired_promo_size, reduced_size, change );
-  }
+  log_trace(gc, ergo)(
+    "AdaptiveSizePolicy::adjust_promo_for_footprint "
+    "adjusting tenured gen for footprint. "
+    "starting promo size " SIZE_FORMAT
+    " reduced promo size " SIZE_FORMAT
+    " promo delta " SIZE_FORMAT,
+    desired_promo_size, reduced_size, change);
 
   assert(reduced_size <= desired_promo_size, "Inconsistent result");
   return reduced_size;
@@ -979,15 +884,13 @@
 
   size_t reduced_size = desired_eden_size - change;
 
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "AdaptiveSizePolicy::adjust_eden_for_footprint "
-      "adjusting eden for footprint. "
-      " starting eden size " SIZE_FORMAT
-      " reduced eden size " SIZE_FORMAT
-      " eden delta " SIZE_FORMAT,
-      desired_eden_size, reduced_size, change);
-  }
+  log_trace(gc, ergo)(
+    "AdaptiveSizePolicy::adjust_eden_for_footprint "
+    "adjusting eden for footprint. "
+    " starting eden size " SIZE_FORMAT
+    " reduced eden size " SIZE_FORMAT
+    " eden delta " SIZE_FORMAT,
+    desired_eden_size, reduced_size, change);
 
   assert(reduced_size <= desired_eden_size, "Inconsistent result");
   return reduced_size;
@@ -1187,33 +1090,14 @@
   // the amount of old gen free space is less than what we expect to
   // promote).
 
-  if (PrintAdaptiveSizePolicy) {
-    // A little more detail if Verbose is on
-    if (Verbose) {
-      gclog_or_tty->print( "  avg_survived: %f"
-                  "  avg_deviation: %f",
-                  _avg_survived->average(),
-                  _avg_survived->deviation());
-    }
-
-    gclog_or_tty->print( "  avg_survived_padded_avg: %f",
-                _avg_survived->padded_average());
+  log_trace(gc, ergo)("avg_survived: %f  avg_deviation: %f", _avg_survived->average(), _avg_survived->deviation());
+  log_debug(gc, ergo)("avg_survived_padded_avg: %f", _avg_survived->padded_average());
 
-    if (Verbose) {
-      gclog_or_tty->print( "  avg_promoted_avg: %f"
-                  "  avg_promoted_dev: %f",
-                  avg_promoted()->average(),
-                  avg_promoted()->deviation());
-    }
-
-    gclog_or_tty->print_cr( "  avg_promoted_padded_avg: %f"
-                "  avg_pretenured_padded_avg: %f"
-                "  tenuring_thresh: %d"
-                "  target_size: " SIZE_FORMAT,
-                avg_promoted()->padded_average(),
-                _avg_pretenured->padded_average(),
-                tenuring_threshold, target_size);
-  }
+  log_trace(gc, ergo)("avg_promoted_avg: %f  avg_promoted_dev: %f", avg_promoted()->average(), avg_promoted()->deviation());
+  log_debug(gc, ergo)("avg_promoted_padded_avg: %f  avg_pretenured_padded_avg: %f  tenuring_thresh: %d  target_size: " SIZE_FORMAT,
+                      avg_promoted()->padded_average(),
+                      _avg_pretenured->padded_average(),
+                      tenuring_threshold, target_size);
 
   set_survivor_size(target_size);
 
@@ -1233,24 +1117,22 @@
   }
   avg_promoted()->sample(promoted);
 
-  if (PrintAdaptiveSizePolicy) {
-    gclog_or_tty->print_cr(
-                  "AdaptiveSizePolicy::update_averages:"
-                  "  survived: "  SIZE_FORMAT
-                  "  promoted: "  SIZE_FORMAT
-                  "  overflow: %s",
-                  survived, promoted, is_survivor_overflow ? "true" : "false");
-  }
+  log_trace(gc, ergo)("AdaptiveSizePolicy::update_averages:  survived: "  SIZE_FORMAT "  promoted: "  SIZE_FORMAT "  overflow: %s",
+                      survived, promoted, is_survivor_overflow ? "true" : "false");
 }
 
-bool PSAdaptiveSizePolicy::print_adaptive_size_policy_on(outputStream* st)
-  const {
+bool PSAdaptiveSizePolicy::print() const {
+  if (!UseAdaptiveSizePolicy) {
+    return false;
+  }
 
-  if (!UseAdaptiveSizePolicy) return false;
+  if (AdaptiveSizePolicy::print()) {
+    AdaptiveSizePolicy::print_tenuring_threshold(PSScavenge::tenuring_threshold());
+    return true;
+  }
 
-  return AdaptiveSizePolicy::print_adaptive_size_policy_on(
-                          st,
-                          PSScavenge::tenuring_threshold());
+  return false;
 }
 
 #ifndef PRODUCT
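
The psAdaptiveSizePolicy hunks apply one consistent mapping: output that
was gated on PrintAdaptiveSizePolicy alone moves to log_debug(gc, ergo),
and the extra detail that additionally required Verbose moves to
log_trace(gc, ergo). A sketch of the resulting split; the variables stand
in for the sampled averages used above:

  // debug: the headline figure for -Xlog:gc+ergo=debug users
  log_debug(gc, ergo)("avg_survived_padded_avg: %f", padded_avg);
  // trace: the raw statistics behind it, one level finer
  log_trace(gc, ergo)("avg_survived: %f  avg_deviation: %f", avg, dev);
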
--- a/src/share/vm/gc/parallel/psAdaptiveSizePolicy.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/psAdaptiveSizePolicy.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -395,7 +395,7 @@
                        size_t promoted);
 
   // Printing support
-  virtual bool print_adaptive_size_policy_on(outputStream* st) const;
+  virtual bool print() const;
 
   // Decay the supplemental growth additive.
   void decay_supplemental_growth(bool is_full_gc);
--- a/src/share/vm/gc/parallel/psCompactionManager.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/psCompactionManager.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -32,6 +32,7 @@
 #include "gc/parallel/psOldGen.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "logging/log.hpp"
 #include "memory/iterator.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
@@ -229,30 +230,18 @@
 static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj, ParCompactionManager* cm) {
   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
   T heap_oop = oopDesc::load_heap_oop(referent_addr);
-  debug_only(
-    if(TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("InstanceRefKlass::oop_pc_follow_contents " PTR_FORMAT, p2i(obj));
-    }
-  )
+  log_develop_trace(gc, ref)("InstanceRefKlass::oop_pc_follow_contents " PTR_FORMAT, p2i(obj));
   if (!oopDesc::is_null(heap_oop)) {
     oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
     if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
         PSParallelCompact::ref_processor()->discover_reference(obj, klass->reference_type())) {
       // reference already enqueued, referent will be traversed later
       klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL enqueued " PTR_FORMAT, p2i(obj));
-        }
-      )
+      log_develop_trace(gc, ref)("       Non NULL enqueued " PTR_FORMAT, p2i(obj));
       return;
     } else {
       // treat referent as normal oop
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL normal " PTR_FORMAT, p2i(obj));
-        }
-      )
+      log_develop_trace(gc, ref)("       Non NULL normal " PTR_FORMAT, p2i(obj));
       cm->mark_and_push(referent_addr);
     }
   }
@@ -262,12 +251,7 @@
   T  next_oop = oopDesc::load_heap_oop(next_addr);
   if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
     T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
-    debug_only(
-      if(TraceReferenceGC && PrintGCDetails) {
-        gclog_or_tty->print_cr("   Process discovered as normal "
-                               PTR_FORMAT, p2i(discovered_addr));
-      }
-    )
+    log_develop_trace(gc, ref)("   Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
     cm->mark_and_push(discovered_addr);
   }
   cm->mark_and_push(next_addr);
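
log_develop_trace replaces the debug_only(if (TraceReferenceGC &&
PrintGCDetails) ...) blocks above: in product builds the macro expands to
nothing, so neither the format string nor arguments such as p2i(obj) are
evaluated there. The before/after shape on a single site:

  // After: develop-only, absent from product binaries.
  log_develop_trace(gc, ref)("follow " PTR_FORMAT, p2i(obj));

  // Before (removed above): the same effect via two globals and an
  // explicit debug_only wrapper.
  // debug_only(
  //   if (TraceReferenceGC && PrintGCDetails) {
  //     gclog_or_tty->print_cr("follow " PTR_FORMAT, p2i(obj));
  //   }
  // )
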
--- a/src/share/vm/gc/parallel/psMarkSweep.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/psMarkSweep.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/stringTable.hpp"
+#include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
@@ -40,11 +41,12 @@
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/fprofiler.hpp"
@@ -136,8 +138,6 @@
   // We need to track unique mark sweep invocations as well.
   _total_invocations++;
 
-  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
-
   heap->print_heap_before_gc();
   heap->trace_heap_before_gc(_gc_tracer);
 
@@ -147,7 +147,7 @@
 
   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify(" VerifyBeforeGC:");
+    Universe::verify("Before GC");
   }
 
   // Verify object start arrays
@@ -166,8 +166,8 @@
   {
     HandleMark hm;
 
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
+    GCTraceCPUTime tcpu;
+    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause, true);
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
 
@@ -179,13 +179,9 @@
     CodeCache::gc_prologue();
     BiasedLocking::preserve_marks();
 
-    // Capture heap size before collection for printing.
-    size_t prev_used = heap->used();
-
     // Capture metadata size before collection for sizing.
     size_t metadata_prev_used = MetaspaceAux::used_bytes();
 
-    // For PrintGCDetails
     size_t old_gen_prev_used = old_gen->used_in_bytes();
     size_t young_gen_prev_used = young_gen->used_in_bytes();
 
@@ -265,17 +261,9 @@
 
     if (UseAdaptiveSizePolicy) {
 
-      if (PrintAdaptiveSizePolicy) {
-        gclog_or_tty->print("AdaptiveSizeStart: ");
-        gclog_or_tty->stamp();
-        gclog_or_tty->print_cr(" collection: %d ",
-                       heap->total_collections());
-        if (Verbose) {
-          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
-            " young_gen_capacity: " SIZE_FORMAT,
-            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
-        }
-      }
+     log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
+     log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
+                         old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
 
       // Don't check if the size_policy is ready here.  Let
       // the size_policy check that internally.
@@ -332,10 +320,7 @@
         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                                size_policy->calculated_survivor_size_in_bytes());
       }
-      if (PrintAdaptiveSizePolicy) {
-        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
-                       heap->total_collections());
-      }
+      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
     }
 
     if (UsePerfData) {
@@ -353,18 +338,9 @@
 
     if (TraceOldGenTime) accumulated_time()->stop();
 
-    if (PrintGC) {
-      if (PrintGCDetails) {
-        // Don't print a GC timestamp here.  This is after the GC so
-        // would be confusing.
-        young_gen->print_used_change(young_gen_prev_used);
-        old_gen->print_used_change(old_gen_prev_used);
-      }
-      heap->print_heap_change(prev_used);
-      if (PrintGCDetails) {
-        MetaspaceAux::print_metaspace_change(metadata_prev_used);
-      }
-    }
+    young_gen->print_used_change(young_gen_prev_used);
+    old_gen->print_used_change(old_gen_prev_used);
+    MetaspaceAux::print_metaspace_change(metadata_prev_used);
 
     // Track memory usage and detect low memory
     MemoryService::track_memory_usage();
@@ -373,7 +349,7 @@
 
   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify(" VerifyAfterGC:");
+    Universe::verify("After GC");
   }
 
   // Re-verify object start arrays
@@ -397,6 +373,8 @@
   ParallelTaskTerminator::print_termination_counts();
 #endif
 
+  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
+
   _gc_timer->register_gc_end();
 
   _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
@@ -442,8 +420,7 @@
     return false; // Respect young gen minimum size.
   }
 
-  if (TraceAdaptiveGCBoundary && Verbose) {
-    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
+  log_trace(heap, ergo)(" absorbing " SIZE_FORMAT "K:  "
                         "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                         "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                         "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
@@ -452,7 +429,6 @@
                         young_gen->from_space()->used_in_bytes() / K,
                         young_gen->to_space()->used_in_bytes() / K,
                         young_gen->capacity_in_bytes() / K, new_young_size / K);
-  }
 
   // Fill the unused part of the old gen.
   MutableSpace* const old_space = old_gen->object_space();
@@ -516,7 +492,7 @@
 
 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
+  GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", _gc_timer);
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 
@@ -575,7 +551,7 @@
 
 
 void PSMarkSweep::mark_sweep_phase2() {
-  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);
+  GCTraceTime(Trace, gc) tm("Phase 2: Compute new object addresses", _gc_timer);
 
   // Now all live objects are marked, compute the new object addresses.
 
@@ -602,7 +578,7 @@
 
 void PSMarkSweep::mark_sweep_phase3() {
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);
+  GCTraceTime(Trace, gc) tm("Phase 3: Adjust pointers", _gc_timer);
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();
@@ -642,7 +618,7 @@
 
 void PSMarkSweep::mark_sweep_phase4() {
   EventMark m("4 compact heap");
-  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);
+  GCTraceTime(Trace, gc) tm("Phase 4: Move objects", _gc_timer);
 
   // All pointers are now adjusted, move objects accordingly
 
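The GCTraceTime(Info, gc) and GCTraceTime(Trace, gc) objects above are
scope timers: the constructor records the start and the destructor logs
the phase name with its elapsed time at the chosen level. A condensed
sketch of how the full-GC pause and its phases nest, following the
constructor shapes used in this hunk:

  {
    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause, true);
    // ... setup ...
    {
      GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", _gc_timer);
      // marking runs here; "Phase 1 ..." is logged when tm leaves scope
    }
    // Phases 2-4 follow the same pattern.
  } // "Pause Full ..." is logged here at gc=info
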
--- a/src/share/vm/gc/parallel/psOldGen.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/psOldGen.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -30,6 +30,7 @@
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 
@@ -256,10 +257,8 @@
     success = expand_to_reserved();
   }
 
-  if (PrintGC && Verbose) {
-    if (success && GC_locker::is_active_and_needs_gc()) {
-      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
-    }
+  if (success && GC_locker::is_active_and_needs_gc()) {
+    log_debug(gc)("Garbage collection disabled, expanded heap instead");
   }
 }
 
@@ -291,13 +290,11 @@
     }
   }
 
-  if (result && Verbose && PrintGC) {
+  if (result) {
     size_t new_mem_size = virtual_space()->committed_size();
     size_t old_mem_size = new_mem_size - bytes;
-    gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
-                                       SIZE_FORMAT "K to "
-                                       SIZE_FORMAT "K",
-                    name(), old_mem_size/K, bytes/K, new_mem_size/K);
+    log_debug(gc)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
+                  name(), old_mem_size/K, bytes/K, new_mem_size/K);
   }
 
   return result;
@@ -326,14 +323,10 @@
     virtual_space()->shrink_by(bytes);
     post_resize();
 
-    if (Verbose && PrintGC) {
-      size_t new_mem_size = virtual_space()->committed_size();
-      size_t old_mem_size = new_mem_size + bytes;
-      gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by "
-                                         SIZE_FORMAT "K to "
-                                         SIZE_FORMAT "K",
-                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
-    }
+    size_t new_mem_size = virtual_space()->committed_size();
+    size_t old_mem_size = new_mem_size + bytes;
+    log_debug(gc)("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
+                  name(), old_mem_size/K, bytes/K, new_mem_size/K);
   }
 }
 
@@ -353,14 +346,12 @@
 
   const size_t current_size = capacity_in_bytes();
 
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
-      "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
-      " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
-      " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
-      desired_free_space, used_in_bytes(), new_size, current_size,
-      gen_size_limit(), min_gen_size());
-  }
+  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: "
+    "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
+    " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
+    " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
+    desired_free_space, used_in_bytes(), new_size, current_size,
+    gen_size_limit(), min_gen_size());
 
   if (new_size == current_size) {
     // No change requested
@@ -376,14 +367,10 @@
     shrink(change_bytes);
   }
 
-  if (PrintAdaptiveSizePolicy) {
-    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
-                  "collection: %d "
-                  "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
-                  heap->total_collections(),
-                  size_before, virtual_space()->committed_size());
-  }
+  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: collection: %d (" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
+                      ParallelScavengeHeap::heap()->total_collections(),
+                      size_before,
+                      virtual_space()->committed_size());
 }
 
 // NOTE! We need to be careful about resizing. During a GC, multiple
@@ -430,13 +417,8 @@
 void PSOldGen::print() const { print_on(tty);}
 void PSOldGen::print_on(outputStream* st) const {
   st->print(" %-15s", name());
-  if (PrintGCDetails && Verbose) {
-    st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,
-                capacity_in_bytes(), used_in_bytes());
-  } else {
-    st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
-                capacity_in_bytes()/K, used_in_bytes()/K);
-  }
+  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
+              capacity_in_bytes()/K, used_in_bytes()/K);
   st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                 p2i(virtual_space()->low_boundary()),
                 p2i(virtual_space()->high()),
@@ -446,13 +428,8 @@
 }
 
 void PSOldGen::print_used_change(size_t prev_used) const {
-  gclog_or_tty->print(" [%s:", name());
-  gclog_or_tty->print(" "  SIZE_FORMAT "K"
-                      "->" SIZE_FORMAT "K"
-                      "("  SIZE_FORMAT "K)",
-                      prev_used / K, used_in_bytes() / K,
-                      capacity_in_bytes() / K);
-  gclog_or_tty->print("]");
+  log_info(gc, heap)("%s: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
+      name(), prev_used / K, used_in_bytes() / K, capacity_in_bytes() / K);
 }
 
 void PSOldGen::update_counters() {
--- a/src/share/vm/gc/parallel/psParallelCompact.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/psParallelCompact.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/stringTable.hpp"
+#include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc/parallel/gcTaskManager.hpp"
@@ -44,11 +45,12 @@
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "logging/log.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/methodData.hpp"
@@ -106,7 +108,6 @@
 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
 
 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
-bool      PSParallelCompact::_print_phases = false;
 
 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
 
@@ -193,21 +194,26 @@
   "old ", "eden", "from", "to  "
 };
 
-void PSParallelCompact::print_region_ranges()
-{
-  tty->print_cr("space  bottom     top        end        new_top");
-  tty->print_cr("------ ---------- ---------- ---------- ----------");
+void PSParallelCompact::print_region_ranges() {
+  if (!develop_log_is_enabled(Trace, gc, compaction, phases)) {
+    return;
+  }
+  LogHandle(gc, compaction, phases) log;
+  ResourceMark rm;
+  Universe::print_on(log.trace_stream());
+  log.trace("space  bottom     top        end        new_top");
+  log.trace("------ ---------- ---------- ---------- ----------");
 
   for (unsigned int id = 0; id < last_space_id; ++id) {
     const MutableSpace* space = _space_info[id].space();
-    tty->print_cr("%u %s "
-                  SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
-                  SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
-                  id, space_names[id],
-                  summary_data().addr_to_region_idx(space->bottom()),
-                  summary_data().addr_to_region_idx(space->top()),
-                  summary_data().addr_to_region_idx(space->end()),
-                  summary_data().addr_to_region_idx(_space_info[id].new_top()));
+    log.trace("%u %s "
+              SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
+              SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
+              id, space_names[id],
+              summary_data().addr_to_region_idx(space->bottom()),
+              summary_data().addr_to_region_idx(space->top()),
+              summary_data().addr_to_region_idx(space->end()),
+              summary_data().addr_to_region_idx(_space_info[id].new_top()));
   }
 }
 
@@ -219,13 +225,14 @@
 
   ParallelCompactData& sd = PSParallelCompact::summary_data();
   size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
-  tty->print_cr(REGION_IDX_FORMAT " " PTR_FORMAT " "
-                REGION_IDX_FORMAT " " PTR_FORMAT " "
-                REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
-                REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
-                i, p2i(c->data_location()), dci, p2i(c->destination()),
-                c->partial_obj_size(), c->live_obj_size(),
-                c->data_size(), c->source_region(), c->destination_count());
+  log_develop_trace(gc, compaction, phases)(
+      REGION_IDX_FORMAT " " PTR_FORMAT " "
+      REGION_IDX_FORMAT " " PTR_FORMAT " "
+      REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
+      REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
+      i, p2i(c->data_location()), dci, p2i(c->destination()),
+      c->partial_obj_size(), c->live_obj_size(),
+      c->data_size(), c->source_region(), c->destination_count());
 
 #undef  REGION_IDX_FORMAT
 #undef  REGION_DATA_FORMAT
@@ -251,13 +258,17 @@
     ++i;
   }
 
-  tty->print_cr("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
+  log_develop_trace(gc, compaction, phases)("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
 }
 
 void
 print_generic_summary_data(ParallelCompactData& summary_data,
                            SpaceInfo* space_info)
 {
+  if (!develop_log_is_enabled(Trace, gc, compaction, phases)) {
+    return;
+  }
+
   for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
     const MutableSpace* space = space_info[id].space();
     print_generic_summary_data(summary_data, space->bottom(),
@@ -266,20 +277,6 @@
 }
 
 void
-print_initial_summary_region(size_t i,
-                             const ParallelCompactData::RegionData* c,
-                             bool newline = true)
-{
-  tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " "
-             SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " "
-             SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
-             i, p2i(c->destination()),
-             c->partial_obj_size(), c->live_obj_size(),
-             c->data_size(), c->source_region(), c->destination_count());
-  if (newline) tty->cr();
-}
-
-void
 print_initial_summary_data(ParallelCompactData& summary_data,
                            const MutableSpace* space) {
   if (space->top() == space->bottom()) {
@@ -298,7 +295,12 @@
   size_t full_region_count = 0;
   size_t i = summary_data.addr_to_region_idx(space->bottom());
   while (i < end_region && summary_data.region(i)->data_size() == region_size) {
-    print_initial_summary_region(i, summary_data.region(i));
+    ParallelCompactData::RegionData* c = summary_data.region(i);
+    log_develop_trace(gc, compaction, phases)(
+        SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
+        i, p2i(c->destination()),
+        c->partial_obj_size(), c->live_obj_size(),
+        c->data_size(), c->source_region(), c->destination_count());
     ++full_region_count;
     ++i;
   }
@@ -327,9 +329,14 @@
             max_live_to_right = live_to_right;
     }
 
-    print_initial_summary_region(i, c, false);
-    tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
-                  reclaimed_ratio, dead_to_right, live_to_right);
+    ParallelCompactData::RegionData* c = summary_data.region(i);
+    log_develop_trace(gc, compaction, phases)(
+        SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d "
+        "%12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
+        i, p2i(c->destination()),
+        c->partial_obj_size(), c->live_obj_size(),
+        c->data_size(), c->source_region(), c->destination_count(),
+        reclaimed_ratio, dead_to_right, live_to_right);
 
     live_to_right -= c->data_size();
     ++i;
@@ -337,18 +345,25 @@
 
   // Any remaining regions are empty.  Print one more if there is one.
   if (i < end_region) {
-    print_initial_summary_region(i, summary_data.region(i));
+    ParallelCompactData::RegionData* c = summary_data.region(i);
+    log_develop_trace(gc, compaction, phases)(
+        SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
+        i, p2i(c->destination()),
+        c->partial_obj_size(), c->live_obj_size(),
+        c->data_size(), c->source_region(), c->destination_count());
   }
 
-  tty->print_cr("max:  " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " "
-                "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
-                max_reclaimed_ratio_region, max_dead_to_right,
-                max_live_to_right, max_reclaimed_ratio);
+  log_develop_trace(gc, compaction, phases)("max:  " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
+                                            max_reclaimed_ratio_region, max_dead_to_right, max_live_to_right, max_reclaimed_ratio);
 }
 
 void
 print_initial_summary_data(ParallelCompactData& summary_data,
                            SpaceInfo* space_info) {
+  if (!develop_log_is_enabled(Trace, gc, compaction, phases)) {
+    return;
+  }
+
   unsigned int id = PSParallelCompact::old_space_id;
   const MutableSpace* space;
   do {
@@ -606,11 +621,7 @@
                                          sr->partial_obj_size()));
     const size_t end_idx = addr_to_region_idx(target_end);
 
-    if (TraceParallelOldGCSummaryPhase) {
-        gclog_or_tty->print_cr("split:  clearing source_region field in ["
-                               SIZE_FORMAT ", " SIZE_FORMAT ")",
-                               beg_idx, end_idx);
-    }
+    log_develop_trace(gc, compaction, phases)("split:  clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
     for (size_t idx = beg_idx; idx < end_idx; ++idx) {
       _region_data[idx].set_source_region(0);
     }
@@ -630,27 +641,22 @@
   *target_next = split_destination + partial_obj_size;
   HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
 
-  if (TraceParallelOldGCSummaryPhase) {
+  if (develop_log_is_enabled(Trace, gc, compaction, phases)) {
     const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
-    gclog_or_tty->print_cr("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT
-                           " pos=" SIZE_FORMAT,
-                           split_type, p2i(source_next), split_region,
-                           partial_obj_size);
-    gclog_or_tty->print_cr("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT
-                           " tn=" PTR_FORMAT,
-                           split_type, p2i(split_destination),
-                           addr_to_region_idx(split_destination),
-                           p2i(*target_next));
+    log_develop_trace(gc, compaction, phases)("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
+                                              split_type, p2i(source_next), split_region, partial_obj_size);
+    log_develop_trace(gc, compaction, phases)("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
+                                              split_type, p2i(split_destination),
+                                              addr_to_region_idx(split_destination),
+                                              p2i(*target_next));
 
     if (partial_obj_size != 0) {
       HeapWord* const po_beg = split_info.destination();
       HeapWord* const po_end = po_beg + split_info.partial_obj_size();
-      gclog_or_tty->print_cr("%s split:  "
-                             "po_beg=" PTR_FORMAT " " SIZE_FORMAT " "
-                             "po_end=" PTR_FORMAT " " SIZE_FORMAT,
-                             split_type,
-                             p2i(po_beg), addr_to_region_idx(po_beg),
-                             p2i(po_end), addr_to_region_idx(po_end));
+      log_develop_trace(gc, compaction, phases)("%s split:  po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
+                                                split_type,
+                                                p2i(po_beg), addr_to_region_idx(po_beg),
+                                                p2i(po_end), addr_to_region_idx(po_end));
     }
   }
 
@@ -663,13 +669,12 @@
                                     HeapWord* target_beg, HeapWord* target_end,
                                     HeapWord** target_next)
 {
-  if (TraceParallelOldGCSummaryPhase) {
-    HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
-    tty->print_cr("sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
-                  "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
-                  p2i(source_beg), p2i(source_end), p2i(source_next_val),
-                  p2i(target_beg), p2i(target_end), p2i(*target_next));
-  }
+  HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
+  log_develop_trace(gc, compaction, phases)(
+      "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
+      "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
+      p2i(source_beg), p2i(source_end), p2i(source_next_val),
+      p2i(target_beg), p2i(target_end), p2i(*target_next));
 
   size_t cur_region = addr_to_region_idx(source_beg);
   const size_t end_region = addr_to_region_idx(region_align_up(source_end));
@@ -900,32 +905,6 @@
   _dwl_adjustment = normal_distribution(1.0);
 }
 
-// Simple class for storing info about the heap at the start of GC, to be used
-// after GC for comparison/printing.
-class PreGCValues {
-public:
-  PreGCValues() { }
-  PreGCValues(ParallelScavengeHeap* heap) { fill(heap); }
-
-  void fill(ParallelScavengeHeap* heap) {
-    _heap_used      = heap->used();
-    _young_gen_used = heap->young_gen()->used_in_bytes();
-    _old_gen_used   = heap->old_gen()->used_in_bytes();
-    _metadata_used  = MetaspaceAux::used_bytes();
-  };
-
-  size_t heap_used() const      { return _heap_used; }
-  size_t young_gen_used() const { return _young_gen_used; }
-  size_t old_gen_used() const   { return _old_gen_used; }
-  size_t metadata_used() const  { return _metadata_used; }
-
-private:
-  size_t _heap_used;
-  size_t _young_gen_used;
-  size_t _old_gen_used;
-  size_t _metadata_used;
-};
-
 void
 PSParallelCompact::clear_data_covering_space(SpaceId id)
 {
@@ -955,19 +934,17 @@
   DEBUG_ONLY(split_info.verify_clear();)
 }
 
-void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
+void PSParallelCompact::pre_compact()
 {
   // Update the from & to space pointers in space_info, since they are swapped
   // at each young gen gc.  Do the update unconditionally (even though a
   // promotion failure does not swap spaces) because an unknown number of young
   // collections will have swapped the spaces an unknown number of times.
-  GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
+  GCTraceTime(Trace, gc, phases) tm("Pre Compact", &_gc_timer);
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
 
-  pre_gc_values->fill(heap);
-
   DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
   DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
 
@@ -986,7 +963,7 @@
 
   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify(" VerifyBeforeGC:");
+    Universe::verify("Before GC");
   }
 
   // Verify object start arrays
@@ -1004,7 +981,7 @@
 
 void PSParallelCompact::post_compact()
 {
-  GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);
+  GCTraceTime(Trace, gc, phases) tm("Post Compact", &_gc_timer);
 
   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     // Clear the marking bitmap, summary data and split info.
@@ -1558,7 +1535,7 @@
     }
   }
 
-  if (TraceParallelOldGCSummaryPhase) {
+  if (develop_log_is_enabled(Trace, gc, compaction, phases)) {
     const size_t region_size = ParallelCompactData::RegionSize;
     HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
     const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
@@ -1566,12 +1543,13 @@
     HeapWord* const new_top = _space_info[id].new_top();
     const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
     const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
-    tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
-                  "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
-                  "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
-                  id, space->capacity_in_words(), p2i(dense_prefix_end),
-                  dp_region, dp_words / region_size,
-                  cr_words / region_size, p2i(new_top));
+    log_develop_trace(gc, compaction, phases)(
+        "id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
+        "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
+        "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
+        id, space->capacity_in_words(), p2i(dense_prefix_end),
+        dp_region, dp_words / region_size,
+        cr_words / region_size, p2i(new_top));
   }
 }
 
@@ -1581,29 +1559,27 @@
                                           SpaceId src_space_id,
                                           HeapWord* src_beg, HeapWord* src_end)
 {
-  if (TraceParallelOldGCSummaryPhase) {
-    tty->print_cr("summarizing %d [%s] into %d [%s]:  "
-                  "src=" PTR_FORMAT "-" PTR_FORMAT " "
-                  SIZE_FORMAT "-" SIZE_FORMAT " "
-                  "dst=" PTR_FORMAT "-" PTR_FORMAT " "
-                  SIZE_FORMAT "-" SIZE_FORMAT,
-                  src_space_id, space_names[src_space_id],
-                  dst_space_id, space_names[dst_space_id],
-                  p2i(src_beg), p2i(src_end),
-                  _summary_data.addr_to_region_idx(src_beg),
-                  _summary_data.addr_to_region_idx(src_end),
-                  p2i(dst_beg), p2i(dst_end),
-                  _summary_data.addr_to_region_idx(dst_beg),
-                  _summary_data.addr_to_region_idx(dst_end));
-  }
+  log_develop_trace(gc, compaction, phases)(
+      "Summarizing %d [%s] into %d [%s]:  "
+      "src=" PTR_FORMAT "-" PTR_FORMAT " "
+      SIZE_FORMAT "-" SIZE_FORMAT " "
+      "dst=" PTR_FORMAT "-" PTR_FORMAT " "
+      SIZE_FORMAT "-" SIZE_FORMAT,
+      src_space_id, space_names[src_space_id],
+      dst_space_id, space_names[dst_space_id],
+      p2i(src_beg), p2i(src_end),
+      _summary_data.addr_to_region_idx(src_beg),
+      _summary_data.addr_to_region_idx(src_end),
+      p2i(dst_beg), p2i(dst_end),
+      _summary_data.addr_to_region_idx(dst_beg),
+      _summary_data.addr_to_region_idx(dst_end));
 }
 #endif  // #ifndef PRODUCT
 
 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                       bool maximum_compaction)
 {
-  GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
-  // trace("2");
+  GCTraceTime(Trace, gc, phases) tm("Summary Phase", &_gc_timer);
 
 #ifdef  ASSERT
   if (TraceParallelOldGCMarkingPhase) {
@@ -1619,14 +1595,9 @@
   // Quick summarization of each space into itself, to see how much is live.
   summarize_spaces_quick();
 
-  if (TraceParallelOldGCSummaryPhase) {
-    tty->print_cr("summary_phase:  after summarizing each space to self");
-    Universe::print();
-    NOT_PRODUCT(print_region_ranges());
-    if (Verbose) {
-      NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
-    }
-  }
+  log_develop_trace(gc, compaction, phases)("summary phase:  after summarizing each space to self");
+  NOT_PRODUCT(print_region_ranges());
+  NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
 
   // The amount of live data that will end up in old space (assuming it fits).
   size_t old_space_total_live = 0;
@@ -1700,14 +1671,9 @@
     }
   }
 
-  if (TraceParallelOldGCSummaryPhase) {
-    tty->print_cr("summary_phase:  after final summarization");
-    Universe::print();
-    NOT_PRODUCT(print_region_ranges());
-    if (Verbose) {
-      NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
-    }
-  }
+  log_develop_trace(gc, compaction, phases)("Summary_phase:  after final summarization");
+  NOT_PRODUCT(print_region_ranges());
+  NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
 }
 
 // This method should contain all heap-specific policy for invoking a full
@@ -1782,20 +1748,16 @@
 
   heap->pre_full_gc_dump(&_gc_timer);
 
-  _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
-
   // Make sure data structures are sane, make the heap parsable, and do other
   // miscellaneous bookkeeping.
-  PreGCValues pre_gc_values;
-  pre_compact(&pre_gc_values);
+  pre_compact();
+
+  PreGCValues pre_gc_values(heap);
 
   // Get the compaction manager reserved for the VM thread.
   ParCompactionManager* const vmthread_cm =
     ParCompactionManager::manager_array(gc_task_manager()->workers());
 
-  // Place after pre_compact() where the number of invocations is incremented.
-  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
-
   {
     ResourceMark rm;
     HandleMark hm;
@@ -1804,8 +1766,8 @@
     gc_task_manager()->set_active_gang();
     gc_task_manager()->task_idle_workers();
 
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
+    GCTraceCPUTime tcpu;
+    GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause, true);
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
 
@@ -1852,17 +1814,9 @@
     size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
 
     if (UseAdaptiveSizePolicy) {
-      if (PrintAdaptiveSizePolicy) {
-        gclog_or_tty->print("AdaptiveSizeStart: ");
-        gclog_or_tty->stamp();
-        gclog_or_tty->print_cr(" collection: %d ",
-                       heap->total_collections());
-        if (Verbose) {
-          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
-            " young_gen_capacity: " SIZE_FORMAT,
-            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
-        }
-      }
+      log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
+      log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
+                          old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
 
       // Don't check if the size_policy is ready here.  Let
       // the size_policy check that internally.
@@ -1920,10 +1874,8 @@
         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                                size_policy->calculated_survivor_size_in_bytes());
       }
-      if (PrintAdaptiveSizePolicy) {
-        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
-                       heap->total_collections());
-      }
+
+      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
     }
 
     if (UsePerfData) {
@@ -1938,20 +1890,14 @@
     // Resize the metaspace capacity after a collection
     MetaspaceGC::compute_new_size();
 
-    if (TraceOldGenTime) accumulated_time()->stop();
-
-    if (PrintGC) {
-      if (PrintGCDetails) {
-        // No GC timestamp here.  This is after GC so it would be confusing.
-        young_gen->print_used_change(pre_gc_values.young_gen_used());
-        old_gen->print_used_change(pre_gc_values.old_gen_used());
-        heap->print_heap_change(pre_gc_values.heap_used());
-        MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());
-      } else {
-        heap->print_heap_change(pre_gc_values.heap_used());
-      }
+    if (TraceOldGenTime) {
+      accumulated_time()->stop();
     }
 
+    young_gen->print_used_change(pre_gc_values.young_gen_used());
+    old_gen->print_used_change(pre_gc_values.old_gen_used());
+    MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());
+
     // Track memory usage and detect low memory
     MemoryService::track_memory_usage();
     heap->update_counters();
@@ -1969,7 +1915,7 @@
 
   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify(" VerifyAfterGC:");
+    Universe::verify("After GC");
   }
 
   // Re-verify object start arrays
@@ -1989,13 +1935,10 @@
   heap->print_heap_after_gc();
   heap->trace_heap_after_gc(&_gc_tracer);
 
-  if (PrintGCTaskTimeStamps) {
-    gclog_or_tty->print_cr("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " "
-                           JLONG_FORMAT,
-                           marking_start.ticks(), compaction_start.ticks(),
-                           collection_exit.ticks());
-    gc_task_manager()->print_task_time_stamps();
-  }
+  log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
+                            marking_start.ticks(), compaction_start.ticks(),
+                            collection_exit.ticks());
+  gc_task_manager()->print_task_time_stamps();
 
   heap->post_full_gc_dump(&_gc_timer);
 
@@ -2003,6 +1946,8 @@
   ParallelTaskTerminator::print_termination_counts();
 #endif
 
+  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
+
   _gc_timer.register_gc_end();
 
   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
@@ -2049,8 +1994,7 @@
     return false; // Respect young gen minimum size.
   }
 
-  if (TraceAdaptiveGCBoundary && Verbose) {
-    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
+  log_trace(heap, ergo)(" absorbing " SIZE_FORMAT "K:  "
                         "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                         "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                         "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
@@ -2059,7 +2003,6 @@
                         young_gen->from_space()->used_in_bytes() / K,
                         young_gen->to_space()->used_in_bytes() / K,
                         young_gen->capacity_in_bytes() / K, new_young_size / K);
-  }
 
   // Fill the unused part of the old gen.
   MutableSpace* const old_space = old_gen->object_space();
@@ -2109,7 +2052,7 @@
                                       bool maximum_heap_compaction,
                                       ParallelOldTracer *gc_tracer) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
+  GCTraceTime(Trace, gc, phases) tm("Marking Phase", &_gc_timer);
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
@@ -2124,7 +2067,7 @@
   ClassLoaderDataGraph::clear_claimed_marks();
 
   {
-    GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);
+    GCTraceTime(Trace, gc, phases) tm("Par Mark", &_gc_timer);
 
     ParallelScavengeHeap::ParStrongRootsScope psrs;
 
@@ -2153,7 +2096,7 @@
 
   // Process reference objects found during marking
   {
-    GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);
+    GCTraceTime(Trace, gc, phases) tm("Reference Processing", &_gc_timer);
 
     ReferenceProcessorStats stats;
     if (ref_processor()->processing_is_mt()) {
@@ -2170,7 +2113,7 @@
     gc_tracer->report_gc_reference_stats(stats);
   }
 
-  GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);
+  GCTraceTime(Trace, gc, phases) tm_m("Class Unloading", &_gc_timer);
 
   // This is the point where the entire marking should have completed.
   assert(cm->marking_stacks_empty(), "Marking should have completed");
@@ -2201,7 +2144,7 @@
 
 void PSParallelCompact::adjust_roots() {
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);
+  GCTraceTime(Trace, gc, phases) tm("Adjust Roots", &_gc_timer);
 
   // Need new claim bits when tracing through and adjusting pointers.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -2234,10 +2177,49 @@
   PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
 }
 
+// Helper class to print 8 region numbers per line and then print the total at the end.
+class FillableRegionLogger : public StackObj {
+private:
+  LogHandle(gc, compaction) log;
+  static const int LineLength = 8;
+  size_t _regions[LineLength];
+  int _next_index;
+  bool _enabled;
+  size_t _total_regions;
+public:
+  FillableRegionLogger() : _next_index(0), _enabled(develop_log_is_enabled(Trace, gc, compaction)), _total_regions(0) { }
+  ~FillableRegionLogger() {
+    log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
+  }
+
+  void print_line() {
+    if (!_enabled || _next_index == 0) {
+      return;
+    }
+    FormatBuffer<> line("Fillable: ");
+    for (int i = 0; i < _next_index; i++) {
+      line.append(" " SIZE_FORMAT_W(7), _regions[i]);
+    }
+    log.trace("%s", line.buffer());
+    _next_index = 0;
+  }
+
+  void handle(size_t region) {
+    if (!_enabled) {
+      return;
+    }
+    _regions[_next_index++] = region;
+    if (_next_index == LineLength) {
+      print_line();
+    }
+    _total_regions++;
+  }
+};
+
 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
                                                       uint parallel_gc_threads)
 {
-  GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);
+  GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);
 
   // Find the threads that are active
   unsigned int which = 0;
@@ -2262,13 +2244,13 @@
 
   const ParallelCompactData& sd = PSParallelCompact::summary_data();
 
-  size_t fillable_regions = 0;   // A count for diagnostic purposes.
   // A region index which corresponds to the tasks created above.
   // "which" must be 0 <= which < task_count
 
   which = 0;
   // id + 1 is used to test termination so unsigned  can
   // be used with an old_space_id == 0.
+  FillableRegionLogger region_logger;
   for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
     SpaceInfo* const space_info = _space_info + id;
     MutableSpace* const space = space_info->space();
@@ -2281,16 +2263,7 @@
     for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
       if (sd.region(cur)->claim_unsafe()) {
         ParCompactionManager::region_list_push(which, cur);
-
-        if (TraceParallelOldGCCompactionPhase && Verbose) {
-          const size_t count_mod_8 = fillable_regions & 7;
-          if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
-          gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur);
-          if (count_mod_8 == 7) gclog_or_tty->cr();
-        }
-
-        NOT_PRODUCT(++fillable_regions;)
-
+        region_logger.handle(cur);
         // Assign regions to tasks in round-robin fashion.
         if (++which == task_count) {
           assert(which <= parallel_gc_threads,
@@ -2299,11 +2272,7 @@
         }
       }
     }
-  }
-
-  if (TraceParallelOldGCCompactionPhase) {
-    if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr();
-    gclog_or_tty->print_cr(SIZE_FORMAT " initially fillable regions", fillable_regions);
+    region_logger.print_line();
   }
 }
 
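A minimal usage sketch of the FillableRegionLogger introduced above; this is
not part of the patch, the region indices are invented, and in the real loop
they come from claim_unsafe():

    FillableRegionLogger region_logger;  // snapshots develop_log_is_enabled(Trace, gc, compaction)
    for (size_t cur = 0; cur < 20; ++cur) {
      region_logger.handle(cur);         // buffers indices; emits a full line after every 8
    }
    region_logger.print_line();          // flush any partial line for this space
    // the destructor logs "<total> initially fillable regions"
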
@@ -2311,7 +2280,7 @@
 
 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                                     uint parallel_gc_threads) {
-  GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);
+  GCTraceTime(Trace, gc, phases) tm("Dense Prefix Task Setup", &_gc_timer);
 
   ParallelCompactData& sd = PSParallelCompact::summary_data();
 
@@ -2393,7 +2362,7 @@
                                      GCTaskQueue* q,
                                      ParallelTaskTerminator* terminator_ptr,
                                      uint parallel_gc_threads) {
-  GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);
+  GCTraceTime(Trace, gc, phases) tm("Steal Task Setup", &_gc_timer);
 
   // Once a thread has drained its stack, it should try to steal regions from
   // other threads.
@@ -2407,9 +2376,15 @@
 #ifdef ASSERT
 // Write a histogram of the number of times the block table was filled for a
 // region.
-void PSParallelCompact::write_block_fill_histogram(outputStream* const out)
+void PSParallelCompact::write_block_fill_histogram()
 {
-  if (!TraceParallelOldGCCompactionPhase) return;
+  if (!develop_log_is_enabled(Trace, gc, compaction)) {
+    return;
+  }
+
+  LogHandle(gc, compaction) log;
+  ResourceMark rm;
+  outputStream* out = log.trace_stream();
 
   typedef ParallelCompactData::RegionData rd_t;
   ParallelCompactData& sd = summary_data();
@@ -2428,7 +2403,7 @@
       for (const rd_t* cur = beg; cur < end; ++cur) {
         ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
       }
-      out->print("%u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
+      out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
       for (size_t i = 0; i < histo_len; ++i) {
         out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
                    histo[i], 100.0 * histo[i] / region_cnt);
@@ -2440,8 +2415,7 @@
 #endif // #ifdef ASSERT
 
 void PSParallelCompact::compact() {
-  // trace("5");
-  GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);
+  GCTraceTime(Trace, gc, phases) tm("Compaction Phase", &_gc_timer);
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();
@@ -2457,7 +2431,7 @@
   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
 
   {
-    GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);
+    GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
 
     gc_task_manager()->execute_and_wait(q);
 
@@ -2471,14 +2445,14 @@
 
   {
     // Update the deferred objects, if any.  Any compaction manager can be used.
-    GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
+    GCTraceTime(Trace, gc, phases) tm("Deferred Updates", &_gc_timer);
     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
       update_deferred_objects(cm, SpaceId(id));
     }
   }
 
-  DEBUG_ONLY(write_block_fill_histogram(gclog_or_tty));
+  DEBUG_ONLY(write_block_fill_histogram());
 }
 
 #ifdef  ASSERT
@@ -3107,18 +3081,13 @@
                                                   T* referent_addr,
                                                   T* next_addr,
                                                   T* discovered_addr) {
-  if(TraceReferenceGC && PrintGCDetails) {
-    gclog_or_tty->print_cr("%s obj " PTR_FORMAT, s, p2i(obj));
-    gclog_or_tty->print_cr("     referent_addr/* " PTR_FORMAT " / "
-                           PTR_FORMAT, p2i(referent_addr),
-                           referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
-    gclog_or_tty->print_cr("     next_addr/* " PTR_FORMAT " / "
-                           PTR_FORMAT, p2i(next_addr),
-                           next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
-    gclog_or_tty->print_cr("     discovered_addr/* " PTR_FORMAT " / "
-                           PTR_FORMAT, p2i(discovered_addr),
-                           discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
-  }
+  log_develop_trace(gc, ref)("%s obj " PTR_FORMAT, s, p2i(obj));
+  log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
+                             p2i(referent_addr), referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
+  log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
+                             p2i(next_addr), next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
+  log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
+                             p2i(discovered_addr), discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
 }
 #endif
 
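The conversion pattern used throughout psParallelCompact.cpp, as a minimal
sketch; the tag sets are taken from the hunks above, while the message text
and the num_regions/partial_obj_size values are illustrative only:

    // Single statement: the guard is implicit in the macro, and because this
    // is a develop-only log the whole call compiles away in product builds.
    log_develop_trace(gc, compaction, phases)("regions: " SIZE_FORMAT, num_regions);

    // Several statements, or arguments that are expensive to compute:
    // test the level once, then log.
    if (develop_log_is_enabled(Trace, gc, compaction, phases)) {
      const char* split_type = partial_obj_size == 0 ? "easy" : "hard";
      log_develop_trace(gc, compaction, phases)("%s split", split_type);
    }
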
--- a/src/share/vm/gc/parallel/psParallelCompact.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/psParallelCompact.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -966,7 +966,6 @@
   static ParallelCompactData  _summary_data;
   static IsAliveClosure       _is_alive_closure;
   static SpaceInfo            _space_info[last_space_id];
-  static bool                 _print_phases;
   static AdjustPointerClosure _adjust_pointer_closure;
   static AdjustKlassClosure   _adjust_klass_closure;
 
@@ -989,13 +988,10 @@
 
   static void initialize_space_info();
 
-  // Return true if details about individual phases should be printed.
-  static inline bool print_phases();
-
   // Clear the marking bitmap and summary data that cover the specified space.
   static void clear_data_covering_space(SpaceId id);
 
-  static void pre_compact(PreGCValues* pre_gc_values);
+  static void pre_compact();
   static void post_compact();
 
   // Mark live objects
@@ -1069,7 +1065,7 @@
   // Adjust addresses in roots.  Does not adjust addresses in heap.
   static void adjust_roots();
 
-  DEBUG_ONLY(static void write_block_fill_histogram(outputStream* const out);)
+  DEBUG_ONLY(static void write_block_fill_histogram();)
 
   // Move objects to new locations.
   static void compact_perm(ParCompactionManager* cm);
@@ -1260,10 +1256,6 @@
   return mark_bitmap()->is_marked(obj);
 }
 
-inline bool PSParallelCompact::print_phases() {
-  return _print_phases;
-}
-
 inline double PSParallelCompact::normal_distribution(double density) {
   assert(_dwl_initialized, "uninitialized");
   const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
--- a/src/share/vm/gc/parallel/psPromotionManager.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/psPromotionManager.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -30,6 +30,7 @@
 #include "gc/parallel/psScavenge.inline.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/memRegion.hpp"
 #include "memory/padded.inline.hpp"
@@ -99,7 +100,7 @@
 bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) {
   bool promotion_failure_occurred = false;
 
-  TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
+  TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
   for (uint i = 0; i < ParallelGCThreads + 1; i++) {
     PSPromotionManager* manager = manager_array(i);
     assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
@@ -128,7 +129,13 @@
 };
 
 void
-PSPromotionManager::print_taskqueue_stats(outputStream* const out) {
+PSPromotionManager::print_taskqueue_stats() {
+  if (!develop_log_is_enabled(Trace, gc, task, stats)) {
+    return;
+  }
+  LogHandle(gc, task, stats) log;
+  ResourceMark rm;
+  outputStream* out = log.trace_stream();
   out->print_cr("== GC Tasks Stats, GC %3d",
                 ParallelScavengeHeap::heap()->total_collections());
 
@@ -368,12 +375,7 @@
   T  next_oop = oopDesc::load_heap_oop(next_addr);
   if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
     T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
-    debug_only(
-      if(TraceReferenceGC && PrintGCDetails) {
-        gclog_or_tty->print_cr("   Process discovered as normal "
-                               PTR_FORMAT, p2i(discovered_addr));
-      }
-    )
+    log_develop_trace(gc, ref)("   Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
     if (PSScavenge::should_scavenge(discovered_addr)) {
       pm->claim_or_forward_depth(discovered_addr);
     }
@@ -430,13 +432,7 @@
     obj = obj->forwardee();
   }
 
-  if (TraceScavenge) {
-    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " (%d)}",
-                           "promotion-failure",
-                           obj->klass()->internal_name(),
-                           p2i(obj), obj->size());
-
-  }
+  log_develop_trace(gc, scavenge)("{promotion-failure %s " PTR_FORMAT " (%d)}", obj->klass()->internal_name(), p2i(obj), obj->size());
 
   return obj;
 }
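
Where a routine still wants an outputStream, as print_taskqueue_stats() does
above, the conversion goes through a LogHandle instead of a single log call.
A sketch under the same assumptions; the stats line and GC count are stand-ins:

    if (develop_log_is_enabled(Trace, gc, task, stats)) {
      LogHandle(gc, task, stats) log;
      ResourceMark rm;                        // trace_stream() allocates in a resource area
      outputStream* out = log.trace_stream();
      out->print_cr("== GC Tasks Stats, GC %3d", 42);  // illustrative count
    }
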
--- a/src/share/vm/gc/parallel/psPromotionManager.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/psPromotionManager.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -65,7 +65,7 @@
   size_t                              _array_chunks_processed;
 
   void print_local_stats(outputStream* const out, uint i) const;
-  static void print_taskqueue_stats(outputStream* const out = gclog_or_tty);
+  static void print_taskqueue_stats();
 
   void reset_stats();
 #endif // TASKQUEUE_STATS
--- a/src/share/vm/gc/parallel/psPromotionManager.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/psPromotionManager.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -31,6 +31,7 @@
 #include "gc/parallel/psPromotionManager.hpp"
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 
 inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
@@ -262,11 +263,9 @@
 
   // This code must come after the CAS test, or it will print incorrect
   // information.
-  if (TraceScavenge) {
-    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
-       should_scavenge(&new_obj) ? "copying" : "tenuring",
-       new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
-  }
+  log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
+                                  should_scavenge(&new_obj) ? "copying" : "tenuring",
+                                  new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
 
   return new_obj;
 }
@@ -285,10 +284,10 @@
 
   // This code must come after the CAS test, or it will print incorrect
   // information.
-  if (TraceScavenge && o->is_forwarded()) {
-    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
-       "forwarding",
-       new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
+  if (develop_log_is_enabled(Trace, gc, scavenge) && o->is_forwarded()) {
+    log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
+                      "forwarding",
+                      new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
   }
 
   oopDesc::encode_store_heap_oop_not_null(p, new_obj);
--- a/src/share/vm/gc/parallel/psScavenge.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/psScavenge.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -40,12 +40,13 @@
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "memory/resourceArea.hpp"
+#include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/fprofiler.hpp"
@@ -290,8 +291,6 @@
 
   heap->increment_total_collections();
 
-  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
-
   if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
     // Gather the feedback data for eden occupancy.
     young_gen->eden_space()->accumulate_statistics();
@@ -303,23 +302,21 @@
   assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
   assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
 
-  size_t prev_used = heap->used();
-
   // Fill in TLABs
   heap->accumulate_statistics_all_tlabs();
   heap->ensure_parsability(true);  // retire TLABs
 
   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify(" VerifyBeforeGC:");
+    Universe::verify("Before GC");
   }
 
   {
     ResourceMark rm;
     HandleMark hm;
 
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
+    GCTraceCPUTime tcpu;
+    GCTraceTime(Info, gc) tm("Pause Young", NULL, gc_cause, true);
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);
 
@@ -352,12 +349,7 @@
     reference_processor()->enable_discovery();
     reference_processor()->setup_policy(false);
 
-    // We track how much was promoted to the next generation for
-    // the AdaptiveSizePolicy.
-    size_t old_gen_used_before = old_gen->used_in_bytes();
-
-    // For PrintGCDetails
-    size_t young_gen_used_before = young_gen->used_in_bytes();
+    PreGCValues pre_gc_values(heap);
 
     // Reset our survivor overflow.
     set_survivor_overflow(false);
@@ -383,7 +375,7 @@
     // We'll use the promotion manager again later.
     PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
     {
-      GCTraceTime tm("Scavenge", false, false, &_gc_timer);
+      GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);
       ParallelScavengeHeap::ParStrongRootsScope psrs;
 
       GCTaskQueue* q = GCTaskQueue::create();
@@ -425,7 +417,7 @@
 
     // Process reference objects discovered during scavenge
     {
-      GCTraceTime tm("References", false, false, &_gc_timer);
+      GCTraceTime(Debug, gc, phases) tm("References", &_gc_timer);
 
       reference_processor()->setup_policy(false); // not always_clear
       reference_processor()->set_active_mt_degree(active_workers);
@@ -454,7 +446,7 @@
     }
 
     {
-      GCTraceTime tm("StringTable", false, false, &_gc_timer);
+      GCTraceTime(Debug, gc, phases) tm("StringTable", &_gc_timer);
       // Unlink any dead interned Strings and process the remaining live ones.
       PSScavengeRootsClosure root_closure(promotion_manager);
       StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
@@ -464,9 +456,7 @@
     promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
     if (promotion_failure_occurred) {
       clean_up_failed_promotion();
-      if (PrintGC) {
-        gclog_or_tty->print("--");
-      }
+      log_info(gc)("Promotion failed");
     }
 
     _gc_tracer.report_tenuring_threshold(tenuring_threshold());
@@ -483,7 +473,7 @@
       young_gen->swap_spaces();
 
       size_t survived = young_gen->from_space()->used_in_bytes();
-      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
+      size_t promoted = old_gen->used_in_bytes() - pre_gc_values.old_gen_used();
       size_policy->update_averages(_survivor_overflow, survived, promoted);
 
       // A successful scavenge should restart the GC time limit count which is
@@ -492,19 +482,9 @@
       if (UseAdaptiveSizePolicy) {
         // Calculate the new survivor size and tenuring threshold
 
-        if (PrintAdaptiveSizePolicy) {
-          gclog_or_tty->print("AdaptiveSizeStart: ");
-          gclog_or_tty->stamp();
-          gclog_or_tty->print_cr(" collection: %d ",
-                         heap->total_collections());
-
-          if (Verbose) {
-            gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
-              " young_gen_capacity: " SIZE_FORMAT,
-              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
-          }
-        }
-
+        log_debug(gc, ergo)("AdaptiveSizeStart:  collection: %d ", heap->total_collections());
+        log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
+                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
 
         if (UsePerfData) {
           PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
@@ -538,13 +518,9 @@
                                                            _tenuring_threshold,
                                                            survivor_limit);
 
-       if (PrintTenuringDistribution) {
-         gclog_or_tty->cr();
-         gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u"
-                                " (max threshold " UINTX_FORMAT ")",
-                                size_policy->calculated_survivor_size_in_bytes(),
-                                _tenuring_threshold, MaxTenuringThreshold);
-       }
+       log_debug(gc, age)("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max threshold " UINTX_FORMAT ")",
+                          size_policy->calculated_survivor_size_in_bytes(),
+                          _tenuring_threshold, MaxTenuringThreshold);
 
         if (UsePerfData) {
           PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
@@ -602,10 +578,7 @@
         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                         size_policy->calculated_survivor_size_in_bytes());
 
-        if (PrintAdaptiveSizePolicy) {
-          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
-                         heap->total_collections());
-        }
+        log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
       }
 
       // Update the structure of the eden. With NUMA-eden CPU hotplugging or offlining can
@@ -628,7 +601,7 @@
     NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
 
     {
-      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);
+      GCTraceTime(Debug, gc, phases) tm("Prune Scavenge Root Methods", &_gc_timer);
 
       CodeCache::prune_scavenge_root_nmethods();
     }
@@ -649,14 +622,9 @@
 
     if (TraceYoungGenTime) accumulated_time()->stop();
 
-    if (PrintGC) {
-      if (PrintGCDetails) {
-        // Don't print a GC timestamp here.  This is after the GC so
-        // would be confusing.
-        young_gen->print_used_change(young_gen_used_before);
-      }
-      heap->print_heap_change(prev_used);
-    }
+    young_gen->print_used_change(pre_gc_values.young_gen_used());
+    old_gen->print_used_change(pre_gc_values.old_gen_used());
+    MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());
 
     // Track memory usage and detect low memory
     MemoryService::track_memory_usage();
@@ -667,7 +635,7 @@
 
   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify(" VerifyAfterGC:");
+    Universe::verify("After GC");
   }
 
   heap->print_heap_after_gc();
@@ -675,17 +643,16 @@
 
   scavenge_exit.update();
 
-  if (PrintGCTaskTimeStamps) {
-    tty->print_cr("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
-                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
-                  scavenge_exit.ticks());
-    gc_task_manager()->print_task_time_stamps();
-  }
+  log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
+                            scavenge_entry.ticks(), scavenge_midpoint.ticks(),
+                            scavenge_exit.ticks());
+  gc_task_manager()->print_task_time_stamps();
 
 #ifdef TRACESPINNING
   ParallelTaskTerminator::print_termination_counts();
 #endif
 
+  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
 
   _gc_timer.register_gc_end();
 
@@ -708,9 +675,7 @@
     PSPromotionFailedClosure unforward_closure;
     young_gen->object_iterate(&unforward_closure);
 
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("Restoring " SIZE_FORMAT " marks", _preserved_oop_stack.size());
-    }
+    log_trace(gc, ergo)("Restoring " SIZE_FORMAT " marks", _preserved_oop_stack.size());
 
     // Restore any saved marks.
     while (!_preserved_oop_stack.is_empty()) {
@@ -772,19 +737,12 @@
   size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
   bool result = promotion_estimate < old_gen->free_in_bytes();
 
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print(result ? "  do scavenge: " : "  skip scavenge: ");
-    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
-      " padded_average_promoted " SIZE_FORMAT
-      " free in old gen " SIZE_FORMAT,
-      (size_t) policy->average_promoted_in_bytes(),
-      (size_t) policy->padded_average_promoted_in_bytes(),
-      old_gen->free_in_bytes());
-    if (young_gen->used_in_bytes() <
-        (size_t) policy->padded_average_promoted_in_bytes()) {
-      gclog_or_tty->print_cr(" padded_promoted_average is greater"
-        " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
-    }
+  log_trace(ergo)("%s scavenge: average_promoted " SIZE_FORMAT " padded_average_promoted " SIZE_FORMAT " free in old gen " SIZE_FORMAT,
+                result ? "Do" : "Skip", (size_t) policy->average_promoted_in_bytes(),
+                (size_t) policy->padded_average_promoted_in_bytes(),
+                old_gen->free_in_bytes());
+  if (young_gen->used_in_bytes() < (size_t) policy->padded_average_promoted_in_bytes()) {
+    log_trace(ergo)(" padded_promoted_average is greater than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
   }
 
   if (result) {
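
Phase timing follows the same template: the level and tags become macro
arguments to GCTraceTime and the old boolean flag arguments disappear. A
sketch of the two forms used in this file; the phase name is illustrative:

    // Top-level pause: Info level, message plus cause, no explicit timer.
    GCTraceTime(Info, gc) tm_pause("Pause Young", NULL, gc_cause, true);

    // Sub-phase: Debug level under the 'phases' tag, tied to the GC timer;
    // the elapsed time is reported when tm goes out of scope.
    {
      GCTraceTime(Debug, gc, phases) tm("Example Phase", &_gc_timer);
      // ... timed work ...
    }
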
--- a/src/share/vm/gc/parallel/psScavenge.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/psScavenge.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -29,6 +29,7 @@
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/parallel/psPromotionManager.inline.hpp"
 #include "gc/parallel/psScavenge.hpp"
+#include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "utilities/globalDefinitions.hpp"
 
@@ -138,13 +139,11 @@
     // If the klass has not been dirtied we know that there's
     // no references into  the young gen and we can skip it.
 
-    if (TraceScavenge) {
-      ResourceMark rm;
-      gclog_or_tty->print_cr("PSScavengeKlassClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
-                             p2i(klass),
-                             klass->external_name(),
-                             klass->has_modified_oops() ? "true" : "false");
-    }
+    NOT_PRODUCT(ResourceMark rm);
+    log_develop_trace(gc, scavenge)("PSScavengeKlassClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
+                                    p2i(klass),
+                                    klass->external_name(),
+                                    klass->has_modified_oops() ? "true" : "false");
 
     if (klass->has_modified_oops()) {
       // Clean the klass since we're going to scavenge all the metadata.
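
The do_klass() hunk above drops the explicit flag test entirely: the develop
log is conditional on its own, so only the ResourceMark survives, wrapped in
NOT_PRODUCT to match the develop-only log. Sketch, where klass is whatever the
closure is visiting and external_name() allocates in the resource area:

    NOT_PRODUCT(ResourceMark rm);
    log_develop_trace(gc, scavenge)("do_klass " PTR_FORMAT ", %s",
                                    p2i(klass), klass->external_name());
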
--- a/src/share/vm/gc/parallel/psVirtualspace.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/psVirtualspace.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -213,20 +213,6 @@
   }
 }
 
-void PSVirtualSpace::print() const {
-  gclog_or_tty->print_cr("virtual space [" PTR_FORMAT "]:  alignment="
-                         SIZE_FORMAT "K grows %s%s",
-                         p2i(this), alignment() / K, grows_up() ? "up" : "down",
-                         special() ? " (pinned in memory)" : "");
-  gclog_or_tty->print_cr("    reserved=" SIZE_FORMAT "K"
-                         " [" PTR_FORMAT "," PTR_FORMAT "]"
-                         " committed=" SIZE_FORMAT "K"
-                         " [" PTR_FORMAT "," PTR_FORMAT "]",
-                         reserved_size() / K,
-                         p2i(reserved_low_addr()), p2i(reserved_high_addr()),
-                         committed_size() / K,
-                         p2i(committed_low_addr()), p2i(committed_high_addr()));
-}
 #endif // #ifndef PRODUCT
 
 void PSVirtualSpace::print_space_boundaries_on(outputStream* st) const {
--- a/src/share/vm/gc/parallel/psVirtualspace.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/psVirtualspace.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -96,7 +96,6 @@
           bool is_aligned(size_t val) const;
           bool is_aligned(char* val) const;
           void verify() const;
-          void print() const;
   virtual bool grows_up() const   { return true; }
           bool grows_down() const { return !grows_up(); }
 
--- a/src/share/vm/gc/parallel/psYoungGen.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/parallel/psYoungGen.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -30,6 +30,7 @@
 #include "gc/parallel/psYoungGen.hpp"
 #include "gc/shared/gcUtil.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 
@@ -268,14 +269,12 @@
 
     space_invariants();
 
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("Young generation size: "
-        "desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT
-        " used: " SIZE_FORMAT " capacity: " SIZE_FORMAT
-        " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
-        eden_size, survivor_size, used_in_bytes(), capacity_in_bytes(),
-        _max_gen_size, min_gen_size());
-    }
+    log_trace(gc, ergo)("Young generation size: "
+                        "desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT
+                        " used: " SIZE_FORMAT " capacity: " SIZE_FORMAT
+                        " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
+                        eden_size, survivor_size, used_in_bytes(), capacity_in_bytes(),
+                        _max_gen_size, min_gen_size());
   }
 }
 
@@ -330,26 +329,17 @@
       size_changed = true;
     }
   } else {
-    if (Verbose && PrintGC) {
-      if (orig_size == gen_size_limit()) {
-        gclog_or_tty->print_cr("PSYoung generation size at maximum: "
-          SIZE_FORMAT "K", orig_size/K);
-      } else if (orig_size == min_gen_size()) {
-        gclog_or_tty->print_cr("PSYoung generation size at minium: "
-          SIZE_FORMAT "K", orig_size/K);
-      }
+    if (orig_size == gen_size_limit()) {
+      log_trace(gc)("PSYoung generation size at maximum: " SIZE_FORMAT "K", orig_size/K);
+    } else if (orig_size == min_gen_size()) {
+      log_trace(gc)("PSYoung generation size at minium: " SIZE_FORMAT "K", orig_size/K);
     }
   }
 
   if (size_changed) {
     post_resize();
-
-    if (Verbose && PrintGC) {
-      size_t current_size  = virtual_space()->committed_size();
-      gclog_or_tty->print_cr("PSYoung generation size changed: "
-                             SIZE_FORMAT "K->" SIZE_FORMAT "K",
-                             orig_size/K, current_size/K);
-    }
+    log_trace(gc)("PSYoung generation size changed: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
+                  orig_size/K, virtual_space()->committed_size()/K);
   }
 
   guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
@@ -412,28 +402,25 @@
     s2->mangle_region(delta2_right);
   }
 
-  if (TraceZapUnusedHeapArea) {
-    // s1
-    gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
-      "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
-      p2i(s1->bottom()), p2i(s1->end()),
-      p2i(s1MR.start()), p2i(s1MR.end()));
-    gclog_or_tty->print_cr("    Mangle before: [" PTR_FORMAT ", "
-      PTR_FORMAT ")  Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
-      p2i(delta1_left.start()), p2i(delta1_left.end()),
-      p2i(delta1_right.start()), p2i(delta1_right.end()));
+  // s1
+  log_develop_trace(gc)("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
+    "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
+    p2i(s1->bottom()), p2i(s1->end()),
+    p2i(s1MR.start()), p2i(s1MR.end()));
+  log_develop_trace(gc)("    Mangle before: [" PTR_FORMAT ", "
+    PTR_FORMAT ")  Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
+    p2i(delta1_left.start()), p2i(delta1_left.end()),
+    p2i(delta1_right.start()), p2i(delta1_right.end()));
 
-    // s2
-    gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
-      "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
-      p2i(s2->bottom()), p2i(s2->end()),
-      p2i(s2MR.start()), p2i(s2MR.end()));
-    gclog_or_tty->print_cr("    Mangle before: [" PTR_FORMAT ", "
-      PTR_FORMAT ")  Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
-      p2i(delta2_left.start()), p2i(delta2_left.end()),
-      p2i(delta2_right.start()), p2i(delta2_right.end()));
-  }
-
+  // s2
+  log_develop_trace(gc)("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
+    "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
+    p2i(s2->bottom()), p2i(s2->end()),
+    p2i(s2MR.start()), p2i(s2MR.end()));
+  log_develop_trace(gc)("    Mangle before: [" PTR_FORMAT ", "
+    PTR_FORMAT ")  Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
+    p2i(delta2_left.start()), p2i(delta2_left.end()),
+    p2i(delta2_right.start()), p2i(delta2_right.end()));
 }
 #endif // NOT PRODUCT
 
@@ -448,41 +435,32 @@
     return;
   }
 
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("PSYoungGen::resize_spaces(requested_eden_size: "
-                  SIZE_FORMAT
-                  ", requested_survivor_size: " SIZE_FORMAT ")",
-                  requested_eden_size, requested_survivor_size);
-    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
-                  SIZE_FORMAT,
-                  p2i(eden_space()->bottom()),
-                  p2i(eden_space()->end()),
-                  pointer_delta(eden_space()->end(),
-                                eden_space()->bottom(),
-                                sizeof(char)));
-    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") "
-                  SIZE_FORMAT,
-                  p2i(from_space()->bottom()),
-                  p2i(from_space()->end()),
-                  pointer_delta(from_space()->end(),
-                                from_space()->bottom(),
-                                sizeof(char)));
-    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") "
-                  SIZE_FORMAT,
-                  p2i(to_space()->bottom()),
-                  p2i(to_space()->end()),
-                  pointer_delta(  to_space()->end(),
-                                  to_space()->bottom(),
-                                  sizeof(char)));
-  }
+  log_trace(gc, ergo)("PSYoungGen::resize_spaces(requested_eden_size: " SIZE_FORMAT ", requested_survivor_size: " SIZE_FORMAT ")",
+                      requested_eden_size, requested_survivor_size);
+  log_trace(gc, ergo)("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") " SIZE_FORMAT,
+                      p2i(eden_space()->bottom()),
+                      p2i(eden_space()->end()),
+                      pointer_delta(eden_space()->end(),
+                                    eden_space()->bottom(),
+                                    sizeof(char)));
+  log_trace(gc, ergo)("    from: [" PTR_FORMAT ".." PTR_FORMAT ") " SIZE_FORMAT,
+                      p2i(from_space()->bottom()),
+                      p2i(from_space()->end()),
+                      pointer_delta(from_space()->end(),
+                                    from_space()->bottom(),
+                                    sizeof(char)));
+  log_trace(gc, ergo)("      to: [" PTR_FORMAT ".." PTR_FORMAT ") " SIZE_FORMAT,
+                      p2i(to_space()->bottom()),
+                      p2i(to_space()->end()),
+                      pointer_delta(  to_space()->end(),
+                                      to_space()->bottom(),
+                                      sizeof(char)));
 
   // There's nothing to do if the new sizes are the same as the current
   if (requested_survivor_size == to_space()->capacity_in_bytes() &&
       requested_survivor_size == from_space()->capacity_in_bytes() &&
       requested_eden_size == eden_space()->capacity_in_bytes()) {
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("    capacities are the right sizes, returning");
-    }
+    log_trace(gc, ergo)("    capacities are the right sizes, returning");
     return;
   }
 
@@ -503,9 +481,7 @@
   if (eden_from_to_order) {
     // Eden, from, to
     eden_from_to_order = true;
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("  Eden, from, to:");
-    }
+    log_trace(gc, ergo)("  Eden, from, to:");
 
     // Set eden
     // "requested_eden_size" is a goal for the size of eden
@@ -566,28 +542,21 @@
 
     guarantee(to_start != to_end, "to space is zero sized");
 
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(eden_start),
-                    p2i(eden_end),
-                    pointer_delta(eden_end, eden_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [from_start .. from_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(from_start),
-                    p2i(from_end),
-                    pointer_delta(from_end, from_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(to_start),
-                    p2i(to_end),
-                    pointer_delta(  to_end,   to_start, sizeof(char)));
-    }
+    log_trace(gc, ergo)("    [eden_start .. eden_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
+                        p2i(eden_start),
+                        p2i(eden_end),
+                        pointer_delta(eden_end, eden_start, sizeof(char)));
+    log_trace(gc, ergo)("    [from_start .. from_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
+                        p2i(from_start),
+                        p2i(from_end),
+                        pointer_delta(from_end, from_start, sizeof(char)));
+    log_trace(gc, ergo)("    [  to_start ..   to_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
+                        p2i(to_start),
+                        p2i(to_end),
+                        pointer_delta(  to_end,   to_start, sizeof(char)));
   } else {
     // Eden, to, from
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("  Eden, to, from:");
-    }
+    log_trace(gc, ergo)("  Eden, to, from:");
 
     // To space gets priority over eden resizing. Note that we position
     // to space as if we were able to resize from space, even though from
@@ -623,23 +592,18 @@
     eden_end = MAX2(eden_end, eden_start + alignment);
     to_start = MAX2(to_start, eden_end);
 
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(eden_start),
-                    p2i(eden_end),
-                    pointer_delta(eden_end, eden_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(to_start),
-                    p2i(to_end),
-                    pointer_delta(  to_end,   to_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [from_start .. from_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(from_start),
-                    p2i(from_end),
-                    pointer_delta(from_end, from_start, sizeof(char)));
-    }
+    log_trace(gc, ergo)("    [eden_start .. eden_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
+                        p2i(eden_start),
+                        p2i(eden_end),
+                        pointer_delta(eden_end, eden_start, sizeof(char)));
+    log_trace(gc, ergo)("    [  to_start ..   to_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
+                        p2i(to_start),
+                        p2i(to_end),
+                        pointer_delta(  to_end,   to_start, sizeof(char)));
+    log_trace(gc, ergo)("    [from_start .. from_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
+                        p2i(from_start),
+                        p2i(from_end),
+                        pointer_delta(from_end, from_start, sizeof(char)));
   }
 
 
@@ -658,7 +622,7 @@
   // Let's make sure the call to initialize doesn't reset "top"!
   HeapWord* old_from_top = from_space()->top();
 
-  // For PrintAdaptiveSizePolicy block  below
+  // For the logging block below
   size_t old_from = from_space()->capacity_in_bytes();
   size_t old_to   = to_space()->capacity_in_bytes();
 
@@ -704,18 +668,11 @@
 
   assert(from_space()->top() == old_from_top, "from top changed!");
 
-  if (PrintAdaptiveSizePolicy) {
-    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-    gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
-                  "collection: %d "
-                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
-                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
-                  heap->total_collections(),
-                  old_from, old_to,
-                  from_space()->capacity_in_bytes(),
-                  to_space()->capacity_in_bytes());
-    gclog_or_tty->cr();
-  }
+  log_trace(gc, ergo)("AdaptiveSizePolicy::survivor space sizes: collection: %d (" SIZE_FORMAT ", " SIZE_FORMAT ") -> (" SIZE_FORMAT ", " SIZE_FORMAT ") ",
+                      ParallelScavengeHeap::heap()->total_collections(),
+                      old_from, old_to,
+                      from_space()->capacity_in_bytes(),
+                      to_space()->capacity_in_bytes());
 }
 
 void PSYoungGen::swap_spaces() {
@@ -794,13 +751,8 @@
 void PSYoungGen::print() const { print_on(tty); }
 void PSYoungGen::print_on(outputStream* st) const {
   st->print(" %-15s", "PSYoungGen");
-  if (PrintGCDetails && Verbose) {
-    st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,
-               capacity_in_bytes(), used_in_bytes());
-  } else {
-    st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
-               capacity_in_bytes()/K, used_in_bytes()/K);
-  }
+  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
+             capacity_in_bytes()/K, used_in_bytes()/K);
   virtual_space()->print_space_boundaries_on(st);
   st->print("  eden"); eden_space()->print_on(st);
   st->print("  from"); from_space()->print_on(st);
@@ -809,13 +761,8 @@
 
 // Note that a space is not printed before the [NAME:
 void PSYoungGen::print_used_change(size_t prev_used) const {
-  gclog_or_tty->print("[%s:", name());
-  gclog_or_tty->print(" "  SIZE_FORMAT "K"
-                      "->" SIZE_FORMAT "K"
-                      "("  SIZE_FORMAT "K)",
-                      prev_used / K, used_in_bytes() / K,
-                      capacity_in_bytes() / K);
-  gclog_or_tty->print("]");
+  log_info(gc, heap)("%s: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
+      name(), prev_used / K, used_in_bytes() / K, capacity_in_bytes() / K);
 }
 
 size_t PSYoungGen::available_for_expansion() {
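
All of the conversions above share one shape: the call site names a level and a tag set (log_trace(gc, ergo), log_info(gc, heap), ...) and the logging framework filters on the threshold configured for that tag set, which is why the old "if (PrintAdaptiveSizePolicy && Verbose)" guards can simply go away. Below is a minimal standalone sketch of that filtering idea in plain C++, not the real HotSpot API; Level, is_enabled, and log_line are illustrative names only.

    #include <cstdarg>
    #include <cstddef>
    #include <cstdio>

    enum class Level { Error = 0, Warning, Info, Debug, Trace };
    static Level g_gc_ergo_level = Level::Debug;  // stand-in for -Xlog:gc+ergo=debug

    static bool is_enabled(Level l) { return l <= g_gc_ergo_level; }

    static void log_line(Level l, const char* fmt, ...) {
      if (!is_enabled(l)) return;               // cheap filter before any formatting
      va_list ap;
      va_start(ap, fmt);
      vfprintf(stdout, fmt, ap);
      va_end(ap);
      fputc('\n', stdout);
    }

    int main() {
      size_t eden = 8192, survivor = 1024;
      // Analogue of log_trace(gc, ergo)(...): filtered out at the Debug threshold.
      log_line(Level::Trace, "resize_spaces(eden: %zu, survivor: %zu)", eden, survivor);
      // Analogue of log_debug(gc, ergo)(...): emitted.
      log_line(Level::Debug, "capacities are the right sizes, returning");
      return 0;
    }

In the JVM itself the per-tag-set threshold comes from the -Xlog option, e.g. -Xlog:gc+ergo=trace.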
--- a/src/share/vm/gc/serial/defNewGeneration.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/serial/defNewGeneration.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -31,7 +31,7 @@
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/generationSpec.hpp"
@@ -39,6 +39,7 @@
 #include "gc/shared/space.inline.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "gc/shared/strongRootsScope.hpp"
+#include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
@@ -134,13 +135,11 @@
 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
 
 void KlassScanClosure::do_klass(Klass* klass) {
-  if (TraceScavenge) {
-    ResourceMark rm;
-    gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
-                           p2i(klass),
-                           klass->external_name(),
-                           klass->has_modified_oops() ? "true" : "false");
-  }
+  NOT_PRODUCT(ResourceMark rm);
+  log_develop_trace(gc, scavenge)("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
+                                  p2i(klass),
+                                  klass->external_name(),
+                                  klass->has_modified_oops() ? "true" : "false");
 
   // If the klass has not been dirtied we know that there's
   // no references into  the young gen and we can skip it.
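
The NOT_PRODUCT(ResourceMark rm) pairs with log_develop_trace, which exists only in non-product builds; in product builds the whole statement, including the klass->external_name() call, compiles away. A rough standalone sketch of that compile-time switch (the macros here are illustrative stand-ins, not the HotSpot definitions):

    #include <cstdio>

    // #define PRODUCT 1   // uncomment to simulate a product build

    #ifdef PRODUCT
      #define log_develop_trace(...) ((void)0)             // call and arguments vanish
    #else
      #define log_develop_trace(...) std::printf(__VA_ARGS__)
    #endif

    static const char* external_name() { return "java/lang/String"; }  // stub

    int main() {
      // In a product build this whole line compiles away, so external_name()
      // is never called -- hence the non-product-only ResourceMark above.
      log_develop_trace("do_klass %p, %s, dirty: %s\n",
                        static_cast<void*>(nullptr), external_name(), "true");
      return 0;
    }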
@@ -359,10 +358,7 @@
   // but the second succeeds and expands the heap to its maximum
   // value.
   if (GC_locker::is_active()) {
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("Garbage collection disabled, "
-        "expanded heap instead");
-    }
+    log_debug(gc)("Garbage collection disabled, expanded heap instead");
   }
 
   return success;
@@ -429,22 +425,15 @@
     MemRegion cmr((HeapWord*)_virtual_space.low(),
                   (HeapWord*)_virtual_space.high());
     gch->barrier_set()->resize_covered_region(cmr);
-    if (Verbose && PrintGC) {
-      size_t new_size_after  = _virtual_space.committed_size();
-      size_t eden_size_after = eden()->capacity();
-      size_t survivor_size_after = from()->capacity();
-      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
-        SIZE_FORMAT "K [eden="
-        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
-        new_size_before/K, new_size_after/K,
-        eden_size_after/K, survivor_size_after/K);
-      if (WizardMode) {
-        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
+
+    log_debug(gc, heap, ergo)(
+        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
+        new_size_before/K, _virtual_space.committed_size()/K,
+        eden()->capacity()/K, from()->capacity()/K);
+    log_trace(gc, heap, ergo)(
+        "  [allowed " SIZE_FORMAT "K extra for %d threads]",
           thread_increase_size/K, threads_count);
       }
-      gclog_or_tty->cr();
-    }
-  }
 }
 
 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
@@ -507,34 +496,27 @@
 // The last collection bailed out, we are running out of heap space,
 // so we try to allocate the from-space, too.
 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
+  bool should_try_alloc = should_allocate_from_space() || GC_locker::is_active_and_needs_gc();
+
+  // If the Heap_lock is not locked by this thread, this will be called
+  // again later with the Heap_lock held.
+  bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));
+
   HeapWord* result = NULL;
-  if (Verbose && PrintGCDetails) {
-    gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
-                        "  will_fail: %s"
-                        "  heap_lock: %s"
-                        "  free: " SIZE_FORMAT,
+  if (do_alloc) {
+    result = from()->allocate(size);
+  }
+
+  log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):  will_fail: %s  heap_lock: %s  free: " SIZE_FORMAT "%s%s returns %s",
                         size,
                         GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                           "true" : "false",
                         Heap_lock->is_locked() ? "locked" : "unlocked",
-                        from()->free());
-  }
-  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
-    if (Heap_lock->owned_by_self() ||
-        (SafepointSynchronize::is_at_safepoint() &&
-         Thread::current()->is_VM_thread())) {
-      // If the Heap_lock is not locked by this thread, this will be called
-      // again later with the Heap_lock held.
-      result = from()->allocate(size);
-    } else if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
-    }
-  } else if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
-  }
-  if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
-  }
+                        from()->free(),
+                        should_try_alloc ? "" : "  should_allocate_from_space: NOT",
+                        (should_try_alloc && !do_alloc) ? "  Heap_lock is not owned by self" : "",
+                        result == NULL ? "NULL" : "object");
+
   return result;
 }
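
Collapsing the old three-branch output into one statement relies on empty-or-populated "%s" suffix arguments. One caveat, corrected above: the "Heap_lock is not owned by self" suffix belongs to the case where allocation was wanted but skipped (should_try_alloc && !do_alloc), not to the case where it ran. A tiny standalone illustration with stub values:

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Stub values standing in for the predicates in the hunk above.
      bool should_try_alloc = true;   // should_allocate_from_space() || GC locker
      bool do_alloc         = false;  // Heap_lock not owned by self here
      bool got_object       = false;

      std::printf("allocate_from_space(%zu):  free: %zu%s%s returns %s\n",
                  (size_t)64, (size_t)4096,
                  should_try_alloc ? "" : "  should_allocate_from_space: NOT",
                  (should_try_alloc && !do_alloc) ? "  Heap_lock is not owned by self" : "",
                  got_object ? "object" : "NULL");
      return 0;
    }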
 
@@ -570,9 +552,7 @@
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
-    if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print(" :: Collection attempt not safe :: ");
-    }
+    log_trace(gc)(":: Collection attempt not safe ::");
     gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
     return;
   }
@@ -580,9 +560,7 @@
 
   init_assuming_no_promotion_failure();
 
-  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
-  // Capture heap used before collection (for printing).
-  size_t gch_prev_used = gch->used();
+  GCTraceTime(Trace, gc) tm("DefNew", NULL, gch->gc_cause());
 
   gch->trace_heap_before_gc(&gc_tracer);
 
@@ -677,9 +655,7 @@
     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
     remove_forwarding_pointers();
-    if (PrintGCDetails) {
-      gclog_or_tty->print(" (promotion failed) ");
-    }
+    log_debug(gc)("Promotion failed");
     // Add to-space to the list of space to compact
     // when a promotion failure has occurred.  In that
     // case there can be live objects in to-space
@@ -696,9 +672,6 @@
     // Reset the PromotionFailureALot counters.
     NOT_PRODUCT(gch->reset_promotion_should_fail();)
   }
-  if (PrintGC && !PrintGCDetails) {
-    gch->print_heap_change(gch_prev_used);
-  }
   // set new iteration safe limit for the survivor spaces
   from()->set_concurrent_iteration_safe_limit(from()->top());
   to()->set_concurrent_iteration_safe_limit(to()->top());
@@ -760,10 +733,8 @@
 }
 
 void DefNewGeneration::handle_promotion_failure(oop old) {
-  if (PrintPromotionFailure && !_promotion_failed) {
-    gclog_or_tty->print(" (promotion failure size = %d) ",
-                        old->size());
-  }
+  log_debug(gc, promotion)("Promotion failure size = %d) ", old->size());
+
   _promotion_failed = true;
   _promotion_failed_info.register_copy_failure(old->size());
   preserve_mark_if_necessary(old, old->mark());
@@ -895,9 +866,7 @@
 
 bool DefNewGeneration::collection_attempt_is_safe() {
   if (!to()->is_empty()) {
-    if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print(" :: to is not empty :: ");
-    }
+    log_trace(gc)(":: to is not empty ::");
     return false;
   }
   if (_old_gen == NULL) {
@@ -919,17 +888,13 @@
   if (full) {
     DEBUG_ONLY(seen_incremental_collection_failed = false;)
     if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
-      if (Verbose && PrintGCDetails) {
-        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
+      log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                             GCCause::to_string(gch->gc_cause()));
-      }
       gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
       set_should_allocate_from_space(); // we seem to be running out of space
     } else {
-      if (Verbose && PrintGCDetails) {
-        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
+      log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                             GCCause::to_string(gch->gc_cause()));
-      }
       gch->clear_incremental_collection_failed(); // We just did a full collection
       clear_should_allocate_from_space(); // if set
     }
@@ -943,16 +908,12 @@
     // a full collection in between.
     if (!seen_incremental_collection_failed &&
         gch->incremental_collection_failed()) {
-      if (Verbose && PrintGCDetails) {
-        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
+      log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                             GCCause::to_string(gch->gc_cause()));
-      }
       seen_incremental_collection_failed = true;
     } else if (seen_incremental_collection_failed) {
-      if (Verbose && PrintGCDetails) {
-        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
+      log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                             GCCause::to_string(gch->gc_cause()));
-      }
       assert(gch->gc_cause() == GCCause::_scavenge_alot ||
              (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
              !gch->incremental_collection_failed(),
--- a/src/share/vm/gc/serial/defNewGeneration.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/serial/defNewGeneration.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -339,7 +339,6 @@
   virtual const char* name() const;
   virtual const char* short_name() const { return "DefNew"; }
 
-  // PrintHeapAtGC support.
   void print_on(outputStream* st) const;
 
   void verify();
--- a/src/share/vm/gc/serial/genMarkSweep.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/serial/genMarkSweep.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/stringTable.hpp"
+#include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
@@ -34,7 +35,7 @@
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
@@ -70,8 +71,6 @@
   set_ref_processor(rp);
   rp->setup_policy(clear_all_softrefs);
 
-  GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
-
   gch->trace_heap_before_gc(_gc_tracer);
 
   // When collecting the permanent generation Method*s may be moving,
@@ -81,9 +80,6 @@
   // Increment the invocation count
   _total_invocations++;
 
-  // Capture heap size before collection for printing.
-  size_t gch_prev_used = gch->used();
-
   // Capture used regions for each generation that will be
   // subject to collection, so that card table adjustments can
   // be made intelligently (see clear / invalidate further below).
@@ -133,10 +129,6 @@
   CodeCache::gc_epilogue();
   JvmtiExport::gc_epilogue();
 
-  if (PrintGC && !PrintGCDetails) {
-    gch->print_heap_change(gch_prev_used);
-  }
-
   // refs processing: clean slate
   set_ref_processor(NULL);
 
@@ -188,7 +180,7 @@
 
 void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer);
+  GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", _gc_timer);
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
@@ -261,7 +253,7 @@
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
-  GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer);
+  GCTraceTime(Trace, gc) tm("Phase 2: Compute new object addresses", _gc_timer);
 
   gch->prepare_for_compaction();
 }
@@ -277,7 +269,7 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer);
+  GCTraceTime(Trace, gc) tm("Phase 3: Adjust pointers", _gc_timer);
 
   // Need new claim bits for the pointer adjustment tracing.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -329,7 +321,7 @@
   // to use a higher index (saved from phase2) when verifying perm_gen.
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
-  GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer);
+  GCTraceTime(Trace, gc) tm("Phase 4: Move objects", _gc_timer);
 
   GenCompactClosure blk;
   gch->generation_iterate(&blk, true);
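
Each GCTraceTime(Trace, gc) tm("Phase N: ...", _gc_timer) above is a scoped object: construction records the start, destruction logs the title with the elapsed time at the requested level. A minimal RAII sketch of that shape (not the real GCTraceTime, which also feeds the phase into the GCTimer):

    #include <chrono>
    #include <cstdio>

    class ScopedPhaseTimer {
      const char* _title;
      std::chrono::steady_clock::time_point _start;
     public:
      explicit ScopedPhaseTimer(const char* title)
          : _title(title), _start(std::chrono::steady_clock::now()) {}
      ~ScopedPhaseTimer() {
        double ms = std::chrono::duration<double, std::milli>(
                        std::chrono::steady_clock::now() - _start).count();
        std::printf("[trace][gc] %s %.3fms\n", _title, ms);
      }
    };

    static void mark_sweep_phase1() {
      ScopedPhaseTimer tm("Phase 1: Mark live objects");  // like GCTraceTime(Trace, gc)
      // ... marking work would run here ...
    }

    int main() {
      mark_sweep_phase1();
      return 0;
    }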
--- a/src/share/vm/gc/serial/markSweep.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/serial/markSweep.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -250,10 +250,7 @@
 void MarkSweep::restore_marks() {
   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
          "inconsistent preserved oop stacks");
-  if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("Restoring " SIZE_FORMAT " marks",
-                           _preserved_count + _preserved_oop_stack.size());
-  }
+  log_trace(gc)("Restoring " SIZE_FORMAT " marks", _preserved_count + _preserved_oop_stack.size());
 
   // restore the marks we saved earlier
   for (size_t i = 0; i < _preserved_count; i++) {
@@ -305,20 +302,13 @@
                                                   T* referent_addr,
                                                   T* next_addr,
                                                   T* discovered_addr) {
-  if(TraceReferenceGC && PrintGCDetails) {
-    gclog_or_tty->print_cr("%s obj " PTR_FORMAT, s, p2i(obj));
-    gclog_or_tty->print_cr("     referent_addr/* " PTR_FORMAT " / "
-                           PTR_FORMAT, p2i(referent_addr),
-                           p2i(referent_addr ?
-                               (address)oopDesc::load_decode_heap_oop(referent_addr) : NULL));
-    gclog_or_tty->print_cr("     next_addr/* " PTR_FORMAT " / "
-                           PTR_FORMAT, p2i(next_addr),
-                           p2i(next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL));
-    gclog_or_tty->print_cr("     discovered_addr/* " PTR_FORMAT " / "
-                           PTR_FORMAT, p2i(discovered_addr),
-                           p2i(discovered_addr ?
-                               (address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL));
-  }
+  log_develop_trace(gc, ref)("%s obj " PTR_FORMAT, s, p2i(obj));
+  log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
+                             p2i(referent_addr), p2i(referent_addr ? (address)oopDesc::load_decode_heap_oop(referent_addr) : NULL));
+  log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
+                             p2i(next_addr), p2i(next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL));
+  log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
+                             p2i(discovered_addr), p2i(discovered_addr ? (address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL));
 }
 #endif
 
--- a/src/share/vm/gc/serial/tenuredGeneration.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/serial/tenuredGeneration.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -32,6 +32,7 @@
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/space.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
@@ -81,42 +82,28 @@
   // why it returns what it returns (without re-evaluating the conditionals
   // in case they aren't idempotent), so I'm doing it this way.
   // DeMorgan says it's okay.
-  bool result = false;
-  if (!result && full) {
-    result = true;
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
-                    " full");
-    }
+  if (full) {
+    log_trace(gc)("TenuredGeneration::should_collect: because full");
+    return true;
   }
-  if (!result && should_allocate(size, is_tlab)) {
-    result = true;
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
-                    " should_allocate(" SIZE_FORMAT ")",
-                    size);
-    }
+  if (should_allocate(size, is_tlab)) {
+    log_trace(gc)("TenuredGeneration::should_collect: because should_allocate(" SIZE_FORMAT ")", size);
+    return true;
   }
   // If we don't have very much free space.
   // XXX: 10000 should be a percentage of the capacity!!!
-  if (!result && free() < 10000) {
-    result = true;
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
-                    " free(): " SIZE_FORMAT,
-                    free());
-    }
+  if (free() < 10000) {
+    log_trace(gc)("TenuredGeneration::should_collect: because free(): " SIZE_FORMAT, free());
+    return true;
   }
   // If we had to expand to accommodate promotions from the young generation
-  if (!result && _capacity_at_prologue < capacity()) {
-    result = true;
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
-                    "_capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
-                    _capacity_at_prologue, capacity());
-    }
+  if (_capacity_at_prologue < capacity()) {
+    log_trace(gc)("TenuredGeneration::should_collect: because_capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
+        _capacity_at_prologue, capacity());
+    return true;
   }
-  return result;
+
+  return false;
 }
 
 void TenuredGeneration::compute_new_size() {
@@ -165,13 +152,10 @@
   size_t available = max_contiguous_available();
   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
-  if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr(
-      "Tenured: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "),"
-      "max_promo(" SIZE_FORMAT ")",
-      res? "":" not", available, res? ">=":"<",
-      av_promo, max_promotion_in_bytes);
-  }
+
+  log_trace(gc)("Tenured: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
+    res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
+
   return res;
 }
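
The should_collect rewrite above replaces the accumulate-into-result pattern with early returns; behavior is preserved because the conditions are tested in the same order and the first true one wins. A standalone sketch with stubbed predicates:

    #include <cstddef>
    #include <cstdio>

    static bool full_gc = false;
    static bool should_allocate(size_t size) { return size > 1024; }
    static size_t free_bytes() { return 4096; }

    static bool should_collect(size_t size) {
      if (full_gc) {
        std::puts("should_collect: because full");
        return true;
      }
      if (should_allocate(size)) {
        std::printf("should_collect: because should_allocate(%zu)\n", size);
        return true;
      }
      if (free_bytes() < 10000) {   // threshold kept from the original code
        std::printf("should_collect: because free(): %zu\n", free_bytes());
        return true;
      }
      return false;
    }

    int main() {
      std::printf("-> %s\n", should_collect(2048) ? "collect" : "no collect");
      return 0;
    }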
 
--- a/src/share/vm/gc/shared/adaptiveSizePolicy.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/adaptiveSizePolicy.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,6 +27,7 @@
 #include "gc/shared/collectorPolicy.hpp"
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/workgroup.hpp"
+#include "logging/log.hpp"
 #include "runtime/timer.hpp"
 #include "utilities/ostream.hpp"
 elapsedTimer AdaptiveSizePolicy::_minor_timer;
@@ -166,14 +167,12 @@
       "Jiggled active workers too much");
   }
 
-  if (TraceDynamicGCThreads) {
-     gclog_or_tty->print_cr("GCTaskManager::calc_default_active_workers() : "
-       "active_workers(): " UINTX_FORMAT "  new_active_workers: " UINTX_FORMAT "  "
-       "prev_active_workers: " UINTX_FORMAT "\n"
-       " active_workers_by_JT: " UINTX_FORMAT "  active_workers_by_heap_size: " UINTX_FORMAT,
-       active_workers, new_active_workers, prev_active_workers,
-       active_workers_by_JT, active_workers_by_heap_size);
-  }
+  log_trace(gc, task)("GCTaskManager::calc_default_active_workers() : "
+    "active_workers(): " UINTX_FORMAT "  new_active_workers: " UINTX_FORMAT "  "
+    "prev_active_workers: " UINTX_FORMAT "\n"
+    " active_workers_by_JT: " UINTX_FORMAT "  active_workers_by_heap_size: " UINTX_FORMAT,
+    active_workers, new_active_workers, prev_active_workers,
+    active_workers_by_JT, active_workers_by_heap_size);
   assert(new_active_workers > 0, "Always need at least 1");
   return new_active_workers;
 }
@@ -275,14 +274,10 @@
     update_minor_pause_young_estimator(minor_pause_in_ms);
     update_minor_pause_old_estimator(minor_pause_in_ms);
 
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print("AdaptiveSizePolicy::minor_collection_end: "
-                          "minor gc cost: %f  average: %f", collection_cost,
-                          _avg_minor_gc_cost->average());
-      gclog_or_tty->print_cr("  minor pause: %f minor period %f",
-                             minor_pause_in_ms,
-                             _latest_minor_mutator_interval_seconds * MILLIUNITS);
-    }
+    log_trace(gc, ergo)("AdaptiveSizePolicy::minor_collection_end: minor gc cost: %f  average: %f",
+                        collection_cost, _avg_minor_gc_cost->average());
+    log_trace(gc, ergo)("  minor pause: %f minor period %f",
+                        minor_pause_in_ms, _latest_minor_mutator_interval_seconds * MILLIUNITS);
 
     // Calculate variable used to estimate collection cost vs. gen sizes
     assert(collection_cost >= 0.0, "Expected to be non-negative");
@@ -388,13 +383,10 @@
 
       // Decay using the time-since-last-major-gc
       decayed_major_gc_cost = decaying_major_gc_cost();
-      if (PrintGCDetails && Verbose) {
-        gclog_or_tty->print_cr("\ndecaying_gc_cost: major interval average:"
-          " %f  time since last major gc: %f",
-          avg_major_interval, time_since_last_major_gc);
-        gclog_or_tty->print_cr("  major gc cost: %f  decayed major gc cost: %f",
-          major_gc_cost(), decayed_major_gc_cost);
-      }
+      log_trace(gc, ergo)("decaying_gc_cost: major interval average: %f  time since last major gc: %f",
+                    avg_major_interval, time_since_last_major_gc);
+      log_trace(gc, ergo)("  major gc cost: %f  decayed major gc cost: %f",
+                    major_gc_cost(), decayed_major_gc_cost);
     }
   }
   double result = MIN2(1.0, decayed_major_gc_cost + minor_gc_cost());
@@ -461,21 +453,17 @@
   promo_limit = MAX2(promo_limit, _promo_size);
 
 
-  if (PrintAdaptiveSizePolicy && (Verbose ||
-      (free_in_old_gen < (size_t) mem_free_old_limit &&
-       free_in_eden < (size_t) mem_free_eden_limit))) {
-    gclog_or_tty->print_cr(
-          "PSAdaptiveSizePolicy::check_gc_overhead_limit:"
-          " promo_limit: " SIZE_FORMAT
-          " max_eden_size: " SIZE_FORMAT
-          " total_free_limit: " SIZE_FORMAT
-          " max_old_gen_size: " SIZE_FORMAT
-          " max_eden_size: " SIZE_FORMAT
-          " mem_free_limit: " SIZE_FORMAT,
-          promo_limit, max_eden_size, total_free_limit,
-          max_old_gen_size, max_eden_size,
-          (size_t) mem_free_limit);
-  }
+  log_trace(gc, ergo)(
+        "PSAdaptiveSizePolicy::check_gc_overhead_limit:"
+        " promo_limit: " SIZE_FORMAT
+        " max_eden_size: " SIZE_FORMAT
+        " total_free_limit: " SIZE_FORMAT
+        " max_old_gen_size: " SIZE_FORMAT
+        " max_eden_size: " SIZE_FORMAT
+        " mem_free_limit: " SIZE_FORMAT,
+        promo_limit, max_eden_size, total_free_limit,
+        max_old_gen_size, max_eden_size,
+        (size_t) mem_free_limit);
 
   bool print_gc_overhead_limit_would_be_exceeded = false;
   if (is_full_gc) {
@@ -521,10 +509,7 @@
           bool near_limit = gc_overhead_limit_near();
           if (near_limit) {
             collector_policy->set_should_clear_all_soft_refs(true);
-            if (PrintGCDetails && Verbose) {
-              gclog_or_tty->print_cr("  Nearing GC overhead limit, "
-                "will be clearing all SoftReference");
-            }
+            log_trace(gc, ergo)("Nearing GC overhead limit, will be clearing all SoftReference");
           }
         }
       }
@@ -540,26 +525,25 @@
     }
   }
 
-  if (UseGCOverheadLimit && PrintGCDetails && Verbose) {
+  if (UseGCOverheadLimit) {
     if (gc_overhead_limit_exceeded()) {
-      gclog_or_tty->print_cr("      GC is exceeding overhead limit "
-        "of " UINTX_FORMAT "%%", GCTimeLimit);
+      log_trace(gc, ergo)("GC is exceeding overhead limit of " UINTX_FORMAT "%%", GCTimeLimit);
       reset_gc_overhead_limit_count();
     } else if (print_gc_overhead_limit_would_be_exceeded) {
       assert(gc_overhead_limit_count() > 0, "Should not be printing");
-      gclog_or_tty->print_cr("      GC would exceed overhead limit "
-        "of " UINTX_FORMAT "%% %d consecutive time(s)",
-        GCTimeLimit, gc_overhead_limit_count());
+      log_trace(gc, ergo)("GC would exceed overhead limit of " UINTX_FORMAT "%% %d consecutive time(s)",
+                          GCTimeLimit, gc_overhead_limit_count());
     }
   }
 }
 // Printing
 
-bool AdaptiveSizePolicy::print_adaptive_size_policy_on(outputStream* st) const {
+bool AdaptiveSizePolicy::print() const {
+  assert(UseAdaptiveSizePolicy, "UseAdaptiveSizePolicy needs to be enabled.");
 
-  //  Should only be used with adaptive size policy turned on.
-  // Otherwise, there may be variables that are undefined.
-  if (!UseAdaptiveSizePolicy) return false;
+  if (!log_is_enabled(Debug, gc, ergo)) {
+    return false;
+  }
 
   // Print goal for which action is needed.
   char* action = NULL;
@@ -627,41 +611,24 @@
     tenured_gen_action = shrink_msg;
   }
 
-  st->print_cr("    UseAdaptiveSizePolicy actions to meet %s", action);
-  st->print_cr("                       GC overhead (%%)");
-  st->print_cr("    Young generation:     %7.2f\t  %s",
-    100.0 * avg_minor_gc_cost()->average(),
-    young_gen_action);
-  st->print_cr("    Tenured generation:   %7.2f\t  %s",
-    100.0 * avg_major_gc_cost()->average(),
-    tenured_gen_action);
+  log_debug(gc, ergo)("UseAdaptiveSizePolicy actions to meet %s", action);
+  log_debug(gc, ergo)("                       GC overhead (%%)");
+  log_debug(gc, ergo)("    Young generation:     %7.2f\t  %s",
+                      100.0 * avg_minor_gc_cost()->average(), young_gen_action);
+  log_debug(gc, ergo)("    Tenured generation:   %7.2f\t  %s",
+                      100.0 * avg_major_gc_cost()->average(), tenured_gen_action);
   return true;
 }
 
-bool AdaptiveSizePolicy::print_adaptive_size_policy_on(
-                                            outputStream* st,
-                                            uint tenuring_threshold_arg) const {
-  if (!AdaptiveSizePolicy::print_adaptive_size_policy_on(st)) {
-    return false;
-  }
-
+void AdaptiveSizePolicy::print_tenuring_threshold(uint new_tenuring_threshold_arg) const {
   // Tenuring threshold
-  bool tenuring_threshold_changed = true;
   if (decrement_tenuring_threshold_for_survivor_limit()) {
-    st->print("    Tenuring threshold:    (attempted to decrease to avoid"
-              " survivor space overflow) = ");
+    log_debug(gc, ergo)("Tenuring threshold: (attempted to decrease to avoid survivor space overflow) = %u", new_tenuring_threshold_arg);
   } else if (decrement_tenuring_threshold_for_gc_cost()) {
-    st->print("    Tenuring threshold:    (attempted to decrease to balance"
-              " GC costs) = ");
+    log_debug(gc, ergo)("Tenuring threshold: (attempted to decrease to balance GC costs) = %u", new_tenuring_threshold_arg);
   } else if (increment_tenuring_threshold_for_gc_cost()) {
-    st->print("    Tenuring threshold:    (attempted to increase to balance"
-              " GC costs) = ");
+    log_debug(gc, ergo)("Tenuring threshold: (attempted to increase to balance GC costs) = %u", new_tenuring_threshold_arg);
   } else {
-    tenuring_threshold_changed = false;
     assert(!tenuring_threshold_change(), "(no change was attempted)");
   }
-  if (tenuring_threshold_changed) {
-    st->print_cr("%u", tenuring_threshold_arg);
-  }
-  return true;
 }
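
print() now bails out with log_is_enabled(Debug, gc, ergo) before building any of the action strings, the unified-logging replacement for passing an outputStream that might go nowhere. A standalone sketch of that guard shape (enabled() is a stand-in for the real query):

    #include <cstdio>

    static bool enabled() { return false; }   // stand-in: gc+ergo=debug is off

    static bool print_policy_report() {
      if (!enabled()) {
        return false;                         // skip building the report entirely
      }
      const char* action = "major GC cost";   // stub for the real analysis
      std::printf("UseAdaptiveSizePolicy actions to meet %s\n", action);
      return true;
    }

    int main() {
      if (!print_policy_report()) {
        std::puts("report suppressed (level disabled)");
      }
      return 0;
    }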
--- a/src/share/vm/gc/shared/adaptiveSizePolicy.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/adaptiveSizePolicy.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -28,6 +28,7 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcUtil.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/universe.hpp"
 
@@ -500,9 +501,8 @@
   }
 
   // Printing support
-  virtual bool print_adaptive_size_policy_on(outputStream* st) const;
-  bool print_adaptive_size_policy_on(outputStream* st,
-                                     uint tenuring_threshold) const;
+  virtual bool print() const;
+  void print_tenuring_threshold(uint new_tenuring_threshold) const;
 };
 
 // Class that can be used to print information about the
@@ -510,46 +510,26 @@
 // AdaptiveSizePolicyOutputInterval.  Only print information
 // if an adaptive size policy is in use.
 class AdaptiveSizePolicyOutput : StackObj {
-  AdaptiveSizePolicy* _size_policy;
-  bool _do_print;
-  bool print_test(uint count) {
-    // A count of zero is a special value that indicates that the
-    // interval test should be ignored.  An interval is of zero is
-    // a special value that indicates that the interval test should
-    // always fail (never do the print based on the interval test).
-    return PrintGCDetails &&
+  static bool enabled() {
+    return UseParallelGC &&
            UseAdaptiveSizePolicy &&
-           UseParallelGC &&
-           (AdaptiveSizePolicyOutputInterval > 0) &&
-           ((count == 0) ||
-             ((count % AdaptiveSizePolicyOutputInterval) == 0));
+           log_is_enabled(Debug, gc, ergo);
   }
  public:
-  // The special value of a zero count can be used to ignore
-  // the count test.
-  AdaptiveSizePolicyOutput(uint count) {
-    if (UseAdaptiveSizePolicy && (AdaptiveSizePolicyOutputInterval > 0)) {
-      CollectedHeap* heap = Universe::heap();
-      _size_policy = heap->size_policy();
-      _do_print = print_test(count);
-    } else {
-      _size_policy = NULL;
-      _do_print = false;
+  static void print() {
+    if (enabled()) {
+      Universe::heap()->size_policy()->print();
     }
   }
-  AdaptiveSizePolicyOutput(AdaptiveSizePolicy* size_policy,
-                           uint count) :
-    _size_policy(size_policy) {
-    if (UseAdaptiveSizePolicy && (AdaptiveSizePolicyOutputInterval > 0)) {
-      _do_print = print_test(count);
-    } else {
-      _do_print = false;
-    }
-  }
-  ~AdaptiveSizePolicyOutput() {
-    if (_do_print) {
-      assert(UseAdaptiveSizePolicy, "Should not be in use");
-      _size_policy->print_adaptive_size_policy_on(gclog_or_tty);
+
+  static void print(AdaptiveSizePolicy* size_policy, uint count) {
+    bool do_print =
+        enabled() &&
+        (AdaptiveSizePolicyOutputInterval > 0) &&
+        (count % AdaptiveSizePolicyOutputInterval) == 0;
+
+    if (do_print) {
+      size_policy->print();
     }
   }
 };
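
The class above turns from a per-call-site StackObj whose destructor printed into two static entry points. A sketch of the resulting call-site pattern, with a stubbed policy type and the enabled() check omitted for brevity (the interval default here is hypothetical):

    #include <cstdio>

    struct SizePolicy {
      bool print() const { std::puts("adaptive size policy report"); return true; }
    };

    struct AdaptiveSizePolicyOutput {
      static void print(SizePolicy* p, unsigned count, unsigned interval = 4) {
        if (interval > 0 && (count % interval) == 0) {
          p->print();
        }
      }
    };

    int main() {
      SizePolicy policy;
      for (unsigned gc = 0; gc < 8; gc++) {
        AdaptiveSizePolicyOutput::print(&policy, gc);   // prints at gc 0 and 4
      }
      return 0;
    }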
--- a/src/share/vm/gc/shared/ageTable.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/ageTable.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -28,6 +28,7 @@
 #include "gc/shared/collectorPolicy.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "memory/resourceArea.hpp"
+#include "logging/log.hpp"
 #include "utilities/copy.hpp"
 
 /* Copyright (c) 1992, 2015, Oracle and/or its affiliates, and Stanford University.
@@ -94,24 +95,18 @@
     result = age < MaxTenuringThreshold ? age : MaxTenuringThreshold;
   }
 
-  if (PrintTenuringDistribution || UsePerfData) {
 
-    if (PrintTenuringDistribution) {
-      gclog_or_tty->cr();
-      gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold "
-        UINTX_FORMAT " (max threshold " UINTX_FORMAT ")",
-        desired_survivor_size*oopSize, (uintx) result, MaxTenuringThreshold);
-    }
+  log_debug(gc, age)("Desired survivor size " SIZE_FORMAT " bytes, new threshold " UINTX_FORMAT " (max threshold " UINTX_FORMAT ")",
+                     desired_survivor_size*oopSize, (uintx) result, MaxTenuringThreshold);
 
+  if (log_is_enabled(Trace, gc, age) || UsePerfData) {
     size_t total = 0;
     uint age = 1;
     while (age < table_size) {
       total += sizes[age];
       if (sizes[age] > 0) {
-        if (PrintTenuringDistribution) {
-          gclog_or_tty->print_cr("- age %3u: " SIZE_FORMAT_W(10) " bytes, " SIZE_FORMAT_W(10) " total",
-                                        age,    sizes[age]*oopSize,          total*oopSize);
-        }
+        log_trace(gc, age)("- age %3u: " SIZE_FORMAT_W(10) " bytes, " SIZE_FORMAT_W(10) " total",
+                            age, sizes[age]*oopSize, total*oopSize);
       }
       if (UsePerfData) {
         _perf_sizes[age]->set_value(sizes[age]*oopSize);
--- a/src/share/vm/gc/shared/blockOffsetTable.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/blockOffsetTable.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -28,6 +28,7 @@
 #include "gc/shared/space.inline.hpp"
 #include "memory/iterator.hpp"
 #include "memory/universe.hpp"
+#include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "services/memTracker.hpp"
@@ -53,19 +54,11 @@
   }
   _offset_array = (u_char*)_vs.low_boundary();
   resize(init_word_size);
-  if (TraceBlockOffsetTable) {
-    gclog_or_tty->print_cr("BlockOffsetSharedArray::BlockOffsetSharedArray: ");
-    gclog_or_tty->print_cr("  "
-                  "  rs.base(): " INTPTR_FORMAT
-                  "  rs.size(): " INTPTR_FORMAT
-                  "  rs end(): " INTPTR_FORMAT,
-                  p2i(rs.base()), rs.size(), p2i(rs.base() + rs.size()));
-    gclog_or_tty->print_cr("  "
-                  "  _vs.low_boundary(): " INTPTR_FORMAT
-                  "  _vs.high_boundary(): " INTPTR_FORMAT,
-                  p2i(_vs.low_boundary()),
-                  p2i(_vs.high_boundary()));
-  }
+  log_trace(gc, bot)("BlockOffsetSharedArray::BlockOffsetSharedArray: ");
+  log_trace(gc, bot)("   rs.base(): " INTPTR_FORMAT " rs.size(): " INTPTR_FORMAT " rs end(): " INTPTR_FORMAT,
+                     p2i(rs.base()), rs.size(), p2i(rs.base() + rs.size()));
+  log_trace(gc, bot)("   _vs.low_boundary(): " INTPTR_FORMAT "  _vs.high_boundary(): " INTPTR_FORMAT,
+                     p2i(_vs.low_boundary()), p2i(_vs.high_boundary()));
 }
 
 void BlockOffsetSharedArray::resize(size_t new_word_size) {
--- a/src/share/vm/gc/shared/cardGeneration.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/cardGeneration.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -33,6 +33,7 @@
 #include "gc/shared/space.inline.hpp"
 #include "memory/iterator.hpp"
 #include "memory/memRegion.hpp"
+#include "logging/log.hpp"
 #include "runtime/java.hpp"
 
 CardGeneration::CardGeneration(ReservedSpace rs,
@@ -96,13 +97,10 @@
     // update the space and generation capacity counters
     update_counters();
 
-    if (Verbose && PrintGC) {
-      size_t new_mem_size = _virtual_space.committed_size();
-      size_t old_mem_size = new_mem_size - bytes;
-      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
-                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
-                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
-    }
+    size_t new_mem_size = _virtual_space.committed_size();
+    size_t old_mem_size = new_mem_size - bytes;
+    log_trace(gc, heap)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
+                    name(), old_mem_size/K, bytes/K, new_mem_size/K);
   }
   return result;
 }
@@ -133,10 +131,8 @@
   if (!success) {
     success = grow_to_reserved();
   }
-  if (PrintGC && Verbose) {
-    if (success && GC_locker::is_active_and_needs_gc()) {
-      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
-    }
+  if (success && GC_locker::is_active_and_needs_gc()) {
+    log_trace(gc, heap)("Garbage collection disabled, expanded heap instead");
   }
 
   return success;
@@ -172,12 +168,10 @@
   // Shrink the card table
   GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr);
 
-  if (Verbose && PrintGC) {
-    size_t new_mem_size = _virtual_space.committed_size();
-    size_t old_mem_size = new_mem_size + size;
-    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
-                  name(), old_mem_size/K, new_mem_size/K);
-  }
+  size_t new_mem_size = _virtual_space.committed_size();
+  size_t old_mem_size = new_mem_size + size;
+  log_trace(gc, heap)("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
+                      name(), old_mem_size/K, new_mem_size/K);
 }
 
 // No young generation references, clear this generation's cards.
@@ -211,26 +205,17 @@
   minimum_desired_capacity = MAX2(minimum_desired_capacity, initial_size());
   assert(used_after_gc <= minimum_desired_capacity, "sanity check");
 
-  if (PrintGC && Verbose) {
     const size_t free_after_gc = free();
     const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
-    gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
-    gclog_or_tty->print_cr("  "
-                  "  minimum_free_percentage: %6.2f"
-                  "  maximum_used_percentage: %6.2f",
+    log_trace(gc, heap)("TenuredGeneration::compute_new_size:");
+    log_trace(gc, heap)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                   minimum_free_percentage,
                   maximum_used_percentage);
-    gclog_or_tty->print_cr("  "
-                  "   free_after_gc   : %6.1fK"
-                  "   used_after_gc   : %6.1fK"
-                  "   capacity_after_gc   : %6.1fK",
+    log_trace(gc, heap)("     free_after_gc   : %6.1fK   used_after_gc   : %6.1fK   capacity_after_gc   : %6.1fK",
                   free_after_gc / (double) K,
                   used_after_gc / (double) K,
                   capacity_after_gc / (double) K);
-    gclog_or_tty->print_cr("  "
-                  "   free_percentage: %6.2f",
-                  free_percentage);
-  }
+    log_trace(gc, heap)("     free_percentage: %6.2f", free_percentage);
 
   if (capacity_after_gc < minimum_desired_capacity) {
     // If we have less free space than we want then expand
@@ -239,15 +224,10 @@
     if (expand_bytes >= _min_heap_delta_bytes) {
       expand(expand_bytes, 0); // safe if expansion fails
     }
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("    expanding:"
-                    "  minimum_desired_capacity: %6.1fK"
-                    "  expand_bytes: %6.1fK"
-                    "  _min_heap_delta_bytes: %6.1fK",
-                    minimum_desired_capacity / (double) K,
-                    expand_bytes / (double) K,
-                    _min_heap_delta_bytes / (double) K);
-    }
+    log_trace(gc, heap)("    expanding:  minimum_desired_capacity: %6.1fK  expand_bytes: %6.1fK  _min_heap_delta_bytes: %6.1fK",
+                  minimum_desired_capacity / (double) K,
+                  expand_bytes / (double) K,
+                  _min_heap_delta_bytes / (double) K);
     return;
   }
 
@@ -262,20 +242,12 @@
     const double max_tmp = used_after_gc / minimum_used_percentage;
     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
     maximum_desired_capacity = MAX2(maximum_desired_capacity, initial_size());
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("  "
-                             "  maximum_free_percentage: %6.2f"
-                             "  minimum_used_percentage: %6.2f",
-                             maximum_free_percentage,
-                             minimum_used_percentage);
-      gclog_or_tty->print_cr("  "
-                             "  _capacity_at_prologue: %6.1fK"
-                             "  minimum_desired_capacity: %6.1fK"
-                             "  maximum_desired_capacity: %6.1fK",
+    log_trace(gc, heap)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
+                             maximum_free_percentage, minimum_used_percentage);
+    log_trace(gc, heap)("    _capacity_at_prologue: %6.1fK  minimum_desired_capacity: %6.1fK  maximum_desired_capacity: %6.1fK",
                              _capacity_at_prologue / (double) K,
                              minimum_desired_capacity / (double) K,
                              maximum_desired_capacity / (double) K);
-    }
     assert(minimum_desired_capacity <= maximum_desired_capacity,
            "sanity check");
 
@@ -295,23 +267,13 @@
       } else {
         _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
       }
-      if (PrintGC && Verbose) {
-        gclog_or_tty->print_cr("  "
-                               "  shrinking:"
-                               "  initSize: %.1fK"
-                               "  maximum_desired_capacity: %.1fK",
-                               initial_size() / (double) K,
-                               maximum_desired_capacity / (double) K);
-        gclog_or_tty->print_cr("  "
-                               "  shrink_bytes: %.1fK"
-                               "  current_shrink_factor: " SIZE_FORMAT
-                               "  new shrink factor: " SIZE_FORMAT
-                               "  _min_heap_delta_bytes: %.1fK",
+      log_trace(gc, heap)("    shrinking:  initSize: %.1fK  maximum_desired_capacity: %.1fK",
+                               initial_size() / (double) K, maximum_desired_capacity / (double) K);
+      log_trace(gc, heap)("    shrink_bytes: %.1fK  current_shrink_factor: " SIZE_FORMAT "  new shrink factor: " SIZE_FORMAT "  _min_heap_delta_bytes: %.1fK",
                                shrink_bytes / (double) K,
                                current_shrink_factor,
                                _shrink_factor,
                                _min_heap_delta_bytes / (double) K);
-      }
     }
   }
 
@@ -324,18 +286,11 @@
     // We have two shrinking computations, take the largest
     shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
     assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("  "
-                             "  aggressive shrinking:"
-                             "  _capacity_at_prologue: %.1fK"
-                             "  capacity_after_gc: %.1fK"
-                             "  expansion_for_promotion: %.1fK"
-                             "  shrink_bytes: %.1fK",
-                             capacity_after_gc / (double) K,
-                             _capacity_at_prologue / (double) K,
-                             expansion_for_promotion / (double) K,
-                             shrink_bytes / (double) K);
-    }
+    log_trace(gc, heap)("    aggressive shrinking:  _capacity_at_prologue: %.1fK  capacity_after_gc: %.1fK  expansion_for_promotion: %.1fK  shrink_bytes: %.1fK",
+                        capacity_after_gc / (double) K,
+                        _capacity_at_prologue / (double) K,
+                        expansion_for_promotion / (double) K,
+                        shrink_bytes / (double) K);
   }
   // Don't shrink unless it's significant
   if (shrink_bytes >= _min_heap_delta_bytes) {
--- a/src/share/vm/gc/shared/cardTableModRefBS.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/cardTableModRefBS.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -28,6 +28,7 @@
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "memory/virtualspace.hpp"
+#include "logging/log.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/macros.hpp"
 
@@ -115,17 +116,10 @@
                             !ExecMem, "card table last card");
   *guard_card = last_card;
 
-  if (TraceCardTableModRefBS) {
-    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
-    gclog_or_tty->print_cr("  "
-                  "  &_byte_map[0]: " INTPTR_FORMAT
-                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
-                  p2i(&_byte_map[0]),
-                  p2i(&_byte_map[_last_valid_index]));
-    gclog_or_tty->print_cr("  "
-                  "  byte_map_base: " INTPTR_FORMAT,
-                  p2i(byte_map_base));
-  }
+  log_trace(gc, barrier)("CardTableModRefBS::CardTableModRefBS: ");
+  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
+                  p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
+  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
 }
 
 CardTableModRefBS::~CardTableModRefBS() {
@@ -350,29 +344,17 @@
   }
   // In any case, the covered size changes.
   _covered[ind].set_word_size(new_region.word_size());
-  if (TraceCardTableModRefBS) {
-    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
-    gclog_or_tty->print_cr("  "
-                  "  _covered[%d].start(): " INTPTR_FORMAT
-                  "  _covered[%d].last(): " INTPTR_FORMAT,
-                  ind, p2i(_covered[ind].start()),
-                  ind, p2i(_covered[ind].last()));
-    gclog_or_tty->print_cr("  "
-                  "  _committed[%d].start(): " INTPTR_FORMAT
-                  "  _committed[%d].last(): " INTPTR_FORMAT,
-                  ind, p2i(_committed[ind].start()),
-                  ind, p2i(_committed[ind].last()));
-    gclog_or_tty->print_cr("  "
-                  "  byte_for(start): " INTPTR_FORMAT
-                  "  byte_for(last): " INTPTR_FORMAT,
-                  p2i(byte_for(_covered[ind].start())),
-                  p2i(byte_for(_covered[ind].last())));
-    gclog_or_tty->print_cr("  "
-                  "  addr_for(start): " INTPTR_FORMAT
-                  "  addr_for(last): " INTPTR_FORMAT,
-                  p2i(addr_for((jbyte*) _committed[ind].start())),
-                  p2i(addr_for((jbyte*) _committed[ind].last())));
-  }
+
+  log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
+  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
+                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
+  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
+                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
+  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
+                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
+  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
+                         p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));
+
   // Touch the last card of the covered region to show that it
   // is committed (or SEGV).
   debug_only((void) (*byte_for(_covered[ind].last()));)
--- a/src/share/vm/gc/shared/collectedHeap.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/collectedHeap.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -30,9 +30,10 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/gcWhen.hpp"
 #include "gc/shared/vmGCOperations.hpp"
+#include "logging/log.hpp"
 #include "memory/metaspace.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/oop.inline.hpp"
@@ -53,7 +54,7 @@
   st->print_raw(m);
 }
 
-void GCHeapLog::log_heap(bool before) {
+void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
   if (!should_log()) {
     return;
   }
@@ -65,11 +66,14 @@
   _records[index].timestamp = timestamp;
   _records[index].data.is_before = before;
   stringStream st(_records[index].data.buffer(), _records[index].data.size());
-  if (before) {
-    Universe::print_heap_before_gc(&st, true);
-  } else {
-    Universe::print_heap_after_gc(&st, true);
-  }
+
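+  // Write the same record that Universe::print_heap_{before,after}_gc used to
+  // produce: a header with the collection counts, then the heap's own printout.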
+  st.print_cr("{Heap %s GC invocations=%u (full %u):",
+                 before ? "before" : "after",
+                 heap->total_collections(),
+                 heap->total_full_collections());
+
+  heap->print_on(&st);
+  st.print_cr("}");
 }
 
 VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
@@ -108,20 +112,16 @@
 }
 
 void CollectedHeap::print_heap_before_gc() {
-  if (PrintHeapAtGC) {
-    Universe::print_heap_before_gc();
-  }
+  Universe::print_heap_before_gc();
   if (_gc_heap_log != NULL) {
-    _gc_heap_log->log_heap_before();
+    _gc_heap_log->log_heap_before(this);
   }
 }
 
 void CollectedHeap::print_heap_after_gc() {
-  if (PrintHeapAtGC) {
-    Universe::print_heap_after_gc();
-  }
+  Universe::print_heap_after_gc();
   if (_gc_heap_log != NULL) {
-    _gc_heap_log->log_heap_after();
+    _gc_heap_log->log_heap_after(this);
   }
 }
 
@@ -571,34 +571,30 @@
   }
 }
 
-void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
-  if (HeapDumpBeforeFullGC) {
+void CollectedHeap::full_gc_dump(GCTimer* timer, const char* when) {
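+  // "when" is either "before" or "after"; pre_full_gc_dump() and
+  // post_full_gc_dump() below pass the matching string.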
+  if (HeapDumpBeforeFullGC || HeapDumpAfterFullGC) {
     GCIdMarkAndRestore gc_id_mark;
-    GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer);
-    // We are doing a full collection and a heap dump before
-    // full collection has been requested.
+    FormatBuffer<> title("Heap Dump (%s full gc)", when);
+    GCTraceTime(Info, gc) tm(title.buffer(), timer);
     HeapDumper::dump_heap();
   }
-  if (PrintClassHistogramBeforeFullGC) {
+  LogHandle(gc, classhisto) log;
+  if (log.is_trace()) {
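+    // Heap inspection is expensive, so only run it when (gc, classhisto)
+    // trace logging is enabled; the histogram goes to the log stream directly.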
+    ResourceMark rm;
     GCIdMarkAndRestore gc_id_mark;
-    GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer);
-    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
+    FormatBuffer<> title("Class Histogram (%s full gc)", when);
+    GCTraceTime(Trace, gc, classhisto) tm(title.buffer(), timer);
+    VM_GC_HeapInspection inspector(log.trace_stream(), false /* ! full gc */);
     inspector.doit();
   }
 }
 
+void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
+  full_gc_dump(timer, "before");
+}
+
 void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
-  if (HeapDumpAfterFullGC) {
-    GCIdMarkAndRestore gc_id_mark;
-    GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer);
-    HeapDumper::dump_heap();
-  }
-  if (PrintClassHistogramAfterFullGC) {
-    GCIdMarkAndRestore gc_id_mark;
-    GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer);
-    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
-    inspector.doit();
-  }
+  full_gc_dump(timer, "after");
 }
 
 void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
--- a/src/share/vm/gc/shared/collectedHeap.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/collectedHeap.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -58,18 +58,20 @@
   GCMessage() {}
 };
 
+class CollectedHeap;
+
 class GCHeapLog : public EventLogBase<GCMessage> {
  private:
-  void log_heap(bool before);
+  void log_heap(CollectedHeap* heap, bool before);
 
  public:
   GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}
 
-  void log_heap_before() {
-    log_heap(true);
+  void log_heap_before(CollectedHeap* heap) {
+    log_heap(heap, true);
   }
-  void log_heap_after() {
-    log_heap(false);
+  void log_heap_after(CollectedHeap* heap) {
+    log_heap(heap, false);
   }
 };
 
@@ -196,6 +198,8 @@
 
   virtual Name kind() const = 0;
 
+  virtual const char* name() const = 0;
+
   /**
    * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
    * and JNI_OK on success.
@@ -520,6 +524,9 @@
   virtual void prepare_for_verify() = 0;
 
   // Generate any dumps preceding or following a full gc
+ private:
+  void full_gc_dump(GCTimer* timer, const char* when);
+ public:
   void pre_full_gc_dump(GCTimer* timer);
   void post_full_gc_dump(GCTimer* timer);
 
@@ -570,7 +577,7 @@
   void trace_heap_after_gc(const GCTracer* gc_tracer);
 
   // Heap verification
-  virtual void verify(bool silent, VerifyOption option) = 0;
+  virtual void verify(VerifyOption option) = 0;
 
   // Non product verification and debugging.
 #ifndef PRODUCT
--- a/src/share/vm/gc/shared/collectorPolicy.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/collectorPolicy.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -32,6 +32,7 @@
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/space.hpp"
 #include "gc/shared/vmGCOperations.hpp"
+#include "logging/log.hpp"
 #include "memory/universe.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/globals_extension.hpp"
@@ -137,11 +138,8 @@
 }
 
 void CollectorPolicy::initialize_size_info() {
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT "  Initial heap "
-      SIZE_FORMAT "  Maximum heap " SIZE_FORMAT,
-      _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
-  }
+  log_debug(gc, heap)("Minimum heap " SIZE_FORMAT "  Initial heap " SIZE_FORMAT "  Maximum heap " SIZE_FORMAT,
+                      _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
 
   DEBUG_ONLY(CollectorPolicy::assert_size_info();)
 }
@@ -488,11 +486,8 @@
     }
   }
 
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print_cr("1: Minimum young " SIZE_FORMAT "  Initial young "
-      SIZE_FORMAT "  Maximum young " SIZE_FORMAT,
-      _min_young_size, _initial_young_size, _max_young_size);
-  }
+  log_trace(gc, heap)("1: Minimum young " SIZE_FORMAT "  Initial young " SIZE_FORMAT "  Maximum young " SIZE_FORMAT,
+                      _min_young_size, _initial_young_size, _max_young_size);
 
   // At this point the minimum, initial and maximum sizes
   // of the overall heap and of the young generation have been determined.
@@ -558,11 +553,8 @@
       _initial_young_size = desired_young_size;
     }
 
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("2: Minimum young " SIZE_FORMAT "  Initial young "
-        SIZE_FORMAT "  Maximum young " SIZE_FORMAT,
-        _min_young_size, _initial_young_size, _max_young_size);
-    }
+    log_trace(gc, heap)("2: Minimum young " SIZE_FORMAT "  Initial young " SIZE_FORMAT "  Maximum young " SIZE_FORMAT,
+                        _min_young_size, _initial_young_size, _max_young_size);
   }
 
   // Write back to flags if necessary.
@@ -578,11 +570,8 @@
     FLAG_SET_ERGO(size_t, OldSize, _initial_old_size);
   }
 
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print_cr("Minimum old " SIZE_FORMAT "  Initial old "
-      SIZE_FORMAT "  Maximum old " SIZE_FORMAT,
-      _min_old_size, _initial_old_size, _max_old_size);
-  }
+  log_trace(gc, heap)("Minimum old " SIZE_FORMAT "  Initial old " SIZE_FORMAT "  Maximum old " SIZE_FORMAT,
+                      _min_old_size, _initial_old_size, _max_old_size);
 
   DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
 }
@@ -620,10 +609,7 @@
     uint gc_count_before;  // Read inside the Heap_lock locked region.
     {
       MutexLocker ml(Heap_lock);
-      if (PrintGC && Verbose) {
-        gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:"
-                               " attempting locked slow path allocation");
-      }
+      log_trace(gc, alloc)("GenCollectorPolicy::mem_allocate_work: attempting locked slow path allocation");
       // Note that only large objects get a shot at being
       // allocated in later generations.
       bool first_only = ! should_try_older_generation_allocation(size);
@@ -757,9 +743,7 @@
                        is_tlab,                   // is_tlab
                        GenCollectedHeap::OldGen); // max_generation
   } else {
-    if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
-    }
+    log_trace(gc)(" :: Trying full because partial may fail :: ");
     // Try a full collection; see delta for bug id 6266275
     // for the original code and why this has been simplified
     // with from-space allocation criteria modified and
--- a/src/share/vm/gc/shared/concurrentGCThread.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/concurrentGCThread.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -51,7 +51,6 @@
 
 void ConcurrentGCThread::initialize_in_thread() {
   this->record_stack_base_and_size();
-  this->initialize_thread_local_storage();
   this->initialize_named_thread();
   this->set_active_handles(JNIHandleBlock::allocate_block());
   // From this time Thread::current() should be working.
@@ -74,9 +73,6 @@
     _has_terminated = true;
     Terminator_lock->notify();
   }
-
-  // Thread destructor usually does this..
-  ThreadLocalStorage::set_thread(NULL);
 }
 
 static void _sltLoop(JavaThread* thread, TRAPS) {
--- a/src/share/vm/gc/shared/gcCause.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/gcCause.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -125,36 +125,4 @@
   static const char* to_string(GCCause::Cause cause);
 };
 
-// Helper class for doing logging that includes the GC Cause
-// as a string.
-class GCCauseString : StackObj {
- private:
-   static const int _length = 128;
-   char _buffer[_length];
-   int _position;
-
- public:
-   GCCauseString(const char* prefix, GCCause::Cause cause) {
-     if (PrintGCCause) {
-      _position = jio_snprintf(_buffer, _length, "%s (%s) ", prefix, GCCause::to_string(cause));
-     } else {
-      _position = jio_snprintf(_buffer, _length, "%s ", prefix);
-     }
-     assert(_position >= 0 && _position <= _length,
-            "Need to increase the buffer size in GCCauseString? %d", _position);
-   }
-
-   GCCauseString& append(const char* str) {
-     int res = jio_snprintf(_buffer + _position, _length - _position, "%s", str);
-     _position += res;
-     assert(res >= 0 && _position <= _length,
-            "Need to increase the buffer size in GCCauseString? %d", res);
-     return *this;
-   }
-
-   operator const char*() {
-     return _buffer;
-   }
-};
-
 #endif // SHARE_VM_GC_SHARED_GCCAUSE_HPP
--- a/src/share/vm/gc/shared/gcId.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/gcId.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -26,6 +26,7 @@
 #include "gc/shared/gcId.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.inline.hpp"
+#include "runtime/threadLocalStorage.hpp"
 
 uint GCId::_next_id = 0;
 
@@ -47,6 +48,18 @@
   return currentNamedthread()->gc_id();
 }
 
+size_t GCId::print_prefix(char* buf, size_t len) {
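+  // Writes a "GC(<id>) " prefix into buf and returns the number of characters
+  // written, or 0 when the current thread has no active GC id.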
+  // ThreadLocalStorage::thread() may still be NULL for this thread even when
+  // TLS itself is initialized, so check before dereferencing.
+  Thread* thread = ThreadLocalStorage::is_initialized() ? ThreadLocalStorage::thread() : NULL;
+  if (thread != NULL && thread->is_Named_thread()) {
+    uint gc_id = current_raw();
+    if (gc_id != undefined()) {
+      int ret = jio_snprintf(buf, len, "GC(%u) ", gc_id);
+      assert(ret > 0, "Failed to print prefix. Log buffer too small?");
+      return (size_t)ret;
+    }
+  }
+  return 0;
+}
+
 GCIdMark::GCIdMark() : _gc_id(GCId::create()) {
   currentNamedthread()->set_gc_id(_gc_id);
 }
--- a/src/share/vm/gc/shared/gcId.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/gcId.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -40,6 +40,7 @@
   // Same as current() but can return undefined() if no GC id is currently active
   static const uint current_raw();
   static const uint undefined() { return UNDEFINED; }
+  static size_t print_prefix(char* buf, size_t len);
 };
 
 class GCIdMark : public StackObj {
--- a/src/share/vm/gc/shared/gcLocker.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/gcLocker.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -26,6 +26,7 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "logging/log.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
@@ -73,17 +74,20 @@
 }
 #endif
 
+void GC_locker::log_debug_jni(const char* msg) {
+  LogHandle(gc, jni) log;
+  if (log.is_debug()) {
+    ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
+    log.debug("%s Thread \"%s\" %d locked.", msg, Thread::current()->name(), _jni_lock_count);
+  }
+}
+
 bool GC_locker::check_active_before_gc() {
   assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
   if (is_active() && !_needs_gc) {
     verify_critical_count();
     _needs_gc = true;
-    if (PrintJNIGCStalls && PrintGCDetails) {
-      ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
-      gclog_or_tty->print_cr("%.3f: Setting _needs_gc. Thread \"%s\" %d locked.",
-                             gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
-    }
-
+    log_debug_jni("Setting _needs_gc.");
   }
   return is_active();
 }
@@ -93,11 +97,7 @@
   MutexLocker   ml(JNICritical_lock);
 
   if (needs_gc()) {
-    if (PrintJNIGCStalls && PrintGCDetails) {
-      ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
-      gclog_or_tty->print_cr("%.3f: Allocation failed. Thread \"%s\" is stalled by JNI critical section, %d locked.",
-                             gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
-    }
+    log_debug_jni("Allocation failed. Thread stalled by JNI critical section.");
   }
 
   // Wait for _needs_gc  to be cleared
@@ -134,11 +134,7 @@
     {
       // Must give up the lock while at a safepoint
       MutexUnlocker munlock(JNICritical_lock);
-      if (PrintJNIGCStalls && PrintGCDetails) {
-        ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
-        gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
-            gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
-      }
+      log_debug_jni("Performing GC after exiting critical section.");
       Universe::heap()->collect(GCCause::_gc_locker);
     }
     _doing_gc = false;
--- a/src/share/vm/gc/shared/gcLocker.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/gcLocker.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -64,6 +64,7 @@
     return _jni_lock_count > 0;
   }
 
+  static void log_debug_jni(const char* msg);
  public:
   // Accessors
   static bool is_active() {
--- a/src/share/vm/gc/shared/gcTraceTime.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/gcTraceTime.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -23,57 +23,38 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.hpp"
-#include "runtime/globals.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "logging/log.hpp"
 #include "runtime/os.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/timer.hpp"
-#include "utilities/ostream.hpp"
-#include "utilities/ticks.inline.hpp"
-
 
-GCTraceTimeImpl::GCTraceTimeImpl(const char* title, bool doit, bool print_cr, GCTimer* timer) :
-    _title(title), _doit(doit), _print_cr(print_cr), _timer(timer), _start_counter() {
-  if (_doit || _timer != NULL) {
-    _start_counter.stamp();
-  }
-
-  if (_timer != NULL) {
-    assert(SafepointSynchronize::is_at_safepoint(), "Tracing currently only supported at safepoints");
-    assert(Thread::current()->is_VM_thread(), "Tracing currently only supported from the VM thread");
-
-    _timer->register_gc_phase_start(title, _start_counter);
-  }
-
-  if (_doit) {
-    gclog_or_tty->gclog_stamp();
-    gclog_or_tty->print("[%s", title);
-    gclog_or_tty->flush();
+GCTraceCPUTime::GCTraceCPUTime() :
+  _active(log_is_enabled(Info, gc, cpu)),
+  _starting_user_time(0.0),
+  _starting_system_time(0.0),
+  _starting_real_time(0.0)
+{
+  if (_active) {
+    bool valid = os::getTimesSecs(&_starting_real_time,
+                                  &_starting_user_time,
+                                  &_starting_system_time);
+    if (!valid) {
+      log_warning(gc, cpu)("TraceCPUTime: os::getTimesSecs() returned invalid result");
+      _active = false;
+    }
   }
 }
 
-GCTraceTimeImpl::~GCTraceTimeImpl() {
-  Ticks stop_counter;
-
-  if (_doit || _timer != NULL) {
-    stop_counter.stamp();
-  }
-
-  if (_timer != NULL) {
-    _timer->register_gc_phase_end(stop_counter);
-  }
-
-  if (_doit) {
-    const Tickspan duration = stop_counter - _start_counter;
-    double duration_in_seconds = TicksToTimeHelper::seconds(duration);
-    if (_print_cr) {
-      gclog_or_tty->print_cr(", %3.7f secs]", duration_in_seconds);
+GCTraceCPUTime::~GCTraceCPUTime() {
+  if (_active) {
+    double real_time, user_time, system_time;
+    bool valid = os::getTimesSecs(&real_time, &user_time, &system_time);
+    if (valid) {
+      log_info(gc, cpu)("User=%3.2fs Sys=%3.2fs Real=%3.2fs",
+                        user_time - _starting_user_time,
+                        system_time - _starting_system_time,
+                        real_time - _starting_real_time);
     } else {
-      gclog_or_tty->print(", %3.7f secs]", duration_in_seconds);
+      log_warning(gc, cpu)("TraceCPUTime: os::getTimesSecs() returned invalid result");
     }
-    gclog_or_tty->flush();
   }
 }
--- a/src/share/vm/gc/shared/gcTraceTime.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/gcTraceTime.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -25,31 +25,55 @@
 #ifndef SHARE_VM_GC_SHARED_GCTRACETIME_HPP
 #define SHARE_VM_GC_SHARED_GCTRACETIME_HPP
 
-#include "gc/shared/gcTrace.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
-#include "prims/jni_md.h"
 #include "utilities/ticks.hpp"
 
+class GCTraceCPUTime : public StackObj {
+  bool _active;                 // true if times will be measured and printed
+  double _starting_user_time;   // user time at start of measurement
+  double _starting_system_time; // system time at start of measurement
+  double _starting_real_time;   // real time at start of measurement
+ public:
+  GCTraceCPUTime();
+  ~GCTraceCPUTime();
+};
+
 class GCTimer;
 
-class GCTraceTimeImpl VALUE_OBJ_CLASS_SPEC {
+template <LogLevelType Level, LogTagType T0, LogTagType T1 = LogTag::__NO_TAG, LogTagType T2 = LogTag::__NO_TAG, LogTagType T3 = LogTag::__NO_TAG,
+    LogTagType T4 = LogTag::__NO_TAG, LogTagType GuardTag = LogTag::__NO_TAG>
+class GCTraceTimeImpl : public StackObj {
+ private:
+  bool _enabled;
+  Ticks _start_ticks;
   const char* _title;
-  bool _doit;
-  bool _print_cr;
+  GCCause::Cause _gc_cause;
   GCTimer* _timer;
-  Ticks _start_counter;
+  size_t _heap_usage_before;
+
+  void log_start(jlong start_counter);
+  void log_stop(jlong start_counter, jlong stop_counter);
+  void time_stamp(Ticks& ticks);
 
  public:
-  GCTraceTimeImpl(const char* title, bool doit, bool print_cr, GCTimer* timer);
+  GCTraceTimeImpl(const char* title, GCTimer* timer = NULL, GCCause::Cause gc_cause = GCCause::_no_gc, bool log_heap_usage = false);
   ~GCTraceTimeImpl();
 };
 
-class GCTraceTime : public StackObj {
-  GCTraceTimeImpl _gc_trace_time_impl;
-
+// Similar to GCTraceTimeImpl, but intended for concurrent phase logging;
+// it is a bit simpler and always prints the start line, i.e. it does not add the "start" tag.
+template <LogLevelType Level, LogTagType T0, LogTagType T1 = LogTag::__NO_TAG, LogTagType T2 = LogTag::__NO_TAG, LogTagType T3 = LogTag::__NO_TAG,
+    LogTagType T4 = LogTag::__NO_TAG, LogTagType GuardTag = LogTag::__NO_TAG>
+class GCTraceConcTimeImpl : public StackObj {
+ private:
+  bool _enabled;
+  jlong _start_time;
+  const char* _title;
  public:
-  GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer) :
-    _gc_trace_time_impl(title, doit, print_cr, timer) {};
+  GCTraceConcTimeImpl(const char* title);
+  ~GCTraceConcTimeImpl();
+  jlong start_time() { return _start_time; }
 };
 
 #endif // SHARE_VM_GC_SHARED_GCTRACETIME_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shared/gcTraceTime.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_GCTRACETIME_INLINE_HPP
+#define SHARE_VM_GC_SHARED_GCTRACETIME_INLINE_HPP
+
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/gcTimer.hpp"
+#include "gc/shared/gcTrace.hpp"
+#include "gc/shared/gcTraceTime.hpp"
+#include "logging/log.hpp"
+#include "memory/universe.hpp"
+#include "prims/jni_md.h"
+#include "utilities/ticks.hpp"
+#include "runtime/timer.hpp"
+
+#define LOG_STOP_TIME_FORMAT "(%.3fs, %.3fs) %.3fms"
+#define LOG_STOP_HEAP_FORMAT SIZE_FORMAT "M->" SIZE_FORMAT "M(" SIZE_FORMAT "M)"
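+// Stop lines print as: <title> <used-before>M-><used-after>M(<capacity>M) (<start>s, <stop>s) <duration>ms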
+
+template <LogLevelType Level, LogTagType T0, LogTagType T1, LogTagType T2, LogTagType T3, LogTagType T4, LogTagType GuardTag >
+void GCTraceTimeImpl<Level, T0, T1, T2, T3, T4, GuardTag>::log_start(jlong start_counter) {
+  if (Log<PREFIX_LOG_TAG(start), T0, T1, T2, T3>::is_level(Level)) {
+    FormatBuffer<> start_msg("%s", _title);
+    if (_gc_cause != GCCause::_no_gc) {
+      start_msg.append(" (%s)", GCCause::to_string(_gc_cause));
+    }
+    start_msg.append(" (%.3fs)", TimeHelper::counter_to_seconds(start_counter));
+    // Make sure to put the "start" tag last in the tag set
+    STATIC_ASSERT(T0 != LogTag::__NO_TAG); // Need some tag to log on.
+    STATIC_ASSERT(T4 == LogTag::__NO_TAG); // Need to leave at least the last tag for the "start" tag in log_start()
+    if (T1 == LogTag::__NO_TAG) {
+      Log<T0, PREFIX_LOG_TAG(start)>::template write<Level>("%s", start_msg.buffer());
+    } else if (T2 == LogTag::__NO_TAG) {
+      Log<T0, T1, PREFIX_LOG_TAG(start)>::template write<Level>("%s", start_msg.buffer());
+    } else if (T3 == LogTag::__NO_TAG) {
+      Log<T0, T1, T2, PREFIX_LOG_TAG(start)>::template write<Level>("%s", start_msg.buffer());
+    } else {
+      Log<T0, T1, T2, T3, PREFIX_LOG_TAG(start)>::template write<Level>("%s", start_msg.buffer());
+    }
+  }
+}
+
+template <LogLevelType Level, LogTagType T0, LogTagType T1, LogTagType T2, LogTagType T3, LogTagType T4, LogTagType GuardTag >
+void GCTraceTimeImpl<Level, T0, T1, T2, T3, T4, GuardTag>::log_stop(jlong start_counter, jlong stop_counter) {
+  double duration_in_ms = TimeHelper::counter_to_millis(stop_counter - start_counter);
+  double start_time_in_secs = TimeHelper::counter_to_seconds(start_counter);
+  double stop_time_in_secs = TimeHelper::counter_to_seconds(stop_counter);
+  FormatBuffer<> stop_msg("%s", _title);
+  if (_gc_cause != GCCause::_no_gc) {
+    stop_msg.append(" (%s)", GCCause::to_string(_gc_cause));
+  }
+  if (_heap_usage_before == SIZE_MAX) {
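+    // SIZE_MAX means heap usage logging was not requested; print timing only.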
+    Log<T0, T1, T2, T3, T4>::template write<Level>("%s " LOG_STOP_TIME_FORMAT,
+        stop_msg.buffer(), start_time_in_secs, stop_time_in_secs, duration_in_ms);
+  } else {
+    CollectedHeap* heap = Universe::heap();
+    size_t used_before_m = _heap_usage_before / M;
+    size_t used_m = heap->used() / M;
+    size_t capacity_m = heap->capacity() / M;
+    Log<T0, T1, T2, T3, T4>::template write<Level>("%s " LOG_STOP_HEAP_FORMAT " " LOG_STOP_TIME_FORMAT,
+        stop_msg.buffer(), used_before_m, used_m, capacity_m, start_time_in_secs, stop_time_in_secs, duration_in_ms);
+  }
+}
+
+template <LogLevelType Level, LogTagType T0, LogTagType T1, LogTagType T2, LogTagType T3, LogTagType T4, LogTagType GuardTag >
+void GCTraceTimeImpl<Level, T0, T1, T2, T3, T4, GuardTag>::time_stamp(Ticks& ticks) {
+  if (_enabled || _timer != NULL) {
+    ticks.stamp();
+  }
+}
+
+template <LogLevelType Level, LogTagType T0, LogTagType T1, LogTagType T2, LogTagType T3, LogTagType T4, LogTagType GuardTag >
+GCTraceTimeImpl<Level, T0, T1, T2, T3, T4, GuardTag>::GCTraceTimeImpl(const char* title, GCTimer* timer, GCCause::Cause gc_cause, bool log_heap_usage) :
+  _enabled(Log<T0, T1, T2, T3, T4, GuardTag>::is_level(Level)),
+  _start_ticks(),
+  _heap_usage_before(SIZE_MAX),
+  _title(title),
+  _gc_cause(gc_cause),
+  _timer(timer) {
+
+  time_stamp(_start_ticks);
+  if (_enabled) {
+    if (log_heap_usage) {
+      _heap_usage_before = Universe::heap()->used();
+    }
+    log_start(_start_ticks.value());
+  }
+  if (_timer != NULL) {
+    _timer->register_gc_phase_start(_title, _start_ticks);
+  }
+}
+
+template <LogLevelType Level, LogTagType T0, LogTagType T1, LogTagType T2, LogTagType T3, LogTagType T4, LogTagType GuardTag >
+GCTraceTimeImpl<Level, T0, T1, T2, T3, T4, GuardTag>::~GCTraceTimeImpl() {
+  Ticks stop_ticks;
+  time_stamp(stop_ticks);
+  if (_enabled) {
+    log_stop(_start_ticks.value(), stop_ticks.value());
+  }
+  if (_timer != NULL) {
+    _timer->register_gc_phase_end(stop_ticks);
+  }
+}
+
+template <LogLevelType Level, LogTagType T0, LogTagType T1, LogTagType T2, LogTagType T3, LogTagType T4, LogTagType GuardTag >
+GCTraceConcTimeImpl<Level, T0, T1, T2, T3, T4, GuardTag>::GCTraceConcTimeImpl(const char* title) :
+  _enabled(Log<T0, T1, T2, T3, T4, GuardTag>::is_level(Level)), _start_time(os::elapsed_counter()), _title(title) {
+  if (_enabled) {
+    Log<T0, T1, T2, T3, T4>::template write<Level>("%s (%.3fs)", _title, TimeHelper::counter_to_seconds(_start_time));
+  }
+}
+
+template <LogLevelType Level, LogTagType T0, LogTagType T1, LogTagType T2, LogTagType T3, LogTagType T4, LogTagType GuardTag >
+GCTraceConcTimeImpl<Level, T0, T1, T2, T3, T4, GuardTag>::~GCTraceConcTimeImpl() {
+  if (_enabled) {
+    jlong stop_time = os::elapsed_counter();
+    Log<T0, T1, T2, T3, T4>::template write<Level>("%s " LOG_STOP_TIME_FORMAT,
+                                                   _title,
+                                                   TimeHelper::counter_to_seconds(_start_time),
+                                                   TimeHelper::counter_to_seconds(stop_time),
+                                                   TimeHelper::counter_to_millis(stop_time - _start_time));
+  }
+}
+
+#define GCTraceTime(Level, ...) GCTraceTimeImpl<LogLevel::Level, LOG_TAGS(__VA_ARGS__)>
+#define GCTraceConcTime(Level, ...) GCTraceConcTimeImpl<LogLevel::Level, LOG_TAGS(__VA_ARGS__)>
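+// Example use (see GenCollectedHeap::do_collection): GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause(), true);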
+
+#endif // SHARE_VM_GC_SHARED_GCTRACETIME_INLINE_HPP
--- a/src/share/vm/gc/shared/genCollectedHeap.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/genCollectedHeap.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -33,7 +33,7 @@
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/generationSpec.hpp"
@@ -314,13 +314,11 @@
 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                           bool is_tlab, bool run_verification, bool clear_soft_refs,
                                           bool restore_marks_for_biased_locking) {
-  // Timer for individual generations. Last argument is false: no CR
-  // FIXME: We should try to start the timing earlier to cover more of the GC pause
-  GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL);
+  FormatBuffer<> title("Collect gen: %s", gen->short_name());
+  GCTraceTime(Debug, gc) t1(title);
   TraceCollectorStats tcs(gen->counters());
   TraceMemoryManagerStats tmms(gen->kind(),gc_cause());
 
-  size_t prev_used = gen->used();
   gen->stat_record()->invocations++;
   gen->stat_record()->accumulated_time.start();
 
@@ -329,24 +327,11 @@
   // change top of some spaces.
   record_gen_tops_before_GC();
 
-  if (PrintGC && Verbose) {
-    // I didn't want to change the logging when removing the level concept,
-    // but I guess this logging could say young/old or something instead of 0/1.
-    uint level;
-    if (heap()->is_young_gen(gen)) {
-      level = 0;
-    } else {
-      level = 1;
-    }
-    gclog_or_tty->print("level=%u invoke=%d size=" SIZE_FORMAT,
-                        level,
-                        gen->stat_record()->invocations,
-                        size * HeapWordSize);
-  }
+  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);
 
   if (run_verification && VerifyBeforeGC) {
     HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify(" VerifyBeforeGC:");
+    Universe::verify("Before GC");
   }
   COMPILER2_PRESENT(DerivedPointerTable::clear());
 
@@ -404,12 +389,7 @@
 
   if (run_verification && VerifyAfterGC) {
     HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify(" VerifyAfterGC:");
-  }
-
-  if (PrintGCDetails) {
-    gclog_or_tty->print(":");
-    gen->print_heap_change(prev_used);
+    Universe::verify("After GC");
   }
 }
 
@@ -448,21 +428,31 @@
     FlagSetting fl(_is_gc_active, true);
 
     bool complete = full && (max_generation == OldGen);
-    const char* gc_cause_prefix = complete ? "Full GC" : "GC";
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL);
+    bool old_collects_young = complete && !ScavengeBeforeFullGC;
+    bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);
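+    // A full collection may let the old generation collect the young one
+    // (when ScavengeBeforeFullGC is off), in which case no separate young
+    // pause is run.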
+
+    FormatBuffer<> gc_string("%s", "Pause ");
+    if (do_young_collection) {
+      gc_string.append("Young");
+    } else {
+      gc_string.append("Full");
+    }
+
+    GCTraceCPUTime tcpu;
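+    // The trailing "true" makes the pause line include used-before->used-after(capacity) heap usage.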
+    GCTraceTime(Info, gc) t(gc_string, NULL, gc_cause(), true);
 
     gc_prologue(complete);
     increment_total_collections(complete);
 
-    size_t gch_prev_used = used();
+    size_t young_prev_used = _young_gen->used();
+    size_t old_prev_used = _old_gen->used();
+
     bool run_verification = total_collections() >= VerifyGCStartAt;
 
     bool prepared_for_verification = false;
     bool collected_old = false;
-    bool old_collects_young = complete && !ScavengeBeforeFullGC;
 
-    if (!old_collects_young && _young_gen->should_collect(full, size, is_tlab)) {
+    if (do_young_collection) {
       if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
         prepare_for_verify();
         prepared_for_verification = true;
@@ -487,7 +477,6 @@
     bool must_restore_marks_for_biased_locking = false;
 
     if (max_generation == OldGen && _old_gen->should_collect(full, size, is_tlab)) {
-      GCIdMarkAndRestore gc_id_mark;
       if (!complete) {
         // The full_collections increment was missed above.
         increment_total_full_collections();
@@ -501,13 +490,16 @@
       }
 
       assert(_old_gen->performs_in_place_marking(), "All old generations do in place marking");
-      collect_generation(_old_gen,
-                         full,
-                         size,
-                         is_tlab,
-                         run_verification && VerifyGCLevel <= 1,
-                         do_clear_all_soft_refs,
-                         true);
+
+      if (do_young_collection) {
+        // We did a young GC. Need a new GC id for the old GC.
+        GCIdMarkAndRestore gc_id_mark;
+        GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause(), true);
+        collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs, true);
+      } else {
+        // No young GC done. Use the same GC id as was set up earlier in this method.
+        collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs, true);
+      }
 
       must_restore_marks_for_biased_locking = true;
       collected_old = true;
@@ -523,14 +515,8 @@
       post_full_gc_dump(NULL);   // do any post full gc dumps
     }
 
-    if (PrintGCDetails) {
-      print_heap_change(gch_prev_used);
-
-      // Print metaspace info for full GC with PrintGCDetails flag.
-      if (complete) {
-        MetaspaceAux::print_metaspace_change(metadata_prev_used);
-      }
-    }
+    print_heap_change(young_prev_used, old_prev_used);
+    MetaspaceAux::print_metaspace_change(metadata_prev_used);
 
     // Adjust generation sizes.
     if (collected_old) {
@@ -874,10 +860,7 @@
   // been attempted and failed, because the old gen was too full
   if (local_last_generation == YoungGen && gc_cause() == GCCause::_gc_locker &&
       incremental_collection_will_fail(false /* don't consult_young */)) {
-    if (PrintGCDetails) {
-      gclog_or_tty->print_cr("GC locker: Trying a full collection "
-                             "because scavenge failed");
-    }
+    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
     // This time allow the old gen to be collected as well
     do_collection(true,                // full
                   clear_all_soft_refs, // clear_all_soft_refs
@@ -1106,22 +1089,14 @@
   _young_gen->prepare_for_compaction(&cp);
 }
 
-void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
-  if (!silent) {
-    gclog_or_tty->print("%s", _old_gen->name());
-    gclog_or_tty->print(" ");
-  }
+void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
+  log_debug(gc, verify)("%s", _old_gen->name());
   _old_gen->verify();
 
-  if (!silent) {
-    gclog_or_tty->print("%s", _young_gen->name());
-    gclog_or_tty->print(" ");
-  }
+  log_debug(gc, verify)("%s", _old_gen->name());
   _young_gen->verify();
 
-  if (!silent) {
-    gclog_or_tty->print("remset ");
-  }
+  log_debug(gc, verify)("RemSet");
   rem_set()->verify();
 }
 
@@ -1171,18 +1146,11 @@
   }
 }
 
-void GenCollectedHeap::print_heap_change(size_t prev_used) const {
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print(" "  SIZE_FORMAT
-                        "->" SIZE_FORMAT
-                        "("  SIZE_FORMAT ")",
-                        prev_used, used(), capacity());
-  } else {
-    gclog_or_tty->print(" "  SIZE_FORMAT "K"
-                        "->" SIZE_FORMAT "K"
-                        "("  SIZE_FORMAT "K)",
-                        prev_used / K, used() / K, capacity() / K);
-  }
+void GenCollectedHeap::print_heap_change(size_t young_prev_used, size_t old_prev_used) const {
+  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
+                     _young_gen->short_name(), young_prev_used / K, _young_gen->used() /K, _young_gen->capacity() /K);
+  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
+                     _old_gen->short_name(), old_prev_used / K, _old_gen->used() /K, _old_gen->capacity() /K);
 }
 
 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
--- a/src/share/vm/gc/shared/genCollectedHeap.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/genCollectedHeap.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -142,6 +142,14 @@
     return CollectedHeap::GenCollectedHeap;
   }
 
+  virtual const char* name() const {
+    if (UseConcMarkSweepGC) {
+      return "Concurrent Mark Sweep";
+    } else {
+      return "Serial";
+    }
+  }
+
   Generation* young_gen() const { return _young_gen; }
   Generation* old_gen()   const { return _old_gen; }
 
@@ -329,7 +337,7 @@
   void prepare_for_verify();
 
   // Override.
-  void verify(bool silent, VerifyOption option);
+  void verify(VerifyOption option);
 
   // Override.
   virtual void print_on(outputStream* st) const;
@@ -338,8 +346,7 @@
   virtual void print_tracing_info() const;
   virtual void print_on_error(outputStream* st) const;
 
-  // PrintGC, PrintGCDetails support
-  void print_heap_change(size_t prev_used) const;
+  void print_heap_change(size_t young_prev_used, size_t old_prev_used) const;
 
   // The functions below are helper functions that a subclass of
   // "CollectedHeap" can use in the implementation of its virtual
--- a/src/share/vm/gc/shared/generation.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/generation.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -36,6 +36,7 @@
 #include "gc/shared/generation.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
@@ -70,20 +71,6 @@
   return reserved().byte_size();
 }
 
-void Generation::print_heap_change(size_t prev_used) const {
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print(" "  SIZE_FORMAT
-                        "->" SIZE_FORMAT
-                        "("  SIZE_FORMAT ")",
-                        prev_used, used(), capacity());
-  } else {
-    gclog_or_tty->print(" "  SIZE_FORMAT "K"
-                        "->" SIZE_FORMAT "K"
-                        "("  SIZE_FORMAT "K)",
-                        prev_used / K, used() / K, capacity() / K);
-  }
-}
-
 // By default we get a single threaded default reference processor;
 // generations needing multi-threaded refs processing or discovery override this method.
 void Generation::ref_processor_init() {
@@ -171,12 +158,8 @@
 bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
   size_t available = max_contiguous_available();
   bool   res = (available >= max_promotion_in_bytes);
-  if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr(
-      "Generation: promo attempt is%s safe: available(" SIZE_FORMAT ") %s max_promo(" SIZE_FORMAT ")",
-      res? "":" not", available, res? ">=":"<",
-      max_promotion_in_bytes);
-  }
+  log_trace(gc)("Generation: promo attempt is%s safe: available(" SIZE_FORMAT ") %s max_promo(" SIZE_FORMAT ")",
+                res? "":" not", available, res? ">=":"<", max_promotion_in_bytes);
   return res;
 }
 
--- a/src/share/vm/gc/shared/generation.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/generation.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -536,11 +536,8 @@
   // the block is an object.
   virtual bool block_is_obj(const HeapWord* addr) const;
 
-
-  // PrintGC, PrintGCDetails support
-  void print_heap_change(size_t prev_used) const;
 
-  // PrintHeapAtGC support
   virtual void print() const;
   virtual void print_on(outputStream* st) const;
 
--- a/src/share/vm/gc/shared/plab.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/plab.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -26,6 +26,7 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/plab.inline.hpp"
 #include "gc/shared/threadLocalAllocBuffer.hpp"
+#include "logging/log.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/oop.inline.hpp"
 
@@ -149,18 +150,8 @@
   new_plab_sz = MIN2(max_size(), new_plab_sz);
   new_plab_sz = align_object_size(new_plab_sz);
   // Latch the result
-  if (PrintPLAB) {
-    gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " desired_net_plab_sz = " SIZE_FORMAT ") ", recent_plab_sz, new_plab_sz);
-  }
+  log_trace(gc, plab)("plab_size = " SIZE_FORMAT " desired_net_plab_sz = " SIZE_FORMAT ") ", recent_plab_sz, new_plab_sz);
   _desired_net_plab_sz = new_plab_sz;
 
   reset();
 }
-
-#ifndef PRODUCT
-void PLAB::print() {
-  gclog_or_tty->print_cr("PLAB: _bottom: " PTR_FORMAT "  _top: " PTR_FORMAT
-    "  _end: " PTR_FORMAT "  _hard_end: " PTR_FORMAT ")",
-    p2i(_bottom), p2i(_top), p2i(_end), p2i(_hard_end));
-}
-#endif // !PRODUCT
--- a/src/share/vm/gc/shared/plab.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/plab.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -141,8 +141,6 @@
   // Fills in the unallocated portion of the buffer with a garbage object and updates
   // statistics. To be called during GC.
   virtual void retire();
-
-  void print() PRODUCT_RETURN;
 };
 
 // PLAB book-keeping.
--- a/src/share/vm/gc/shared/referenceProcessor.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/referenceProcessor.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -28,9 +28,10 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
@@ -186,21 +187,6 @@
   return total;
 }
 
-static void log_ref_count(size_t count, bool doit) {
-  if (doit) {
-    gclog_or_tty->print(", " SIZE_FORMAT " refs", count);
-  }
-}
-
-class GCRefTraceTime : public StackObj {
-  GCTraceTimeImpl _gc_trace_time;
- public:
-  GCRefTraceTime(const char* title, bool doit, GCTimer* timer, size_t count) :
-    _gc_trace_time(title, doit, false, timer) {
-    log_ref_count(count, doit);
-  }
-};
-
 ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
   BoolObjectClosure*           is_alive,
   OopClosure*                  keep_alive,
@@ -222,8 +208,6 @@
 
   _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
 
-  bool trace_time = PrintGCDetails && PrintReferenceGC;
-
   // Include cleaners in phantom statistics.  We expect Cleaner
   // references to be temporary, and don't want to deal with
   // possible incompatibilities arising from making it more visible.
@@ -235,7 +219,7 @@
 
   // Soft references
   {
-    GCRefTraceTime tt("SoftReference", trace_time, gc_timer, stats.soft_count());
+    GCTraceTime(Debug, gc, ref) tt("SoftReference", gc_timer);
     process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                                is_alive, keep_alive, complete_gc, task_executor);
   }
@@ -244,21 +228,21 @@
 
   // Weak references
   {
-    GCRefTraceTime tt("WeakReference", trace_time, gc_timer, stats.weak_count());
+    GCTraceTime(Debug, gc, ref) tt("WeakReference", gc_timer);
     process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                                is_alive, keep_alive, complete_gc, task_executor);
   }
 
   // Final references
   {
-    GCRefTraceTime tt("FinalReference", trace_time, gc_timer, stats.final_count());
+  GCTraceTime(Debug, gc, ref) tt("FinalReference", gc_timer);
     process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                                is_alive, keep_alive, complete_gc, task_executor);
   }
 
   // Phantom references
   {
-    GCRefTraceTime tt("PhantomReference", trace_time, gc_timer, stats.phantom_count());
+    GCTraceTime(Debug, gc, ref) tt("PhantomReference", gc_timer);
     process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                                is_alive, keep_alive, complete_gc, task_executor);
 
@@ -275,20 +259,23 @@
   // thus use JNI weak references to circumvent the phantom references and
   // resurrect a "post-mortem" object.
   {
-    GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer);
-    NOT_PRODUCT(log_ref_count(count_jni_refs(), trace_time);)
+    GCTraceTime(Debug, gc, ref) tt("JNI Weak Reference", gc_timer);
     if (task_executor != NULL) {
       task_executor->set_single_threaded_mode();
     }
     process_phaseJNI(is_alive, keep_alive, complete_gc);
   }
 
+  log_debug(gc, ref)("Ref Counts: Soft: " SIZE_FORMAT " Weak: " SIZE_FORMAT " Final: " SIZE_FORMAT " Phantom: " SIZE_FORMAT,
+                     stats.soft_count(), stats.weak_count(), stats.final_count(), stats.phantom_count());
+  log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs());
+
   return stats;
 }
 
 #ifndef PRODUCT
 // Calculate the number of jni handles.
-uint ReferenceProcessor::count_jni_refs() {
+size_t ReferenceProcessor::count_jni_refs() {
   class AlwaysAliveClosure: public BoolObjectClosure {
   public:
     virtual bool do_object_b(oop obj) { return true; }
@@ -296,12 +283,12 @@
 
   class CountHandleClosure: public OopClosure {
   private:
-    int _count;
+    size_t _count;
   public:
     CountHandleClosure(): _count(0) {}
     void do_oop(oop* unused)       { _count++; }
     void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
-    int count() { return _count; }
+    size_t count() { return _count; }
   };
   CountHandleClosure global_handle_count;
   AlwaysAliveClosure always_alive;
@@ -362,10 +349,7 @@
   // all linked Reference objects. Note that it is important to not dirty any
   // cards during reference processing since this will cause card table
   // verification to fail for G1.
-  if (TraceReferenceGC && PrintGCDetails) {
-    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
-                           INTPTR_FORMAT, p2i(refs_list.head()));
-  }
+  log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(refs_list.head()));
 
   oop obj = NULL;
   oop next_d = refs_list.head();
@@ -376,10 +360,7 @@
     assert(obj->is_instance(), "should be an instance object");
     assert(InstanceKlass::cast(obj->klass())->is_reference_instance_klass(), "should be reference object");
     next_d = java_lang_ref_Reference::discovered(obj);
-    if (TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
-                             p2i(obj), p2i(next_d));
-    }
+    log_develop_trace(gc, ref)("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, p2i(obj), p2i(next_d));
     assert(java_lang_ref_Reference::next(obj) == NULL,
            "Reference not active; should not be discovered");
     // Self-loop next, so as to make Ref not active.
@@ -517,10 +498,8 @@
     bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
     if (referent_is_dead &&
         !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
-      if (TraceReferenceGC) {
-        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
-                               p2i(iter.obj()), iter.obj()->klass()->internal_name());
-      }
+      log_develop_trace(gc, ref)("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
+                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
       // Remove Reference object from list
       iter.remove();
       // keep the referent around
@@ -532,14 +511,9 @@
   }
   // Close the reachable set
   complete_gc->do_void();
-  NOT_PRODUCT(
-    if (PrintGCDetails && TraceReferenceGC) {
-      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT
-        " discovered Refs by policy, from list " INTPTR_FORMAT,
-        iter.removed(), iter.processed(), p2i(refs_list.head()));
+  log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT,
+                             iter.removed(), iter.processed(), p2i(refs_list.head()));
-    }
-  )
 }
 
 // Traverse the list and remove any Refs that are not active, or
 // whose referents are either alive or NULL.
@@ -554,10 +528,8 @@
     DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
     assert(next == NULL, "Should not discover inactive Reference");
     if (iter.is_referent_alive()) {
-      if (TraceReferenceGC) {
-        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
-                               p2i(iter.obj()), iter.obj()->klass()->internal_name());
-      }
+      log_develop_trace(gc, ref)("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
+                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
       // The referent is reachable after all.
       // Remove Reference object from list.
       iter.remove();
@@ -571,8 +543,8 @@
     }
   }
   NOT_PRODUCT(
-    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
-      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
+    if (iter.processed() > 0) {
+      log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
         " Refs in discovered list " INTPTR_FORMAT,
         iter.removed(), iter.processed(), p2i(refs_list.head()));
     }
@@ -610,8 +582,8 @@
   // Now close the newly reachable set
   complete_gc->do_void();
   NOT_PRODUCT(
-    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
-      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
+    if (iter.processed() > 0) {
+      log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
         " Refs in discovered list " INTPTR_FORMAT,
         iter.removed(), iter.processed(), p2i(refs_list.head()));
     }
@@ -638,11 +610,8 @@
       // keep the referent around
       iter.make_referent_alive();
     }
-    if (TraceReferenceGC) {
-      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
-                             clear_referent ? "cleared " : "",
-                             p2i(iter.obj()), iter.obj()->klass()->internal_name());
-    }
+    log_develop_trace(gc, ref)("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
+                               clear_referent ? "cleared " : "", p2i(iter.obj()), iter.obj()->klass()->internal_name());
     assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
     iter.next();
   }
@@ -666,8 +635,8 @@
 void ReferenceProcessor::abandon_partial_discovery() {
   // loop over the lists
   for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
-    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
-      gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
+    if ((i % _max_num_q) == 0) {
+      log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i));
     }
     clear_discovered_references(_discovered_refs[i]);
   }
@@ -736,6 +705,20 @@
   bool _clear_referent;
 };
 
+#ifndef PRODUCT
+void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], size_t total_refs) {
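+  // Emits one trace line with the length of each discovered list followed by the total.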
+  if (!log_is_enabled(Trace, gc, ref)) {
+    return;
+  }
+
+  stringStream st;
+  for (uint i = 0; i < _max_num_q; ++i) {
+    st.print(SIZE_FORMAT " ", ref_lists[i].length());
+  }
+  log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs);
+}
+#endif
+
 // Balances reference queues.
 // Move entries from all queues[0, 1, ..., _max_num_q-1] to
 // queues[0, 1, ..., _num_q-1] because only the first _num_q
@@ -744,19 +727,12 @@
 {
   // calculate total length
   size_t total_refs = 0;
-  if (TraceReferenceGC && PrintGCDetails) {
-    gclog_or_tty->print_cr("\nBalance ref_lists ");
-  }
+  log_develop_trace(gc, ref)("Balance ref_lists ");
 
   for (uint i = 0; i < _max_num_q; ++i) {
     total_refs += ref_lists[i].length();
-    if (TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print(SIZE_FORMAT " ", ref_lists[i].length());
-    }
   }
-  if (TraceReferenceGC && PrintGCDetails) {
-    gclog_or_tty->print_cr(" = " SIZE_FORMAT, total_refs);
-  }
+  log_reflist_counts(ref_lists, total_refs);
   size_t avg_refs = total_refs / _num_q + 1;
   uint to_idx = 0;
   for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) {
@@ -820,14 +796,8 @@
   size_t balanced_total_refs = 0;
   for (uint i = 0; i < _max_num_q; ++i) {
     balanced_total_refs += ref_lists[i].length();
-    if (TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print(SIZE_FORMAT " ", ref_lists[i].length());
-    }
   }
-  if (TraceReferenceGC && PrintGCDetails) {
-    gclog_or_tty->print_cr(" = " SIZE_FORMAT, balanced_total_refs);
-    gclog_or_tty->flush();
-  }
+  log_reflist_counts(ref_lists, balanced_total_refs);
   assert(total_refs == balanced_total_refs, "Balancing was incomplete");
 #endif
 }
@@ -950,9 +920,7 @@
     default:
       ShouldNotReachHere();
   }
-  if (TraceReferenceGC && PrintGCDetails) {
-    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, p2i(list));
-  }
+  log_develop_trace(gc, ref)("Thread %d gets list " INTPTR_FORMAT, id, p2i(list));
   return list;
 }
 
@@ -976,19 +944,15 @@
     refs_list.set_head(obj);
     refs_list.inc_length(1);
 
-    if (TraceReferenceGC) {
-      gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
-                             p2i(obj), obj->klass()->internal_name());
-    }
+    log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
+                               p2i(obj), obj->klass()->internal_name());
   } else {
     // If retest was non NULL, another thread beat us to it:
     // The reference has already been discovered...
-    if (TraceReferenceGC) {
-      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
-                             p2i(obj), obj->klass()->internal_name());
+    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
+                               p2i(obj), obj->klass()->internal_name());
-    }
   }
 }
 
 #ifndef PRODUCT
 // Non-atomic (i.e. concurrent) discovery might allow us
@@ -1078,10 +1042,8 @@
   assert(discovered->is_oop_or_null(), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
   if (discovered != NULL) {
     // The reference has already been discovered...
-    if (TraceReferenceGC) {
-      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
-                             p2i(obj), obj->klass()->internal_name());
-    }
+    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
+                               p2i(obj), obj->klass()->internal_name());
     if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
       // assumes that an object is not processed twice;
       // if it's been already discovered it must be on another
@@ -1136,10 +1098,7 @@
     list->set_head(obj);
     list->inc_length(1);
 
-    if (TraceReferenceGC) {
-      gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
-                                p2i(obj), obj->klass()->internal_name());
-    }
+    log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name());
   }
   assert(obj->is_oop(), "Discovered a bad reference");
   verify_referent(obj);
@@ -1159,8 +1118,7 @@
 
   // Soft references
   {
-    GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
-              false, gc_timer);
+    GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
@@ -1172,8 +1130,7 @@
 
   // Weak references
   {
-    GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
-              false, gc_timer);
+    GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
@@ -1185,8 +1142,7 @@
 
   // Final references
   {
-    GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
-              false, gc_timer);
+    GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
@@ -1198,8 +1154,7 @@
 
   // Phantom references
   {
-    GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
-              false, gc_timer);
+    GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
@@ -1244,10 +1199,8 @@
         next != NULL) {
       // The referent has been cleared, or is alive, or the Reference is not
       // active; we need to trace and mark its cohort.
-      if (TraceReferenceGC) {
-        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
-                               p2i(iter.obj()), iter.obj()->klass()->internal_name());
-      }
+      log_develop_trace(gc, ref)("Precleaning Reference (" INTPTR_FORMAT ": %s)",
+                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
       // Remove Reference object from list
       iter.remove();
       // Keep alive its cohort.
@@ -1268,9 +1221,8 @@
   complete_gc->do_void();
 
   NOT_PRODUCT(
-    if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
-      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT
-        " Refs in discovered list " INTPTR_FORMAT,
+    if (iter.processed() > 0) {
+      log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT,
         iter.removed(), iter.processed(), p2i(refs_list.head()));
     }
   )
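
The pattern this file repeats: an explicit TraceReferenceGC check around gclog_or_tty becomes one tagged call whose level check is internal. A minimal before/after sketch, assuming the log_develop_trace macro from logging/log.hpp as introduced by this changeset (develop variants are compiled out of product builds):

    // Before: caller-side flag test, everything funneled through gclog_or_tty.
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
                             p2i(obj), obj->klass()->internal_name());
    }

    // After: the (gc, ref) tag set picks the channel; the call emits nothing
    // unless develop-trace level is enabled for those tags.
    log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
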
--- a/src/share/vm/gc/shared/referenceProcessor.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/referenceProcessor.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -364,7 +364,9 @@
   void clear_discovered_references(DiscoveredList& refs_list);
 
   // Calculate the number of jni handles.
-  unsigned int count_jni_refs();
+  size_t count_jni_refs();
+
+  void log_reflist_counts(DiscoveredList ref_lists[], size_t total_count) PRODUCT_RETURN;
 
   // Balances reference queues.
   void balance_queues(DiscoveredList ref_lists[]);
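
The PRODUCT_RETURN suffix on the new log_reflist_counts declaration keeps the helper out of product builds entirely. A sketch of the usual HotSpot definition (quoted from memory from globalDefinitions.hpp, so treat as approximate):

    #ifdef PRODUCT
    #define PRODUCT_RETURN {}     // declaration becomes an empty inline body
    #else
    #define PRODUCT_RETURN        // plain declaration; the body lives in the .cpp
    #endif
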
--- a/src/share/vm/gc/shared/space.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/space.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -220,7 +220,6 @@
   // moving as a part of compaction.
   virtual void adjust_pointers() = 0;
 
-  // PrintHeapAtGC support
   virtual void print() const;
   virtual void print_on(outputStream* st) const;
   virtual void print_short() const;
@@ -659,7 +658,6 @@
   // Overrides for more efficient compaction support.
   void prepare_for_compaction(CompactPoint* cp);
 
-  // PrintHeapAtGC support.
   virtual void print_on(outputStream* st) const;
 
   // Checked dynamic downcasts.
--- a/src/share/vm/gc/shared/spaceDecorator.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/spaceDecorator.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "logging/log.hpp"
 #include "utilities/copy.hpp"
 
 // Catch-all file for utility classes
@@ -83,13 +84,9 @@
 void SpaceMangler::mangle_region(MemRegion mr) {
   assert(ZapUnusedHeapArea, "Mangling should not be in use");
 #ifdef ASSERT
-  if(TraceZapUnusedHeapArea) {
-    gclog_or_tty->print("Mangling [" PTR_FORMAT " to " PTR_FORMAT ")", p2i(mr.start()), p2i(mr.end()));
-  }
+  log_develop_trace(gc)("Mangling [" PTR_FORMAT " to " PTR_FORMAT ")", p2i(mr.start()), p2i(mr.end()));
   Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord);
-  if(TraceZapUnusedHeapArea) {
-    gclog_or_tty->print_cr(" done");
-  }
+  log_develop_trace(gc)("Mangling done.");
 #endif
 }
 
--- a/src/share/vm/gc/shared/taskqueue.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/taskqueue.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/shared/taskqueue.hpp"
 #include "oops/oop.inline.hpp"
+#include "logging/log.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.inline.hpp"
@@ -212,11 +213,8 @@
 #endif
         }
       } else {
-        if (PrintGCDetails && Verbose) {
-         gclog_or_tty->print_cr("ParallelTaskTerminator::offer_termination() "
-           "thread " PTR_FORMAT " sleeps after %u yields",
-           p2i(Thread::current()), yield_count);
-        }
+        log_develop_trace(gc, task)("ParallelTaskTerminator::offer_termination() thread " PTR_FORMAT " sleeps after %u yields",
+                                    p2i(Thread::current()), yield_count);
         yield_count = 0;
         // A sleep will cause this processor to seek work on another processor's
         // runqueue, if it has nothing else to run (as opposed to the yield
@@ -240,7 +238,7 @@
 
 #ifdef TRACESPINNING
 void ParallelTaskTerminator::print_termination_counts() {
-  gclog_or_tty->print_cr("ParallelTaskTerminator Total yields: %u"
+  log_trace(gc, task)("ParallelTaskTerminator Total yields: %u"
     " Total spins: %u Total peeks: %u",
     total_yields(),
     total_spins(),
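
Two different levels are used in this file on purpose: the per-yield sleep message is log_develop_trace (develop builds only), while print_termination_counts, which already sits under #ifdef TRACESPINNING, uses plain log_trace. Enabling the latter at runtime would presumably look like:

    // -Xlog:gc+task=trace    (command-line sketch; '+' joins tags, '=' sets the level)
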
--- a/src/share/vm/gc/shared/threadLocalAllocBuffer.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/threadLocalAllocBuffer.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
+#include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -54,9 +55,7 @@
   // Publish new stats if some allocation occurred.
   if (global_stats()->allocation() != 0) {
     global_stats()->publish();
-    if (PrintTLAB) {
-      global_stats()->print();
-    }
+    global_stats()->print();
   }
 }
 
@@ -70,9 +69,7 @@
   size_t allocated_since_last_gc = total_allocated - _allocated_before_last_gc;
   _allocated_before_last_gc = total_allocated;
 
-  if (PrintTLAB && (_number_of_refills > 0 || Verbose)) {
-    print_stats("gc");
-  }
+  print_stats("gc");
 
   if (_number_of_refills > 0) {
     // Update allocation history if a reasonable amount of eden was allocated.
@@ -149,12 +146,11 @@
 
   size_t aligned_new_size = align_object_size(new_size);
 
-  if (PrintTLAB && Verbose) {
-    gclog_or_tty->print("TLAB new size: thread: " INTPTR_FORMAT " [id: %2d]"
-                        " refills %d  alloc: %8.6f desired_size: " SIZE_FORMAT " -> " SIZE_FORMAT "\n",
-                        p2i(myThread()), myThread()->osthread()->thread_id(),
-                        _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size);
-  }
+  log_trace(gc, tlab)("TLAB new size: thread: " INTPTR_FORMAT " [id: %2d]"
+                      " refills %d  alloc: %8.6f desired_size: " SIZE_FORMAT " -> " SIZE_FORMAT,
+                      p2i(myThread()), myThread()->osthread()->thread_id(),
+                      _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size);
+
   set_desired_size(aligned_new_size);
   set_refill_waste_limit(initial_refill_waste_limit());
 }
@@ -171,9 +167,7 @@
                                   HeapWord* top,
                                   size_t    new_size) {
   _number_of_refills++;
-  if (PrintTLAB && Verbose) {
-    print_stats("fill");
-  }
+  print_stats("fill");
   assert(top <= start + new_size - alignment_reserve(), "size too small");
   initialize(start, top, start + new_size - alignment_reserve());
 
@@ -226,10 +220,8 @@
   guarantee(Thread::current()->is_Java_thread(), "tlab initialization thread not Java thread");
   Thread::current()->tlab().initialize();
 
-  if (PrintTLAB && Verbose) {
-    gclog_or_tty->print("TLAB min: " SIZE_FORMAT " initial: " SIZE_FORMAT " max: " SIZE_FORMAT "\n",
-                        min_size(), Thread::current()->tlab().initial_desired_size(), max_size());
-  }
+  log_develop_trace(gc, tlab)("TLAB min: " SIZE_FORMAT " initial: " SIZE_FORMAT " max: " SIZE_FORMAT,
+                               min_size(), Thread::current()->tlab().initial_desired_size(), max_size());
 }
 
 size_t ThreadLocalAllocBuffer::initial_desired_size() {
@@ -250,26 +242,31 @@
 }
 
 void ThreadLocalAllocBuffer::print_stats(const char* tag) {
+  LogHandle(gc, tlab) log;
+  if (!log.is_trace()) {
+    return;
+  }
+
   Thread* thrd = myThread();
   size_t waste = _gc_waste + _slow_refill_waste + _fast_refill_waste;
   size_t alloc = _number_of_refills * _desired_size;
   double waste_percent = alloc == 0 ? 0.0 :
                       100.0 * waste / alloc;
   size_t tlab_used  = Universe::heap()->tlab_used(thrd);
-  gclog_or_tty->print("TLAB: %s thread: " INTPTR_FORMAT " [id: %2d]"
-                      " desired_size: " SIZE_FORMAT "KB"
-                      " slow allocs: %d  refill waste: " SIZE_FORMAT "B"
-                      " alloc:%8.5f %8.0fKB refills: %d waste %4.1f%% gc: %dB"
-                      " slow: %dB fast: %dB\n",
-                      tag, p2i(thrd), thrd->osthread()->thread_id(),
-                      _desired_size / (K / HeapWordSize),
-                      _slow_allocations, _refill_waste_limit * HeapWordSize,
-                      _allocation_fraction.average(),
-                      _allocation_fraction.average() * tlab_used / K,
-                      _number_of_refills, waste_percent,
-                      _gc_waste * HeapWordSize,
-                      _slow_refill_waste * HeapWordSize,
-                      _fast_refill_waste * HeapWordSize);
+  log.trace("TLAB: %s thread: " INTPTR_FORMAT " [id: %2d]"
+            " desired_size: " SIZE_FORMAT "KB"
+            " slow allocs: %d  refill waste: " SIZE_FORMAT "B"
+            " alloc:%8.5f %8.0fKB refills: %d waste %4.1f%% gc: %dB"
+            " slow: %dB fast: %dB",
+            tag, p2i(thrd), thrd->osthread()->thread_id(),
+            _desired_size / (K / HeapWordSize),
+            _slow_allocations, _refill_waste_limit * HeapWordSize,
+            _allocation_fraction.average(),
+            _allocation_fraction.average() * tlab_used / K,
+            _number_of_refills, waste_percent,
+            _gc_waste * HeapWordSize,
+            _slow_refill_waste * HeapWordSize,
+            _fast_refill_waste * HeapWordSize);
 }
 
 void ThreadLocalAllocBuffer::verify() {
@@ -388,22 +385,27 @@
 }
 
 void GlobalTLABStats::print() {
+  LogHandle(gc, tlab) log;
+  if (!log.is_debug()) {
+    return;
+  }
+
   size_t waste = _total_gc_waste + _total_slow_refill_waste + _total_fast_refill_waste;
   double waste_percent = _total_allocation == 0 ? 0.0 :
                          100.0 * waste / _total_allocation;
-  gclog_or_tty->print("TLAB totals: thrds: %d  refills: %d max: %d"
-                      " slow allocs: %d max %d waste: %4.1f%%"
-                      " gc: " SIZE_FORMAT "B max: " SIZE_FORMAT "B"
-                      " slow: " SIZE_FORMAT "B max: " SIZE_FORMAT "B"
-                      " fast: " SIZE_FORMAT "B max: " SIZE_FORMAT "B\n",
-                      _allocating_threads,
-                      _total_refills, _max_refills,
-                      _total_slow_allocations, _max_slow_allocations,
-                      waste_percent,
-                      _total_gc_waste * HeapWordSize,
-                      _max_gc_waste * HeapWordSize,
-                      _total_slow_refill_waste * HeapWordSize,
-                      _max_slow_refill_waste * HeapWordSize,
-                      _total_fast_refill_waste * HeapWordSize,
-                      _max_fast_refill_waste * HeapWordSize);
+  log.debug("TLAB totals: thrds: %d  refills: %d max: %d"
+            " slow allocs: %d max %d waste: %4.1f%%"
+            " gc: " SIZE_FORMAT "B max: " SIZE_FORMAT "B"
+            " slow: " SIZE_FORMAT "B max: " SIZE_FORMAT "B"
+            " fast: " SIZE_FORMAT "B max: " SIZE_FORMAT "B",
+            _allocating_threads,
+            _total_refills, _max_refills,
+            _total_slow_allocations, _max_slow_allocations,
+            waste_percent,
+            _total_gc_waste * HeapWordSize,
+            _max_gc_waste * HeapWordSize,
+            _total_slow_refill_waste * HeapWordSize,
+            _max_slow_refill_waste * HeapWordSize,
+            _total_fast_refill_waste * HeapWordSize,
+            _max_fast_refill_waste * HeapWordSize);
 }
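
Note that the PrintTLAB guards vanish from the call sites above (print_stats("gc") and global_stats()->print() are now called unconditionally) because the check moved inside the printers via the LogHandle idiom. Condensed shape, mirroring the hunks:

    void GlobalTLABStats::print() {
      LogHandle(gc, tlab) log;   // binds the (gc, tlab) tag set once
      if (!log.is_debug()) {     // cheap level test replaces 'if (PrintTLAB)'
        return;                  // skips the waste arithmetic entirely
      }
      // ... compute waste and waste_percent ...
      log.debug("TLAB totals: ...");
    }
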
--- a/src/share/vm/gc/shared/threadLocalAllocBuffer.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/threadLocalAllocBuffer.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,6 +27,7 @@
 
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/threadLocalAllocBuffer.hpp"
+#include "logging/log.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/copy.hpp"
 
@@ -66,18 +67,12 @@
   const size_t obj_plus_filler_size = aligned_obj_size + alignment_reserve();
   if (new_tlab_size < obj_plus_filler_size) {
     // If there isn't enough room for the allocation, return failure.
-    if (PrintTLAB && Verbose) {
-      gclog_or_tty->print_cr("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ")"
-                    " returns failure",
-                    obj_size);
-    }
+    log_trace(gc, tlab)("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ") returns failure",
+                        obj_size);
     return 0;
   }
-  if (PrintTLAB && Verbose) {
-    gclog_or_tty->print_cr("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ")"
-                  " returns " SIZE_FORMAT,
-                  obj_size, new_tlab_size);
-  }
+  log_trace(gc, tlab)("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ") returns " SIZE_FORMAT,
+                      obj_size, new_tlab_size);
   return new_tlab_size;
 }
 
@@ -91,15 +86,12 @@
 
   _slow_allocations++;
 
-  if (PrintTLAB && Verbose) {
-    Thread* thrd = myThread();
-    gclog_or_tty->print("TLAB: %s thread: " INTPTR_FORMAT " [id: %2d]"
-                        " obj: " SIZE_FORMAT
-                        " free: " SIZE_FORMAT
-                        " waste: " SIZE_FORMAT "\n",
-                        "slow", p2i(thrd), thrd->osthread()->thread_id(),
-                        obj_size, free(), refill_waste_limit());
-  }
+  log_develop_trace(gc, tlab)("TLAB: %s thread: " INTPTR_FORMAT " [id: %2d]"
+                              " obj: " SIZE_FORMAT
+                              " free: " SIZE_FORMAT
+                              " waste: " SIZE_FORMAT,
+                              "slow", p2i(myThread()), myThread()->osthread()->thread_id(),
+                              obj_size, free(), refill_waste_limit());
 }
 
 #endif // SHARE_VM_GC_SHARED_THREADLOCALALLOCBUFFER_INLINE_HPP
--- a/src/share/vm/gc/shared/vmGCOperations.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/vmGCOperations.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -29,6 +29,7 @@
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/vmGCOperations.hpp"
 #include "memory/oopFactory.hpp"
+#include "logging/log.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "runtime/handles.inline.hpp"
@@ -216,16 +217,6 @@
   return false;
 }
 
-static void log_metaspace_alloc_failure_for_concurrent_GC() {
-  if (Verbose && PrintGCDetails) {
-    if (UseConcMarkSweepGC) {
-      gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
-    } else if (UseG1GC) {
-      gclog_or_tty->print_cr("\nG1 full GC for Metaspace");
-    }
-  }
-}
-
 void VM_CollectForMetadataAllocation::doit() {
   SvcGCMarker sgcm(SvcGCMarker::FULL);
 
@@ -249,7 +240,7 @@
       return;
     }
 
-    log_metaspace_alloc_failure_for_concurrent_GC();
+    log_debug(gc)("%s full GC for Metaspace", UseConcMarkSweepGC ? "CMS" : "G1");
   }
 
   // Don't clear the soft refs yet.
@@ -282,10 +273,7 @@
     return;
   }
 
-  if (Verbose && PrintGCDetails) {
-    gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
-                           SIZE_FORMAT, _size);
-  }
+  log_debug(gc)("After Metaspace GC failed to allocate size " SIZE_FORMAT, _size);
 
   if (GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
--- a/src/share/vm/gc/shared/workgroup.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/workgroup.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -275,7 +275,6 @@
 }
 
 void AbstractGangWorker::initialize() {
-  this->initialize_thread_local_storage();
   this->record_stack_base_and_size();
   this->initialize_named_thread();
   assert(_gang != NULL, "No gang to run in");
@@ -501,122 +500,42 @@
   return false;
 }
 
-bool FreeIdSet::_stat_init = false;
-FreeIdSet* FreeIdSet::_sets[NSets];
-bool FreeIdSet::_safepoint;
-
-FreeIdSet::FreeIdSet(int sz, Monitor* mon) :
-  _sz(sz), _mon(mon), _hd(0), _waiters(0), _index(-1), _claimed(0)
+FreeIdSet::FreeIdSet(uint size, Monitor* mon) :
+  _size(size), _mon(mon), _hd(0), _waiters(0), _claimed(0)
 {
-  _ids = NEW_C_HEAP_ARRAY(int, sz, mtInternal);
-  for (int i = 0; i < sz; i++) _ids[i] = i+1;
-  _ids[sz-1] = end_of_list; // end of list.
-  if (_stat_init) {
-    for (int j = 0; j < NSets; j++) _sets[j] = NULL;
-    _stat_init = true;
+  guarantee(size != 0, "must be");
+  _ids = NEW_C_HEAP_ARRAY(uint, size, mtGC);
+  for (uint i = 0; i < size - 1; i++) {
+    _ids[i] = i+1;
   }
-  // Add to sets.  (This should happen while the system is still single-threaded.)
-  for (int j = 0; j < NSets; j++) {
-    if (_sets[j] == NULL) {
-      _sets[j] = this;
-      _index = j;
-      break;
-    }
-  }
-  guarantee(_index != -1, "Too many FreeIdSets in use!");
+  _ids[size-1] = end_of_list; // end of list.
 }
 
 FreeIdSet::~FreeIdSet() {
-  _sets[_index] = NULL;
-  FREE_C_HEAP_ARRAY(int, _ids);
+  FREE_C_HEAP_ARRAY(uint, _ids);
 }
 
-void FreeIdSet::set_safepoint(bool b) {
-  _safepoint = b;
-  if (b) {
-    for (int j = 0; j < NSets; j++) {
-      if (_sets[j] != NULL && _sets[j]->_waiters > 0) {
-        Monitor* mon = _sets[j]->_mon;
-        mon->lock_without_safepoint_check();
-        mon->notify_all();
-        mon->unlock();
-      }
-    }
-  }
-}
-
-#define FID_STATS 0
-
-int FreeIdSet::claim_par_id() {
-#if FID_STATS
-  thread_t tslf = thr_self();
-  tty->print("claim_par_id[%d]: sz = %d, claimed = %d\n", tslf, _sz, _claimed);
-#endif
+uint FreeIdSet::claim_par_id() {
   MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
-  while (!_safepoint && _hd == end_of_list) {
+  while (_hd == end_of_list) {
     _waiters++;
-#if FID_STATS
-    if (_waiters > 5) {
-      tty->print("claim_par_id waiting[%d]: %d waiters, %d claimed.\n",
-                 tslf, _waiters, _claimed);
-    }
-#endif
     _mon->wait(Mutex::_no_safepoint_check_flag);
     _waiters--;
   }
-  if (_hd == end_of_list) {
-#if FID_STATS
-    tty->print("claim_par_id[%d]: returning EOL.\n", tslf);
-#endif
-    return -1;
-  } else {
-    int res = _hd;
-    _hd = _ids[res];
-    _ids[res] = claimed;  // For debugging.
-    _claimed++;
-#if FID_STATS
-    tty->print("claim_par_id[%d]: returning %d, claimed = %d.\n",
-               tslf, res, _claimed);
-#endif
-    return res;
-  }
+  uint res = _hd;
+  _hd = _ids[res];
+  _ids[res] = claimed;  // For debugging.
+  _claimed++;
+  return res;
 }
 
-bool FreeIdSet::claim_perm_id(int i) {
-  assert(0 <= i && i < _sz, "Out of range.");
-  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
-  int prev = end_of_list;
-  int cur = _hd;
-  while (cur != end_of_list) {
-    if (cur == i) {
-      if (prev == end_of_list) {
-        _hd = _ids[cur];
-      } else {
-        _ids[prev] = _ids[cur];
-      }
-      _ids[cur] = claimed;
-      _claimed++;
-      return true;
-    } else {
-      prev = cur;
-      cur = _ids[cur];
-    }
-  }
-  return false;
-
-}
-
-void FreeIdSet::release_par_id(int id) {
+void FreeIdSet::release_par_id(uint id) {
   MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
   assert(_ids[id] == claimed, "Precondition.");
   _ids[id] = _hd;
   _hd = id;
   _claimed--;
-#if FID_STATS
-  tty->print("[%d] release_par_id(%d), waiters =%d,  claimed = %d.\n",
-             thr_self(), id, _waiters, _claimed);
-#endif
-  if (_waiters > 0)
-    // Notify all would be safer, but this is OK, right?
+  if (_waiters > 0) {
     _mon->notify_all();
+  }
 }
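
A hedged usage sketch of the slimmed-down FreeIdSet; the monitor and caller names are illustrative, not from this patch. The behavioral change worth noting is that claim_par_id() now blocks until an id is free rather than returning -1 when a safepoint wakes the waiter, so callers need no failure path:

    // Illustrative only; 'ids_lock' stands in for the Monitor the real client supplies.
    FreeIdSet worker_ids(num_workers, ids_lock);

    void do_work_needing_an_id() {
      uint id = worker_ids.claim_par_id();  // waits while all ids are claimed
      // ... index a per-worker structure with 'id' ...
      worker_ids.release_par_id(id);        // notifies waiters, if any
    }
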
--- a/src/share/vm/gc/shared/workgroup.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/gc/shared/workgroup.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -379,42 +379,29 @@
 };
 
 // Represents a set of free small integer ids.
-class FreeIdSet : public CHeapObj<mtInternal> {
+class FreeIdSet : public CHeapObj<mtGC> {
   enum {
-    end_of_list = -1,
-    claimed = -2
+    end_of_list = UINT_MAX,
+    claimed = UINT_MAX - 1
   };
 
-  int _sz;
+  uint _size;
   Monitor* _mon;
 
-  int* _ids;
-  int _hd;
-  int _waiters;
-  int _claimed;
-
-  static bool _safepoint;
-  typedef FreeIdSet* FreeIdSetPtr;
-  static const int NSets = 10;
-  static FreeIdSetPtr _sets[NSets];
-  static bool _stat_init;
-  int _index;
+  uint* _ids;
+  uint _hd;
+  uint _waiters;
+  uint _claimed;
 
 public:
-  FreeIdSet(int sz, Monitor* mon);
+  FreeIdSet(uint size, Monitor* mon);
   ~FreeIdSet();
 
-  static void set_safepoint(bool b);
-
-  // Attempt to claim the given id permanently.  Returns "true" iff
-  // successful.
-  bool claim_perm_id(int i);
+  // Returns an unclaimed parallel id (waiting for one to be released if
+  // necessary).
+  uint claim_par_id();
 
-  // Returns an unclaimed parallel id (waiting for one to be released if
-  // necessary).  Returns "-1" if a GC wakes up a wait for an id.
-  int claim_par_id();
-
-  void release_par_id(int id);
+  void release_par_id(uint id);
 };
 
 #endif // SHARE_VM_GC_SHARED_WORKGROUP_HPP
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -314,6 +314,27 @@
   THROW_HANDLE(exception);
 IRT_END
 
+IRT_ENTRY(address, InterpreterRuntime::check_ReservedStackAccess_annotated_methods(JavaThread* thread))
+  frame fr = thread->last_frame();
+  assert(fr.is_java_frame(), "Must be a Java frame");
+  frame activation = SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+  if (activation.sp() != NULL) {
+    thread->disable_stack_reserved_zone();
+    thread->set_reserved_stack_activation((address)activation.unextended_sp());
+  }
+  return (address)activation.sp();
+IRT_END
+
+IRT_ENTRY(void, InterpreterRuntime::throw_delayed_StackOverflowError(JavaThread* thread))
+  Handle exception = get_preinitialized_exception(
+                                 SystemDictionary::StackOverflowError_klass(),
+                                 CHECK);
+  java_lang_Throwable::set_message(exception(),
+          Universe::delayed_stack_overflow_error_message());
+  // Increment counter for hs_err file reporting
+  Atomic::inc(&Exceptions::_stack_overflow_errors);
+  THROW_HANDLE(exception);
+IRT_END
 
 IRT_ENTRY(void, InterpreterRuntime::create_exception(JavaThread* thread, char* name, char* message))
   // lookup exception klass
--- a/src/share/vm/interpreter/interpreterRuntime.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/interpreter/interpreterRuntime.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -91,10 +91,13 @@
   // Quicken instance-of and check-cast bytecodes
   static void    quicken_io_cc(JavaThread* thread);
 
+  static address check_ReservedStackAccess_annotated_methods(JavaThread* thread);
+
   // Exceptions thrown by the interpreter
   static void    throw_AbstractMethodError(JavaThread* thread);
   static void    throw_IncompatibleClassChangeError(JavaThread* thread);
   static void    throw_StackOverflowError(JavaThread* thread);
+  static void    throw_delayed_StackOverflowError(JavaThread* thread);
   static void    throw_ArrayIndexOutOfBoundsException(JavaThread* thread, char* name, jint index);
   static void    throw_ClassCastException(JavaThread* thread, oopDesc* obj);
   static void    create_exception(JavaThread* thread, char* name, char* message);
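
Reviewer sketch of how the two new entries compose (caller side paraphrased from the JEP 270 reserved-stack design, not quoted from this patch): on a hit in the reserved zone the interpreter asks whether a @ReservedStackAccess-annotated activation is on the stack; if so, the zone is disabled so the critical section can finish and the StackOverflowError is deferred:

    // Paraphrased control flow, not verbatim:
    address sp = InterpreterRuntime::check_ReservedStackAccess_annotated_methods(thread);
    if (sp != NULL) {
      // Annotated frame found: the reserved zone is now disabled, and
      // thread->reserved_stack_activation() marks where the deferred error
      // must surface. When execution unwinds past that activation, the
      // interpreter calls InterpreterRuntime::throw_delayed_StackOverflowError(thread).
    }
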
--- a/src/share/vm/interpreter/linkResolver.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/interpreter/linkResolver.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/defaultMethods.hpp"
+#include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
--- a/src/share/vm/jvmci/jvmciRuntime.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/jvmci/jvmciRuntime.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -248,7 +248,7 @@
   // Check the stack guard pages and reenable them if necessary and there is
   // enough space on the stack to do so.  Use fast exceptions only if the guard
   // pages are enabled.
-  bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
+  bool guard_pages_enabled = thread->stack_guards_enabled();
   if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
 
   if (JvmtiExport::can_post_on_exceptions()) {
--- a/src/share/vm/logging/logConfiguration.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/logging/logConfiguration.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -39,6 +39,7 @@
 
 LogOutput** LogConfiguration::_outputs = NULL;
 size_t      LogConfiguration::_n_outputs = 0;
+bool        LogConfiguration::_post_initialized = false;
 
 void LogConfiguration::post_initialize() {
   assert(LogConfiguration_lock != NULL, "Lock must be initialized before post-initialization");
@@ -51,6 +52,8 @@
     MutexLocker ml(LogConfiguration_lock);
     describe(log.trace_stream());
   }
+
+  _post_initialized = true;
 }
 
 void LogConfiguration::initialize(jlong vm_start_time) {
@@ -422,3 +425,12 @@
               "\t Turn off all logging, including warnings and errors,\n"
               "\t and then enable messages tagged with 'rt' using 'trace' level to file 'rttrace.txt'.\n");
 }
+
+void LogConfiguration::rotate_all_outputs() {
+  for (size_t idx = 0; idx < _n_outputs; idx++) {
+    if (_outputs[idx]->is_rotatable()) {
+      _outputs[idx]->rotate(true);
+    }
+  }
+}
+
--- a/src/share/vm/logging/logConfiguration.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/logging/logConfiguration.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -40,6 +40,7 @@
  private:
   static LogOutput**  _outputs;
   static size_t       _n_outputs;
+  static bool         _post_initialized;
 
   // Create a new output. Returns NULL if failed.
   static LogOutput* new_output(char* name, const char* options = NULL);
@@ -94,6 +95,13 @@
 
   // Prints usage help for command line log configuration.
   static void print_command_line_help(FILE* out);
+
+  static bool is_post_initialized() {
+    return _post_initialized;
+  }
+
+  // Rotates all rotatable LogOutputs.
+  static void rotate_all_outputs();
 };
 
 #endif // SHARE_VM_LOGGING_LOGCONFIGURATION_HPP
--- a/src/share/vm/logging/logDecorations.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/logging/logDecorations.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -96,7 +96,7 @@
 
 char * LogDecorations::create_tid_decoration(char* pos) {
   int written = jio_snprintf(pos, DecorationsBufferSize - (pos - _decorations_buffer),
-                             INTX_FORMAT, Thread::current()->osthread()->thread_id());
+                             INTX_FORMAT, os::current_thread_id());
   ASSERT_AND_RETURN(written, pos)
 }
 
--- a/src/share/vm/logging/logDiagnosticCommand.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/logging/logDiagnosticCommand.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -35,13 +35,15 @@
     _what("what", "Configures what tags to log.", "STRING", false),
     _decorators("decorators", "Configures which decorators to use. Use 'none' or an empty value to remove all.", "STRING", false),
     _disable("disable", "Turns off all logging and clears the log configuration.", "BOOLEAN", false),
-    _list("list", "Lists current log configuration.", "BOOLEAN", false) {
+    _list("list", "Lists current log configuration.", "BOOLEAN", false),
+    _rotate("rotate", "Rotates all logs.", "BOOLEAN", false) {
   _dcmdparser.add_dcmd_option(&_output);
   _dcmdparser.add_dcmd_option(&_output_options);
   _dcmdparser.add_dcmd_option(&_what);
   _dcmdparser.add_dcmd_option(&_decorators);
   _dcmdparser.add_dcmd_option(&_disable);
   _dcmdparser.add_dcmd_option(&_list);
+  _dcmdparser.add_dcmd_option(&_rotate);
 }
 
 int LogDiagnosticCommand::num_arguments() {
@@ -86,6 +88,11 @@
     any_command = true;
   }
 
+  if (_rotate.has_value()) {
+    LogConfiguration::rotate_all_outputs();
+    any_command = true;
+  }
+
   if (!any_command) {
     // If no argument was provided, print usage
     print_help(LogDiagnosticCommand::name());
--- a/src/share/vm/logging/logDiagnosticCommand.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/logging/logDiagnosticCommand.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -43,6 +43,7 @@
   DCmdArgument<char *> _decorators;
   DCmdArgument<bool> _disable;
   DCmdArgument<bool> _list;
+  DCmdArgument<bool> _rotate;
 
  public:
   LogDiagnosticCommand(outputStream* output, bool heap_allocated);
@@ -55,7 +56,7 @@
   }
 
   static const char* description() {
-    return "Lists, enables, disables or changes a log output configuration.";
+    return "Lists current log configuration, enables/disables/configures a log output, or rotates all logs.";
   }
 
   // Used by SecurityManager. This DCMD requires ManagementPermission = control.
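
With the option wired into the parser, forced rotation becomes reachable from the diagnostic command interface, presumably as something like jcmd <pid> VM.log rotate=true (the exact boolean syntax is whatever DCmdArgument<bool> accepts).
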
--- a/src/share/vm/logging/logFileOutput.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/logging/logFileOutput.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -155,12 +155,7 @@
   int written = LogFileStreamOutput::write(decorations, msg);
   _current_size += written;
 
-  if (should_rotate()) {
-    MutexLockerEx ml(&_rotation_lock, true /* no safepoint check */);
-    if (should_rotate()) {
-      rotate();
-    }
-  }
+  rotate(false);
 
   return written;
 }
@@ -182,7 +177,14 @@
   }
 }
 
-void LogFileOutput::rotate() {
+void LogFileOutput::rotate(bool force) {
+
+  if (!should_rotate(force)) {
+    return;
+  }
+
+  MutexLockerEx ml(&_rotation_lock, true /* no safepoint check */);
+
   // Archive the current log file
   archive();
 
--- a/src/share/vm/logging/logFileOutput.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/logging/logFileOutput.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -58,13 +58,13 @@
   size_t  _current_size;
 
   void archive();
-  void rotate();
   bool configure_rotation(const char* options);
   char *make_file_name(const char* file_name, const char* pid_string, const char* timestamp_string);
   static size_t parse_value(const char* value_str);
 
-  bool should_rotate() const {
-    return _file_count > 0 && _rotate_size > 0 && _current_size >= _rotate_size;
+  bool should_rotate(bool force) {
+    return is_rotatable() &&
+             (force || (_rotate_size > 0 && _current_size >= _rotate_size));
   }
 
  public:
@@ -73,6 +73,12 @@
   virtual bool initialize(const char* options);
   virtual int write(const LogDecorations& decorations, const char* msg);
 
+  virtual bool is_rotatable() {
+    return LogConfiguration::is_post_initialized() && (_file_count > 0);
+  }
+
+  virtual void rotate(bool force);
+
   virtual const char* name() const {
     return _name;
   }
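
Condensed view of the new predicate chain: rotate(force) is the single entry point, force (set only by VM.log rotate) bypasses just the size threshold, and nothing rotates before logging post-initialization. Roughly:

    // Mirrors should_rotate()/is_rotatable() above; checked on entry to
    // LogFileOutput::rotate(bool force), before the rotation lock is taken.
    bool will_rotate =
        LogConfiguration::is_post_initialized()   // VM bootstrap far enough along
        && _file_count > 0                        // rotation configured for this output
        && (force                                 // jcmd-driven rotation
            || (_rotate_size > 0 && _current_size >= _rotate_size));
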
--- a/src/share/vm/logging/logOutput.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/logging/logOutput.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -75,6 +75,14 @@
   virtual const char* name() const = 0;
   virtual bool initialize(const char* options) = 0;
   virtual int write(const LogDecorations &decorations, const char* msg) = 0;
+
+  virtual bool is_rotatable() {
+    return false;
+  }
+
+  virtual void rotate(bool force) {
+    // Do nothing by default.
+  }
 };
 
 #endif // SHARE_VM_LOGGING_LOGOUTPUT_HPP
--- a/src/share/vm/logging/logPrefix.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/logging/logPrefix.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -38,7 +38,38 @@
 // List of prefixes for specific tags and/or tagsets.
 // Syntax: LOG_PREFIX(<name of prefixer function>, LOG_TAGS(<chosen log tags>))
 // Where the prefixer function matches the following signature: size_t (*)(char*, size_t)
-#define LOG_PREFIX_LIST // Currently unused/empty
+#define LOG_PREFIX_LIST \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, age)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, alloc)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, barrier)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, compaction)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, compaction, phases)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, cpu)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, cset)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, heap)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, ihop)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, heap)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, freelist)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ihop)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, liveness)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, metaspace)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases, start)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, plab)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, region)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, remset)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ref)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ref, start)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, start)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, sweep)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task, start)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task, stats)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task, time)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, tlab))
+
 
 // The empty prefix, used when there's no prefix defined.
 template <LogTagType T0, LogTagType T1, LogTagType T2, LogTagType T3, LogTagType T4, LogTagType GuardTag = LogTag::__NO_TAG>
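
Every (gc, ...) entry binds GCId::print_prefix, which must match the prefixer signature documented above, size_t (*)(char*, size_t). A hypothetical prefixer of the same shape (the id lookup is stubbed; GCId::print_prefix presumably does the real equivalent):

    // Hypothetical; only the signature is prescribed by LOG_PREFIX.
    size_t example_print_prefix(char* buf, size_t len) {
      int written = jio_snprintf(buf, len, "GC(%u) ", 7u /* stand-in for the current GC id */);
      return written > 0 ? (size_t)written : 0;   // bytes written into the prefix buffer
    }
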
--- a/src/share/vm/logging/logTag.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/logging/logTag.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -31,13 +31,55 @@
 // (The tags 'all', 'disable' and 'help' are special tags that can
 // not be used in log calls, and should not be listed below.)
 #define LOG_TAG_LIST \
+  LOG_TAG(alloc) \
+  LOG_TAG(age) \
+  LOG_TAG(barrier) \
+  LOG_TAG(bot) \
+  LOG_TAG(census) \
+  LOG_TAG(classhisto) \
+  LOG_TAG(classinit) \
+  LOG_TAG(comp) \
+  LOG_TAG(compaction) \
+  LOG_TAG(cpu) \
+  LOG_TAG(cset) \
   LOG_TAG(defaultmethods) \
+  LOG_TAG(ergo) \
+  LOG_TAG(exit) \
+  LOG_TAG(freelist) \
   LOG_TAG(gc) \
+  LOG_TAG(heap) \
+  LOG_TAG(humongous) \
+  LOG_TAG(ihop) \
+  LOG_TAG(jni) \
+  LOG_TAG(liveness) \
   LOG_TAG(logging) \
+  LOG_TAG(marking) \
+  LOG_TAG(metaspace) \
+  LOG_TAG(phases) \
+  LOG_TAG(plab) \
+  LOG_TAG(promotion) \
+  LOG_TAG(ref) \
+  LOG_TAG(refine) \
+  LOG_TAG(region) \
+  LOG_TAG(remset) \
+  LOG_TAG(rt) \
   LOG_TAG(safepoint) \
+  LOG_TAG(scavenge) \
+  LOG_TAG(scrub) \
+  LOG_TAG(start) \
+  LOG_TAG(state) \
+  LOG_TAG(stats) \
+  LOG_TAG(stringdedup) \
+  LOG_TAG(survivor) \
+  LOG_TAG(svc) \
+  LOG_TAG(sweep) \
+  LOG_TAG(task) \
+  LOG_TAG(tlab) \
+  LOG_TAG(time) \
+  LOG_TAG(verify) \
   LOG_TAG(vmoperation)
 
-#define PREFIX_LOG_TAG(T) (LogTag::T)
+#define PREFIX_LOG_TAG(T) (LogTag::_##T)
 
 // Expand a set of log tags to their prefixed names.
 // For error detection purposes, the macro passes one more tag than what is supported.
@@ -46,7 +88,7 @@
                                                         PREFIX_LOG_TAG(T3), PREFIX_LOG_TAG(T4), PREFIX_LOG_TAG(T5)
 // The EXPAND_VARARGS macro is required for MSVC, or it will resolve the LOG_TAGS_EXPANDED macro incorrectly.
 #define EXPAND_VARARGS(x) x
-#define LOG_TAGS(...) EXPAND_VARARGS(LOG_TAGS_EXPANDED(__VA_ARGS__, __NO_TAG, __NO_TAG, __NO_TAG, __NO_TAG, __NO_TAG, __NO_TAG))
+#define LOG_TAGS(...) EXPAND_VARARGS(LOG_TAGS_EXPANDED(__VA_ARGS__, _NO_TAG, _NO_TAG, _NO_TAG, _NO_TAG, _NO_TAG, _NO_TAG))
 
 // Log tags are used to classify log messages.
 // Each log message can be assigned between 1 to LogTag::MaxTags number of tags.
@@ -62,7 +104,7 @@
 
   enum type {
     __NO_TAG,
-#define LOG_TAG(name) name,
+#define LOG_TAG(name) _##name,
     LOG_TAG_LIST
 #undef LOG_TAG
     Count
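
The underscore rename is what lets the padding line up: LOG_TAGS pads missing positions with _NO_TAG, and PREFIX_LOG_TAG pastes a leading underscore, so the padding resolves to the real __NO_TAG enumerator while ordinary tags get a prefix that keeps short names like time or exit from colliding with other identifiers. Expansion sketch:

    // Preprocessor expansion sketch (not compiled):
    //   LOG_TAGS(gc, tlab)
    //     -> (LogTag::_gc), (LogTag::_tlab), (LogTag::__NO_TAG), ...padded to six,
    // because PREFIX_LOG_TAG(T) is (LogTag::_##T) and '_' ## '_NO_TAG'
    // pastes to '__NO_TAG'.
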
--- a/src/share/vm/memory/allocation.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/memory/allocation.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -790,7 +790,7 @@
 
 ReallocMark::ReallocMark() {
 #ifdef ASSERT
-  Thread *thread = ThreadLocalStorage::get_thread_slow();
+  Thread *thread = Thread::current();
   _nesting = thread->resource_area()->nesting();
 #endif
 }
--- a/src/share/vm/memory/binaryTreeDictionary.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/memory/binaryTreeDictionary.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -29,6 +29,7 @@
 #include "memory/freeBlockDictionary.hpp"
 #include "memory/freeList.hpp"
 #include "memory/metachunk.hpp"
+#include "memory/resourceArea.hpp"
 #include "runtime/globals.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
@@ -1189,27 +1190,29 @@
   // Does walking the tree 3 times hurt?
   set_tree_surplus(splitSurplusPercent);
   set_tree_hints();
-  if (PrintGC && Verbose) {
-    report_statistics();
+  LogHandle(gc, freelist, stats) log;
+  if (log.is_trace()) {
+    ResourceMark rm;
+    report_statistics(log.trace_stream());
   }
   clear_tree_census();
 }
 
 // Print summary statistics
 template <class Chunk_t, class FreeList_t>
-void BinaryTreeDictionary<Chunk_t, FreeList_t>::report_statistics() const {
+void BinaryTreeDictionary<Chunk_t, FreeList_t>::report_statistics(outputStream* st) const {
   FreeBlockDictionary<Chunk_t>::verify_par_locked();
-  gclog_or_tty->print("Statistics for BinaryTreeDictionary:\n"
-         "------------------------------------\n");
+  st->print_cr("Statistics for BinaryTreeDictionary:");
+  st->print_cr("------------------------------------");
   size_t total_size = total_chunk_size(debug_only(NULL));
-  size_t    free_blocks = num_free_blocks();
-  gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
-  gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", max_chunk_size());
-  gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
+  size_t free_blocks = num_free_blocks();
+  st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
+  st->print_cr("Max   Chunk Size: " SIZE_FORMAT, max_chunk_size());
+  st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
   if (free_blocks > 0) {
-    gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
+    st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
   }
-  gclog_or_tty->print("Tree      Height: " SIZE_FORMAT "\n", tree_height());
+  st->print_cr("Tree      Height: " SIZE_FORMAT, tree_height());
 }
 
 // Print census information - counts, births, deaths, etc.
@@ -1229,22 +1232,27 @@
   FreeList_t* total() { return &_total; }
   size_t total_free() { return _total_free; }
   void do_list(FreeList<Chunk_t>* fl) {
+    LogHandle(gc, freelist, census) log;
+    outputStream* out = log.debug_stream();
     if (++_print_line >= 40) {
-      FreeList_t::print_labels_on(gclog_or_tty, "size");
+      ResourceMark rm;
+      FreeList_t::print_labels_on(out, "size");
       _print_line = 0;
     }
-    fl->print_on(gclog_or_tty);
-    _total_free +=            fl->count()            * fl->size()        ;
-    total()->set_count(      total()->count()       + fl->count()      );
+    fl->print_on(out);
+    _total_free += fl->count() * fl->size();
+    total()->set_count(total()->count() + fl->count());
   }
 
 #if INCLUDE_ALL_GCS
   void do_list(AdaptiveFreeList<Chunk_t>* fl) {
+    LogHandle(gc, freelist, census) log;
+    outputStream* out = log.debug_stream();
     if (++_print_line >= 40) {
-      FreeList_t::print_labels_on(gclog_or_tty, "size");
+      FreeList_t::print_labels_on(out, "size");
       _print_line = 0;
     }
-    fl->print_on(gclog_or_tty);
+    fl->print_on(out);
     _total_free +=           fl->count()             * fl->size()        ;
     total()->set_count(      total()->count()        + fl->count()      );
     total()->set_bfr_surp(   total()->bfr_surp()     + fl->bfr_surp()    );
@@ -1261,38 +1269,36 @@
 };
 
 template <class Chunk_t, class FreeList_t>
-void BinaryTreeDictionary<Chunk_t, FreeList_t>::print_dict_census(void) const {
+void BinaryTreeDictionary<Chunk_t, FreeList_t>::print_dict_census(outputStream* st) const {
 
-  gclog_or_tty->print("\nBinaryTree\n");
-  FreeList_t::print_labels_on(gclog_or_tty, "size");
+  st->print_cr("BinaryTree");
+  FreeList_t::print_labels_on(st, "size");
   PrintTreeCensusClosure<Chunk_t, FreeList_t> ptc;
   ptc.do_tree(root());
 
   FreeList_t* total = ptc.total();
-  FreeList_t::print_labels_on(gclog_or_tty, " ");
+  FreeList_t::print_labels_on(st, " ");
 }
 
 #if INCLUDE_ALL_GCS
 template <>
-void AFLBinaryTreeDictionary::print_dict_census(void) const {
+void AFLBinaryTreeDictionary::print_dict_census(outputStream* st) const {
 
-  gclog_or_tty->print("\nBinaryTree\n");
-  AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
+  st->print_cr("BinaryTree");
+  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
   PrintTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > ptc;
   ptc.do_tree(root());
 
   AdaptiveFreeList<FreeChunk>* total = ptc.total();
-  AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, " ");
-  total->print_on(gclog_or_tty, "TOTAL\t");
-  gclog_or_tty->print(
-              "total_free(words): " SIZE_FORMAT_W(16)
-              " growth: %8.5f  deficit: %8.5f\n",
-              ptc.total_free(),
-              (double)(total->split_births() + total->coal_births()
-                     - total->split_deaths() - total->coal_deaths())
-              /(total->prev_sweep() != 0 ? (double)total->prev_sweep() : 1.0),
-             (double)(total->desired() - total->count())
-             /(total->desired() != 0 ? (double)total->desired() : 1.0));
+  AdaptiveFreeList<FreeChunk>::print_labels_on(st, " ");
+  total->print_on(st, "TOTAL\t");
+  st->print_cr("total_free(words): " SIZE_FORMAT_W(16) " growth: %8.5f  deficit: %8.5f",
+               ptc.total_free(),
+               (double)(total->split_births() + total->coal_births()
+                      - total->split_deaths() - total->coal_deaths())
+               /(total->prev_sweep() != 0 ? (double)total->prev_sweep() : 1.0),
+              (double)(total->desired() - total->count())
+              /(total->desired() != 0 ? (double)total->desired() : 1.0));
 }
 #endif // INCLUDE_ALL_GCS
 
@@ -1311,7 +1317,7 @@
       FreeList_t::print_labels_on(_st, "size");
       _print_line = 0;
     }
-    fl->print_on(gclog_or_tty);
+    fl->print_on(_st);
     size_t sz = fl->size();
     for (Chunk_t* fc = fl->head(); fc != NULL;
          fc = fc->next()) {
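
Design note for this file: threading an outputStream* through report_statistics and print_dict_census, instead of writing to gclog_or_tty, lets one report body serve any sink. The two call shapes, reusing the LogHandle pattern from the set_tree_census hunk above:

    LogHandle(gc, freelist, stats) log;
    if (log.is_trace()) {
      ResourceMark rm;                                   // report may resource-allocate
      dictionary->report_statistics(log.trace_stream()); // unified logging sink
    }
    dictionary->report_statistics(tty);                  // or any other outputStream
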
--- a/src/share/vm/memory/binaryTreeDictionary.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/memory/binaryTreeDictionary.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -324,7 +324,7 @@
   void       clear_tree_census(void);
   // Print the statistics for all the lists in the tree.  Also may
   // print out summaries.
-  void       print_dict_census(void) const;
+  void       print_dict_census(outputStream* st) const;
   void       print_free_lists(outputStream* st) const;
 
   // For debugging.  Returns the sum of the _returned_bytes for
@@ -335,7 +335,7 @@
   // For debugging.  Return the total number of chunks in the dictionary.
   size_t     total_count()       PRODUCT_RETURN0;
 
-  void       report_statistics() const;
+  void       report_statistics(outputStream* st) const;
 
   void       verify() const;
 };
--- a/src/share/vm/memory/filemap.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/memory/filemap.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/classLoader.hpp"
+#include "classfile/compactHashtable.inline.hpp"
 #include "classfile/sharedClassUtil.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionaryShared.hpp"
@@ -953,11 +954,11 @@
 }
 
 void FileMapInfo::print_shared_spaces() {
-  gclog_or_tty->print_cr("Shared Spaces:");
+  tty->print_cr("Shared Spaces:");
   for (int i = 0; i < MetaspaceShared::n_regions; i++) {
     struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
     char *base = _header->region_addr(i);
-    gclog_or_tty->print("  %s " INTPTR_FORMAT "-" INTPTR_FORMAT,
+    tty->print("  %s " INTPTR_FORMAT "-" INTPTR_FORMAT,
                         shared_region_name[i],
                         p2i(base), p2i(base + si->_used));
   }
--- a/src/share/vm/memory/freeBlockDictionary.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/memory/freeBlockDictionary.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -89,11 +89,11 @@
     virtual size_t   total_count() = 0;
   )
 
-  virtual void       report_statistics() const {
-    gclog_or_tty->print("No statistics available");
+  virtual void       report_statistics(outputStream* st) const {
+    st->print_cr("No statistics available");
   }
 
-  virtual void       print_dict_census() const = 0;
+  virtual void       print_dict_census(outputStream* st) const = 0;
   virtual void       print_free_lists(outputStream* st) const = 0;
 
   virtual void       verify()         const = 0;
--- a/src/share/vm/memory/metaspace.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/memory/metaspace.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -25,6 +25,7 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/collectorPolicy.hpp"
 #include "gc/shared/gcLocker.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/filemap.hpp"
@@ -811,8 +812,10 @@
 BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()) {}
 
 BlockFreelist::~BlockFreelist() {
-  if (Verbose && TraceMetadataChunkAllocation) {
-    dictionary()->print_free_lists(gclog_or_tty);
+  LogHandle(gc, metaspace, freelist) log;
+  if (log.is_trace()) {
+    ResourceMark rm;
+    dictionary()->print_free_lists(log.trace_stream());
   }
   delete _dictionary;
 }
@@ -892,11 +895,11 @@
       "The committed memory doesn't match the expanded memory.");
 
   if (!is_available(chunk_word_size)) {
-    if (TraceMetadataChunkAllocation) {
-      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
-      // Dump some information about the virtual space that is nearly full
-      print_on(gclog_or_tty);
-    }
+    LogHandle(gc, metaspace, freelist) log;
+    log.debug("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
+    // Dump some information about the virtual space that is nearly full
+    ResourceMark rm;
+    print_on(log.debug_stream());
     return NULL;
   }
 
@@ -1231,9 +1234,11 @@
 #ifdef ASSERT
   new_entry->mangle();
 #endif
-  if (TraceMetavirtualspaceAllocation && Verbose) {
+  if (develop_log_is_enabled(Trace, gc, metaspace)) {
+    LogHandle(gc, metaspace) log;
     VirtualSpaceNode* vsl = current_virtual_space();
-    vsl->print_on(gclog_or_tty);
+    ResourceMark rm;
+    vsl->print_on(log.trace_stream());
   }
 }
 
@@ -1330,12 +1335,10 @@
 }
 
 void VirtualSpaceList::print_on(outputStream* st) const {
-  if (TraceMetadataChunkAllocation && Verbose) {
-    VirtualSpaceListIterator iter(virtual_space_list());
-    while (iter.repeat()) {
-      VirtualSpaceNode* node = iter.get_next();
-      node->print_on(st);
-    }
+  VirtualSpaceListIterator iter(virtual_space_list());
+  while (iter.repeat()) {
+    VirtualSpaceNode* node = iter.get_next();
+    node->print_on(st);
   }
 }
 
@@ -1497,17 +1500,10 @@
   minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                   MetaspaceSize);
 
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
-    gclog_or_tty->print_cr("  "
-                  "  minimum_free_percentage: %6.2f"
-                  "  maximum_used_percentage: %6.2f",
-                  minimum_free_percentage,
-                  maximum_used_percentage);
-    gclog_or_tty->print_cr("  "
-                  "   used_after_gc       : %6.1fKB",
-                  used_after_gc / (double) K);
-  }
+  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
+  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
+                           minimum_free_percentage, maximum_used_percentage);
+  log_trace(gc, metaspace)("     used_after_gc       : %6.1fKB", used_after_gc / (double) K);
 
 
   size_t shrink_bytes = 0;
@@ -1525,17 +1521,11 @@
       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                                new_capacity_until_GC,
                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
-      if (PrintGCDetails && Verbose) {
-        gclog_or_tty->print_cr("    expanding:"
-                      "  minimum_desired_capacity: %6.1fKB"
-                      "  expand_bytes: %6.1fKB"
-                      "  MinMetaspaceExpansion: %6.1fKB"
-                      "  new metaspace HWM:  %6.1fKB",
-                      minimum_desired_capacity / (double) K,
-                      expand_bytes / (double) K,
-                      MinMetaspaceExpansion / (double) K,
-                      new_capacity_until_GC / (double) K);
-      }
+      log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM:  %6.1fKB",
+                               minimum_desired_capacity / (double) K,
+                               expand_bytes / (double) K,
+                               MinMetaspaceExpansion / (double) K,
+                               new_capacity_until_GC / (double) K);
     }
     return;
   }
@@ -1555,18 +1545,10 @@
     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
     maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                     MetaspaceSize);
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("  "
-                             "  maximum_free_percentage: %6.2f"
-                             "  minimum_used_percentage: %6.2f",
-                             maximum_free_percentage,
-                             minimum_used_percentage);
-      gclog_or_tty->print_cr("  "
-                             "  minimum_desired_capacity: %6.1fKB"
-                             "  maximum_desired_capacity: %6.1fKB",
-                             minimum_desired_capacity / (double) K,
-                             maximum_desired_capacity / (double) K);
-    }
+    log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
+                             maximum_free_percentage, minimum_used_percentage);
+    log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
+                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
 
     assert(minimum_desired_capacity <= maximum_desired_capacity,
            "sanity check");
@@ -1592,23 +1574,10 @@
       } else {
         _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
       }
-      if (PrintGCDetails && Verbose) {
-        gclog_or_tty->print_cr("  "
-                      "  shrinking:"
-                      "  initSize: %.1fK"
-                      "  maximum_desired_capacity: %.1fK",
-                      MetaspaceSize / (double) K,
-                      maximum_desired_capacity / (double) K);
-        gclog_or_tty->print_cr("  "
-                      "  shrink_bytes: %.1fK"
-                      "  current_shrink_factor: %d"
-                      "  new shrink factor: %d"
-                      "  MinMetaspaceExpansion: %.1fK",
-                      shrink_bytes / (double) K,
-                      current_shrink_factor,
-                      _shrink_factor,
-                      MinMetaspaceExpansion / (double) K);
-      }
+      log_trace(gc, metaspace)("    shrinking:  initSize: %.1fK  maximum_desired_capacity: %.1fK",
+                               MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
+      log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
+                               shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
     }
   }
 
@@ -1638,10 +1607,7 @@
     if (_allocation_fail_alot_count > 0) {
       _allocation_fail_alot_count--;
     } else {
-      if (TraceMetadataChunkAllocation && Verbose) {
-        gclog_or_tty->print_cr("Metadata allocation failing for "
-                               "MetadataAllocationFailALot");
-      }
+      log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot");
       init_allocation_fail_alot_count();
       return true;
     }
@@ -1786,11 +1752,8 @@
     // Remove the chunk as the head of the list.
     free_list->remove_chunk(chunk);
 
-    if (TraceMetadataChunkAllocation && Verbose) {
-      gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
-                             PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
-                             p2i(free_list), p2i(chunk), chunk->word_size());
-    }
+    log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
+                                       p2i(free_list), p2i(chunk), chunk->word_size());
   } else {
     chunk = humongous_dictionary()->get_chunk(
       word_size,
@@ -1800,13 +1763,8 @@
       return NULL;
     }
 
-    if (TraceMetadataHumongousAllocation) {
-      size_t waste = chunk->word_size() - word_size;
-      gclog_or_tty->print_cr("Free list allocate humongous chunk size "
-                             SIZE_FORMAT " for requested size " SIZE_FORMAT
-                             " waste " SIZE_FORMAT,
-                             chunk->word_size(), word_size, waste);
-    }
+    log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
+                                    chunk->word_size(), word_size, chunk->word_size() - word_size);
   }
 
   // Chunk is being removed from the chunks free list.
@@ -1839,7 +1797,8 @@
   assert((word_size <= chunk->word_size()) ||
          list_index(chunk->word_size() == HumongousIndex),
          "Non-humongous variable sized chunk");
-  if (TraceMetadataChunkAllocation) {
+  LogHandle(gc, metaspace, freelist) log;
+  if (log.is_debug()) {
     size_t list_count;
     if (list_index(word_size) < HumongousIndex) {
       ChunkList* list = find_free_chunks_list(word_size);
@@ -1847,19 +1806,17 @@
     } else {
       list_count = humongous_dictionary()->total_count();
     }
-    gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
-                        PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
-                        p2i(this), p2i(chunk), chunk->word_size(), list_count);
-    locked_print_free_chunks(gclog_or_tty);
+    log.debug("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
+               p2i(this), p2i(chunk), chunk->word_size(), list_count);
+    ResourceMark rm;
+    locked_print_free_chunks(log.debug_stream());
   }
 
   return chunk;
 }
 
 void ChunkManager::print_on(outputStream* out) const {
-  if (PrintFLSStatistics != 0) {
-    const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
-  }
+  const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(out);
 }
 
 // SpaceManager methods
@@ -2039,14 +1996,12 @@
          "Size calculation is wrong, word_size " SIZE_FORMAT
          " chunk_word_size " SIZE_FORMAT,
          word_size, chunk_word_size);
-  if (TraceMetadataHumongousAllocation &&
-      SpaceManager::is_humongous(word_size)) {
-    gclog_or_tty->print_cr("Metadata humongous allocation:");
-    gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
-    gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
-                           chunk_word_size);
-    gclog_or_tty->print_cr("    chunk overhead " PTR_FORMAT,
-                           Metachunk::overhead());
+  LogHandle(gc, metaspace, alloc) log;
+  if (log.is_debug() && SpaceManager::is_humongous(word_size)) {
+    log.debug("Metadata humongous allocation:");
+    log.debug("  word_size " PTR_FORMAT, word_size);
+    log.debug("  chunk_word_size " PTR_FORMAT, chunk_word_size);
+    log.debug("    chunk overhead " PTR_FORMAT, Metachunk::overhead());
   }
   return chunk_word_size;
 }
@@ -2068,17 +2023,15 @@
          "Don't need to expand");
   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
 
-  if (TraceMetadataChunkAllocation && Verbose) {
+  if (log_is_enabled(Trace, gc, metaspace, freelist)) {
     size_t words_left = 0;
     size_t words_used = 0;
     if (current_chunk() != NULL) {
       words_left = current_chunk()->free_word_size();
       words_used = current_chunk()->used_word_size();
     }
-    gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
-                           " words " SIZE_FORMAT " words used " SIZE_FORMAT
-                           " words left",
-                            word_size, words_used, words_left);
+    log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left",
+                                       word_size, words_used, words_left);
   }
 
   // Get another chunk
@@ -2169,9 +2122,7 @@
     _chunks_in_use[i] = NULL;
   }
   _current_chunk = NULL;
-  if (TraceMetadataChunkAllocation && Verbose) {
-    gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, p2i(this));
-  }
+  log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
 }
 
 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
@@ -2213,9 +2164,11 @@
 
   dec_total_from_size_metrics();
 
-  if (TraceMetadataChunkAllocation && Verbose) {
-    gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, p2i(this));
-    locked_print_chunks_in_use_on(gclog_or_tty);
+  LogHandle(gc, metaspace, freelist) log;
+  if (log.is_trace()) {
+    log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
+    ResourceMark rm;
+    locked_print_chunks_in_use_on(log.trace_stream());
   }
 
   // Do not mangle freed Metachunks.  The chunk size inside Metachunks
@@ -2233,19 +2186,11 @@
   // free lists.  Each list is NULL terminated.
 
   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
-    if (TraceMetadataChunkAllocation && Verbose) {
-      gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s chunks to freelist",
-                             sum_count_in_chunks_in_use(i),
-                             chunk_size_name(i));
-    }
+    log.trace("returned " SIZE_FORMAT " %s chunks to freelist", sum_count_in_chunks_in_use(i), chunk_size_name(i));
     Metachunk* chunks = chunks_in_use(i);
     chunk_manager()->return_chunks(i, chunks);
     set_chunks_in_use(i, NULL);
-    if (TraceMetadataChunkAllocation && Verbose) {
-      gclog_or_tty->print_cr("updated freelist count " SSIZE_FORMAT " %s",
-                             chunk_manager()->free_chunks(i)->count(),
-                             chunk_size_name(i));
-    }
+    log.trace("updated freelist count " SSIZE_FORMAT " %s", chunk_manager()->free_chunks(i)->count(), chunk_size_name(i));
     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
   }
 
@@ -2254,12 +2199,9 @@
   // the current chunk but there are probably exceptions.
 
   // Humongous chunks
-  if (TraceMetadataChunkAllocation && Verbose) {
-    gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
-                            sum_count_in_chunks_in_use(HumongousIndex),
-                            chunk_size_name(HumongousIndex));
-    gclog_or_tty->print("Humongous chunk dictionary: ");
-  }
+  log.trace("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
+            sum_count_in_chunks_in_use(HumongousIndex), chunk_size_name(HumongousIndex));
+  log.trace("Humongous chunk dictionary: ");
   // Humongous chunks are never the current chunk.
   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
 
@@ -2267,11 +2209,7 @@
 #ifdef ASSERT
     humongous_chunks->set_is_tagged_free(true);
 #endif
-    if (TraceMetadataChunkAllocation && Verbose) {
-      gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
-                          p2i(humongous_chunks),
-                          humongous_chunks->word_size());
-    }
+    log.trace(PTR_FORMAT " (" SIZE_FORMAT ") ", p2i(humongous_chunks), humongous_chunks->word_size());
     assert(humongous_chunks->word_size() == (size_t)
            align_size_up(humongous_chunks->word_size(),
                              smallest_chunk_size()),
@@ -2283,12 +2221,7 @@
     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
     humongous_chunks = next_humongous_chunks;
   }
-  if (TraceMetadataChunkAllocation && Verbose) {
-    gclog_or_tty->cr();
-    gclog_or_tty->print_cr("updated dictionary count " SIZE_FORMAT " %s",
-                     chunk_manager()->humongous_dictionary()->total_count(),
-                     chunk_size_name(HumongousIndex));
-  }
+  log.trace("updated dictionary count " SIZE_FORMAT " %s", chunk_manager()->humongous_dictionary()->total_count(), chunk_size_name(HumongousIndex));
   chunk_manager()->slow_locked_verify();
 }
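
Note the pattern in this destructor: the LogHandle is hoisted once and the later log.trace(...) calls are unconditional, because a disabled level makes each call a cheap early-out; only the blocks that also build streams or ResourceMarks keep an explicit is_trace() guard. A standalone sketch of that cost model (illustrative, not HotSpot's Log API):

#include <cstdarg>
#include <cstdio>

struct Log {
  bool trace_enabled;
  void trace(const char* fmt, ...) {
    if (!trace_enabled) return;    // disabled calls cost only this check
    va_list ap;
    va_start(ap, fmt);
    std::vfprintf(stdout, fmt, ap);
    std::fputc('\n', stdout);
    va_end(ap);
  }
};

int main() {
  Log log{false};
  for (int i = 0; i < 4; ++i) {
    log.trace("returned %d chunks to freelist", i);  // no output, tiny cost
  }
}
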
 
@@ -2374,11 +2307,13 @@
   inc_size_metrics(new_chunk->word_size());
 
   assert(new_chunk->is_empty(), "Not ready for reuse");
-  if (TraceMetadataChunkAllocation && Verbose) {
-    gclog_or_tty->print("SpaceManager::add_chunk: " SIZE_FORMAT ") ",
-                        sum_count_in_chunks_in_use());
-    new_chunk->print_on(gclog_or_tty);
-    chunk_manager()->locked_print_free_chunks(gclog_or_tty);
+  LogHandle(gc, metaspace, freelist) log;
+  if (log.is_trace()) {
+    log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use());
+    ResourceMark rm;
+    outputStream* out = log.trace_stream();
+    new_chunk->print_on(out);
+    chunk_manager()->locked_print_free_chunks(out);
   }
 }
 
@@ -2403,10 +2338,10 @@
                                     medium_chunk_bunch());
   }
 
-  if (TraceMetadataHumongousAllocation && next != NULL &&
+  LogHandle(gc, metaspace, alloc) log;
+  if (log.is_debug() && next != NULL &&
       SpaceManager::is_humongous(next->word_size())) {
-    gclog_or_tty->print_cr("  new humongous chunk word size "
-                           PTR_FORMAT, next->word_size());
+    log.debug("  new humongous chunk word size " PTR_FORMAT, next->word_size());
   }
 
   return next;
@@ -2571,7 +2506,7 @@
     }
   }
 
-  if (TraceMetadataChunkAllocation && Verbose) {
+  if (log_is_enabled(Trace, gc, metaspace, freelist)) {
     block_freelists()->print_on(out);
   }
 
@@ -2756,27 +2691,10 @@
 }
 
 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
-  gclog_or_tty->print(", [Metaspace:");
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print(" "  SIZE_FORMAT
-                        "->" SIZE_FORMAT
-                        "("  SIZE_FORMAT ")",
-                        prev_metadata_used,
-                        used_bytes(),
-                        reserved_bytes());
-  } else {
-    gclog_or_tty->print(" "  SIZE_FORMAT "K"
-                        "->" SIZE_FORMAT "K"
-                        "("  SIZE_FORMAT "K)",
-                        prev_metadata_used/K,
-                        used_bytes()/K,
-                        reserved_bytes()/K);
-  }
-
-  gclog_or_tty->print("]");
+  log_info(gc, metaspace)("Metaspace: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
+                          prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
 }
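
The replacement above also drops the PrintGCDetails/Verbose fork: instead of two formats (bytes vs. kilobytes), the message is always emitted in K units at info level. A tiny sketch of the single canonical format (hypothetical byte counts):

#include <cstdio>
#include <cstddef>

static const size_t K = 1024;

static void print_metaspace_change(size_t prev, size_t used, size_t reserved) {
  // Always scale to kilobytes; one format string replaces the Verbose branch.
  std::printf("Metaspace: %zuK->%zuK(%zuK)\n", prev / K, used / K, reserved / K);
}

int main() {
  print_metaspace_change(10 * K * K, 12 * K * K, 64 * K * K);
  // prints: Metaspace: 10240K->12288K(65536K)
}
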
 
-// This is printed when PrintGCDetails
 void MetaspaceAux::print_on(outputStream* out) {
   Metaspace::MetadataType nct = Metaspace::NonClassType;
 
@@ -3133,8 +3051,10 @@
 
   initialize_class_space(metaspace_rs);
 
-  if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
-    print_compressed_class_space(gclog_or_tty, requested_addr);
+  if (develop_log_is_enabled(Trace, gc, metaspace)) {
+    LogHandle(gc, metaspace) log;
+    ResourceMark rm;
+    print_compressed_class_space(log.trace_stream(), requested_addr);
   }
 }
 
@@ -3256,10 +3176,8 @@
     assert(UseCompressedOops && UseCompressedClassPointers,
       "UseCompressedOops and UseCompressedClassPointers must be set");
     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
-    if (TraceMetavirtualspaceAllocation && Verbose) {
-      gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
-                             p2i(_space_list->current_virtual_space()->bottom()));
-    }
+    log_develop_trace(gc, metaspace)("Setting_narrow_klass_base to Address: " PTR_FORMAT,
+                                     p2i(_space_list->current_virtual_space()->bottom()));
 
     Universe::set_narrow_klass_shift(0);
 #endif // _LP64
@@ -3446,10 +3364,7 @@
   if (incremented) {
     tracer()->report_gc_threshold(before, after,
                                   MetaspaceGCThresholdUpdater::ExpandAndAllocate);
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
-          " to " SIZE_FORMAT, before, after);
-    }
+    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
   }
 
   return res;
@@ -3612,13 +3527,15 @@
   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
 
   // If result is still null, we are out of memory.
-  if (Verbose && TraceMetadataChunkAllocation) {
-    gclog_or_tty->print_cr("Metaspace allocation failed for size "
-        SIZE_FORMAT, word_size);
+  LogHandle(gc, metaspace, freelist) log;
+  if (log.is_trace()) {
+    log.trace("Metaspace allocation failed for size " SIZE_FORMAT, word_size);
+    ResourceMark rm;
+    outputStream* out = log.trace_stream();
     if (loader_data->metaspace_or_null() != NULL) {
-      loader_data->dump(gclog_or_tty);
+      loader_data->dump(out);
     }
-    MetaspaceAux::dump(gclog_or_tty);
+    MetaspaceAux::dump(out);
   }
 
   bool out_of_compressed_class_space = false;
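
Usage note: with these tag sets in place, the output formerly toggled by -XX:+TraceMetadataChunkAllocation and -XX:+TraceMetadataHumongousAllocation is selected through the unified -Xlog framework instead, along the lines of (selectors taken from the tags introduced above):

  java -Xlog:gc+metaspace+freelist=trace -version
  java -Xlog:gc+metaspace+alloc=debug -version
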
--- a/src/share/vm/memory/resourceArea.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/memory/resourceArea.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -121,7 +121,7 @@
     debug_only(_area->_nesting++;)
     assert( _area->_nesting > 0, "must stack allocate RMs" );
 #ifdef ASSERT
-    Thread* thread = ThreadLocalStorage::thread();
+    Thread* thread = Thread::current_or_null();
     if (thread != NULL) {
       _thread = thread;
       _previous_resource_mark = thread->current_resource_mark();
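
Thread::current_or_null() replaces the ThreadLocalStorage lookup here precisely because a ResourceMark can be constructed before the current thread is fully attached, so the ASSERT block must tolerate a NULL result. A standalone stand-in for that contract (illustrative, not the real Thread class):

struct Thread {
  static thread_local Thread* _current;
  static Thread* current_or_null() { return _current; }  // may be nullptr
};
thread_local Thread* Thread::_current = nullptr;

int main() {
  Thread* t = Thread::current_or_null();
  // Early in startup there may be no attached thread; callers must check.
  return (t == nullptr) ? 0 : 1;
}
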
--- a/src/share/vm/memory/universe.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/memory/universe.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -36,8 +36,10 @@
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/generation.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/space.hpp"
 #include "interpreter/interpreter.hpp"
+#include "logging/log.hpp"
 #include "memory/filemap.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
@@ -72,6 +74,7 @@
 #include "utilities/events.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/macros.hpp"
+#include "utilities/ostream.hpp"
 #include "utilities/preserveException.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc/cms/cmsCollectorPolicy.hpp"
@@ -122,6 +125,7 @@
 oop Universe::_out_of_memory_error_array_size         = NULL;
 oop Universe::_out_of_memory_error_gc_overhead_limit  = NULL;
 oop Universe::_out_of_memory_error_realloc_objects    = NULL;
+oop Universe::_delayed_stack_overflow_error_message   = NULL;
 objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
 volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
 bool Universe::_verify_in_progress                    = false;
@@ -197,7 +201,8 @@
   f->do_oop((oop*)&_out_of_memory_error_array_size);
   f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
   f->do_oop((oop*)&_out_of_memory_error_realloc_objects);
-    f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
+  f->do_oop((oop*)&_delayed_stack_overflow_error_message);
+  f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
   f->do_oop((oop*)&_null_ptr_exception_instance);
   f->do_oop((oop*)&_arithmetic_exception_instance);
   f->do_oop((oop*)&_virtual_machine_error_instance);
@@ -489,7 +494,7 @@
   has_run_finalizers_on_exit = true;
 
   // Called on VM exit. This ought to be run in a separate thread.
-  if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
+  log_trace(ref)("Callback to run finalizers on exit");
   {
     PRESERVE_EXCEPTION_MARK;
     KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
@@ -713,6 +718,7 @@
   if (status != JNI_OK) {
     return status;
   }
+  log_info(gc)("Using %s", _collectedHeap->name());
 
   ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
 
@@ -905,6 +911,12 @@
       k_h->allocate_instance(CHECK_false);
     Universe::_out_of_memory_error_realloc_objects = k_h->allocate_instance(CHECK_false);
 
+    // Setup preallocated cause message for delayed StackOverflowError
+    if (StackReservedPages > 0) {
+      Universe::_delayed_stack_overflow_error_message =
+        java_lang_String::create_oop_from_str("Delayed StackOverflowError due to ReservedStackAccess annotated method", CHECK_false);
+    }
+
     // Setup preallocated NullPointerException
     // (this is currently used for a cheap & dirty solution in compiler exception handling)
     k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false);
@@ -1059,18 +1071,9 @@
   _base_vtable_size = ClassLoader::compute_Object_vtable();
 }
 
-
-void Universe::print() {
-  print_on(gclog_or_tty);
-}
-
-void Universe::print_on(outputStream* st, bool extended) {
+void Universe::print_on(outputStream* st) {
   st->print_cr("Heap");
-  if (!extended) {
-    heap()->print_on(st);
-  } else {
-    heap()->print_extended_on(st);
-  }
+  heap()->print_on(st);
 }
 
 void Universe::print_heap_at_SIGBREAK() {
@@ -1082,30 +1085,25 @@
   }
 }
 
-void Universe::print_heap_before_gc(outputStream* st, bool ignore_extended) {
-  st->print_cr("{Heap before GC invocations=%u (full %u):",
-               heap()->total_collections(),
-               heap()->total_full_collections());
-  if (!PrintHeapAtGCExtended || ignore_extended) {
-    heap()->print_on(st);
-  } else {
-    heap()->print_extended_on(st);
+void Universe::print_heap_before_gc() {
+  LogHandle(gc, heap) log;
+  if (log.is_trace()) {
+    log.trace("Heap before GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections());
+    ResourceMark rm;
+    heap()->print_on(log.trace_stream());
   }
 }
 
-void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
-  st->print_cr("Heap after GC invocations=%u (full %u):",
-               heap()->total_collections(),
-               heap()->total_full_collections());
-  if (!PrintHeapAtGCExtended || ignore_extended) {
-    heap()->print_on(st);
-  } else {
-    heap()->print_extended_on(st);
+void Universe::print_heap_after_gc() {
+  LogHandle(gc, heap) log;
+  if (log.is_trace()) {
+    log.trace("Heap after GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections());
+    ResourceMark rm;
+    heap()->print_on(log.trace_stream());
   }
-  st->print_cr("}");
 }
 
-void Universe::verify(VerifyOption option, const char* prefix, bool silent) {
+void Universe::verify(VerifyOption option, const char* prefix) {
   // The use of _verify_in_progress is a temporary work around for
   // 6320749.  Don't bother with a creating a class to set and clear
   // it since it is only used in this method and the control flow is
@@ -1122,36 +1120,35 @@
   HandleMark hm;  // Handles created during verification can be zapped
   _verify_count++;
 
-  if (!silent) gclog_or_tty->print("%s", prefix);
-  if (!silent) gclog_or_tty->print("[Verifying ");
-  if (!silent) gclog_or_tty->print("threads ");
+  FormatBuffer<> title("Verifying %s", prefix);
+  GCTraceTime(Info, gc, verify) tm(title.buffer());
+  log_debug(gc, verify)("Threads");
   Threads::verify();
-  if (!silent) gclog_or_tty->print("heap ");
-  heap()->verify(silent, option);
-  if (!silent) gclog_or_tty->print("syms ");
+  log_debug(gc, verify)("Heap");
+  heap()->verify(option);
+  log_debug(gc, verify)("SymbolTable");
   SymbolTable::verify();
-  if (!silent) gclog_or_tty->print("strs ");
+  log_debug(gc, verify)("StringTable");
   StringTable::verify();
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    if (!silent) gclog_or_tty->print("zone ");
+    log_debug(gc, verify)("CodeCache");
     CodeCache::verify();
   }
-  if (!silent) gclog_or_tty->print("dict ");
+  log_debug(gc, verify)("SystemDictionary");
   SystemDictionary::verify();
 #ifndef PRODUCT
-  if (!silent) gclog_or_tty->print("cldg ");
+  log_debug(gc, verify)("ClassLoaderDataGraph");
   ClassLoaderDataGraph::verify();
 #endif
-  if (!silent) gclog_or_tty->print("metaspace chunks ");
+  log_debug(gc, verify)("MetaspaceAux");
   MetaspaceAux::verify_free_chunks();
-  if (!silent) gclog_or_tty->print("hand ");
+  log_debug(gc, verify)("JNIHandles");
   JNIHandles::verify();
-  if (!silent) gclog_or_tty->print("C-heap ");
+  log_debug(gc, verify)("C-heap");
   os::check_heap();
-  if (!silent) gclog_or_tty->print("code cache ");
+  log_debug(gc, verify)("CodeCache Oops");
   CodeCache::verify_oops();
-  if (!silent) gclog_or_tty->print_cr("]");
 
   _verify_in_progress = false;
 }
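
The verify() rewrite swaps the hand-threaded "silent" flag for two mechanisms: a GCTraceTime scope that brackets the whole phase, and per-component log_debug(gc, verify) lines that the log configuration can switch on independently. A standalone sketch of the RAII bracket (illustrative; GCTraceTime itself carries GC-specific plumbing):

#include <chrono>
#include <cstdio>

struct TraceScope {
  const char* title;
  std::chrono::steady_clock::time_point start;
  explicit TraceScope(const char* t)
      : title(t), start(std::chrono::steady_clock::now()) {}
  ~TraceScope() {  // destructor logs the phase and its duration
    auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                  std::chrono::steady_clock::now() - start).count();
    std::printf("%s %lldms\n", title, static_cast<long long>(ms));
  }
};

int main() {
  TraceScope tm("Verifying");
  std::printf("Threads\n");  // each component now logs its own line
  std::printf("Heap\n");
}
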
--- a/src/share/vm/memory/universe.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/memory/universe.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -159,6 +159,9 @@
   static oop          _out_of_memory_error_gc_overhead_limit;
   static oop          _out_of_memory_error_realloc_objects;
 
+  // preallocated cause message for delayed StackOverflowError
+  static oop          _delayed_stack_overflow_error_message;
+
   static Array<int>*       _the_empty_int_array;    // Canonicalized int array
   static Array<u2>*        _the_empty_short_array;  // Canonicalized short array
   static Array<Klass*>*  _the_empty_klass_array;  // Canonicalized klass obj array
@@ -339,6 +342,7 @@
   static oop out_of_memory_error_array_size()         { return gen_out_of_memory_error(_out_of_memory_error_array_size); }
   static oop out_of_memory_error_gc_overhead_limit()  { return gen_out_of_memory_error(_out_of_memory_error_gc_overhead_limit);  }
   static oop out_of_memory_error_realloc_objects()    { return gen_out_of_memory_error(_out_of_memory_error_realloc_objects);  }
+  static oop delayed_stack_overflow_error_message()   { return _delayed_stack_overflow_error_message; }
 
   // Accessors needed for fast allocation
   static Klass** boolArrayKlassObj_addr()           { return &_boolArrayKlassObj;   }
@@ -460,26 +464,19 @@
 
   // Debugging
   static bool verify_in_progress() { return _verify_in_progress; }
-  static void verify(VerifyOption option, const char* prefix, bool silent = VerifySilently);
-  static void verify(const char* prefix, bool silent = VerifySilently) {
-    verify(VerifyOption_Default, prefix, silent);
+  static void verify(VerifyOption option, const char* prefix);
+  static void verify(const char* prefix) {
+    verify(VerifyOption_Default, prefix);
   }
-  static void verify(bool silent = VerifySilently) {
-    verify("", silent);
+  static void verify() {
+    verify("");
   }
 
   static int  verify_count()       { return _verify_count; }
-  // The default behavior is to call print_on() on gclog_or_tty.
-  static void print();
-  // The extended parameter determines which method on the heap will
-  // be called: print_on() (extended == false) or print_extended_on()
-  // (extended == true).
-  static void print_on(outputStream* st, bool extended = false);
+  static void print_on(outputStream* st);
   static void print_heap_at_SIGBREAK();
-  static void print_heap_before_gc() { print_heap_before_gc(gclog_or_tty); }
-  static void print_heap_after_gc()  { print_heap_after_gc(gclog_or_tty); }
-  static void print_heap_before_gc(outputStream* st, bool ignore_extended = false);
-  static void print_heap_after_gc(outputStream* st, bool ignore_extended = false);
+  static void print_heap_before_gc();
+  static void print_heap_after_gc();
 
   // Change the number of dummy objects kept reachable by the full gc dummy
   // array; this should trigger relocation in a sliding compaction collector.
--- a/src/share/vm/oops/arrayKlass.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/arrayKlass.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -71,7 +71,9 @@
   return super()->find_field(name, sig, fd);
 }
 
-Method* ArrayKlass::uncached_lookup_method(Symbol* name, Symbol* signature, OverpassLookupMode overpass_mode) const {
+Method* ArrayKlass::uncached_lookup_method(const Symbol* name,
+                                           const Symbol* signature,
+                                           OverpassLookupMode overpass_mode) const {
   // There are no methods in an array klass but the super class (Object) has some
   assert(super(), "super klass must be present");
   // Always ignore overpass methods in superclasses, although technically the
@@ -80,19 +82,18 @@
   return super()->uncached_lookup_method(name, signature, Klass::skip_overpass);
 }
 
-ArrayKlass::ArrayKlass(Symbol* name) {
-  set_name(name);
-
-  set_super(Universe::is_bootstrapping() ? (Klass*)NULL : SystemDictionary::Object_klass());
-  set_layout_helper(Klass::_lh_neutral_value);
-  set_dimension(1);
-  set_higher_dimension(NULL);
-  set_lower_dimension(NULL);
+ArrayKlass::ArrayKlass(Symbol* name) :
+  _dimension(1),
+  _higher_dimension(NULL),
+  _lower_dimension(NULL),
   // Arrays don't add any new methods, so their vtable is the same size as
   // the vtable of klass Object.
-  int vtable_size = Universe::base_vtable_size();
-  set_vtable_length(vtable_size);
-  set_is_cloneable(); // All arrays are considered to be cloneable (See JLS 20.1.5)
+  _vtable_len(Universe::base_vtable_size()) {
+    set_name(name);
+    set_super(Universe::is_bootstrapping() ? (Klass*)NULL : SystemDictionary::Object_klass());
+    set_layout_helper(Klass::_lh_neutral_value);
+    set_is_cloneable(); // All arrays are considered to be cloneable (See JLS 20.1.5)
+    TRACE_INIT_ID(this);
 }
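
The constructor rewrite above moves the ArrayKlass-local fields into the member-initializer list and leaves only base-class state for the body. A minimal sketch of that refactoring shape (hypothetical types):

struct Base {
  int vtable_len;
};

struct ArrayLike : Base {
  int dimension;
  void* higher;
  void* lower;
  explicit ArrayLike(int vtbl)
      : dimension(1), higher(nullptr), lower(nullptr) {  // own fields here
    vtable_len = vtbl;  // base-class members still assigned in the body
  }
};

int main() {
  ArrayLike a(5);
  return (a.dimension == 1 && a.vtable_len == 5) ? 0 : 1;
}
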
 
 
--- a/src/share/vm/oops/arrayKlass.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/arrayKlass.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -82,12 +82,17 @@
   Klass* find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const;
 
   // Lookup operations
-  Method* uncached_lookup_method(Symbol* name, Symbol* signature, OverpassLookupMode overpass_mode) const;
+  Method* uncached_lookup_method(const Symbol* name,
+                                 const Symbol* signature,
+                                 OverpassLookupMode overpass_mode) const;
 
-  // Casting from Klass*
   static ArrayKlass* cast(Klass* k) {
+    return const_cast<ArrayKlass*>(cast(const_cast<const Klass*>(k)));
+  }
+
+  static const ArrayKlass* cast(const Klass* k) {
     assert(k->is_array_klass(), "cast to ArrayKlass");
-    return static_cast<ArrayKlass*>(k);
+    return static_cast<const ArrayKlass*>(k);
   }
 
   GrowableArray<Klass*>* compute_secondary_supers(int num_extra_slots);
--- a/src/share/vm/oops/constantPool.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/constantPool.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -60,25 +60,33 @@
   return new (loader_data, size, false, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags);
 }
 
-ConstantPool::ConstantPool(Array<u1>* tags) {
-  set_length(tags->length());
-  set_tags(NULL);
-  set_cache(NULL);
-  set_reference_map(NULL);
-  set_resolved_references(NULL);
-  set_operands(NULL);
-  set_pool_holder(NULL);
-  set_flags(0);
+#ifdef ASSERT
 
-  // only set to non-zero if constant pool is merged by RedefineClasses
-  set_version(0);
+// MetaspaceObj allocation invariant is calloc equivalent memory
+// simple verification of this here (JVM_CONSTANT_Invalid == 0 )
+static bool tag_array_is_zero_initialized(Array<u1>* tags) {
+  assert(tags != NULL, "invariant");
+  const int length = tags->length();
+  for (int index = 0; index < length; ++index) {
+    if (JVM_CONSTANT_Invalid != tags->at(index)) {
+      return false;
+    }
+  }
+  return true;
+}
 
-  // initialize tag array
-  int length = tags->length();
-  for (int index = 0; index < length; index++) {
-    tags->at_put(index, JVM_CONSTANT_Invalid);
-  }
-  set_tags(tags);
+#endif
+
+ConstantPool::ConstantPool(Array<u1>* tags) :
+  _tags(tags),
+  _length(tags->length()) {
+
+    assert(_tags != NULL, "invariant");
+    assert(tags->length() == _length, "invariant");
+    assert(tag_array_is_zero_initialized(tags), "invariant");
+    assert(0 == _flags, "invariant");
+    assert(0 == version(), "invariant");
+    assert(NULL == _pool_holder, "invariant");
 }
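
The rewritten constructor relies on the MetaspaceObj allocation being calloc-equivalent: since JVM_CONSTANT_Invalid is 0, freshly allocated tag memory is already in the desired state, so the old initialization loop becomes a set of debug-only asserts. A standalone analogue of the invariant being checked (with plain calloc standing in for the metaspace allocator):

#include <cassert>
#include <cstdlib>

enum { TAG_INVALID = 0 };  // mirrors JVM_CONSTANT_Invalid == 0

static bool all_invalid(const unsigned char* tags, int len) {
  for (int i = 0; i < len; ++i) {
    if (tags[i] != TAG_INVALID) return false;
  }
  return true;
}

int main() {
  const int len = 16;
  unsigned char* tags = static_cast<unsigned char*>(std::calloc(len, 1));
  assert(tags != NULL);
  assert(all_invalid(tags, len));  // holds because calloc zero-fills
  std::free(tags);
}
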
 
 void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) {
@@ -466,7 +474,7 @@
 }
 
 
-Symbol* ConstantPool::klass_name_at(int which) {
+Symbol* ConstantPool::klass_name_at(int which) const {
   assert(tag_at(which).is_unresolved_klass() || tag_at(which).is_klass(),
          "Corrupted constant pool");
   // A resolved constantPool entry will contain a Klass*, otherwise a Symbol*.
@@ -497,7 +505,7 @@
   return unresolved_string_at(which)->as_C_string();
 }
 
-BasicType ConstantPool::basic_type_for_signature_at(int which) {
+BasicType ConstantPool::basic_type_for_signature_at(int which) const {
   return FieldType::basic_type(symbol_at(which));
 }
 
--- a/src/share/vm/oops/constantPool.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/constantPool.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -117,7 +117,7 @@
  private:
   intptr_t* base() const { return (intptr_t*) (((char*) this) + sizeof(ConstantPool)); }
 
-  CPSlot slot_at(int which) {
+  CPSlot slot_at(int which) const {
     assert(is_within_bounds(which), "index out of bounds");
     // Uses volatile because the klass slot changes without a lock.
     volatile intptr_t adr = (intptr_t)OrderAccess::load_ptr_acquire(obj_at_addr_raw(which));
@@ -350,7 +350,7 @@
     return klass_at_impl(h_this, which, false, THREAD);
   }
 
-  Symbol* klass_name_at(int which);  // Returns the name, w/o resolving.
+  Symbol* klass_name_at(int which) const;  // Returns the name, w/o resolving.
 
   Klass* resolved_klass_at(int which) const {  // Used by Compiler
     guarantee(tag_at(which).is_klass(), "Corrupted constant pool");
@@ -385,7 +385,7 @@
     return *((jdouble*)&tmp);
   }
 
-  Symbol* symbol_at(int which) {
+  Symbol* symbol_at(int which) const {
     assert(tag_at(which).is_utf8(), "Corrupted constant pool");
     return *symbol_at_addr(which);
   }
@@ -669,7 +669,7 @@
   int name_ref_index_at(int which_nt);            // ==  low-order jshort of name_and_type_at(which_nt)
   int signature_ref_index_at(int which_nt);       // == high-order jshort of name_and_type_at(which_nt)
 
-  BasicType basic_type_for_signature_at(int which);
+  BasicType basic_type_for_signature_at(int which) const;
 
   // Resolve string constants (to prevent allocation during compilation)
   void resolve_string_constants(TRAPS) {
--- a/src/share/vm/oops/instanceClassLoaderKlass.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/instanceClassLoaderKlass.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -29,6 +29,8 @@
 #include "oops/instanceKlass.hpp"
 #include "utilities/macros.hpp"
 
+class ClassFileParser;
+
 // An InstanceClassLoaderKlass is a specialization of the InstanceKlass. It does
 // not add any field.  It is added to walk the dependencies for the class loader
 // key that this class loader points to.  This is how the loader_data graph is
@@ -38,11 +40,8 @@
 class InstanceClassLoaderKlass: public InstanceKlass {
   friend class VMStructs;
   friend class InstanceKlass;
-
-  // Constructor
-  InstanceClassLoaderKlass(int vtable_len, int itable_len, int static_field_size, int nonstatic_oop_map_size, ReferenceType rt, AccessFlags access_flags, bool is_anonymous)
-    : InstanceKlass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_size,
-                    InstanceKlass::_misc_kind_class_loader, rt, access_flags, is_anonymous) {}
+ private:
+  InstanceClassLoaderKlass(const ClassFileParser& parser) : InstanceKlass(parser, InstanceKlass::_misc_kind_class_loader) {}
 
 public:
   InstanceClassLoaderKlass() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); }
--- a/src/share/vm/oops/instanceKlass.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/instanceKlass.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/classFileParser.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/verifier.hpp"
@@ -63,6 +64,7 @@
 #include "services/threadService.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/macros.hpp"
+#include "logging/log.hpp"
 #ifdef COMPILER1
 #include "c1/c1_Compiler.hpp"
 #endif
@@ -113,47 +115,57 @@
 
 volatile int InstanceKlass::_total_instanceKlass_count = 0;
 
-InstanceKlass* InstanceKlass::allocate_instance_klass(
-                                              ClassLoaderData* loader_data,
-                                              int vtable_len,
-                                              int itable_len,
-                                              int static_field_size,
-                                              int nonstatic_oop_map_size,
-                                              ReferenceType rt,
-                                              AccessFlags access_flags,
-                                              Symbol* name,
-                                              Klass* super_klass,
-                                              bool is_anonymous,
-                                              TRAPS) {
-
-  int size = InstanceKlass::size(vtable_len, itable_len, nonstatic_oop_map_size,
-                                 access_flags.is_interface(), is_anonymous);
+static inline bool is_class_loader(const Symbol* class_name,
+                                   const ClassFileParser& parser) {
+  assert(class_name != NULL, "invariant");
+
+  if (class_name == vmSymbols::java_lang_ClassLoader()) {
+    return true;
+  }
+
+  if (SystemDictionary::ClassLoader_klass_loaded()) {
+    const Klass* const super_klass = parser.super_klass();
+    if (super_klass != NULL) {
+      if (super_klass->is_subtype_of(SystemDictionary::ClassLoader_klass())) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+InstanceKlass* InstanceKlass::allocate_instance_klass(const ClassFileParser& parser, TRAPS) {
+  const int size = InstanceKlass::size(parser.vtable_size(),
+                                       parser.itable_size(),
+                                       nonstatic_oop_map_size(parser.total_oop_map_count()),
+                                       parser.is_interface(),
+                                       parser.is_anonymous());
+
+  const Symbol* const class_name = parser.class_name();
+  assert(class_name != NULL, "invariant");
+  ClassLoaderData* loader_data = parser.loader_data();
+  assert(loader_data != NULL, "invariant");
+
+  InstanceKlass* ik;
 
   // Allocation
-  InstanceKlass* ik;
-  if (rt == REF_NONE) {
-    if (name == vmSymbols::java_lang_Class()) {
-      ik = new (loader_data, size, THREAD) InstanceMirrorKlass(
-        vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt,
-        access_flags, is_anonymous);
-    } else if (name == vmSymbols::java_lang_ClassLoader() ||
-          (SystemDictionary::ClassLoader_klass_loaded() &&
-          super_klass != NULL &&
-          super_klass->is_subtype_of(SystemDictionary::ClassLoader_klass()))) {
-      ik = new (loader_data, size, THREAD) InstanceClassLoaderKlass(
-        vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt,
-        access_flags, is_anonymous);
-    } else {
-      // normal class
-      ik = new (loader_data, size, THREAD) InstanceKlass(
-        vtable_len, itable_len, static_field_size, nonstatic_oop_map_size,
-        InstanceKlass::_misc_kind_other, rt, access_flags, is_anonymous);
+  if (REF_NONE == parser.reference_type()) {
+    if (class_name == vmSymbols::java_lang_Class()) {
+      // mirror
+      ik = new (loader_data, size, THREAD) InstanceMirrorKlass(parser);
+    }
+    else if (is_class_loader(class_name, parser)) {
+      // class loader
+      ik = new (loader_data, size, THREAD) InstanceClassLoaderKlass(parser);
     }
-  } else {
-    // reference klass
-    ik = new (loader_data, size, THREAD) InstanceRefKlass(
-        vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt,
-        access_flags, is_anonymous);
+    else {
+      // normal
+      ik = new (loader_data, size, THREAD) InstanceKlass(parser, InstanceKlass::_misc_kind_other);
+    }
+  }
+  else {
+    // reference
+    ik = new (loader_data, size, THREAD) InstanceRefKlass(parser);
   }
 
   // Check for pending exception before adding to the loader data and incrementing
@@ -162,17 +174,21 @@
     return NULL;
   }
 
+  assert(ik != NULL, "invariant");
+
+  const bool publicize = !parser.is_internal();
+
   // Add all classes to our internal class loader list here,
   // including classes in the bootstrap (NULL) class loader.
-  loader_data->add_class(ik);
-
+  loader_data->add_class(ik, publicize);
   Atomic::inc(&_total_instanceKlass_count);
+
   return ik;
 }
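
The factory above now reads all of its inputs from the ClassFileParser and dispatches on them: reference types get an InstanceRefKlass, java.lang.Class a mirror klass, class loaders (by name or supertype) a loader klass, and everything else a plain InstanceKlass. A hedged standalone sketch of that dispatch shape (all types and names illustrative):

#include <string>

struct Klass { virtual ~Klass() {} };
struct NormalKlass : Klass {};
struct MirrorKlass : Klass {};
struct LoaderKlass : Klass {};
struct RefKlass : Klass {};

// Parser-derived facts select the concrete kind, exactly one branch each.
Klass* allocate(bool is_reference,
                const std::string& name,
                bool is_classloader_subtype) {
  if (is_reference) return new RefKlass();
  if (name == "java/lang/Class") return new MirrorKlass();
  if (name == "java/lang/ClassLoader" || is_classloader_subtype) {
    return new LoaderKlass();
  }
  return new NormalKlass();
}

int main() {
  Klass* k = allocate(false, "java/lang/String", false);
  delete k;
}
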
 
 
 // copy method ordering from resource area to Metaspace
-void InstanceKlass::copy_method_ordering(intArray* m, TRAPS) {
+void InstanceKlass::copy_method_ordering(const intArray* m, TRAPS) {
   if (m != NULL) {
     // allocate a new array and copy contents (memcpy?)
     _method_ordering = MetadataFactory::new_array<int>(class_loader_data(), m->length(), CHECK);
@@ -192,79 +208,23 @@
   return vtable_indices;
 }
 
-InstanceKlass::InstanceKlass(int vtable_len,
-                             int itable_len,
-                             int static_field_size,
-                             int nonstatic_oop_map_size,
-                             unsigned kind,
-                             ReferenceType rt,
-                             AccessFlags access_flags,
-                             bool is_anonymous) {
-  No_Safepoint_Verifier no_safepoint; // until k becomes parsable
-
-  int iksize = InstanceKlass::size(vtable_len, itable_len, nonstatic_oop_map_size,
-                                   access_flags.is_interface(), is_anonymous);
-  set_vtable_length(vtable_len);
-  set_itable_length(itable_len);
-  set_static_field_size(static_field_size);
-  set_nonstatic_oop_map_size(nonstatic_oop_map_size);
-  set_access_flags(access_flags);
-  _misc_flags = 0;  // initialize to zero
-  set_kind(kind);
-  set_is_anonymous(is_anonymous);
-  assert(size() == iksize, "wrong size for object");
-
-  set_array_klasses(NULL);
-  set_methods(NULL);
-  set_method_ordering(NULL);
-  set_default_methods(NULL);
-  set_default_vtable_indices(NULL);
-  set_local_interfaces(NULL);
-  set_transitive_interfaces(NULL);
-  init_implementor();
-  set_fields(NULL, 0);
-  set_constants(NULL);
-  set_class_loader_data(NULL);
-  set_source_file_name_index(0);
-  set_source_debug_extension(NULL, 0);
-  set_array_name(NULL);
-  set_inner_classes(NULL);
-  set_static_oop_field_count(0);
-  set_nonstatic_field_size(0);
-  set_is_marked_dependent(false);
-  _dep_context = DependencyContext::EMPTY;
-  set_init_state(InstanceKlass::allocated);
-  set_init_thread(NULL);
-  set_reference_type(rt);
-  set_oop_map_cache(NULL);
-  set_jni_ids(NULL);
-  set_osr_nmethods_head(NULL);
-  set_breakpoints(NULL);
-  init_previous_versions();
-  set_generic_signature_index(0);
-  release_set_methods_jmethod_ids(NULL);
-  set_annotations(NULL);
-  set_jvmti_cached_class_field_map(NULL);
-  set_initial_method_idnum(0);
-  set_jvmti_cached_class_field_map(NULL);
-  set_cached_class_file(NULL);
-  set_initial_method_idnum(0);
-  set_minor_version(0);
-  set_major_version(0);
-  NOT_PRODUCT(_verify_count = 0;)
-
-  // initialize the non-header words to zero
-  intptr_t* p = (intptr_t*)this;
-  for (int index = InstanceKlass::header_size(); index < iksize; index++) {
-    p[index] = NULL_WORD;
-  }
-
-  // Set temporary value until parseClassFile updates it with the real instance
-  // size.
-  set_layout_helper(Klass::instance_layout_helper(0, true));
+InstanceKlass::InstanceKlass(const ClassFileParser& parser, unsigned kind) :
+  _static_field_size(parser.static_field_size()),
+  _nonstatic_oop_map_size(nonstatic_oop_map_size(parser.total_oop_map_count())),
+  _vtable_len(parser.vtable_size()),
+  _itable_len(parser.itable_size()),
+  _reference_type(parser.reference_type()) {
+    set_kind(kind);
+    set_access_flags(parser.access_flags());
+    set_is_anonymous(parser.is_anonymous());
+    set_layout_helper(Klass::instance_layout_helper(parser.layout_size(),
+                                                    false));
+
+    assert(NULL == _methods, "underlying memory not zeroed?");
+    assert(is_instance_klass(), "is layout incorrect?");
+    assert(size_helper() == parser.layout_size(), "incorrect size_helper?");
 }
 
-
 void InstanceKlass::deallocate_methods(ClassLoaderData* loader_data,
                                        Array<Method*>* methods) {
   if (methods != NULL && methods != Universe::the_empty_method_array() &&
@@ -282,7 +242,7 @@
 }
 
 void InstanceKlass::deallocate_interfaces(ClassLoaderData* loader_data,
-                                          Klass* super_klass,
+                                          const Klass* super_klass,
                                           Array<Klass*>* local_interfaces,
                                           Array<Klass*>* transitive_interfaces) {
   // Only deallocate transitive interfaces if not empty, same as super class
@@ -491,9 +451,9 @@
     this_k->set_init_state (fully_initialized);
     this_k->fence_and_clear_init_lock();
     // trace
-    if (TraceClassInitialization) {
+    if (log_is_enabled(Info, classinit)) {
       ResourceMark rm(THREAD);
-      tty->print_cr("[Initialized %s without side effects]", this_k->external_name());
+      log_info(classinit)("[Initialized %s without side effects]", this_k->external_name());
     }
   }
 }
@@ -1129,10 +1089,12 @@
 
   methodHandle h_method(THREAD, this_k->class_initializer());
   assert(!this_k->is_initialized(), "we cannot initialize twice");
-  if (TraceClassInitialization) {
-    tty->print("%d Initializing ", call_class_initializer_impl_counter++);
-    this_k->name()->print_value();
-    tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", p2i(this_k()));
+  if (log_is_enabled(Info, classinit)) {
+    ResourceMark rm;
+    outputStream* log = LogHandle(classinit)::info_stream();
+    log->print("%d Initializing ", call_class_initializer_impl_counter++);
+    this_k->name()->print_value_on(log);
+    log->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", p2i(this_k()));
   }
   if (h_method() != NULL) {
     JavaCallArguments args; // No arguments
@@ -1346,10 +1308,12 @@
 }
 
 #ifdef ASSERT
-static int linear_search(Array<Method*>* methods, Symbol* name, Symbol* signature) {
-  int len = methods->length();
+static int linear_search(const Array<Method*>* methods,
+                         const Symbol* name,
+                         const Symbol* signature) {
+  const int len = methods->length();
   for (int index = 0; index < len; index++) {
-    Method* m = methods->at(index);
+    const Method* const m = methods->at(index);
     assert(m->is_method(), "must be method");
     if (m->signature() == signature && m->name() == name) {
        return index;
@@ -1359,7 +1323,7 @@
 }
 #endif
 
-static int binary_search(Array<Method*>* methods, Symbol* name) {
+static int binary_search(const Array<Method*>* methods, const Symbol* name) {
   int len = methods->length();
   // methods are sorted, so do binary search
   int l = 0;
@@ -1381,31 +1345,44 @@
 }
 
 // find_method looks up the name/signature in the local methods array
-Method* InstanceKlass::find_method(Symbol* name, Symbol* signature) const {
+Method* InstanceKlass::find_method(const Symbol* name,
+                                   const Symbol* signature) const {
   return find_method_impl(name, signature, find_overpass, find_static, find_private);
 }
 
-Method* InstanceKlass::find_method_impl(Symbol* name, Symbol* signature,
+Method* InstanceKlass::find_method_impl(const Symbol* name,
+                                        const Symbol* signature,
                                         OverpassLookupMode overpass_mode,
                                         StaticLookupMode static_mode,
                                         PrivateLookupMode private_mode) const {
-  return InstanceKlass::find_method_impl(methods(), name, signature, overpass_mode, static_mode, private_mode);
+  return InstanceKlass::find_method_impl(methods(),
+                                         name,
+                                         signature,
+                                         overpass_mode,
+                                         static_mode,
+                                         private_mode);
 }
 
 // find_instance_method looks up the name/signature in the local methods array
 // and skips over static methods
-Method* InstanceKlass::find_instance_method(
-    Array<Method*>* methods, Symbol* name, Symbol* signature) {
-  Method* meth = InstanceKlass::find_method_impl(methods, name, signature,
-                                                 find_overpass, skip_static, find_private);
-  assert(((meth == NULL) || !meth->is_static()), "find_instance_method should have skipped statics");
+Method* InstanceKlass::find_instance_method(const Array<Method*>* methods,
+                                            const Symbol* name,
+                                            const Symbol* signature) {
+  Method* const meth = InstanceKlass::find_method_impl(methods,
+                                                 name,
+                                                 signature,
+                                                 find_overpass,
+                                                 skip_static,
+                                                 find_private);
+  assert(((meth == NULL) || !meth->is_static()),
+    "find_instance_method should have skipped statics");
   return meth;
 }
 
 // find_instance_method looks up the name/signature in the local methods array
 // and skips over static methods
-Method* InstanceKlass::find_instance_method(Symbol* name, Symbol* signature) {
-    return InstanceKlass::find_instance_method(methods(), name, signature);
+Method* InstanceKlass::find_instance_method(const Symbol* name, const Symbol* signature) const {
+  return InstanceKlass::find_instance_method(methods(), name, signature);
 }
 
 // Find looks up the name/signature in the local methods array
@@ -1413,11 +1390,17 @@
 // This returns the first one found
 // note that the local methods array can have up to one overpass, one static
 // and one instance (private or not) with the same name/signature
-Method* InstanceKlass::find_local_method(Symbol* name, Symbol* signature,
-                                        OverpassLookupMode overpass_mode,
-                                        StaticLookupMode static_mode,
-                                        PrivateLookupMode private_mode) const {
-  return InstanceKlass::find_method_impl(methods(), name, signature, overpass_mode, static_mode, private_mode);
+Method* InstanceKlass::find_local_method(const Symbol* name,
+                                         const Symbol* signature,
+                                         OverpassLookupMode overpass_mode,
+                                         StaticLookupMode static_mode,
+                                         PrivateLookupMode private_mode) const {
+  return InstanceKlass::find_method_impl(methods(),
+                                         name,
+                                         signature,
+                                         overpass_mode,
+                                         static_mode,
+                                         private_mode);
 }
 
 // Find looks up the name/signature in the local methods array
@@ -1425,34 +1408,51 @@
 // This returns the first one found
 // note that the local methods array can have up to one overpass, one static
 // and one instance (private or not) with the same name/signature
-Method* InstanceKlass::find_local_method(Array<Method*>* methods,
-                                        Symbol* name, Symbol* signature,
+Method* InstanceKlass::find_local_method(const Array<Method*>* methods,
+                                         const Symbol* name,
+                                         const Symbol* signature,
+                                         OverpassLookupMode overpass_mode,
+                                         StaticLookupMode static_mode,
+                                         PrivateLookupMode private_mode) {
+  return InstanceKlass::find_method_impl(methods,
+                                         name,
+                                         signature,
+                                         overpass_mode,
+                                         static_mode,
+                                         private_mode);
+}
+
+Method* InstanceKlass::find_method(const Array<Method*>* methods,
+                                   const Symbol* name,
+                                   const Symbol* signature) {
+  return InstanceKlass::find_method_impl(methods,
+                                         name,
+                                         signature,
+                                         find_overpass,
+                                         find_static,
+                                         find_private);
+}
+
+Method* InstanceKlass::find_method_impl(const Array<Method*>* methods,
+                                        const Symbol* name,
+                                        const Symbol* signature,
                                         OverpassLookupMode overpass_mode,
                                         StaticLookupMode static_mode,
                                         PrivateLookupMode private_mode) {
-  return InstanceKlass::find_method_impl(methods, name, signature, overpass_mode, static_mode, private_mode);
-}
-
-
-// find_method looks up the name/signature in the local methods array
-Method* InstanceKlass::find_method(
-    Array<Method*>* methods, Symbol* name, Symbol* signature) {
-  return InstanceKlass::find_method_impl(methods, name, signature, find_overpass, find_static, find_private);
-}
-
-Method* InstanceKlass::find_method_impl(
-    Array<Method*>* methods, Symbol* name, Symbol* signature,
-    OverpassLookupMode overpass_mode, StaticLookupMode static_mode,
-    PrivateLookupMode private_mode) {
   int hit = find_method_index(methods, name, signature, overpass_mode, static_mode, private_mode);
   return hit >= 0 ? methods->at(hit): NULL;
 }
 
-bool InstanceKlass::method_matches(Method* m, Symbol* signature, bool skipping_overpass, bool skipping_static, bool skipping_private) {
-    return  ((m->signature() == signature) &&
-            (!skipping_overpass || !m->is_overpass()) &&
-            (!skipping_static || !m->is_static()) &&
-            (!skipping_private || !m->is_private()));
+// true if method matches signature and conforms to skipping_X conditions.
+static bool method_matches(const Method* m,
+                           const Symbol* signature,
+                           bool skipping_overpass,
+                           bool skipping_static,
+                           bool skipping_private) {
+  return ((m->signature() == signature) &&
+    (!skipping_overpass || !m->is_overpass()) &&
+    (!skipping_static || !m->is_static()) &&
+    (!skipping_private || !m->is_private()));
 }
 
 // Used directly for default_methods to find the index into the
@@ -1467,50 +1467,65 @@
 // To correctly catch a given method, the search criteria may need
 // to explicitly skip the other two. For local instance methods, it
 // is often necessary to skip private methods
-int InstanceKlass::find_method_index(
-    Array<Method*>* methods, Symbol* name, Symbol* signature,
-    OverpassLookupMode overpass_mode, StaticLookupMode static_mode,
-    PrivateLookupMode private_mode) {
-  bool skipping_overpass = (overpass_mode == skip_overpass);
-  bool skipping_static = (static_mode == skip_static);
-  bool skipping_private = (private_mode == skip_private);
-  int hit = binary_search(methods, name);
+int InstanceKlass::find_method_index(const Array<Method*>* methods,
+                                     const Symbol* name,
+                                     const Symbol* signature,
+                                     OverpassLookupMode overpass_mode,
+                                     StaticLookupMode static_mode,
+                                     PrivateLookupMode private_mode) {
+  const bool skipping_overpass = (overpass_mode == skip_overpass);
+  const bool skipping_static = (static_mode == skip_static);
+  const bool skipping_private = (private_mode == skip_private);
+  const int hit = binary_search(methods, name);
   if (hit != -1) {
-    Method* m = methods->at(hit);
+    const Method* const m = methods->at(hit);
 
     // Do linear search to find matching signature.  First, quick check
     // for common case, ignoring overpasses if requested.
-    if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) return hit;
+    if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) {
+      return hit;
+    }
 
     // search downwards through overloaded methods
     int i;
     for (i = hit - 1; i >= 0; --i) {
-        Method* m = methods->at(i);
+        const Method* const m = methods->at(i);
         assert(m->is_method(), "must be method");
-        if (m->name() != name) break;
-        if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) return i;
+        if (m->name() != name) {
+          break;
+        }
+        if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) {
+          return i;
+        }
     }
     // search upwards
     for (i = hit + 1; i < methods->length(); ++i) {
-        Method* m = methods->at(i);
+        const Method* const m = methods->at(i);
         assert(m->is_method(), "must be method");
-        if (m->name() != name) break;
-        if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) return i;
+        if (m->name() != name) {
+          break;
+        }
+        if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) {
+          return i;
+        }
     }
     // not found
 #ifdef ASSERT
-    int index = (skipping_overpass || skipping_static || skipping_private) ? -1 : linear_search(methods, name, signature);
-    assert(index == -1, "binary search should have found entry %d", index);
+    const int index = (skipping_overpass || skipping_static || skipping_private) ? -1 :
+      linear_search(methods, name, signature);
+    assert(-1 == index, "binary search should have found entry %d", index);
 #endif
   }
   return -1;
 }
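
find_method_index keeps the lookup strategy intact while tightening const-ness: binary-search the name-sorted methods array to land on some method with the right name, then scan the adjacent same-name overloads in both directions for a signature match (the skipping_* modes filter candidates). A self-contained sketch of that search shape (simplified method records):

#include <string>
#include <vector>

struct M { std::string name, sig; };

static int binary_search(const std::vector<M>& ms, const std::string& name) {
  int l = 0, h = static_cast<int>(ms.size()) - 1;
  while (l <= h) {
    int mid = (l + h) / 2;
    if (ms[mid].name < name)      l = mid + 1;
    else if (ms[mid].name > name) h = mid - 1;
    else return mid;  // any index with a matching name
  }
  return -1;
}

static int find_method_index(const std::vector<M>& ms,
                             const std::string& name,
                             const std::string& sig) {
  const int hit = binary_search(ms, name);
  if (hit < 0) return -1;
  if (ms[hit].sig == sig) return hit;
  for (int i = hit - 1; i >= 0 && ms[i].name == name; --i)  // scan down
    if (ms[i].sig == sig) return i;
  for (int i = hit + 1;
       i < static_cast<int>(ms.size()) && ms[i].name == name; ++i)  // scan up
    if (ms[i].sig == sig) return i;
  return -1;
}

int main() {
  std::vector<M> ms = {{"bar", "()I"}, {"foo", "()I"}, {"foo", "()V"}};
  return (find_method_index(ms, "foo", "()V") == 2) ? 0 : 1;
}
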
-int InstanceKlass::find_method_by_name(Symbol* name, int* end) {
+
+int InstanceKlass::find_method_by_name(const Symbol* name, int* end) const {
   return find_method_by_name(methods(), name, end);
 }
 
-int InstanceKlass::find_method_by_name(
-    Array<Method*>* methods, Symbol* name, int* end_ptr) {
+int InstanceKlass::find_method_by_name(const Array<Method*>* methods,
+                                       const Symbol* name,
+                                       int* end_ptr) {
   assert(end_ptr != NULL, "just checking");
   int start = binary_search(methods, name);
   int end = start + 1;
@@ -1525,11 +1540,17 @@
 
 // uncached_lookup_method searches both the local class methods array and all
 // superclasses methods arrays, skipping any overpass methods in superclasses.
-Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature, OverpassLookupMode overpass_mode) const {
+Method* InstanceKlass::uncached_lookup_method(const Symbol* name,
+                                              const Symbol* signature,
+                                              OverpassLookupMode overpass_mode) const {
   OverpassLookupMode overpass_local_mode = overpass_mode;
-  Klass* klass = const_cast<InstanceKlass*>(this);
+  const Klass* klass = this;
   while (klass != NULL) {
-    Method* method = InstanceKlass::cast(klass)->find_method_impl(name, signature, overpass_local_mode, find_static, find_private);
+    Method* const method = InstanceKlass::cast(klass)->find_method_impl(name,
+                                                                        signature,
+                                                                        overpass_local_mode,
+                                                                        find_static,
+                                                                        find_private);
     if (method != NULL) {
       return method;
     }
@@ -1542,8 +1563,8 @@
 #ifdef ASSERT
 // search through class hierarchy and return true if this class or
 // one of the superclasses was redefined
-bool InstanceKlass::has_redefined_this_or_super() {
-  Klass* klass = this;
+bool InstanceKlass::has_redefined_this_or_super() const {
+  const Klass* klass = this;
   while (klass != NULL) {
     if (InstanceKlass::cast(klass)->has_been_redefined()) {
       return true;
@@ -1612,19 +1633,18 @@
   return probe;
 }
 
-u2 InstanceKlass::enclosing_method_data(int offset) {
-  Array<jushort>* inner_class_list = inner_classes();
+u2 InstanceKlass::enclosing_method_data(int offset) const {
+  const Array<jushort>* const inner_class_list = inner_classes();
   if (inner_class_list == NULL) {
     return 0;
   }
-  int length = inner_class_list->length();
+  const int length = inner_class_list->length();
   if (length % inner_class_next_offset == 0) {
     return 0;
-  } else {
-    int index = length - enclosing_method_attribute_size;
-    assert(offset < enclosing_method_attribute_size, "invalid offset");
-    return inner_class_list->at(index + offset);
   }
+  const int index = length - enclosing_method_attribute_size;
+  assert(offset < enclosing_method_attribute_size, "invalid offset");
+  return inner_class_list->at(index + offset);
 }
 
 void InstanceKlass::set_enclosing_method_indices(u2 class_index,
@@ -2100,7 +2120,7 @@
   Atomic::dec(&_total_instanceKlass_count);
 }
 
-void InstanceKlass::set_source_debug_extension(char* array, int length) {
+void InstanceKlass::set_source_debug_extension(const char* array, int length) {
   if (array == NULL) {
     _source_debug_extension = NULL;
   } else {
@@ -2161,26 +2181,42 @@
 }
 
 // different versions of is_same_class_package
-bool InstanceKlass::is_same_class_package(Klass* class2) {
+bool InstanceKlass::is_same_class_package(const Klass* class2) const {
+  const Klass* const class1 = (const Klass* const)this;
+  oop classloader1 = InstanceKlass::cast(class1)->class_loader();
+  const Symbol* const classname1 = class1->name();
+
   if (class2->is_objArray_klass()) {
     class2 = ObjArrayKlass::cast(class2)->bottom_klass();
   }
-  oop classloader2 = class2->class_loader();
-  Symbol* classname2 = class2->name();
-
-  return InstanceKlass::is_same_class_package(class_loader(), name(),
+  oop classloader2;
+  if (class2->is_instance_klass()) {
+    classloader2 = InstanceKlass::cast(class2)->class_loader();
+  } else {
+    assert(class2->is_typeArray_klass(), "should be type array");
+    classloader2 = NULL;
+  }
+  const Symbol* classname2 = class2->name();
+
+  return InstanceKlass::is_same_class_package(classloader1, classname1,
                                               classloader2, classname2);
 }
 
-bool InstanceKlass::is_same_class_package(oop classloader2, Symbol* classname2) {
-  return InstanceKlass::is_same_class_package(class_loader(), name(),
-                                              classloader2, classname2);
+bool InstanceKlass::is_same_class_package(oop other_class_loader,
+                                          const Symbol* other_class_name) const {
+  oop this_class_loader = class_loader();
+  const Symbol* const this_class_name = name();
+
+  return InstanceKlass::is_same_class_package(this_class_loader,
+                                             this_class_name,
+                                             other_class_loader,
+                                             other_class_name);
 }
 
 // return true if two classes are in the same package, classloader
 // and classname information is enough to determine a class's package
-bool InstanceKlass::is_same_class_package(oop class_loader1, Symbol* class_name1,
-                                          oop class_loader2, Symbol* class_name2) {
+bool InstanceKlass::is_same_class_package(oop class_loader1, const Symbol* class_name1,
+                                          oop class_loader2, const Symbol* class_name2) {
   if (class_loader1 != class_loader2) {
     return false;
   } else if (class_name1 == class_name2) {
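
The three overloads above all funnel into this static test: same defining loader first, then same package name. A minimal standalone sketch of that shape, assuming slash-separated internal names ("java/lang/String") where the package is everything before the last '/'; a plain pointer stands in for the loader oop, and strcmp stands in for interned-Symbol pointer equality:

#include <cstring>

static bool same_class_package(const void* loader1, const char* name1,
                               const void* loader2, const char* name2) {
  if (loader1 != loader2) {
    return false;                    // different defining loaders: different runtime packages
  }
  if (strcmp(name1, name2) == 0) {
    return true;                     // same class, trivially same package
  }
  const char* last1 = strrchr(name1, '/');
  const char* last2 = strrchr(name2, '/');
  if (last1 == NULL || last2 == NULL) {
    return last1 == last2;           // both in the unnamed package, or only one is
  }
  const size_t len1 = (size_t)(last1 - name1);
  const size_t len2 = (size_t)(last2 - name2);
  return len1 == len2 && strncmp(name1, name2, len1) == 0;
}
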
@@ -2259,11 +2295,11 @@
 */
 
 // tell if two classes have the same enclosing class (at package level)
-bool InstanceKlass::is_same_package_member_impl(instanceKlassHandle class1,
-                                                Klass* class2_oop, TRAPS) {
-  if (class2_oop == class1())                       return true;
-  if (!class2_oop->is_instance_klass())  return false;
-  instanceKlassHandle class2(THREAD, class2_oop);
+bool InstanceKlass::is_same_package_member_impl(const InstanceKlass* class1,
+                                                const Klass* class2,
+                                                TRAPS) {
+  if (class2 == class1) return true;
+  if (!class2->is_instance_klass())  return false;
 
   // must be in same package before we try anything else
   if (!class1->is_same_class_package(class2->class_loader(), class2->name()))
@@ -2271,30 +2307,30 @@
 
   // As long as there is an outer1.getEnclosingClass,
   // shift the search outward.
-  instanceKlassHandle outer1 = class1;
+  const InstanceKlass* outer1 = class1;
   for (;;) {
     // As we walk along, look for equalities between outer1 and class2.
     // Eventually, the walks will terminate as outer1 stops
     // at the top-level class around the original class.
     bool ignore_inner_is_member;
-    Klass* next = outer1->compute_enclosing_class(&ignore_inner_is_member,
-                                                    CHECK_false);
+    const Klass* next = outer1->compute_enclosing_class(&ignore_inner_is_member,
+                                                  CHECK_false);
     if (next == NULL)  break;
-    if (next == class2())  return true;
-    outer1 = instanceKlassHandle(THREAD, next);
+    if (next == class2)  return true;
+    outer1 = InstanceKlass::cast(next);
   }
 
   // Now do the same for class2.
-  instanceKlassHandle outer2 = class2;
+  const InstanceKlass* outer2 = InstanceKlass::cast(class2);
   for (;;) {
     bool ignore_inner_is_member;
     Klass* next = outer2->compute_enclosing_class(&ignore_inner_is_member,
                                                     CHECK_false);
     if (next == NULL)  break;
     // Might as well check the new outer against all available values.
-    if (next == class1())  return true;
-    if (next == outer1())  return true;
-    outer2 = instanceKlassHandle(THREAD, next);
+    if (next == class1)  return true;
+    if (next == outer1)  return true;
+    outer2 = InstanceKlass::cast(next);
   }
 
   // If by this point we have not found an equality between the
@@ -2322,36 +2358,38 @@
   return false;
 }
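
The two loops above are an outward walk over enclosing classes; the shape is easier to see without the VM types. A standalone sketch with a hypothetical Node standing in for InstanceKlass (the TRAPS/CHECK_false exception plumbing is omitted):

#include <cstddef>

struct Node {
  const Node* enclosing;   // NULL at a top-level class
};

static bool same_package_member(const Node* class1, const Node* class2) {
  if (class1 == class2) return true;
  // Walk class1 outward, comparing each enclosing class against class2.
  const Node* outer1 = class1;
  for (;;) {
    const Node* next = outer1->enclosing;
    if (next == NULL) break;          // outer1 is now class1's top-level class
    if (next == class2) return true;
    outer1 = next;
  }
  // Walk class2 outward, comparing against class1 and class1's top-level class.
  const Node* outer2 = class2;
  for (;;) {
    const Node* next = outer2->enclosing;
    if (next == NULL) break;
    if (next == class1) return true;
    if (next == outer1) return true;
    outer2 = next;
  }
  return false;                       // no equality anywhere on either chain
}
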
 
-Klass* InstanceKlass::compute_enclosing_class_impl(instanceKlassHandle k, bool* inner_is_member, TRAPS) {
-  instanceKlassHandle outer_klass;
+InstanceKlass* InstanceKlass::compute_enclosing_class_impl(const InstanceKlass* k,
+                                                           bool* inner_is_member,
+                                                           TRAPS) {
+  InstanceKlass* outer_klass = NULL;
   *inner_is_member = false;
   int ooff = 0, noff = 0;
   if (find_inner_classes_attr(k, &ooff, &noff, THREAD)) {
     constantPoolHandle i_cp(THREAD, k->constants());
     if (ooff != 0) {
       Klass* ok = i_cp->klass_at(ooff, CHECK_NULL);
-      outer_klass = instanceKlassHandle(THREAD, ok);
+      outer_klass = InstanceKlass::cast(ok);
       *inner_is_member = true;
     }
-    if (outer_klass.is_null()) {
+    if (NULL == outer_klass) {
       // It may be anonymous; try for that.
       int encl_method_class_idx = k->enclosing_method_class_index();
       if (encl_method_class_idx != 0) {
         Klass* ok = i_cp->klass_at(encl_method_class_idx, CHECK_NULL);
-        outer_klass = instanceKlassHandle(THREAD, ok);
+        outer_klass = InstanceKlass::cast(ok);
         *inner_is_member = false;
       }
     }
   }
 
   // If no inner class attribute found for this class.
-  if (outer_klass.is_null())  return NULL;
+  if (NULL == outer_klass) return NULL;
 
   // Throws an exception if outer klass has not declared k as an inner klass
   // We need evidence that each klass knows about the other, or else
   // the system could allow a spoof of an inner class to gain access rights.
   Reflection::check_for_inner_class(outer_klass, k, *inner_is_member, CHECK_NULL);
-  return outer_klass();
+  return outer_klass;
 }
 
 jint InstanceKlass::compute_modifier_flags(TRAPS) const {
@@ -2919,7 +2957,7 @@
     oop obj = oopDesc::load_decode_heap_oop(p);
     if (!obj->is_oop_or_null()) {
       tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p2i(p), p2i(obj));
-      Universe::print();
+      Universe::print_on(tty);
       guarantee(false, "boom");
     }
   }
--- a/src/share/vm/oops/instanceKlass.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/instanceKlass.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -54,6 +54,7 @@
 
 // forward declaration for class -- see below for definition
 class BreakpointInfo;
+class ClassFileParser;
 class DepChange;
 class DependencyContext;
 class fieldDescriptor;
@@ -113,29 +114,9 @@
   friend class CompileReplay;
 
  protected:
-  // Constructor
-  InstanceKlass(int vtable_len,
-                int itable_len,
-                int static_field_size,
-                int nonstatic_oop_map_size,
-                unsigned kind,
-                ReferenceType rt,
-                AccessFlags access_flags,
-                bool is_anonymous);
+  InstanceKlass(const ClassFileParser& parser, unsigned kind);
+
  public:
-  static InstanceKlass* allocate_instance_klass(
-                                          ClassLoaderData* loader_data,
-                                          int vtable_len,
-                                          int itable_len,
-                                          int static_field_size,
-                                          int nonstatic_oop_map_size,
-                                          ReferenceType rt,
-                                          AccessFlags access_flags,
-                                          Symbol* name,
-                                          Klass* super_klass,
-                                          bool is_anonymous,
-                                          TRAPS);
-
   InstanceKlass() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); }
 
   // See "The Java Virtual Machine Specification" section 2.16.2-5 for a detailed description
@@ -153,6 +134,7 @@
 
  private:
   static volatile int _total_instanceKlass_count;
+  static InstanceKlass* allocate_instance_klass(const ClassFileParser& parser, TRAPS);
 
  protected:
   // Annotations for this class
@@ -177,7 +159,7 @@
   // the source debug extension for this klass, NULL if not specified.
   // Specified as UTF-8 string without terminating zero byte in the classfile,
   // it is stored in the instanceklass as a NULL-terminated UTF-8 string
-  char*           _source_debug_extension;
+  const char*     _source_debug_extension;
   // Array name derived from this class which needs unreferencing
   // if this class is unloaded.
   Symbol*         _array_name;
@@ -351,7 +333,7 @@
   // method ordering
   Array<int>* method_ordering() const     { return _method_ordering; }
   void set_method_ordering(Array<int>* m) { _method_ordering = m; }
-  void copy_method_ordering(intArray* m, TRAPS);
+  void copy_method_ordering(const intArray* m, TRAPS);
 
   // default_methods
   Array<Method*>* default_methods() const  { return _default_methods; }
@@ -417,29 +399,32 @@
   bool is_override(const methodHandle& super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS);
 
   // package
-  bool is_same_class_package(Klass* class2);
-  bool is_same_class_package(oop classloader2, Symbol* classname2);
-  static bool is_same_class_package(oop class_loader1, Symbol* class_name1, oop class_loader2, Symbol* class_name2);
+  bool is_same_class_package(const Klass* class2) const;
+  bool is_same_class_package(oop classloader2, const Symbol* classname2) const;
+  static bool is_same_class_package(oop class_loader1,
+                                    const Symbol* class_name1,
+                                    oop class_loader2,
+                                    const Symbol* class_name2);
 
   // find an enclosing class
-  Klass* compute_enclosing_class(bool* inner_is_member, TRAPS) {
-    instanceKlassHandle self(THREAD, this);
-    return compute_enclosing_class_impl(self, inner_is_member, THREAD);
+  InstanceKlass* compute_enclosing_class(bool* inner_is_member, TRAPS) const {
+    return compute_enclosing_class_impl(this, inner_is_member, THREAD);
   }
-  static Klass* compute_enclosing_class_impl(instanceKlassHandle self,
-                                             bool* inner_is_member, TRAPS);
+  static InstanceKlass* compute_enclosing_class_impl(const InstanceKlass* self,
+                                                     bool* inner_is_member,
+                                                     TRAPS);
 
   // Find InnerClasses attribute for k and return outer_class_info_index & inner_name_index.
   static bool find_inner_classes_attr(instanceKlassHandle k,
                                       int* ooff, int* noff, TRAPS);
 
   // tell if two classes have the same enclosing class (at package level)
-  bool is_same_package_member(Klass* class2, TRAPS) {
-    instanceKlassHandle self(THREAD, this);
-    return is_same_package_member_impl(self, class2, THREAD);
+  bool is_same_package_member(const Klass* class2, TRAPS) const {
+    return is_same_package_member_impl(this, class2, THREAD);
   }
-  static bool is_same_package_member_impl(instanceKlassHandle self,
-                                          Klass* class2, TRAPS);
+  static bool is_same_package_member_impl(const InstanceKlass* self,
+                                          const Klass* class2,
+                                          TRAPS);
 
   // initialization state
   bool is_loaded() const                   { return _init_state >= loaded; }
@@ -508,38 +493,44 @@
   bool find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const;
 
   // find a local method (returns NULL if not found)
-  Method* find_method(Symbol* name, Symbol* signature) const;
-  static Method* find_method(Array<Method*>* methods, Symbol* name, Symbol* signature);
+  Method* find_method(const Symbol* name, const Symbol* signature) const;
+  static Method* find_method(const Array<Method*>* methods,
+                             const Symbol* name,
+                             const Symbol* signature);
 
   // find a local method, but skip static methods
-  Method* find_instance_method(Symbol* name, Symbol* signature);
-  static Method* find_instance_method(Array<Method*>* methods, Symbol* name, Symbol* signature);
+  Method* find_instance_method(const Symbol* name, const Symbol* signature) const;
+  static Method* find_instance_method(const Array<Method*>* methods,
+                                      const Symbol* name,
+                                      const Symbol* signature);
 
   // find a local method (returns NULL if not found)
-  Method* find_local_method(Symbol* name, Symbol* signature,
-                           OverpassLookupMode overpass_mode,
-                           StaticLookupMode static_mode,
-                           PrivateLookupMode private_mode) const;
+  Method* find_local_method(const Symbol* name,
+                            const Symbol* signature,
+                            OverpassLookupMode overpass_mode,
+                            StaticLookupMode static_mode,
+                            PrivateLookupMode private_mode) const;
 
   // find a local method from given methods array (returns NULL if not found)
-  static Method* find_local_method(Array<Method*>* methods,
-                           Symbol* name, Symbol* signature,
-                           OverpassLookupMode overpass_mode,
-                           StaticLookupMode static_mode,
-                           PrivateLookupMode private_mode);
-
-  // true if method matches signature and conforms to skipping_X conditions.
-  static bool method_matches(Method* m, Symbol* signature, bool skipping_overpass, bool skipping_static, bool skipping_private);
+  static Method* find_local_method(const Array<Method*>* methods,
+                                   const Symbol* name,
+                                   const Symbol* signature,
+                                   OverpassLookupMode overpass_mode,
+                                   StaticLookupMode static_mode,
+                                   PrivateLookupMode private_mode);
 
   // find a local method index in methods or default_methods (returns -1 if not found)
-  static int find_method_index(Array<Method*>* methods,
-                               Symbol* name, Symbol* signature,
+  static int find_method_index(const Array<Method*>* methods,
+                               const Symbol* name,
+                               const Symbol* signature,
                                OverpassLookupMode overpass_mode,
                                StaticLookupMode static_mode,
                                PrivateLookupMode private_mode);
 
   // lookup operation (returns NULL if not found)
-  Method* uncached_lookup_method(Symbol* name, Symbol* signature, OverpassLookupMode overpass_mode) const;
+  Method* uncached_lookup_method(const Symbol* name,
+                                 const Symbol* signature,
+                                 OverpassLookupMode overpass_mode) const;
 
   // lookup a method in all the interfaces that this class implements
   // (returns NULL if not found)
@@ -553,8 +544,9 @@
   // found the index to the first method is returned, and 'end' is filled in
   // with the index of first non-name-matching method.  If no method is found
   // -1 is returned.
-  int find_method_by_name(Symbol* name, int* end);
-  static int find_method_by_name(Array<Method*>* methods, Symbol* name, int* end);
+  int find_method_by_name(const Symbol* name, int* end) const;
+  static int find_method_by_name(const Array<Method*>* methods,
+                                 const Symbol* name, int* end);
 
   // constant pool
   ConstantPool* constants() const        { return _constants; }
@@ -576,9 +568,9 @@
       return *hk;
     }
   }
-  void set_host_klass(Klass* host)            {
+  void set_host_klass(const Klass* host) {
     assert(is_anonymous(), "not anonymous");
-    Klass** addr = (Klass**)adr_host_klass();
+    const Klass** addr = (const Klass**)adr_host_klass();
     assert(addr != NULL, "no reversed space");
     if (addr != NULL) {
       *addr = host;
@@ -631,8 +623,8 @@
   void set_major_version(u2 major_version) { _major_version = major_version; }
 
   // source debug extension
-  char* source_debug_extension() const     { return _source_debug_extension; }
-  void set_source_debug_extension(char* array, int length);
+  const char* source_debug_extension() const { return _source_debug_extension; }
+  void set_source_debug_extension(const char* array, int length);
 
   // symbol unloading support (refcount already added)
   Symbol* array_name()                     { return _array_name; }
@@ -765,8 +757,8 @@
     _generic_signature_index = sig_index;
   }
 
-  u2 enclosing_method_data(int offset);
-  u2 enclosing_method_class_index() {
+  u2 enclosing_method_data(int offset) const;
+  u2 enclosing_method_class_index() const {
     return enclosing_method_data(enclosing_method_class_index_offset);
   }
   u2 enclosing_method_method_index() {
@@ -860,7 +852,7 @@
 
 #ifdef ASSERT
   // check whether this class or one of its superclasses was redefined
-  bool has_redefined_this_or_super();
+  bool has_redefined_this_or_super() const;
 #endif
 
   // Access to the implementor of an interface.
@@ -920,11 +912,14 @@
   void array_klasses_do(void f(Klass* k, TRAPS), TRAPS);
   bool super_types_do(SuperTypeClosure* blk);
 
-  // Casting from Klass*
   static InstanceKlass* cast(Klass* k) {
+    return const_cast<InstanceKlass*>(cast(const_cast<const Klass*>(k)));
+  }
+
+  static const InstanceKlass* cast(const Klass* k) {
     assert(k != NULL, "k should not be null");
     assert(k->is_instance_klass(), "cast to InstanceKlass");
-    return static_cast<InstanceKlass*>(k);
+    return static_cast<const InstanceKlass*>(k);
   }
 
   InstanceKlass* java_super() const {
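
The new cast pair above is the recurring idiom in this change (ObjArrayKlass and TypeArrayKlass get the same treatment below): implement the checked cast once for const, then derive the non-const overload from it, so the assertions live in exactly one place. A standalone sketch with hypothetical Base/Derived types:

#include <cassert>
#include <cstddef>

class Base {
 public:
  virtual bool is_derived() const { return false; }
  virtual ~Base() {}
};

class Derived : public Base {
 public:
  virtual bool is_derived() const { return true; }
};

static const Derived* cast(const Base* b) {
  // All checking lives here, in the const overload only.
  assert(b != NULL && b->is_derived());
  return static_cast<const Derived*>(b);
}

static Derived* cast(Base* b) {
  // Funnel through the const overload so both paths share the same assertions.
  return const_cast<Derived*>(cast(const_cast<const Base*>(b)));
}
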
@@ -1033,7 +1028,7 @@
   static void deallocate_methods(ClassLoaderData* loader_data,
                                  Array<Method*>* methods);
   void static deallocate_interfaces(ClassLoaderData* loader_data,
-                                    Klass* super_klass,
+                                    const Klass* super_klass,
                                     Array<Klass*>* local_interfaces,
                                     Array<Klass*>* transitive_interfaces);
 
@@ -1204,12 +1199,15 @@
   Klass* array_klass_impl(bool or_null, TRAPS);
 
   // find a local method (returns NULL if not found)
-  Method* find_method_impl(Symbol* name, Symbol* signature,
+  Method* find_method_impl(const Symbol* name,
+                           const Symbol* signature,
                            OverpassLookupMode overpass_mode,
                            StaticLookupMode static_mode,
                            PrivateLookupMode private_mode) const;
-  static Method* find_method_impl(Array<Method*>* methods,
-                                  Symbol* name, Symbol* signature,
+
+  static Method* find_method_impl(const Array<Method*>* methods,
+                                  const Symbol* name,
+                                  const Symbol* signature,
                                   OverpassLookupMode overpass_mode,
                                   StaticLookupMode static_mode,
                                   PrivateLookupMode private_mode);
--- a/src/share/vm/oops/instanceMirrorKlass.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/instanceMirrorKlass.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -31,6 +31,8 @@
 #include "runtime/handles.hpp"
 #include "utilities/macros.hpp"
 
+class ClassFileParser;
+
 // An InstanceMirrorKlass is a specialized InstanceKlass for
 // java.lang.Class instances.  These instances are special because
 // they contain the static fields of the class in addition to the
@@ -46,10 +48,7 @@
  private:
   static int _offset_of_static_fields;
 
-  // Constructor
-  InstanceMirrorKlass(int vtable_len, int itable_len, int static_field_size, int nonstatic_oop_map_size, ReferenceType rt, AccessFlags access_flags,  bool is_anonymous)
-    : InstanceKlass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_size,
-                    InstanceKlass::_misc_kind_mirror, rt, access_flags, is_anonymous) {}
+  InstanceMirrorKlass(const ClassFileParser& parser) : InstanceKlass(parser, InstanceKlass::_misc_kind_mirror) {}
 
  public:
   InstanceMirrorKlass() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); }
--- a/src/share/vm/oops/instanceRefKlass.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/instanceRefKlass.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -29,6 +29,8 @@
 #include "oops/instanceKlass.hpp"
 #include "utilities/macros.hpp"
 
+class ClassFileParser;
+
 // An InstanceRefKlass is a specialized InstanceKlass for Java
 // classes that are subclasses of java/lang/ref/Reference.
 //
@@ -48,11 +50,8 @@
 
 class InstanceRefKlass: public InstanceKlass {
   friend class InstanceKlass;
-
-  // Constructor
-  InstanceRefKlass(int vtable_len, int itable_len, int static_field_size, int nonstatic_oop_map_size, ReferenceType rt, AccessFlags access_flags, bool is_anonymous)
-    : InstanceKlass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_size,
-                    InstanceKlass::_misc_kind_reference, rt, access_flags, is_anonymous) {}
+ private:
+  InstanceRefKlass(const ClassFileParser& parser) : InstanceKlass(parser, InstanceKlass::_misc_kind_reference) {}
 
  public:
   InstanceRefKlass() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); }
--- a/src/share/vm/oops/instanceRefKlass.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/instanceRefKlass.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,6 +27,7 @@
 
 #include "classfile/javaClasses.hpp"
 #include "gc/shared/referenceProcessor.hpp"
+#include "logging/log.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
@@ -59,12 +60,7 @@
   // Treat discovered as normal oop, if ref is not "active" (next non-NULL)
   if (!oopDesc::is_null(next_oop) && contains(disc_addr)) {
     // i.e. ref is not "active"
-    debug_only(
-      if(TraceReferenceGC && PrintGCDetails) {
-        gclog_or_tty->print_cr("   Process discovered as normal "
-                               PTR_FORMAT, p2i(disc_addr));
-      }
-    )
+    log_develop_trace(gc, ref)("   Process discovered as normal " PTR_FORMAT, p2i(disc_addr));
     Devirtualizer<nv>::do_oop(closure, disc_addr);
   }
   // treat next as normal oop
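
This hunk is representative of the logging conversion in this merge: a develop-only block gated on two flags and writing to gclog_or_tty collapses into one unified-logging call (hence the new logging/log.hpp include above). Side by side, with the runtime switch noted as an assumption:

// Before: debug builds only, enabled by -XX:+TraceReferenceGC -XX:+PrintGCDetails.
debug_only(
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("   Process discovered as normal " PTR_FORMAT, p2i(disc_addr));
  }
)

// After: still compiled out of product builds, enabled at runtime with
// (assumption) -Xlog:gc+ref=trace.
log_develop_trace(gc, ref)("   Process discovered as normal " PTR_FORMAT, p2i(disc_addr));
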
--- a/src/share/vm/oops/klass.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/klass.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -136,7 +136,7 @@
   return NULL;
 }
 
-Method* Klass::uncached_lookup_method(Symbol* name, Symbol* signature, OverpassLookupMode overpass_mode) const {
+Method* Klass::uncached_lookup_method(const Symbol* name, const Symbol* signature, OverpassLookupMode overpass_mode) const {
 #ifdef ASSERT
   tty->print_cr("Error: uncached_lookup_method called on a klass oop."
                 " Likely error: reflection method does not correctly"
@@ -151,45 +151,18 @@
                              MetaspaceObj::ClassType, THREAD);
 }
 
-Klass::Klass() {
-  Klass* k = this;
-
-  // Preinitialize supertype information.
-  // A later call to initialize_supers() may update these settings:
-  set_super(NULL);
-  for (juint i = 0; i < Klass::primary_super_limit(); i++) {
-    _primary_supers[i] = NULL;
-  }
-  set_secondary_supers(NULL);
-  set_secondary_super_cache(NULL);
-  _primary_supers[0] = k;
-  set_super_check_offset(in_bytes(primary_supers_offset()));
-
-  // The constructor is used from init_self_patching_vtbl_list,
-  // which doesn't zero out the memory before calling the constructor.
-  // Need to set the field explicitly to not hit an assert that the field
-  // should be NULL before setting it.
-  _java_mirror = NULL;
+// "Normal" instantiation is preceeded by a MetaspaceObj allocation
+// which zeros out memory - calloc equivalent.
+// The constructor is also used from init_self_patching_vtbl_list,
+// which doesn't zero out the memory before calling the constructor.
+// Need to set the _java_mirror field explicitly to not hit an assert that the field
+// should be NULL before setting it.
+Klass::Klass() : _prototype_header(markOopDesc::prototype()),
+                 _shared_class_path_index(-1),
+                 _java_mirror(NULL) {
 
-  set_modifier_flags(0);
-  set_layout_helper(Klass::_lh_neutral_value);
-  set_name(NULL);
-  AccessFlags af;
-  af.set_flags(0);
-  set_access_flags(af);
-  set_subklass(NULL);
-  set_next_sibling(NULL);
-  set_next_link(NULL);
-  TRACE_INIT_ID(this);
-
-  set_prototype_header(markOopDesc::prototype());
-  set_biased_lock_revocation_count(0);
-  set_last_biased_lock_bulk_revocation_time(0);
-
-  // The klass doesn't have any references at this point.
-  clear_modified_oops();
-  clear_accumulated_modified_oops();
-  _shared_class_path_index = -1;
+  _primary_supers[0] = this;
+  set_super_check_offset(in_bytes(primary_supers_offset()));
 }
 
 jint Klass::array_layout_helper(BasicType etype) {
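
The rewritten constructor above depends on the allocation path: MetaspaceObj hands back zero-filled memory, so only fields whose initial value is not all-zero bits need explicit initialization. A standalone sketch of that contract using calloc plus placement new (hypothetical Meta type; reading zero bits as a NULL pointer is a VM-style assumption, not an ISO C++ guarantee):

#include <cstdlib>
#include <new>

struct Meta {
  void*       _cache;     // wants NULL: already zero from the allocator
  int         _count;     // wants 0:    already zero
  long        _proto;     // wants a non-zero prototype value
  const Meta* _self;      // wants 'this', which zeroed memory cannot provide

  Meta() : _proto(0x5L) {  // hypothetical non-zero prototype header
    _self = this;
  }
};

int main() {
  void* raw = calloc(1, sizeof(Meta));   // the "calloc equivalent" named above
  if (raw == NULL) return 1;
  Meta* m = new (raw) Meta();            // ctor touches only the non-zero fields
  m->~Meta();
  free(raw);
  return 0;
}
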
--- a/src/share/vm/oops/klass.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/klass.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -411,9 +411,9 @@
   // lookup operation for MethodLookupCache
   friend class MethodLookupCache;
   virtual Klass* find_field(Symbol* name, Symbol* signature, fieldDescriptor* fd) const;
-  virtual Method* uncached_lookup_method(Symbol* name, Symbol* signature, OverpassLookupMode overpass_mode) const;
+  virtual Method* uncached_lookup_method(const Symbol* name, const Symbol* signature, OverpassLookupMode overpass_mode) const;
  public:
-  Method* lookup_method(Symbol* name, Symbol* signature) const {
+  Method* lookup_method(const Symbol* name, const Symbol* signature) const {
     return uncached_lookup_method(name, signature, find_overpass);
   }
 
--- a/src/share/vm/oops/klassVtable.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/klassVtable.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -54,7 +54,7 @@
 // treated as any other public method in C for method over-ride purposes.
 void klassVtable::compute_vtable_size_and_num_mirandas(
     int* vtable_length_ret, int* num_new_mirandas,
-    GrowableArray<Method*>* all_mirandas, Klass* super,
+    GrowableArray<Method*>* all_mirandas, const Klass* super,
     Array<Method*>* methods, AccessFlags class_flags,
     Handle classloader, Symbol* classname, Array<Klass*>* local_interfaces,
     TRAPS) {
@@ -548,7 +548,7 @@
 // However, the vtable entries are filled in at link time, and therefore
 // the superclass' vtable may not yet have been filled in.
 bool klassVtable::needs_new_vtable_entry(methodHandle target_method,
-                                         Klass* super,
+                                         const Klass* super,
                                          Handle classloader,
                                          Symbol* classname,
                                          AccessFlags class_flags,
@@ -605,7 +605,7 @@
   ResourceMark rm;
   Symbol* name = target_method()->name();
   Symbol* signature = target_method()->signature();
-  Klass* k = super;
+  const Klass* k = super;
   Method* super_method = NULL;
   InstanceKlass *holder = NULL;
   Method* recheck_method =  NULL;
@@ -640,7 +640,7 @@
   // miranda method in the super, whose entry it should re-use.
   // Actually, to handle cases that javac would not generate, we need
   // this check for all access permissions.
-  InstanceKlass *sk = InstanceKlass::cast(super);
+  const InstanceKlass *sk = InstanceKlass::cast(super);
   if (sk->has_miranda_methods()) {
     if (sk->lookup_method_in_all_interfaces(name, signature, Klass::find_defaults) != NULL) {
       return false;  // found a matching miranda; we do not need a new entry
@@ -734,7 +734,7 @@
 // Part of the Miranda Rights in the US means that if you do not have
 // an attorney one will be appointed for you.
 bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
-                             Array<Method*>* default_methods, Klass* super) {
+                             Array<Method*>* default_methods, const Klass* super) {
   if (m->is_static() || m->is_private() || m->is_overpass()) {
     return false;
   }
@@ -760,7 +760,7 @@
   // Overpasses may or may not exist for supers for pass 1,
   // they should have been created for pass 2 and later.
 
-  for (Klass* cursuper = super; cursuper != NULL; cursuper = cursuper->super())
+  for (const Klass* cursuper = super; cursuper != NULL; cursuper = cursuper->super())
   {
      if (InstanceKlass::cast(cursuper)->find_local_method(name, signature,
            Klass::find_overpass, Klass::skip_static, Klass::skip_private) != NULL) {
@@ -782,7 +782,7 @@
 void klassVtable::add_new_mirandas_to_lists(
     GrowableArray<Method*>* new_mirandas, GrowableArray<Method*>* all_mirandas,
     Array<Method*>* current_interface_methods, Array<Method*>* class_methods,
-    Array<Method*>* default_methods, Klass* super) {
+    Array<Method*>* default_methods, const Klass* super) {
 
   // iterate through the current interface's methods to see if any is a miranda
   int num_methods = current_interface_methods->length();
@@ -802,7 +802,7 @@
 
     if (!is_duplicate) { // we don't want duplicate miranda entries in the vtable
       if (is_miranda(im, class_methods, default_methods, super)) { // is it a miranda at all?
-        InstanceKlass *sk = InstanceKlass::cast(super);
+        const InstanceKlass *sk = InstanceKlass::cast(super);
         // check if it is a duplicate of a super's miranda
         if (sk->lookup_method_in_all_interfaces(im->name(), im->signature(), Klass::find_defaults) == NULL) {
           new_mirandas->append(im);
@@ -817,7 +817,8 @@
 
 void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
                                GrowableArray<Method*>* all_mirandas,
-                               Klass* super, Array<Method*>* class_methods,
+                               const Klass* super,
+                               Array<Method*>* class_methods,
                                Array<Method*>* default_methods,
                                Array<Klass*>* local_interfaces) {
   assert((new_mirandas->length() == 0) , "current mirandas must be 0");
--- a/src/share/vm/oops/klassVtable.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/klassVtable.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -84,11 +84,16 @@
   bool is_initialized();
 
   // computes vtable length (in words) and the number of miranda methods
-  static void compute_vtable_size_and_num_mirandas(
-      int* vtable_length, int* num_new_mirandas,
-      GrowableArray<Method*>* all_mirandas, Klass* super,
-      Array<Method*>* methods, AccessFlags class_flags, Handle classloader,
-      Symbol* classname, Array<Klass*>* local_interfaces, TRAPS);
+  static void compute_vtable_size_and_num_mirandas(int* vtable_length,
+                                                   int* num_new_mirandas,
+                                                   GrowableArray<Method*>* all_mirandas,
+                                                   const Klass* super,
+                                                   Array<Method*>* methods,
+                                                   AccessFlags class_flags,
+                                                   Handle classloader,
+                                                   Symbol* classname,
+                                                   Array<Klass*>* local_interfaces,
+                                                   TRAPS);
 
 #if INCLUDE_JVMTI
   // RedefineClasses() API support:
@@ -116,7 +121,12 @@
   int  initialize_from_super(KlassHandle super);
   int  index_of(Method* m, int len) const; // same as index_of, but search only up to len
   void put_method_at(Method* m, int index);
-  static bool needs_new_vtable_entry(methodHandle m, Klass* super, Handle classloader, Symbol* classname, AccessFlags access_flags, TRAPS);
+  static bool needs_new_vtable_entry(methodHandle m,
+                                     const Klass* super,
+                                     Handle classloader,
+                                     Symbol* classname,
+                                     AccessFlags access_flags,
+                                     TRAPS);
 
   bool update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len, int default_index, bool checkconstraints, TRAPS);
  InstanceKlass* find_transitive_override(InstanceKlass* initialsuper, methodHandle target_method, int vtable_index,
@@ -126,17 +136,18 @@
   bool is_miranda_entry_at(int i);
   int fill_in_mirandas(int initialized);
   static bool is_miranda(Method* m, Array<Method*>* class_methods,
-                         Array<Method*>* default_methods, Klass* super);
+                         Array<Method*>* default_methods, const Klass* super);
   static void add_new_mirandas_to_lists(
       GrowableArray<Method*>* new_mirandas,
       GrowableArray<Method*>* all_mirandas,
       Array<Method*>* current_interface_methods,
       Array<Method*>* class_methods,
       Array<Method*>* default_methods,
-      Klass* super);
+      const Klass* super);
   static void get_mirandas(
       GrowableArray<Method*>* new_mirandas,
-      GrowableArray<Method*>* all_mirandas, Klass* super,
+      GrowableArray<Method*>* all_mirandas,
+      const Klass* super,
       Array<Method*>* class_methods,
       Array<Method*>* default_methods,
       Array<Klass*>* local_interfaces);
--- a/src/share/vm/oops/method.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/method.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1320,12 +1320,12 @@
   return newm;
 }
 
-vmSymbols::SID Method::klass_id_for_intrinsics(Klass* holder) {
+vmSymbols::SID Method::klass_id_for_intrinsics(const Klass* holder) {
   // if loader is not the default loader (i.e., != NULL), we can't know the intrinsics
   // because we are not loading from core libraries
   // exception: the AES intrinsics come from lib/ext/sunjce_provider.jar
   // which does not use the class default class loader so we check for its loader here
-  InstanceKlass* ik = InstanceKlass::cast(holder);
+  const InstanceKlass* ik = InstanceKlass::cast(holder);
   if ((ik->class_loader() != NULL) && !SystemDictionary::is_ext_class_loader(ik->class_loader())) {
     return vmSymbols::NO_SID;   // regardless of name, no intrinsics here
   }
--- a/src/share/vm/oops/method.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/method.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -76,16 +76,17 @@
 
   // Flags
   enum Flags {
-    _jfr_towrite          = 1 << 0,
-    _caller_sensitive     = 1 << 1,
-    _force_inline         = 1 << 2,
-    _dont_inline          = 1 << 3,
-    _hidden               = 1 << 4,
-    _has_injected_profile = 1 << 5,
-    _running_emcp         = 1 << 6,
-    _intrinsic_candidate  = 1 << 7
+    _jfr_towrite           = 1 << 0,
+    _caller_sensitive      = 1 << 1,
+    _force_inline          = 1 << 2,
+    _dont_inline           = 1 << 3,
+    _hidden                = 1 << 4,
+    _has_injected_profile  = 1 << 5,
+    _running_emcp          = 1 << 6,
+    _intrinsic_candidate   = 1 << 7,
+    _reserved_stack_access = 1 << 8
   };
-  u1 _flags;
+  mutable u2 _flags;
 
 #ifndef PRODUCT
   int               _compiled_invocation_count;  // Number of nmethod invocations so far (for perf. debugging)
@@ -785,12 +786,12 @@
 
   // Helper routines for intrinsic_id() and vmIntrinsics::method().
   void init_intrinsic_id();     // updates from _none if a match
-  static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);
+  static vmSymbols::SID klass_id_for_intrinsics(const Klass* holder);
 
-  bool jfr_towrite() {
+  bool jfr_towrite() const {
     return (_flags & _jfr_towrite) != 0;
   }
-  void set_jfr_towrite(bool x) {
+  void set_jfr_towrite(bool x) const {
     _flags = x ? (_flags | _jfr_towrite) : (_flags & ~_jfr_towrite);
   }
 
@@ -836,6 +837,14 @@
     _flags = x ? (_flags | _has_injected_profile) : (_flags & ~_has_injected_profile);
   }
 
+  bool has_reserved_stack_access() {
+    return (_flags & _reserved_stack_access) != 0;
+  }
+
+  void set_has_reserved_stack_access(bool x) {
+    _flags = x ? (_flags | _reserved_stack_access) : (_flags & ~_reserved_stack_access);
+  }
+
   ConstMethod::MethodType method_type() const {
       return _constMethod->method_type();
   }
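
The widened field above explains the reindented enum: _reserved_stack_access is 1 << 8, a ninth flag, so the old u1 _flags had no room for it and becomes a u2. A standalone sketch of the get/set bit pattern these accessors share:

#include <cstdint>

enum MethodFlagBits {
  _force_inline          = 1 << 2,
  _reserved_stack_access = 1 << 8   // ninth bit: does not fit in a u1
};

struct MethodFlags {
  uint16_t _flags;                  // u2-sized, like the widened field

  MethodFlags() : _flags(0) {}

  bool has_reserved_stack_access() const {
    return (_flags & _reserved_stack_access) != 0;
  }
  void set_has_reserved_stack_access(bool x) {
    // OR the bit in, or mask it out, exactly as the accessors above do.
    _flags = (uint16_t)(x ? (_flags | _reserved_stack_access)
                          : (_flags & ~_reserved_stack_access));
  }
};
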
--- a/src/share/vm/oops/objArrayKlass.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/objArrayKlass.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -90,10 +90,14 @@
   virtual Klass* array_klass_impl(bool or_null, TRAPS);
 
  public:
-  // Casting from Klass*
+
   static ObjArrayKlass* cast(Klass* k) {
+    return const_cast<ObjArrayKlass*>(cast(const_cast<const Klass*>(k)));
+  }
+
+  static const ObjArrayKlass* cast(const Klass* k) {
     assert(k->is_objArray_klass(), "cast to ObjArrayKlass");
-    return static_cast<ObjArrayKlass*>(k);
+    return static_cast<const ObjArrayKlass*>(k);
   }
 
   // Sizing
--- a/src/share/vm/oops/oopsHierarchy.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/oopsHierarchy.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -35,7 +35,7 @@
   assert (CheckUnhandledOops, "should only call when CheckUnhandledOops");
   if (!Universe::is_fully_initialized()) return;
   // This gets expensive, which is why checking unhandled oops is on a switch.
-  Thread* t = ThreadLocalStorage::thread();
+  Thread* t = Thread::current_or_null();
   if (t != NULL && t->is_Java_thread()) {
      frame fr = os::current_frame();
      // This points to the oop creator, I guess current frame points to caller
@@ -48,7 +48,7 @@
   assert (CheckUnhandledOops, "should only call when CheckUnhandledOops");
   if (!Universe::is_fully_initialized()) return;
   // This gets expensive, which is why checking unhandled oops is on a switch.
-  Thread* t = ThreadLocalStorage::thread();
+  Thread* t = Thread::current_or_null();
   if (t != NULL && t->is_Java_thread()) {
     t->unhandled_oops()->unregister_unhandled_oop(this);
   }
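
Both hunks above show the new query that replaces the old ThreadLocalStorage lookup throughout this merge: Thread::current_or_null() returns NULL on a thread that was never attached, so every caller tests before use. A standalone sketch of that shape with a hypothetical Thread class backed by a C++11 thread_local pointer:

#include <cstddef>

class Thread {
 public:
  static Thread* current_or_null() { return _current; }  // NULL if never attached
  static void attach(Thread* t)    { _current = t; }
 private:
  static thread_local Thread* _current;
};
thread_local Thread* Thread::_current = NULL;

static void checked_use() {
  Thread* t = Thread::current_or_null();
  if (t != NULL) {
    // Safe to touch per-thread state; unattached threads simply skip this.
  }
}
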
--- a/src/share/vm/oops/symbol.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/symbol.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -148,8 +148,8 @@
   int size()                { return size(utf8_length()); }
 
   // Returns the largest size symbol we can safely hold.
-  static int max_length()   { return max_symbol_length; }
-  unsigned identity_hash() {
+  static int max_length() { return max_symbol_length; }
+  unsigned identity_hash() const {
     unsigned addr_bits = (unsigned)((uintptr_t)this >> (LogMinObjAlignmentInBytes + 3));
     return ((unsigned)_identity_hash & 0xffff) |
            ((addr_bits ^ (_length << 8) ^ (( _body[0] << 8) | _body[1])) << 16);
@@ -197,7 +197,7 @@
 
   // Three-way compare for sorting; returns -1/0/1 if receiver is </==/> than arg
   // note that the ordering is not alphabetical
-  inline int fast_compare(Symbol* other) const;
+  inline int fast_compare(const Symbol* other) const;
 
   // Returns receiver converted to null-terminated UTF-8 string; string is
   // allocated in resource area, or in the char buffer provided by caller.
@@ -246,7 +246,7 @@
 // what order it defines, as long as it is a total, time-invariant order
 // Since Symbol*s are in C_HEAP, their relative order in memory never changes,
 // so use address comparison for speed
-int Symbol::fast_compare(Symbol* other) const {
+int Symbol::fast_compare(const Symbol* other) const {
  return (((uintptr_t)this < (uintptr_t)other) ? -1
    : ((uintptr_t)this == (uintptr_t) other) ? 0 : 1);
 }
--- a/src/share/vm/oops/typeArrayKlass.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/oops/typeArrayKlass.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -120,10 +120,13 @@
   virtual Klass* array_klass_impl(bool or_null, TRAPS);
 
  public:
-  // Casting from Klass*
   static TypeArrayKlass* cast(Klass* k) {
+    return const_cast<TypeArrayKlass*>(cast(const_cast<const Klass*>(k)));
+  }
+
+  static const TypeArrayKlass* cast(const Klass* k) {
     assert(k->is_typeArray_klass(), "cast to TypeArrayKlass");
-    return static_cast<TypeArrayKlass*>(k);
+    return static_cast<const TypeArrayKlass*>(k);
   }
 
   // Naming
--- a/src/share/vm/opto/compile.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/opto/compile.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -672,7 +672,8 @@
                   _print_inlining_idx(0),
                   _print_inlining_output(NULL),
                   _interpreter_frame_size(0),
-                  _max_node_limit(MaxNodeLimit) {
+                  _max_node_limit(MaxNodeLimit),
+                  _has_reserved_stack_access(target->has_reserved_stack_access()) {
   C = this;
 #ifndef PRODUCT
   if (_printer != NULL) {
--- a/src/share/vm/opto/compile.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/opto/compile.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -364,6 +364,7 @@
   bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
   bool                  _has_stringbuilder;     // True StringBuffers or StringBuilders are allocated
   bool                  _has_boxed_value;       // True if a boxed object is allocated
+  bool                  _has_reserved_stack_access; // True if the method or an inlined method is annotated with ReservedStackAccess
   int                   _max_vector_size;       // Maximum size of generated vectors
   uint                  _trap_hist[trapHistLength];  // Cumulative traps
   bool                  _trap_can_recompile;    // Have we emitted a recompiling trap?
@@ -637,6 +638,8 @@
   void          set_has_stringbuilder(bool z)   { _has_stringbuilder = z; }
   bool              has_boxed_value() const     { return _has_boxed_value; }
   void          set_has_boxed_value(bool z)     { _has_boxed_value = z; }
+  bool              has_reserved_stack_access() const { return _has_reserved_stack_access; }
+  void          set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }
   int               max_vector_size() const     { return _max_vector_size; }
   void          set_max_vector_size(int s)      { _max_vector_size = s; }
   void          set_trap_count(uint r, uint c)  { assert(r < trapHistLength, "oob");        _trap_hist[r] = c; }
--- a/src/share/vm/opto/parse1.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/opto/parse1.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -415,6 +415,10 @@
   _est_switch_depth = 0;
 #endif
 
+  if (parse_method->has_reserved_stack_access()) {
+    C->set_has_reserved_stack_access(true);
+  }
+
   _tf = TypeFunc::make(method());
   _iter.reset_to_method(method());
   _flow = method()->get_flow_analysis();
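
Taken together, the compile.cpp, compile.hpp, and parse1.cpp hunks propagate a per-method attribute into a per-compilation flag: seeded from the root method, then ORed in for every parsed (hence inlined) method. A standalone sketch of that sticky accumulation, with hypothetical Method/Compilation types:

struct Method {
  bool has_reserved_stack_access;
};

struct Compilation {
  bool has_reserved_stack_access;

  // Seeded from the root method, as in the Compile constructor above.
  explicit Compilation(const Method& root)
    : has_reserved_stack_access(root.has_reserved_stack_access) {}

  // Called for the root and for each inlinee, as in Parse above.
  void parse(const Method& m) {
    if (m.has_reserved_stack_access) {
      has_reserved_stack_access = true;   // sticky: set once, never cleared
    }
  }
};
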
--- a/src/share/vm/precompiled/precompiled.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/precompiled/precompiled.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -203,7 +203,6 @@
 # include "runtime/stubRoutines.hpp"
 # include "runtime/synchronizer.hpp"
 # include "runtime/thread.hpp"
-# include "runtime/threadLocalStorage.hpp"
 # include "runtime/timer.hpp"
 # include "runtime/unhandledOops.hpp"
 # include "runtime/vframe.hpp"
--- a/src/share/vm/prims/jni.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/prims/jni.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -26,6 +26,7 @@
 #include "precompiled.hpp"
 #include "ci/ciReplay.hpp"
 #include "classfile/altHashing.hpp"
+#include "classfile/classFileStream.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
@@ -326,7 +327,7 @@
     class_name = SymbolTable::new_symbol(name, CHECK_NULL);
   }
   ResourceMark rm(THREAD);
-  ClassFileStream st((u1*) buf, bufLen, NULL);
+  ClassFileStream st((u1*)buf, bufLen, NULL, ClassFileStream::verify);
   Handle class_loader (THREAD, JNIHandles::resolve(loaderRef));
 
   if (UsePerfData && !class_loader.is_null()) {
@@ -338,9 +339,11 @@
       ClassLoader::sync_JNIDefineClassLockFreeCounter()->inc();
     }
   }
-  Klass* k = SystemDictionary::resolve_from_stream(class_name, class_loader,
-                                                     Handle(), &st, true,
-                                                     CHECK_NULL);
+  Klass* k = SystemDictionary::resolve_from_stream(class_name,
+                                                   class_loader,
+                                                   Handle(),
+                                                   &st,
+                                                   CHECK_NULL);
 
   if (TraceClassResolution && k != NULL) {
     trace_class_resolution(k);
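
The ClassFileStream change repeats at every construction site in this merge (jvm.cpp, jvmtiRedefineClasses.cpp, and unsafe.cpp below): the stream now carries the verification mode as an explicit fourth argument, apparently replacing the boolean previously passed to resolve_from_stream. Before and after, from the hunk above:

// Before: verification requested via the bool passed to resolve_from_stream.
ClassFileStream st((u1*) buf, bufLen, NULL);
Klass* k = SystemDictionary::resolve_from_stream(class_name, class_loader,
                                                 Handle(), &st, true, CHECK_NULL);

// After: the stream itself states the verification mode.
ClassFileStream st((u1*)buf, bufLen, NULL, ClassFileStream::verify);
Klass* k = SystemDictionary::resolve_from_stream(class_name, class_loader,
                                                 Handle(), &st, CHECK_NULL);
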
@@ -3914,7 +3917,6 @@
     run_unit_test(QuickSort::test_quick_sort());
     run_unit_test(GuardedMemory::test_guarded_memory());
     run_unit_test(AltHashing::test_alt_hash());
-    run_unit_test(test_loggc_filename());
     run_unit_test(TestNewSize_test());
     run_unit_test(TestOldSize_test());
     run_unit_test(TestKlass_test());
@@ -3933,7 +3935,6 @@
 #if INCLUDE_ALL_GCS
     run_unit_test(TestOldFreeSpaceCalculation_test());
     run_unit_test(TestG1BiasedArray_test());
-    run_unit_test(HeapRegionRemSet::test_prt());
     run_unit_test(TestBufferingOopClosure_test());
     run_unit_test(TestCodeCacheRemSet_test());
     if (UseG1GC) {
@@ -4175,7 +4176,7 @@
   }
   */
 
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null();
   if (t != NULL) {
     // If the thread has been attached this operation is a no-op
     *(JNIEnv**)penv = ((JavaThread*) t)->jni_environment();
@@ -4190,10 +4191,8 @@
   // initializing the Java level thread object. Hence, the correct state must
   // be set in order for the Safepoint code to deal with it correctly.
   thread->set_thread_state(_thread_in_vm);
-  // Must do this before initialize_thread_local_storage
   thread->record_stack_base_and_size();
-
-  thread->initialize_thread_local_storage();
+  thread->initialize_thread_current();
 
   if (!os::create_attached_thread(thread)) {
     delete thread;
@@ -4300,8 +4299,8 @@
 
   JNIWrapper("DetachCurrentThread");
 
-  // If the thread has been deattacted the operations is a no-op
-  if (ThreadLocalStorage::thread() == NULL) {
+  // If the thread has already been detached the operation is a no-op
+  if (Thread::current_or_null() == NULL) {
   HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(JNI_OK);
     return JNI_OK;
   }
@@ -4358,7 +4357,7 @@
 #define JVMPI_VERSION_1_2 ((jint)0x10000003)
 #endif // !JVMPI_VERSION_1
 
-  Thread* thread = ThreadLocalStorage::thread();
+  Thread* thread = Thread::current_or_null();
   if (thread != NULL && thread->is_Java_thread()) {
     if (Threads::is_supported_jni_version_including_1_1(version)) {
       *(JNIEnv**)penv = ((JavaThread*) thread)->jni_environment();
--- a/src/share/vm/prims/jniCheck.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/prims/jniCheck.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -87,9 +87,9 @@
 #define JNI_ENTRY_CHECKED(result_type, header)                           \
 extern "C" {                                                             \
   result_type JNICALL header {                                           \
-    JavaThread* thr = (JavaThread*)ThreadLocalStorage::get_thread_slow();\
+    JavaThread* thr = (JavaThread*) Thread::current_or_null();           \
     if (thr == NULL || !thr->is_Java_thread()) {                         \
-      tty->print_cr("%s", fatal_using_jnienv_in_nonjava);                      \
+      tty->print_cr("%s", fatal_using_jnienv_in_nonjava);                \
       os::abort(true);                                                   \
     }                                                                    \
     JNIEnv* xenv = thr->jni_environment();                               \
--- a/src/share/vm/prims/jvm.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/prims/jvm.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/classFileStream.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/javaAssertions.hpp"
 #include "classfile/javaClasses.inline.hpp"
@@ -965,7 +966,7 @@
   }
 
   ResourceMark rm(THREAD);
-  ClassFileStream st((u1*) buf, len, (char *)source);
+  ClassFileStream st((u1*)buf, len, source, ClassFileStream::verify);
   Handle class_loader (THREAD, JNIHandles::resolve(loader));
   if (UsePerfData) {
     is_lock_held_by_thread(class_loader,
@@ -973,9 +974,11 @@
                            THREAD);
   }
   Handle protection_domain (THREAD, JNIHandles::resolve(pd));
-  Klass* k = SystemDictionary::resolve_from_stream(class_name, class_loader,
-                                                     protection_domain, &st,
-                                                     true, CHECK_NULL);
+  Klass* k = SystemDictionary::resolve_from_stream(class_name,
+                                                   class_loader,
+                                                   protection_domain,
+                                                   &st,
+                                                   CHECK_NULL);
 
   if (TraceClassResolution && k != NULL) {
     trace_class_resolution(k);
@@ -3719,3 +3722,7 @@
   info->is_attachable = AttachListener::is_attach_supported();
 }
 JVM_END
+
+JVM_ENTRY_NO_ENV(jint, JVM_FindSignal(const char *name))
+  return os::get_signal_number(name);
+JVM_END
--- a/src/share/vm/prims/jvmtiEnter.xsl	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/prims/jvmtiEnter.xsl	Fri Dec 18 12:39:02 2015 -0800
@@ -494,7 +494,7 @@
   }</xsl:text>  
 
       <xsl:text>  
-  Thread* this_thread = (Thread*)ThreadLocalStorage::thread(); </xsl:text>
+  Thread* this_thread = Thread::current_or_null(); </xsl:text>
 
       <xsl:apply-templates select="." mode="transition"/>
     </xsl:when>
@@ -528,7 +528,7 @@
     </xsl:if>
     <xsl:text>    return JVMTI_ERROR_WRONG_PHASE;
   }
-  Thread* this_thread = (Thread*)ThreadLocalStorage::thread(); </xsl:text>
+  Thread* this_thread = Thread::current_or_null(); </xsl:text>
       <xsl:apply-templates select="." mode="transition"/>
       </xsl:if>
     </xsl:otherwise>
@@ -558,7 +558,7 @@
       <xsl:choose>
         <xsl:when test="count(@callbacksafe)=0 or not(contains(@callbacksafe,'safe'))">
           <xsl:text>  if (Threads::number_of_threads() != 0) {
-    Thread* this_thread = (Thread*)ThreadLocalStorage::thread();</xsl:text>
+    Thread* this_thread = Thread::current_or_null();</xsl:text>
         </xsl:when>
         <xsl:otherwise>
 
@@ -567,7 +567,7 @@
   if (Threads::number_of_threads() == 0) {
     transition = false;
   } else {
-    this_thread = (Thread*)ThreadLocalStorage::thread();
+    this_thread = Thread::current_or_null();
     transition = ((this_thread != NULL) &amp;&amp; !this_thread->is_VM_thread() &amp;&amp; !this_thread->is_ConcurrentGC_thread());
   }
   if (transition) {</xsl:text>
--- a/src/share/vm/prims/jvmtiEnv.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/prims/jvmtiEnv.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -29,6 +29,7 @@
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/interpreter.hpp"
 #include "jvmtifiles/jvmtiEnv.hpp"
+#include "logging/logConfiguration.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/instanceKlass.hpp"
@@ -628,7 +629,22 @@
     TraceClassUnloading = value != 0;
     break;
   case JVMTI_VERBOSE_GC:
-    PrintGC = value != 0;
+    {
+      // This is a temporary solution to work around initialization issues.
+      // JDK-8145083 will fix this.
+      Mutex* conf_mutex = LogConfiguration_lock;
+      if (Threads::number_of_threads() == 0) {
+        // We're too early in the initialization to use mutexes
+        LogConfiguration_lock = NULL;
+      }
+      MutexLockerEx ml(LogConfiguration_lock);
+      if (value == 0) {
+        LogConfiguration::parse_log_arguments("stdout", "gc=off", NULL, NULL, NULL);
+      } else {
+        LogConfiguration::parse_log_arguments("stdout", "gc", NULL, NULL, NULL);
+      }
+      LogConfiguration_lock = conf_mutex;
+    }
     break;
   case JVMTI_VERBOSE_JNI:
     PrintJNIResolving = value != 0;
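
The JVMTI_VERBOSE_GC hunk above routes the old PrintGC toggle into unified logging. Condensed, with the command-line equivalents noted as assumptions:

// value != 0 turns gc logging to stdout on; value == 0 turns it off.
// Roughly equivalent (assumption) to starting with -Xlog:gc or -Xlog:gc=off.
LogConfiguration::parse_log_arguments("stdout",
                                      value != 0 ? "gc" : "gc=off",
                                      NULL, NULL, NULL);
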
@@ -2577,7 +2593,7 @@
     if (!k->is_instance_klass()) {
       return JVMTI_ERROR_ABSENT_INFORMATION;
     }
-    char* sde = InstanceKlass::cast(k)->source_debug_extension();
+    const char* sde = InstanceKlass::cast(k)->source_debug_extension();
     NULL_CHECK(sde, JVMTI_ERROR_ABSENT_INFORMATION);
 
     {
--- a/src/share/vm/prims/jvmtiExport.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/prims/jvmtiExport.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -374,7 +374,7 @@
   }
 
   if (JvmtiEnv::get_phase() == JVMTI_PHASE_LIVE) {
-    JavaThread* current_thread = (JavaThread*) ThreadLocalStorage::thread();
+    JavaThread* current_thread = JavaThread::current();
     // transition code: native to VM
     ThreadInVMfromNative __tiv(current_thread);
     VM_ENTRY_BASE(jvmtiEnv*, JvmtiExport::get_jvmti_interface, current_thread)
@@ -1901,7 +1901,7 @@
 
 // Collect all the vm internally allocated objects which are visible to java world
 void JvmtiExport::record_vm_internal_object_allocation(oop obj) {
-  Thread* thread = ThreadLocalStorage::thread();
+  Thread* thread = Thread::current_or_null();
   if (thread != NULL && thread->is_Java_thread())  {
     // Can not take safepoint here.
     No_Safepoint_Verifier no_sfpt;
@@ -2436,7 +2436,7 @@
   if (!JvmtiExport::should_post_vm_object_alloc()) {
     return;
   }
-  Thread* thread = ThreadLocalStorage::thread();
+  Thread* thread = Thread::current_or_null();
   if (thread != NULL && thread->is_Java_thread())  {
     JavaThread* current_thread = (JavaThread*)thread;
     JvmtiThreadState *state = current_thread->jvmti_thread_state();
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/classFileStream.hpp"
 #include "classfile/metadataOnStackMark.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/verifier.hpp"
@@ -977,8 +978,10 @@
       the_class->external_name(), _class_load_kind,
       os::available_memory() >> 10));
 
-    ClassFileStream st((u1*) _class_defs[i].class_bytes,
-      _class_defs[i].class_byte_count, (char *)"__VM_RedefineClasses__");
+    ClassFileStream st((u1*)_class_defs[i].class_bytes,
+                       _class_defs[i].class_byte_count,
+                       "__VM_RedefineClasses__",
+                       ClassFileStream::verify);
 
     // Parse the stream.
     Handle the_class_loader(THREAD, the_class->class_loader());
--- a/src/share/vm/prims/jvmtiUtil.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/prims/jvmtiUtil.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@
     if (Threads::number_of_threads() == 0) {
       return JvmtiUtil::single_threaded_resource_area();
     }
-    thread = ThreadLocalStorage::thread();
+    thread = Thread::current_or_null();
     if (thread == NULL) {
       return JvmtiUtil::single_threaded_resource_area();
     }
--- a/src/share/vm/prims/unsafe.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/prims/unsafe.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/classFileStream.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "memory/allocation.inline.hpp"
 #include "oops/objArrayOop.inline.hpp"
@@ -997,7 +998,9 @@
     cp_patches_h = objArrayHandle(THREAD, (objArrayOop)p);
   }
 
-  KlassHandle host_klass(THREAD, java_lang_Class::as_Klass(JNIHandles::resolve_non_null(host_class)));
+  const Klass* host_klass = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(host_class));
+  assert(host_klass != NULL, "invariant");
+
   const char* host_source = host_klass->external_name();
   Handle      host_loader(THREAD, host_klass->class_loader());
   Handle      host_domain(THREAD, host_klass->protection_domain());
@@ -1016,15 +1019,21 @@
     }
   }
 
-  ClassFileStream st(class_bytes, class_bytes_length, (char*) host_source);
+  ClassFileStream st(class_bytes,
+                     class_bytes_length,
+                     host_source,
+                     ClassFileStream::verify);
 
   instanceKlassHandle anon_klass;
   {
     Symbol* no_class_name = NULL;
     Klass* anonk = SystemDictionary::parse_stream(no_class_name,
-                                                    host_loader, host_domain,
-                                                    &st, host_klass, cp_patches,
-                                                    CHECK_NULL);
+                                                  host_loader,
+                                                  host_domain,
+                                                  &st,
+                                                  host_klass,
+                                                  cp_patches,
+                                                  CHECK_NULL);
     if (anonk == NULL)  return NULL;
     anon_klass = instanceKlassHandle(THREAD, anonk);
   }
--- a/src/share/vm/prims/whitebox.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/prims/whitebox.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -159,7 +159,7 @@
 
 WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
   CollectorPolicy * p = Universe::heap()->collector_policy();
-  gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
+  tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
     SIZE_FORMAT " Maximum heap " SIZE_FORMAT " Space alignment " SIZE_FORMAT " Heap alignment " SIZE_FORMAT,
     p->min_heap_byte_size(), p->initial_heap_byte_size(), p->max_heap_byte_size(),
     p->space_alignment(), p->heap_alignment());
--- a/src/share/vm/runtime/arguments.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/arguments.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -32,6 +32,7 @@
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/taskqueue.hpp"
+#include "logging/log.hpp"
 #include "logging/logConfiguration.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/universe.inline.hpp"
@@ -81,7 +82,6 @@
 int    Arguments::_num_jvm_args                 = 0;
 char*  Arguments::_java_command                 = NULL;
 SystemProperty* Arguments::_system_properties   = NULL;
-const char*  Arguments::_gc_log_filename        = NULL;
 bool   Arguments::_has_profile                  = false;
 size_t Arguments::_conservative_max_heap_alignment = 0;
 size_t Arguments::_min_heap_size                = 0;
@@ -1308,18 +1308,20 @@
     PropertyList_unique_add(&_system_properties, key, value, true);
   } else {
     if (strcmp(key, "sun.java.command") == 0) {
-      if (_java_command != NULL) {
-        os::free(_java_command);
+      char *old_java_command = _java_command;
+      _java_command = os::strdup_check_oom(value, mtInternal);
+      if (old_java_command != NULL) {
+        os::free(old_java_command);
       }
-      _java_command = os::strdup_check_oom(value, mtInternal);
     } else if (strcmp(key, "java.vendor.url.bug") == 0) {
-      if (_java_vendor_url_bug != DEFAULT_VENDOR_URL_BUG) {
-        assert(_java_vendor_url_bug != NULL, "_java_vendor_url_bug is NULL");
-        os::free((void *)_java_vendor_url_bug);
-      }
+      const char* old_java_vendor_url_bug = _java_vendor_url_bug;
       // save it in _java_vendor_url_bug, so JVM fatal error handler can access
       // its value without going through the property list or making a Java call.
       _java_vendor_url_bug = os::strdup_check_oom(value, mtInternal);
+      if (old_java_vendor_url_bug != DEFAULT_VENDOR_URL_BUG) {
+        assert(old_java_vendor_url_bug != NULL, "_java_vendor_url_bug is NULL");
+        os::free((void *)old_java_vendor_url_bug);
+      }
     }
 
     // Create new property and add at the end of the list
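The reordering above is a copy-then-free update: the replacement string is duplicated and published before the old allocation is released, so the field never points at freed memory. A minimal generic sketch (the names _value and new_value are hypothetical):

    // Copy-then-free, as applied to _java_command above.
    char* old_value = _value;                              // remember current allocation
    _value = os::strdup_check_oom(new_value, mtInternal);  // publish the new copy first
    if (old_value != NULL) {
      os::free(old_value);                                 // release the old copy last
    }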
@@ -1600,19 +1602,11 @@
     } else {
       FLAG_SET_ERGO(size_t, MaxNewSize, preferred_max_new_size);
     }
-    if (PrintGCDetails && Verbose) {
-      // Too early to use gclog_or_tty
-      tty->print_cr("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
-    }
+    log_trace(gc, heap)("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
 
     // Code along this path potentially sets NewSize and OldSize
-    if (PrintGCDetails && Verbose) {
-      // Too early to use gclog_or_tty
-      tty->print_cr("CMS set min_heap_size: " SIZE_FORMAT
-           " initial_heap_size:  " SIZE_FORMAT
-           " max_heap: " SIZE_FORMAT,
-           min_heap_size(), InitialHeapSize, max_heap);
-    }
+    log_trace(gc, heap)("CMS set min_heap_size: " SIZE_FORMAT " initial_heap_size:  " SIZE_FORMAT " max_heap: " SIZE_FORMAT,
+                        min_heap_size(), InitialHeapSize, max_heap);
     size_t min_new = preferred_max_new_size;
     if (FLAG_IS_CMDLINE(NewSize)) {
       min_new = NewSize;
@@ -1623,20 +1617,14 @@
       if (FLAG_IS_DEFAULT(NewSize)) {
         FLAG_SET_ERGO(size_t, NewSize, MAX2(NewSize, min_new));
         FLAG_SET_ERGO(size_t, NewSize, MIN2(preferred_max_new_size, NewSize));
-        if (PrintGCDetails && Verbose) {
-          // Too early to use gclog_or_tty
-          tty->print_cr("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
-        }
+        log_trace(gc, heap)("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
       }
       // Unless explicitly requested otherwise, size old gen
       // so it's NewRatio x of NewSize.
       if (FLAG_IS_DEFAULT(OldSize)) {
         if (max_heap > NewSize) {
           FLAG_SET_ERGO(size_t, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
-          if (PrintGCDetails && Verbose) {
-            // Too early to use gclog_or_tty
-            tty->print_cr("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
-          }
+          log_trace(gc, heap)("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
         }
       }
     }
@@ -1680,11 +1668,8 @@
     FLAG_SET_CMDLINE(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false);
   }
 
-  if (PrintGCDetails && Verbose) {
-    tty->print_cr("MarkStackSize: %uk  MarkStackSizeMax: %uk",
-      (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
-    tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
-  }
+  log_trace(gc)("MarkStackSize: %uk  MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
+  log_trace(gc)("ConcGCThreads: %u", ConcGCThreads);
 }
 #endif // INCLUDE_ALL_GCS
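These hunks replace tty printing guarded by PrintGCDetails && Verbose with the new Unified Logging macros. A minimal usage sketch (tag sets as used in this patch; the enabling syntax follows JEP 158):

    #include "logging/log.hpp"

    // Emitted only when the matching tag set is enabled at trace level,
    // e.g. -Xlog:gc+heap=trace. Safe early in startup, unlike gclog_or_tty.
    log_trace(gc, heap)("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
    log_trace(gc)("ConcGCThreads: %u", ConcGCThreads);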
 
@@ -1731,11 +1716,7 @@
   if (UseAutoGCSelectPolicy &&
       !FLAG_IS_DEFAULT(MaxGCPauseMillis) &&
       (MaxGCPauseMillis <= AutoGCSelectPauseMillis)) {
-    if (PrintGCDetails) {
-      // Cannot use gclog_or_tty yet.
-      tty->print_cr("Automatic selection of the low pause collector"
-       " based on pause goal of %d (ms)", (int) MaxGCPauseMillis);
-    }
+    log_trace(gc)("Automatic selection of the low pause collector based on pause goal of %d (ms)", (int) MaxGCPauseMillis);
     return true;
   }
   return false;
@@ -1949,19 +1930,13 @@
 
   if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
     // In G1, we want the default GC overhead goal to be higher than
-    // say in PS. So we set it here to 10%. Otherwise the heap might
-    // be expanded more aggressively than we would like it to. In
-    // fact, even 10% seems to not be high enough in some cases
-    // (especially small GC stress tests that the main thing they do
-    // is allocation). We might consider increase it further.
-    FLAG_SET_DEFAULT(GCTimeRatio, 9);
-  }
-
-  if (PrintGCDetails && Verbose) {
-    tty->print_cr("MarkStackSize: %uk  MarkStackSizeMax: %uk",
-      (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
-    tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
-  }
+    // it is for PS, or the heap might be expanded too aggressively.
+    // We set it here to ~8%.
+    FLAG_SET_DEFAULT(GCTimeRatio, 12);
+  }
+
+  log_trace(gc)("MarkStackSize: %uk  MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
+  log_trace(gc)("ConcGCThreads: %u", ConcGCThreads);
 }
 
 #if !INCLUDE_ALL_GCS
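The "~8%" in the new comment follows from the conventional reading of GCTimeRatio as the application-time-to-GC-time ratio, which makes the GC overhead goal:

    overhead_goal = 1 / (1 + GCTimeRatio)
    old default (9):   1 / (1 + 9)  = 10.0%
    new default (12):  1 / (1 + 12) ~  7.7%   // the "~8%" in the comment above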
@@ -2073,10 +2048,7 @@
       reasonable_max = MAX2(reasonable_max, (julong)InitialHeapSize);
     }
 
-    if (PrintGCDetails && Verbose) {
-      // Cannot use gclog_or_tty yet.
-      tty->print_cr("  Maximum heap size " SIZE_FORMAT, (size_t) reasonable_max);
-    }
+    log_trace(gc, heap)("  Maximum heap size " SIZE_FORMAT, (size_t) reasonable_max);
     FLAG_SET_ERGO(size_t, MaxHeapSize, (size_t)reasonable_max);
   }
 
@@ -2097,20 +2069,14 @@
 
       reasonable_initial = limit_by_allocatable_memory(reasonable_initial);
 
-      if (PrintGCDetails && Verbose) {
-        // Cannot use gclog_or_tty yet.
-        tty->print_cr("  Initial heap size " SIZE_FORMAT, (size_t)reasonable_initial);
-      }
+      log_trace(gc, heap)("  Initial heap size " SIZE_FORMAT, (size_t)reasonable_initial);
       FLAG_SET_ERGO(size_t, InitialHeapSize, (size_t)reasonable_initial);
     }
     // If the minimum heap size has not been set (via -Xms),
     // synchronize with InitialHeapSize to avoid errors with the default value.
     if (min_heap_size() == 0) {
       set_min_heap_size(MIN2((size_t)reasonable_minimum, InitialHeapSize));
-      if (PrintGCDetails && Verbose) {
-        // Cannot use gclog_or_tty yet.
-        tty->print_cr("  Minimum heap size " SIZE_FORMAT, min_heap_size());
-      }
+      log_trace(gc, heap)("  Minimum heap size " SIZE_FORMAT, min_heap_size());
     }
   }
 }
@@ -2313,77 +2279,8 @@
 //===========================================================================================================
 // Parsing of main arguments
 
-// check if do gclog rotation
-// +UseGCLogFileRotation is a must,
-// no gc log rotation when log file not supplied or
-// NumberOfGCLogFiles is 0
-void check_gclog_consistency() {
-  if (UseGCLogFileRotation) {
-    if ((Arguments::gc_log_filename() == NULL) || (NumberOfGCLogFiles == 0)) {
-      jio_fprintf(defaultStream::output_stream(),
-                  "To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files>\n"
-                  "where num_of_file > 0\n"
-                  "GC log rotation is turned off\n");
-      UseGCLogFileRotation = false;
-    }
-  }
-
-  if (UseGCLogFileRotation && (GCLogFileSize != 0) && (GCLogFileSize < 8*K)) {
-    if (FLAG_SET_CMDLINE(size_t, GCLogFileSize, 8*K) == Flag::SUCCESS) {
-      jio_fprintf(defaultStream::output_stream(),
-                "GCLogFileSize changed to minimum 8K\n");
-    }
-  }
-}
-
-// This function is called for -Xloggc:<filename>, it can be used
-// to check if a given file name(or string) conforms to the following
-// specification:
-// A valid string only contains "[A-Z][a-z][0-9].-_%[p|t]"
-// %p and %t only allowed once. We only limit usage of filename not path
-bool is_filename_valid(const char *file_name) {
-  const char* p = file_name;
-  char file_sep = os::file_separator()[0];
-  const char* cp;
-  // skip prefix path
-  for (cp = file_name; *cp != '\0'; cp++) {
-    if (*cp == '/' || *cp == file_sep) {
-      p = cp + 1;
-    }
-  }
-
-  int count_p = 0;
-  int count_t = 0;
-  while (*p != '\0') {
-    if ((*p >= '0' && *p <= '9') ||
-        (*p >= 'A' && *p <= 'Z') ||
-        (*p >= 'a' && *p <= 'z') ||
-         *p == '-'               ||
-         *p == '_'               ||
-         *p == '.') {
-       p++;
-       continue;
-    }
-    if (*p == '%') {
-      if(*(p + 1) == 'p') {
-        p += 2;
-        count_p ++;
-        continue;
-      }
-      if (*(p + 1) == 't') {
-        p += 2;
-        count_t ++;
-        continue;
-      }
-    }
-    return false;
-  }
-  return count_p < 2 && count_t < 2;
-}
-
 // Check consistency of GC selection
 bool Arguments::check_gc_consistency() {
-  check_gclog_consistency();
   // Ensure that the user has not selected conflicting sets
   // of collectors.
   uint i = 0;
@@ -2529,6 +2426,12 @@
     warning("The VM option CICompilerCountPerCPU overrides CICompilerCount.");
   }
 
+#ifndef SUPPORT_RESERVED_STACK_AREA
+  if (StackReservedPages != 0) {
+    FLAG_SET_CMDLINE(intx, StackReservedPages, 0);
+    warning("Reserved Stack Area not supported on this platform");
+  }
+#endif
   return status;
 }
 
@@ -2726,7 +2629,9 @@
           return JNI_EINVAL;
         }
       } else if (!strcmp(tail, ":gc")) {
-        if (FLAG_SET_CMDLINE(bool, PrintGC, true) != Flag::SUCCESS) {
+        // LogConfiguration_lock is not set up yet, but this code is executed by a single thread, so no locking is needed
+        bool ret = LogConfiguration::parse_log_arguments("stdout", "gc", NULL, NULL, NULL);
+        if (!ret) {
           return JNI_EINVAL;
         }
       } else if (!strcmp(tail, ":jni")) {
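With this hunk -verbose:gc no longer sets PrintGC; it configures unified logging directly. A hedged reading of the call (the NULL arguments take the defaults for decorators, output options, and the error stream):

    // Behaves approximately like passing -Xlog:gc on the command line:
    // log the "gc" tag at the default (info) level to stdout.
    bool ret = LogConfiguration::parse_log_arguments("stdout", "gc", NULL, NULL, NULL);
    if (!ret) {
      return JNI_EINVAL;   // malformed configuration, as in the hunk above
    }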
@@ -3159,24 +3064,6 @@
     // -Xnoagent
     } else if (match_option(option, "-Xnoagent")) {
       // For compatibility with classic. HotSpot refuses to load the old style agent.dll.
-    } else if (match_option(option, "-Xloggc:", &tail)) {
-      // Redirect GC output to the file. -Xloggc:<filename>
-      // ostream_init_log(), when called will use this filename
-      // to initialize a fileStream.
-      _gc_log_filename = os::strdup_check_oom(tail);
-     if (!is_filename_valid(_gc_log_filename)) {
-       jio_fprintf(defaultStream::output_stream(),
-                  "Invalid file name for use with -Xloggc: Filename can only contain the "
-                  "characters [A-Z][a-z][0-9]-_.%%[p|t] but it has been %s\n"
-                  "Note %%p or %%t can only be used once\n", _gc_log_filename);
-        return JNI_EINVAL;
-      }
-      if (FLAG_SET_CMDLINE(bool, PrintGC, true) != Flag::SUCCESS) {
-        return JNI_EINVAL;
-      }
-      if (FLAG_SET_CMDLINE(bool, PrintGCTimeStamps, true) != Flag::SUCCESS) {
-        return JNI_EINVAL;
-      }
     } else if (match_option(option, "-Xlog", &tail)) {
       bool ret = false;
       if (strcmp(tail, ":help") == 0) {
@@ -4181,11 +4068,6 @@
     ScavengeRootsInCode = 1;
   }
 
-  if (PrintGCDetails) {
-    // Turn on -verbose:gc options as well
-    PrintGC = true;
-  }
-
   // Set object alignment values.
   set_object_alignment();
 
--- a/src/share/vm/runtime/arguments.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/arguments.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -284,7 +284,6 @@
 
   // Option flags
   static bool   _has_profile;
-  static const char*  _gc_log_filename;
   // Value of the conservative maximum heap alignment needed
   static size_t  _conservative_max_heap_alignment;
 
@@ -543,9 +542,6 @@
   // -Dsun.java.launcher.pid
   static int sun_java_launcher_pid()        { return _sun_java_launcher_pid; }
 
-  // -Xloggc:<file>, if not specified will be NULL
-  static const char* gc_log_filename()      { return _gc_log_filename; }
-
   // -Xprof
   static bool has_profile()                 { return _has_profile; }
 
--- a/src/share/vm/runtime/commandLineFlagConstraintList.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/commandLineFlagConstraintList.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -223,7 +223,7 @@
 #define EMIT_CONSTRAINT_CHECK(func, type)                               , func, CommandLineFlagConstraint::type
 
 // the "name" argument must be a string literal
-#define INITIAL_CONSTRAINTS_SIZE 69
+#define INITIAL_CONSTRAINTS_SIZE 72
 GrowableArray<CommandLineFlagConstraint*>* CommandLineFlagConstraintList::_constraints = NULL;
 CommandLineFlagConstraint::ConstraintType CommandLineFlagConstraintList::_validating_type = CommandLineFlagConstraint::AtParse;
 
--- a/src/share/vm/runtime/commandLineFlagConstraintsGC.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/commandLineFlagConstraintsGC.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -31,6 +31,7 @@
 #include "runtime/commandLineFlagRangeList.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/globals_extension.hpp"
+#include "runtime/thread.inline.hpp"
 #include "utilities/defaultStream.hpp"
 
 #if INCLUDE_ALL_GCS
@@ -506,6 +507,19 @@
   return Flag::SUCCESS;
 }
 
+// To avoid an overflow in 'align_size_up(value, alignment)'.
+static Flag::Error MaxSizeForAlignment(const char* name, size_t value, size_t alignment, bool verbose) {
+  size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
+  if (value > aligned_max) {
+    CommandLineError::print(verbose,
+                            "%s (" SIZE_FORMAT ") must be "
+                            "less than or equal to aligned maximum value (" SIZE_FORMAT ")\n",
+                            name, value, aligned_max);
+    return Flag::VIOLATES_CONSTRAINT;
+  }
+  return Flag::SUCCESS;
+}
+
 static Flag::Error MaxSizeForHeapAlignment(const char* name, size_t value, bool verbose) {
   // For G1 GC, we don't know until G1CollectorPolicy is created.
   size_t heap_alignment;
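As a worked instance of the aligned_max formula in the new helper above (hypothetical 4 KiB alignment on a 64-bit VM):

    // aligned_max = (max_uintx - alignment) & ~(alignment - 1), alignment = 4096:
    //   max_uintx        = 0xFFFFFFFFFFFFFFFF
    //   max_uintx - 4096 = 0xFFFFFFFFFFFFEFFF
    //   ... & ~0xFFF     = 0xFFFFFFFFFFFFE000   <- aligned_max
    // For any value <= aligned_max, align_size_up(value, 4096) rounds up to at
    // most aligned_max itself, so the addition inside it cannot wrap around.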
@@ -519,16 +533,7 @@
     heap_alignment = CollectorPolicy::compute_heap_alignment();
   }
 
-  // Not to overflow 'align_size_up(value, _heap_alignment) used from CollectorPolicy::initialize_flags()'.
-  size_t aligned_max = ((max_uintx - heap_alignment) & ~(heap_alignment-1));
-  if (value > aligned_max) {
-    CommandLineError::print(verbose,
-                            "%s (" SIZE_FORMAT ") must be "
-                            "less than or equal to aligned maximum value (" SIZE_FORMAT ")\n",
-                            name, value, aligned_max);
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-  return Flag::SUCCESS;
+  return MaxSizeForAlignment(name, value, heap_alignment, verbose);
 }
 
 Flag::Error InitialHeapSizeConstraintFunc(size_t value, bool verbose) {
@@ -544,6 +549,29 @@
   return status;
 }
 
+Flag::Error HeapBaseMinAddressConstraintFunc(size_t value, bool verbose) {
+  // If an overflow happened in Arguments::set_heap_size(), MaxHeapSize will have too large a value.
+  // Check for this by ensuring that MaxHeapSize plus the requested min base address still fits within max_uintx.
+  if (UseCompressedOops && FLAG_IS_ERGO(MaxHeapSize) && (value > (max_uintx - MaxHeapSize))) {
+    CommandLineError::print(verbose,
+                            "HeapBaseMinAddress (" SIZE_FORMAT ") or MaxHeapSize (" SIZE_FORMAT ") is too large. "
+                            "Sum of them must be less than or equal to maximum of size_t (" SIZE_FORMAT ")\n",
+                            value, MaxHeapSize, max_uintx);
+    return Flag::VIOLATES_CONSTRAINT;
+  }
+
+  return MaxSizeForHeapAlignment("HeapBaseMinAddress", value, verbose);
+}
+
+Flag::Error NUMAInterleaveGranularityConstraintFunc(size_t value, bool verbose) {
+  if (UseNUMA && UseNUMAInterleaving) {
+    size_t min_interleave_granularity = UseLargePages ? os::large_page_size() : os::vm_allocation_granularity();
+    return MaxSizeForAlignment("NUMAInterleaveGranularity", value, min_interleave_granularity, verbose);
+  } else {
+    return Flag::SUCCESS;
+  }
+}
+
 Flag::Error NewSizeConstraintFunc(size_t value, bool verbose) {
 #ifdef _LP64
 #if INCLUDE_ALL_GCS
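The UseCompressedOops guard above is the standard overflow-free rewrite of an addition check; a minimal sketch:

    // Naive form (wrong): value + MaxHeapSize can wrap before the comparison.
    //   if (value + MaxHeapSize > max_uintx) { ... }
    // Overflow-free form used above: move MaxHeapSize to the other side.
    if (value > max_uintx - MaxHeapSize) {
      // value + MaxHeapSize would exceed max_uintx
    }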
@@ -596,6 +624,24 @@
   return Flag::SUCCESS;
 }
 
+// This constraint guards against overflow in ThreadLocalAllocBuffer::record_slow_allocation(),
+// so checking at AfterMemoryInit time is sufficient.
+Flag::Error TLABWasteIncrementConstraintFunc(uintx value, bool verbose) {
+  if (UseTLAB) {
+    size_t refill_waste_limit = Thread::current()->tlab().refill_waste_limit();
+
+    // Compare with 'max_uintx' as ThreadLocalAllocBuffer::_refill_waste_limit is 'size_t'.
+    if (refill_waste_limit > (max_uintx - value)) {
+      CommandLineError::print(verbose,
+                              "TLABWasteIncrement (" UINTX_FORMAT ") must be "
+                              "less than or equal to ergonomic TLAB waste increment maximum size(" SIZE_FORMAT ")\n",
+                              value, (max_uintx - refill_waste_limit));
+      return Flag::VIOLATES_CONSTRAINT;
+    }
+  }
+  return Flag::SUCCESS;
+}
+
 Flag::Error SurvivorRatioConstraintFunc(uintx value, bool verbose) {
   if (FLAG_IS_CMDLINE(SurvivorRatio) &&
       (value > (MaxHeapSize / Universe::heap()->collector_policy()->space_alignment()))) {
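The TLABWasteIncrement constraint above applies the same subtract-instead-of-add idiom; a worked rejection case with hypothetical numbers:

    // Suppose refill_waste_limit == max_uintx - 3 and the proposed value == 4:
    //   refill_waste_limit > (max_uintx - value)
    //   (max_uintx - 3)    > (max_uintx - 4)     -> true
    // so the constraint rejects the flag before a later
    // refill_waste_limit += TLABWasteIncrement could wrap around.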
--- a/src/share/vm/runtime/commandLineFlagConstraintsGC.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/commandLineFlagConstraintsGC.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -67,9 +67,12 @@
 Flag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose);
 Flag::Error InitialHeapSizeConstraintFunc(size_t value, bool verbose);
 Flag::Error MaxHeapSizeConstraintFunc(size_t value, bool verbose);
+Flag::Error HeapBaseMinAddressConstraintFunc(size_t value, bool verbose);
+Flag::Error NUMAInterleaveGranularityConstraintFunc(size_t value, bool verbose);
 Flag::Error NewSizeConstraintFunc(size_t value, bool verbose);
 Flag::Error MinTLABSizeConstraintFunc(size_t value, bool verbose);
 Flag::Error TLABSizeConstraintFunc(size_t value, bool verbose);
+Flag::Error TLABWasteIncrementConstraintFunc(uintx value, bool verbose);
 Flag::Error SurvivorRatioConstraintFunc(uintx value, bool verbose);
 Flag::Error MetaspaceSizeConstraintFunc(size_t value, bool verbose);
 Flag::Error MaxMetaspaceSizeConstraintFunc(size_t value, bool verbose);
--- a/src/share/vm/runtime/commandLineFlagRangeList.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/commandLineFlagRangeList.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -250,6 +250,9 @@
 void emit_range_intx(const char* name, intx min, intx max) {
   CommandLineFlagRangeList::add(new CommandLineFlagRange_intx(name, min, max));
 }
+void emit_range_uint(const char* name, uint min, uint max) {
+  CommandLineFlagRangeList::add(new CommandLineFlagRange_uint(name, min, max));
+}
 void emit_range_uintx(const char* name, uintx min, uintx max) {
   CommandLineFlagRangeList::add(new CommandLineFlagRange_uintx(name, min, max));
 }
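The new overload completes the emit_range_* family so uint-typed flags can carry range(...) clauses. Conceptually, a range declaration from this patch is registered at startup roughly as follows (a sketch of what the EMIT_RANGE_CHECK expansion boils down to, not the literal generated code; SomeUIntFlag is hypothetical):

    // uintx flag from this patch:
    emit_range_uintx("NUMAPageScanRate", 0, max_uintx);
    // uint flags can now be handled the same way:
    emit_range_uint("SomeUIntFlag", 0, max_juint);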
@@ -279,7 +282,7 @@
 // Generate func argument to pass into emit_range_xxx functions
 #define EMIT_RANGE_CHECK(a, b)                               , a, b
 
-#define INITIAL_RANGES_SIZE 320
+#define INITIAL_RANGES_SIZE 379
 GrowableArray<CommandLineFlagRange*>* CommandLineFlagRangeList::_ranges = NULL;
 
 // Check the ranges of all flags that have them
--- a/src/share/vm/runtime/deoptimization.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/deoptimization.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -571,6 +571,23 @@
   thread->dec_in_deopt_handler();
 }
 
+// Moved from cpu directories because none of the CPUs has callee-save values.
+// If a CPU implements callee-save values, move this to deoptimization_<cpu>.cpp.
+void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
+
+  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
+  // the days we had adapter frames. When we deoptimize a situation where a
+  // compiled caller calls a compiled callee, the caller will have registers it
+  // expects to survive the call to the callee. If we deoptimize the callee, the only
+  // way we can restore these registers is to have the oldest interpreter
+  // frame that we create restore these values. That is what this routine
+  // will accomplish.
+
+  // At the moment we have modified c2 not to have any callee-save registers,
+  // so this problem does not exist and this routine is just a placeholder.
+
+  assert(f->is_interpreted_frame(), "must be interpreted");
+}
 
 // Return BasicType of value being returned
 JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
@@ -1414,7 +1431,7 @@
     // stack bang causes a stack overflow we crash.
     assert(THREAD->is_Java_thread(), "only a java thread can be here");
     JavaThread* thread = (JavaThread*)THREAD;
-    bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
+    bool guard_pages_enabled = thread->stack_guards_enabled();
     if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
     assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash");
   }
--- a/src/share/vm/runtime/globals.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/globals.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_RUNTIME_GLOBALS_HPP
 #define SHARE_VM_RUNTIME_GLOBALS_HPP
 
-#include <float.h>
 #include "utilities/debug.hpp"
 #include <float.h> // for DBL_MAX
 
@@ -625,9 +624,6 @@
   notproduct(bool, CheckCompressedOops, true,                               \
           "Generate checks in encoding/decoding code in debug VM")          \
                                                                             \
-  product_pd(size_t, HeapBaseMinAddress,                                    \
-          "OS specific low limit for heap base address")                    \
-                                                                            \
   product(uintx, HeapSearchSteps, 3 PPC64_ONLY(+17),                        \
           "Heap allocation steps through preferred address regions to find" \
           " where it can allocate the heap. Number of steps to take per "   \
@@ -692,6 +688,8 @@
                                                                             \
   product(size_t, NUMAInterleaveGranularity, 2*M,                           \
           "Granularity to use for NUMA interleaving on Windows OS")         \
+          range(os::vm_allocation_granularity(), max_uintx)                 \
+          constraint(NUMAInterleaveGranularityConstraintFunc,AfterErgo)     \
                                                                             \
   product(bool, ForceNUMA, false,                                           \
           "Force NUMA optimizations on single-node/UMA systems")            \
@@ -704,6 +702,7 @@
                                                                             \
   product(size_t, NUMASpaceResizeRate, 1*G,                                 \
           "Do not reallocate more than this amount per collection")         \
+          range(0, max_uintx)                                               \
                                                                             \
   product(bool, UseAdaptiveNUMAChunkSizing, true,                           \
           "Enable adaptive chunk sizing for NUMA")                          \
@@ -713,6 +712,7 @@
                                                                             \
   product(uintx, NUMAPageScanRate, 256,                                     \
           "Maximum number of pages to include in the page scan procedure")  \
+          range(0, max_uintx)                                               \
                                                                             \
   product_pd(bool, NeedsDeoptSuspend,                                       \
           "True for register window machines (sparc/ia64)")                 \
@@ -733,9 +733,11 @@
                                                                             \
   product(size_t, LargePageSizeInBytes, 0,                                  \
           "Large page size (0 to let VM choose the page size)")             \
+          range(0, max_uintx)                                               \
                                                                             \
   product(size_t, LargePageHeapSizeThreshold, 128*M,                        \
           "Use large pages if maximum heap is at least this big")           \
+          range(0, max_uintx)                                               \
                                                                             \
   product(bool, ForceTimeHighResolution, false,                             \
           "Using high time resolution (for Win32 only)")                    \
@@ -986,9 +988,6 @@
   develop(bool, ZapUnusedHeapArea, trueInDebug,                             \
           "Zap unused heap space with 0xBAADBABE")                          \
                                                                             \
-  develop(bool, TraceZapUnusedHeapArea, false,                              \
-          "Trace zapping of unused heap space")                             \
-                                                                            \
   develop(bool, CheckZapUnusedHeapArea, false,                              \
           "Check zapping of unused heap space")                             \
                                                                             \
@@ -998,12 +997,6 @@
   develop(bool, PrintVMMessages, true,                                      \
           "Print VM messages on console")                                   \
                                                                             \
-  product(bool, PrintGCApplicationConcurrentTime, false,                    \
-          "Print the time the application has been running")                \
-                                                                            \
-  product(bool, PrintGCApplicationStoppedTime, false,                       \
-          "Print the time the application has been stopped")                \
-                                                                            \
   diagnostic(bool, VerboseVerification, false,                              \
           "Display detailed verification details")                          \
                                                                             \
@@ -1424,6 +1417,13 @@
           range(500, max_intx)                                              \
           constraint(BiasedLockingDecayTimeFunc,AfterErgo)                  \
                                                                             \
+  product(bool, ExitOnOutOfMemoryError, false,                              \
+          "JVM exits on the first occurrence of an out-of-memory error")    \
+                                                                            \
+  product(bool, CrashOnOutOfMemoryError, false,                             \
+          "JVM aborts, producing an error log and core/mini dump, on the "  \
+          "first occurrence of an out-of-memory error")                     \
+                                                                            \
   /* tracing */                                                             \
                                                                             \
   develop(bool, StressRewriter, false,                                      \
@@ -1452,9 +1452,6 @@
   develop(bool, TraceBytecodes, false,                                      \
           "Trace bytecode execution")                                       \
                                                                             \
-  develop(bool, TraceClassInitialization, false,                            \
-          "Trace class initialization")                                     \
-                                                                            \
   product(bool, TraceExceptions, false,                                     \
           "Trace exceptions")                                               \
                                                                             \
@@ -1532,9 +1529,11 @@
   product(uintx, HeapMaximumCompactionInterval, 20,                         \
           "How often should we maximally compact the heap (not allowing "   \
           "any dead space)")                                                \
+          range(0, max_uintx)                                               \
                                                                             \
   product(uintx, HeapFirstMaximumCompactionCount, 3,                        \
           "The collection count for the first maximum compaction")          \
+          range(0, max_uintx)                                               \
                                                                             \
   product(bool, UseMaximumCompactionOnSystemGC, true,                       \
           "Use maximum compaction in the Parallel Old garbage collector "   \
@@ -1571,9 +1570,6 @@
           "number of GC threads")                                           \
           range((size_t)os::vm_page_size(), (size_t)max_uintx)              \
                                                                             \
-  product(bool, TraceDynamicGCThreads, false,                               \
-          "Trace the dynamic GC thread usage")                              \
-                                                                            \
   product(uint, ConcGCThreads, 0,                                           \
           "Number of threads concurrent gc will use")                       \
           constraint(ConcGCThreadsConstraintFunc,AfterErgo)                 \
@@ -1616,6 +1612,7 @@
   diagnostic(uintx, GCLockerRetryAllocationCount, 2,                        \
           "Number of times to retry allocations when "                      \
           "blocked by the GC locker")                                       \
+          range(0, max_uintx)                                               \
                                                                             \
   product(bool, UseCMSBestFit, true,                                        \
           "Use CMS best fit allocation strategy")                           \
@@ -1623,12 +1620,6 @@
   product(bool, UseParNewGC, false,                                         \
           "Use parallel threads in the new generation")                     \
                                                                             \
-  product(bool, PrintTaskqueue, false,                                      \
-          "Print taskqueue statistics for parallel collectors")             \
-                                                                            \
-  product(bool, PrintTerminationStats, false,                               \
-          "Print termination statistics for parallel collectors")           \
-                                                                            \
   product(uintx, ParallelGCBufferWastePct, 10,                              \
           "Wasted fraction of parallel allocation buffer")                  \
           range(0, 100)                                                     \
@@ -1646,9 +1637,6 @@
   product(bool, ResizePLAB, true,                                           \
           "Dynamically resize (survivor space) promotion LAB's")            \
                                                                             \
-  product(bool, PrintPLAB, false,                                           \
-          "Print (survivor space) promotion LAB's sizing decisions")        \
-                                                                            \
   product(intx, ParGCArrayScanChunk, 50,                                    \
           "Scan a subset of object array and push remainder, if array is "  \
           "bigger than this")                                               \
@@ -1670,6 +1658,7 @@
                                                                             \
   product(uintx, ParGCDesiredObjsFromOverflowList, 20,                      \
           "The desired number of objects to claim from the overflow list")  \
+          range(0, max_uintx)                                               \
                                                                             \
   diagnostic(uintx, ParGCStridesPerThread, 2,                               \
           "The number of strides per worker thread that we divide up the "  \
@@ -1691,9 +1680,6 @@
   product(bool, ResizeOldPLAB, true,                                        \
           "Dynamically resize (old gen) promotion LAB's")                   \
                                                                             \
-  product(bool, PrintOldPLAB, false,                                        \
-          "Print (old gen) promotion LAB's sizing decisions")               \
-                                                                            \
   product(size_t, CMSOldPLABMax, 1024,                                      \
           "Maximum size of CMS gen promotion LAB caches per worker "        \
           "per block size")                                                 \
@@ -1723,6 +1709,7 @@
   product(uintx, CMSOldPLABReactivityFactor, 2,                             \
           "The gain in the feedback loop for on-the-fly PLAB resizing "     \
           "during a scavenge")                                              \
+          range(1, max_uintx)                                               \
                                                                             \
   product(bool, AlwaysPreTouch, false,                                      \
           "Force all freshly committed pages to be pre-touched")            \
@@ -1751,6 +1738,7 @@
   product(uintx, CMS_FLSPadding, 1,                                         \
           "The multiple of deviation from mean to use for buffering "       \
           "against volatility in free list demand")                         \
+          range(0, max_juint)                                               \
                                                                             \
   product(uintx, FLSCoalescePolicy, 2,                                      \
           "CMS: aggressiveness level for coalescing, increasing "           \
@@ -1799,10 +1787,12 @@
   product(uintx, CMS_SweepPadding, 1,                                       \
           "The multiple of deviation from mean to use for buffering "       \
           "against volatility in inter-sweep duration")                     \
+          range(0, max_juint)                                               \
                                                                             \
   product(uintx, CMS_SweepTimerThresholdMillis, 10,                         \
           "Skip block flux-rate sampling for an epoch unless inter-sweep "  \
           "duration exceeds this threshold in milliseconds")                \
+          range(0, max_uintx)                                               \
                                                                             \
   product(bool, CMSClassUnloadingEnabled, true,                             \
           "Whether class unloading enabled when using CMS GC")              \
@@ -1810,6 +1800,7 @@
   product(uintx, CMSClassUnloadingMaxInterval, 0,                           \
           "When CMS class unloading is enabled, the maximum CMS cycle "     \
           "count for which classes may not be unloaded")                    \
+          range(0, max_uintx)                                               \
                                                                             \
   product(uintx, CMSIndexedFreeListReplenish, 4,                            \
           "Replenish an indexed free list with this number of chunks")      \
@@ -1843,6 +1834,7 @@
                                                                             \
   product(uintx, CMSMaxAbortablePrecleanLoops, 0,                           \
           "Maximum number of abortable preclean iterations, if > 0")        \
+          range(0, max_uintx)                                               \
                                                                             \
   product(intx, CMSMaxAbortablePrecleanTime, 5000,                          \
           "Maximum time in abortable preclean (in milliseconds)")           \
@@ -1850,6 +1842,7 @@
                                                                             \
   product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100,              \
           "Nominal minimum work per abortable preclean iteration")          \
+          range(0, max_uintx)                                               \
                                                                             \
   manageable(intx, CMSAbortablePrecleanWaitMillis, 100,                     \
           "Time that we sleep between iterations when not given "           \
@@ -1885,10 +1878,6 @@
           "Always record eden chunks used for the parallel initial mark "   \
           "or remark of eden")                                              \
                                                                             \
-  product(bool, CMSPrintEdenSurvivorChunks, false,                          \
-          "Print the eden and the survivor chunks used for the parallel "   \
-          "initial mark or remark of the eden/survivor spaces")             \
-                                                                            \
   product(bool, CMSConcurrentMTEnabled, true,                               \
           "Whether multi-threaded concurrent work enabled "                 \
           "(effective only if ParNewGC)")                                   \
@@ -1937,6 +1926,7 @@
                                                                             \
   product(size_t, CMSScheduleRemarkEdenSizeThreshold, 2*M,                  \
           "If Eden size is below this, do not try to schedule remark")      \
+          range(0, max_uintx)                                               \
                                                                             \
   product(uintx, CMSScheduleRemarkEdenPenetration, 50,                      \
           "The Eden occupancy percentage (0-100) at which "                 \
@@ -1956,9 +1946,6 @@
   product(bool, CMSScavengeBeforeRemark, false,                             \
           "Attempt scavenge before the CMS remark step")                    \
                                                                             \
-  develop(bool, CMSTraceSweeper, false,                                     \
-          "Trace some actions of the CMS sweeper")                          \
-                                                                            \
   product(uintx, CMSWorkQueueDrainThreshold, 10,                            \
           "Don't drain below this size per parallel worker/thief")          \
           range(1, max_juint)                                               \
@@ -1966,6 +1953,7 @@
                                                                             \
   manageable(intx, CMSWaitDuration, 2000,                                   \
           "Time in milliseconds that CMS thread waits for young GC")        \
+          range(min_jint, max_jint)                                         \
                                                                             \
   develop(uintx, CMSCheckInterval, 1000,                                    \
           "Interval in milliseconds that CMS thread checks if it "          \
@@ -1979,17 +1967,15 @@
           "between yields")                                                 \
           range(1, max_uintx)                                               \
                                                                             \
-  product(bool, CMSDumpAtPromotionFailure, false,                           \
-          "Dump useful information about the state of the CMS old "         \
-          "generation upon a promotion failure")                            \
-                                                                            \
   product(bool, CMSPrintChunksInDump, false,                                \
-          "In a dump enabled by CMSDumpAtPromotionFailure, include "        \
-          "more detailed information about the free chunks")                \
+          "If logging for the \"gc\" and \"promotion\" tags is enabled on"  \
+          "trace level include more detailed information about the"         \
+          "free chunks")                \
                                                                             \
   product(bool, CMSPrintObjectsInDump, false,                               \
-          "In a dump enabled by CMSDumpAtPromotionFailure, include "        \
-          "more detailed information about the allocated objects")          \
+          "If logging for the \"gc\" and \"promotion\" tags is enabled on"  \
+          "trace level include more detailed information about the"         \
+          "allocated objects")                                              \
                                                                             \
   diagnostic(bool, FLSVerifyAllHeapReferences, false,                       \
           "Verify that all references across the FLS boundary "             \
@@ -2011,9 +1997,6 @@
           "Maintain _unallocated_block in BlockOffsetArray "                \
           "(currently applicable only to CMS collector)")                   \
                                                                             \
-  develop(bool, TraceCMSState, false,                                       \
-          "Trace the state of the CMS collection")                          \
-                                                                            \
   product(intx, RefDiscoveryPolicy, 0,                                      \
           "Select type of reference discovery policy: "                     \
           "reference-based(0) or referent-based(1)")                        \
@@ -2081,10 +2064,6 @@
   notproduct(bool, GCALotAtAllSafepoints, false,                            \
           "Enforce ScavengeALot/GCALot at all potential safepoints")        \
                                                                             \
-  product(bool, PrintPromotionFailure, false,                               \
-          "Print additional diagnostic information following "              \
-          "promotion failure")                                              \
-                                                                            \
   notproduct(bool, PromotionFailureALot, false,                             \
           "Use promotion failure handling on every youngest generation "    \
           "collection")                                                     \
@@ -2124,12 +2103,6 @@
   develop(bool, TraceMetadataChunkAllocation, false,                        \
           "Trace chunk metadata allocations")                               \
                                                                             \
-  product(bool, TraceMetadataHumongousAllocation, false,                    \
-          "Trace humongous metadata allocations")                           \
-                                                                            \
-  develop(bool, TraceMetavirtualspaceAllocation, false,                     \
-          "Trace virtual space metadata allocations")                       \
-                                                                            \
   notproduct(bool, ExecuteInternalVMTests, false,                           \
           "Enable execution of internal VM tests")                          \
                                                                             \
@@ -2147,12 +2120,8 @@
   product(bool, FastTLABRefill, true,                                       \
           "Use fast TLAB refill code")                                      \
                                                                             \
-  product(bool, PrintTLAB, false,                                           \
-          "Print various TLAB related information")                         \
-                                                                            \
   product(bool, TLABStats, true,                                            \
-          "Provide more detailed and expensive TLAB statistics "            \
-          "(with PrintTLAB)")                                               \
+          "Provide more detailed and expensive TLAB statistics.")           \
                                                                             \
   product_pd(bool, NeverActAsServerClassMachine,                            \
           "Never act like a server-class machine")                          \
@@ -2167,6 +2136,7 @@
   product(size_t, ErgoHeapSizeLimit, 0,                                     \
           "Maximum ergonomically set heap size (in bytes); zero means use " \
           "MaxRAM / MaxRAMFraction")                                        \
+          range(0, max_uintx)                                               \
                                                                             \
   product(uintx, MaxRAMFraction, 4,                                         \
           "Maximum fraction (1/n) of real memory used for maximum heap "    \
@@ -2191,6 +2161,7 @@
                                                                             \
   product(uintx, AutoGCSelectPauseMillis, 5000,                             \
           "Automatic GC selection pause threshold in milliseconds")         \
+          range(0, max_uintx)                                               \
                                                                             \
   product(bool, UseAdaptiveSizePolicy, true,                                \
           "Use adaptive generation sizing policies")                        \
@@ -2210,9 +2181,6 @@
   product(bool, UseAdaptiveGCBoundary, false,                               \
           "Allow young-old boundary to move")                               \
                                                                             \
-  develop(bool, TraceAdaptiveGCBoundary, false,                             \
-          "Trace young-old boundary moves")                                 \
-                                                                            \
   develop(intx, PSAdaptiveSizePolicyResizeVirtualSpaceAlot, -1,             \
           "Resize the virtual spaces of the young or old generations")      \
           range(-1, 1)                                                      \
@@ -2223,12 +2191,14 @@
                                                                             \
   product(uintx, AdaptiveSizePolicyInitializingSteps, 20,                   \
           "Number of steps where heuristics is used before data is used")   \
+          range(0, max_uintx)                                               \
                                                                             \
   develop(uintx, AdaptiveSizePolicyReadyThreshold, 5,                       \
           "Number of collections before the adaptive sizing is started")    \
                                                                             \
   product(uintx, AdaptiveSizePolicyOutputInterval, 0,                       \
           "Collection interval for printing information; zero means never") \
+          range(0, max_uintx)                                               \
                                                                             \
   product(bool, UseAdaptiveSizePolicyFootprintGoal, true,                   \
           "Use adaptive minimum footprint as a goal")                       \
@@ -2243,12 +2213,15 @@
                                                                             \
   product(uintx, PausePadding, 1,                                           \
           "How much buffer to keep for pause time")                         \
+          range(0, max_juint)                                               \
                                                                             \
   product(uintx, PromotedPadding, 3,                                        \
           "How much buffer to keep for promotion failure")                  \
+          range(0, max_juint)                                               \
                                                                             \
   product(uintx, SurvivorPadding, 3,                                        \
           "How much buffer to keep for survivor overflow")                  \
+          range(0, max_juint)                                               \
                                                                             \
   product(uintx, ThresholdTolerance, 10,                                    \
           "Allowed collection cost difference between generations")         \
@@ -2257,6 +2230,7 @@
   product(uintx, AdaptiveSizePolicyCollectionCostMargin, 50,                \
           "If collection costs are within margin, reduce both by full "     \
           "delta")                                                          \
+          range(0, 100)                                                     \
                                                                             \
   product(uintx, YoungGenerationSizeIncrement, 20,                          \
           "Adaptive size percentage change in young generation")            \
@@ -2295,9 +2269,11 @@
   product(uintx, MaxGCMinorPauseMillis, max_uintx,                          \
           "Adaptive size policy maximum GC minor pause time goal "          \
           "in millisecond")                                                 \
+          range(0, max_uintx)                                               \
                                                                             \
   product(uintx, GCTimeRatio, 99,                                           \
           "Adaptive size policy application time to GC time ratio")         \
+          range(0, max_juint)                                               \
                                                                             \
   product(uintx, AdaptiveSizeDecrementScaleFactor, 4,                       \
           "Adaptive size scale down factor for shrinking")                  \
@@ -2308,6 +2284,7 @@
                                                                             \
   product(uintx, AdaptiveSizeMajorGCDecayTimeScale, 10,                     \
           "Time scale over which major costs decay")                        \
+          range(0, max_uintx)                                               \
                                                                             \
   product(uintx, MinSurvivorRatio, 3,                                       \
           "Minimum ratio of young generation/survivor space size")          \
@@ -2315,9 +2292,11 @@
                                                                             \
   product(uintx, InitialSurvivorRatio, 8,                                   \
           "Initial ratio of young generation/survivor space size")          \
+          range(0, max_uintx)                                               \
                                                                             \
   product(size_t, BaseFootPrintEstimate, 256*M,                             \
           "Estimate of footprint other than Java Heap")                     \
+          range(0, max_uintx)                                               \
                                                                             \
   product(bool, UseGCOverheadLimit, true,                                   \
           "Use policy to limit of proportion of time spent in GC "          \
@@ -2337,20 +2316,17 @@
           "Number of consecutive collections before gc time limit fires")   \
           range(1, max_uintx)                                               \
                                                                             \
-  product(bool, PrintAdaptiveSizePolicy, false,                             \
-          "Print information about AdaptiveSizePolicy")                     \
-                                                                            \
   product(intx, PrefetchCopyIntervalInBytes, -1,                            \
           "How far ahead to prefetch destination area (<= 0 means off)")    \
+          range(-1, max_jint)                                               \
                                                                             \
   product(intx, PrefetchScanIntervalInBytes, -1,                            \
           "How far ahead to prefetch scan area (<= 0 means off)")           \
+          range(-1, max_jint)                                               \
                                                                             \
   product(intx, PrefetchFieldsAhead, -1,                                    \
           "How many fields ahead to prefetch in oop scan (<= 0 means off)") \
-                                                                            \
-  diagnostic(bool, VerifySilently, false,                                   \
-          "Do not print the verification progress")                         \
+          range(-1, max_jint)                                               \
                                                                             \
   diagnostic(bool, VerifyDuringStartup, false,                              \
           "Verify memory system before executing any Java code "            \
@@ -2395,6 +2371,7 @@
                                                                             \
   diagnostic(uintx, CPUForCMSThread, 0,                                     \
           "When BindCMSThreadToCPU is true, the CPU to bind CMS thread to") \
+          range(0, max_juint)                                               \
                                                                             \
   product(bool, BindGCTaskThreadsToCPUs, false,                             \
           "Bind GCTaskThreads to CPUs if possible")                         \
@@ -2404,46 +2381,23 @@
                                                                             \
   product(uintx, ProcessDistributionStride, 4,                              \
           "Stride through processors when distributing processes")          \
+          range(0, max_juint)                                               \
                                                                             \
   product(uintx, CMSCoordinatorYieldSleepCount, 10,                         \
           "Number of times the coordinator GC thread will sleep while "     \
           "yielding before giving up and resuming GC")                      \
+          range(0, max_juint)                                               \
                                                                             \
   product(uintx, CMSYieldSleepCount, 0,                                     \
           "Number of times a GC thread (minus the coordinator) "            \
           "will sleep while yielding before giving up and resuming GC")     \
-                                                                            \
-  /* gc tracing */                                                          \
-  manageable(bool, PrintGC, false,                                          \
-          "Print message at garbage collection")                            \
-                                                                            \
-  manageable(bool, PrintGCDetails, false,                                   \
-          "Print more details at garbage collection")                       \
-                                                                            \
-  manageable(bool, PrintGCDateStamps, false,                                \
-          "Print date stamps at garbage collection")                        \
-                                                                            \
-  manageable(bool, PrintGCTimeStamps, false,                                \
-          "Print timestamps at garbage collection")                         \
-                                                                            \
-  manageable(bool, PrintGCID, true,                                         \
-          "Print an identifier for each garbage collection")                \
-                                                                            \
-  product(bool, PrintGCTaskTimeStamps, false,                               \
-          "Print timestamps for individual gc worker thread tasks")         \
+          range(0, max_juint)                                               \
                                                                             \
   develop(intx, ConcGCYieldTimeout, 0,                                      \
           "If non-zero, assert that GC threads yield within this "          \
           "number of milliseconds")                                         \
           range(0, max_intx)                                                \
                                                                             \
-  product(bool, PrintReferenceGC, false,                                    \
-          "Print times spent handling reference objects during GC "         \
-          "(enabled only when PrintGCDetails)")                             \
-                                                                            \
-  develop(bool, TraceReferenceGC, false,                                    \
-          "Trace handling of soft/weak/final/phantom references")           \
-                                                                            \
   develop(bool, TraceFinalizerRegistration, false,                          \
           "Trace registration of final references")                         \
                                                                             \
@@ -2483,37 +2437,15 @@
   product(bool, TraceOldGenTime, false,                                     \
           "Trace accumulated time for old collection")                      \
                                                                             \
-  product(bool, PrintTenuringDistribution, false,                           \
-          "Print tenuring age information")                                 \
-                                                                            \
-  product_rw(bool, PrintHeapAtGC, false,                                    \
-          "Print heap layout before and after each GC")                     \
-                                                                            \
-  product_rw(bool, PrintHeapAtGCExtended, false,                            \
-          "Print extended information about the layout of the heap "        \
-          "when -XX:+PrintHeapAtGC is set")                                 \
-                                                                            \
   product(bool, PrintHeapAtSIGBREAK, true,                                  \
           "Print heap layout in response to SIGBREAK")                      \
                                                                             \
-  manageable(bool, PrintClassHistogramBeforeFullGC, false,                  \
-          "Print a class histogram before any major stop-world GC")         \
-                                                                            \
-  manageable(bool, PrintClassHistogramAfterFullGC, false,                   \
-          "Print a class histogram after any major stop-world GC")          \
-                                                                            \
   manageable(bool, PrintClassHistogram, false,                              \
           "Print a histogram of class instances")                           \
                                                                             \
   develop(bool, TraceWorkGang, false,                                       \
           "Trace activities of work gangs")                                 \
                                                                             \
-  develop(bool, TraceBlockOffsetTable, false,                               \
-          "Print BlockOffsetTable maps")                                    \
-                                                                            \
-  develop(bool, TraceCardTableModRefBS, false,                              \
-          "Print CardTableModRefBS maps")                                   \
-                                                                            \
   develop(bool, TraceGCTaskManager, false,                                  \
           "Trace actions of the GC task manager")                           \
                                                                             \
@@ -2523,48 +2455,20 @@
   diagnostic(bool, TraceGCTaskThread, false,                                \
           "Trace actions of the GC task threads")                           \
                                                                             \
-  product(bool, PrintParallelOldGCPhaseTimes, false,                        \
-          "Print the time taken by each phase in ParallelOldGC "            \
-          "(PrintGCDetails must also be enabled)")                          \
-                                                                            \
   develop(bool, TraceParallelOldGCMarkingPhase, false,                      \
           "Trace marking phase in ParallelOldGC")                           \
                                                                             \
-  develop(bool, TraceParallelOldGCSummaryPhase, false,                      \
-          "Trace summary phase in ParallelOldGC")                           \
-                                                                            \
-  develop(bool, TraceParallelOldGCCompactionPhase, false,                   \
-          "Trace compaction phase in ParallelOldGC")                        \
-                                                                            \
   develop(bool, TraceParallelOldGCDensePrefix, false,                       \
           "Trace dense prefix computation for ParallelOldGC")               \
                                                                             \
   develop(bool, IgnoreLibthreadGPFault, false,                              \
           "Suppress workaround for libthread GP fault")                     \
                                                                             \
-  product(bool, PrintJNIGCStalls, false,                                    \
-          "Print diagnostic message when GC is stalled "                    \
-          "by JNI critical section")                                        \
-                                                                            \
   experimental(double, ObjectCountCutOffPercent, 0.5,                       \
           "The percentage of the used heap that the instances of a class "  \
           "must occupy for the class to generate a trace event")            \
           range(0.0, 100.0)                                                 \
                                                                             \
-  /* GC log rotation setting */                                             \
-                                                                            \
-  product(bool, UseGCLogFileRotation, false,                                \
-          "Rotate gclog files (for long running applications). It requires "\
-          "-Xloggc:<filename>")                                             \
-                                                                            \
-  product(uintx, NumberOfGCLogFiles, 0,                                     \
-          "Number of gclog files in rotation "                              \
-          "(default: 0, no rotation)")                                      \
-                                                                            \
-  product(size_t, GCLogFileSize, 8*K,                                       \
-          "GC log file size, requires UseGCLogFileRotation. "               \
-          "Set to 0 to only trigger rotation via jcmd")                     \
-                                                                            \
   /* JVMTI heap profiling */                                                \
                                                                             \
   diagnostic(bool, TraceJVMTIObjectTagging, false,                          \
@@ -3331,6 +3235,7 @@
                                                                             \
   product(size_t, OldSize, ScaleForWordSize(4*M),                           \
           "Initial tenured generation size (in bytes)")                     \
+          range(0, max_uintx)                                               \
                                                                             \
   product(size_t, NewSize, ScaleForWordSize(1*M),                           \
           "Initial new generation size (in bytes)")                         \
@@ -3339,10 +3244,16 @@
   product(size_t, MaxNewSize, max_uintx,                                    \
           "Maximum new generation size (in bytes), max_uintx means set "    \
           "ergonomically")                                                  \
+          range(0, max_uintx)                                               \
+                                                                            \
+  product_pd(size_t, HeapBaseMinAddress,                                    \
+          "OS specific low limit for heap base address")                    \
+          constraint(HeapBaseMinAddressConstraintFunc,AfterErgo)            \
                                                                             \
   product(size_t, PretenureSizeThreshold, 0,                                \
           "Maximum size in bytes of objects allocated in DefNew "           \
           "generation; zero means no maximum")                              \
+          range(0, max_uintx)                                               \
                                                                             \
   product(size_t, MinTLABSize, 2*K,                                         \
           "Minimum allowed TLAB size (in bytes)")                           \
@@ -3374,10 +3285,12 @@
                                                                             \
   product(uintx, TLABRefillWasteFraction,    64,                            \
           "Maximum TLAB waste at a refill (internal fragmentation)")        \
-          range(1, max_uintx)                                               \
+          range(1, max_juint)                                               \
                                                                             \
   product(uintx, TLABWasteIncrement,    4,                                  \
           "Increment allowed waste at slow allocation")                     \
+          range(0, max_jint)                                                \
+          constraint(TLABWasteIncrementConstraintFunc,AfterMemoryInit)      \
                                                                             \
   product(uintx, SurvivorRatio, 8,                                          \
           "Ratio of eden/survivor space size")                              \
@@ -3391,6 +3304,7 @@
   product_pd(size_t, NewSizeThreadIncrease,                                 \
           "Additional size added to desired new generation size per "       \
           "non-daemon thread (in bytes)")                                   \
+          range(0, max_uintx)                                               \
                                                                             \
   product_pd(size_t, MetaspaceSize,                                         \
           "Initial size of Metaspaces (in bytes)")                          \
@@ -3426,9 +3340,11 @@
                                                                             \
   product(size_t, MinHeapDeltaBytes, ScaleForWordSize(128*K),               \
           "The minimum change in heap space due to GC (in bytes)")          \
+          range(0, max_uintx)                                               \
                                                                             \
   product(size_t, MinMetaspaceExpansion, ScaleForWordSize(256*K),           \
           "The minimum expansion of Metaspace (in bytes)")                  \
+          range(0, max_uintx)                                               \
                                                                             \
   product(uintx, MaxMetaspaceFreeRatio,    70,                              \
           "The maximum percentage of Metaspace free after GC to avoid "     \
@@ -3444,13 +3360,16 @@
                                                                             \
   product(size_t, MaxMetaspaceExpansion, ScaleForWordSize(4*M),             \
           "The maximum expansion of Metaspace without full GC (in bytes)")  \
+          range(0, max_uintx)                                               \
                                                                             \
   product(uintx, QueuedAllocationWarningCount, 0,                           \
           "Number of times an allocation that queues behind a GC "          \
           "will retry before printing a warning")                           \
+          range(0, max_uintx)                                               \
                                                                             \
   diagnostic(uintx, VerifyGCStartAt,   0,                                   \
           "GC invoke count where +VerifyBefore/AfterGC kicks in")           \
+          range(0, max_uintx)                                               \
                                                                             \
   diagnostic(intx, VerifyGCLevel,     0,                                    \
           "Generation level at which to start +VerifyBefore/AfterGC")       \
@@ -3486,18 +3405,6 @@
           "space parameters)")                                              \
           range(1, max_uintx)                                               \
                                                                             \
-  product(intx, PrintCMSStatistics, 0,                                      \
-          "Statistics for CMS")                                             \
-                                                                            \
-  product(bool, PrintCMSInitiationStatistics, false,                        \
-          "Statistics for initiating a CMS collection")                     \
-                                                                            \
-  product(intx, PrintFLSStatistics, 0,                                      \
-          "Statistics for CMS' FreeListSpace")                              \
-                                                                            \
-  product(intx, PrintFLSCensus, 0,                                          \
-          "Census for CMS' FreeListSpace")                                  \
-                                                                            \
   develop(uintx, GCExpandToAllocateDelayMillis, 0,                          \
           "Delay between expansion and allocation (in milliseconds)")       \
                                                                             \
@@ -3523,6 +3430,7 @@
   product(uintx, GCDrainStackTargetSize, 64,                                \
           "Number of entries we will try to leave on the stack "            \
           "during parallel gc")                                             \
+          range(0, max_juint)                                               \
                                                                             \
   /* stack parameters */                                                    \
   product_pd(intx, StackYellowPages,                                        \
@@ -3533,6 +3441,13 @@
           "Number of red zone (unrecoverable overflows) pages")             \
           range(MIN_STACK_RED_PAGES, (DEFAULT_STACK_RED_PAGES+2))           \
                                                                             \
+  product_pd(intx, StackReservedPages,                                      \
+          "Number of reserved zone (reserved to annotated methods) pages")  \
+          range(MIN_STACK_RESERVED_PAGES, (DEFAULT_STACK_RESERVED_PAGES+10))\
+                                                                            \
+  product(bool, RestrictReservedStack, true,                                \
+          "Restrict @ReservedStackAccess to trusted classes")               \
+                                                                            \
   /* greater stack shadow pages can't generate instruction to bang stack */ \
   product_pd(intx, StackShadowPages,                                        \
           "Number of shadow zone (for overflow checking) pages "            \
@@ -4191,9 +4106,6 @@
   product(bool, UseStringDeduplication, false,                              \
           "Use string deduplication")                                       \
                                                                             \
-  product(bool, PrintStringDeduplicationStatistics, false,                  \
-          "Print string deduplication statistics")                          \
-                                                                            \
   product(uintx, StringDeduplicationAgeThreshold, 3,                        \
           "A string must reach this age (or be promoted to an old region) " \
           "to be considered for deduplication")                             \
@@ -4208,9 +4120,6 @@
   diagnostic(bool, WhiteBoxAPI, false,                                      \
           "Enable internal testing APIs")                                   \
                                                                             \
-  product(bool, PrintGCCause, true,                                         \
-          "Include GC cause in GC logging")                                 \
-                                                                            \
   experimental(intx, SurvivorAlignmentInBytes, 0,                           \
            "Default survivor space alignment in bytes")                     \
            constraint(SurvivorAlignmentInBytesConstraintFunc,AfterErgo)     \
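
All of the globals.hpp hunks above follow one shape: an existing product/diagnostic/develop flag gains a trailing range(min, max) entry, and occasionally a constraint(Func, When) entry naming a validation function and the initialization phase (AfterErgo, AfterMemoryInit) at which it runs. A minimal sketch of the combined form, using a hypothetical flag (ExampleFlag and ExampleFlagConstraintFunc are illustrative, not part of this changeset):

  product(uintx, ExampleFlag, 42,                                           \
          "Illustrative flag; shows the range/constraint shape only")       \
          range(0, max_uintx)                                               \
          constraint(ExampleFlagConstraintFunc,AfterErgo)                   \

The range bounds are validated when the flag's value is set, so an out-of-range -XX value is rejected at startup instead of surfacing as misbehavior later.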
--- a/src/share/vm/runtime/init.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/init.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/stringTable.hpp"
+#include "classfile/symbolTable.hpp"
 #include "code/codeCacheExtensions.hpp"
 #include "code/icBuffer.hpp"
 #include "gc/shared/collectedHeap.hpp"
--- a/src/share/vm/runtime/interfaceSupport.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/interfaceSupport.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -32,7 +32,6 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
 #include "runtime/vframe.hpp"
 #include "utilities/preserveException.hpp"
 
@@ -102,15 +101,13 @@
       // Compute new interval
       if (FullGCALotInterval > 1) {
         _fullgc_alot_counter = 1+(long)((double)FullGCALotInterval*os::random()/(max_jint+1.0));
-        if (PrintGCDetails && Verbose) {
-          tty->print_cr("Full gc no: %u\tInterval: %ld", invocations, _fullgc_alot_counter);
-        }
+        log_trace(gc)("Full gc no: %u\tInterval: %ld", invocations, _fullgc_alot_counter);
       } else {
         _fullgc_alot_counter = 1;
       }
       // Print progress message
       if (invocations % 100 == 0) {
-        if (PrintGCDetails && Verbose) tty->print_cr("Full gc no: %u", invocations);
+        log_trace(gc)("Full gc no: %u", invocations);
       }
     } else {
       if (ScavengeALot) _scavenge_alot_counter--;
@@ -122,15 +119,13 @@
         // Compute new interval
         if (ScavengeALotInterval > 1) {
           _scavenge_alot_counter = 1+(long)((double)ScavengeALotInterval*os::random()/(max_jint+1.0));
-          if (PrintGCDetails && Verbose) {
-            tty->print_cr("Scavenge no: %u\tInterval: %ld", invocations, _scavenge_alot_counter);
-          }
+          log_trace(gc)("Scavenge no: %u\tInterval: %ld", invocations, _scavenge_alot_counter);
         } else {
           _scavenge_alot_counter = 1;
         }
         // Print progress message
         if (invocations % 1000 == 0) {
-          if (PrintGCDetails && Verbose) tty->print_cr("Scavenge no: %u", invocations);
+          log_trace(gc)("Scavenge no: %u", invocations);
         }
       }
     }
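
Both hunks above apply the mechanical conversion used throughout this changeset: a tty print guarded by PrintGCDetails && Verbose becomes one call into the Unified Logging framework, which performs its own enabled-level check. Schematically (a sketch of the pattern, not new code):

  // before: caller tests the verbosity flags, then writes to tty
  if (PrintGCDetails && Verbose) {
    tty->print_cr("Scavenge no: %u", invocations);
  }
  // after: the framework decides whether trace is enabled for the gc tag
  log_trace(gc)("Scavenge no: %u", invocations);

The message now appears when trace-level output is enabled for the gc tag, e.g. via -Xlog:gc=trace.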
--- a/src/share/vm/runtime/interfaceSupport.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/interfaceSupport.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -562,7 +562,7 @@
 #define JVM_ENTRY_NO_ENV(result_type, header)                        \
 extern "C" {                                                         \
   result_type JNICALL header {                                       \
-    JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();  \
+    JavaThread* thread = JavaThread::current();                      \
     ThreadInVMfromNative __tiv(thread);                              \
     debug_only(VMNativeEntryWrapper __vew;)                          \
     VM_ENTRY_BASE(result_type, header, thread)
--- a/src/share/vm/runtime/java.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/java.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -35,6 +35,7 @@
 #include "jvmci/jvmciCompiler.hpp"
 #include "jvmci/jvmciRuntime.hpp"
 #endif
+#include "logging/log.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.hpp"
 #include "oops/constantPool.hpp"
@@ -453,13 +454,15 @@
   Universe::heap()->stop();
 
   // Print GC/heap related information.
-  if (PrintGCDetails) {
-    Universe::print();
-    AdaptiveSizePolicyOutput(0);
-    if (Verbose) {
-      ClassLoaderDataGraph::dump_on(gclog_or_tty);
+  LogHandle(gc, heap, exit) log;
+  if (log.is_info()) {
+    ResourceMark rm;
+    Universe::print_on(log.info_stream());
+    if (log.is_trace()) {
+      ClassLoaderDataGraph::dump_on(log.trace_stream());
     }
   }
+  AdaptiveSizePolicyOutput::print();
 
   if (PrintBytecodeHistogram) {
     BytecodeHistogram::print();
@@ -512,10 +515,10 @@
 }
 
 void vm_exit(int code) {
-  Thread* thread = ThreadLocalStorage::is_initialized() ?
-    ThreadLocalStorage::get_thread_slow() : NULL;
+  Thread* thread =
+      ThreadLocalStorage::is_initialized() ? Thread::current_or_null() : NULL;
   if (thread == NULL) {
-    // we have serious problems -- just exit
+    // very early initialization failure -- just exit
     vm_direct_exit(code);
   }
 
@@ -551,8 +554,7 @@
   // Calling 'exit_globals()' will disable thread-local-storage and cause all
   // kinds of assertions to trigger in debug mode.
   if (is_init_completed()) {
-    Thread* thread = ThreadLocalStorage::is_initialized() ?
-                     ThreadLocalStorage::get_thread_slow() : NULL;
+    Thread* thread = Thread::current_or_null();
     if (thread != NULL && thread->is_Java_thread()) {
       // We are leaving the VM, set state to native (in case any OS exit
       // handlers call back to the VM)
@@ -606,7 +608,7 @@
   // If there are exceptions on this thread it must be cleared
   // first and here. Any future calls to EXCEPTION_MARK requires
   // that no pending exceptions exist.
-  Thread *THREAD = Thread::current();
+  Thread *THREAD = Thread::current(); // can't be NULL
   if (HAS_PENDING_EXCEPTION) {
     CLEAR_PENDING_EXCEPTION;
   }
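
The before_exit hunk above also shows the LogHandle idiom for code that emits output at more than one level: declare a handle for a tag set, test which levels are enabled, and pass the matching stream to existing print routines. Condensed from the hunk, with comments added for exposition:

  LogHandle(gc, heap, exit) log;          // handle for the gc+heap+exit tag set
  if (log.is_info()) {
    ResourceMark rm;
    Universe::print_on(log.info_stream());               // heap summary at info
    if (log.is_trace()) {
      ClassLoaderDataGraph::dump_on(log.trace_stream()); // extra detail at trace
    }
  }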
--- a/src/share/vm/runtime/javaCalls.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/javaCalls.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -371,9 +371,9 @@
   // Find receiver
   Handle receiver = (!method->is_static()) ? args->receiver() : Handle();
 
-  // When we reenter Java, we need to reenable the yellow zone which
+  // When we reenter Java, we need to reenable the reserved/yellow zone which
   // might already be disabled when we are in VM.
-  if (thread->stack_yellow_zone_disabled()) {
+  if (!thread->stack_guards_enabled()) {
     thread->reguard_stack();
   }
 
--- a/src/share/vm/runtime/jniHandles.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/jniHandles.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/jniHandles.hpp"
@@ -393,9 +394,7 @@
           f->do_oop(root);
         } else {
           // The weakly referenced object is not alive, clear the reference by storing NULL
-          if (TraceReferenceGC) {
-            tty->print_cr("Clearing JNI weak reference (" INTPTR_FORMAT ")", p2i(root));
-          }
+          log_develop_trace(gc, ref)("Clearing JNI weak reference (" INTPTR_FORMAT ")", p2i(root));
           *root = NULL;
         }
       }
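
Note the macro chosen here is log_develop_trace(gc, ref) rather than log_trace: TraceReferenceGC, the flag being retired, was develop-only, and my understanding is that the develop variants of the logging macros are likewise compiled out of product builds, so the hunk preserves the old debug-builds-only behavior. The mapping, schematically:

  // develop flag -> develop logging macro (absent from product builds)
  log_develop_trace(gc, ref)("Clearing JNI weak reference (" INTPTR_FORMAT ")", p2i(root));
  // product flag -> plain logging macro (always compiled in)
  log_trace(gc)("Full gc no: %u", invocations);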
--- a/src/share/vm/runtime/mutex.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/mutex.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1035,10 +1035,10 @@
  Exeunt:
     assert(ILocked(), "invariant");
     assert(_owner == NULL, "invariant");
-    // This can potentially be called by non-java Threads. Thus, the ThreadLocalStorage
+    // This can potentially be called by non-java Threads. Thus, the Thread::current_or_null()
     // might return NULL. Don't call set_owner since it will break on a NULL owner
     // Consider installing a non-null "ANON" distinguished value instead of just NULL.
-    _owner = ThreadLocalStorage::thread();
+    _owner = Thread::current_or_null();
     return;
   }
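
This hunk is part of the changeset-wide retirement of the ThreadLocalStorage indirection seen in the surrounding files: code that may run on a thread not attached to the VM now asks Thread::current_or_null(), while code that can assume an attached thread keeps Thread::current() (compare the JVM_ENTRY_NO_ENV and java.cpp hunks). An illustrative sketch of the distinction, not code from this changeset:

  Thread* t = Thread::current_or_null();   // NULL when on an unattached thread
  if (t != NULL && t->is_Java_thread()) {
    // safe to touch JavaThread state here
  }
  JavaThread* jt = JavaThread::current();  // requires an attached JavaThread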
 
--- a/src/share/vm/runtime/mutexLocker.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/mutexLocker.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,7 +27,6 @@
 #include "runtime/os.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
 #include "runtime/vmThread.hpp"
 
 // Mutexes used in the VM (see comment in mutexLocker.hpp):
--- a/src/share/vm/runtime/os.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/os.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -262,7 +262,7 @@
         VMThread::execute(&op1);
         Universe::print_heap_at_SIGBREAK();
         if (PrintClassHistogram) {
-          VM_GC_HeapInspection op1(gclog_or_tty, true /* force full GC before heap inspection */);
+          VM_GC_HeapInspection op1(tty, true /* force full GC before heap inspection */);
           VMThread::execute(&op1);
         }
         if (JvmtiExport::should_post_data_dump()) {
@@ -315,6 +315,10 @@
   // We need to initialize large page support here because ergonomics takes some
   // decisions depending on large page support and the calculated large page size.
   large_page_init();
+
+  // VM version initialization identifies some characteristics of the
+  // platform that are used during ergonomic decisions.
+  VM_Version::init_before_ergo();
 }
 
 void os::signal_init() {
@@ -420,28 +424,6 @@
     }
 #endif
   }
-  static jboolean onLoaded = JNI_FALSE;
-  if (onLoaded) {
-    // We may have to wait to fire OnLoad until TLS is initialized.
-    if (ThreadLocalStorage::is_initialized()) {
-      // The JNI_OnLoad handling is normally done by method load in
-      // java.lang.ClassLoader$NativeLibrary, but the VM loads the base library
-      // explicitly so we have to check for JNI_OnLoad as well
-      const char *onLoadSymbols[] = JNI_ONLOAD_SYMBOLS;
-      JNI_OnLoad_t JNI_OnLoad = CAST_TO_FN_PTR(
-          JNI_OnLoad_t, dll_lookup(_native_java_library, onLoadSymbols[0]));
-      if (JNI_OnLoad != NULL) {
-        JavaThread* thread = JavaThread::current();
-        ThreadToNativeFromVM ttn(thread);
-        HandleMark hm(thread);
-        jint ver = (*JNI_OnLoad)(&main_vm, NULL);
-        onLoaded = JNI_TRUE;
-        if (!Threads::is_supported_jni_version_including_1_1(ver)) {
-          vm_exit_during_initialization("Unsupported JNI version");
-        }
-      }
-    }
-  }
   return _native_java_library;
 }
 
@@ -574,7 +556,7 @@
   // exists and has crash protection.
   WatcherThread *wt = WatcherThread::watcher_thread();
   if (wt != NULL && wt->has_crash_protection()) {
-    Thread* thread = ThreadLocalStorage::get_thread_slow();
+    Thread* thread = Thread::current_or_null();
     if (thread == wt) {
       assert(!wt->has_crash_protection(),
           "Can't malloc with crash protection from WatcherThread");
@@ -1404,8 +1386,9 @@
   // respectively.
   const int framesize_in_bytes =
     Interpreter::size_top_interpreter_activation(method()) * wordSize;
-  int reserved_area = ((StackShadowPages + StackRedPages + StackYellowPages)
-                      * vm_page_size()) + framesize_in_bytes;
+  int reserved_area = ((StackShadowPages + StackRedPages + StackYellowPages
+                      + StackReservedPages) * vm_page_size())
+                      + framesize_in_bytes;
   // The very lower end of the stack
   address stack_limit = thread->stack_base() - thread->stack_size();
   return (sp > (stack_limit + reserved_area));
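
The arithmetic above simply grows the guarded region by the new reserved zone. Pulled out of the hunk (same names; framesize_in_bytes is the interpreter activation size computed a few lines earlier):

  int pages = StackShadowPages + StackRedPages + StackYellowPages + StackReservedPages;
  int reserved_area = pages * vm_page_size() + framesize_in_bytes;
  // the activation fits only if sp stays above stack_limit + reserved_area

so a platform that defaults StackReservedPages to 1 tightens this check by exactly one page.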
--- a/src/share/vm/runtime/os.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/os.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -472,8 +472,9 @@
 
   static int pd_self_suspend_thread(Thread* thread);
 
-  static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp);
-  static frame      fetch_frame_from_context(void* ucVoid);
+  static ExtendedPC fetch_frame_from_context(const void* ucVoid, intptr_t** sp, intptr_t** fp);
+  static frame      fetch_frame_from_context(const void* ucVoid);
+  static frame      fetch_frame_from_ucontext(Thread* thread, void* ucVoid);
 
   static ExtendedPC get_thread_pc(Thread *thread);
   static void breakpoint();
@@ -499,7 +500,7 @@
 
   // Terminate with an error.  Default is to generate a core file on platforms
   // that support such things.  This calls shutdown() and then aborts.
-  static void abort(bool dump_core, void *siginfo, void *context);
+  static void abort(bool dump_core, void *siginfo, const void *context);
   static void abort(bool dump_core = true);
 
   // Die immediately, no exit hook, no abort hook, no cleanup.
@@ -604,8 +605,8 @@
   static void print_memory_info(outputStream* st);
   static void print_dll_info(outputStream* st);
   static void print_environment_variables(outputStream* st, const char** env_list);
-  static void print_context(outputStream* st, void* context);
-  static void print_register_info(outputStream* st, void* context);
+  static void print_context(outputStream* st, const void* context);
+  static void print_register_info(outputStream* st, const void* context);
   static void print_siginfo(outputStream* st, void* siginfo);
   static void print_signal_handlers(outputStream* st, char* buf, size_t buflen);
   static void print_date_and_time(outputStream* st, char* buf, size_t buflen);
@@ -643,6 +644,9 @@
   // returns NULL if exception_code is not an OS exception/signal.
   static const char* exception_name(int exception_code, char* buf, size_t buflen);
 
+  // Returns the signal number (e.g. 11) for a given signal name (SIGSEGV).
+  static int get_signal_number(const char* signal_name);
+
   // Returns native Java library, loads if necessary
   static void*    native_java_library();
 
@@ -668,12 +672,6 @@
   static jlong current_file_offset(int fd);
   static jlong seek_to_file_offset(int fd, jlong offset);
 
-  // Thread Local Storage
-  static int   allocate_thread_local_storage();
-  static void  thread_local_storage_at_put(int index, void* value);
-  static void* thread_local_storage_at(int index);
-  static void  free_thread_local_storage(int index);
-
   // Retrieve native stack frames.
   // Parameter:
   //   stack:  an array to store stack pointers.
@@ -851,7 +849,7 @@
  public:
 #ifndef PLATFORM_PRINT_NATIVE_STACK
   // No platform-specific code for printing the native stack.
-  static bool platform_print_native_stack(outputStream* st, void* context,
+  static bool platform_print_native_stack(outputStream* st, const void* context,
                                           char *buf, int buf_size) {
     return false;
   }
--- a/src/share/vm/runtime/reflection.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/reflection.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -46,7 +46,7 @@
 #include "runtime/signature.hpp"
 #include "runtime/vframe.hpp"
 
-static void trace_class_resolution(Klass* to_class) {
+static void trace_class_resolution(const Klass* to_class) {
   ResourceMark rm;
   int line_number = -1;
   const char * source_file = NULL;
@@ -300,23 +300,23 @@
   }
 }
 
-
-Klass* Reflection::basic_type_mirror_to_arrayklass(oop basic_type_mirror, TRAPS) {
+static Klass* basic_type_mirror_to_arrayklass(oop basic_type_mirror, TRAPS) {
   assert(java_lang_Class::is_primitive(basic_type_mirror), "just checking");
   BasicType type = java_lang_Class::primitive_type(basic_type_mirror);
   if (type == T_VOID) {
     THROW_0(vmSymbols::java_lang_IllegalArgumentException());
-  } else {
+  }
+  else {
     return Universe::typeArrayKlassObj(type);
   }
 }
 
-
-oop Reflection:: basic_type_arrayklass_to_mirror(Klass* basic_type_arrayklass, TRAPS) {
+#ifdef ASSERT
+static oop basic_type_arrayklass_to_mirror(Klass* basic_type_arrayklass, TRAPS) {
   BasicType type = TypeArrayKlass::cast(basic_type_arrayklass)->element_type();
   return Universe::java_mirror(type);
 }
-
+#endif
 
 arrayOop Reflection::reflect_new_array(oop element_mirror, jint length, TRAPS) {
   if (element_mirror == NULL) {
@@ -410,8 +410,51 @@
   return result;
 }
 
+static bool under_host_klass(const InstanceKlass* ik, const Klass* host_klass) {
+  DEBUG_ONLY(int inf_loop_check = 1000 * 1000 * 1000);
+  for (;;) {
+    const Klass* hc = (const Klass*)ik->host_klass();
+    if (hc == NULL)        return false;
+    if (hc == host_klass)  return true;
+    ik = InstanceKlass::cast(hc);
 
-bool Reflection::verify_class_access(Klass* current_class, Klass* new_class, bool classloader_only) {
+    // There's no way to make a host class loop short of patching memory.
+    // Therefore there cannot be a loop here unless there's another bug.
+    // Still, let's check for it.
+    assert(--inf_loop_check > 0, "no host_klass loop");
+  }
+}
+
+static bool can_relax_access_check_for(const Klass* accessor,
+                                       const Klass* accessee,
+                                       bool classloader_only) {
+
+  const InstanceKlass* accessor_ik = InstanceKlass::cast(accessor);
+  const InstanceKlass* accessee_ik = InstanceKlass::cast(accessee);
+
+  // If either is on the other's host_klass chain, access is OK,
+  // because one is inside the other.
+  if (under_host_klass(accessor_ik, accessee) ||
+    under_host_klass(accessee_ik, accessor))
+    return true;
+
+  if ((RelaxAccessControlCheck &&
+    accessor_ik->major_version() < Verifier::NO_RELAX_ACCESS_CTRL_CHECK_VERSION &&
+    accessee_ik->major_version() < Verifier::NO_RELAX_ACCESS_CTRL_CHECK_VERSION) ||
+    (accessor_ik->major_version() < Verifier::STRICTER_ACCESS_CTRL_CHECK_VERSION &&
+    accessee_ik->major_version() < Verifier::STRICTER_ACCESS_CTRL_CHECK_VERSION)) {
+    return classloader_only &&
+      Verifier::relax_verify_for(accessor_ik->class_loader()) &&
+      accessor_ik->protection_domain() == accessee_ik->protection_domain() &&
+      accessor_ik->class_loader() == accessee_ik->class_loader();
+  }
+
+  return false;
+}
+
+bool Reflection::verify_class_access(const Klass* current_class,
+                                     const Klass* new_class,
+                                     bool classloader_only) {
   // Verify that current_class can access new_class.  If the classloader_only
   // flag is set, we automatically allow any accesses in which current_class
   // doesn't have a classloader.
@@ -430,49 +473,9 @@
   return can_relax_access_check_for(current_class, new_class, classloader_only);
 }
 
-static bool under_host_klass(InstanceKlass* ik, Klass* host_klass) {
-  DEBUG_ONLY(int inf_loop_check = 1000 * 1000 * 1000);
-  for (;;) {
-    Klass* hc = (Klass*) ik->host_klass();
-    if (hc == NULL)        return false;
-    if (hc == host_klass)  return true;
-    ik = InstanceKlass::cast(hc);
-
-    // There's no way to make a host class loop short of patching memory.
-    // Therefore there cannot be a loop here unless there's another bug.
-    // Still, let's check for it.
-    assert(--inf_loop_check > 0, "no host_klass loop");
-  }
-}
-
-bool Reflection::can_relax_access_check_for(
-    Klass* accessor, Klass* accessee, bool classloader_only) {
-  InstanceKlass* accessor_ik = InstanceKlass::cast(accessor);
-  InstanceKlass* accessee_ik  = InstanceKlass::cast(accessee);
-
-  // If either is on the other's host_klass chain, access is OK,
-  // because one is inside the other.
-  if (under_host_klass(accessor_ik, accessee) ||
-      under_host_klass(accessee_ik, accessor))
-    return true;
-
-  if ((RelaxAccessControlCheck &&
-        accessor_ik->major_version() < Verifier::NO_RELAX_ACCESS_CTRL_CHECK_VERSION &&
-        accessee_ik->major_version() < Verifier::NO_RELAX_ACCESS_CTRL_CHECK_VERSION) ||
-      (accessor_ik->major_version() < Verifier::STRICTER_ACCESS_CTRL_CHECK_VERSION &&
-       accessee_ik->major_version() < Verifier::STRICTER_ACCESS_CTRL_CHECK_VERSION)) {
-    return classloader_only &&
-      Verifier::relax_verify_for(accessor_ik->class_loader()) &&
-      accessor_ik->protection_domain() == accessee_ik->protection_domain() &&
-      accessor_ik->class_loader() == accessee_ik->class_loader();
-  } else {
-    return false;
-  }
-}
-
-bool Reflection::verify_field_access(Klass* current_class,
-                                     Klass* resolved_class,
-                                     Klass* field_class,
+bool Reflection::verify_field_access(const Klass* current_class,
+                                     const Klass* resolved_class,
+                                     const Klass* field_class,
                                      AccessFlags access,
                                      bool classloader_only,
                                      bool protected_restriction) {
@@ -494,10 +497,10 @@
     return true;
   }
 
-  Klass* host_class = current_class;
+  const Klass* host_class = current_class;
   while (host_class->is_instance_klass() &&
          InstanceKlass::cast(host_class)->is_anonymous()) {
-    Klass* next_host_class = InstanceKlass::cast(host_class)->host_klass();
+    const Klass* next_host_class = InstanceKlass::cast(host_class)->host_klass();
     if (next_host_class == NULL)  break;
     host_class = next_host_class;
   }
@@ -535,16 +538,10 @@
     current_class, field_class, classloader_only);
 }
 
-
-bool Reflection::is_same_class_package(Klass* class1, Klass* class2) {
+bool Reflection::is_same_class_package(const Klass* class1, const Klass* class2) {
   return InstanceKlass::cast(class1)->is_same_class_package(class2);
 }
 
-bool Reflection::is_same_package_member(Klass* class1, Klass* class2, TRAPS) {
-  return InstanceKlass::cast(class1)->is_same_package_member(class2, THREAD);
-}
-
-
 // Checks that the 'outer' klass has declared 'inner' as being an inner klass. If not,
 // throw an incompatible class change exception
 // If inner_is_member, require the inner to be a member of the outer.
@@ -588,38 +585,43 @@
 }
 
 // Utility method converting a single SignatureStream element into java.lang.Class instance
+static oop get_mirror_from_signature(methodHandle method,
+                                     SignatureStream* ss,
+                                     TRAPS) {
 
-oop get_mirror_from_signature(methodHandle method, SignatureStream* ss, TRAPS) {
-  switch (ss->type()) {
-    default:
-      assert(ss->type() != T_VOID || ss->at_return_type(), "T_VOID should only appear as return type");
-      return java_lang_Class::primitive_mirror(ss->type());
-    case T_OBJECT:
-    case T_ARRAY:
-      Symbol* name        = ss->as_symbol(CHECK_NULL);
-      oop loader            = method->method_holder()->class_loader();
-      oop protection_domain = method->method_holder()->protection_domain();
-      Klass* k = SystemDictionary::resolve_or_fail(
-                                       name,
-                                       Handle(THREAD, loader),
-                                       Handle(THREAD, protection_domain),
-                                       true, CHECK_NULL);
-      if (TraceClassResolution) {
-        trace_class_resolution(k);
-      }
-      return k->java_mirror();
-  };
+
+  if (T_OBJECT == ss->type() || T_ARRAY == ss->type()) {
+    Symbol* name = ss->as_symbol(CHECK_NULL);
+    oop loader = method->method_holder()->class_loader();
+    oop protection_domain = method->method_holder()->protection_domain();
+    const Klass* k = SystemDictionary::resolve_or_fail(name,
+                                                       Handle(THREAD, loader),
+                                                       Handle(THREAD, protection_domain),
+                                                       true,
+                                                       CHECK_NULL);
+    if (TraceClassResolution) {
+      trace_class_resolution(k);
+    }
+    return k->java_mirror();
+  }
+
+  assert(ss->type() != T_VOID || ss->at_return_type(),
+    "T_VOID should only appear as return type");
+
+  return java_lang_Class::primitive_mirror(ss->type());
 }
 
-
-objArrayHandle Reflection::get_parameter_types(const methodHandle& method, int parameter_count, oop* return_type, TRAPS) {
+static objArrayHandle get_parameter_types(methodHandle method,
+                                          int parameter_count,
+                                          oop* return_type,
+                                          TRAPS) {
   // Allocate array holding parameter types (java.lang.Class instances)
   objArrayOop m = oopFactory::new_objArray(SystemDictionary::Class_klass(), parameter_count, CHECK_(objArrayHandle()));
-  objArrayHandle mirrors (THREAD, m);
+  objArrayHandle mirrors(THREAD, m);
   int index = 0;
   // Collect parameter types
   ResourceMark rm(THREAD);
-  Symbol*  signature  = method->signature();
+  Symbol*  signature = method->signature();
   SignatureStream ss(signature);
   while (!ss.at_return_type()) {
     oop mirror = get_mirror_from_signature(method, &ss, CHECK_(objArrayHandle()));
@@ -635,22 +637,22 @@
   return mirrors;
 }
 
-objArrayHandle Reflection::get_exception_types(const methodHandle& method, TRAPS) {
+static objArrayHandle get_exception_types(methodHandle method, TRAPS) {
   return method->resolved_checked_exceptions(THREAD);
 }
 
-
-Handle Reflection::new_type(Symbol* signature, KlassHandle k, TRAPS) {
+static Handle new_type(Symbol* signature, KlassHandle k, TRAPS) {
   // Basic types
   BasicType type = vmSymbols::signature_type(signature);
   if (type != T_OBJECT) {
     return Handle(THREAD, Universe::java_mirror(type));
   }
 
-  Klass* result = SystemDictionary::resolve_or_fail(signature,
-                                    Handle(THREAD, k->class_loader()),
-                                    Handle(THREAD, k->protection_domain()),
-                                    true, CHECK_(Handle()));
+  Klass* result =
+    SystemDictionary::resolve_or_fail(signature,
+                                      Handle(THREAD, k->class_loader()),
+                                      Handle(THREAD, k->protection_domain()),
+                                      true, CHECK_(Handle()));
 
   if (TraceClassResolution) {
     trace_class_resolution(result);
@@ -686,7 +688,7 @@
   Handle name = Handle(THREAD, name_oop);
   if (name == NULL) return NULL;
 
-  int modifiers = method->access_flags().as_int() & JVM_RECOGNIZED_METHOD_MODIFIERS;
+  const int modifiers = method->access_flags().as_int() & JVM_RECOGNIZED_METHOD_MODIFIERS;
 
   Handle mh = java_lang_reflect_Method::create(CHECK_NULL);
 
@@ -738,7 +740,7 @@
   objArrayHandle exception_types = get_exception_types(method, CHECK_NULL);
   if (exception_types.is_null()) return NULL;
 
-  int modifiers = method->access_flags().as_int() & JVM_RECOGNIZED_METHOD_MODIFIERS;
+  const int modifiers = method->access_flags().as_int() & JVM_RECOGNIZED_METHOD_MODIFIERS;
 
   Handle ch = java_lang_reflect_Constructor::create(CHECK_NULL);
 
@@ -822,8 +824,12 @@
 }
 
 
-methodHandle Reflection::resolve_interface_call(instanceKlassHandle klass, const methodHandle& method,
-                                                KlassHandle recv_klass, Handle receiver, TRAPS) {
+static methodHandle resolve_interface_call(instanceKlassHandle klass,
+                                           const methodHandle& method,
+                                           KlassHandle recv_klass,
+                                           Handle receiver,
+                                           TRAPS) {
+
   assert(!method.is_null() , "method should not be null");
 
   CallInfo info;
@@ -836,10 +842,48 @@
   return info.selected_method();
 }
 
+// Conversion
+static BasicType basic_type_mirror_to_basic_type(oop basic_type_mirror, TRAPS) {
+  assert(java_lang_Class::is_primitive(basic_type_mirror),
+    "just checking");
+  return java_lang_Class::primitive_type(basic_type_mirror);
+}
 
-oop Reflection::invoke(instanceKlassHandle klass, const methodHandle& reflected_method,
-                       Handle receiver, bool override, objArrayHandle ptypes,
-                       BasicType rtype, objArrayHandle args, bool is_method_invoke, TRAPS) {
+// Narrowing of basic types. Used to create correct jvalues for
+// boolean, byte, char and short return values from the interpreter
+// which are returned as ints. Throws IllegalArgumentException.
+static void narrow(jvalue* value, BasicType narrow_type, TRAPS) {
+  switch (narrow_type) {
+  case T_BOOLEAN:
+    value->z = (jboolean)value->i;
+    return;
+  case T_BYTE:
+    value->b = (jbyte)value->i;
+    return;
+  case T_CHAR:
+    value->c = (jchar)value->i;
+    return;
+  case T_SHORT:
+    value->s = (jshort)value->i;
+    return;
+  default:
+    break; // fail
+  }
+  THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "argument type mismatch");
+}
+
+
+// Method call (shared by invoke_method and invoke_constructor)
+static oop invoke(instanceKlassHandle klass,
+                  methodHandle reflected_method,
+                  Handle receiver,
+                  bool override,
+                  objArrayHandle ptypes,
+                  BasicType rtype,
+                  objArrayHandle args,
+                  bool is_method_invoke,
+                  TRAPS) {
+
   ResourceMark rm(THREAD);
 
   methodHandle method;      // actual method to invoke
@@ -876,18 +920,18 @@
         // Linktime resolution & IllegalAccessCheck already done by Class.getMethod()
         method = resolve_interface_call(klass, reflected_method, target_klass, receiver, THREAD);
         if (HAS_PENDING_EXCEPTION) {
-        // Method resolution threw an exception; wrap it in an InvocationTargetException
+          // Method resolution threw an exception; wrap it in an InvocationTargetException
           oop resolution_exception = PENDING_EXCEPTION;
           CLEAR_PENDING_EXCEPTION;
           // JVMTI has already reported the pending exception
           // JVMTI internal flag reset is needed in order to report InvocationTargetException
           if (THREAD->is_Java_thread()) {
-            JvmtiExport::clear_detected_exception((JavaThread*) THREAD);
+            JvmtiExport::clear_detected_exception((JavaThread*)THREAD);
           }
           JavaCallArguments args(Handle(THREAD, resolution_exception));
           THROW_ARG_0(vmSymbols::java_lang_reflect_InvocationTargetException(),
-              vmSymbols::throwable_void_signature(),
-              &args);
+                      vmSymbols::throwable_void_signature(),
+                      &args);
         }
       } else {
         // if the method can be overridden, we resolve using the vtable index.
@@ -906,10 +950,10 @@
             // new default: 6531596
             ResourceMark rm(THREAD);
             Handle h_origexception = Exceptions::new_exception(THREAD,
-                   vmSymbols::java_lang_AbstractMethodError(),
-                   Method::name_and_sig_as_C_string(target_klass(),
-                   method->name(),
-                   method->signature()));
+              vmSymbols::java_lang_AbstractMethodError(),
+              Method::name_and_sig_as_C_string(target_klass(),
+              method->name(),
+              method->signature()));
             JavaCallArguments args(h_origexception);
             THROW_ARG_0(vmSymbols::java_lang_reflect_InvocationTargetException(),
               vmSymbols::throwable_void_signature(),
@@ -926,15 +970,16 @@
     ResourceMark rm(THREAD);
     THROW_MSG_0(vmSymbols::java_lang_NoSuchMethodError(),
                 Method::name_and_sig_as_C_string(klass(),
-                                                        reflected_method->name(),
-                                                        reflected_method->signature()));
+                reflected_method->name(),
+                reflected_method->signature()));
   }
 
   assert(ptypes->is_objArray(), "just checking");
   int args_len = args.is_null() ? 0 : args->length();
   // Check number of arguments
   if (ptypes->length() != args_len) {
-    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "wrong number of arguments");
+    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
+                "wrong number of arguments");
   }
 
   // Create object to contain parameters for the JavaCall
@@ -950,9 +995,9 @@
     if (java_lang_Class::is_primitive(type_mirror)) {
       jvalue value;
       BasicType ptype = basic_type_mirror_to_basic_type(type_mirror, CHECK_NULL);
-      BasicType atype = unbox_for_primitive(arg, &value, CHECK_NULL);
+      BasicType atype = Reflection::unbox_for_primitive(arg, &value, CHECK_NULL);
       if (ptype != atype) {
-        widen(&value, atype, ptype, CHECK_NULL);
+        Reflection::widen(&value, atype, ptype, CHECK_NULL);
       }
       switch (ptype) {
         case T_BOOLEAN:     java_args.push_int(value.z);    break;
@@ -970,7 +1015,8 @@
       if (arg != NULL) {
         Klass* k = java_lang_Class::as_Klass(type_mirror);
         if (!arg->is_a(k)) {
-          THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "argument type mismatch");
+          THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
+                      "argument type mismatch");
         }
       }
       Handle arg_handle(THREAD, arg);         // Create handle for argument
@@ -978,7 +1024,8 @@
     }
   }
 
-  assert(java_args.size_of_parameters() == method->size_of_parameters(), "just checking");
+  assert(java_args.size_of_parameters() == method->size_of_parameters(),
+    "just checking");
 
   // All oops (including the receiver) are passed in as Handles. A potential oop is returned as an
   // oop (i.e., NOT as a handle)
@@ -992,7 +1039,7 @@
     // JVMTI has already reported the pending exception
     // JVMTI internal flag reset is needed in order to report InvocationTargetException
     if (THREAD->is_Java_thread()) {
-      JvmtiExport::clear_detected_exception((JavaThread*) THREAD);
+      JvmtiExport::clear_detected_exception((JavaThread*)THREAD);
     }
 
     JavaCallArguments args(Handle(THREAD, target_exception));
@@ -1001,39 +1048,12 @@
                 &args);
   } else {
     if (rtype == T_BOOLEAN || rtype == T_BYTE || rtype == T_CHAR || rtype == T_SHORT) {
-      narrow((jvalue*) result.get_value_addr(), rtype, CHECK_NULL);
+      narrow((jvalue*)result.get_value_addr(), rtype, CHECK_NULL);
     }
-    return box((jvalue*) result.get_value_addr(), rtype, THREAD);
+    return Reflection::box((jvalue*)result.get_value_addr(), rtype, THREAD);
   }
 }
 
-
-void Reflection::narrow(jvalue* value, BasicType narrow_type, TRAPS) {
-  switch (narrow_type) {
-    case T_BOOLEAN:
-     value->z = (jboolean) value->i;
-     return;
-    case T_BYTE:
-     value->b = (jbyte) value->i;
-     return;
-    case T_CHAR:
-     value->c = (jchar) value->i;
-     return;
-    case T_SHORT:
-     value->s = (jshort) value->i;
-     return;
-    default:
-      break; // fail
-   }
-  THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "argument type mismatch");
-}
-
-
-BasicType Reflection::basic_type_mirror_to_basic_type(oop basic_type_mirror, TRAPS) {
-  assert(java_lang_Class::is_primitive(basic_type_mirror), "just checking");
-  return java_lang_Class::primitive_type(basic_type_mirror);
-}
-
 // This would be nicer if, say, java.lang.reflect.Method was a subclass
 // of java.lang.reflect.Constructor
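
One subtlety in the reshuffled helpers above is worth spelling out: the interpreter returns boolean, byte, char and short results widened to int, so the file-static narrow() truncates the jvalue before Reflection::box() wraps it. An illustrative walk-through with hypothetical values, not code from this changeset:

  jvalue v;
  v.i = 1;                              // raw interpreter return for a boolean method
  narrow(&v, T_BOOLEAN, CHECK_NULL);    // stores (jboolean)v.i into v.z
  oop boxed = Reflection::box(&v, T_BOOLEAN, THREAD);  // -> java.lang.Boolean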
 
--- a/src/share/vm/runtime/reflection.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/reflection.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -43,16 +43,6 @@
 class FieldStream;
 
 class Reflection: public AllStatic {
- private:
-  // Conversion
-  static Klass* basic_type_mirror_to_arrayklass(oop basic_type_mirror, TRAPS);
-  static oop      basic_type_arrayklass_to_mirror(Klass* basic_type_arrayklass, TRAPS);
-
-  static objArrayHandle get_parameter_types(const methodHandle& method, int parameter_count, oop* return_type, TRAPS);
-  static objArrayHandle get_exception_types(const methodHandle& method, TRAPS);
-  // Creating new java.lang.reflect.xxx wrappers
-  static Handle new_type(Symbol* signature, KlassHandle k, TRAPS);
-
  public:
   // Constants defined by java reflection api classes
   enum SomeConstants {
@@ -83,27 +73,27 @@
   static arrayOop reflect_new_multi_array(oop element_mirror, typeArrayOop dimensions, TRAPS);
 
   // Verification
-  static bool     verify_class_access(Klass* current_class, Klass* new_class, bool classloader_only);
+  static bool     verify_class_access(const Klass* current_class,
+                                      const Klass* new_class,
+                                      bool classloader_only);
 
-  static bool     verify_field_access(Klass* current_class,
-                                      Klass* resolved_class,
-                                      Klass* field_class,
+  static bool     verify_field_access(const Klass* current_class,
+                                      const Klass* resolved_class,
+                                      const Klass* field_class,
                                       AccessFlags access,
                                       bool classloader_only,
                                       bool protected_restriction = false);
-  static bool     is_same_class_package(Klass* class1, Klass* class2);
-  static bool     is_same_package_member(Klass* class1, Klass* class2, TRAPS);
-
-  static bool can_relax_access_check_for(
-    Klass* accessor, Klass* accesee, bool classloader_only);
+  static bool     is_same_class_package(const Klass* class1, const Klass* class2);
 
   // inner class reflection
   // raise an ICCE unless the required relationship can be proven to hold
   // If inner_is_member, require the inner to be a member of the outer.
   // If !inner_is_member, require the inner to be anonymous (a non-member).
   // Caller is responsible for figuring out in advance which case must be true.
-  static void check_for_inner_class(instanceKlassHandle outer, instanceKlassHandle inner,
-                                    bool inner_is_member, TRAPS);
+  static void check_for_inner_class(instanceKlassHandle outer,
+                                    instanceKlassHandle inner,
+                                    bool inner_is_member,
+                                    TRAPS);
 
   //
   // Support for reflection based on dynamic bytecode generation (JDK 1.4)
@@ -119,31 +109,11 @@
   // MethodParameterElement
   static oop new_parameter(Handle method, int index, Symbol* sym,
                            int flags, TRAPS);
-
-private:
-  // method resolution for invoke
-  static methodHandle resolve_interface_call(instanceKlassHandle klass, const methodHandle& method, KlassHandle recv_klass, Handle receiver, TRAPS);
-  // Method call (shared by invoke_method and invoke_constructor)
-  static oop  invoke(instanceKlassHandle klass,
-                     const methodHandle& method,
-                     Handle receiver,
-                     bool override,
-                     objArrayHandle ptypes,
-                     BasicType rtype,
-                     objArrayHandle args,
-                     bool is_method_invoke, TRAPS);
-
-  // Narrowing of basic types. Used to create correct jvalues for
-  // boolean, byte, char and short return return values from interpreter
-  // which are returned as ints. Throws IllegalArgumentException.
-  static void narrow(jvalue* value, BasicType narrow_type, TRAPS);
-
-  // Conversion
-  static BasicType basic_type_mirror_to_basic_type(oop basic_type_mirror, TRAPS);
-
-public:
   // Method invocation through java.lang.reflect.Method
-  static oop      invoke_method(oop method_mirror, Handle receiver, objArrayHandle args, TRAPS);
+  static oop      invoke_method(oop method_mirror,
+                                Handle receiver,
+                                objArrayHandle args,
+                                TRAPS);
   // Method invocation through java.lang.reflect.Constructor
   static oop      invoke_constructor(oop method_mirror, objArrayHandle args, TRAPS);
 
--- a/src/share/vm/runtime/safepoint.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/safepoint.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/stringTable.hpp"
+#include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
@@ -514,11 +515,6 @@
     StringTable::rehash_table();
   }
 
-  // rotate log files?
-  if (UseGCLogFileRotation) {
-    gclog_or_tty->rotate_log(false);
-  }
-
   {
    // CMS delays purging the CLDG until the beginning of the next safepoint,
    // to make sure concurrent sweep is done
--- a/src/share/vm/runtime/sharedRuntime.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -57,6 +57,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/vframe.hpp"
 #include "runtime/vframeArray.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
@@ -254,8 +255,10 @@
        ((ybits.i & float_sign_mask) == float_infinity) ) {
     return x;
   }
+  return ((jfloat)fmod_winx64((double)x, (double)y));
+#else
+  return ((jfloat)fmod((double)x,(double)y));
 #endif
-  return ((jfloat)fmod((double)x,(double)y));
 JRT_END
 
 
@@ -269,8 +272,10 @@
        ((ybits.l & double_sign_mask) == double_infinity) ) {
     return x;
   }
+  return ((jdouble)fmod_winx64((double)x, (double)y));
+#else
+  return ((jdouble)fmod((double)x,(double)y));
 #endif
-  return ((jdouble)fmod((double)x,(double)y));
 JRT_END
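
For reference, the IEEE-754 special cases that the operand bit tests above
short-circuit before any CRT fmod call (illustrative only, standard <cmath>
behavior):

  #include <cmath>
  #include <cstdio>
  int main() {
    printf("%f\n", fmod(3.5, 2.0));       // 1.5
    printf("%f\n", fmod(3.5, INFINITY));  // 3.5: finite x with infinite y returns x
    printf("%f\n", fmod(INFINITY, 2.0));  // nan: infinite x has no remainder
    return 0;
  }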
 
 #ifdef __SOFTFP__
@@ -483,8 +488,11 @@
       // unguarded. Reguard the stack otherwise if we return to the
       // deopt blob and the stack bang causes a stack overflow we
       // crash.
-      bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
+      bool guard_pages_enabled = thread->stack_guards_enabled();
       if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
+      if (thread->reserved_stack_activation() != thread->stack_base()) {
+        thread->set_reserved_stack_activation(thread->stack_base());
+      }
       assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
       return SharedRuntime::deopt_blob()->unpack_with_exception();
     } else {
@@ -757,10 +765,23 @@
 JRT_END
 
 JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
+  throw_StackOverflowError_common(thread, false);
+JRT_END
+
+JRT_ENTRY(void, SharedRuntime::throw_delayed_StackOverflowError(JavaThread* thread))
+  throw_StackOverflowError_common(thread, true);
+JRT_END
+
+void SharedRuntime::throw_StackOverflowError_common(JavaThread* thread, bool delayed) {
   // We avoid using the normal exception construction in this case because
   // it performs an upcall to Java, and we're already out of stack space.
+  Thread* THREAD = thread;
   Klass* k = SystemDictionary::StackOverflowError_klass();
   oop exception_oop = InstanceKlass::cast(k)->allocate_instance(CHECK);
+  if (delayed) {
+    java_lang_Throwable::set_message(exception_oop,
+                                     Universe::delayed_stack_overflow_error_message());
+  }
   Handle exception (thread, exception_oop);
   if (StackTraceInThrowable) {
     java_lang_Throwable::fill_in_stack_trace(exception);
@@ -768,7 +789,7 @@
   // Increment counter for hs_err file reporting
   Atomic::inc(&Exceptions::_stack_overflow_errors);
   throw_and_post_jvmti_exception(thread, exception);
-JRT_END
+}
 
 #if INCLUDE_JVMCI
 address SharedRuntime::deoptimize_for_implicit_exception(JavaThread* thread, address pc, nmethod* nm, int deopt_reason) {
@@ -2975,3 +2996,68 @@
 }
 
 #endif /* PRODUCT */
+
+JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* thread))
+  assert(thread->is_Java_thread(), "Only Java threads have a stack reserved zone");
+  thread->enable_stack_reserved_zone();
+  thread->set_reserved_stack_activation(thread->stack_base());
+JRT_END
+
+frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* thread, frame fr) {
+  frame activation;
+  int decode_offset = 0;
+  nmethod* nm = NULL;
+  frame prv_fr = fr;
+  int count = 1;
+
+  assert(fr.is_java_frame(), "Must start on Java frame");
+
+  while (!fr.is_first_frame()) {
+    Method* method = NULL;
+    // Compiled java method case.
+    if (decode_offset != 0) {
+      DebugInfoReadStream stream(nm, decode_offset);
+      decode_offset = stream.read_int();
+      method = (Method*)nm->metadata_at(stream.read_int());
+    } else {
+      if (fr.is_first_java_frame()) break;
+      address pc = fr.pc();
+      prv_fr = fr;
+      if (fr.is_interpreted_frame()) {
+        method = fr.interpreter_frame_method();
+        fr = fr.java_sender();
+      } else {
+        CodeBlob* cb = fr.cb();
+        fr = fr.java_sender();
+        if (cb == NULL || !cb->is_nmethod()) {
+          continue;
+        }
+        nm = (nmethod*)cb;
+        if (nm->method()->is_native()) {
+          method = nm->method();
+        } else {
+          PcDesc* pd = nm->pc_desc_at(pc);
+          assert(pd != NULL, "PcDesc must not be NULL");
+          decode_offset = pd->scope_decode_offset();
+          // if decode_offset is not equal to 0, it will execute the
+          // "compiled java method case" at the beginning of the loop.
+          continue;
+        }
+      }
+    }
+    if (method->has_reserved_stack_access()) {
+      ResourceMark rm(thread);
+      activation = prv_fr;
+      warning("Potentially dangerous stack overflow in "
+              "ReservedStackAccess annotated method %s [%d]",
+              method->name_and_sig_as_C_string(), count++);
+      EventReservedStackActivation event;
+      if (event.should_commit()) {
+        event.set_method(method);
+        event.commit();
+      }
+    }
+  }
+  return activation;
+}
+
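The walk above resolves inlined scopes through DebugInfoReadStream and falls
back to plain frame iteration otherwise. Reduced to its control shape
(hypothetical types, inlining and native frames ignored):

  #include <cstddef>
  // Sketch only: visit every frame between the overflow point and the first
  // Java frame; each annotated activation is reported, and the outermost one
  // seen is returned, matching the repeated overwrite of 'activation' above.
  struct SFrame { bool first; bool annotated; SFrame* sender; };
  SFrame* find_annotated_sketch(SFrame* fr) {
    SFrame* activation = NULL;
    for (; fr != NULL && !fr->first; fr = fr->sender) {
      if (fr->annotated) activation = fr;  // warn + emit event here
    }
    return activation;
  }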
--- a/src/share/vm/runtime/sharedRuntime.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/sharedRuntime.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -30,7 +30,6 @@
 #include "interpreter/linkResolver.hpp"
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
-#include "runtime/threadLocalStorage.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/macros.hpp"
 
@@ -101,6 +100,12 @@
   static jfloat  frem(jfloat  x, jfloat  y);
   static jdouble drem(jdouble x, jdouble y);
 
+
+#ifdef _WIN64
+  // Workaround for fmod issue in the Windows x64 CRT
+  static double fmod_winx64(double x, double y);
+#endif
+
 #ifdef __SOFTFP__
   static jfloat  fadd(jfloat x, jfloat y);
   static jfloat  fsub(jfloat x, jfloat y);
@@ -196,6 +201,8 @@
   static void    throw_NullPointerException(JavaThread* thread);
   static void    throw_NullPointerException_at_call(JavaThread* thread);
   static void    throw_StackOverflowError(JavaThread* thread);
+  static void    throw_delayed_StackOverflowError(JavaThread* thread);
+  static void    throw_StackOverflowError_common(JavaThread* thread, bool delayed);
   static address continuation_for_implicit_exception(JavaThread* thread,
                                                      address faulting_pc,
                                                      ImplicitExceptionKind exception_kind);
@@ -203,6 +210,9 @@
   static address deoptimize_for_implicit_exception(JavaThread* thread, address pc, nmethod* nm, int deopt_reason);
 #endif
 
+  static void enable_stack_reserved_zone(JavaThread* thread);
+  static frame look_for_reserved_stack_annotated_method(JavaThread* thread, frame fr);
+
   // Shared stub locations
   static address get_poll_stub(address pc);
 
--- a/src/share/vm/runtime/stubRoutines.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/stubRoutines.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -54,6 +54,7 @@
 address StubRoutines::_throw_IncompatibleClassChangeError_entry = NULL;
 address StubRoutines::_throw_NullPointerException_at_call_entry = NULL;
 address StubRoutines::_throw_StackOverflowError_entry           = NULL;
+address StubRoutines::_throw_delayed_StackOverflowError_entry   = NULL;
 address StubRoutines::_handler_for_unsafe_access_entry          = NULL;
 jint    StubRoutines::_verify_oop_count                         = 0;
 address StubRoutines::_verify_oop_subroutine_entry              = NULL;
--- a/src/share/vm/runtime/stubRoutines.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/stubRoutines.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -111,6 +111,7 @@
   static address _throw_IncompatibleClassChangeError_entry;
   static address _throw_NullPointerException_at_call_entry;
   static address _throw_StackOverflowError_entry;
+  static address _throw_delayed_StackOverflowError_entry;
   static address _handler_for_unsafe_access_entry;
 
   static address _atomic_xchg_entry;
@@ -277,6 +278,7 @@
   static address throw_IncompatibleClassChangeError_entry(){ return _throw_IncompatibleClassChangeError_entry; }
   static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; }
   static address throw_StackOverflowError_entry()          { return _throw_StackOverflowError_entry; }
+  static address throw_delayed_StackOverflowError_entry()  { return _throw_delayed_StackOverflowError_entry; }
 
   // Exceptions during unsafe access - should throw Java exception rather
   // than crash.
--- a/src/share/vm/runtime/thread.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/thread.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -78,7 +78,6 @@
 #include "runtime/task.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadCritical.hpp"
-#include "runtime/threadLocalStorage.hpp"
 #include "runtime/vframe.hpp"
 #include "runtime/vframeArray.hpp"
 #include "runtime/vframe_hp.hpp"
@@ -142,6 +141,10 @@
 
 #endif // ndef DTRACE_ENABLED
 
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+// Current thread is maintained as a thread-local variable
+THREAD_LOCAL_DECL Thread* Thread::_thr_current = NULL;
+#endif
 
 // Class hierarchy
 // - Thread
@@ -281,22 +284,22 @@
 #endif // ASSERT
 }
 
-// Non-inlined version to be used where thread.inline.hpp shouldn't be included.
-Thread* Thread::current_noinline() {
-  return Thread::current();
+void Thread::initialize_thread_current() {
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+  assert(_thr_current == NULL, "Thread::current already initialized");
+  _thr_current = this;
+#endif
+  assert(ThreadLocalStorage::thread() == NULL, "ThreadLocalStorage::thread already initialized");
+  ThreadLocalStorage::set_thread(this);
+  assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");
 }
 
-void Thread::initialize_thread_local_storage() {
-  // Note: Make sure this method only calls
-  // non-blocking operations. Otherwise, it might not work
-  // with the thread-startup/safepoint interaction.
-
-  // During Java thread startup, safepoint code should allow this
-  // method to complete because it may need to allocate memory to
-  // store information for the new thread.
-
-  // initialize structure dependent on thread local storage
-  ThreadLocalStorage::set_thread(this);
+void Thread::clear_thread_current() {
+  assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+  _thr_current = NULL;
+#endif
+  ThreadLocalStorage::set_thread(NULL);
 }
 
 void Thread::record_stack_base_and_size() {
@@ -304,6 +307,7 @@
   set_stack_size(os::current_stack_size());
   if (is_Java_thread()) {
     ((JavaThread*) this)->set_stack_overflow_limit();
+    ((JavaThread*) this)->set_reserved_stack_activation(stack_base());
   }
   // CR 7190089: on Solaris, primordial thread's stack is adjusted
   // in initialize_thread(). Without the adjustment, stack size is
@@ -364,15 +368,12 @@
 
   delete _SR_lock;
 
-  // clear thread local storage if the Thread is deleting itself
+  // clear Thread::current if thread is deleting itself.
+  // Needed to ensure JNI correctly detects non-attached threads.
   if (this == Thread::current()) {
-    ThreadLocalStorage::set_thread(NULL);
-  } else {
-    // In the case where we're not the current thread, invalidate all the
-    // caches in case some code tries to get the current thread or the
-    // thread that was destroyed, and gets stale information.
-    ThreadLocalStorage::invalidate_all();
+    clear_thread_current();
   }
+
   CHECK_UNHANDLED_OOPS_ONLY(if (CheckUnhandledOops) delete unhandled_oops();)
 }
 
@@ -908,7 +909,7 @@
 
 
 bool Thread::is_in_usable_stack(address adr) const {
-  size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
+  size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
   size_t usable_stack_size = _stack_size - stack_guard_size;
 
   return ((adr < stack_base()) && (adr >= stack_base() - usable_stack_size));
@@ -1273,7 +1274,6 @@
   assert(this == watcher_thread(), "just checking");
 
   this->record_stack_base_and_size();
-  this->initialize_thread_local_storage();
   this->set_native_thread_name(this->name());
   this->set_active_handles(JNIHandleBlock::allocate_block());
   while (true) {
@@ -1326,9 +1326,6 @@
     _watcher_thread = NULL;
     Terminator_lock->notify();
   }
-
-  // Thread destructor usually does this..
-  ThreadLocalStorage::set_thread(NULL);
 }
 
 void WatcherThread::start() {
@@ -1464,6 +1461,7 @@
     _jvmci_counters = NULL;
   }
 #endif // INCLUDE_JVMCI
+  _reserved_stack_activation = NULL;  // stack base not known yet
   (void)const_cast<oop&>(_exception_oop = oop(NULL));
   _exception_pc  = 0;
   _exception_handler_pc = 0;
@@ -1536,7 +1534,8 @@
 }
 
 bool JavaThread::reguard_stack(address cur_sp) {
-  if (_stack_guard_state != stack_guard_yellow_disabled) {
+  if (_stack_guard_state != stack_guard_yellow_disabled
+      && _stack_guard_state != stack_guard_reserved_disabled) {
     return true; // Stack already guarded or guard pages not needed.
   }
 
@@ -1553,8 +1552,15 @@
   // some exception code in c1, c2 or the interpreter isn't unwinding
   // when it should.
   guarantee(cur_sp > stack_yellow_zone_base(), "not enough space to reguard - increase StackShadowPages");
-
-  enable_stack_yellow_zone();
+  if (_stack_guard_state == stack_guard_yellow_disabled) {
+    enable_stack_yellow_zone();
+    if (reserved_stack_activation() != stack_base()) {
+      set_reserved_stack_activation(stack_base());
+    }
+  } else if (_stack_guard_state == stack_guard_reserved_disabled) {
+    set_reserved_stack_activation(stack_base());
+    enable_stack_reserved_zone();
+  }
   return true;
 }
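
The reguard path now re-arms whichever zone was tripped; the transitions
implied above, as a sketch:

  // stack_guard_yellow_disabled   --reguard--> stack_guard_enabled
  //   (enable_stack_yellow_zone, then reset reserved_stack_activation)
  // stack_guard_reserved_disabled --reguard--> stack_guard_enabled
  //   (reset reserved_stack_activation, then enable_stack_reserved_zone)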
 
@@ -1663,9 +1669,6 @@
   // Record real stack base and size.
   this->record_stack_base_and_size();
 
-  // Initialize thread local storage; set before calling MutexLocker
-  this->initialize_thread_local_storage();
-
   this->create_stack_guard_pages();
 
   this->cache_global_variables();
@@ -1997,8 +2000,7 @@
 
 
 JavaThread* JavaThread::active() {
-  Thread* thread = ThreadLocalStorage::thread();
-  assert(thread != NULL, "just checking");
+  Thread* thread = Thread::current();
   if (thread->is_Java_thread()) {
     return (JavaThread*) thread;
   } else {
@@ -2481,7 +2483,7 @@
 void JavaThread::create_stack_guard_pages() {
   if (! os::uses_stack_guard_pages() || _stack_guard_state != stack_guard_unused) return;
   address low_addr = stack_base() - stack_size();
-  size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size();
+  size_t len = (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size();
 
   int allocate = os::allocate_stack_guard_pages();
   // warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len);
@@ -2505,7 +2507,7 @@
   assert(Thread::current() == this, "from different thread");
   if (_stack_guard_state == stack_guard_unused) return;
   address low_addr = stack_base() - stack_size();
-  size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size();
+  size_t len = (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size();
 
   if (os::allocate_stack_guard_pages()) {
     if (os::remove_stack_guard_pages((char *) low_addr, len)) {
@@ -2523,6 +2525,44 @@
   }
 }
 
+void JavaThread::enable_stack_reserved_zone() {
+  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
+  assert(_stack_guard_state != stack_guard_enabled, "already enabled");
+
+  // The base notation is from the stack's point of view, growing downward.
+  // We need to adjust it to work correctly with guard_memory()
+  address base = stack_reserved_zone_base() - stack_reserved_zone_size();
+
+  guarantee(base < stack_base(),"Error calculating stack reserved zone");
+  guarantee(base < os::current_stack_pointer(),"Error calculating stack reserved zone");
+
+  if (os::guard_memory((char *) base, stack_reserved_zone_size())) {
+    _stack_guard_state = stack_guard_enabled;
+  } else {
+    warning("Attempt to guard stack reserved zone failed.");
+  }
+  enable_register_stack_guard();
+}
+
+void JavaThread::disable_stack_reserved_zone() {
+  assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
+  assert(_stack_guard_state != stack_guard_reserved_disabled, "already disabled");
+
+  // Simply return if called for a thread that does not use guard pages.
+  if (_stack_guard_state == stack_guard_unused) return;
+
+  // The base notation is from the stack's point of view, growing downward.
+  // We need to adjust it to work correctly with guard_memory()
+  address base = stack_reserved_zone_base() - stack_reserved_zone_size();
+
+  if (os::unguard_memory((char *)base, stack_reserved_zone_size())) {
+    _stack_guard_state = stack_guard_reserved_disabled;
+  } else {
+    warning("Attempt to unguard stack reserved zone failed.");
+  }
+  disable_register_stack_guard();
+}
+
 void JavaThread::enable_stack_yellow_zone() {
   assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
   assert(_stack_guard_state != stack_guard_enabled, "already enabled");
@@ -3407,7 +3447,7 @@
   jint adjust_after_os_result = Arguments::adjust_after_os();
   if (adjust_after_os_result != JNI_OK) return adjust_after_os_result;
 
-  // initialize TLS
+  // Initialize library-based TLS
   ThreadLocalStorage::init();
 
   // Initialize output stream logging
@@ -3444,14 +3484,9 @@
   // Attach the main thread to this os thread
   JavaThread* main_thread = new JavaThread();
   main_thread->set_thread_state(_thread_in_vm);
-  // must do this before set_active_handles and initialize_thread_local_storage
-  // Note: on solaris initialize_thread_local_storage() will (indirectly)
-  // change the stack size recorded here to one based on the java thread
-  // stacksize. This adjusted size is what is used to figure the placement
-  // of the guard pages.
+  main_thread->initialize_thread_current();
+  // must do this before set_active_handles
   main_thread->record_stack_base_and_size();
-  main_thread->initialize_thread_local_storage();
-
   main_thread->set_active_handles(JNIHandleBlock::allocate_block());
 
   if (!main_thread->set_as_starting_thread()) {
--- a/src/share/vm/runtime/thread.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/thread.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -103,6 +103,12 @@
   friend class VMStructs;
   friend class JVMCIVMStructs;
  private:
+
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+  // Current thread is maintained as a thread-local variable
+  static THREAD_LOCAL_DECL Thread* _thr_current;
+#endif
+
   // Exception handling
   // (Note: _pending_exception and friends are in ThreadShadow)
   //oop       _pending_exception;                // pending exception for current thread
@@ -261,14 +267,13 @@
   friend class No_Alloc_Verifier;
   friend class No_Safepoint_Verifier;
   friend class Pause_No_Safepoint_Verifier;
-  friend class ThreadLocalStorage;
   friend class GC_locker;
 
   ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
   jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
                                                 // the Java heap
 
-  TRACE_DATA _trace_data;                       // Thread-local data for tracing
+  mutable TRACE_DATA _trace_data;               // Thread-local data for tracing
 
   ThreadExt _ext;
 
@@ -308,9 +313,12 @@
   Thread();
   virtual ~Thread();
 
-  // initializtion
-  void initialize_thread_local_storage();
+  // Manage Thread::current()
+  void initialize_thread_current();
+  private:
+  void clear_thread_current(); // needed for detaching JNI threads
 
+  public:
   // thread entry point
   virtual void run();
 
@@ -338,10 +346,13 @@
 
   virtual char* name() const { return (char*)"Unknown thread"; }
 
-  // Returns the current thread
+  // Returns the current thread (ASSERTS if NULL)
   static inline Thread* current();
-  // ... without having to include thread.inline.hpp.
-  static Thread* current_noinline();
+  // Returns the current thread, or NULL if not attached
+  static inline Thread* current_or_null();
+  // Returns the current thread, or NULL if not attached, and is
+  // safe for use from signal-handlers
+  static inline Thread* current_or_null_safe();
 
   // Common thread operations
   static void set_priority(Thread* thread, ThreadPriority priority);
@@ -650,25 +661,22 @@
 };
 
 // Inline implementation of Thread::current()
-// Thread::current is "hot" it's called > 128K times in the 1st 500 msecs of
-// startup.
-// ThreadLocalStorage::thread is warm -- it's called > 16K times in the same
-// period.   This is inlined in thread_<os_family>.inline.hpp.
+inline Thread* Thread::current() {
+  Thread* current = current_or_null();
+  assert(current != NULL, "Thread::current() called on detached thread");
+  return current;
+}
 
-inline Thread* Thread::current() {
-#ifdef ASSERT
-  // This function is very high traffic. Define PARANOID to enable expensive
-  // asserts.
-#ifdef PARANOID
-  // Signal handler should call ThreadLocalStorage::get_thread_slow()
-  Thread* t = ThreadLocalStorage::get_thread_slow();
-  assert(t != NULL && !t->is_inside_signal_handler(),
-         "Don't use Thread::current() inside signal handler");
+inline Thread* Thread::current_or_null() {
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+  return _thr_current;
+#else
+  return ThreadLocalStorage::thread();
 #endif
-#endif
-  Thread* thread = ThreadLocalStorage::thread();
-  assert(thread != NULL, "just checking");
-  return thread;
+}
+
+inline Thread* Thread::current_or_null_safe() {
+  return ThreadLocalStorage::thread();
 }
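
For context, a minimal standalone analogue of the two lookup paths
(hypothetical names, not HotSpot code; key creation at startup is omitted):

  #include <pthread.h>
  #include <cstddef>
  class TThread;
  static __thread TThread* t_current = NULL;  // compiler-based TLS, fast path
  static pthread_key_t t_key;                 // library-based TLS slot
  static TThread* current_or_null_sketch()      { return t_current; }
  static TThread* current_or_null_safe_sketch() {
    // library lookup; also usable from signal handlers in practice
    return (TThread*)pthread_getspecific(t_key);
  }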
 
 // Name support for threads.  non-JavaThread subclasses with multiple
@@ -903,6 +911,7 @@
   // State of the stack guard pages for this thread.
   enum StackGuardState {
     stack_guard_unused,         // not needed
+    stack_guard_reserved_disabled,
     stack_guard_yellow_disabled,// disabled (temporarily) after stack overflow
     stack_guard_enabled         // enabled
   };
@@ -951,6 +960,7 @@
   // Precompute the limit of the stack as used in stack overflow checks.
   // We load it from here to simplify the stack overflow check in assembly.
   address          _stack_overflow_limit;
+  address          _reserved_stack_activation;
 
  // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
  // used to temporarily pass values into and out of the runtime system during exception handling for compiled
@@ -1337,18 +1347,25 @@
 
   // Stack overflow support
   inline size_t stack_available(address cur_sp);
+  address stack_reserved_zone_base() {
+    return stack_yellow_zone_base();
+  }
+  size_t stack_reserved_zone_size() {
+    return StackReservedPages * os::vm_page_size();
+  }
   address stack_yellow_zone_base() {
     return (address)(stack_base() -
                      (stack_size() -
                      (stack_red_zone_size() + stack_yellow_zone_size())));
   }
   size_t  stack_yellow_zone_size() {
-    return StackYellowPages * os::vm_page_size();
+    return StackYellowPages * os::vm_page_size() + stack_reserved_zone_size();
   }
   address stack_red_zone_base() {
     return (address)(stack_base() - (stack_size() - stack_red_zone_size()));
   }
   size_t stack_red_zone_size() { return StackRedPages * os::vm_page_size(); }
+  bool in_stack_reserved_zone(address a) {
+    return (a <= stack_reserved_zone_base()) && (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
+  }
   bool in_stack_yellow_zone(address a) {
     return (a <= stack_yellow_zone_base()) && (a >= stack_red_zone_base());
   }
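
A note on the arithmetic above: stack_reserved_zone_base() aliases
stack_yellow_zone_base() and stack_yellow_zone_size() grows by the reserved
pages, so from low to high addresses the guard region is red pages, yellow
pages, then reserved pages, and the reserved zone is hit first as the stack
grows down. A standalone sketch with made-up sizes:

  #include <cstdint>
  #include <cstdio>
  int main() {
    const uintptr_t page = 4096, stack_size = 512 * 4096;
    const uintptr_t stack_base = 0x7f0000200000;   // top of stack, grows down
    const uintptr_t red_size      = 1 * page;                  // StackRedPages
    const uintptr_t reserved_size = 1 * page;                  // StackReservedPages
    const uintptr_t yellow_size   = 2 * page + reserved_size;  // includes reserved
    const uintptr_t yellow_base   = stack_base - (stack_size - (red_size + yellow_size));
    printf("yellow/reserved zone base: %#lx\n", (unsigned long)yellow_base);
    return 0;
  }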
@@ -1360,6 +1377,8 @@
   void create_stack_guard_pages();
   void remove_stack_guard_pages();
 
+  void enable_stack_reserved_zone();
+  void disable_stack_reserved_zone();
   void enable_stack_yellow_zone();
   void disable_stack_yellow_zone();
   void enable_stack_red_zone();
@@ -1367,7 +1386,16 @@
 
   inline bool stack_guard_zone_unused();
   inline bool stack_yellow_zone_disabled();
-  inline bool stack_yellow_zone_enabled();
+  inline bool stack_reserved_zone_disabled();
+  inline bool stack_guards_enabled();
+
+  address reserved_stack_activation() const { return _reserved_stack_activation; }
+  void      set_reserved_stack_activation(address addr) {
+    assert(_reserved_stack_activation == stack_base()
+            || _reserved_stack_activation == NULL
+            || addr == stack_base(), "Must not be set twice");
+    _reserved_stack_activation = addr;
+  }
 
   // Attempt to reguard the stack after a stack overflow may have occurred.
   // Returns true if (a) guard pages are not needed on this thread, (b) the
@@ -1384,6 +1412,7 @@
   void set_stack_overflow_limit() {
     _stack_overflow_limit = _stack_base - _stack_size +
                             ((StackShadowPages +
+                              StackReservedPages +
                               StackYellowPages +
                               StackRedPages) * os::vm_page_size());
   }
@@ -1433,6 +1462,7 @@
   static ByteSize stack_overflow_limit_offset()  { return byte_offset_of(JavaThread, _stack_overflow_limit); }
   static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
   static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state); }
+  static ByteSize reserved_stack_activation_offset() { return byte_offset_of(JavaThread, _reserved_stack_activation); }
   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }
 
   static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
@@ -1844,8 +1874,8 @@
 
 // Inline implementation of JavaThread::current
 inline JavaThread* JavaThread::current() {
-  Thread* thread = ThreadLocalStorage::thread();
-  assert(thread != NULL && thread->is_Java_thread(), "just checking");
+  Thread* thread = Thread::current();
+  assert(thread->is_Java_thread(), "just checking");
   return (JavaThread*)thread;
 }
 
--- a/src/share/vm/runtime/thread.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/thread.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -30,21 +30,6 @@
 #include "runtime/atomic.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/thread.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "thread_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
 
 #undef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
 
@@ -145,6 +130,10 @@
   return _stack_guard_state == stack_guard_yellow_disabled;
 }
 
+inline bool JavaThread::stack_reserved_zone_disabled() {
+  return _stack_guard_state == stack_guard_reserved_disabled;
+}
+
 inline size_t JavaThread::stack_available(address cur_sp) {
   // This code assumes java stacks grow down
   address low_addr; // Limit on the address for deepest stack depth
@@ -156,7 +145,7 @@
   return cur_sp > low_addr ? cur_sp - low_addr : 0;
 }
 
-inline bool JavaThread::stack_yellow_zone_enabled() {
+inline bool JavaThread::stack_guards_enabled() {
 #ifdef ASSERT
   if (os::uses_stack_guard_pages()) {
     assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use");
--- a/src/share/vm/runtime/threadLocalStorage.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/os.inline.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-// Solaris no longer has this kind of ThreadLocalStorage implementation.
-// This will be removed from all platforms in the near future.
-
-#ifndef SOLARIS
-
-// static member initialization
-int ThreadLocalStorage::_thread_index = -1;
-
-Thread* ThreadLocalStorage::get_thread_slow() {
-  return (Thread*) os::thread_local_storage_at(ThreadLocalStorage::thread_index());
-}
-
-void ThreadLocalStorage::set_thread(Thread* thread) {
-  pd_set_thread(thread);
-
-  // The following ensure that any optimization tricks we have tried
-  // did not backfire on us:
-  guarantee(get_thread()      == thread, "must be the same thread, quickly");
-  guarantee(get_thread_slow() == thread, "must be the same thread, slowly");
-}
-
-void ThreadLocalStorage::init() {
-  assert(!is_initialized(),
-         "More than one attempt to initialize threadLocalStorage");
-  pd_init();
-  set_thread_index(os::allocate_thread_local_storage());
-  generate_code_for_get_thread();
-}
-
-bool ThreadLocalStorage::is_initialized() {
-    return (thread_index() != -1);
-}
-
-#endif // SOLARIS
--- a/src/share/vm/runtime/threadLocalStorage.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/threadLocalStorage.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -25,86 +25,26 @@
 #ifndef SHARE_VM_RUNTIME_THREADLOCALSTORAGE_HPP
 #define SHARE_VM_RUNTIME_THREADLOCALSTORAGE_HPP
 
-#include "gc/shared/gcUtil.hpp"
-#include "runtime/os.hpp"
 #include "utilities/top.hpp"
 
-// Interface for thread local storage
+// forward-decl as we can't have an include cycle
+class Thread;
 
-// Fast variant of ThreadLocalStorage::get_thread_slow
-extern "C" Thread*   get_thread();
-
-// Get raw thread id: e.g., %g7 on sparc, fs or gs on x86
-extern "C" uintptr_t _raw_thread_id();
+// Wrapper class for library-based (as opposed to compiler-based)
+// thread-local storage (TLS). All platforms require this for
+// signal-handler based TLS access (which while not strictly async-signal
+// safe in theory, is and has-been for a long time, in practice).
+// Platforms without compiler-based TLS (i.e. __thread storage-class modifier)
+// will use this implementation for all TLS access - see thread.hpp/cpp
 
 class ThreadLocalStorage : AllStatic {
 
  // Exported API
  public:
-  static void    set_thread(Thread* thread);
-  static Thread* get_thread_slow();
-  static void    invalidate_all() { pd_invalidate_all(); }
+  static Thread* thread(); // return current thread, if attached
+  static void    set_thread(Thread* thread); // set current thread
   static void    init();
-  static bool    is_initialized();
-
-  // Machine dependent stuff
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "threadLS_linux_x86.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "threadLS_linux_sparc.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "threadLS_linux_zero.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "threadLS_solaris_x86.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "threadLS_solaris_sparc.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "threadLS_windows_x86.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "threadLS_linux_arm.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "threadLS_linux_ppc.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_aarch64
-# include "threadLS_linux_aarch64.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_aix_ppc
-# include "threadLS_aix_ppc.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "threadLS_bsd_x86.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "threadLS_bsd_zero.hpp"
-#endif
-
-#ifndef SOLARIS
- public:
-  // Accessor
-  static inline int  thread_index()              { return _thread_index; }
-  static inline void set_thread_index(int index) { _thread_index = index; }
-
- private:
-  static int     _thread_index;
-
-  static void    generate_code_for_get_thread();
-
-  // Processor dependent parts of set_thread and initialization
-  static void pd_set_thread(Thread* thread);
-  static void pd_init();
-
-#endif // SOLARIS
-
-  // Invalidate any thread cacheing or optimization schemes.
-  static void pd_invalidate_all();
-
+  static bool    is_initialized(); // can't use TLS prior to initialization
 };
 
 #endif // SHARE_VM_RUNTIME_THREADLOCALSTORAGE_HPP
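
The per-OS definitions of this slimmed-down API live outside this changeset; a
plausible library-based sketch using POSIX keys, for illustration only:

  #include <pthread.h>
  static pthread_key_t _thread_key;
  static bool _tls_initialized = false;

  Thread* ThreadLocalStorage::thread() {
    // NULL for threads that never called set_thread() (i.e. not attached)
    return (Thread*)pthread_getspecific(_thread_key);
  }
  void ThreadLocalStorage::set_thread(Thread* current) {
    pthread_setspecific(_thread_key, current);
  }
  void ThreadLocalStorage::init() {
    pthread_key_create(&_thread_key, NULL);
    _tls_initialized = true;
  }
  bool ThreadLocalStorage::is_initialized() { return _tls_initialized; }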
--- a/src/share/vm/runtime/timer.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/timer.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -120,7 +120,6 @@
 
   if (_active) {
     _accum = NULL;
-    tty->stamp(PrintGCTimeStamps);
     tty->print("[%s", title);
     tty->flush();
     _t.start();
@@ -135,7 +134,6 @@
   _verbose = verbose;
   if (_active) {
     if (_verbose) {
-      tty->stamp(PrintGCTimeStamps);
       tty->print("[%s", title);
       tty->flush();
     }
--- a/src/share/vm/runtime/vframe.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/vframe.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -232,14 +232,12 @@
             // disable the extra printing below.
             mark = NULL;
           }
-        } else if (frame_count != 0 && ObjectMonitor::Knob_Verbose) {
+        } else if (frame_count != 0) {
           // This is not the first frame so we either own this monitor
           // or we owned the monitor before and called wait(). Because
           // wait() could have been called on any monitor in a lower
           // numbered frame on the stack, we have to check all the
           // monitors on the list for this frame.
-          // Note: Only enable this new output line in verbose mode
-          // since existing tests are not ready for it.
           mark = monitor->owner()->mark();
           if (mark->has_monitor() &&
               ( // we have marked ourself as pending on this monitor
--- a/src/share/vm/runtime/vmStructs.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/vmStructs.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -320,7 +320,7 @@
   nonstatic_field(InstanceKlass,               _constants,                                    ConstantPool*)                         \
   nonstatic_field(InstanceKlass,               _class_loader_data,                            ClassLoaderData*)                      \
   nonstatic_field(InstanceKlass,               _source_file_name_index,                       u2)                                    \
-  nonstatic_field(InstanceKlass,               _source_debug_extension,                       char*)                                 \
+  nonstatic_field(InstanceKlass,               _source_debug_extension,                       const char*)                           \
   nonstatic_field(InstanceKlass,               _inner_classes,                                Array<jushort>*)                       \
   nonstatic_field(InstanceKlass,               _nonstatic_field_size,                         int)                                   \
   nonstatic_field(InstanceKlass,               _static_field_size,                            int)                                   \
@@ -396,7 +396,7 @@
   nonstatic_field(Method,                      _access_flags,                                 AccessFlags)                           \
   nonstatic_field(Method,                      _vtable_index,                                 int)                                   \
   nonstatic_field(Method,                      _intrinsic_id,                                 u2)                                    \
-  nonstatic_field(Method,                      _flags,                                        u1)                                    \
+  nonstatic_field(Method,                      _flags,                                        u2)                                    \
   nonproduct_nonstatic_field(Method,           _compiled_invocation_count,                    int)                                   \
   volatile_nonstatic_field(Method,             _code,                                         nmethod*)                              \
   nonstatic_field(Method,                      _i2i_entry,                                    address)                               \
--- a/src/share/vm/runtime/vmThread.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/vmThread.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -240,7 +240,6 @@
 void VMThread::run() {
   assert(this == vm_thread(), "check");
 
-  this->initialize_thread_local_storage();
   this->initialize_named_thread();
   this->record_stack_base_and_size();
   // Notify_lock wait checks on active_handles() to rewait in
@@ -286,7 +285,7 @@
     os::check_heap();
     // Silent verification so as not to pollute normal output,
     // unless we really asked for it.
-    Universe::verify(!(PrintGCDetails || Verbose) || VerifySilently);
+    Universe::verify();
   }
 
   CompileBroker::set_should_block();
@@ -308,9 +307,6 @@
     _terminate_lock->notify();
   }
 
-  // Thread destructor usually does this.
-  ThreadLocalStorage::set_thread(NULL);
-
   // Deletion must be done synchronously by the JNI DestroyJavaVM thread
   // so that the VMThread deletion completes before the main thread frees
   // up the CodeHeap.
--- a/src/share/vm/runtime/vm_operations.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/vm_operations.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -189,7 +189,7 @@
 
 void VM_Verify::doit() {
   Universe::heap()->prepare_for_verify();
-  Universe::verify(_silent);
+  Universe::verify();
 }
 
 bool VM_PrintThreads::doit_prologue() {
@@ -378,7 +378,7 @@
 int VM_Exit::set_vm_exited() {
   CodeCacheExtensions::complete_step(CodeCacheExtensionsSteps::LastStep);
 
-  Thread * thr_cur = ThreadLocalStorage::get_thread_slow();
+  Thread * thr_cur = Thread::current();
 
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");
 
@@ -400,7 +400,7 @@
   // to wait for threads in _thread_in_native state to be quiescent.
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");
 
-  Thread * thr_cur = ThreadLocalStorage::get_thread_slow();
+  Thread * thr_cur = Thread::current();
   Monitor timer(Mutex::leaf, "VM_Exit timer", true,
                 Monitor::_safepoint_check_never);
 
@@ -477,7 +477,7 @@
 
 void VM_Exit::wait_if_vm_exited() {
   if (_vm_exited &&
-      ThreadLocalStorage::get_thread_slow() != _shutdown_thread) {
+      Thread::current_or_null() != _shutdown_thread) {
     // _vm_exited is set at safepoint, and the Threads_lock is never released
     // we will block here until the process dies
     Threads_lock->lock_without_safepoint_check();
--- a/src/share/vm/runtime/vm_operations.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/vm_operations.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -320,10 +320,7 @@
 };
 
 class VM_Verify: public VM_Operation {
- private:
-  bool _silent;
  public:
-  VM_Verify(bool silent = VerifySilently) : _silent(silent) {}
   VMOp_Type type() const { return VMOp_Verify; }
   void doit();
 };
@@ -427,17 +424,6 @@
   void doit();
 };
 
-
-class VM_RotateGCLog: public VM_Operation {
- private:
-  outputStream* _out;
-
- public:
-  VM_RotateGCLog(outputStream* st) : _out(st) {}
-  VMOp_Type type() const { return VMOp_RotateGCLog; }
-  void doit() { gclog_or_tty->rotate_log(true, _out); }
-};
-
 class VM_PrintCompileQueue: public VM_Operation {
  private:
   outputStream* _out;
--- a/src/share/vm/runtime/vm_version.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/vm_version.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -170,14 +170,19 @@
 #ifndef CPU
 #ifdef ZERO
 #define CPU      ZERO_LIBARCH
+#elif defined(PPC64)
+#if defined(VM_LITTLE_ENDIAN)
+#define CPU      "ppc64le"
+#else
+#define CPU      "ppc64"
+#endif
 #else
 #define CPU      IA32_ONLY("x86")                \
                  IA64_ONLY("ia64")               \
                  AMD64_ONLY("amd64")             \
-                 PPC64_ONLY("ppc64")             \
                  AARCH64_ONLY("aarch64")         \
                  SPARC_ONLY("sparc")
-#endif // ZERO
+#endif // CPU selection
 #endif
 
 const char *Abstract_VM_Version::vm_platform_string() {
--- a/src/share/vm/runtime/vm_version.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/runtime/vm_version.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -56,6 +56,12 @@
                                                   unsigned int dem,
                                                   unsigned int switch_pt);
  public:
+  // Called as part of the runtime services initialization which is
+  // called from the management module initialization (via init_globals())
+  // after argument parsing and attaching of the main thread has
+  // occurred.  Examines a variety of the hardware capabilities of
+  // the platform to determine which features can be used to execute the
+  // program.
   static void initialize();
 
   // This allows for early initialization of VM_Version information
@@ -65,6 +71,11 @@
   // need to specialize this define VM_Version::early_initialize().
   static void early_initialize() { }
 
+  // Called to initialize VM variables needing initialization
+  // after command line parsing. Platforms that need to specialize
+  // this should define VM_Version::init_before_ergo().
+  static void init_before_ergo() {}
+
   // Name
   static const char* vm_name();
   // Vendor
--- a/src/share/vm/services/diagnosticCommand.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/services/diagnosticCommand.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -73,7 +73,6 @@
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<JVMTIDataDumpDCmd>(full_export, true, false));
 #endif // INCLUDE_JVMTI
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ThreadDumpDCmd>(full_export, true, false));
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<RotateGCLogDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassLoaderStatsDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CompileQueueDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CodeListDCmd>(full_export, true, false));
@@ -826,15 +825,6 @@
   output()->cr();
 }
 
-void RotateGCLogDCmd::execute(DCmdSource source, TRAPS) {
-  if (UseGCLogFileRotation) {
-    VM_RotateGCLog rotateop(output());
-    VMThread::execute(&rotateop);
-  } else {
-    output()->print_cr("Target VM does not support GC log file rotation.");
-  }
-}
-
 void CompileQueueDCmd::execute(DCmdSource source, TRAPS) {
   VM_PrintCompileQueue printCompileQueueOp(output());
   VMThread::execute(&printCompileQueueOp);
--- a/src/share/vm/services/diagnosticCommand.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/services/diagnosticCommand.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -549,23 +549,6 @@
 
 };
 
-class RotateGCLogDCmd : public DCmd {
-public:
-  RotateGCLogDCmd(outputStream* output, bool heap) : DCmd(output, heap) {}
-  static const char* name() { return "GC.rotate_log"; }
-  static const char* description() {
-    return "Force the GC log file to be rotated.";
-  }
-  static const char* impact() { return "Low"; }
-  virtual void execute(DCmdSource source, TRAPS);
-  static int num_arguments() { return 0; }
-  static const JavaPermission permission() {
-    JavaPermission p = {"java.lang.management.ManagementPermission",
-                        "control", NULL};
-    return p;
-  }
-};
-
 class CompileQueueDCmd : public DCmd {
 public:
   CompileQueueDCmd(outputStream* output, bool heap) : DCmd(output, heap) {}
--- a/src/share/vm/services/memoryService.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/services/memoryService.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -32,6 +32,7 @@
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/generationSpec.hpp"
+#include "logging/logConfiguration.hpp"
 #include "memory/heap.hpp"
 #include "memory/memRegion.hpp"
 #include "oops/oop.inline.hpp"
@@ -517,8 +518,12 @@
 bool MemoryService::set_verbose(bool verbose) {
   MutexLocker m(Management_lock);
   // verbose will be set to the previous value
-  Flag::Error error = CommandLineFlags::boolAtPut("PrintGC", &verbose, Flag::MANAGEMENT);
-  assert(error==Flag::SUCCESS, "Setting PrintGC flag failed with error %s", Flag::flag_error_str(error));
+  MutexLocker ml(LogConfiguration_lock);
+  if (verbose) {
+    LogConfiguration::parse_log_arguments("stdout", "gc", NULL, NULL, NULL);
+  } else {
+    LogConfiguration::parse_log_arguments("stdout", "gc=off", NULL, NULL, NULL);
+  }
   ClassLoadingService::reset_trace_class_unloading();
 
   return verbose;
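
This function is presumably the backend reached from the standard management
API, so with this change the Java-side toggle drives unified logging instead
of the old PrintGC flag:

  // ManagementFactory.getMemoryMXBean().setVerbose(true);
  // now behaves roughly like enabling -Xlog:gc on stdout at runtime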
--- a/src/share/vm/services/memoryService.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/services/memoryService.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -27,6 +27,7 @@
 
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/generation.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/handles.hpp"
 #include "services/memoryUsage.hpp"
@@ -164,7 +165,7 @@
 
   static void oops_do(OopClosure* f);
 
-  static bool get_verbose() { return PrintGC; }
+  static bool get_verbose() { return log_is_enabled(Info, gc); }
   static bool set_verbose(bool verbose);
 
   // Create an instance of java/lang/management/MemoryUsage
--- a/src/share/vm/services/runtimeService.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/services/runtimeService.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/classLoader.hpp"
+#include "logging/log.hpp"
 #include "runtime/vm_version.hpp"
 #include "services/attachListener.hpp"
 #include "services/management.hpp"
@@ -87,11 +88,8 @@
   HS_PRIVATE_SAFEPOINT_BEGIN();
 
   // Print the time interval in which the app was executing
-  if (PrintGCApplicationConcurrentTime && _app_timer.is_updated()) {
-    gclog_or_tty->date_stamp(PrintGCDateStamps);
-    gclog_or_tty->stamp(PrintGCTimeStamps);
-    gclog_or_tty->print_cr("Application time: %3.7f seconds",
-                                last_application_time_sec());
+  if (_app_timer.is_updated()) {
+    log_info(safepoint)("Application time: %3.7f seconds", last_application_time_sec());
   }
 
   // update the time stamp to begin recording safepoint time
@@ -109,7 +107,7 @@
   if (UsePerfData) {
     _sync_time_ticks->inc(_safepoint_timer.ticks_since_update());
   }
-  if (PrintGCApplicationStoppedTime) {
+  if (log_is_enabled(Info, safepoint)) {
     _last_safepoint_sync_time_sec = last_safepoint_time_sec();
   }
 }
@@ -119,15 +117,8 @@
 
   // Print the time interval for which the app was stopped
   // during the current safepoint operation.
-  if (PrintGCApplicationStoppedTime) {
-    gclog_or_tty->date_stamp(PrintGCDateStamps);
-    gclog_or_tty->stamp(PrintGCTimeStamps);
-    gclog_or_tty->print_cr("Total time for which application threads "
-                           "were stopped: %3.7f seconds, "
-                           "Stopping threads took: %3.7f seconds",
-                           last_safepoint_time_sec(),
-                           _last_safepoint_sync_time_sec);
-  }
+  log_info(safepoint)("Total time for which application threads were stopped: %3.7f seconds, Stopping threads took: %3.7f seconds",
+                      last_safepoint_time_sec(), _last_safepoint_sync_time_sec);
 
   // update the time stamp to begin recording app time
   _app_timer.update();
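
Both interval lines now go through unified logging on the safepoint tag
(replacing PrintGCApplicationConcurrentTime and PrintGCApplicationStoppedTime),
so they are enabled with, for example:

  java -Xlog:safepoint ...   // prints "Application time" and stopped-time lines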
--- a/src/share/vm/trace/trace.xml	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/trace/trace.xml	Fri Dec 18 12:39:02 2015 -0800
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="utf-8"?>
 <!--
- Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -109,6 +109,11 @@
       <value type="ADDRESS" field="address" label="Monitor Address" description="Address of object waited on" relation="JAVA_MONITOR_ADDRESS"/>
     </event>
 
+    <event id="ReservedStackActivation" path="java/reserved_stack_activation" label="Reserved Stack Activation" description="Activation of Reserved Stack Area caused by stack overflow with ReservedStackAccess annotated method in call stack"
+            has_thread="true" has_stacktrace="true" is_instant="true">
+        <value type="METHOD" field="method" label="Java Method"/>
+    </event>
+
     <event id="ClassLoad" path="vm/class/load" label="Class Load"
             has_thread="true" has_stacktrace="true" is_instant="false">
       <value type="CLASS" field="loadedClass" label="Loaded Class"/>
--- a/src/share/vm/utilities/accessFlags.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/accessFlags.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -100,6 +100,9 @@
   jint _flags;
 
  public:
+  AccessFlags() : _flags(0) {}
+  explicit AccessFlags(jint flags) : _flags(flags) {}
+
   // Java access flags
   bool is_public      () const         { return (_flags & JVM_ACC_PUBLIC      ) != 0; }
   bool is_private     () const         { return (_flags & JVM_ACC_PRIVATE     ) != 0; }
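
The added explicit constructor keeps default construction cheap while ruling
out accidental implicit jint conversions; as a quick illustration:

  AccessFlags a;                  // OK: zero-initialized
  AccessFlags b(JVM_ACC_PUBLIC);  // OK: intentional, explicit
  // AccessFlags c = 0x0001;      // no longer compiles: constructor is explicit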
--- a/src/share/vm/utilities/debug.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/debug.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -215,7 +215,7 @@
   if (Debugging || error_is_suppressed(file, line)) return;
   va_list detail_args;
   va_start(detail_args, detail_fmt);
-  VMError::report_and_die(ThreadLocalStorage::get_thread_slow(), file, line, error_msg, detail_fmt, detail_args);
+  VMError::report_and_die(Thread::current_or_null(), file, line, error_msg, detail_fmt, detail_args);
   va_end(detail_args);
 }
 
@@ -224,7 +224,7 @@
   if (Debugging || error_is_suppressed(file, line)) return;
   va_list detail_args;
   va_start(detail_args, detail_fmt);
-  VMError::report_and_die(ThreadLocalStorage::get_thread_slow(), file, line, "fatal error", detail_fmt, detail_args);
+  VMError::report_and_die(Thread::current_or_null(), file, line, "fatal error", detail_fmt, detail_args);
   va_end(detail_args);
 }
 
@@ -233,7 +233,7 @@
   if (Debugging) return;
   va_list detail_args;
   va_start(detail_args, detail_fmt);
-  VMError::report_and_die(ThreadLocalStorage::get_thread_slow(), file, line, size, vm_err_type, detail_fmt, detail_args);
+  VMError::report_and_die(Thread::current_or_null(), file, line, size, vm_err_type, detail_fmt, detail_args);
   va_end(detail_args);
 
   // The UseOSErrorReporting option in report_and_die() may allow a return
@@ -305,6 +305,16 @@
     if (OnOutOfMemoryError && OnOutOfMemoryError[0]) {
       VMError::report_java_out_of_memory(message);
     }
+
+    if (CrashOnOutOfMemoryError) {
+      tty->print_cr("Aborting due to java.lang.OutOfMemoryError: %s", message);
+      fatal("OutOfMemory encountered: %s", message);
+    }
+
+    if (ExitOnOutOfMemoryError) {
+      tty->print_cr("Terminating due to java.lang.OutOfMemoryError: %s", message);
+      os::exit(3);
+    }
   }
 }
 
@@ -495,7 +505,7 @@
 
 extern "C" void universe() {
   Command c("universe");
-  Universe::print();
+  Universe::print_on(tty);
 }
 
 
@@ -536,7 +546,7 @@
 #endif // !PRODUCT
 
 extern "C" void ps() { // print stack
-  if (Thread::current() == NULL) return;
+  if (Thread::current_or_null() == NULL) return;
   Command c("ps");
 
 
@@ -615,7 +625,7 @@
 #endif // !PRODUCT
 
 extern "C" void pss() { // print all stacks
-  if (Thread::current() == NULL) return;
+  if (Thread::current_or_null() == NULL) return;
   Command c("pss");
   Threads::print(true, PRODUCT_ONLY(false) NOT_PRODUCT(true));
 }
@@ -772,7 +782,7 @@
 extern "C" void pns(void* sp, void* fp, void* pc) { // print native stack
   Command c("pns");
   static char buf[O_BUFLEN];
-  Thread* t = ThreadLocalStorage::get_thread_slow();
+  Thread* t = Thread::current_or_null();
   // Call generic frame constructor (certain arguments may be ignored)
   frame fr(sp, fp, pc);
   print_native_stack(tty, fr, t, buf, sizeof(buf));
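
These hunks replace ThreadLocalStorage::get_thread_slow() with Thread::current_or_null(), which returns NULL rather than asserting when the caller is not an attached VM thread, so the debug helpers stay usable from arbitrary contexts. A standalone sketch of that accessor split, assuming a compiler-supported thread-local pointer (the Thread type here is a stand-in, not HotSpot's):

#include <cassert>
#include <cstdio>

// Stand-in Thread with the same accessor split as HotSpot's.
class Thread {
  static thread_local Thread* _thr_current; // set when a thread attaches to the VM
 public:
  static Thread* current_or_null() { return _thr_current; } // safe when unattached
  static Thread* current() {                                // asserting variant
    Thread* t = current_or_null();
    assert(t != NULL && "must be attached");
    return t;
  }
  static void attach(Thread* t) { _thr_current = t; }
};
thread_local Thread* Thread::_thr_current = NULL;

int main() {
  if (Thread::current_or_null() == NULL) {
    std::puts("not attached: skip locking, print anyway"); // same shape as ps()/pss() above
  }
  Thread t;
  Thread::attach(&t);
  assert(Thread::current() == &t);
  return 0;
}
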
--- a/src/share/vm/utilities/elfSymbolTable.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/elfSymbolTable.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -69,6 +69,26 @@
   }
 }
 
+bool ElfSymbolTable::compare(const Elf_Sym* sym, address addr, int* stringtableIndex, int* posIndex, int* offset, ElfFuncDescTable* funcDescTable) {
+  if (STT_FUNC == ELF_ST_TYPE(sym->st_info)) {
+    Elf_Word st_size = sym->st_size;
+    address sym_addr;
+    if (funcDescTable != NULL && funcDescTable->get_index() == sym->st_shndx) {
+      // We need to go another step through the function descriptor table (currently PPC64 only)
+      sym_addr = funcDescTable->lookup(sym->st_value);
+    } else {
+      sym_addr = (address)sym->st_value;
+    }
+    if (sym_addr <= addr && (Elf_Word)(addr - sym_addr) < st_size) {
+      *offset = (int)(addr - sym_addr);
+      *posIndex = sym->st_name;
+      *stringtableIndex = m_shdr.sh_link;
+      return true;
+    }
+  }
+  return false;
+}
+
 bool ElfSymbolTable::lookup(address addr, int* stringtableIndex, int* posIndex, int* offset, ElfFuncDescTable* funcDescTable) {
   assert(stringtableIndex, "null string table index pointer");
   assert(posIndex, "null string table offset pointer");
@@ -83,21 +103,8 @@
   int count = m_shdr.sh_size / sym_size;
   if (m_symbols != NULL) {
     for (int index = 0; index < count; index ++) {
-      if (STT_FUNC == ELF_ST_TYPE(m_symbols[index].st_info)) {
-        Elf_Word st_size = m_symbols[index].st_size;
-        address sym_addr;
-        if (funcDescTable != NULL && funcDescTable->get_index() == m_symbols[index].st_shndx) {
-          // We need to go another step through the function descriptor table (currently PPC64 only)
-          sym_addr = funcDescTable->lookup(m_symbols[index].st_value);
-        } else {
-          sym_addr = (address)m_symbols[index].st_value;
-        }
-        if (sym_addr <= addr && (Elf_Word)(addr - sym_addr) < st_size) {
-          *offset = (int)(addr - sym_addr);
-          *posIndex = m_symbols[index].st_name;
-          *stringtableIndex = m_shdr.sh_link;
-          return true;
-        }
+      if (compare(&m_symbols[index], addr, stringtableIndex, posIndex, offset, funcDescTable)) {
+        return true;
       }
     }
   } else {
@@ -111,21 +118,8 @@
     Elf_Sym sym;
     for (int index = 0; index < count; index ++) {
       if (fread(&sym, sym_size, 1, m_file) == 1) {
-        if (STT_FUNC == ELF_ST_TYPE(sym.st_info)) {
-          Elf_Word st_size = sym.st_size;
-          address sym_addr;
-          if (funcDescTable != NULL && funcDescTable->get_index() == sym.st_shndx) {
-            // We need to go another step through the function descriptor table (currently PPC64 only)
-            sym_addr = funcDescTable->lookup(sym.st_value);
-          } else {
-            sym_addr = (address)sym.st_value;
-          }
-          if (sym_addr <= addr && (Elf_Word)(addr - sym_addr) < st_size) {
-            *offset = (int)(addr - sym_addr);
-            *posIndex = sym.st_name;
-            *stringtableIndex = m_shdr.sh_link;
-            return true;
-          }
+        if (compare(&sym, addr, stringtableIndex, posIndex, offset, funcDescTable)) {
+          return true;
         }
       } else {
         m_status = NullDecoder::file_invalid;
@@ -134,7 +128,7 @@
     }
     fseek(m_file, cur_pos, SEEK_SET);
   }
-  return true;
+  return false;
 }
 
 #endif // !_WINDOWS && !__APPLE__
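
The duplicated match logic in both lookup loops is factored into ElfSymbolTable::compare(), which reports a hit when addr falls in the half-open range [sym_addr, sym_addr + st_size); note the fallthrough at the end of lookup() also changes from true to false, so a failed scan now correctly reports a miss. A standalone sketch of just the range test (the types are stand-ins):

#include <cassert>
#include <cstdint>

typedef const uint8_t* address;   // stand-in for HotSpot's address type

// A hit when sym_addr <= addr < sym_addr + sym_size (half-open, end exclusive).
static bool symbol_covers(address sym_addr, uint32_t sym_size,
                          address addr, int* offset) {
  if (sym_addr <= addr && (uint32_t)(addr - sym_addr) < sym_size) {
    *offset = (int)(addr - sym_addr);  // offset of addr within the function
    return true;
  }
  return false;                        // miss: caller keeps scanning the table
}

int main() {
  uint8_t code[16] = {0};
  int off = -1;
  assert(symbol_covers(code, sizeof(code), code + 5, &off) && off == 5);
  assert(!symbol_covers(code, sizeof(code), code + 16, &off)); // end is exclusive
  return 0;
}
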
--- a/src/share/vm/utilities/elfSymbolTable.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/elfSymbolTable.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -63,6 +63,8 @@
   Elf_Shdr            m_shdr;
 
   NullDecoder::decoder_status  m_status;
+
+  bool compare(const Elf_Sym* sym, address addr, int* stringtableIndex, int* posIndex, int* offset, ElfFuncDescTable* funcDescTable);
 };
 
 #endif // !_WINDOWS and !__APPLE__
--- a/src/share/vm/utilities/events.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/events.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -29,7 +29,6 @@
 #include "runtime/osThread.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadCritical.hpp"
-#include "runtime/threadLocalStorage.hpp"
 #include "runtime/timer.hpp"
 #include "utilities/events.hpp"
 
--- a/src/share/vm/utilities/events.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/events.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -248,8 +248,8 @@
 
 template <class T>
 inline void EventLogBase<T>::print_log_on(outputStream* out) {
-  if (ThreadLocalStorage::get_thread_slow() == NULL) {
-    // Not a regular Java thread so don't bother locking
+  if (Thread::current_or_null() == NULL) {
+    // Not yet attached? Don't try to use locking
     print_log_impl(out);
   } else {
     MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
--- a/src/share/vm/utilities/globalDefinitions.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -949,7 +949,6 @@
 // (in order to reduce interface dependencies & reduce
 // number of unnecessary compilations after changes)
 
-class symbolTable;
 class ClassFileStream;
 
 class Event;
--- a/src/share/vm/utilities/globalDefinitions_gcc.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/globalDefinitions_gcc.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -326,4 +326,8 @@
 #define JLONG_FORMAT           "%ld"
 #endif // _LP64 && __APPLE__
 
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+#define THREAD_LOCAL_DECL __thread
+#endif
+
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_GCC_HPP
--- a/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -273,4 +273,8 @@
 
 #define offset_of(klass,field) offsetof(klass,field)
 
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+#define THREAD_LOCAL_DECL __thread
+#endif
+
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_SPARCWORKS_HPP
--- a/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -234,4 +234,8 @@
 
 #define offset_of(klass,field) offsetof(klass,field)
 
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+#define THREAD_LOCAL_DECL __declspec( thread )
+#endif
+
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_VISCPP_HPP
--- a/src/share/vm/utilities/globalDefinitions_xlc.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/globalDefinitions_xlc.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -180,5 +180,8 @@
 #define SIZE_64G  ((uint64_t) UCONST64( 0x1000000000))
 #define SIZE_1T   ((uint64_t) UCONST64(0x10000000000))
 
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+#define THREAD_LOCAL_DECL __thread
+#endif
 
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_XLC_HPP
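
Each compiler-specific header now defines THREAD_LOCAL_DECL as that toolchain's fast thread-local storage qualifier, unless USE_LIBRARY_BASED_TLS_ONLY forces library-based TLS. A standalone sketch of the macro in use (the thread_local fallback and the std::thread driver are additions for the demo, not part of the changeset):

#include <cstdio>
#include <thread>

#if defined(_MSC_VER)
  #define THREAD_LOCAL_DECL __declspec(thread)   // as in globalDefinitions_visCPP.hpp
#elif defined(__GNUC__)
  #define THREAD_LOCAL_DECL __thread             // as in the gcc/sparcWorks/xlc headers
#else
  #define THREAD_LOCAL_DECL thread_local         // portable fallback for the demo
#endif

static THREAD_LOCAL_DECL int t_events = 0;       // one independent copy per thread

int main() {
  auto body = [] {
    for (int i = 0; i < 3; i++) t_events++;
    std::printf("this thread saw %d events\n", t_events); // prints 3, never 6
  };
  std::thread a(body), b(body);
  a.join();
  b.join();
  return 0;
}
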
--- a/src/share/vm/utilities/hashtable.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/hashtable.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -151,7 +151,7 @@
   void copy_table(char** top, char* end);
 
   // Bucket handling
-  int hash_to_index(unsigned int full_hash) {
+  int hash_to_index(unsigned int full_hash) const {
     int h = full_hash % _table_size;
     assert(h >= 0 && h < _table_size, "Illegal hash value");
     return h;
@@ -173,8 +173,8 @@
 protected:
 
 #ifdef ASSERT
-  int               _lookup_count;
-  int               _lookup_length;
+  mutable int       _lookup_count;
+  mutable int       _lookup_length;
   void verify_lookup_length(double load);
 #endif
 
@@ -184,7 +184,7 @@
   int entry_size() const { return _entry_size; }
 
   // The following method is MT-safe and may be used with caution.
-  BasicHashtableEntry<F>* bucket(int i);
+  BasicHashtableEntry<F>* bucket(int i) const;
 
   // The following method is not MT-safe and must be done under lock.
   BasicHashtableEntry<F>** bucket_addr(int i) { return _buckets[i].entry_addr(); }
@@ -263,7 +263,7 @@
   HashtableEntry<T, F>* new_entry(unsigned int hashValue, T obj);
 
   // The following method is MT-safe and may be used with caution.
-  HashtableEntry<T, F>* bucket(int i) {
+  HashtableEntry<T, F>* bucket(int i) const {
     return (HashtableEntry<T, F>*)BasicHashtable<F>::bucket(i);
   }
 
@@ -329,7 +329,7 @@
     : Hashtable<T, F>(table_size, entry_size, t, number_of_entries) {}
 
 public:
-  unsigned int compute_hash(Symbol* name, ClassLoaderData* loader_data) {
+  unsigned int compute_hash(const Symbol* name, const ClassLoaderData* loader_data) const {
     unsigned int name_hash = name->identity_hash();
     // loader is null with CDS
     assert(loader_data != NULL || UseSharedSpaces || DumpSharedSpaces,
--- a/src/share/vm/utilities/hashtable.inline.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/hashtable.inline.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -72,7 +72,7 @@
 
 
 // The following method is MT-safe and may be used with caution.
-template <MEMFLAGS F> inline BasicHashtableEntry<F>* BasicHashtable<F>::bucket(int i) {
+template <MEMFLAGS F> inline BasicHashtableEntry<F>* BasicHashtable<F>::bucket(int i) const {
   return _buckets[i].get_entry();
 }
 
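hash_to_index() and bucket() become const so read-only lookups work on const tables, and the ASSERT-only lookup counters become mutable so a const lookup can still update them. A standalone sketch of that const/mutable split (Table is a stand-in, not the real BasicHashtable):

#include <cassert>

class Table {
  int _table_size;
  mutable int _lookup_count;     // stats only; mutation allowed in const methods
 public:
  explicit Table(int size) : _table_size(size), _lookup_count(0) {}
  int hash_to_index(unsigned int full_hash) const {  // logically read-only
    int h = (int)(full_hash % (unsigned int)_table_size);
    assert(h >= 0 && h < _table_size);
    _lookup_count++;             // fine: _lookup_count is mutable
    return h;
  }
  int lookups() const { return _lookup_count; }
};

int main() {
  const Table t(16);             // a const table can still be probed
  (void)t.hash_to_index(42u);
  assert(t.lookups() == 1);
  return 0;
}
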
--- a/src/share/vm/utilities/numberSeq.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/numberSeq.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -234,7 +234,7 @@
 
 // Printing/Debugging Support
 
-void AbsSeq::dump() { dump_on(gclog_or_tty); }
+void AbsSeq::dump() { dump_on(tty); }
 
 void AbsSeq::dump_on(outputStream* s) {
   s->print_cr("\t _num = %d, _sum = %7.3f, _sum_of_squares = %7.3f",
--- a/src/share/vm/utilities/ostream.cpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/ostream.cpp	Fri Dec 18 12:39:02 2015 -0800
@@ -239,14 +239,6 @@
   return;
 }
 
-void outputStream::gclog_stamp() {
-  date_stamp(PrintGCDateStamps);
-  stamp(PrintGCTimeStamps);
-  if (PrintGCID) {
-    print("#%u: ", GCId::current());
-  }
-}
-
 outputStream& outputStream::indent() {
   while (_position < _indentation) sp();
   return *this;
@@ -366,7 +358,6 @@
 
 xmlStream*   xtty;
 outputStream* tty;
-outputStream* gclog_or_tty;
 CDS_ONLY(fileStream* classlist_file;) // Only dump the classes that can be stored into the CDS archive
 extern Mutex* tty_lock;
 
@@ -482,7 +473,7 @@
   return buf;
 }
 
-// log_name comes from -XX:LogFile=log_name, -Xloggc:log_name or
+// log_name comes from -XX:LogFile=log_name or
 // -XX:DumpLoadedClassList=<file_name>
 // in log_name, %p => pid1234 and
 //              %t => YYYY-MM-DD_HH-MM-SS
@@ -493,95 +484,6 @@
                                 timestr);
 }
 
-#ifndef PRODUCT
-void test_loggc_filename() {
-  int pid;
-  char  tms[32];
-  char  i_result[JVM_MAXPATHLEN];
-  const char* o_result;
-  get_datetime_string(tms, sizeof(tms));
-  pid = os::current_process_id();
-
-  // test.log
-  jio_snprintf(i_result, JVM_MAXPATHLEN, "test.log", tms);
-  o_result = make_log_name_internal("test.log", NULL, pid, tms);
-  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test.log\", NULL)");
-  FREE_C_HEAP_ARRAY(char, o_result);
-
-  // test-%t-%p.log
-  jio_snprintf(i_result, JVM_MAXPATHLEN, "test-%s-pid%u.log", tms, pid);
-  o_result = make_log_name_internal("test-%t-%p.log", NULL, pid, tms);
-  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t-%%p.log\", NULL)");
-  FREE_C_HEAP_ARRAY(char, o_result);
-
-  // test-%t%p.log
-  jio_snprintf(i_result, JVM_MAXPATHLEN, "test-%spid%u.log", tms, pid);
-  o_result = make_log_name_internal("test-%t%p.log", NULL, pid, tms);
-  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t%%p.log\", NULL)");
-  FREE_C_HEAP_ARRAY(char, o_result);
-
-  // %p%t.log
-  jio_snprintf(i_result, JVM_MAXPATHLEN, "pid%u%s.log", pid, tms);
-  o_result = make_log_name_internal("%p%t.log", NULL, pid, tms);
-  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p%%t.log\", NULL)");
-  FREE_C_HEAP_ARRAY(char, o_result);
-
-  // %p-test.log
-  jio_snprintf(i_result, JVM_MAXPATHLEN, "pid%u-test.log", pid);
-  o_result = make_log_name_internal("%p-test.log", NULL, pid, tms);
-  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p-test.log\", NULL)");
-  FREE_C_HEAP_ARRAY(char, o_result);
-
-  // %t.log
-  jio_snprintf(i_result, JVM_MAXPATHLEN, "%s.log", tms);
-  o_result = make_log_name_internal("%t.log", NULL, pid, tms);
-  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%t.log\", NULL)");
-  FREE_C_HEAP_ARRAY(char, o_result);
-
-  {
-    // longest filename
-    char longest_name[JVM_MAXPATHLEN];
-    memset(longest_name, 'a', sizeof(longest_name));
-    longest_name[JVM_MAXPATHLEN - 1] = '\0';
-    o_result = make_log_name_internal((const char*)&longest_name, NULL, pid, tms);
-    assert(strcmp(longest_name, o_result) == 0, "longest name does not match. expected '%s' but got '%s'", longest_name, o_result);
-    FREE_C_HEAP_ARRAY(char, o_result);
-  }
-
-  {
-    // too long file name
-    char too_long_name[JVM_MAXPATHLEN + 100];
-    int too_long_length = sizeof(too_long_name);
-    memset(too_long_name, 'a', too_long_length);
-    too_long_name[too_long_length - 1] = '\0';
-    o_result = make_log_name_internal((const char*)&too_long_name, NULL, pid, tms);
-    assert(o_result == NULL, "Too long file name should return NULL, but got '%s'", o_result);
-  }
-
-  {
-    // too long with timestamp
-    char longest_name[JVM_MAXPATHLEN];
-    memset(longest_name, 'a', JVM_MAXPATHLEN);
-    longest_name[JVM_MAXPATHLEN - 3] = '%';
-    longest_name[JVM_MAXPATHLEN - 2] = 't';
-    longest_name[JVM_MAXPATHLEN - 1] = '\0';
-    o_result = make_log_name_internal((const char*)&longest_name, NULL, pid, tms);
-    assert(o_result == NULL, "Too long file name after timestamp expansion should return NULL, but got '%s'", o_result);
-  }
-
-  {
-    // too long with pid
-    char longest_name[JVM_MAXPATHLEN];
-    memset(longest_name, 'a', JVM_MAXPATHLEN);
-    longest_name[JVM_MAXPATHLEN - 3] = '%';
-    longest_name[JVM_MAXPATHLEN - 2] = 'p';
-    longest_name[JVM_MAXPATHLEN - 1] = '\0';
-    o_result = make_log_name_internal((const char*)&longest_name, NULL, pid, tms);
-    assert(o_result == NULL, "Too long file name after pid expansion should return NULL, but got '%s'", o_result);
-  }
-}
-#endif // PRODUCT
-
 fileStream::fileStream(const char* file_name) {
   _file = fopen(file_name, "w");
   if (_file != NULL) {
@@ -660,202 +562,6 @@
   update_position(s, len);
 }
 
-// dump vm version, os version, platform info, build id,
-// memory usage and command line flags into header
-void gcLogFileStream::dump_loggc_header() {
-  if (is_open()) {
-    print_cr("%s", Abstract_VM_Version::internal_vm_info_string());
-    os::print_memory_info(this);
-    print("CommandLine flags: ");
-    CommandLineFlags::printSetFlags(this);
-  }
-}
-
-gcLogFileStream::~gcLogFileStream() {
-  if (_file != NULL) {
-    if (_need_close) fclose(_file);
-    _file = NULL;
-  }
-  if (_file_name != NULL) {
-    FREE_C_HEAP_ARRAY(char, _file_name);
-    _file_name = NULL;
-  }
-}
-
-gcLogFileStream::gcLogFileStream(const char* file_name) {
-  _cur_file_num = 0;
-  _bytes_written = 0L;
-  _file_name = make_log_name(file_name, NULL);
-
-  if (_file_name == NULL) {
-    warning("Cannot open file %s: file name is too long.\n", file_name);
-    _need_close = false;
-    UseGCLogFileRotation = false;
-    return;
-  }
-
-  // gc log file rotation
-  if (UseGCLogFileRotation && NumberOfGCLogFiles > 1) {
-    char tempbuf[JVM_MAXPATHLEN];
-    jio_snprintf(tempbuf, sizeof(tempbuf), "%s.%d" CURRENTAPPX, _file_name, _cur_file_num);
-    _file = fopen(tempbuf, "w");
-  } else {
-    _file = fopen(_file_name, "w");
-  }
-  if (_file != NULL) {
-    _need_close = true;
-    dump_loggc_header();
-  } else {
-    warning("Cannot open file %s due to %s\n", _file_name, strerror(errno));
-    _need_close = false;
-  }
-}
-
-void gcLogFileStream::write(const char* s, size_t len) {
-  if (_file != NULL) {
-    size_t count = fwrite(s, 1, len, _file);
-    _bytes_written += count;
-  }
-  update_position(s, len);
-}
-
-// rotate_log must be called from the VMThread at a safepoint. If parameters for gc log
-// rotation need to be changed from a thread other than the VMThread, a subtype of
-// VM_Operation should be created and submitted to the VMThread's operation queue. DO NOT
-// call this function directly. Currently, it is safe to rotate the log at a safepoint
-// through the VMThread; that is, no mutator threads or concurrent GC threads run in
-// parallel with the VMThread to write to the gc log file at a safepoint. If, in the
-// future, mutator or concurrent GC threads are changed to run in parallel with the
-// VMThread at a safepoint, write and rotate_log must be synchronized.
-void gcLogFileStream::rotate_log(bool force, outputStream* out) {
-  char time_msg[O_BUFLEN];
-  char time_str[EXTRACHARLEN];
-  char current_file_name[JVM_MAXPATHLEN];
-  char renamed_file_name[JVM_MAXPATHLEN];
-
-  if (!should_rotate(force)) {
-    return;
-  }
-
-#ifdef ASSERT
-  Thread *thread = Thread::current();
-  assert(thread == NULL ||
-         (thread->is_VM_thread() && SafepointSynchronize::is_at_safepoint()),
-         "Must be VMThread at safepoint");
-#endif
-  if (NumberOfGCLogFiles == 1) {
-    // rotate in same file
-    rewind();
-    _bytes_written = 0L;
-    jio_snprintf(time_msg, sizeof(time_msg), "File  %s rotated at %s\n",
-                 _file_name, os::local_time_string((char *)time_str, sizeof(time_str)));
-    write(time_msg, strlen(time_msg));
-
-    if (out != NULL) {
-      out->print("%s", time_msg);
-    }
-
-    dump_loggc_header();
-    return;
-  }
-
-#if defined(_WINDOWS)
-#ifndef F_OK
-#define F_OK 0
-#endif
-#endif // _WINDOWS
-
-  // rotate among files named extended_filename.0, extended_filename.1, ...,
-  // extended_filename.<NumberOfGCLogFiles - 1>. The current rotation file has a name
-  // of the form extended_filename.<i>.current, where i is the current rotation
-  // file number. Once it reaches the maximum file size, the file is saved and renamed
-  // with .current removed from its tail.
-  if (_file != NULL) {
-    jio_snprintf(renamed_file_name, JVM_MAXPATHLEN, "%s.%d",
-                 _file_name, _cur_file_num);
-    int result = jio_snprintf(current_file_name, JVM_MAXPATHLEN,
-                              "%s.%d" CURRENTAPPX, _file_name, _cur_file_num);
-    if (result >= JVM_MAXPATHLEN) {
-      warning("Cannot create new log file name: %s: file name is too long.\n", current_file_name);
-      return;
-    }
-
-    const char* msg = force ? "GC log rotation request has been received."
-                            : "GC log file has reached the maximum size.";
-    jio_snprintf(time_msg, sizeof(time_msg), "%s %s Saved as %s\n",
-                     os::local_time_string((char *)time_str, sizeof(time_str)),
-                                                         msg, renamed_file_name);
-    write(time_msg, strlen(time_msg));
-
-    if (out != NULL) {
-      out->print("%s", time_msg);
-    }
-
-    fclose(_file);
-    _file = NULL;
-
-    bool can_rename = true;
-    if (access(current_file_name, F_OK) != 0) {
-      // current file does not exist?
-      warning("No source file exists, cannot rename\n");
-      can_rename = false;
-    }
-    if (can_rename) {
-      if (access(renamed_file_name, F_OK) == 0) {
-        if (remove(renamed_file_name) != 0) {
-          warning("Could not delete existing file %s\n", renamed_file_name);
-          can_rename = false;
-        }
-      } else {
-        // file does not exist, ok to rename
-      }
-    }
-    if (can_rename && rename(current_file_name, renamed_file_name) != 0) {
-      warning("Could not rename %s to %s\n", _file_name, renamed_file_name);
-    }
-  }
-
-  _cur_file_num++;
-  if (_cur_file_num > NumberOfGCLogFiles - 1) _cur_file_num = 0;
-  int result = jio_snprintf(current_file_name,  JVM_MAXPATHLEN, "%s.%d" CURRENTAPPX,
-               _file_name, _cur_file_num);
-  if (result >= JVM_MAXPATHLEN) {
-    warning("Cannot create new log file name: %s: file name is too long.\n", current_file_name);
-    return;
-  }
-
-  _file = fopen(current_file_name, "w");
-
-  if (_file != NULL) {
-    _bytes_written = 0L;
-    _need_close = true;
-    // reuse current_file_name for time_msg
-    jio_snprintf(current_file_name, JVM_MAXPATHLEN,
-                 "%s.%d", _file_name, _cur_file_num);
-    jio_snprintf(time_msg, sizeof(time_msg), "%s GC log file created %s\n",
-                 os::local_time_string((char *)time_str, sizeof(time_str)), current_file_name);
-    write(time_msg, strlen(time_msg));
-
-    if (out != NULL) {
-      out->print("%s", time_msg);
-    }
-
-    dump_loggc_header();
-    // remove the existing file
-    if (access(current_file_name, F_OK) == 0) {
-      if (remove(current_file_name) != 0) {
-        warning("Could not delete existing file %s\n", current_file_name);
-      }
-    }
-  } else {
-    warning("failed to open rotation log file %s due to %s\n"
-            "Turned off GC log file rotation\n",
-                  _file_name, strerror(errno));
-    _need_close = false;
-    FLAG_SET_DEFAULT(UseGCLogFileRotation, false);
-  }
-}
-
 defaultStream* defaultStream::instance = NULL;
 int defaultStream::_output_fd = 1;
 int defaultStream::_error_fd  = 2;
@@ -1058,8 +764,8 @@
       // bootstrap problem
       tty_lock == NULL ||
 
-      // can't grab a lock or call Thread::current() if TLS isn't initialized
-      ThreadLocalStorage::thread() == NULL ||
+      // can't grab a lock if current Thread isn't set
+      Thread::current_or_null() == NULL ||
 
       // developer hook
       !SerializeVMOutput ||
@@ -1194,21 +900,8 @@
 }
 
 void ostream_init_log() {
-  // For -Xloggc:<file> option - called in runtime/thread.cpp
   // Note : this must be called AFTER ostream_init()
 
-  gclog_or_tty = tty; // default to tty
-  if (Arguments::gc_log_filename() != NULL) {
-    fileStream * gclog  = new(ResourceObj::C_HEAP, mtInternal)
-                             gcLogFileStream(Arguments::gc_log_filename());
-    if (gclog->is_open()) {
-      // now we update the time stamp of the GC log to be synced up
-      // with tty.
-      gclog->time_stamp().update_to(tty->time_stamp().ticks());
-    }
-    gclog_or_tty = gclog;
-  }
-
 #if INCLUDE_CDS
   // For -XX:DumpLoadedClassList=<file> option
   if (DumpLoadedClassList != NULL) {
@@ -1236,9 +929,6 @@
     delete classlist_file;
   }
 #endif
-  if (gclog_or_tty != tty) {
-      delete gclog_or_tty;
-  }
   {
       // we temporarily disable PrintMallocFree here
       // as otherwise it'll lead to the use of almost deleted
@@ -1254,14 +944,12 @@
   }
   tty = NULL;
   xtty = NULL;
-  gclog_or_tty = NULL;
   defaultStream::instance = NULL;
 }
 
 // ostream_abort() is called by os::abort() when VM is about to die.
 void ostream_abort() {
-  // Here we can't delete gclog_or_tty and tty, just flush their output
-  if (gclog_or_tty) gclog_or_tty->flush();
+  // Here we can't delete tty, just flush its output
   if (tty) tty->flush();
 
   if (defaultStream::instance != NULL) {
--- a/src/share/vm/utilities/ostream.hpp	Thu Dec 17 23:36:28 2015 +0000
+++ b/src/share/vm/utilities/ostream.hpp	Fri Dec 18 12:39:02 2015 -0800
@@ -108,7 +108,6 @@
    void date_stamp(bool guard) {
      date_stamp(guard, "", ": ");
    }
-   void gclog_stamp();
 
    // portable printing of 64 bit integers
    void print_jlong(jlong value);
@@ -127,7 +126,6 @@
 // standard output
 // ANSI C++ name collision
 extern outputStream* tty;           // tty output
-extern outputStream* gclog_or_tty;  // stream for gc log if -Xloggc:<f>, or tty
 
 class streamIndentor : public StackObj {
  private:
@@ -247,30 +245,6 @@
   }
 };
 
-class gcLogFileStream : public fileStream {
- protected:
-  const char*  _file_name;
-  jlong  _bytes_written;
-  uintx  _cur_file_num;             // current logfile rotation number, from 0 to NumberOfGCLogFiles-1
- public:
-  gcLogFileStream(const char* file_name);
-  ~gcLogFileStream();
-  virtual void write(const char* c, size_t len);
-  virtual void rotate_log(bool force, outputStream* out = NULL);
-  void dump_loggc_header();
-
-  /* If "force" is true, force log file rotation from outside the JVM */
-  bool should_rotate(bool force) {
-    return force ||
-             ((GCLogFileSize != 0) && (_bytes_written >= (jlong)GCLogFileSize));
-  }
-};
-
-#ifndef PRODUCT
-// unit test for checking -Xloggc:<filename> parsing result
-void test_loggc_filename();
-#endif
-
 void ostream_init();
 void ostream_init_log();
 void ostream_exit();
--- a/test/TEST.ROOT	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/TEST.ROOT	Fri Dec 18 12:39:02 2015 -0800
@@ -30,7 +30,7 @@
 keys=cte_test jcmd nmt regression gc stress
 
 groups=TEST.groups [closed/TEST.groups]
-requires.properties=sun.arch.data.model
+requires.properties=sun.arch.data.model java.version
 
 # Tests using jtreg 4.1 b12 features
 requiredVersion=4.1 b12
--- a/test/TEST.groups	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/TEST.groups	Fri Dec 18 12:39:02 2015 -0800
@@ -145,7 +145,6 @@
   gc/g1/TestShrinkAuxiliaryData25.java \
   gc/g1/TestShrinkAuxiliaryData30.java \
   gc/survivorAlignment \
-  gc/TestGCLogRotationViaJcmd.java \
   runtime/InternalApi/ThreadCpuTimesDeadlock.java \
   runtime/NMT/JcmdSummaryDiff.java \
   runtime/RedefineTests/RedefineAnnotations.java
--- a/test/compiler/floatingpoint/ModNaN.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/compiler/floatingpoint/ModNaN.java	Fri Dec 18 12:39:02 2015 -0800
@@ -25,7 +25,6 @@
  * @test
  * @bug 8015396
  * @summary double a%b returns NaN for some (a,b) (|a| < inf, |b|>0) (on Core i7 980X)
- * @ignore 8015396
  * @run main ModNaN
  */
 public class ModNaN {
--- a/test/gc/6941923/Test6941923.java	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test Test6941923.java
- * @bug 6941923
- * @summary test flags for gc log rotation
- * @library /testlibrary
- * @modules java.base/sun.misc
- *          java.management
- * @run main/othervm/timeout=600 Test6941923
- *
- */
-import jdk.test.lib.*;
-import java.io.File;
-import java.io.FilenameFilter;
-import java.util.ArrayList;
-import java.util.Arrays;
-
-class GCLoggingGenerator {
-
-    public static void main(String[] args) throws Exception {
-
-        long sizeOfLog = Long.parseLong(args[0]);
-        long lines = sizeOfLog / 80;
-        // a full GC generates at least one line that is no shorter than 80 chars;
-        // for some GCs, 2 shorter lines are generated
-        for (long i = 0; i < lines; i++) {
-            System.gc();
-        }
-    }
-}
-
-public class Test6941923 {
-
-    static final File currentDirectory = new File(".");
-    static final String logFileName = "test.log";
-    static final int logFileSizeK = 16;
-    static FilenameFilter logFilter = new FilenameFilter() {
-        @Override
-        public boolean accept(File dir, String name) {
-            return name.startsWith(logFileName);
-        }
-    };
-
-    public static void cleanLogs() {
-        for (File log : currentDirectory.listFiles(logFilter)) {
-            if (!log.delete()) {
-                throw new Error("Unable to delete " + log.getAbsolutePath());
-            }
-        }
-    }
-
-    public static void runTest(int numberOfFiles) throws Exception {
-
-        ArrayList<String> args = new ArrayList();
-        String[] logOpts = new String[]{
-            "-cp", System.getProperty("java.class.path"),
-            "-Xloggc:" + logFileName,
-            "-XX:-DisableExplicitGC", // to sure that System.gc() works
-            "-XX:+PrintGC", "-XX:+PrintGCDetails", "-XX:+UseGCLogFileRotation",
-            "-XX:NumberOfGCLogFiles=" + numberOfFiles,
-            "-XX:GCLogFileSize=" + logFileSizeK + "K", "-Xmx128M"};
-        // System.getProperty("test.java.opts") is '' if no options are set;
-        // such empty strings need to be skipped
-        String[] externalVMopts = System.getProperty("test.java.opts").length() == 0
-                ? new String[0]
-                : System.getProperty("test.java.opts").split(" ");
-        args.addAll(Arrays.asList(externalVMopts));
-        args.addAll(Arrays.asList(logOpts));
-        args.add(GCLoggingGenerator.class.getName());
-        args.add(String.valueOf(numberOfFiles * logFileSizeK * 1024));
-        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args.toArray(new String[0]));
-        pb.redirectErrorStream(true);
-        pb.redirectOutput(new File(GCLoggingGenerator.class.getName() + ".log"));
-        Process process = pb.start();
-        int result = process.waitFor();
-        if (result != 0) {
-            throw new Error("Unexpected exit code = " + result);
-        }
-        File[] logs = currentDirectory.listFiles(logFilter);
-        int smallFilesNumber = 0;
-        for (File log : logs) {
-            if (log.length() < logFileSizeK * 1024) {
-                smallFilesNumber++;
-            }
-        }
-        if (logs.length != numberOfFiles) {
-            throw new Error("There are only " + logs.length + " logs instead " + numberOfFiles);
-        }
-        if (smallFilesNumber > 1) {
-            throw new Error("There should maximum one log with size < " + logFileSizeK + "K");
-        }
-    }
-
-    public static void main(String[] args) throws Exception {
-        cleanLogs();
-        runTest(1);
-        cleanLogs();
-        runTest(3);
-        cleanLogs();
-    }
-}
--- a/test/gc/7072527/TestFullGCCount.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/7072527/TestFullGCCount.java	Fri Dec 18 12:39:02 2015 -0800
@@ -26,7 +26,7 @@
  * @bug 7072527
  * @summary CMS: JMM GC counters overcount in some cases
  * @modules java.management
- * @run main/othervm -XX:+PrintGC TestFullGCCount
+ * @run main/othervm -Xlog:gc TestFullGCCount
  */
 import java.util.*;
 import java.lang.management.*;
--- a/test/gc/TestDisableExplicitGC.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/TestDisableExplicitGC.java	Fri Dec 18 12:39:02 2015 -0800
@@ -26,9 +26,9 @@
  * @requires vm.opt.DisableExplicitGC == null
  * @summary Verify GC behavior with DisableExplicitGC flag.
  * @library /testlibrary
- * @run main/othervm                             -XX:+PrintGCDetails TestDisableExplicitGC
- * @run main/othervm/fail -XX:+DisableExplicitGC -XX:+PrintGCDetails TestDisableExplicitGC
- * @run main/othervm      -XX:-DisableExplicitGC -XX:+PrintGCDetails TestDisableExplicitGC
+ * @run main/othervm                             -Xlog:gc=debug TestDisableExplicitGC
+ * @run main/othervm/fail -XX:+DisableExplicitGC -Xlog:gc=debug TestDisableExplicitGC
+ * @run main/othervm      -XX:-DisableExplicitGC -Xlog:gc=debug TestDisableExplicitGC
  */
 import java.lang.management.GarbageCollectorMXBean;
 import java.util.List;
--- a/test/gc/TestGCLogRotationViaJcmd.java	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test TestGCLogRotationViaJcmd.java
- * @bug 7090324
- * @summary test for gc log rotation via jcmd
- * @library /testlibrary
- * @modules java.base/sun.misc
- *          java.management
- * @run main/othervm -Xloggc:test.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=3 TestGCLogRotationViaJcmd
- *
- */
-import jdk.test.lib.*;
-import java.io.File;
-import java.io.FilenameFilter;
-
-public class TestGCLogRotationViaJcmd {
-
-    static final File currentDirectory = new File(".");
-    static final String LOG_FILE_NAME = "test.log";
-    static final int NUM_LOGS = 3;
-
-    static FilenameFilter logFilter = new FilenameFilter() {
-        @Override
-        public boolean accept(File dir, String name) {
-            return name.startsWith(LOG_FILE_NAME);
-        }
-    };
-
-    public static void main(String[] args) throws Exception {
-        // Grab the pid from the current java process
-        String pid = Integer.toString(ProcessTools.getProcessId());
-
-        // Create a JDKToolLauncher
-        JDKToolLauncher jcmd = JDKToolLauncher.create("jcmd")
-                                              .addToolArg(pid)
-                                              .addToolArg("GC.rotate_log");
-
-        for (int times = 1; times < NUM_LOGS; times++) {
-            // Run jcmd <pid> GC.rotate_log
-            ProcessBuilder pb = new ProcessBuilder(jcmd.getCommand());
-
-            // Make sure we didn't crash
-            OutputAnalyzer output = new OutputAnalyzer(pb.start());
-            output.shouldHaveExitValue(0);
-        }
-
-        // GC log check
-        File[] logs = currentDirectory.listFiles(logFilter);
-        if (logs.length != NUM_LOGS) {
-            throw new Error("There are only " + logs.length
-                                              + " logs instead " + NUM_LOGS);
-        }
-
-    }
-
-}
-
--- a/test/gc/TestVerifyDuringStartup.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/TestVerifyDuringStartup.java	Fri Dec 18 12:39:02 2015 -0800
@@ -48,6 +48,7 @@
     Collections.addAll(vmOpts, new String[] {"-XX:-UseTLAB",
                                              "-XX:+UnlockDiagnosticVMOptions",
                                              "-XX:+VerifyDuringStartup",
+                                             "-Xlog:gc+verify=debug",
                                              "-version"});
 
     System.out.print("Testing:\n" + JDKToolFinder.getJDKTool("java"));
@@ -62,7 +63,7 @@
 
     System.out.println("Output:\n" + output.getOutput());
 
-    output.shouldContain("[Verifying");
+    output.shouldContain("Verifying");
     output.shouldHaveExitValue(0);
   }
 }
--- a/test/gc/TestVerifySilently.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/TestVerifySilently.java	Fri Dec 18 12:39:02 2015 -0800
@@ -60,7 +60,7 @@
                                              "-XX:+VerifyDuringStartup",
                                              "-XX:+VerifyBeforeGC",
                                              "-XX:+VerifyAfterGC",
-                                             "-XX:" + (verifySilently ? "+":"-") + "VerifySilently",
+                                             (verifySilently ? "-Xlog:gc":"-Xlog:gc+verify=debug"),
                                              RunSystemGC.class.getName()});
     ProcessBuilder pb =
       ProcessTools.createJavaProcessBuilder(vmOpts.toArray(new String[vmOpts.size()]));
@@ -76,11 +76,11 @@
     OutputAnalyzer output;
 
     output = runTest(false);
-    output.shouldContain("[Verifying");
+    output.shouldContain("Verifying");
     output.shouldHaveExitValue(0);
 
     output = runTest(true);
-    output.shouldNotContain("[Verifying");
+    output.shouldNotContain("Verifying");
     output.shouldHaveExitValue(0);
   }
 }
--- a/test/gc/arguments/TestTargetSurvivorRatioFlag.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/arguments/TestTargetSurvivorRatioFlag.java	Fri Dec 18 12:39:02 2015 -0800
@@ -79,7 +79,7 @@
 
     // Patterns used during log parsing
     public static final String TENURING_DISTRIBUTION = "Desired survivor size";
-    public static final String AGE_TABLE_ENTRY = "-[\\s]+age[\\s]+([0-9]+):[\\s]+([0-9]+)[\\s]+bytes,[\\s]+([0-9]+)[\\s]+total";
+    public static final String AGE_TABLE_ENTRY = ".*-[\\s]+age[\\s]+([0-9]+):[\\s]+([0-9]+)[\\s]+bytes,[\\s]+([0-9]+)[\\s]+total";
     public static final String MAX_SURVIVOR_SIZE = "Max survivor size: ([0-9]+)";
 
     public static void main(String args[]) throws Exception {
@@ -133,7 +133,7 @@
                 "-XX:+UnlockDiagnosticVMOptions",
                 "-XX:+WhiteBoxAPI",
                 "-XX:+UseAdaptiveSizePolicy",
-                "-XX:+PrintTenuringDistribution",
+                "-Xlog:gc+age=trace",
                 "-XX:MaxTenuringThreshold=" + MAX_TENURING_THRESHOLD,
                 "-XX:NewSize=" + MAX_NEW_SIZE,
                 "-XX:MaxNewSize=" + MAX_NEW_SIZE,
--- a/test/gc/arguments/TestUnrecognizedVMOptionsHandling.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/arguments/TestUnrecognizedVMOptionsHandling.java	Fri Dec 18 12:39:02 2015 -0800
@@ -39,11 +39,11 @@
   public static void main(String args[]) throws Exception {
     // The first two JAVA processes are expected to fail, but with a correct VM option suggestion
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-      "-XX:+PrintGc",
+      "-XX:+UseDynamicNumberOfGcThreads",
       "-version"
       );
     OutputAnalyzer outputWithError = new OutputAnalyzer(pb.start());
-    outputWithError.shouldContain("Did you mean '(+/-)PrintGC'?");
+    outputWithError.shouldContain("Did you mean '(+/-)UseDynamicNumberOfGCThreads'?");
     if (outputWithError.getExitValue() == 0) {
       throw new RuntimeException("Not expected to get exit value 0");
     }
@@ -60,11 +60,11 @@
 
     // The last JAVA process should run successfully for the purpose of sanity check
     pb = ProcessTools.createJavaProcessBuilder(
-      "-XX:+PrintGC",
+      "-XX:+UseDynamicNumberOfGCThreads",
       "-version"
       );
     OutputAnalyzer outputWithNoError = new OutputAnalyzer(pb.start());
-    outputWithNoError.shouldNotContain("Did you mean '(+/-)PrintGC'?");
+    outputWithNoError.shouldNotContain("Did you mean '(+/-)UseDynamicNumberOfGCThreads'?");
     outputWithNoError.shouldHaveExitValue(0);
   }
 }
--- a/test/gc/arguments/TestVerifyBeforeAndAfterGCFlags.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/arguments/TestVerifyBeforeAndAfterGCFlags.java	Fri Dec 18 12:39:02 2015 -0800
@@ -43,18 +43,18 @@
 public class TestVerifyBeforeAndAfterGCFlags {
 
     // VerifyBeforeGC:[Verifying threads heap tenured eden syms strs zone dict metaspace chunks hand C-heap code cache ]
-    public static final String VERIFY_BEFORE_GC_PATTERN = "VerifyBeforeGC:\\[Verifying\\s+([^]\\s]+\\s+)+\\]";
+    public static final String VERIFY_BEFORE_GC_PATTERN = "Verifying Before GC";
     // VerifyBeforeGC: VerifyBeforeGC: VerifyBeforeGC:
     public static final String VERIFY_BEFORE_GC_CORRUPTED_PATTERN = "VerifyBeforeGC:(?!\\[Verifying[^]]+\\])";
 
     // VerifyAfterGC:[Verifying threads heap tenured eden syms strs zone dict metaspace chunks hand C-heap code cache ]
-    public static final String VERIFY_AFTER_GC_PATTERN = "VerifyAfterGC:\\[Verifying\\s+([^]\\s]+\\s+)+\\]";
+    public static final String VERIFY_AFTER_GC_PATTERN = "Verifying After GC";
     // VerifyAfterGC: VerifyAfterGC: VerifyAfterGC:
     public static final String VERIFY_AFTER_GC_CORRUPTED_PATTERN = "VerifyAfterGC:(?!\\[Verifying[^]]+\\])";
 
     public static void main(String args[]) throws Exception {
         String[] filteredOpts = Utils.getFilteredTestJavaOpts(
-                                    new String[] { "-Xloggc:",
+                                    new String[] { "-Xlog:gc+verify=debug",
                                                    "-XX:+UseGCLogFileRotation",
                                                    "-XX:-DisplayVMOutput",
                                                    "VerifyBeforeGC",
@@ -74,6 +74,7 @@
         }
 
         Collections.addAll(vmOpts, new String[] {
+                                       "-Xlog:gc+verify=debug",
                                        "-Xmx5m",
                                        "-Xms5m",
                                        "-Xmn3m",
--- a/test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java	Fri Dec 18 12:39:02 2015 -0800
@@ -59,9 +59,7 @@
       "-Xmn" + YoungGenSize,
       "-XX:+UseConcMarkSweepGC",
       "-XX:" + (enableUnloading ? "+" : "-") + "CMSClassUnloadingEnabled",
-      "-XX:+PrintHeapAtGC",
-      "-XX:+PrintGCDetails",
-      "-XX:+PrintGCTimeStamps",
+      "-Xlog:gc",
       TestCMSClassUnloadingEnabledHWM.AllocateBeyondMetaspaceSize.class.getName(),
       "" + MetaspaceSize);
     return new OutputAnalyzer(pb.start());
@@ -79,16 +77,16 @@
     // -XX:-CMSClassUnloadingEnabled is used, so we expect a full GC instead of a concurrent cycle.
     OutputAnalyzer out = runWithoutCMSClassUnloading();
 
-    out.shouldMatch(".*Full GC.*");
-    out.shouldNotMatch(".*CMS Initial Mark.*");
+    out.shouldMatch(".*Pause Full.*");
+    out.shouldNotMatch(".*Pause Initial Mark.*");
   }
 
   public static void testWithCMSClassUnloading() throws Exception {
     // -XX:+CMSClassUnloadingEnabled is used, so we expect a concurrent cycle instead of a full GC.
     OutputAnalyzer out = runWithCMSClassUnloading();
 
-    out.shouldMatch(".*CMS Initial Mark.*");
-    out.shouldNotMatch(".*Full GC.*");
+    out.shouldMatch(".*Pause Initial Mark.*");
+    out.shouldNotMatch(".*Pause Full.*");
   }
 
   public static void main(String args[]) throws Exception {
--- a/test/gc/class_unloading/TestG1ClassUnloadingHWM.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/class_unloading/TestG1ClassUnloadingHWM.java	Fri Dec 18 12:39:02 2015 -0800
@@ -54,8 +54,7 @@
       "-Xmn" + YoungGenSize,
       "-XX:+UseG1GC",
       "-XX:" + (enableUnloading ? "+" : "-") + "ClassUnloadingWithConcurrentMark",
-      "-XX:+PrintHeapAtGC",
-      "-XX:+PrintGCDetails",
+      "-Xlog:gc",
       TestG1ClassUnloadingHWM.AllocateBeyondMetaspaceSize.class.getName(),
       "" + MetaspaceSize,
       "" + YoungGenSize);
@@ -74,16 +73,16 @@
     // -XX:-ClassUnloadingWithConcurrentMark is used, so we expect a full GC instead of a concurrent cycle.
     OutputAnalyzer out = runWithoutG1ClassUnloading();
 
-    out.shouldMatch(".*Full GC.*");
-    out.shouldNotMatch(".*initial-mark.*");
+    out.shouldMatch(".*Pause Full.*");
+    out.shouldNotMatch(".*Pause Initial Mark.*");
   }
 
   public static void testWithG1ClassUnloading() throws Exception {
     // -XX:+ClassUnloadingWithConcurrentMark is used, so we expect a concurrent cycle instead of a full GC.
     OutputAnalyzer out = runWithG1ClassUnloading();
 
-    out.shouldMatch(".*initial-mark.*");
-    out.shouldNotMatch(".*Full GC.*");
+    out.shouldMatch(".*Pause Initial Mark.*");
+    out.shouldNotMatch(".*Pause Full.*");
   }
 
   public static void main(String args[]) throws Exception {
--- a/test/gc/cms/DisableResizePLAB.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/cms/DisableResizePLAB.java	Fri Dec 18 12:39:02 2015 -0800
@@ -28,7 +28,7 @@
  * @author filipp.zhinkin@oracle.com, john.coomes@oracle.com
  * @requires vm.gc=="ConcMarkSweep" | vm.gc=="null"
  * @summary Run CMS with PLAB resizing disabled and a small OldPLABSize
- * @run main/othervm -XX:+UseConcMarkSweepGC -XX:-ResizePLAB -XX:OldPLABSize=1k -Xmx256m -XX:+PrintGCDetails DisableResizePLAB
+ * @run main/othervm -XX:+UseConcMarkSweepGC -XX:-ResizePLAB -XX:OldPLABSize=1k -Xmx256m -Xlog:gc=debug DisableResizePLAB
  */
 
 public class DisableResizePLAB {
--- a/test/gc/cms/TestCMSScavengeBeforeRemark.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/cms/TestCMSScavengeBeforeRemark.java	Fri Dec 18 12:39:02 2015 -0800
@@ -27,7 +27,7 @@
  * @bug 8139868
  * @requires vm.gc=="ConcMarkSweep" | vm.gc=="null"
  * @summary Run CMS with CMSScavengeBeforeRemark
- * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+CMSScavengeBeforeRemark -XX:+ExplicitGCInvokesConcurrent -Xmx256m -XX:+PrintGCDetails TestCMSScavengeBeforeRemark
+ * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+CMSScavengeBeforeRemark -XX:+ExplicitGCInvokesConcurrent -Xmx256m -Xlog:gc=debug TestCMSScavengeBeforeRemark
  */
 
 public class TestCMSScavengeBeforeRemark {
--- a/test/gc/ergonomics/TestDynamicNumberOfGCThreads.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/ergonomics/TestDynamicNumberOfGCThreads.java	Fri Dec 18 12:39:02 2015 -0800
@@ -50,7 +50,7 @@
 
   private static void testDynamicNumberOfGCThreads(String gcFlag) throws Exception {
     // UseDynamicNumberOfGCThreads and TraceDynamicGCThreads enabled
-    String[] baseArgs = {"-XX:+" + gcFlag, "-Xmx10M", "-XX:+PrintGCDetails",  "-XX:+UseDynamicNumberOfGCThreads", "-XX:+TraceDynamicGCThreads", GCTest.class.getName()};
+    String[] baseArgs = {"-XX:+" + gcFlag, "-Xmx10M", "-XX:+UseDynamicNumberOfGCThreads", "-Xlog:gc+task=trace", GCTest.class.getName()};
 
     // Base test with gc and +UseDynamicNumberOfGCThreads:
     ProcessBuilder pb_enabled = ProcessTools.createJavaProcessBuilder(baseArgs);
--- a/test/gc/g1/TestEagerReclaimHumongousRegions.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/g1/TestEagerReclaimHumongousRegions.java	Fri Dec 18 12:39:02 2015 -0800
@@ -82,7 +82,7 @@
             "-Xms128M",
             "-Xmx128M",
             "-Xmn16M",
-            "-XX:+PrintGC",
+            "-Xlog:gc",
             ReclaimRegionFast.class.getName());
 
         Pattern p = Pattern.compile("Full GC");
--- a/test/gc/g1/TestEagerReclaimHumongousRegionsClearMarkBits.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/g1/TestEagerReclaimHumongousRegionsClearMarkBits.java	Fri Dec 18 12:39:02 2015 -0800
@@ -120,7 +120,7 @@
             "-Xmn2M",
             "-XX:G1HeapRegionSize=1M",
             "-XX:InitiatingHeapOccupancyPercent=0", // Want to have as much as possible initial marks.
-            "-XX:+PrintGC",
+            "-Xlog:gc",
             "-XX:+UnlockDiagnosticVMOptions",
             "-XX:+VerifyAfterGC",
             "-XX:ConcGCThreads=1", // Want to make marking as slow as possible.
--- a/test/gc/g1/TestEagerReclaimHumongousRegionsWithRefs.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/g1/TestEagerReclaimHumongousRegionsWithRefs.java	Fri Dec 18 12:39:02 2015 -0800
@@ -94,7 +94,7 @@
             "-Xms128M",
             "-Xmx128M",
             "-Xmn16M",
-            "-XX:+PrintGC",
+            "-Xlog:gc",
             ReclaimRegionFast.class.getName());
 
         Pattern p = Pattern.compile("Full GC");
--- a/test/gc/g1/TestG1TraceEagerReclaimHumongousObjects.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/g1/TestG1TraceEagerReclaimHumongousObjects.java	Fri Dec 18 12:39:02 2015 -0800
@@ -49,20 +49,18 @@
                                                "-Xmx128M",
                                                "-Xmn16M",
                                                "-XX:G1HeapRegionSize=1M",
-                                               "-XX:+PrintGC",
+                                               "-Xlog:gc+phases=trace",
                                                "-XX:+UnlockExperimentalVMOptions",
-                                               "-XX:G1LogLevel=finest",
-                                               "-XX:+G1TraceEagerReclaimHumongousObjects",
                                                GCTest.class.getName());
 
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
 
    // As G1EagerReclaimHumongousObjects is set (the default), the logs below should be displayed.
    // And GCTest doesn't have humongous objects, so the values should be zero.
-    output.shouldContain("[Humongous Reclaim");
-    output.shouldContain("[Humongous Total: 0]");
-    output.shouldContain("[Humongous Candidate: 0]");
-    output.shouldContain("[Humongous Reclaimed: 0]");
+    output.shouldContain("Humongous Reclaim");
+    output.shouldContain("Humongous Total: 0");
+    output.shouldContain("Humongous Candidate: 0");
+    output.shouldContain("Humongous Reclaimed: 0");
 
     output.shouldHaveExitValue(0);
   }
@@ -73,19 +71,17 @@
                                                "-Xmx128M",
                                                "-Xmn16M",
                                                "-XX:G1HeapRegionSize=1M",
-                                               "-XX:+PrintGC",
+                                               "-Xlog:gc+phases=trace,gc+humongous=trace",
                                                "-XX:+UnlockExperimentalVMOptions",
-                                               "-XX:G1LogLevel=finest",
-                                               "-XX:+G1TraceEagerReclaimHumongousObjects",
                                                GCWithHumongousObjectTest.class.getName());
 
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
 
    // As G1ReclaimDeadHumongousObjectsAtYoungGC is set (the default), the logs below should be displayed.
-    output.shouldContain("[Humongous Reclaim");
-    output.shouldContain("[Humongous Total");
-    output.shouldContain("[Humongous Candidate");
-    output.shouldContain("[Humongous Reclaimed");
+    output.shouldContain("Humongous Reclaim");
+    output.shouldContain("Humongous Total");
+    output.shouldContain("Humongous Candidate");
+    output.shouldContain("Humongous Reclaimed");
 
     // As G1TraceReclaimDeadHumongousObjectsAtYoungGC is set and GCWithHumongousObjectTest has humongous objects,
     // these logs should be displayed.
--- a/test/gc/g1/TestGCLogMessages.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/g1/TestGCLogMessages.java	Fri Dec 18 12:39:02 2015 -0800
@@ -24,7 +24,7 @@
 /*
  * @test TestGCLogMessages
  * @bug 8035406 8027295 8035398 8019342 8027959 8048179 8027962 8069330
- * @summary Ensure that the PrintGCDetails output for a minor GC with G1
+ * @summary Ensure the output for a minor GC with G1
  * includes the expected necessary messages.
  * @key gc
  * @library /testlibrary
@@ -38,7 +38,7 @@
 public class TestGCLogMessages {
 
     private enum Level {
-        OFF, FINER, FINEST;
+        OFF, DEBUG, TRACE;
         public boolean lessOrEqualTo(Level other) {
             return this.compareTo(other) < 0;
         }
@@ -56,36 +56,36 @@
 
     private LogMessageWithLevel allLogMessages[] = new LogMessageWithLevel[] {
         // Update RS
-        new LogMessageWithLevel("Scan HCC (ms)", Level.FINER),
+        new LogMessageWithLevel("Scan HCC", Level.DEBUG),
         // Ext Root Scan
-        new LogMessageWithLevel("Thread Roots (ms)", Level.FINEST),
-        new LogMessageWithLevel("StringTable Roots (ms)", Level.FINEST),
-        new LogMessageWithLevel("Universe Roots (ms)", Level.FINEST),
-        new LogMessageWithLevel("JNI Handles Roots (ms)", Level.FINEST),
-        new LogMessageWithLevel("ObjectSynchronizer Roots (ms)", Level.FINEST),
-        new LogMessageWithLevel("FlatProfiler Roots", Level.FINEST),
-        new LogMessageWithLevel("Management Roots", Level.FINEST),
-        new LogMessageWithLevel("SystemDictionary Roots", Level.FINEST),
-        new LogMessageWithLevel("CLDG Roots", Level.FINEST),
-        new LogMessageWithLevel("JVMTI Roots", Level.FINEST),
-        new LogMessageWithLevel("SATB Filtering", Level.FINEST),
-        new LogMessageWithLevel("CM RefProcessor Roots", Level.FINEST),
-        new LogMessageWithLevel("Wait For Strong CLD", Level.FINEST),
-        new LogMessageWithLevel("Weak CLD Roots", Level.FINEST),
+        new LogMessageWithLevel("Thread Roots:", Level.DEBUG),
+        new LogMessageWithLevel("StringTable Roots:", Level.DEBUG),
+        new LogMessageWithLevel("Universe Roots:", Level.DEBUG),
+        new LogMessageWithLevel("JNI Handles Roots:", Level.DEBUG),
+        new LogMessageWithLevel("ObjectSynchronizer Roots:", Level.DEBUG),
+        new LogMessageWithLevel("FlatProfiler Roots", Level.DEBUG),
+        new LogMessageWithLevel("Management Roots", Level.DEBUG),
+        new LogMessageWithLevel("SystemDictionary Roots", Level.DEBUG),
+        new LogMessageWithLevel("CLDG Roots", Level.DEBUG),
+        new LogMessageWithLevel("JVMTI Roots", Level.DEBUG),
+        new LogMessageWithLevel("SATB Filtering", Level.DEBUG),
+        new LogMessageWithLevel("CM RefProcessor Roots", Level.DEBUG),
+        new LogMessageWithLevel("Wait For Strong CLD", Level.DEBUG),
+        new LogMessageWithLevel("Weak CLD Roots", Level.DEBUG),
         // Redirty Cards
-        new LogMessageWithLevel("Redirty Cards", Level.FINER),
-        new LogMessageWithLevel("Parallel Redirty", Level.FINEST),
-        new LogMessageWithLevel("Redirtied Cards", Level.FINEST),
+        new LogMessageWithLevel("Redirty Cards", Level.DEBUG),
+        new LogMessageWithLevel("Parallel Redirty", Level.DEBUG),
+        new LogMessageWithLevel("Redirtied Cards", Level.DEBUG),
         // Misc Top-level
-        new LogMessageWithLevel("Code Root Purge", Level.FINER),
-        new LogMessageWithLevel("String Dedup Fixup", Level.FINER),
-        new LogMessageWithLevel("Expand Heap After Collection", Level.FINER),
+        new LogMessageWithLevel("Code Root Purge", Level.DEBUG),
+        new LogMessageWithLevel("String Dedup Fixup", Level.DEBUG),
+        new LogMessageWithLevel("Expand Heap After Collection", Level.DEBUG),
         // Free CSet
-        new LogMessageWithLevel("Young Free CSet", Level.FINEST),
-        new LogMessageWithLevel("Non-Young Free CSet", Level.FINEST),
+        new LogMessageWithLevel("Young Free CSet", Level.TRACE),
+        new LogMessageWithLevel("Non-Young Free CSet", Level.TRACE),
         // Humongous Eager Reclaim
-        new LogMessageWithLevel("Humongous Reclaim", Level.FINER),
-        new LogMessageWithLevel("Humongous Register", Level.FINER),
+        new LogMessageWithLevel("Humongous Reclaim", Level.DEBUG),
+        new LogMessageWithLevel("Humongous Register", Level.DEBUG),
     };
 
     void checkMessagesAtLevel(OutputAnalyzer output, LogMessageWithLevel messages[], Level level) throws Exception {
@@ -116,53 +116,49 @@
         pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
                                                    "-XX:+UseStringDeduplication",
                                                    "-Xmx10M",
-                                                   "-XX:+PrintGCDetails",
+                                                   "-Xlog:gc+phases=debug",
                                                    GCTest.class.getName());
 
         output = new OutputAnalyzer(pb.start());
-        checkMessagesAtLevel(output, allLogMessages, Level.FINER);
+        checkMessagesAtLevel(output, allLogMessages, Level.DEBUG);
 
         pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
                                                    "-XX:+UseStringDeduplication",
                                                    "-Xmx10M",
-                                                   "-XX:+PrintGCDetails",
-                                                   "-XX:+UnlockExperimentalVMOptions",
-                                                   "-XX:G1LogLevel=finest",
+                                                   "-Xlog:gc+phases=trace",
                                                    GCTest.class.getName());
 
         output = new OutputAnalyzer(pb.start());
-        checkMessagesAtLevel(output, allLogMessages, Level.FINEST);
+        checkMessagesAtLevel(output, allLogMessages, Level.TRACE);
         output.shouldHaveExitValue(0);
     }
 
     LogMessageWithLevel exhFailureMessages[] = new LogMessageWithLevel[] {
-        new LogMessageWithLevel("Evacuation Failure", Level.FINER),
-        new LogMessageWithLevel("Recalculate Used", Level.FINEST),
-        new LogMessageWithLevel("Remove Self Forwards", Level.FINEST),
-        new LogMessageWithLevel("Restore RemSet", Level.FINEST),
+        new LogMessageWithLevel("Evacuation Failure", Level.DEBUG),
+        new LogMessageWithLevel("Recalculate Used", Level.TRACE),
+        new LogMessageWithLevel("Remove Self Forwards", Level.TRACE),
+        new LogMessageWithLevel("Restore RemSet", Level.TRACE),
     };
 
     private void testWithToSpaceExhaustionLogs() throws Exception {
         ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
                                                                   "-Xmx32M",
                                                                   "-Xmn16M",
-                                                                  "-XX:+PrintGCDetails",
+                                                                  "-Xlog:gc+phases=debug",
                                                                   GCTestWithToSpaceExhaustion.class.getName());
 
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
-        checkMessagesAtLevel(output, exhFailureMessages, Level.FINER);
+        checkMessagesAtLevel(output, exhFailureMessages, Level.DEBUG);
         output.shouldHaveExitValue(0);
 
         pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
                                                    "-Xmx32M",
                                                    "-Xmn16M",
-                                                   "-XX:+PrintGCDetails",
-                                                   "-XX:+UnlockExperimentalVMOptions",
-                                                   "-XX:G1LogLevel=finest",
+                                                   "-Xlog:gc+phases=trace",
                                                    GCTestWithToSpaceExhaustion.class.getName());
 
         output = new OutputAnalyzer(pb.start());
-        checkMessagesAtLevel(output, exhFailureMessages, Level.FINEST);
+        checkMessagesAtLevel(output, exhFailureMessages, Level.TRACE);
         output.shouldHaveExitValue(0);
     }
 
--- a/test/gc/g1/TestHumongousAllocInitialMark.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/g1/TestHumongousAllocInitialMark.java	Fri Dec 18 12:39:02 2015 -0800
@@ -46,11 +46,11 @@
             "-Xmx" + heapSize + "m",
             "-XX:G1HeapRegionSize=" + heapRegionSize + "m",
             "-XX:InitiatingHeapOccupancyPercent=" + initiatingHeapOccupancyPercent,
-            "-XX:+PrintGC",
+            "-Xlog:gc",
             HumongousObjectAllocator.class.getName());
 
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
-        output.shouldContain("GC pause (G1 Humongous Allocation) (young) (initial-mark)");
+        output.shouldContain("Pause Initial Mark (G1 Humongous Allocation)");
         output.shouldNotContain("Full GC");
         output.shouldHaveExitValue(0);
     }
--- a/test/gc/g1/TestHumongousAllocNearlyFullRegion.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/g1/TestHumongousAllocNearlyFullRegion.java	Fri Dec 18 12:39:02 2015 -0800
@@ -44,11 +44,11 @@
             "-Xms" + heapSize + "m",
             "-Xmx" + heapSize + "m",
             "-XX:G1HeapRegionSize=" + heapRegionSize + "m",
-            "-XX:+PrintGC",
+            "-Xlog:gc",
             HumongousObjectAllocator.class.getName());
 
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
-        output.shouldContain("GC pause (G1 Humongous Allocation) (young) (initial-mark)");
+        output.shouldContain("Pause Initial Mark (G1 Humongous Allocation)");
         output.shouldHaveExitValue(0);
     }
 
--- a/test/gc/g1/TestNoEagerReclaimOfHumongousRegions.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/g1/TestNoEagerReclaimOfHumongousRegions.java	Fri Dec 18 12:39:02 2015 -0800
@@ -34,7 +34,7 @@
  * @build TestNoEagerReclaimOfHumongousRegions
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+PrintGC -XX:+UseG1GC -XX:MaxTenuringThreshold=0 -XX:G1RSetSparseRegionEntries=32 -XX:G1HeapRegionSize=1m -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+G1TraceEagerReclaimHumongousObjects TestNoEagerReclaimOfHumongousRegions
+ * @run main/othervm -Xbootclasspath/a:. -Xlog:gc,gc+humongous=debug -XX:+UseG1GC -XX:MaxTenuringThreshold=0 -XX:G1RSetSparseRegionEntries=32 -XX:G1HeapRegionSize=1m -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI TestNoEagerReclaimOfHumongousRegions
  */
 
 import java.util.LinkedList;
--- a/test/gc/g1/TestPLABOutput.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/g1/TestPLABOutput.java	Fri Dec 18 12:39:02 2015 -0800
@@ -54,8 +54,7 @@
             "-XX:+WhiteBoxAPI",
             "-XX:+UseG1GC",
             "-Xmx10M",
-            "-XX:+PrintGC",
-            "-XX:+PrintPLAB",
+            "-Xlog:gc+plab=debug",
             GCTest.class.getName()
             };
 
@@ -66,7 +65,7 @@
 
         System.out.println(output.getStdout());
 
-        String pattern = "#0:.*allocated = (\\d+).*";
+        String pattern = ".*GC\\(0\\) .*allocated = (\\d+).*";
         Pattern r = Pattern.compile(pattern);
         Matcher m = r.matcher(output.getStdout());
 
--- a/test/gc/g1/TestPrintGCDetails.java	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test TestPrintGCDetails
- * @bug 8010738
- * @summary Ensure that the PrintGCDetails for a full GC with G1 includes Metaspace.
- * @key gc
- * @key regression
- * @library /testlibrary
- * @modules java.base/sun.misc
- *          java.management
- */
-
-import jdk.test.lib.ProcessTools;
-import jdk.test.lib.OutputAnalyzer;
-
-public class TestPrintGCDetails {
-  public static void main(String[] args) throws Exception {
-
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
-                                                              "-XX:+PrintGCDetails",
-                                                              SystemGCTest.class.getName());
-
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-
-    System.out.println("Output:\n" + output.getOutput());
-
-    output.shouldContain("Metaspace");
-    output.shouldHaveExitValue(0);
-  }
-
-  static class SystemGCTest {
-    public static void main(String [] args) {
-      System.out.println("Calling System.gc()");
-      System.gc();
-    }
-  }
-}
--- a/test/gc/g1/TestPrintRegionRememberedSetInfo.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/g1/TestPrintRegionRememberedSetInfo.java	Fri Dec 18 12:39:02 2015 -0800
@@ -57,7 +57,6 @@
             "-Xmx10m",
             "-XX:+ExplicitGCInvokesConcurrent",
             "-XX:+UnlockDiagnosticVMOptions",
-            "-XX:+G1PrintRegionLivenessInfo",
             "-XX:G1HeapRegionSize=1M",
             "-XX:InitiatingHeapOccupancyPercent=0",
         };
@@ -79,13 +78,13 @@
     public static void main(String[] args) throws Exception {
         String result;
 
-        result = runTest("-XX:+G1PrintRegionLivenessInfo");
+        result = runTest("-Xlog:gc+liveness=trace");
         // check that we got region statistics output
         if (result.indexOf("PHASE") == -1) {
             throw new RuntimeException("Unexpected output from -XX:+PrintRegionLivenessInfo found.");
         }
 
-        result = runTest("-XX:-G1PrintRegionLivenessInfo");
+        result = runTest("-Xlog:gc+liveness");
         if (result.indexOf("remset") != -1) {
             throw new RuntimeException("Should find remembered set information in output.");
         }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestRemsetLogging.java	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestRemsetLogging.java
+ * @bug 8013895 8129977
+ * @library /testlibrary
+ * @modules java.base/sun.misc
+ *          java.management/sun.management
+ * @build TestRemsetLoggingTools TestRemsetLogging
+ * @summary Verify output of -Xlog:gc+remset*=trace
+ * @run main TestRemsetLogging
+ *
+ * Test the output of -Xlog:gc+remset*=trace in conjunction with G1SummarizeRSetStatsPeriod.
+ */
+
+public class TestRemsetLogging {
+
+    public static void main(String[] args) throws Exception {
+        String result;
+
+        if (!TestRemsetLoggingTools.testingG1GC()) {
+            return;
+        }
+
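+        // Calls below are runTest(additionalArgs, numGCs); the helper and the
+        // expectation checks are defined in TestRemsetLoggingTools.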
+        // no remembered set summary output
+        result = TestRemsetLoggingTools.runTest(null, 0);
+        TestRemsetLoggingTools.expectRSetSummaries(result, 0, 0);
+
+        // no remembered set summary output
+        result = TestRemsetLoggingTools.runTest(null, 2);
+        TestRemsetLoggingTools.expectRSetSummaries(result, 0, 0);
+
+        // no remembered set summary output
+        result = TestRemsetLoggingTools.runTest(new String[] { "-XX:G1SummarizeRSetStatsPeriod=1" }, 3);
+        TestRemsetLoggingTools.expectRSetSummaries(result, 0, 0);
+
+        // single remembered set summary output at the end
+        result = TestRemsetLoggingTools.runTest(new String[] { "-Xlog:gc+remset*=trace" }, 0);
+        TestRemsetLoggingTools.expectRSetSummaries(result, 1, 0);
+
+        // single remembered set summary output at the end
+        result = TestRemsetLoggingTools.runTest(new String[] { "-Xlog:gc+remset*=trace" }, 2);
+        TestRemsetLoggingTools.expectRSetSummaries(result, 1, 0);
+
+        // single remembered set summary output
+        result = TestRemsetLoggingTools.runTest(new String[] { "-Xlog:gc+remset*=trace", "-XX:G1SummarizeRSetStatsPeriod=1" }, 0);
+        TestRemsetLoggingTools.expectRSetSummaries(result, 1, 0);
+
+        // two times remembered set summary output
+        result = TestRemsetLoggingTools.runTest(new String[] { "-Xlog:gc+remset*=trace", "-XX:G1SummarizeRSetStatsPeriod=1" }, 1);
+        TestRemsetLoggingTools.expectRSetSummaries(result, 1, 2);
+
+        // four times remembered set summary output
+        result = TestRemsetLoggingTools.runTest(new String[] { "-Xlog:gc+remset*=trace", "-XX:G1SummarizeRSetStatsPeriod=1" }, 3);
+        TestRemsetLoggingTools.expectRSetSummaries(result, 1, 6);
+
+        // three times remembered set summary output
+        result = TestRemsetLoggingTools.runTest(new String[] { "-Xlog:gc+remset*=trace", "-XX:G1SummarizeRSetStatsPeriod=2" }, 3);
+        TestRemsetLoggingTools.expectRSetSummaries(result, 1, 4);
+
+        // single remembered set summary output
+        result = TestRemsetLoggingTools.runTest(new String[] { "-Xlog:gc+remset*=trace", "-XX:G1SummarizeRSetStatsPeriod=100" }, 3);
+        TestRemsetLoggingTools.expectRSetSummaries(result, 1, 2);
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestRemsetLoggingPerRegion.java	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestRemsetLoggingPerRegion.java
+ * @bug 8014078 8129977
+ * @library /testlibrary
+ * @modules java.base/sun.misc
+ *          java.management/sun.management
+ * @build TestRemsetLoggingTools TestRemsetLoggingPerRegion
+ * @summary Verify output of -Xlog:gc+remset*=trace with regard to per-region type output
+ * @run main TestRemsetLoggingPerRegion
+ */
+
+import jdk.test.lib.*;
+import java.lang.Thread;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+public class TestRemsetLoggingPerRegion {
+
+    public static void main(String[] args) throws Exception {
+        String result;
+
+        if (!TestRemsetLoggingTools.testingG1GC()) {
+            return;
+        }
+
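+        // expectPerRegionRSetSummaries also verifies the per-region type
+        // breakdown (Young/Humonguous/Free/Old regions) in each summary.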
+        // single remembered set summary output at the end
+        result = TestRemsetLoggingTools.runTest(new String[] { "-Xlog:gc+remset*=trace" }, 0);
+        TestRemsetLoggingTools.expectPerRegionRSetSummaries(result, 1, 0);
+
+        // two times remembered set summary output
+        result = TestRemsetLoggingTools.runTest(new String[] { "-Xlog:gc+remset*=trace", "-XX:G1SummarizeRSetStatsPeriod=1" }, 1);
+        TestRemsetLoggingTools.expectPerRegionRSetSummaries(result, 1, 2);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestRemsetLoggingThreads.java	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestRemsetLoggingThreads
+ * @bug 8025441
+ * @summary Ensure that various values of worker threads/concurrent
+ * refinement threads do not crash the VM.
+ * @key gc
+ * @library /testlibrary
+ * @modules java.base/sun.misc
+ *          java.management/sun.management
+ */
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import jdk.test.lib.ProcessTools;
+import jdk.test.lib.OutputAnalyzer;
+
+public class TestRemsetLoggingThreads {
+
+  private static void runTest(int refinementThreads, int workerThreads) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+                                                              "-XX:+UnlockDiagnosticVMOptions",
+                                                              "-Xlog:gc+remset+exit=trace",
+                                                              "-XX:G1ConcRefinementThreads=" + refinementThreads,
+                                                              "-XX:ParallelGCThreads=" + workerThreads,
+                                                              "-version");
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    // Only the "Concurrent RS threads times" header is checked below; the
+    // individual per-thread values (and hence the exact number of refinement
+    // threads) are no longer matched.
+
+    String pattern = "Concurrent RS threads times \\(s\\)$";
+    Matcher m = Pattern.compile(pattern, Pattern.MULTILINE).matcher(output.getStdout());
+
+    if (!m.find()) {
+      throw new Exception("Could not find correct output for concurrent RS threads times in stdout," +
+        " should match the pattern \"" + pattern + "\", but stdout is \n" + output.getStdout());
+    }
+    output.shouldHaveExitValue(0);
+  }
+
+  public static void main(String[] args) throws Exception {
+    if (!TestRemsetLoggingTools.testingG1GC()) {
+      return;
+    }
+    // different valid combinations of number of refinement and gc worker threads
+    runTest(1, 1);
+    runTest(1, 5);
+    runTest(5, 1);
+    runTest(10, 10);
+    runTest(1, 2);
+    runTest(4, 3);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestRemsetLoggingTools.java	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * Common helpers for TestRemsetLogging* tests
+ */
+
+import com.sun.management.HotSpotDiagnosticMXBean;
+import com.sun.management.VMOption;
+
+import jdk.test.lib.*;
+import java.lang.management.ManagementFactory;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+class VerifySummaryOutput {
+    // 4M size, both are directly allocated into the old gen
+    static Object[] largeObject1 = new Object[1024 * 1024];
+    static Object[] largeObject2 = new Object[1024 * 1024];
+
+    static int[] temp;
+
+    public static void main(String[] args) {
+        // create some cross-references between these objects
+        for (int i = 0; i < largeObject1.length; i++) {
+            largeObject1[i] = largeObject2;
+        }
+
+        for (int i = 0; i < largeObject2.length; i++) {
+            largeObject2[i] = largeObject1;
+        }
+
+        int numGCs = Integer.parseInt(args[0]);
+
+        if (numGCs > 0) {
+            // try to force a minor collection: the young gen is 4M, the
+            // amount of data allocated below is roughly that (4*1024*1024 +
+            // some header data)
+            for (int i = 0; i < 1024 ; i++) {
+                temp = new int[1024];
+            }
+        }
+
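+        // The allocation loop above should have triggered the first GC;
+        // force the remaining numGCs - 1 collections explicitly.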
+        for (int i = 0; i < numGCs - 1; i++) {
+            System.gc();
+        }
+    }
+}
+
+public class TestRemsetLoggingTools {
+
+    // Returns true if the VM under test is running with G1GC, i.e. G1 functionality can be tested.
+    public static boolean testingG1GC() {
+        HotSpotDiagnosticMXBean diagnostic =
+            ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
+
+        VMOption option = diagnostic.getVMOption("UseG1GC");
+        if (option.getValue().equals("false")) {
+          System.out.println("Skipping this test. It is only a G1 test.");
+          return false;
+        }
+        return true;
+    }
+
+    public static String runTest(String[] additionalArgs, int numGCs) throws Exception {
+        ArrayList<String> finalargs = new ArrayList<String>();
+        String[] defaultArgs = new String[] {
+            "-XX:+UseG1GC",
+            "-Xmn4m",
+            "-Xms20m",
+            "-Xmx20m",
+            "-XX:InitiatingHeapOccupancyPercent=100", // we don't want the additional GCs due to initial marking
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:G1HeapRegionSize=1M",
+        };
+
+        finalargs.addAll(Arrays.asList(defaultArgs));
+
+        if (additionalArgs != null) {
+            finalargs.addAll(Arrays.asList(additionalArgs));
+        }
+
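+        // The driver class and the requested number of GCs (read back as
+        // args[0] in VerifySummaryOutput) go last on the command line.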
+        finalargs.add(VerifySummaryOutput.class.getName());
+        finalargs.add(String.valueOf(numGCs));
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            finalargs.toArray(new String[0]));
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+        output.shouldHaveExitValue(0);
+
+        String result = output.getStdout();
+        return result;
+    }
+
+    private static void checkCounts(int expected, int actual, String which) throws Exception {
+        if (expected != actual) {
+            throw new Exception("RSet summaries mention " + which + " regions an incorrect number of times. Expected " + expected + ", got " + actual);
+        }
+    }
+
+    public static void expectPerRegionRSetSummaries(String result, int expectedCumulative, int expectedPeriodic) throws Exception {
+        expectRSetSummaries(result, expectedCumulative, expectedPeriodic);
+        int actualYoung = result.split("Young regions").length - 1;
+        int actualHumonguous = result.split("Humonguous regions").length - 1;
+        int actualFree = result.split("Free regions").length - 1;
+        int actualOther = result.split("Old regions").length - 1;
+
+        // the strings we check for above are printed four times per summary
+        int expectedPerRegionTypeInfo = (expectedCumulative + expectedPeriodic) * 4;
+
+        checkCounts(expectedPerRegionTypeInfo, actualYoung, "Young");
+        checkCounts(expectedPerRegionTypeInfo, actualHumonguous, "Humonguous");
+        checkCounts(expectedPerRegionTypeInfo, actualFree, "Free");
+        checkCounts(expectedPerRegionTypeInfo, actualOther, "Old");
+    }
+
+    public static void expectRSetSummaries(String result, int expectedCumulative, int expectedPeriodic) throws Exception {
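+        // Every RS summary mentions "concurrent refinement"; only cumulative
+        // summaries also print "Cumulative RS summary", so the periodic count
+        // is the difference between the two.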
+        int actualTotal = result.split("concurrent refinement").length - 1;
+        int actualCumulative = result.split("Cumulative RS summary").length - 1;
+
+        if (expectedCumulative != actualCumulative) {
+            throw new Exception("Incorrect amount of RSet summaries at the end. Expected " + expectedCumulative + ", got " + actualCumulative);
+        }
+
+        if (expectedPeriodic != (actualTotal - actualCumulative)) {
+            throw new Exception("Incorrect amount of per-period RSet summaries at the end. Expected " + expectedPeriodic + ", got " + (actualTotal - actualCumulative));
+        }
+    }
+}
--- a/test/gc/g1/TestShrinkAuxiliaryData.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/g1/TestShrinkAuxiliaryData.java	Fri Dec 18 12:39:02 2015 -0800
@@ -49,7 +49,7 @@
         "-XX:+UseG1GC",
         "-XX:G1HeapRegionSize=" + REGION_SIZE,
         "-XX:-ExplicitGCInvokesConcurrent",
-        "-XX:+PrintGCDetails",
+        "-Xlog:gc=debug",
         "-XX:+UnlockDiagnosticVMOptions",
         "-XX:+WhiteBoxAPI",
         "-Xbootclasspath/a:.",
--- a/test/gc/g1/TestStringDeduplicationTools.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/g1/TestStringDeduplicationTools.java	Fri Dec 18 12:39:02 2015 -0800
@@ -304,10 +304,8 @@
         }
 
         public static OutputAnalyzer run() throws Exception {
-            return runTest("-XX:+PrintGC",
-                           "-XX:+PrintGCDetails",
+            return runTest("-Xlog:gc=debug,gc+stringdedup=trace",
                            "-XX:+UseStringDeduplication",
-                           "-XX:+PrintStringDeduplicationStatistics",
                            "-XX:StringDeduplicationAgeThreshold=" + DefaultAgeThreshold,
                            InternedTest.class.getName(),
                            "" + DefaultAgeThreshold);
@@ -333,11 +331,10 @@
         OutputAnalyzer output = DeduplicationTest.run(LargeNumberOfStrings,
                                                       DefaultAgeThreshold,
                                                       YoungGC,
-                                                      "-XX:+PrintGC",
-                                                      "-XX:+PrintStringDeduplicationStatistics");
+                                                      "-Xlog:gc,gc+stringdedup=trace");
         output.shouldNotContain("Full GC");
-        output.shouldContain("GC pause (G1 Evacuation Pause) (young)");
-        output.shouldContain("GC concurrent-string-deduplication");
+        output.shouldContain("Pause Young (G1 Evacuation Pause)");
+        output.shouldContain("Concurrent String Deduplication");
         output.shouldContain("Deduplicated:");
         output.shouldHaveExitValue(0);
     }
@@ -347,11 +344,10 @@
         OutputAnalyzer output = DeduplicationTest.run(LargeNumberOfStrings,
                                                       DefaultAgeThreshold,
                                                       FullGC,
-                                                      "-XX:+PrintGC",
-                                                      "-XX:+PrintStringDeduplicationStatistics");
-        output.shouldNotContain("GC pause (G1 Evacuation Pause) (young)");
+                                                      "-Xlog:gc,gc+stringdedup=trace");
+        output.shouldNotContain("Pause Young (G1 Evacuation Pause)");
         output.shouldContain("Full GC");
-        output.shouldContain("GC concurrent-string-deduplication");
+        output.shouldContain("Concurrent String Deduplication");
         output.shouldContain("Deduplicated:");
         output.shouldHaveExitValue(0);
     }
@@ -361,10 +357,9 @@
         OutputAnalyzer output = DeduplicationTest.run(LargeNumberOfStrings,
                                                       DefaultAgeThreshold,
                                                       YoungGC,
-                                                      "-XX:+PrintGC",
-                                                      "-XX:+PrintStringDeduplicationStatistics",
+                                                      "-Xlog:gc,gc+stringdedup=trace",
                                                       "-XX:+StringDeduplicationResizeALot");
-        output.shouldContain("GC concurrent-string-deduplication");
+        output.shouldContain("Concurrent String Deduplication");
         output.shouldContain("Deduplicated:");
         output.shouldNotContain("Resize Count: 0");
         output.shouldHaveExitValue(0);
@@ -375,10 +370,9 @@
         OutputAnalyzer output = DeduplicationTest.run(LargeNumberOfStrings,
                                                       DefaultAgeThreshold,
                                                       YoungGC,
-                                                      "-XX:+PrintGC",
-                                                      "-XX:+PrintStringDeduplicationStatistics",
+                                                      "-Xlog:gc,gc+stringdedup=trace",
                                                       "-XX:+StringDeduplicationRehashALot");
-        output.shouldContain("GC concurrent-string-deduplication");
+        output.shouldContain("Concurrent String Deduplication");
         output.shouldContain("Deduplicated:");
         output.shouldNotContain("Rehash Count: 0");
         output.shouldNotContain("Hash Seed: 0x0");
@@ -392,9 +386,8 @@
         output = DeduplicationTest.run(SmallNumberOfStrings,
                                        MaxAgeThreshold,
                                        YoungGC,
-                                       "-XX:+PrintGC",
-                                       "-XX:+PrintStringDeduplicationStatistics");
-        output.shouldContain("GC concurrent-string-deduplication");
+                                       "-Xlog:gc,gc+stringdedup=trace");
+        output.shouldContain("Concurrent String Deduplication");
         output.shouldContain("Deduplicated:");
         output.shouldHaveExitValue(0);
 
@@ -402,9 +395,8 @@
         output = DeduplicationTest.run(SmallNumberOfStrings,
                                        MinAgeThreshold,
                                        YoungGC,
-                                       "-XX:+PrintGC",
-                                       "-XX:+PrintStringDeduplicationStatistics");
-        output.shouldContain("GC concurrent-string-deduplication");
+                                       "-Xlog:gc,gc+stringdedup=trace");
+        output.shouldContain("Concurrent String Deduplication");
         output.shouldContain("Deduplicated:");
         output.shouldHaveExitValue(0);
 
@@ -426,20 +418,20 @@
     public static void testPrintOptions() throws Exception {
         OutputAnalyzer output;
 
-        // Test without PrintGC and without PrintStringDeduplicationStatistics
+        // Test without any GC logging enabled
         output = DeduplicationTest.run(SmallNumberOfStrings,
                                        DefaultAgeThreshold,
                                        YoungGC);
-        output.shouldNotContain("GC concurrent-string-deduplication");
+        output.shouldNotContain("Concurrent String Deduplication");
         output.shouldNotContain("Deduplicated:");
         output.shouldHaveExitValue(0);
 
-        // Test with PrintGC but without PrintStringDeduplicationStatistics
+        // Test with -Xlog:gc+stringdedup
         output = DeduplicationTest.run(SmallNumberOfStrings,
                                        DefaultAgeThreshold,
                                        YoungGC,
-                                       "-XX:+PrintGC");
-        output.shouldContain("GC concurrent-string-deduplication");
+                                       "-Xlog:gc+stringdedup");
+        output.shouldContain("Concurrent String Deduplication");
         output.shouldNotContain("Deduplicated:");
         output.shouldHaveExitValue(0);
     }
--- a/test/gc/g1/TestStringSymbolTableStats.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/g1/TestStringSymbolTableStats.java	Fri Dec 18 12:39:02 2015 -0800
@@ -39,7 +39,7 @@
 
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
                                                               "-XX:+UnlockExperimentalVMOptions",
-                                                              "-XX:+G1TraceStringSymbolTableScrubbing",
+                                                              "-Xlog:gc+stringdedup=trace",
                                                               SystemGCTest.class.getName());
 
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
--- a/test/gc/g1/TestSummarizeRSetStats.java	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test TestSummarizeRSetStats.java
- * @bug 8013895 8129977
- * @library /testlibrary
- * @modules java.base/sun.misc
- *          java.management/sun.management
- * @build TestSummarizeRSetStatsTools TestSummarizeRSetStats
- * @summary Verify output of -XX:+G1SummarizeRSetStats
- * @run main TestSummarizeRSetStats
- *
- * Test the output of G1SummarizeRSetStats in conjunction with G1SummarizeRSetStatsPeriod.
- */
-
-public class TestSummarizeRSetStats {
-
-    public static void main(String[] args) throws Exception {
-        String result;
-
-        if (!TestSummarizeRSetStatsTools.testingG1GC()) {
-            return;
-        }
-
-        // no remembered set summary output
-        result = TestSummarizeRSetStatsTools.runTest(null, 0);
-        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 0, 0);
-
-        // no remembered set summary output
-        result = TestSummarizeRSetStatsTools.runTest(null, 2);
-        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 0, 0);
-
-        // no remembered set summary output
-        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:G1SummarizeRSetStatsPeriod=1" }, 3);
-        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 0, 0);
-
-        // single remembered set summary output at the end
-        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats" }, 0);
-        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 0);
-
-        // single remembered set summary output at the end
-        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats" }, 2);
-        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 0);
-
-        // single remembered set summary output
-        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 0);
-        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 0);
-
-        // two times remembered set summary output
-        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 1);
-        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 2);
-
-        // four times remembered set summary output
-        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 3);
-        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 6);
-
-        // three times remembered set summary output
-        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=2" }, 3);
-        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 4);
-
-        // single remembered set summary output
-        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=100" }, 3);
-        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 2);
-    }
-}
-
--- a/test/gc/g1/TestSummarizeRSetStatsPerRegion.java	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test TestSummarizeRSetStatsPerRegion.java
- * @bug 8014078 8129977
- * @library /testlibrary
- * @modules java.base/sun.misc
- *          java.management/sun.management
- * @build TestSummarizeRSetStatsTools TestSummarizeRSetStatsPerRegion
- * @summary Verify output of -XX:+G1SummarizeRSetStats in regards to per-region type output
- * @run main TestSummarizeRSetStatsPerRegion
- */
-
-import jdk.test.lib.*;
-import java.lang.Thread;
-import java.util.ArrayList;
-import java.util.Arrays;
-
-public class TestSummarizeRSetStatsPerRegion {
-
-    public static void main(String[] args) throws Exception {
-        String result;
-
-        if (!TestSummarizeRSetStatsTools.testingG1GC()) {
-            return;
-        }
-
-        // single remembered set summary output at the end
-        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats" }, 0);
-        TestSummarizeRSetStatsTools.expectPerRegionRSetSummaries(result, 1, 0);
-
-        // two times remembered set summary output
-        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 1);
-        TestSummarizeRSetStatsTools.expectPerRegionRSetSummaries(result, 1, 2);
-    }
-}
--- a/test/gc/g1/TestSummarizeRSetStatsThreads.java	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test TestSummarizeRSetStatsThreads
- * @bug 8025441
- * @summary Ensure that various values of worker threads/concurrent
- * refinement threads do not crash the VM.
- * @key gc
- * @library /testlibrary
- * @modules java.base/sun.misc
- *          java.management/sun.management
- */
-
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import jdk.test.lib.ProcessTools;
-import jdk.test.lib.OutputAnalyzer;
-
-public class TestSummarizeRSetStatsThreads {
-
-  private static void runTest(int refinementThreads, int workerThreads) throws Exception {
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
-                                                              "-XX:+UnlockDiagnosticVMOptions",
-                                                              "-XX:+G1SummarizeRSetStats",
-                                                              "-XX:G1ConcRefinementThreads=" + refinementThreads,
-                                                              "-XX:ParallelGCThreads=" + workerThreads,
-                                                              "-version");
-
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-
-    // check output to contain the string "Concurrent RS threads times (s)" followed by
-    // the correct number of values in the next line.
-
-    // a zero in refinement thread numbers indicates that the value in ParallelGCThreads should be used.
-    // Additionally use at least one thread.
-    int expectedNumRefinementThreads = refinementThreads;
-
-    // create the pattern made up of n copies of a floating point number pattern
-    String numberPattern = String.format("%0" + expectedNumRefinementThreads + "d", 0)
-      .replace("0", "\\s+\\d+\\.\\d+");
-    String pattern = "Concurrent RS threads times \\(s\\)$" + numberPattern + "$";
-    Matcher m = Pattern.compile(pattern, Pattern.MULTILINE).matcher(output.getStdout());
-
-    if (!m.find()) {
-      throw new Exception("Could not find correct output for concurrent RS threads times in stdout," +
-        " should match the pattern \"" + pattern + "\", but stdout is \n" + output.getStdout());
-    }
-    output.shouldHaveExitValue(0);
-  }
-
-  public static void main(String[] args) throws Exception {
-    if (!TestSummarizeRSetStatsTools.testingG1GC()) {
-      return;
-    }
-    // different valid combinations of number of refinement and gc worker threads
-    runTest(1, 1);
-    runTest(1, 5);
-    runTest(5, 1);
-    runTest(10, 10);
-    runTest(1, 2);
-    runTest(4, 3);
-  }
-}
--- a/test/gc/g1/TestSummarizeRSetStatsTools.java	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,151 +0,0 @@
-/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * Common helpers for TestSummarizeRSetStats* tests
- */
-
-import com.sun.management.HotSpotDiagnosticMXBean;
-import com.sun.management.VMOption;
-
-import jdk.test.lib.*;
-import java.lang.management.ManagementFactory;
-import java.util.ArrayList;
-import java.util.Arrays;
-
-class VerifySummaryOutput {
-    // 4M size, both are directly allocated into the old gen
-    static Object[] largeObject1 = new Object[1024 * 1024];
-    static Object[] largeObject2 = new Object[1024 * 1024];
-
-    static int[] temp;
-
-    public static void main(String[] args) {
-        // create some cross-references between these objects
-        for (int i = 0; i < largeObject1.length; i++) {
-            largeObject1[i] = largeObject2;
-        }
-
-        for (int i = 0; i < largeObject2.length; i++) {
-            largeObject2[i] = largeObject1;
-        }
-
-        int numGCs = Integer.parseInt(args[0]);
-
-        if (numGCs > 0) {
-            // try to force a minor collection: the young gen is 4M, the
-            // amount of data allocated below is roughly that (4*1024*1024 +
-            // some header data)
-            for (int i = 0; i < 1024 ; i++) {
-                temp = new int[1024];
-            }
-        }
-
-        for (int i = 0; i < numGCs - 1; i++) {
-            System.gc();
-        }
-    }
-}
-
-public class TestSummarizeRSetStatsTools {
-
-    // the VM is currently run using G1GC, i.e. trying to test G1 functionality.
-    public static boolean testingG1GC() {
-        HotSpotDiagnosticMXBean diagnostic =
-            ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
-
-        VMOption option = diagnostic.getVMOption("UseG1GC");
-        if (option.getValue().equals("false")) {
-          System.out.println("Skipping this test. It is only a G1 test.");
-          return false;
-        }
-        return true;
-    }
-
-    public static String runTest(String[] additionalArgs, int numGCs) throws Exception {
-        ArrayList<String> finalargs = new ArrayList<String>();
-        String[] defaultArgs = new String[] {
-            "-XX:+UseG1GC",
-            "-Xmn4m",
-            "-Xms20m",
-            "-Xmx20m",
-            "-XX:InitiatingHeapOccupancyPercent=100", // we don't want the additional GCs due to initial marking
-            "-XX:+PrintGC",
-            "-XX:+UnlockDiagnosticVMOptions",
-            "-XX:G1HeapRegionSize=1M",
-        };
-
-        finalargs.addAll(Arrays.asList(defaultArgs));
-
-        if (additionalArgs != null) {
-            finalargs.addAll(Arrays.asList(additionalArgs));
-        }
-
-        finalargs.add(VerifySummaryOutput.class.getName());
-        finalargs.add(String.valueOf(numGCs));
-
-        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-            finalargs.toArray(new String[0]));
-        OutputAnalyzer output = new OutputAnalyzer(pb.start());
-
-        output.shouldHaveExitValue(0);
-
-        String result = output.getStdout();
-        return result;
-    }
-
-    private static void checkCounts(int expected, int actual, String which) throws Exception {
-        if (expected != actual) {
-            throw new Exception("RSet summaries mention " + which + " regions an incorrect number of times. Expected " + expected + ", got " + actual);
-        }
-    }
-
-    public static void expectPerRegionRSetSummaries(String result, int expectedCumulative, int expectedPeriodic) throws Exception {
-        expectRSetSummaries(result, expectedCumulative, expectedPeriodic);
-        int actualYoung = result.split("Young regions").length - 1;
-        int actualHumonguous = result.split("Humonguous regions").length - 1;
-        int actualFree = result.split("Free regions").length - 1;
-        int actualOther = result.split("Old regions").length - 1;
-
-        // the strings we check for above are printed four times per summary
-        int expectedPerRegionTypeInfo = (expectedCumulative + expectedPeriodic) * 4;
-
-        checkCounts(expectedPerRegionTypeInfo, actualYoung, "Young");
-        checkCounts(expectedPerRegionTypeInfo, actualHumonguous, "Humonguous");
-        checkCounts(expectedPerRegionTypeInfo, actualFree, "Free");
-        checkCounts(expectedPerRegionTypeInfo, actualOther, "Old");
-    }
-
-    public static void expectRSetSummaries(String result, int expectedCumulative, int expectedPeriodic) throws Exception {
-        int actualTotal = result.split("concurrent refinement").length - 1;
-        int actualCumulative = result.split("Cumulative RS summary").length - 1;
-
-        if (expectedCumulative != actualCumulative) {
-            throw new Exception("Incorrect amount of RSet summaries at the end. Expected " + expectedCumulative + ", got " + actualCumulative);
-        }
-
-        if (expectedPeriodic != (actualTotal - actualCumulative)) {
-            throw new Exception("Incorrect amount of per-period RSet summaries at the end. Expected " + expectedPeriodic + ", got " + (actualTotal - actualCumulative));
-        }
-    }
-}
--- a/test/gc/g1/mixedgc/TestLogging.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/g1/mixedgc/TestLogging.java	Fri Dec 18 12:39:02 2015 -0800
@@ -68,10 +68,10 @@
     public static final int ALLOCATION_COUNT = 15;
 
     public static void main(String args[]) throws Exception {
-        // Test turns logging on by giving -XX:+PrintGC flag
-        test("-XX:+PrintGC");
-        // Test turns logging on by giving -XX:+PrintGCDetails
-        test("-XX:+PrintGCDetails");
+        // Test turns logging on by giving -Xlog:gc flag
+        test("-Xlog:gc");
+        // Test turns logging on by giving -Xlog:gc=debug flag
+        test("-Xlog:gc=debug");
     }
 
     private static void test(String vmFlag) throws Exception {
@@ -79,7 +79,7 @@
         OutputAnalyzer output = spawnMixedGCProvoker(vmFlag);
         System.out.println(output.getStdout());
         output.shouldHaveExitValue(0);
-        output.shouldContain("GC pause (G1 Evacuation Pause) (mixed)");
+        output.shouldContain("Pause Mixed (G1 Evacuation Pause)");
     }
 
     /**
--- a/test/gc/logging/TestGCId.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/logging/TestGCId.java	Fri Dec 18 12:39:02 2015 -0800
@@ -36,44 +36,21 @@
 
 public class TestGCId {
   public static void main(String[] args) throws Exception {
-    testGCId("UseParallelGC", "PrintGC");
-    testGCId("UseParallelGC", "PrintGCDetails");
-
-    testGCId("UseG1GC", "PrintGC");
-    testGCId("UseG1GC", "PrintGCDetails");
-
-    testGCId("UseConcMarkSweepGC", "PrintGC");
-    testGCId("UseConcMarkSweepGC", "PrintGCDetails");
-
-    testGCId("UseSerialGC", "PrintGC");
-    testGCId("UseSerialGC", "PrintGCDetails");
+    testGCId("UseParallelGC");
+    testGCId("UseG1GC");
+    testGCId("UseConcMarkSweepGC");
+    testGCId("UseSerialGC");
   }
 
   private static void verifyContainsGCIDs(OutputAnalyzer output) {
-    output.shouldMatch("^#0: \\[");
-    output.shouldMatch("^#1: \\[");
+    output.shouldMatch("\\[.*\\]\\[.*\\]\\[.*\\] GC\\(0\\) ");
+    output.shouldMatch("\\[.*\\]\\[.*\\]\\[.*\\] GC\\(1\\) ");
     output.shouldHaveExitValue(0);
   }
 
-  private static void verifyContainsNoGCIDs(OutputAnalyzer output) {
-    output.shouldNotMatch("^#[0-9]+: \\[");
-    output.shouldHaveExitValue(0);
-  }
-
-  private static void testGCId(String gcFlag, String logFlag) throws Exception {
-    // GCID logging enabled
-    ProcessBuilder pb_enabled =
-      ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", "-XX:+PrintGCID", GCTest.class.getName());
-    verifyContainsGCIDs(new OutputAnalyzer(pb_enabled.start()));
-
-    // GCID logging disabled
-    ProcessBuilder pb_disabled =
-      ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", "-XX:-PrintGCID", GCTest.class.getName());
-    verifyContainsNoGCIDs(new OutputAnalyzer(pb_disabled.start()));
-
-    // GCID logging default
+  private static void testGCId(String gcFlag) throws Exception {
     ProcessBuilder pb_default =
-      ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", GCTest.class.getName());
+      ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-Xlog:gc", "-Xmx10M", GCTest.class.getName());
     verifyContainsGCIDs(new OutputAnalyzer(pb_default.start()));
   }
 
--- a/test/gc/logging/TestPrintReferences.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/logging/TestPrintReferences.java	Fri Dec 18 12:39:02 2015 -0800
@@ -37,18 +37,18 @@
 public class TestPrintReferences {
   public static void main(String[] args) throws Exception {
     ProcessBuilder pb_enabled =
-      ProcessTools.createJavaProcessBuilder("-XX:+PrintGCDetails", "-XX:+PrintReferenceGC", "-Xmx10M", GCTest.class.getName());
+      ProcessTools.createJavaProcessBuilder("-Xlog:gc+ref=debug", "-Xmx10M", GCTest.class.getName());
     OutputAnalyzer output = new OutputAnalyzer(pb_enabled.start());
 
     String countRegex = "[0-9]+ refs";
-    String timeRegex = "[0-9]+[.,][0-9]+ secs";
+    String timeRegex = "\\([0-9]+[.,][0-9]+s, [0-9]+[.,][0-9]+s\\) [0-9]+[.,][0-9]+ms";
 
-    output.shouldMatch(
-      "#[0-9]+: \\[SoftReference, " + countRegex + ", " + timeRegex + "\\]" +
-      "#[0-9]+: \\[WeakReference, " + countRegex + ", " + timeRegex + "\\]" +
-      "#[0-9]+: \\[FinalReference, " + countRegex + ", " + timeRegex + "\\]" +
-      "#[0-9]+: \\[PhantomReference, " + countRegex + ", " + timeRegex + "\\]" +
-      "#[0-9]+: \\[JNI Weak Reference, (" + countRegex + ", )?" + timeRegex + "\\]");
+    output.shouldMatch(".* GC\\([0-9]+\\) SoftReference " + timeRegex + "\n" +
+                       ".* GC\\([0-9]+\\) WeakReference " + timeRegex + "\n" +
+                       ".* GC\\([0-9]+\\) FinalReference " + timeRegex + "\n" +
+                       ".* GC\\([0-9]+\\) PhantomReference " + timeRegex + "\n" +
+                       ".* GC\\([0-9]+\\) JNI Weak Reference " + timeRegex + "\n" +
+                       ".* GC\\([0-9]+\\) Ref Counts: Soft: [0-9]+ Weak: [0-9]+ Final: [0-9]+ Phantom: [0-9]+\n");
 
     output.shouldHaveExitValue(0);
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/parallel/TestPrintGCDetailsVerbose.java	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestPrintGCDetailsVerbose
+ * @bug 8016740
+ * @summary Tests that a JVM with the PrintGCDetails and Verbose flags does not crash when ParOldGC has no memory
+ * @key gc
+ * @requires java.version ~= ".*fastdebug"
+ * @requires vm.gc=="Parallel" | vm.gc=="null"
+ * @library /testlibrary
+ * @run main/othervm -Xmx50m -XX:+UseParallelOldGC -XX:+PrintGCDetails -XX:+Verbose TestPrintGCDetailsVerbose
+ */
+public class TestPrintGCDetailsVerbose {
+
+    public static void main(String[] args) {
+        for (int t = 0; t <= 10; t++) {
+            byte a[][] = new byte[100000][];
+            try {
+                for (int i = 0; i < a.length; i++) {
+                    a[i] = new byte[100000];
+                }
+            } catch (OutOfMemoryError oome) {
+                a = null;
+                System.out.println("OOM!");
+                continue;
+            }
+        }
+    }
+}
+
--- a/test/gc/serial/HeapChangeLogging.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/serial/HeapChangeLogging.java	Fri Dec 18 12:39:02 2015 -0800
@@ -39,11 +39,11 @@
 
 public class HeapChangeLogging {
   public static void main(String[] args) throws Exception {
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xmx128m", "-Xmn100m", "-XX:+UseSerialGC", "-XX:+PrintGC", "HeapFiller");
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xmx128m", "-Xmn100m", "-XX:+UseSerialGC", "-Xlog:gc", "HeapFiller");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
     String stdout = output.getStdout();
     System.out.println(stdout);
-    Matcher stdoutMatcher = Pattern.compile("\\[GC .Allocation Failure.*K->.*K\\(.*K\\), .* secs\\]", Pattern.MULTILINE).matcher(stdout);
+    Matcher stdoutMatcher = Pattern.compile(".*\\(Allocation Failure\\) [0-9]+[KMG]->[0-9]+[KMG]\\([0-9]+[KMG]\\)", Pattern.MULTILINE).matcher(stdout);
     if (!stdoutMatcher.find()) {
       throw new RuntimeException("No proper GC log line found");
     }
--- a/test/gc/whitebox/TestWBGC.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/gc/whitebox/TestWBGC.java	Fri Dec 18 12:39:02 2015 -0800
@@ -44,7 +44,7 @@
                 "-XX:+UnlockDiagnosticVMOptions",
                 "-XX:+WhiteBoxAPI",
                 "-XX:MaxTenuringThreshold=1",
-                "-XX:+PrintGC",
+                "-Xlog:gc",
                 GCYoungTest.class.getName());
 
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
--- a/test/runtime/7158988/FieldMonitor.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/runtime/7158988/FieldMonitor.java	Fri Dec 18 12:39:02 2015 -0800
@@ -63,7 +63,7 @@
 
   public static final String CLASS_NAME = "TestPostFieldModification";
   public static final String FIELD_NAME = "value";
-  public static final String ARGUMENTS = "-Xshare:off -XX:+PrintGC";
+  public static final String ARGUMENTS = "-Xshare:off -Xlog:gc";
 
   public static void main(String[] args)
       throws IOException, InterruptedException {
--- a/test/runtime/CommandLine/IgnoreUnrecognizedVMOptions.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/runtime/CommandLine/IgnoreUnrecognizedVMOptions.java	Fri Dec 18 12:39:02 2015 -0800
@@ -163,9 +163,9 @@
       -IgnoreUnrecognizedVMOptions               ERR                           ERR
       +IgnoreUnrecognizedVMOptions               ERR                           ERR
     */
-    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:-UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
-    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:-UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
-    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:-UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
-    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:-UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:+UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
+    runJavaAndCheckExitValue(false, "-XX:-IgnoreUnrecognizedVMOptions", "-XX:+UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:+UnlockDiagnosticVMOptions", "-XX:PrintInlining", "-version");
+    runJavaAndCheckExitValue(false, "-XX:+IgnoreUnrecognizedVMOptions", "-XX:+UnlockExperimentalVMOptions", "-XX:AlwaysSafeConstructors", "-version");
   }
 }
--- a/test/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java	Fri Dec 18 12:39:02 2015 -0800
@@ -29,7 +29,7 @@
  *          java.management
  *          jdk.attach
  *          jdk.management/sun.tools.attach
- * @run main/othervm/timeout=780 TestOptionsWithRanges
+ * @run main/othervm/timeout=900 TestOptionsWithRanges
  */
 
 import java.util.ArrayList;
@@ -63,12 +63,26 @@
         allOptionsAsMap.remove(optionName);
     }
 
+    private static void setAllowedExitCodes(String optionName, Integer... allowedExitCodes) {
+        JVMOption option = allOptionsAsMap.get(optionName);
+
+        if (option != null) {
+            option.setAllowedExitCodes(allowedExitCodes);
+        }
+    }
+
     public static void main(String[] args) throws Exception {
         int failedTests;
         List<JVMOption> allOptions;
 
         allOptionsAsMap = JVMOptionsUtils.getOptionsWithRangeAsMap();
 
+        /* Shared flags can cause the JVM to exit with error code 2 */
+        setAllowedExitCodes("SharedReadWriteSize", 2);
+        setAllowedExitCodes("SharedReadOnlySize", 2);
+        setAllowedExitCodes("SharedMiscDataSize", 2);
+        setAllowedExitCodes("SharedMiscCodeSize", 2);
+
         /*
          * Remove CICompilerCount from testing because currently it can hang system
          */
@@ -82,23 +96,13 @@
         excludeTestRange("ThreadStackSize");
 
         /*
-         * JDK-8141650
-         * Temporarily exclude SharedMiscDataSize as it will exit the VM with exit code 2 and
-         * "The shared miscellaneous data space is not large enough to preload requested classes."
-         * message at min value.
+         * JDK-8143958
+         * Temporarily exclude testing of max range for Shared* flags
          */
-        excludeTestRange("SharedMiscDataSize");
-
-        /*
-         * JDK-8142874
-         * Temporarily exclude Shared* flagse as they will exit the VM with exit code 2 and
-         * "The shared miscellaneous data space is not large enough to preload requested classes."
-         * message at max values.
-         */
-        excludeTestRange("SharedReadWriteSize");
-        excludeTestRange("SharedReadOnlySize");
-        excludeTestRange("SharedMiscDataSize");
-        excludeTestRange("SharedMiscCodeSize");
+        excludeTestMaxRange("SharedReadWriteSize");
+        excludeTestMaxRange("SharedReadOnlySize");
+        excludeTestMaxRange("SharedMiscDataSize");
+        excludeTestMaxRange("SharedMiscCodeSize");
 
         /*
          * Remove the flag controlling the size of the stack because the
@@ -116,11 +120,26 @@
          * Exclude below options as their maximum value would consume too much memory
          * and would affect other tests that run in parallel.
          */
+        excludeTestMaxRange("ConcGCThreads");
         excludeTestMaxRange("G1ConcRefinementThreads");
         excludeTestMaxRange("G1RSetRegionEntries");
         excludeTestMaxRange("G1RSetSparseRegionEntries");
         excludeTestMaxRange("G1UpdateBufferSize");
         excludeTestMaxRange("InitialBootClassLoaderMetaspaceSize");
+        excludeTestMaxRange("InitialHeapSize");
+        excludeTestMaxRange("MaxHeapSize");
+        excludeTestMaxRange("MaxRAM");
+        excludeTestMaxRange("NewSize");
+        excludeTestMaxRange("OldSize");
+        excludeTestMaxRange("ParallelGCThreads");
+
+        excludeTestMaxRange("VMThreadStackSize");
+
+        /*
+         * JDK-8145027
+         * Temporarily exclude, as the current range/constraint is not sufficient for some option combinations.
+         */
+        excludeTestRange("NUMAInterleaveGranularity");
 
         /*
          * Remove parameters controlling the code cache. As these
--- a/test/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOption.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOption.java	Fri Dec 18 12:39:02 2015 -0800
@@ -25,7 +25,10 @@
 import com.sun.tools.attach.VirtualMachine;
 import com.sun.tools.attach.AttachOperationFailedException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 import jdk.test.lib.DynamicVMOption;
 import jdk.test.lib.OutputAnalyzer;
 import jdk.test.lib.ProcessTools;
@@ -64,6 +67,8 @@
      */
     protected boolean testMaxRange;
 
+    private Set<Integer> allowedExitCodes;
+
     /**
      * Prepend string which added before testing option to the command line
      */
@@ -73,6 +78,9 @@
     protected JVMOption() {
         this.prepend = new ArrayList<>();
         prependString = new StringBuilder();
+        allowedExitCodes = new HashSet<>();
+        allowedExitCodes.add(0);
+        allowedExitCodes.add(1);
         withRange = false;
         testMinRange = true;
         testMaxRange = true;
@@ -161,6 +169,10 @@
         testMaxRange = false;
     }
 
+    public final void setAllowedExitCodes(Integer... allowedExitCodes) {
+        this.allowedExitCodes.addAll(Arrays.asList(allowedExitCodes));
+    }
+
     /**
      * Set new minimum option value
      *
@@ -384,13 +396,13 @@
             printOutputContent(out);
             result = false;
         } else if (valid == true) {
-            if ((exitCode != 0) && (exitCode != 1)) {
+            if (!allowedExitCodes.contains(exitCode)) {
                 failedMessage(name, fullOptionString, valid, "JVM exited with unexpected error code = " + exitCode);
                 printOutputContent(out);
                 result = false;
-            } else if ((exitCode == 1) && (out.getOutput().isEmpty() == true)) {
-                failedMessage(name, fullOptionString, valid, "JVM exited with error(exitcode == 1)"
-                        + ", but with empty stdout and stderr. Description of error is needed!");
+            } else if ((exitCode != 0) && (out.getOutput().isEmpty() == true)) {
+                failedMessage(name, fullOptionString, valid, "JVM exited with error(exitcode == " + exitCode +
+                        "), but with empty stdout and stderr. Description of error is needed!");
                 result = false;
             } else if (out.getOutput().contains("is outside the allowed range")) {
                 failedMessage(name, fullOptionString, valid, "JVM output contains \"is outside the allowed range\"");
--- a/test/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOptionsUtils.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOptionsUtils.java	Fri Dec 18 12:39:02 2015 -0800
@@ -161,11 +161,8 @@
             option.addPrepend("-XX:+UseConcMarkSweepGC");
         }
 
-        if (name.startsWith("Shared")) {
-            option.addPrepend("-XX:+UnlockDiagnosticVMOptions");
-            String fileName = "Test" + name + ".jsa";
-            option.addPrepend("-XX:SharedArchiveFile=" + fileName);
-            option.addPrepend("-Xshare:dump");
+        if (name.startsWith("NUMA")) {
+            option.addPrepend("-XX:+UseNUMA");
         }
 
         switch (name) {
@@ -196,6 +193,29 @@
             case "InitialTenuringThreshold":
                 option.addPrepend("-XX:MaxTenuringThreshold=" + option.getMax());
                 break;
+            case "NUMAInterleaveGranularity":
+                option.addPrepend("-XX:+UseNUMAInterleaving");
+                break;
+            case "CPUForCMSThread":
+                option.addPrepend("-XX:+BindCMSThreadToCPU");
+                break;
+            case "VerifyGCStartAt":
+                option.addPrepend("-XX:+VerifyBeforeGC");
+                option.addPrepend("-XX:+VerifyAfterGC");
+                break;
+            case "NewSizeThreadIncrease":
+                option.addPrepend("-XX:+UseSerialGC");
+                break;
+            case "SharedReadWriteSize":
+            case "SharedReadOnlySize":
+            case "SharedMiscDataSize":
+            case "SharedMiscCodeSize":
+            case "SharedBaseAddress":
+            case "SharedSymbolTableBucketSize":
+                option.addPrepend("-XX:+UnlockDiagnosticVMOptions");
+                option.addPrepend("-XX:SharedArchiveFile=TestOptionsWithRanges.jsa");
+                option.addPrepend("-Xshare:dump");
+                break;
             default:
                 /* Do nothing */
                 break;
--- a/test/runtime/CommandLine/PrintGCApplicationConcurrentTime.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/runtime/CommandLine/PrintGCApplicationConcurrentTime.java	Fri Dec 18 12:39:02 2015 -0800
@@ -24,7 +24,7 @@
  /*
  * @test
  * @bug 8026041
- * @run main/othervm -XX:+PrintGCApplicationConcurrentTime -Xcomp PrintGCApplicationConcurrentTime
+ * @run main/othervm -Xlog:safepoint -Xcomp PrintGCApplicationConcurrentTime
  */
 
 public class PrintGCApplicationConcurrentTime {
--- a/test/runtime/CommandLine/TestVMOptions.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/runtime/CommandLine/TestVMOptions.java	Fri Dec 18 12:39:02 2015 -0800
@@ -41,7 +41,7 @@
         "-XX:+IgnoreUnrecognizedVMOptions",
         "-XX:+PrintFlagsInitial");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldContain("bool PrintGCDetails");
+    output.shouldContain("bool UseSerialGC");
 
     pb = ProcessTools.createJavaProcessBuilder(
         "-XX:-PrintVMOptions", "-version");
--- a/test/runtime/CompressedOops/CompressedClassPointers.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/runtime/CompressedOops/CompressedClassPointers.java	Fri Dec 18 12:39:02 2015 -0800
@@ -39,7 +39,7 @@
             "-XX:+UnlockDiagnosticVMOptions",
             "-XX:SharedBaseAddress=8g",
             "-Xmx128m",
-            "-XX:+PrintCompressedOopsMode",
+            "-Xlog:gc+metaspace=trace",
             "-XX:+VerifyBeforeGC", "-version");
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
         output.shouldContain("Narrow klass base: 0x0000000000000000");
@@ -51,7 +51,7 @@
             "-XX:+UnlockDiagnosticVMOptions",
             "-XX:CompressedClassSpaceSize=3g",
             "-Xmx128m",
-            "-XX:+PrintCompressedOopsMode",
+            "-Xlog:gc+metaspace=trace",
             "-XX:+VerifyBeforeGC", "-version");
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
         output.shouldContain("Narrow klass base: 0x0000000000000000, Narrow klass shift: 3");
@@ -62,7 +62,7 @@
         ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
             "-XX:+UnlockDiagnosticVMOptions",
             "-Xmx30g",
-            "-XX:+PrintCompressedOopsMode",
+            "-Xlog:gc+metaspace=trace",
             "-XX:+VerifyBeforeGC", "-version");
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
         output.shouldNotContain("Narrow klass base: 0x0000000000000000");
@@ -75,7 +75,7 @@
             "-XX:+UnlockDiagnosticVMOptions",
             "-Xmx128m",
             "-XX:+UseLargePages",
-            "-XX:+PrintCompressedOopsMode",
+            "-Xlog:gc+metaspace=trace",
             "-XX:+VerifyBeforeGC", "-version");
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
         output.shouldContain("Narrow klass base:");
--- a/test/runtime/CompressedOops/CompressedClassSpaceSize.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/runtime/CompressedOops/CompressedClassSpaceSize.java	Fri Dec 18 12:39:02 2015 -0800
@@ -64,7 +64,7 @@
             // Make sure the minimum size is set correctly and printed
             pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions",
                                                        "-XX:CompressedClassSpaceSize=1m",
-                                                       "-XX:+PrintCompressedOopsMode",
+                                                       "-Xlog:gc+metaspace=trace",
                                                        "-version");
             output = new OutputAnalyzer(pb.start());
             output.shouldContain("Compressed class space size: 1048576")
@@ -74,7 +74,7 @@
             // Make sure the maximum size is set correctly and printed
             pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions",
                                                        "-XX:CompressedClassSpaceSize=3g",
-                                                       "-XX:+PrintCompressedOopsMode",
+                                                       "-Xlog:gc+metaspace=trace",
                                                        "-version");
             output = new OutputAnalyzer(pb.start());
             output.shouldContain("Compressed class space size: 3221225472")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/ErrorHandling/TestCrashOnOutOfMemoryError.java	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestCrashOnOutOfMemoryError
+ * @summary Test using -XX:+CrashOnOutOfMemoryError
+ * @library /testlibrary
+ * @build jdk.test.lib.*
+ * @run driver TestCrashOnOutOfMemoryError
+ * @bug 8138745
+ */
+
+import jdk.test.lib.OutputAnalyzer;
+import jdk.test.lib.ProcessTools;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.InputStreamReader;
+import java.io.IOException;
+
+public class TestCrashOnOutOfMemoryError {
+
+    public static void main(String[] args) throws Exception {
+        if (args.length == 1) {
+            // This is guaranteed to throw:
+            // java.lang.OutOfMemoryError: Requested array size exceeds VM limit
+            try {
+                Object[] oa = new Object[Integer.MAX_VALUE];
+                throw new Error("OOME not triggered");
+            } catch (OutOfMemoryError err) {
+                throw new Error("OOME didn't abort JVM!");
+            }
+        }
+        // else this is the main test
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+CrashOnOutOfMemoryError",
+                 "-XX:-CreateCoredumpOnCrash", "-Xmx64m", TestCrashOnOutOfMemoryError.class.getName(),"throwOOME");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        int exitValue = output.getExitValue();
+        if (0 == exitValue) {
+            // expecting a non-zero exit value
+            throw new Error("Expected to get non-zero exit value");
+        }
+
+        /* Output should look something like this. The actual text will depend on the OS and its core dump processing.
+           Aborting due to java.lang.OutOfMemoryError: Requested array size exceeds VM limit
+           # To suppress the following error report, specify this argument
+           # after -XX: or in .hotspotrc:  SuppressErrorAt=/debug.cpp:303
+           #
+           # A fatal error has been detected by the Java Runtime Environment:
+           #
+           #  Internal Error (/home/cheleswer/Desktop/jdk9/dev/hotspot/src/share/vm/utilities/debug.cpp:303), pid=6212, tid=6213
+           #  fatal error: OutOfMemory encountered: Requested array size exceeds VM limit
+           #
+           # JRE version: OpenJDK Runtime Environment (9.0) (build 1.9.0-internal-debug-cheleswer_2015_10_20_14_32-b00)
+           # Java VM: OpenJDK 64-Bit Server VM (1.9.0-internal-debug-cheleswer_2015_10_20_14_32-b00, mixed mode, tiered, compressed oops, serial gc, linux-amd64)
+           # Core dump will be written. Default location: Core dumps may be processed with "/usr/share/apport/apport %p %s %c %P" (or dumping to
+             /home/cheleswer/Desktop/core.6212)
+           #
+           # An error report file with more information is saved as:
+           # /home/cheleswer/Desktop/hs_err_pid6212.log
+           #
+           # If you would like to submit a bug report, please visit:
+           #   http://bugreport.java.com/bugreport/crash.jsp
+           #
+           Current thread is 6213
+           Dumping core ...
+           Aborted (core dumped)
+        */
+        output.shouldContain("Aborting due to java.lang.OutOfMemoryError: Requested array size exceeds VM limit");
+        // extract hs-err file
+        String hs_err_file = output.firstMatch("# *(\\S*hs_err_pid\\d+\\.log)", 1);
+        if (hs_err_file == null) {
+            throw new Error("Did not find hs-err file in output.\n");
+        }
+
+        /*
+         * Check that the hs_err file exists
+         */
+        File f = new File(hs_err_file);
+        if (!f.exists()) {
+            throw new Error("hs-err file missing at "+ f.getAbsolutePath() + ".\n");
+        }
+
+        /*
+         * Check the completeness of the hs_err file. If the last line of the
+         * hs_err file is "END." then the file is complete.
+         */
+        try (FileInputStream fis = new FileInputStream(f);
+            BufferedReader br = new BufferedReader(new InputStreamReader(fis))) {
+            String line = null;
+            String lastLine = null;
+            while ((line = br.readLine()) != null) {
+                lastLine = line;
+            }
+            if (!lastLine.equals("END.")) {
+                throw new Error("hs-err file incomplete (missing END marker.)");
+            } else {
+                System.out.println("End marker found.");
+            }
+        }
+        System.out.println("PASSED");
+    }
+}
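
The hs_err extraction above relies on the error report naming its log file on a #-prefixed line. The capture group in isolation, run against a line shaped like the sample output quoted in the test (the path below is invented):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class HsErrPathDemo {
        public static void main(String[] args) {
            Pattern p = Pattern.compile("# *(\\S*hs_err_pid\\d+\\.log)");
            Matcher m = p.matcher("# /home/user/hs_err_pid6212.log");
            if (m.find()) {
                System.out.println(m.group(1)); // /home/user/hs_err_pid6212.log
            }
        }
    }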
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/ErrorHandling/TestExitOnOutOfMemoryError.java	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestExitOnOutOfMemoryError
+ * @summary Test using -XX:ExitOnOutOfMemoryError
+ * @library /testlibrary
+ * @build jdk.test.lib.*
+ * @run driver TestExitOnOutOfMemoryError
+ * @bug 8138745
+ */
+
+import jdk.test.lib.ProcessTools;
+import jdk.test.lib.OutputAnalyzer;
+
+public class TestExitOnOutOfMemoryError {
+
+    public static void main(String[] args) throws Exception {
+        if (args.length == 1) {
+            // This is guaranteed to throw:
+            // java.lang.OutOfMemoryError: Requested array size exceeds VM limit
+            try {
+                Object[] oa = new Object[Integer.MAX_VALUE];
+                throw new Error("OOME not triggered");
+            } catch (OutOfMemoryError err) {
+                throw new Error("OOME didn't terminate JVM!");
+            }
+        }
+
+        // else this is the main test
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+ExitOnOutOfMemoryError",
+                "-Xmx64m", TestExitOnOutOfMemoryError.class.getName(), "throwOOME");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+        /*
+         * Actual output should look like this:
+         * Terminating due to java.lang.OutOfMemoryError: Requested array size exceeds VM limit
+         */
+        output.shouldHaveExitValue(3);
+        output.stdoutShouldNotBeEmpty();
+        output.shouldContain("Terminating due to java.lang.OutOfMemoryError: Requested array size exceeds VM limit");
+        System.out.println("PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/ReservedStack/ReservedStackTest.java	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test ReservedStackTest
+ * @run main/othervm -XX:-Inline -XX:CompileCommand=exclude,java/util/concurrent/locks/AbstractOwnableSynchronizer.setExclusiveOwnerThread ReservedStackTest
+ */
+
+/* The exclusion of java.util.concurrent.locks.AbstractOwnableSynchronizer.setExclusiveOwnerThread()
+ * from the compilable methods is required to ensure that the test will be able
+ * to trigger a StackOverflowError on the right method.
+ */
+
+
+/*
+ * Notes about this test:
+ * This test tries to reproduce a rare but nasty corruption bug that
+ * occurs when a StackOverflowError is thrown in some critical sections
+ * of the ReentrantLock implementation.
+ *
+ * Here's the critical section where a corruption could occur
+ * (from java.util.concurrent.ReentrantLock.java)
+ *
+ * final void lock() {
+ *     if (compareAndSetState(0, 1))
+ *         setExclusiveOwnerThread(Thread.currentThread());
+ *     else
+ *         acquire(1);
+ * }
+ *
+ * The corruption occurs when the compareAndSetState(0, 1)
+ * successfully updates the status of the lock but the method
+ * fails to set the owner because of a stack overflow.
+ * HotSpot checks for stack overflow on method invocations.
+ * The test must trigger a stack overflow either when
+ * Thread.currentThread() or setExclusiveOwnerThread() is
+ * invoked.
+ *
+ * The test starts with a recursive invocation loop until a
+ * first StackOverflowError is thrown, the Error is caught
+ * and a few dozen frames are exited. Now the thread has
+ * little free space on its execution stack and will try
+ * to trigger a stack overflow in the critical section.
+ * The test has a huge array of ReentrantLock instances.
+ * The thread invokes a recursive method which, at each
+ * of its invocations, tries to acquire the next lock
+ * in the array. The execution continues until a
+ * StackOverflowError is thrown or the end of the array
+ * is reached.
+ * If no StackOverflowError has been thrown, the test
+ * is inconclusive (recommendation: increase the size
+ * of the ReentrantLock array).
+ * The status of all ReentrantLock instances in the array is
+ * checked; if a corruption is detected, the test fails,
+ * otherwise it passes.
+ *
+ * To have a chance that the stack overflow occurs on one
+ * of the two targeted method invocations, the test is
+ * repeated in different threads. Each Java thread has a
+ * random size area allocated at the beginning of its
+ * stack to prevent false sharing. The test relies on this
+ * to have different stack alignments when it hits the targeted
+ * methods (the test could have been written with a native
+ * method with alloca, but using different Java threads makes
+ * the test 100% Java).
+ *
+ * One additional trick is required to ensure that the stack
+ * overflow will occur on the Thread.currentThread() getter
+ * or the setExclusiveOwnerThread() setter.
+ *
+ * Potential stack overflows are detected by stack banging,
+ * at method invocation time.
+ * In interpreted code, the stack banging performed for the
+ * lock() method goes further than the stack banging performed
+ * for the getter or the setter method, so the potential stack
+ * overflow is detected before entering the critical section.
+ * In compiled code, the getter and the setter are inlined,
+ * so the stack banging is only performed before entering the
+ * critical section.
+ * In order to have a stack banging that goes further for the
+ * getter/setter methods than for the lock() method, the test
+ * exploits the property that interpreter frames are (much)
+ * bigger than compiled code frames. When the test is run,
+ * a compiler option disables the compilation of the
+ * setExclusiveOwnerThread() method.
+ *
+ */
+
+import java.util.concurrent.locks.ReentrantLock;
+
+public class ReservedStackTest {
+
+    private static boolean isWindows() {
+        return System.getProperty("os.name").toLowerCase().startsWith("win");
+    }
+
+    static class ReentrantLockTest {
+
+        private ReentrantLock lockArray[];
+        // Frame sizes vary a lot between interpreted code and compiled code
+        // so the lock array has to be big enough to cover all cases.
+        // If the test fails with the message "Not conclusive test", try
+        // increasing the LOCK_ARRAY_SIZE value.
+        private static final int LOCK_ARRAY_SIZE = 8192;
+        private boolean stackOverflowErrorReceived;
+        StackOverflowError soe = null;
+        private int index = -1;
+
+        public void initialize() {
+            lockArray = new ReentrantLock[LOCK_ARRAY_SIZE];
+            for (int i = 0; i < LOCK_ARRAY_SIZE; i++) {
+                lockArray[i] = new ReentrantLock();
+            }
+            stackOverflowErrorReceived = false;
+        }
+
+        public String getResult() {
+            if (!stackOverflowErrorReceived) {
+                return "ERROR: Not conclusive test: no StackOverflowError received";
+            }
+            for (int i = 0; i < LOCK_ARRAY_SIZE; i++) {
+                if (lockArray[i].isLocked()) {
+                    if (!lockArray[i].isHeldByCurrentThread()) {
+                        StringBuilder s = new StringBuilder();
+                        s.append("FAILED: ReentrantLock ");
+                        s.append(i);
+                        s.append(" looks corrupted");
+                        return s.toString();
+                    }
+                }
+            }
+            return "PASSED";
+        }
+
+        public void run() {
+            try {
+                lockAndCall(0);
+            } catch (StackOverflowError e) {
+                soe = e;
+                stackOverflowErrorReceived = true;
+            }
+        }
+
+        private void lockAndCall(int i) {
+            index = i;
+            if (i < LOCK_ARRAY_SIZE) {
+                lockArray[i].lock();
+                lockAndCall(i + 1);
+            }
+        }
+    }
+
+    static class RunWithSOEContext implements Runnable {
+
+        int counter;
+        int deframe;
+        int decounter;
+        int setupSOEFrame;
+        int testStartFrame;
+        ReentrantLockTest test;
+
+        public RunWithSOEContext(ReentrantLockTest test, int deframe) {
+            this.test = test;
+            this.deframe = deframe;
+        }
+
+        @Override
+        @jdk.internal.vm.annotation.ReservedStackAccess
+        public void run() {
+            counter = 0;
+            decounter = deframe;
+            test.initialize();
+            recursiveCall();
+            System.out.println("Framework got StackOverflowError at frame = " + counter);
+            System.out.println("Test started execution at frame = " + (counter - deframe));
+            String result = test.getResult();
+            System.out.println(result);
+            // The feature is not fully implemented on Windows platforms,
+            // corruptions are still possible
+            if (!isWindows() && !result.contains("PASSED")) {
+                System.exit(-1);
+            }
+        }
+
+        void recursiveCall() {
+            // Unused local variables to increase the frame size
+            long l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16, l17, l18, l19;
+            long l20, l21, l22, l23, l24, l25, l26, l27, l28, l30, l31, l32, l33, l34, l35, l36, l37;
+            counter++;
+            try {
+                recursiveCall();
+            } catch (StackOverflowError e) {
+            }
+            decounter--;
+            if (decounter == 0) {
+                setupSOEFrame = counter;
+                testStartFrame = counter - deframe;
+                test.run();
+            }
+        }
+    }
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 1000; i++) {
+            // Each iteration has to be executed by a new thread. The test
+            // relies on the random size area pushed by the VM at the beginning
+            // of the stack of each Java thread it creates.
+            Thread thread = new Thread(new RunWithSOEContext(new ReentrantLockTest(), 256));
+            thread.start();
+            try {
+                thread.join();
+            } catch (InterruptedException ex) { }
+        }
+    }
+}
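
The scenario described in the long comment at the top of ReservedStackTest boils down to a two-step critical section that must not be interrupted between its steps. A condensed sketch of that shape, assuming jdk.internal.vm.annotation is accessible (MiniLock is illustrative; the real fix annotates the java.util.concurrent internals):

    import java.util.concurrent.atomic.AtomicInteger;
    import jdk.internal.vm.annotation.ReservedStackAccess;

    class MiniLock {
        private final AtomicInteger state = new AtomicInteger();
        private volatile Thread owner;

        // Without a reserved stack area, a StackOverflowError raised between
        // the CAS and the owner write leaves the lock "held" with no owner,
        // which is exactly the corruption the test tries to provoke.
        @ReservedStackAccess
        void lock() {
            if (state.compareAndSet(0, 1)) {
                owner = Thread.currentThread();
            }
        }
    }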
--- a/test/runtime/Thread/Fibonacci.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/runtime/Thread/Fibonacci.java	Fri Dec 18 12:39:02 2015 -0800
@@ -29,7 +29,7 @@
  *     make this test inherently unstable on Windows with 32-bit VM data model.
  * @requires !(os.family == "windows" & sun.arch.data.model == "32")
  * @library /testlibrary
- * @run main Fibonacci 15
+ * @run main/othervm Fibonacci 15
  */
 
 import jdk.test.lib.Asserts;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/logging/BadMap50.jasm	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * This class should throw VerifyError because the StackMap for bytecode
+ * index 45 is incorrect. The stack maps for bytecode indexes 45 and 49 are
+ * incompatible because 45 doesn't supply enough locals to satisfy 49.
+ *
+ * The astore_2 bytecode at bytecode index 45 changes the type state,
+ * preventing the stackmap mismatch.  But, if the incoming type state is used,
+ * as required by JVM Spec 8, then the verifier will detect the stackmap
+ * mismatch and throw VerifyError.
+ */
+
+super public class BadMap50
+    version 50:0
+{
+
+
+public Method "<init>":"()V"
+    stack 1 locals 1
+{
+        aload_0;
+        invokespecial    Method java/lang/Object."<init>":"()V";
+        return;
+}
+
+public static Method main:"([Ljava/lang/String;)V"
+    throws java/lang/Throwable
+    stack 0 locals 1
+{
+        return;
+}
+
+public static Method foo:"()V"
+    stack 3 locals 5
+{
+        iconst_0;
+        ifne    L5;
+        nop;
+        try t7;
+    L5:    stack_frame_type full;
+        aconst_null;
+        astore_0;
+        iconst_3;
+        istore_1;
+        try t0;
+        aconst_null;
+        astore_0;
+        endtry t0;
+        goto    L19;
+        catch t0 java/io/IOException;
+        stack_frame_type full;
+        locals_map class java/lang/Object, int;
+        stack_map class java/io/IOException;
+        astore_2;
+        aconst_null;
+        astore_0;
+        iconst_2;
+        istore_1;
+        try t1;
+    L19:    stack_frame_type full;
+        locals_map class java/lang/Object, int;
+        iconst_0;
+        istore_2;
+        endtry t1;
+        iload_1;
+        ifeq    L37;
+        nop;
+        goto    L37;
+        catch t1 #0;
+        catch t2 #0;
+        try t2;
+        stack_frame_type full;
+        locals_map class java/lang/Object, int;
+        stack_map class java/lang/Throwable;
+astore_3;
+iconst_2;
+istore_2;
+        endtry t2;
+        iload_1;
+        ifeq    L35;
+        nop;
+    L35:    stack_frame_type full;
+        locals_map class java/lang/Object, int, bogus, class java/lang/Throwable;
+aload_3;
+        athrow;
+        try t3, t4;
+    L37:    stack_frame_type full;
+        locals_map class java/lang/Object, int, int;
+        iload_2;
+        ifeq    L42;
+        nop;
+        endtry t3, t4;
+    L42:    stack_frame_type full;
+        locals_map class java/lang/Object, int, int;
+        goto    L54;
+        catch t3 java/lang/Exception;
+        try t5;
+        stack_frame_type full;
+        locals_map class java/lang/Object, int;
+        stack_map class java/lang/Exception;
+        // astore_2;                  // astore_2, at bci 45, that changes the type state.
+// pop;
+iconst_1;
+        istore_2;                  // astore_2, at bci 45, that changes the type state.
+        endtry t5;
+        goto    L54;
+        catch t4 #0;
+        catch t5 #0;
+        catch t6 #0;
+        try t6;
+      stack_frame_type full;
+      locals_map class java/lang/Object, int, int;
+      stack_map class java/lang/Throwable;
+// astore    3;
+ istore_1;
+        endtry t6;
+// aload    3;
+//         athrow;
+    L54:    stack_frame_type full;
+        locals_map class java/lang/Object, int, int;
+        goto    L57;
+    L57:    stack_frame_type full;
+        locals_map class java/lang/Object, int, int;
+        nop;
+        endtry t7;
+        return;
+        catch t7 #0;
+        stack_frame_type full;
+        stack_map class java/lang/Throwable;
+        nop;
+        athrow;
+}
+
+} // end Class BadMap50
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/logging/ClassInitializationTest.java	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test ClassInitializationTest
+ * @bug 8142976
+ * @library /testlibrary
+ * @compile BadMap50.jasm
+ * @run driver ClassInitializationTest
+ */
+
+import jdk.test.lib.*;
+
+public class ClassInitializationTest {
+
+    public static void main(String... args) throws Exception {
+
+        // (1)
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xlog:classinit=info", "-Xverify:all", "-Xmx64m", "BadMap50");
+        OutputAnalyzer out = new OutputAnalyzer(pb.start());
+        out.shouldContain("Start class verification for:");
+        out.shouldContain("End class verification for:");
+        out.shouldContain("Initializing");
+        out.shouldContain("Verification for BadMap50 failed");
+        out.shouldContain("Fail over class verification to old verifier for: BadMap50");
+
+        // (2)
+        if (Platform.isDebugBuild()) {
+          pb = ProcessTools.createJavaProcessBuilder("-Xlog:classinit=info", "-Xverify:all", "-XX:+EagerInitialization", "-Xmx64m", "-version");
+          out = new OutputAnalyzer(pb.start());
+          out.shouldContain("[Initialized").shouldContain("without side effects]");
+          out.shouldHaveExitValue(0);
+        }
+        // (3) Ensure that VerboseVerification still triggers appropriate messages.
+        pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions", "-XX:+VerboseVerification", "-Xverify:all", "-Xmx64m", "BadMap50");
+        out = new OutputAnalyzer(pb.start());
+        out.shouldContain("End class verification for:");
+        out.shouldContain("Verification for BadMap50 failed");
+        out.shouldContain("Fail over class verification to old verifier for: BadMap50");
+    }
+}
--- a/test/runtime/logging/DefaultMethodsTest.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/runtime/logging/DefaultMethodsTest.java	Fri Dec 18 12:39:02 2015 -0800
@@ -28,7 +28,7 @@
  * @library /testlibrary
  * @modules java.base/sun.misc
  *          java.management
- * @run main DefaultMethodsTest
+ * @run driver DefaultMethodsTest
  */
 
 import jdk.test.lib.*;
--- a/test/runtime/logging/SafepointTest.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/runtime/logging/SafepointTest.java	Fri Dec 18 12:39:02 2015 -0800
@@ -26,19 +26,18 @@
  * @bug 8140348
  * @summary safepoint=trace should have output from each log statement in the code
  * @library /testlibrary
- * @compile SafepointTestMain.java
  * @modules java.base/sun.misc
  *          java.management
- * @build SafepointTest
- * @run main SafepointTest
+ * @run driver SafepointTest
  */
 
 import jdk.test.lib.*;
+import java.lang.ref.WeakReference;
 
 public class SafepointTest {
     public static void main(String[] args) throws Exception {
         ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-            "-Xlog:safepoint=trace", "SafepointTestMain");
+            "-Xlog:safepoint=trace", InnerClass.class.getName());
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
         output.shouldContain("Safepoint synchronization initiated. (");
         output.shouldContain("Entering safepoint region: ");
@@ -46,4 +45,25 @@
         output.shouldContain("_at_poll_safepoint");
         output.shouldHaveExitValue(0);
     }
+
+    public static class InnerClass {
+        public static byte[] garbage;
+        public static volatile WeakReference<Object> weakref;
+
+        public static void createweakref() {
+            Object o = new Object();
+            weakref = new WeakReference<>(o);
+        }
+
+        public static void main(String[] args) throws Exception {
+            // Cause several safepoints to run GC to see safepoint messages
+            for (int i = 0; i < 2; i++) {
+                createweakref();
+                while(weakref.get() != null) {
+                    garbage = new byte[8192];
+                    System.gc();
+                }
+            }
+        }
+    }
 }
--- a/test/runtime/logging/SafepointTestMain.java	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-import java.lang.ref.WeakReference;
-
-public class SafepointTestMain {
-    public static byte[] garbage;
-    public static volatile WeakReference<Object> weakref;
-
-    public static void createweakref() {
-        Object o = new Object();
-        weakref = new WeakReference<>(o);
-    }
-
-    public static void main(String[] args) throws Exception {
-        // Cause several safepoints to run GC to see safepoint messages
-        for (int i = 0; i < 2; i++) {
-            createweakref();
-            while(weakref.get() != null) {
-                garbage = new byte[8192];
-                System.gc();
-            }
-        }
-    }
-}
--- a/test/runtime/logging/VMOperationTest.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/runtime/logging/VMOperationTest.java	Fri Dec 18 12:39:02 2015 -0800
@@ -26,21 +26,41 @@
  * @bug 8143157
  * @summary vmoperation=debug should have logging output
  * @library /testlibrary
- * @compile VMOperationTestMain.java
  * @modules java.base/sun.misc
  *          java.management
- * @run main VMOperationTest
+ * @run driver VMOperationTest
  */
 
 import jdk.test.lib.*;
+import java.lang.ref.WeakReference;
 
 public class VMOperationTest {
     public static void main(String[] args) throws Exception {
         ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-            "-Xlog:vmoperation=debug", "-Xmx64m", "-Xms64m", "VMOperationTestMain");
+            "-Xlog:vmoperation=debug", "-Xmx64m", "-Xms64m",
+            InternalClass.class.getName());
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
         output.shouldContain("VM_Operation (");
         output.shouldHaveExitValue(0);
     }
+
+    public static class InternalClass {
+        public static byte[] garbage;
+        public static volatile WeakReference<Object> weakref;
+
+        public static void createweakref() {
+            Object o = new Object();
+            weakref = new WeakReference<>(o);
+        }
+
+        // Loop until a GC runs.
+        public static void main(String[] args) throws Exception {
+            createweakref();
+            while (weakref.get() != null) {
+                garbage = new byte[8192];
+                System.gc();
+            }
+        }
+    }
 }
 
--- a/test/runtime/logging/VMOperationTestMain.java	Thu Dec 17 23:36:28 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-import java.lang.ref.WeakReference;
-
-public class VMOperationTestMain {
-    public static byte[] garbage;
-    public static volatile WeakReference<Object> weakref;
-
-    public static void createweakref() {
-        Object o = new Object();
-        weakref = new WeakReference<>(o);
-    }
-
-    // Loop until a GC runs.
-    public static void main(String[] args) throws Exception {
-        createweakref();
-        while (weakref.get() != null) {
-            garbage = new byte[8192];
-            System.gc();
-        }
-    }
-}
--- a/test/serviceability/dcmd/gc/RunFinalizationTest.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/serviceability/dcmd/gc/RunFinalizationTest.java	Fri Dec 18 12:39:02 2015 -0800
@@ -23,20 +23,22 @@
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
 
-import jdk.test.lib.OutputAnalyzer;
-import jdk.test.lib.ProcessTools;
+import jdk.test.lib.process.ProcessTools;
 
 /*
  * @test
  * @summary Test of diagnostic command GC.run_finalization
  * @library /testlibrary
+ * @library /test/lib/share/classes
  * @modules java.base/sun.misc
  *          java.compiler
  *          java.management
  *          jdk.jvmstat/sun.jvmstat.monitor
  * @build jdk.test.lib.*
  * @build jdk.test.lib.dcmd.*
+ * @build jdk.test.lib.process.*
  * @build RunFinalizationTest FinalizationRunner
  * @run main RunFinalizationTest
  */
@@ -50,8 +52,21 @@
         javaArgs.add(TEST_APP_NAME);
         ProcessBuilder testAppPb = ProcessTools.createJavaProcessBuilder(javaArgs.toArray(new String[javaArgs.size()]));
 
-        OutputAnalyzer out = ProcessTools.executeProcess(testAppPb);
-        out.stderrShouldNotMatch("^" + FinalizationRunner.FAILED + ".*")
-           .stdoutShouldMatch("^" + FinalizationRunner.PASSED + ".*");
+        final AtomicBoolean failed = new AtomicBoolean();
+        final AtomicBoolean passed = new AtomicBoolean();
+
+        Process runner = ProcessTools.startProcess(
+            "FinalizationRunner",
+            testAppPb,
+            l -> {
+                failed.compareAndSet(false, l.contains(FinalizationRunner.FAILED));
+                passed.compareAndSet(false, l.contains(FinalizationRunner.PASSED));
+            }
+        );
+        runner.waitFor();
+
+        if (failed.get() || !passed.get()) {
+            throw new Error("RunFinalizationTest failed");
+        }
     }
 }
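
The rewritten test above scans the child process output line by line and latches two one-way flags. The latching idiom in isolation (the sample lines are made up):

    import java.util.concurrent.atomic.AtomicBoolean;

    public class LatchDemo {
        public static void main(String[] args) {
            AtomicBoolean passed = new AtomicBoolean();
            // compareAndSet(false, ...) can only flip the flag from false to
            // true; later non-matching lines cannot clear it again.
            for (String line : new String[]{"noise", "PASSED: all finalizers ran", "more noise"}) {
                passed.compareAndSet(false, line.contains("PASSED"));
            }
            System.out.println(passed.get()); // true
        }
    }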
--- a/test/serviceability/dcmd/gc/RunGCTest.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/serviceability/dcmd/gc/RunGCTest.java	Fri Dec 18 12:39:02 2015 -0800
@@ -43,7 +43,7 @@
  *          jdk.jvmstat/sun.jvmstat.monitor
  * @build jdk.test.lib.*
  * @build jdk.test.lib.dcmd.*
- * @run testng/othervm -XX:+PrintGCDetails -Xloggc:RunGC.gclog -XX:-ExplicitGCInvokesConcurrent RunGCTest
+ * @run testng/othervm -Xlog:gc=debug:RunGC.gclog -XX:-ExplicitGCInvokesConcurrent RunGCTest
  */
 public class RunGCTest {
     public void run(CommandExecutor executor) {
@@ -59,7 +59,7 @@
         }
 
         OutputAnalyzer output = new OutputAnalyzer(gcLog, "");
-        output.shouldContain("[Full GC (Diagnostic Command)");
+        output.shouldContain("Pause Full (Diagnostic Command)");
     }
 
     @Test
--- a/test/serviceability/dcmd/vm/FlagsTest.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/serviceability/dcmd/vm/FlagsTest.java	Fri Dec 18 12:39:02 2015 -0800
@@ -36,14 +36,13 @@
  *          jdk.jvmstat/sun.jvmstat.monitor
  * @build jdk.test.lib.*
  * @build jdk.test.lib.dcmd.*
- * @run testng/othervm -Xmx129m -XX:+PrintGC -XX:+UnlockDiagnosticVMOptions -XX:+IgnoreUnrecognizedVMOptions -XX:+ThereShouldNotBeAnyVMOptionNamedLikeThis_Right -XX:-TieredCompilation FlagsTest
+ * @run testng/othervm -Xmx129m -XX:+UnlockDiagnosticVMOptions -XX:+IgnoreUnrecognizedVMOptions -XX:+ThereShouldNotBeAnyVMOptionNamedLikeThis_Right -XX:-TieredCompilation FlagsTest
  */
 public class FlagsTest {
     public void run(CommandExecutor executor) {
         OutputAnalyzer output = executor.execute("VM.flags");
 
         /* The following are interpreted by the JVM as actual "flags" */
-        output.shouldContain("-XX:+PrintGC");
         output.shouldContain("-XX:+UnlockDiagnosticVMOptions");
         output.shouldContain("-XX:+IgnoreUnrecognizedVMOptions");
         output.shouldContain("-XX:-TieredCompilation");
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/serviceability/logging/TestLogRotation.java	Fri Dec 18 12:39:02 2015 -0800
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestLogRotation.java
+ * @summary test flags for log rotation
+ * @library /testlibrary
+ * @modules java.base/sun.misc
+ *          java.management
+ * @run main/othervm/timeout=600 TestLogRotation
+ *
+ */
+import jdk.test.lib.*;
+import java.io.File;
+import java.io.FilenameFilter;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+class GCLoggingGenerator {
+
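+    // Run as the child process: emits roughly args[0] bytes of gc log output
+    // by repeatedly triggering System.gc().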
+    public static void main(String[] args) throws Exception {
+
+        long sizeOfLog = Long.parseLong(args[0]);
+        long lines = sizeOfLog / 80;
+        // A full GC generates at least one line that is no shorter than 80 chars;
+        // some GCs generate two shorter lines instead.
+        for (long i = 0; i < lines; i++) {
+            System.gc();
+        }
+    }
+}
+
+public class TestLogRotation {
+
+    static final File currentDirectory = new File(".");
+    static final String logFileName = "test.log";
+    static final int logFileSizeK = 16;
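+    // Matches the active log and any rotated logs; rotated files keep
+    // logFileName as a prefix.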
+    static FilenameFilter logFilter = new FilenameFilter() {
+        @Override
+        public boolean accept(File dir, String name) {
+            return name.startsWith(logFileName);
+        }
+    };
+
+    public static void cleanLogs() {
+        for (File log : currentDirectory.listFiles(logFilter)) {
+            if (!log.delete()) {
+                throw new Error("Unable to delete " + log.getAbsolutePath());
+            }
+        }
+    }
+
+    public static void runTest(int numberOfFiles) throws Exception {
+
+        ArrayList<String> args = new ArrayList<>();
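+        // Rotated unified logging: -Xlog:<tags>:<file>::filesize=<KB>,filecount=<N>.
+        // The checks below assume filesize is interpreted in KB.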
+        String[] logOpts = new String[]{
+            "-cp", System.getProperty("java.class.path"),
+            "-Xlog:gc=debug:" + logFileName + "::filesize=" + logFileSizeK + ",filecount=" + numberOfFiles,
+            "-XX:-DisableExplicitGC", // to ensure that System.gc() works
+            "-Xmx128M"};
+        // System.getProperty("test.java.opts") is "" when no options are set;
+        // skip such an empty value so it is not passed along as an argument
+        String[] externalVMopts = System.getProperty("test.java.opts").length() == 0
+                ? new String[0]
+                : System.getProperty("test.java.opts").split(" ");
+        args.addAll(Arrays.asList(externalVMopts));
+        args.addAll(Arrays.asList(logOpts));
+        args.add(GCLoggingGenerator.class.getName());
+        args.add(String.valueOf(numberOfFiles * logFileSizeK * 1024));
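+        // Ask the child to produce numberOfFiles * logFileSizeK KB of log,
+        // enough to fill every rotated file once.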
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args.toArray(new String[0]));
+        pb.redirectErrorStream(true);
+        pb.redirectOutput(new File(GCLoggingGenerator.class.getName() + ".log"));
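+        // Capture the child's console output in a separate file so it does
+        // not interfere with the gc logs counted below.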
+        Process process = pb.start();
+        int result = process.waitFor();
+        if (result != 0) {
+            throw new Error("Unexpected exit code = " + result);
+        }
+        File[] logs = currentDirectory.listFiles(logFilter);
+        int smallFilesNumber = 0;
+        for (File log : logs) {
+            if (log.length() < logFileSizeK * 1024) {
+                smallFilesNumber++;
+            }
+        }
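+        // Rotation should produce exactly numberOfFiles logs; only the file
+        // currently being written to may be smaller than the rotation size.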
+        if (logs.length != numberOfFiles) {
+            throw new Error("There are only " + logs.length + " logs instead " + numberOfFiles);
+        }
+        if (smallFilesNumber > 1) {
+            throw new Error("There should maximum one log with size < " + logFileSizeK + "K");
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        cleanLogs();
+        runTest(1);
+        cleanLogs();
+        runTest(3);
+        cleanLogs();
+    }
+}
--- a/test/test_env.sh	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/test_env.sh	Fri Dec 18 12:39:02 2015 -0800
@@ -192,6 +192,11 @@
   if [ $VM_BITS = "64" ]
   then
     VM_CPU="ppc64"
+    grep "ppc64le" vm_version.out > ${NULL}
+    if [ $? = 0 ]
+    then
+      VM_CPU="ppc64le"
+    fi
   fi
 fi
 grep "ia64" vm_version.out > ${NULL}
--- a/test/testlibrary/jdk/test/lib/Asserts.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/testlibrary/jdk/test/lib/Asserts.java	Fri Dec 18 12:39:02 2015 -0800
@@ -41,7 +41,10 @@
  * multiple times, then the line number won't provide enough context to
  * understand the failure.
  * </pre>
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib}
  */
+@Deprecated
 public class Asserts {
 
     /**
--- a/test/testlibrary/jdk/test/lib/JDKToolFinder.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/testlibrary/jdk/test/lib/JDKToolFinder.java	Fri Dec 18 12:39:02 2015 -0800
@@ -27,6 +27,11 @@
 import java.nio.file.Path;
 import java.nio.file.Paths;
 
+/**
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib}
+ */
+@Deprecated
 public final class JDKToolFinder {
 
     private JDKToolFinder() {
--- a/test/testlibrary/jdk/test/lib/JDKToolLauncher.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/testlibrary/jdk/test/lib/JDKToolLauncher.java	Fri Dec 18 12:39:02 2015 -0800
@@ -46,7 +46,10 @@
  * Process p = pb.start();
  * }
  * </pre>
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib}
  */
+@Deprecated
 public class JDKToolLauncher {
     private final String executable;
     private final List<String> vmArgs = new ArrayList<String>();
--- a/test/testlibrary/jdk/test/lib/OutputAnalyzer.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/testlibrary/jdk/test/lib/OutputAnalyzer.java	Fri Dec 18 12:39:02 2015 -0800
@@ -41,7 +41,11 @@
    *
    * @param process Process to analyze
    * @throws IOException If an I/O error occurs.
+   *
+   * @deprecated This class is deprecated. Use the one from
+   *             {@code <root>/test/lib/share/classes/jdk/test/lib/process}
    */
+  @Deprecated
   public OutputAnalyzer(Process process) throws IOException {
     OutputBuffer output = ProcessTools.getOutput(process);
     exitValue = process.exitValue();
--- a/test/testlibrary/jdk/test/lib/OutputBuffer.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/testlibrary/jdk/test/lib/OutputBuffer.java	Fri Dec 18 12:39:02 2015 -0800
@@ -23,6 +23,11 @@
 
 package jdk.test.lib;
 
+/**
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib/process}
+ */
+@Deprecated
 public class OutputBuffer {
   private final String stdout;
   private final String stderr;
--- a/test/testlibrary/jdk/test/lib/Platform.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/testlibrary/jdk/test/lib/Platform.java	Fri Dec 18 12:39:02 2015 -0800
@@ -25,6 +25,11 @@
 
 import java.util.regex.Pattern;
 
+/**
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib}
+ */
+@Deprecated
 public class Platform {
     private static final String osName      = System.getProperty("os.name");
     private static final String dataModel   = System.getProperty("sun.arch.data.model");
--- a/test/testlibrary/jdk/test/lib/ProcessTools.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/testlibrary/jdk/test/lib/ProcessTools.java	Fri Dec 18 12:39:02 2015 -0800
@@ -31,6 +31,11 @@
 import java.util.Collections;
 import java.util.List;
 
+/**
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib/process}
+ */
+@Deprecated
 public final class ProcessTools {
 
   private ProcessTools() {
--- a/test/testlibrary/jdk/test/lib/StreamPumper.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/testlibrary/jdk/test/lib/StreamPumper.java	Fri Dec 18 12:39:02 2015 -0800
@@ -27,6 +27,11 @@
 import java.io.InputStream;
 import java.io.IOException;
 
+/**
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib/process}
+ */
+@Deprecated
 public final class StreamPumper implements Runnable {
 
   private static final int BUF_SIZE = 256;
--- a/test/testlibrary/jdk/test/lib/Utils.java	Thu Dec 17 23:36:28 2015 +0000
+++ b/test/testlibrary/jdk/test/lib/Utils.java	Fri Dec 18 12:39:02 2015 -0800
@@ -55,7 +55,11 @@
 
 /**
  * Common library for various test helper functions.
+ *
+ * @deprecated This class is deprecated. Use the one from
+ *             {@code <root>/test/lib/share/classes/jdk/test/lib}
  */
+@Deprecated
 public final class Utils {
 
     /**