icedtea7-forest-2.0 hotspot
changeset 2562:d4dac52dafbc
Merge
--- a/.hgignore	Wed Jun 01 17:09:56 2011 +0100
+++ b/.hgignore	Thu Jun 02 18:59:50 2011 +0100
@@ -5,3 +5,4 @@
 ^src/share/tools/IdealGraphVisualizer/[a-zA-Z0-9]*/build/
 ^src/share/tools/IdealGraphVisualizer/build/
 ^src/share/tools/IdealGraphVisualizer/dist/
+^.hgtip
--- a/.hgtags	Wed Jun 01 17:09:56 2011 +0100
+++ b/.hgtags	Thu Jun 02 18:59:50 2011 +0100
@@ -163,3 +163,17 @@
 bd586e392d93b7ed7a1636dcc8da2b6a4203a102 jdk7-b136
 bd586e392d93b7ed7a1636dcc8da2b6a4203a102 hs21-b06
 591c7dc0b2ee879f87a7b5519a5388e0d81520be icedtea-1.14
+2dbcb4a4d8dace5fe78ceb563b134f1fb296cd8f jdk7-b137
+2dbcb4a4d8dace5fe78ceb563b134f1fb296cd8f hs21-b07
+0930dc920c185afbf40fed9a655290b8e5b16783 jdk7-b138
+0930dc920c185afbf40fed9a655290b8e5b16783 hs21-b08
+611e19a16519d6fb5deea9ab565336e6e6ee475d jdk7-b139
+611e19a16519d6fb5deea9ab565336e6e6ee475d hs21-b09
+d283b82966712b353fa307845a1316da42a355f4 jdk7-b140
+d283b82966712b353fa307845a1316da42a355f4 hs21-b10
+5d07913abd59261c77f24cc04a759cb75d804099 jdk7-b141
+3aea9e9feb073f5500e031be6186666bcae89aa2 hs21-b11
+9ad1548c6b63d596c411afc35147ffd5254426d9 jdk7-b142
+9ad1548c6b63d596c411afc35147ffd5254426d9 hs21-b12
+c149193c768b8b7233da4c3a3fdc0756b975848e hs21-b13
+c149193c768b8b7233da4c3a3fdc0756b975848e jdk7-b143
--- a/agent/make/Makefile	Wed Jun 01 17:09:56 2011 +0100
+++ b/agent/make/Makefile	Thu Jun 02 18:59:50 2011 +0100
@@ -257,7 +257,7 @@
 all: filelist
 	@mkdir -p $(OUTPUT_DIR)
 	@echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
-	$(JAVAC) -source 1.4 -classpath $(CLASSPATH) -deprecation -sourcepath $(SRC_DIR) -g -d $(OUTPUT_DIR) @filelist
+	$(JAVAC) -classpath $(CLASSPATH) -deprecation -sourcepath $(SRC_DIR) -g -d $(OUTPUT_DIR) @filelist
 	$(RMIC) -classpath $(OUTPUT_DIR) -d $(OUTPUT_DIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
 	rm -f $(OUTPUT_DIR)/sun/jvm/hotspot/utilities/soql/sa.js
 	cp $(SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(OUTPUT_DIR)/sun/jvm/hotspot/utilities/soql
@@ -269,7 +269,7 @@
 allprof: filelist
 	@mkdir -p $(OUTPUT_DIR)
 	@echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
-	$(JAVAC) -source 1.4 -J-Xprof -classpath $(CLASSPATH) -deprecation -sourcepath $(SRC_DIR) -g -d $(OUTPUT_DIR) @filelist
+	$(JAVAC) -J-Xprof -classpath $(CLASSPATH) -deprecation -sourcepath $(SRC_DIR) -g -d $(OUTPUT_DIR) @filelist
 	$(RMIC) -classpath $(OUTPUT_DIR) -d $(OUTPUT_DIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
 	rm -f $(OUTPUT_DIR)/sun/jvm/hotspot/utilities/soql/sa.js
 	cp $(SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(OUTPUT_DIR)/sun/jvm/hotspot/utilities/soql
--- a/agent/src/os/solaris/proc/libproc.h	Wed Jun 01 17:09:56 2011 +0100
+++ b/agent/src/os/solaris/proc/libproc.h	Thu Jun 02 18:59:50 2011 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -420,7 +420,22 @@
 /*
  * Stack frame iteration interface.
  */
+#ifdef SOLARIS_11_B159_OR_LATER
+/* building on Nevada-B159 or later so define the new callback */
+typedef int proc_stack_f(
+    void *,             /* the cookie given to Pstack_iter() */
+    const prgregset_t,  /* the frame's registers */
+    uint_t,             /* argc for the frame's function */
+    const long *,       /* argv for the frame's function */
+    int,                /* bitwise flags describing the frame (see below) */
+    int);               /* a signal number */
+
+#define PR_SIGNAL_FRAME 1   /* called by a signal handler */
+#define PR_FOUND_SIGNAL 2   /* we found the corresponding signal number */
+#else
+/* building on Nevada-B158 or earlier so define the old callback */
 typedef int proc_stack_f(void *, const prgregset_t, uint_t, const long *);
+#endif
 
 extern int Pstack_iter(struct ps_prochandle *,
     const prgregset_t, proc_stack_f *, void *);
--- a/agent/src/os/solaris/proc/salibproc.h	Wed Jun 01 17:09:56 2011 +0100
+++ b/agent/src/os/solaris/proc/salibproc.h	Thu Jun 02 18:59:50 2011 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,7 +101,23 @@
 /*
  * Stack frame iteration interface.
  */
+#ifdef SOLARIS_11_B159_OR_LATER
+/* building on Nevada-B159 or later so define the new callback */
+typedef int proc_stack_f(
+    void *,             /* the cookie given to Pstack_iter() */
+    const prgregset_t,  /* the frame's registers */
+    uint_t,             /* argc for the frame's function */
+    const long *,       /* argv for the frame's function */
+    int,                /* bitwise flags describing the frame (see below) */
+    int);               /* a signal number */
+
+#define PR_SIGNAL_FRAME 1   /* called by a signal handler */
+#define PR_FOUND_SIGNAL 2   /* we found the corresponding signal number */
+#else
+/* building on Nevada-B158 or earlier so define the old callback */
 typedef int proc_stack_f(void *, const prgregset_t, uint_t, const long *);
+#endif
+
 extern int Pstack_iter(struct ps_prochandle *,
     const prgregset_t, proc_stack_f *, void *);
--- a/agent/src/os/solaris/proc/saproc.cpp	Wed Jun 01 17:09:56 2011 +0100
+++ b/agent/src/os/solaris/proc/saproc.cpp	Thu Jun 02 18:59:50 2011 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,9 @@
 #include "salibproc.h"
 #include "sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal.h"
+#ifndef SOLARIS_11_B159_OR_LATER
+#include <sys/utsname.h>
+#endif
 #include <thread_db.h>
 #include <strings.h>
 #include <limits.h>
@@ -40,8 +43,22 @@
 #define SYMBOL_BUF_SIZE 256
 #define ERR_MSG_SIZE (PATH_MAX + 256)
 
-// debug mode
+// debug modes
 static int _libsaproc_debug = 0;
+#ifndef SOLARIS_11_B159_OR_LATER
+static bool _Pstack_iter_debug = false;
+
+static void dprintf_2(const char* format,...) {
+  if (_Pstack_iter_debug) {
+    va_list alist;
+
+    va_start(alist, format);
+    fputs("Pstack_iter DEBUG: ", stderr);
+    vfprintf(stderr, format, alist);
+    va_end(alist);
+  }
+}
+#endif // !SOLARIS_11_B159_OR_LATER
 
 static void print_debug(const char* format,...) {
   if (_libsaproc_debug) {
@@ -450,6 +467,7 @@
   return 0;
 }
 
+// Pstack_iter() proc_stack_f callback prior to Nevada-B159
 static int fill_cframe_list(void *cd, const prgregset_t regs, uint_t argc,
                             const long *argv) {
   DebuggerWith2Objects* dbgo2 = (DebuggerWith2Objects*) cd;
@@ -472,6 +490,14 @@
   return 0;
 }
 
+// Pstack_iter() proc_stack_f callback in Nevada-B159 or later
+/*ARGSUSED*/
+static int
+wrapper_fill_cframe_list(void *cd, const prgregset_t regs, uint_t argc,
+                         const long *argv, int frame_flags, int sig) {
+  return(fill_cframe_list(cd, regs, argc, argv));
+}
+
 // part of the class sharing workaround
 
 // FIXME: !!HACK ALERT!!
@@ -970,6 +996,11 @@
              TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
 }
 
+#ifndef SOLARIS_11_B159_OR_LATER
+// building on Nevada-B158 or earlier so more hoops to jump through
+static bool has_newer_Pstack_iter = false;  // older version by default
+#endif
+
 /*
  * Class:     sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal
  * Method:    fillCFrameList0
@@ -997,7 +1028,24 @@
   env->ReleaseLongArrayElements(regsArray, ptr, JNI_ABORT);
   CHECK_EXCEPTION_(0);
 
-  Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs, fill_cframe_list, &dbgo2);
+
+#ifdef SOLARIS_11_B159_OR_LATER
+  // building on Nevada-B159 or later so use the new callback
+  Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
+              wrapper_fill_cframe_list, &dbgo2);
+#else
+  // building on Nevada-B158 or earlier so figure out which callback to use
+
+  if (has_newer_Pstack_iter) {
+    // Since we're building on Nevada-B158 or earlier, we have to
+    // cast wrapper_fill_cframe_list to make the compiler happy.
+    Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
+                (proc_stack_f *)wrapper_fill_cframe_list, &dbgo2);
+  } else {
+    Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
+                fill_cframe_list, &dbgo2);
+  }
+#endif // SOLARIS_11_B159_OR_LATER
 
   return dbgo2.obj;
 }
@@ -1218,6 +1266,102 @@
   return res;
 }
 
+#ifndef SOLARIS_11_B159_OR_LATER
+// Determine if the OS we're running on has the newer version
+// of libproc's Pstack_iter.
+//
+// Set env var PSTACK_ITER_DEBUG=true to debug this logic.
+// Set env var PSTACK_ITER_DEBUG_RELEASE to simulate a 'release' value.
+// Set env var PSTACK_ITER_DEBUG_VERSION to simulate a 'version' value.
+//
+// frankenputer 'uname -r -v': 5.10 Generic_141445-09
+// jurassic 'uname -r -v': 5.11 snv_164
+// lonepeak 'uname -r -v': 5.11 snv_127
+//
+static void set_has_newer_Pstack_iter(JNIEnv *env) {
+  static bool done_set = false;
+
+  if (done_set) {
+    // already set has_newer_Pstack_iter
+    return;
+  }
+
+  struct utsname name;
+  if (uname(&name) == -1) {
+    THROW_NEW_DEBUGGER_EXCEPTION("uname() failed!");
+  }
+  dprintf_2("release='%s' version='%s'\n", name.release, name.version);
+
+  if (_Pstack_iter_debug) {
+    char *override = getenv("PSTACK_ITER_DEBUG_RELEASE");
+    if (override != NULL) {
+      strncpy(name.release, override, SYS_NMLN - 1);
+      name.release[SYS_NMLN - 2] = '\0';
+      dprintf_2("overriding with release='%s'\n", name.release);
+    }
+    override = getenv("PSTACK_ITER_DEBUG_VERSION");
+    if (override != NULL) {
+      strncpy(name.version, override, SYS_NMLN - 1);
+      name.version[SYS_NMLN - 2] = '\0';
+      dprintf_2("overriding with version='%s'\n", name.version);
+    }
+  }
+
+  // the major number corresponds to the old SunOS major number
+  int major = atoi(name.release);
+  if (major >= 6) {
+    dprintf_2("release is SunOS 6 or later\n");
+    has_newer_Pstack_iter = true;
+    done_set = true;
+    return;
+  }
+  if (major < 5) {
+    dprintf_2("release is SunOS 4 or earlier\n");
+    done_set = true;
+    return;
+  }
+
+  // some SunOS 5.* build so now check for Solaris versions
+  char *dot = strchr(name.release, '.');
+  int minor = 0;
+  if (dot != NULL) {
+    // release is major.minor format
+    *dot = NULL;
+    minor = atoi(dot + 1);
+  }
+
+  if (minor <= 10) {
+    dprintf_2("release is Solaris 10 or earlier\n");
+    done_set = true;
+    return;
+  } else if (minor >= 12) {
+    dprintf_2("release is Solaris 12 or later\n");
+    has_newer_Pstack_iter = true;
+    done_set = true;
+    return;
+  }
+
+  // some Solaris 11 build so now check for internal build numbers
+  if (strncmp(name.version, "snv_", 4) != 0) {
+    dprintf_2("release is Solaris 11 post-GA or later\n");
+    has_newer_Pstack_iter = true;
+    done_set = true;
+    return;
+  }
+
+  // version begins with "snv_" so a pre-GA build of Solaris 11
+  int build = atoi(&name.version[4]);
+  if (build >= 159) {
+    dprintf_2("release is Nevada-B159 or later\n");
+    has_newer_Pstack_iter = true;
+  } else {
+    dprintf_2("release is Nevada-B158 or earlier\n");
+  }
+
+  done_set = true;
+}
+#endif // !SOLARIS_11_B159_OR_LATER
+
 /*
  * Class:     sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal
  * Method:    initIDs
@@ -1237,6 +1381,14 @@
   if (libproc_handle == 0)
     THROW_NEW_DEBUGGER_EXCEPTION("can't load libproc.so, if you are using Solaris 5.7 or below, copy libproc.so from 5.8!");
 
+#ifndef SOLARIS_11_B159_OR_LATER
+  _Pstack_iter_debug = getenv("PSTACK_ITER_DEBUG") != NULL;
+
+  set_has_newer_Pstack_iter(env);
+  CHECK_EXCEPTION;
+  dprintf_2("has_newer_Pstack_iter=%d\n", has_newer_Pstack_iter);
+#endif
+
   p_ps_prochandle_ID = env->GetFieldID(clazz, "p_ps_prochandle", "J");
   CHECK_EXCEPTION;
--- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Wed Jun 01 17:09:56 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Thu Jun 02 18:59:50 2011 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1028,7 +1028,12 @@
                         if (AddressOps.equal(val, value)) {
                             if (!printed) {
                                 printed = true;
-                                blob.printOn(out);
+                                try {
+                                    blob.printOn(out);
+                                } catch (Exception e) {
+                                    out.println("Exception printing blob at " + base);
+                                    e.printStackTrace();
+                                }
                             }
                             out.println("found at " + base + "\n");
                         }
--- a/agent/src/share/classes/sun/jvm/hotspot/HelloWorld.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/HelloWorld.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/AdapterBlob.java	Thu Jun 02 18:59:50 2011 +0100
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.code;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+
+public class AdapterBlob extends CodeBlob {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static void initialize(TypeDataBase db) {
+    // Type type = db.lookupType("AdapterBlob");
+
+    // // FIXME: add any needed fields
+  }
+
+  public AdapterBlob(Address addr) {
+    super(addr);
+  }
+
+  public boolean isAdapterBlob() {
+    return true;
+  }
+
+  public String getName() {
+    return "AdapterBlob: " + super.getName();
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Wed Jun 01 17:09:56 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Thu Jun 02 18:59:50 2011 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,6 +93,8 @@
   public boolean isUncommonTrapStub() { return false; }
   public boolean isExceptionStub() { return false; }
   public boolean isSafepointStub() { return false; }
+  public boolean isRicochetBlob() { return false; }
+  public boolean isAdapterBlob() { return false; }
 
   // Fine grain nmethod support: isNmethod() == isJavaMethod() || isNativeMethod() || isOSRMethod()
   public boolean isJavaMethod() { return false; }
--- a/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Wed Jun 01 17:09:56 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Thu Jun 02 18:59:50 2011 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,6 +57,8 @@
     virtualConstructor.addMapping("BufferBlob", BufferBlob.class);
     virtualConstructor.addMapping("nmethod", NMethod.class);
     virtualConstructor.addMapping("RuntimeStub", RuntimeStub.class);
+    virtualConstructor.addMapping("RicochetBlob", RicochetBlob.class);
+    virtualConstructor.addMapping("AdapterBlob", AdapterBlob.class);
     virtualConstructor.addMapping("SafepointBlob", SafepointBlob.class);
     virtualConstructor.addMapping("DeoptimizationBlob", DeoptimizationBlob.class);
     if (VM.getVM().isServerCompiler()) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/RicochetBlob.java	Thu Jun 02 18:59:50 2011 +0100
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.code;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+
+/** RicochetBlob (currently only used by Compiler 2) */
+
+public class RicochetBlob extends SingletonBlob {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static void initialize(TypeDataBase db) {
+    // Type type = db.lookupType("RicochetBlob");
+
+    // FIXME: add any needed fields
+  }
+
+  public RicochetBlob(Address addr) {
+    super(addr);
+  }
+
+  public boolean isRicochetBlob() {
+    return true;
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeLoadConstant.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeLoadConstant.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithKlass.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithKlass.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/ByteValueImpl.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/ByteValueImpl.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/CharValueImpl.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/CharValueImpl.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/ConnectorImpl.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/ConnectorImpl.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2004, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/DoubleValueImpl.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/DoubleValueImpl.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/FloatValueImpl.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/FloatValueImpl.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/IntegerValueImpl.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/IntegerValueImpl.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/LocalVariableImpl.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/LocalVariableImpl.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2004, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/LocationImpl.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/LocationImpl.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/LongValueImpl.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/LongValueImpl.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/MethodImpl.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/MethodImpl.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/ReferenceTypeImpl.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/ReferenceTypeImpl.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/ShortValueImpl.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/ShortValueImpl.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2006, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/DictionaryEntry.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/DictionaryEntry.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/LoaderConstraintEntry.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/LoaderConstraintEntry.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/PlaceholderEntry.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/PlaceholderEntry.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/StringTable.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/StringTable.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/SymbolTable.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/SymbolTable.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Wed Jun 01 17:09:56 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Thu Jun 02 18:59:50 2011 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -331,8 +331,6 @@
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(getTagAt(i).isInvokeDynamic(), "Corrupted constant pool");
     }
-    if (getTagAt(i).value() == JVM_CONSTANT_InvokeDynamicTrans)
-      return null;
     int bsmSpec = extractLowShortFromInt(this.getIntAt(i));
     TypeArray operands = getOperands();
     if (operands == null) return null; // safety first
@@ -368,7 +366,6 @@
     case JVM_CONSTANT_MethodHandle: return "JVM_CONSTANT_MethodHandle";
     case JVM_CONSTANT_MethodType: return "JVM_CONSTANT_MethodType";
     case JVM_CONSTANT_InvokeDynamic: return "JVM_CONSTANT_InvokeDynamic";
-    case JVM_CONSTANT_InvokeDynamicTrans: return "JVM_CONSTANT_InvokeDynamic/transitional";
     case JVM_CONSTANT_Invalid: return "JVM_CONSTANT_Invalid";
     case JVM_CONSTANT_UnresolvedClass: return "JVM_CONSTANT_UnresolvedClass";
     case JVM_CONSTANT_UnresolvedClassInError: return "JVM_CONSTANT_UnresolvedClassInError";
@@ -428,7 +425,6 @@
       case JVM_CONSTANT_MethodHandle:
       case JVM_CONSTANT_MethodType:
       case JVM_CONSTANT_InvokeDynamic:
-      case JVM_CONSTANT_InvokeDynamicTrans:
         visitor.doInt(new IntField(new NamedFieldIdentifier(nameForTag(ctag)), indexOffset(index), true), true);
         break;
     }
@@ -592,7 +588,6 @@
               break;
           }
 
-          case JVM_CONSTANT_InvokeDynamicTrans:
           case JVM_CONSTANT_InvokeDynamic: {
               dos.writeByte(cpConstType);
               int value = getIntAt(ci);
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Symbol.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Symbol.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java	Wed Jun 01 17:09:56 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java	Thu Jun 02 18:59:50 2011 +0100
@@ -42,7 +42,7 @@
     public static final int JVM_CONSTANT_NameAndType = 12;
     public static final int JVM_CONSTANT_MethodHandle = 15;
     public static final int JVM_CONSTANT_MethodType = 16;
-    public static final int JVM_CONSTANT_InvokeDynamicTrans = 17; // only occurs in old class files
+    // static final int JVM_CONSTANT_(unused) = 17;
     public static final int JVM_CONSTANT_InvokeDynamic = 18;
 
     // JVM_CONSTANT_MethodHandle subtypes
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/ServiceThread.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/ServiceThread.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Wed Jun 01 17:09:56 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Thu Jun 02 18:59:50 2011 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -321,7 +321,6 @@
               break;
           }
 
-          case JVM_CONSTANT_InvokeDynamicTrans:
           case JVM_CONSTANT_InvokeDynamic: {
               dos.writeByte(cpConstType);
               int value = cpool.getIntAt(ci);
--- a/agent/src/share/classes/sun/jvm/hotspot/types/Field.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/types/Field.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Wed Jun 01 17:09:56 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Thu Jun 02 18:59:50 2011 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -598,7 +598,6 @@
             buf.cell(Integer.toString(cpool.getIntAt(index)));
             break;
 
-         case JVM_CONSTANT_InvokeDynamicTrans:
          case JVM_CONSTANT_InvokeDynamic:
             buf.cell("JVM_CONSTANT_InvokeDynamic");
            buf.cell(genLowHighShort(cpool.getIntAt(index)) +
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java	Wed Jun 01 17:09:56 2011 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java	Thu Jun 02 18:59:50 2011 +0100
@@ -40,7 +40,7 @@
   private static int JVM_CONSTANT_NameAndType = 12;
   private static int JVM_CONSTANT_MethodHandle = 15; // JSR 292
   private static int JVM_CONSTANT_MethodType = 16; // JSR 292
-  private static int JVM_CONSTANT_InvokeDynamicTrans = 17; // JSR 292, only occurs in old class files
+  //      static int JVM_CONSTANT_(unused) = 17; // JSR 292 early drafts only
   private static int JVM_CONSTANT_InvokeDynamic = 18; // JSR 292
   private static int JVM_CONSTANT_Invalid = 0; // For bad value initialization
   private static int JVM_CONSTANT_UnresolvedClass = 100; // Temporary tag until actual use
@@ -83,7 +83,6 @@
   public boolean isMethodHandle() { return tag == JVM_CONSTANT_MethodHandle; }
   public boolean isMethodType() { return tag == JVM_CONSTANT_MethodType; }
   public boolean isInvokeDynamic() { return tag == JVM_CONSTANT_InvokeDynamic; }
-  public boolean isInvokeDynamicTrans() { return tag == JVM_CONSTANT_InvokeDynamicTrans; }
 
   public boolean isInvalid() { return tag == JVM_CONSTANT_Invalid; }
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HashtableEntry.java Wed Jun 01 17:09:56 2011 +0100 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HashtableEntry.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/make/altsrc.make	Wed Jun 01 17:09:56 2011 +0100
+++ b/make/altsrc.make	Thu Jun 02 18:59:50 2011 +0100
@@ -24,7 +24,8 @@
 
 # This file defines variables and macros which are used in the makefiles to
 # allow distributions to augment or replace common hotspot code with
-# distribution-specific source files.
+# distribution-specific source files. This capability is disabled when
+# an OPENJDK build is requested, unless HS_ALT_SRC_REL has been set externally.
 
 # Requires: GAMMADIR
 # Provides:
@@ -33,14 +34,17 @@
 
 HS_COMMON_SRC_REL=src
 
-# This needs to be changed to a more generic location, but we keep it as this
-# for now for compatibility
-HS_ALT_SRC_REL=src/closed
+ifneq ($(OPENJDK),true)
+  # This needs to be changed to a more generic location, but we keep it
+  # as this for now for compatibility
+  HS_ALT_SRC_REL=src/closed
+else
+  HS_ALT_SRC_REL=NO_SUCH_PATH
+endif
 
 HS_COMMON_SRC=$(GAMMADIR)/$(HS_COMMON_SRC_REL)
 HS_ALT_SRC=$(GAMMADIR)/$(HS_ALT_SRC_REL)
 
-
 ## altsrc-equiv
 #
 # Convert a common source path to an alternative source path
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/cscope.make	Thu Jun 02 18:59:50 2011 +0100
@@ -0,0 +1,141 @@
+#
+# Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# The cscope.out file is generated in the current directory. The old cscope.out
+# file is *not* removed because cscope is smart enough to only build what has
+# changed. cscope can be confused if files are renamed or removed, so it may be
+# necessary to remove cscope.out (gmake cscope.clean) if a lot of reorganization
+# has occurred.
+
+include $(GAMMADIR)/make/scm.make
+
+RM = rm -f
+HG = hg
+CS_TOP = $(GAMMADIR)
+
+CSDIRS = $(CS_TOP)/src $(CS_TOP)/make
+CSINCS = $(CSDIRS:%=-I%)
+
+CSCOPE = cscope
+CSCOPE_OUT = cscope.out
+CSCOPE_FLAGS = -b
+
+# Allow .java files to be added from the environment (CSCLASSES=yes).
+ifdef CSCLASSES
+ADDCLASSES= -o -name '*.java'
+endif
+
+# Adding CClassHeaders also pushes the file count of a full workspace up about
+# 200 files (these files also don't exist in a new workspace, and thus will
+# cause the recreation of the database as they get created, which might seem
+# a little confusing). Thus allow these files to be added from the environment
+# (CSHEADERS=yes).
+ifndef CSHEADERS
+RMCCHEADERS= -o -name CClassHeaders
+endif
+
+# Ignore build products.
+CS_PRUNE_GENERATED = -o -name '${OSNAME}_*_core' -o \
+                        -name '${OSNAME}_*_compiler?'
+
+# O/S-specific files for all systems are included by default. Set CS_OS to a
+# space-separated list of identifiers to include only those systems.
+ifdef CS_OS
+CS_PRUNE_OS = $(patsubst %,-o -name '*%*',\
+                  $(filter-out ${CS_OS},linux macos solaris windows))
+endif
+
+# CPU-specific files for all processors are included by default. Set CS_CPU
+# space-separated list identifiers to include only those CPUs.
+ifdef CS_CPU
+CS_PRUNE_CPU = $(patsubst %,-o -name '*%*',\
+                   $(filter-out ${CS_CPU},arm ppc sparc x86 zero))
+endif
+
+# What files should we include? A simple rule might be just those files under
+# SCCS control, however this would miss files we create like the opcodes and
+# CClassHeaders. The following attempts to find everything that is *useful*.
+# (.del files are created by sccsrm, demo directories contain many .java files
+# that probably aren't useful for development, and the pkgarchive may contain
+# duplicates of files within the source hierarchy).
+
+# Directories to exclude.
+CS_PRUNE_STD = $(SCM_DIRS) \
+               -o -name '.del-*' \
+               -o -name '*demo' \
+               -o -name pkgarchive
+
+# Placeholder for user-defined excludes.
+CS_PRUNE_EX =
+
+CS_PRUNE = $(CS_PRUNE_STD) \
+           $(CS_PRUNE_OS) \
+           $(CS_PRUNE_CPU) \
+           $(CS_PRUNE_GENERATED) \
+           $(CS_PRUNE_EX) \
+           $(RMCCHEADERS)
+
+# File names to include.
+CSFILENAMES = -name '*.[ch]pp' \
+              -o -name '*.[Ccshlxy]' \
+              $(CS_ADD_GENERATED) \
+              -o -name '*.d' \
+              -o -name '*.il' \
+              -o -name '*.cc' \
+              -o -name '*[Mm]akefile*' \
+              -o -name '*.gmk' \
+              -o -name '*.make' \
+              -o -name '*.ad' \
+              $(ADDCLASSES)
+
+.PHONY: cscope cscope.clean cscope.scratch TAGS.clean FORCE
+.PRECIOUS: cscope.out
+
+cscope $(CSCOPE_OUT): cscope.files FORCE
+	$(CSCOPE) -f $(CSCOPE_OUT) $(CSCOPE_FLAGS)
+
+cscope.clean:
+	$(QUIETLY) $(RM) $(CSCOPE_OUT) cscope.files
+
+cscope.scratch: cscope.clean cscope
+
+# The raw list is reordered so cscope displays the most relevant files first.
+cscope.files:
+	$(QUIETLY) \
+	raw=cscope.$$$$; \
+	find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o \
+	    -type f \( $(CSFILENAMES) \) -print > $$raw; \
+	{ \
+	  echo "$(CSINCS)"; \
+	  egrep -v "\.java|/make/" $$raw; \
+	  fgrep ".java" $$raw; \
+	  fgrep "/make/" $$raw; \
+	} > $@; \
+	rm -f $$raw
+
+TAGS: cscope.files FORCE
+	egrep -v '^-|^$$' $< | etags --members -
+
+TAGS.clean:
+	$(RM) TAGS
--- a/make/hotspot_version	Wed Jun 01 17:09:56 2011 +0100
+++ b/make/hotspot_version	Thu Jun 02 18:59:50 2011 +0100
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=21
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=07
+HS_BUILD_NUMBER=14
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/jdk6_hotspot_distro	Thu Jun 02 18:59:50 2011 +0100
@@ -0,0 +1,32 @@
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+#
+# This file format must remain compatible with both
+# GNU Makefile and Microsoft nmake formats.
+#
+
+# Don't put quotes (fail windows build).
+HOTSPOT_VM_DISTRO=Java HotSpot(TM)
+COMPANY_NAME=Sun Microsystems, Inc.
+PRODUCT_NAME=Java(TM) Platform SE
--- a/make/jprt.gmk	Wed Jun 01 17:09:56 2011 +0100
+++ b/make/jprt.gmk	Thu Jun 02 18:59:50 2011 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,24 @@
   ZIPFLAGS=-q -y
 endif
 
+jprt_build_productEmb:
+	$(MAKE) JAVASE_EMBEDDED=true jprt_build_product
+
+jprt_build_debugEmb:
+	$(MAKE) JAVASE_EMBEDDED=true jprt_build_debug
+
+jprt_build_fastdebugEmb:
+	$(MAKE) JAVASE_EMBEDDED=true jprt_build_fastdebug
+
+jprt_build_productOpen:
+	$(MAKE) OPENJDK=true jprt_build_product
+
+jprt_build_debugOpen:
+	$(MAKE) OPENJDK=true jprt_build_debug
+
+jprt_build_fastdebugOpen:
+	$(MAKE) OPENJDK=true jprt_build_fastdebug
+
 jprt_build_product: all_product copy_product_jdk export_product_jdk
 	( $(CD) $(JDK_IMAGE_DIR) && \
 	  $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
--- a/make/jprt.properties	Wed Jun 01 17:09:56 2011 +0100
+++ b/make/jprt.properties	Thu Jun 02 18:59:50 2011 +0100
@@ -202,16 +202,21 @@
     ${jprt.my.windows.i586}-{product|fastdebug|debug}, \
     ${jprt.my.windows.x64}-{product|fastdebug|debug}
 
+jprt.build.targets.open= \
+    ${jprt.my.solaris.i586}-{productOpen}, \
+    ${jprt.my.solaris.x64}-{debugOpen}, \
+    ${jprt.my.linux.x64}-{productOpen}
+
 jprt.build.targets.embedded= \
-    ${jprt.my.linux.i586}-{product|fastdebug|debug}, \
-    ${jprt.my.linux.ppc}-{product|fastdebug}, \
-    ${jprt.my.linux.ppcv2}-{product|fastdebug}, \
-    ${jprt.my.linux.ppcsflt}-{product|fastdebug}, \
-    ${jprt.my.linux.armvfp}-{product|fastdebug}, \
-    ${jprt.my.linux.armsflt}-{product|fastdebug}
+    ${jprt.my.linux.i586}-{productEmb|fastdebugEmb|debugEmb}, \
+    ${jprt.my.linux.ppc}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.ppcv2}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.ppcsflt}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.armvfp}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.armsflt}-{productEmb|fastdebugEmb}
 
 jprt.build.targets.all=${jprt.build.targets.standard}, \
-    ${jprt.build.targets.embedded}
+    ${jprt.build.targets.embedded}, ${jprt.build.targets.open}
 
 jprt.build.targets.jdk7=${jprt.build.targets.all}
 jprt.build.targets.jdk7temp=${jprt.build.targets.all}
@@ -453,6 +458,12 @@
     ${jprt.my.windows.x64}-product-c2-jbb_G1, \
     ${jprt.my.windows.x64}-product-c2-jbb_ParOldGC
 
+# Some basic "smoke" tests for OpenJDK builds
+jprt.test.targets.open = \
+    ${jprt.my.solaris.x64}-{productOpen|debugOpen|fastdebugOpen}-c2-jvm98_tiered, \
+    ${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98_tiered, \
+    ${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98_tiered
+
 # Testing for actual embedded builds is different to standard
 jprt.my.linux.i586.test.targets.embedded = \
     linux_i586_2.6-product-c1-scimark
@@ -461,6 +472,7 @@
 # Note: no PPC or ARM tests at this stage
 
 jprt.test.targets.standard = \
+  ${jprt.my.linux.i586.test.targets.embedded}, \
   ${jprt.my.solaris.sparc.test.targets}, \
   ${jprt.my.solaris.sparcv9.test.targets}, \
   ${jprt.my.solaris.i586.test.targets}, \
@@ -468,7 +480,8 @@
   ${jprt.my.linux.i586.test.targets}, \
   ${jprt.my.linux.x64.test.targets}, \
   ${jprt.my.windows.i586.test.targets}, \
-  ${jprt.my.windows.x64.test.targets}
+  ${jprt.my.windows.x64.test.targets}, \
+  ${jprt.test.targets.open}
 
 jprt.test.targets.embedded= \
   ${jprt.my.linux.i586.test.targets.embedded}, \
--- a/make/linux/Makefile	Wed Jun 01 17:09:56 2011 +0100
+++ b/make/linux/Makefile	Thu Jun 02 18:59:50 2011 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -359,7 +359,7 @@
 
 clean: clean_compiler2 clean_compiler1 clean_core clean_zero clean_shark clean_docs
 
-include $(GAMMADIR)/make/$(OSNAME)/makefiles/cscope.make
+include $(GAMMADIR)/make/cscope.make
 
 #-------------------------------------------------------------------------------
--- a/make/linux/README Wed Jun 01 17:09:56 2011 +0100 +++ b/make/linux/README Thu Jun 02 18:59:50 2011 +0100 @@ -1,4 +1,4 @@ -Copyright (c) 2007 Oracle and/or its affiliates. All rights reserved. +Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved. DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/arm.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/linux/makefiles/arm.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. # ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. #
--- a/make/linux/makefiles/cscope.make	Wed Jun 01 17:09:56 2011 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,160 +0,0 @@
-#
-# Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-
-#
-# The cscope.out file is made in the current directory and spans the entire
-# source tree.
-#
-# Things to note:
-#   1. We use relative names for cscope.
-#   2. We *don't* remove the old cscope.out file, because cscope is smart
-#      enough to only build what has changed. It can be confused, however,
-#      if files are renamed or removed, so it may be necessary to manually
-#      remove cscope.out if a lot of reorganization has occurred.
-#
-
-include $(GAMMADIR)/make/scm.make
-
-NAWK = awk
-RM = rm -f
-HG = hg
-CS_TOP = ../..
-
-CSDIRS = $(CS_TOP)/src $(CS_TOP)/build
-CSINCS = $(CSDIRS:%=-I%)
-
-CSCOPE = cscope
-CSCOPE_FLAGS = -b
-
-# Allow .java files to be added from the environment (CSCLASSES=yes).
-ifdef CSCLASSES
-ADDCLASSES= -o -name '*.java'
-endif
-
-# Adding CClassHeaders also pushes the file count of a full workspace up about
-# 200 files (these files also don't exist in a new workspace, and thus will
-# cause the recreation of the database as they get created, which might seem
-# a little confusing). Thus allow these files to be added from the environment
-# (CSHEADERS=yes).
-ifndef CSHEADERS
-RMCCHEADERS= -o -name CClassHeaders
-endif
-
-# Use CS_GENERATED=x to include auto-generated files in the build directories.
-ifdef CS_GENERATED
-CS_ADD_GENERATED = -o -name '*.incl'
-else
-CS_PRUNE_GENERATED = -o -name '${OS}_*_core' -o -name '${OS}_*_compiler?'
-endif
-
-# OS-specific files for other systems are excluded by default. Use CS_OS=yes
-# to include platform-specific files for other platforms.
-ifndef CS_OS
-CS_OS = linux macos solaris win32
-CS_PRUNE_OS = $(patsubst %,-o -name '*%*',$(filter-out ${OS},${CS_OS}))
-endif
-
-# Processor-specific files for other processors are excluded by default. Use
-# CS_CPU=x to include platform-specific files for other platforms.
-ifndef CS_CPU
-CS_CPU = i486 sparc amd64 ia64
-CS_PRUNE_CPU = $(patsubst %,-o -name '*%*',$(filter-out ${SRCARCH},${CS_CPU}))
-endif
-
-# What files should we include? A simple rule might be just those files under
-# SCCS control, however this would miss files we create like the opcodes and
-# CClassHeaders. The following attempts to find everything that is *useful*.
-# (.del files are created by sccsrm, demo directories contain many .java files
-# that probably aren't useful for development, and the pkgarchive may contain
-# duplicates of files within the source hierarchy).
-
-# Directories to exclude.
-CS_PRUNE_STD = $(SCM_DIRS) \
-               -o -name '.del-*' \
-               -o -name '*demo' \
-               -o -name pkgarchive
-
-CS_PRUNE = $(CS_PRUNE_STD) \
-           $(CS_PRUNE_OS) \
-           $(CS_PRUNE_CPU) \
-           $(CS_PRUNE_GENERATED) \
-           $(RMCCHEADERS)
-
-# File names to include.
-CSFILENAMES = -name '*.[ch]pp' \
-              -o -name '*.[Ccshlxy]' \
-              $(CS_ADD_GENERATED) \
-              -o -name '*.il' \
-              -o -name '*.cc' \
-              -o -name '*[Mm]akefile*' \
-              -o -name '*.gmk' \
-              -o -name '*.make' \
-              -o -name '*.ad' \
-              $(ADDCLASSES)
-
-.PRECIOUS: cscope.out
-
-cscope cscope.out: cscope.files FORCE
-	$(CSCOPE) $(CSCOPE_FLAGS)
-
-# The .raw file is reordered here in an attempt to make cscope display the most
-# relevant files first.
-cscope.files: .cscope.files.raw
-	echo "$(CSINCS)" > $@
-	-egrep -v "\.java|\/make\/" $< >> $@
-	-fgrep ".java" $< >> $@
-	-fgrep "/make/" $< >> $@
-
-.cscope.files.raw: .nametable.files
-	-find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o \
-	    -type f \( $(CSFILENAMES) \) -print > $@
-
-cscope.clean: nametable.clean
-	-$(RM) cscope.out cscope.files .cscope.files.raw
-
-TAGS: cscope.files FORCE
-	egrep -v '^-|^$$' $< | etags --members -
-
-TAGS.clean: nametable.clean
-	-$(RM) TAGS
-
-# .nametable.files and .nametable.files.tmp are used to determine if any files
-# were added to/deleted from/renamed in the workspace. If not, then there's
-# normally no need to rebuild the cscope database. To force a rebuild of
-# the cscope database: gmake nametable.clean.
-.nametable.files: .nametable.files.tmp
-	( cmp -s $@ $< ) || ( cp $< $@ )
-	-$(RM) $<
-
-# `hg status' is slightly faster than `hg fstatus'. Both are
-# quite a bit slower on an NFS mounted file system, so this is
-# really geared towards repos on local file systems.
-.nametable.files.tmp:
-	-$(HG) fstatus -acmn > $@
-
-nametable.clean:
-	-$(RM) .nametable.files .nametable.files.tmp
-
-FORCE:
-
-.PHONY: cscope cscope.clean TAGS.clean nametable.clean FORCE
--- a/make/linux/makefiles/gcc.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/linux/makefiles/gcc.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -205,7 +205,7 @@ SHARED_FLAG = -shared # Keep symbols even they are not used -AOUT_FLAGS += -export-dynamic +AOUT_FLAGS += -Xlinker -export-dynamic #------------------------------------------------------------------------ # Debug flags
--- a/make/linux/makefiles/mapfile-vers-debug Wed Jun 01 17:09:56 2011 +0100 +++ b/make/linux/makefiles/mapfile-vers-debug Thu Jun 02 18:59:50 2011 +0100 @@ -3,7 +3,7 @@ # # -# Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/mapfile-vers-product Wed Jun 01 17:09:56 2011 +0100 +++ b/make/linux/makefiles/mapfile-vers-product Thu Jun 02 18:59:50 2011 +0100 @@ -3,7 +3,7 @@ # # -# Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/ppc.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/linux/makefiles/ppc.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved. # ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. #
--- a/make/linux/makefiles/sa.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/linux/makefiles/sa.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/sparcWorks.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/linux/makefiles/sparcWorks.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/top.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/linux/makefiles/top.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/vm.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/linux/makefiles/vm.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -110,6 +110,10 @@ CFLAGS += $(EXTRA_CFLAGS) LFLAGS += $(EXTRA_CFLAGS) +# Don't set executable bit on stack segment +# the same could be done by a separate execstack command +LFLAGS += -Xlinker -z -Xlinker noexecstack + LIBS += -lm -ldl -lpthread # By default, link the *.o into the library, not the executable.
--- a/make/solaris/Makefile Wed Jun 01 17:09:56 2011 +0100 +++ b/make/solaris/Makefile Thu Jun 02 18:59:50 2011 +0100 @@ -296,7 +296,7 @@ clean: clean_compiler2 clean_compiler1 clean_core clean_docs clean_kernel -include $(GAMMADIR)/make/$(OSNAME)/makefiles/cscope.make +include $(GAMMADIR)/make/cscope.make #-------------------------------------------------------------------------------
--- a/make/solaris/makefiles/adlc.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/solaris/makefiles/adlc.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/buildtree.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/solaris/makefiles/buildtree.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/cscope.make Wed Jun 01 17:09:56 2011 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,162 +0,0 @@ -# -# Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# -# The cscope.out file is made in the current directory and spans the entire -# source tree. -# -# Things to note: -# 1. We use relative names for cscope. -# 2. We *don't* remove the old cscope.out file, because cscope is smart -# enough to only build what has changed. It can be confused, however, -# if files are renamed or removed, so it may be necessary to manually -# remove cscope.out if a lot of reorganization has occurred. -# - -include $(GAMMADIR)/make/scm.make - -NAWK = /usr/xpg4/bin/awk -RM = rm -f -HG = hg -CS_TOP = ../.. - -CSDIRS = $(CS_TOP)/src $(CS_TOP)/make -CSINCS = $(CSDIRS:%=-I%) - -CSCOPE = cscope -CSCOPE_FLAGS = -b - -# Allow .java files to be added from the environment (CSCLASSES=yes). -ifdef CSCLASSES -ADDCLASSES= -o -name '*.java' -endif - -# Adding CClassHeaders also pushes the file count of a full workspace up about -# 200 files (these files also don't exist in a new workspace, and thus will -# cause the recreation of the database as they get created, which might seem -# a little confusing). Thus allow these files to be added from the environment -# (CSHEADERS=yes). -ifndef CSHEADERS -RMCCHEADERS= -o -name CClassHeaders -endif - -# Use CS_GENERATED=x to include auto-generated files in the make directories. -ifdef CS_GENERATED -CS_ADD_GENERATED = -o -name '*.incl' -else -CS_PRUNE_GENERATED = -o -name '${OS}_*_core' -o -name '${OS}_*_compiler?' -endif - -# OS-specific files for other systems are excluded by default. Use CS_OS=yes -# to include platform-specific files for other platforms. -ifndef CS_OS -CS_OS = linux macos solaris win32 -CS_PRUNE_OS = $(patsubst %,-o -name '*%*',$(filter-out ${OS},${CS_OS})) -endif - -# Processor-specific files for other processors are excluded by default. Use -# CS_CPU=x to include platform-specific files for other platforms. -ifndef CS_CPU -CS_CPU = i486 sparc amd64 ia64 -CS_PRUNE_CPU = $(patsubst %,-o -name '*%*',$(filter-out ${SRCARCH},${CS_CPU})) -endif - -# What files should we include? A simple rule might be just those files under -# SCCS control, however this would miss files we create like the opcodes and -# CClassHeaders. The following attempts to find everything that is *useful*. 
-# (.del files are created by sccsrm, demo directories contain many .java files -# that probably aren't useful for development, and the pkgarchive may contain -# duplicates of files within the source hierarchy). - -# Directories to exclude. -CS_PRUNE_STD = $(SCM_DIRS) \ - -o -name '.del-*' \ - -o -name '*demo' \ - -o -name pkgarchive - -CS_PRUNE = $(CS_PRUNE_STD) \ - $(CS_PRUNE_OS) \ - $(CS_PRUNE_CPU) \ - $(CS_PRUNE_GENERATED) \ - $(RMCCHEADERS) - -# File names to include. -CSFILENAMES = -name '*.[ch]pp' \ - -o -name '*.[Ccshlxy]' \ - $(CS_ADD_GENERATED) \ - -o -name '*.d' \ - -o -name '*.il' \ - -o -name '*.cc' \ - -o -name '*[Mm]akefile*' \ - -o -name '*.gmk' \ - -o -name '*.make' \ - -o -name '*.ad' \ - $(ADDCLASSES) - -.PRECIOUS: cscope.out - -cscope cscope.out: cscope.files FORCE - $(CSCOPE) $(CSCOPE_FLAGS) - -# The .raw file is reordered here in an attempt to make cscope display the most -# relevant files first. -cscope.files: .cscope.files.raw - echo "$(CSINCS)" > $@ - -egrep -v "\.java|\/make\/" $< >> $@ - -fgrep ".java" $< >> $@ - -fgrep "/make/" $< >> $@ - -.cscope.files.raw: .nametable.files - -find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o \ - -type f \( $(CSFILENAMES) \) -print > $@ - -cscope.clean: nametable.clean - -$(RM) cscope.out cscope.files .cscope.files.raw - -TAGS: cscope.files FORCE - egrep -v '^-|^$$' $< | etags --members - - -TAGS.clean: nametable.clean - -$(RM) TAGS - -# .nametable.files and .nametable.files.tmp are used to determine if any files -# were added to/deleted from/renamed in the workspace. If not, then there's -# normally no need to rebuild the cscope database. To force a rebuild of -# the cscope database: gmake nametable.clean. -.nametable.files: .nametable.files.tmp - ( cmp -s $@ $< ) || ( cp $< $@ ) - -$(RM) $< - -# `hg status' is slightly faster than `hg fstatus'. Both are -# quite a bit slower on an NFS mounted file system, so this is -# really geared towards repos on local file systems. -.nametable.files.tmp: - -$(HG) fstatus -acmn > $@ - -nametable.clean: - -$(RM) .nametable.files .nametable.files.tmp - -FORCE: - -.PHONY: cscope cscope.clean TAGS.clean nametable.clean FORCE
--- a/make/solaris/makefiles/rules.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/solaris/makefiles/rules.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/sa.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/solaris/makefiles/sa.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/saproc.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/solaris/makefiles/saproc.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -56,6 +56,33 @@ SA_LFLAGS += -mt -xnolib -norunpath endif +# The libproc Pstack_iter() interface changed in Nevada-B159. +# Use 'uname -r -v' to determine the Solaris version as per +# Solaris Nevada team request. This logic needs to match: +# agent/src/os/solaris/proc/saproc.cpp: set_has_newer_Pstack_iter(): +# - skip SunOS 4 or older +# - skip Solaris 10 or older +# - skip two digit internal Nevada builds +# - skip three digit internal Nevada builds thru 149 +# - skip internal Nevada builds 150-158 +# - if not skipped, print define for Nevada-B159 or later +SOLARIS_11_B159_OR_LATER := \ +$(shell uname -r -v \ + | sed -n \ + -e '/^[0-4]\. /b' \ + -e '/^5\.[0-9] /b' \ + -e '/^5\.10 /b' \ + -e '/ snv_[0-9][0-9]$/b' \ + -e '/ snv_[01][0-4][0-9]$/b' \ + -e '/ snv_15[0-8]$/b' \ + -e 's/.*/-DSOLARIS_11_B159_OR_LATER/' \ + -e 'p' \ + ) + +# Uncomment the following to simulate building on Nevada-B159 or later +# when actually building on Nevada-B158 or earlier: +#SOLARIS_11_B159_OR_LATER=-DSOLARIS_11_B159_OR_LATER + $(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE) $(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \ echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \ @@ -68,6 +95,7 @@ -I$(GENERATED) \ -I$(BOOT_JAVA_HOME)/include \ -I$(BOOT_JAVA_HOME)/include/$(Platform_os_family) \ + $(SOLARIS_11_B159_OR_LATER) \ $(SASRCFILES) \ $(SA_LFLAGS) \ -o $@ \
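
The sed pipeline above encodes a version gate that, per its own comment, has to stay in sync with the runtime test in agent/src/os/solaris/proc/saproc.cpp (set_has_newer_Pstack_iter()). The C++ sketch below restates the same decision for readability; it is illustrative only, with hypothetical parsing and naming, and is not the actual saproc.cpp code.

    // Illustrative sketch of the "Nevada-B159 or later" test the sed script
    // performs on the output of `uname -r -v` (e.g. release "5.11",
    // version "snv_159"). All names here are placeholders.
    #include <cstdio>

    static bool solaris_11_b159_or_later(const char* release, const char* version) {
      int major = 0, minor = 0;
      if (std::sscanf(release, "%d.%d", &major, &minor) != 2) return false;
      if (major < 5)                 return false;  // SunOS 4.x or older
      if (major == 5 && minor <= 10) return false;  // Solaris 10 or older
      int build = 0;
      if (std::sscanf(version, "snv_%d", &build) == 1 && build <= 158) {
        return false;                               // internal Nevada builds before B159
      }
      return true;                                  // Nevada-B159 or later
    }
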
--- a/make/solaris/makefiles/sparcWorks.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/solaris/makefiles/sparcWorks.make Thu Jun 02 18:59:50 2011 +0100 @@ -100,11 +100,6 @@ LINK_LIB.CC/PRE_HOOK += $(JVM_CHECK_SYMBOLS) || exit 1; -# Some interfaces (_lwp_create) changed with LP64 and Solaris 7 -SOLARIS_7_OR_LATER := \ -$(shell uname -r | awk -F. '{ if ($$2 >= 7) print "-DSOLARIS_7_OR_LATER"; }') -CFLAGS += ${SOLARIS_7_OR_LATER} - # New architecture options started in SS12 (5.9), we need both styles to build. # The older arch options for SS11 (5.8) or older and also for /usr/ccs/bin/as. # Note: default for 32bit sparc is now the same as v8plus, so the
--- a/make/solaris/makefiles/top.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/solaris/makefiles/top.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/vm.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/solaris/makefiles/vm.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/build.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/windows/build.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -125,7 +125,25 @@ # or make/hotspot_distro. !ifndef HOTSPOT_VM_DISTRO !if exists($(WorkSpace)\src\closed) + +# if the build is for JDK6 or earlier version, it should include jdk6_hotspot_distro, +# instead of hotspot_distro. +JDK6_OR_EARLIER=0 +!if "$(JDK_MAJOR_VERSION)" != "" && "$(JDK_MINOR_VERSION)" != "" && "$(JDK_MICRO_VERSION)" != "" +!if $(JDK_MAJOR_VERSION) == 1 && $(JDK_MINOR_VERSION) < 7 +JDK6_OR_EARLIER=1 +!endif +!else +!if $(JDK_MAJOR_VER) == 1 && $(JDK_MINOR_VER) < 7 +JDK6_OR_EARLIER=1 +!endif +!endif + +!if $(JDK6_OR_EARLIER) == 1 +!include $(WorkSpace)\make\jdk6_hotspot_distro +!else !include $(WorkSpace)\make\hotspot_distro +!endif !else !include $(WorkSpace)\make\openjdk_distro !endif @@ -260,7 +278,7 @@ @ echo Variant=$(realVariant) >> $@ @ echo WorkSpace=$(WorkSpace) >> $@ @ echo BootStrapDir=$(BootStrapDir) >> $@ - @ if "$(USERNAME)" NEQ "" echo BuildUser=$(USERNAME) >> $@ + @ if "$(USERNAME)" NEQ "" echo BuildUser=$(USERNAME) >> $@ @ echo HS_VER=$(HS_VER) >> $@ @ echo HS_DOTVER=$(HS_DOTVER) >> $@ @ echo HS_COMPANY=$(COMPANY_NAME) >> $@
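
The nmake fragment above prefers the three-part JDK_MAJOR_VERSION/JDK_MINOR_VERSION/JDK_MICRO_VERSION variables when all of them are defined and otherwise falls back to JDK_MAJOR_VER/JDK_MINOR_VER; either way, the predicate it computes reduces to the check below (a trivial restatement with a hypothetical helper name).

    // The version test encoded by the makefile: a "1.x" JDK with x < 7 is
    // treated as JDK 6 or earlier and pulls in jdk6_hotspot_distro instead
    // of hotspot_distro.
    bool jdk6_or_earlier(int jdk_major, int jdk_minor) {
      return jdk_major == 1 && jdk_minor < 7;
    }
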
--- a/make/windows/create_obj_files.sh Wed Jun 01 17:09:56 2011 +0100 +++ b/make/windows/create_obj_files.sh Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/makefiles/launcher.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/windows/makefiles/launcher.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/makefiles/sa.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/windows/makefiles/sa.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/makefiles/vm.make Wed Jun 01 17:09:56 2011 +0100 +++ b/make/windows/makefiles/vm.make Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/projectfiles/kernel/Makefile Wed Jun 01 17:09:56 2011 +0100 +++ b/make/windows/projectfiles/kernel/Makefile Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ # -# Copyright (c) 2007, 2010 Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/assembler_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/assembler_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -4257,34 +4257,14 @@ /////////////////////////////////////////////////////////////////////////////////// #ifndef SERIALGC -static uint num_stores = 0; -static uint num_null_pre_stores = 0; - -static void count_null_pre_vals(void* pre_val) { - num_stores++; - if (pre_val == NULL) num_null_pre_stores++; - if ((num_stores % 1000000) == 0) { - tty->print_cr(UINT32_FORMAT " stores, " UINT32_FORMAT " (%5.2f%%) with null pre-vals.", - num_stores, num_null_pre_stores, - 100.0*(float)num_null_pre_stores/(float)num_stores); - } -} - -static address satb_log_enqueue_with_frame = 0; -static u_char* satb_log_enqueue_with_frame_end = 0; - -static address satb_log_enqueue_frameless = 0; -static u_char* satb_log_enqueue_frameless_end = 0; +static address satb_log_enqueue_with_frame = NULL; +static u_char* satb_log_enqueue_with_frame_end = NULL; + +static address satb_log_enqueue_frameless = NULL; +static u_char* satb_log_enqueue_frameless_end = NULL; static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions? -// The calls to this don't work. We'd need to do a fair amount of work to -// make it work. -static void check_index(int ind) { - assert(0 <= ind && ind <= 64*K && ((ind % oopSize) == 0), - "Invariants."); -} - static void generate_satb_log_enqueue(bool with_frame) { BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize); CodeBuffer buf(bb); @@ -4388,13 +4368,27 @@ } } -void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs) { - assert(offset == 0 || index == noreg, "choose one"); - - if (G1DisablePreBarrier) return; - // satb_log_barrier(tmp, obj, offset, preserve_o_regs); +void MacroAssembler::g1_write_barrier_pre(Register obj, + Register index, + int offset, + Register pre_val, + Register tmp, + bool preserve_o_regs) { Label filtered; - // satb_log_barrier_work0(tmp, filtered); + + if (obj == noreg) { + // We are not loading the previous value so make + // sure that we don't trash the value in pre_val + // with the code below. + assert_different_registers(pre_val, tmp); + } else { + // We will be loading the previous value + // in this code so... + assert(offset == 0 || index == noreg, "choose one"); + assert(pre_val == noreg, "check this code"); + } + + // Is marking active? if (in_bytes(PtrQueue::byte_width_of_active()) == 4) { ld(G2, in_bytes(JavaThread::satb_mark_queue_offset() + @@ -4413,61 +4407,46 @@ br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered); delayed() -> nop(); - // satb_log_barrier_work1(tmp, offset); - if (index == noreg) { - if (Assembler::is_simm13(offset)) { - load_heap_oop(obj, offset, tmp); + // Do we need to load the previous value? + if (obj != noreg) { + // Load the previous value... + if (index == noreg) { + if (Assembler::is_simm13(offset)) { + load_heap_oop(obj, offset, tmp); + } else { + set(offset, tmp); + load_heap_oop(obj, tmp, tmp); + } } else { - set(offset, tmp); - load_heap_oop(obj, tmp, tmp); + load_heap_oop(obj, index, tmp); } - } else { - load_heap_oop(obj, index, tmp); + // Previous value has been loaded into tmp + pre_val = tmp; } - // satb_log_barrier_work2(obj, tmp, offset); - - // satb_log_barrier_work3(tmp, filtered, preserve_o_regs); - - const Register pre_val = tmp; - - if (G1SATBBarrierPrintNullPreVals) { - save_frame(0); - mov(pre_val, O0); - // Save G-regs that target may use. 
- mov(G1, L1); - mov(G2, L2); - mov(G3, L3); - mov(G4, L4); - mov(G5, L5); - call(CAST_FROM_FN_PTR(address, &count_null_pre_vals)); - delayed()->nop(); - // Restore G-regs that target may have used. - mov(L1, G1); - mov(L2, G2); - mov(L3, G3); - mov(L4, G4); - mov(L5, G5); - restore(G0, G0, G0); - } - + assert(pre_val != noreg, "must have a real register"); + + // Is the previous value null? // Check on whether to annul. br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, pre_val, filtered); delayed() -> nop(); // OK, it's not filtered, so we'll need to call enqueue. In the normal - // case, pre_val will be a scratch G-reg, but there's some cases in which - // it's an O-reg. In the first case, do a normal call. In the latter, - // do a save here and call the frameless version. + // case, pre_val will be a scratch G-reg, but there are some cases in + // which it's an O-reg. In the first case, do a normal call. In the + // latter, do a save here and call the frameless version. guarantee(pre_val->is_global() || pre_val->is_out(), "Or we need to think harder."); + if (pre_val->is_global() && !preserve_o_regs) { - generate_satb_log_enqueue_if_necessary(true); // with frame. + generate_satb_log_enqueue_if_necessary(true); // with frame + call(satb_log_enqueue_with_frame); delayed()->mov(pre_val, O0); } else { - generate_satb_log_enqueue_if_necessary(false); // with frameless. + generate_satb_log_enqueue_if_necessary(false); // frameless + save_frame(0); call(satb_log_enqueue_frameless); delayed()->mov(pre_val->after_save(), O0); @@ -4614,7 +4593,6 @@ MacroAssembler* post_filter_masm = this; if (new_val == G0) return; - if (G1DisablePostBarrier) return; G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set(); assert(bs->kind() == BarrierSet::G1SATBCT || @@ -4626,6 +4604,7 @@ #else srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp); #endif + if (G1PrintCTFilterStats) { guarantee(tmp->is_global(), "Or stats won't work..."); // This is a sleazy hack: I'm temporarily hijacking G2, which I
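
The reworked g1_write_barrier_pre above takes an explicit pre_val register: when obj is noreg the caller has already loaded the previous value (as the Reference.get intrinsic does), otherwise the barrier loads it from obj/index/offset. Stripped of SPARC register handling, the filtering and enqueue logic corresponds roughly to the sketch below; this is illustrative pseudocode with placeholder types and queue fields, not HotSpot code.

    // Minimal sketch of the G1 SATB pre-barrier logic (placeholders throughout).
    #include <cstddef>

    typedef void* oop;

    struct SATBQueue {
      bool   active;   // is concurrent marking in progress?
      size_t index;    // byte offset of the next free slot, counting down; 0 == full
      oop*   buffer;   // thread-local log of previous field values
    };

    void g1_pre_barrier(oop* field, oop pre_val, SATBQueue* q,
                        void (*runtime_enqueue)(oop)) {
      if (!q->active) return;              // marking inactive: nothing to record
      if (field != NULL) pre_val = *field; // "do_load" case: read the old value
      if (pre_val == NULL) return;         // null previous values are filtered out
      if (q->index == 0) {                 // buffer full: hand off to the VM runtime
        runtime_enqueue(pre_val);
      } else {
        q->index -= sizeof(oop);           // bump-down index, as in the assembly
        q->buffer[q->index / sizeof(oop)] = pre_val;
      }
    }
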
--- a/src/cpu/sparc/vm/assembler_sparc.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/assembler_sparc.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -2210,15 +2210,11 @@ void card_write_barrier_post(Register store_addr, Register new_val, Register tmp); #ifndef SERIALGC - // Array store and offset - void g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs); - + // General G1 pre-barrier generator. + void g1_write_barrier_pre(Register obj, Register index, int offset, Register pre_val, Register tmp, bool preserve_o_regs); + + // General G1 post-barrier generator void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp); - - // May do filtering, depending on the boolean arguments. - void g1_card_table_write(jbyte* byte_map_base, - Register tmp, Register obj, Register new_val, - bool region_filter, bool null_filter); #endif // SERIALGC // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -408,13 +408,20 @@ #ifndef SERIALGC void G1PreBarrierStub::emit_code(LIR_Assembler* ce) { + // At this point we know that marking is in progress. + // If do_load() is true then we have to emit the + // load of the previous value; otherwise it has already + // been loaded into _pre_val. + __ bind(_entry); assert(pre_val()->is_register(), "Precondition."); - Register pre_val_reg = pre_val()->as_register(); - ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/); + if (do_load()) { + ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/); + } + if (__ is_in_wdisp16_range(_continuation)) { __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt, pre_val_reg, _continuation); @@ -431,6 +438,96 @@ } +void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) { + // At this point we know that offset == referent_offset. + // + // So we might have to emit: + // if (src == null) goto continuation. + // + // and we definitely have to emit: + // if (klass(src).reference_type == REF_NONE) goto continuation + // if (!marking_active) goto continuation + // if (pre_val == null) goto continuation + // call pre_barrier(pre_val) + // goto continuation + // + __ bind(_entry); + + assert(src()->is_register(), "sanity"); + Register src_reg = src()->as_register(); + + if (gen_src_check()) { + // The original src operand was not a constant. + // Generate src == null? + if (__ is_in_wdisp16_range(_continuation)) { + __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt, + src_reg, _continuation); + } else { + __ cmp(src_reg, G0); + __ brx(Assembler::equal, false, Assembler::pt, _continuation); + } + __ delayed()->nop(); + } + + // Generate src->_klass->_reference_type() == REF_NONE)? + assert(tmp()->is_register(), "sanity"); + Register tmp_reg = tmp()->as_register(); + + __ load_klass(src_reg, tmp_reg); + + Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc)); + __ ld(ref_type_adr, tmp_reg); + + if (__ is_in_wdisp16_range(_continuation)) { + __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt, + tmp_reg, _continuation); + } else { + __ cmp(tmp_reg, G0); + __ brx(Assembler::equal, false, Assembler::pt, _continuation); + } + __ delayed()->nop(); + + // Is marking active? + assert(thread()->is_register(), "precondition"); + Register thread_reg = thread()->as_pointer_register(); + + Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() + + PtrQueue::byte_offset_of_active())); + + if (in_bytes(PtrQueue::byte_width_of_active()) == 4) { + __ ld(in_progress, tmp_reg); + } else { + assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption"); + __ ldsb(in_progress, tmp_reg); + } + if (__ is_in_wdisp16_range(_continuation)) { + __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt, + tmp_reg, _continuation); + } else { + __ cmp(tmp_reg, G0); + __ brx(Assembler::equal, false, Assembler::pt, _continuation); + } + __ delayed()->nop(); + + // val == null? 
+ assert(val()->is_register(), "Precondition."); + Register val_reg = val()->as_register(); + + if (__ is_in_wdisp16_range(_continuation)) { + __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt, + val_reg, _continuation); + } else { + __ cmp(val_reg, G0); + __ brx(Assembler::equal, false, Assembler::pt, _continuation); + } + __ delayed()->nop(); + + __ call(Runtime1::entry_for(Runtime1::Runtime1::g1_pre_barrier_slow_id)); + __ delayed()->mov(val_reg, G4); + __ br(Assembler::always, false, Assembler::pt, _continuation); + __ delayed()->nop(); +} + jbyte* G1PostBarrierStub::_byte_map_base = NULL; jbyte* G1PostBarrierStub::byte_map_base_slow() {
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -2058,6 +2058,13 @@ BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; if (basic_type == T_ARRAY) basic_type = T_OBJECT; +#ifdef _LP64 + // higher 32bits must be null + __ sra(dst_pos, 0, dst_pos); + __ sra(src_pos, 0, src_pos); + __ sra(length, 0, length); +#endif + // set up the arraycopy stub information ArrayCopyStub* stub = op->stub(); @@ -2065,20 +2072,36 @@ // the known type isn't loaded since the code sanity checks // in debug mode and the type isn't required when we know the exact type // also check that the type is an array type. - // We also, for now, always call the stub if the barrier set requires a - // write_ref_pre barrier (which the stub does, but none of the optimized - // cases currently does). - if (op->expected_type() == NULL || - Universe::heap()->barrier_set()->has_write_ref_pre_barrier()) { + if (op->expected_type() == NULL) { __ mov(src, O0); __ mov(src_pos, O1); __ mov(dst, O2); __ mov(dst_pos, O3); __ mov(length, O4); - __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy)); - - __ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry()); - __ delayed()->nop(); + address copyfunc_addr = StubRoutines::generic_arraycopy(); + + if (copyfunc_addr == NULL) { // Use C version if stub was not generated + __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy)); + } else { +#ifndef PRODUCT + if (PrintC1Statistics) { + address counter = (address)&Runtime1::_generic_arraycopystub_cnt; + __ inc_counter(counter, G1, G3); + } +#endif + __ call_VM_leaf(tmp, copyfunc_addr); + } + + if (copyfunc_addr != NULL) { + __ xor3(O0, -1, tmp); + __ sub(length, tmp, length); + __ add(src_pos, tmp, src_pos); + __ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry()); + __ delayed()->add(dst_pos, tmp, dst_pos); + } else { + __ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry()); + __ delayed()->nop(); + } __ bind(*stub->continuation()); return; } @@ -2135,20 +2158,137 @@ __ delayed()->nop(); } + int shift = shift_amount(basic_type); + if (flags & LIR_OpArrayCopy::type_check) { - if (UseCompressedOops) { - // We don't need decode because we just need to compare - __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp); - __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2); - __ cmp(tmp, tmp2); - __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry()); + // We don't know the array types are compatible + if (basic_type != T_OBJECT) { + // Simple test for basic type arrays + if (UseCompressedOops) { + // We don't need decode because we just need to compare + __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp); + __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2); + __ cmp(tmp, tmp2); + __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry()); + } else { + __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp); + __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2); + __ cmp(tmp, tmp2); + __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry()); + } + __ delayed()->nop(); } else { - __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp); - __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2); - __ cmp(tmp, tmp2); - __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry()); + // For object arrays, if src is a sub class of dst then we can + // safely do the copy. 
+ address copyfunc_addr = StubRoutines::checkcast_arraycopy(); + + Label cont, slow; + assert_different_registers(tmp, tmp2, G3, G1); + + __ load_klass(src, G3); + __ load_klass(dst, G1); + + __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL); + + __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); + __ delayed()->nop(); + + __ cmp(G3, 0); + if (copyfunc_addr != NULL) { // use stub if available + // src is not a sub class of dst so we have to do a + // per-element check. + __ br(Assembler::notEqual, false, Assembler::pt, cont); + __ delayed()->nop(); + + __ bind(slow); + + int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; + if ((flags & mask) != mask) { + // Check that at least both of them object arrays. + assert(flags & mask, "one of the two should be known to be an object array"); + + if (!(flags & LIR_OpArrayCopy::src_objarray)) { + __ load_klass(src, tmp); + } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { + __ load_klass(dst, tmp); + } + int lh_offset = klassOopDesc::header_size() * HeapWordSize + + Klass::layout_helper_offset_in_bytes(); + + __ lduw(tmp, lh_offset, tmp2); + + jint objArray_lh = Klass::array_layout_helper(T_OBJECT); + __ set(objArray_lh, tmp); + __ cmp(tmp, tmp2); + __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry()); + __ delayed()->nop(); + } + + Register src_ptr = O0; + Register dst_ptr = O1; + Register len = O2; + Register chk_off = O3; + Register super_k = O4; + + __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr); + if (shift == 0) { + __ add(src_ptr, src_pos, src_ptr); + } else { + __ sll(src_pos, shift, tmp); + __ add(src_ptr, tmp, src_ptr); + } + + __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr); + if (shift == 0) { + __ add(dst_ptr, dst_pos, dst_ptr); + } else { + __ sll(dst_pos, shift, tmp); + __ add(dst_ptr, tmp, dst_ptr); + } + __ mov(length, len); + __ load_klass(dst, tmp); + + int ek_offset = (klassOopDesc::header_size() * HeapWordSize + + objArrayKlass::element_klass_offset_in_bytes()); + __ ld_ptr(tmp, ek_offset, super_k); + + int sco_offset = (klassOopDesc::header_size() * HeapWordSize + + Klass::super_check_offset_offset_in_bytes()); + __ lduw(super_k, sco_offset, chk_off); + + __ call_VM_leaf(tmp, copyfunc_addr); + +#ifndef PRODUCT + if (PrintC1Statistics) { + Label failed; + __ br_notnull(O0, false, Assembler::pn, failed); + __ delayed()->nop(); + __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3); + __ bind(failed); + } +#endif + + __ br_null(O0, false, Assembler::pt, *stub->continuation()); + __ delayed()->xor3(O0, -1, tmp); + +#ifndef PRODUCT + if (PrintC1Statistics) { + __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3); + } +#endif + + __ sub(length, tmp, length); + __ add(src_pos, tmp, src_pos); + __ br(Assembler::always, false, Assembler::pt, *stub->entry()); + __ delayed()->add(dst_pos, tmp, dst_pos); + + __ bind(cont); + } else { + __ br(Assembler::equal, false, Assembler::pn, *stub->entry()); + __ delayed()->nop(); + __ bind(cont); + } } - __ delayed()->nop(); } #ifdef ASSERT @@ -2207,14 +2347,18 @@ } #endif - int shift = shift_amount(basic_type); +#ifndef PRODUCT + if (PrintC1Statistics) { + address counter = Runtime1::arraycopy_count_address(basic_type); + __ inc_counter(counter, G1, G3); + } +#endif Register src_ptr = O0; Register dst_ptr = O1; Register len = O2; __ add(src, 
arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr); - LP64_ONLY(__ sra(src_pos, 0, src_pos);) //higher 32bits must be null if (shift == 0) { __ add(src_ptr, src_pos, src_ptr); } else { @@ -2223,7 +2367,6 @@ } __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr); - LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) //higher 32bits must be null if (shift == 0) { __ add(dst_ptr, dst_pos, dst_ptr); } else { @@ -2231,18 +2374,14 @@ __ add(dst_ptr, tmp, dst_ptr); } - if (basic_type != T_OBJECT) { - if (shift == 0) { - __ mov(length, len); - } else { - __ sll(length, shift, len); - } - __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy)); - } else { - // oop_arraycopy takes a length in number of elements, so don't scale it. - __ mov(length, len); - __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy)); - } + bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; + bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; + const char *name; + address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); + + // arraycopy stubs takes a length in number of elements, so don't scale it. + __ mov(length, len); + __ call_VM_leaf(tmp, entry); __ bind(*stub->continuation()); }
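
The xor3(O0, -1, tmp) sequences above decode the arraycopy stub's return value: the stubs return 0 when every element was copied and otherwise the bitwise complement of the number of elements copied before stopping (for example, on a failed element-type check). Under that convention, the adjustment around the slow-path branch amounts to the following sketch (hypothetical helper names).

    // Sketch of the stub-result handling: on a partial copy, advance past the
    // prefix that succeeded and let the slow path finish or throw.
    void handle_arraycopy_stub_result(int rc, int& src_pos, int& dst_pos, int& length,
                                      void (*slow_path)(int src_pos, int dst_pos, int length)) {
      if (rc == 0) return;        // everything was copied
      int copied = ~rc;           // this is what xor3(O0, -1, tmp) computes
      src_pos += copied;
      dst_pos += copied;
      length  -= copied;
      slow_path(src_pos, dst_pos, length);
    }
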
--- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -387,7 +387,8 @@ if (obj_store) { // Needs GC write barriers. - pre_barrier(LIR_OprFact::address(array_addr), false, NULL); + pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */, + true /* do_load */, false /* patch */, NULL); } __ move(value.result(), array_addr, null_check_info); if (obj_store) { @@ -687,7 +688,8 @@ __ add(obj.result(), offset.result(), addr); if (type == objectType) { // Write-barrier needed for Object fields. - pre_barrier(addr, false, NULL); + pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */, + true /* do_load */, false /* patch */, NULL); } if (type == objectType) @@ -1187,7 +1189,8 @@ } if (is_obj) { - pre_barrier(LIR_OprFact::address(addr), false, NULL); + pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */, + true /* do_load */, false /* patch */, NULL); // _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr)); } __ move(data, addr);
--- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -387,7 +387,7 @@ void C1_MacroAssembler::verify_not_null_oop(Register r) { Label not_null; - br_zero(Assembler::notEqual, false, Assembler::pt, r, not_null); + br_notnull(r, false, Assembler::pt, not_null); delayed()->nop(); stop("non-null oop required"); bind(not_null);
--- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -551,6 +551,26 @@ return NULL; } +address InterpreterGenerator::generate_Reference_get_entry(void) { +#ifndef SERIALGC + if (UseG1GC) { + // We need to generate have a routine that generates code to: + // * load the value in the referent field + // * passes that value to the pre-barrier. + // + // In the case of G1 this will record the value of the + // referent in an SATB buffer if marking is active. + // This will cause concurrent marking to mark the referent + // field as live. + Unimplemented(); + } +#endif // SERIALGC + + // If G1 is not enabled then attempt to go through the accessor entry point + // Reference.get is an accessor + return generate_accessor_entry(); +} + // // Interpreter stub for calling a native method. (C++ interpreter) // This sets up a somewhat different looking stack for calling the native method @@ -2156,6 +2176,7 @@ int tempcount, // Number of slots on java expression stack in use int popframe_extra_args, int moncount, // Number of active monitors + int caller_actual_parameters, int callee_param_size, int callee_locals_size, frame* caller,
--- a/src/cpu/sparc/vm/dump_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/dump_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/frame_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/frame_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -806,3 +806,34 @@ int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize) - 1; return &interpreter_frame_tos_address()[index]; } + + +#ifdef ASSERT + +#define DESCRIBE_FP_OFFSET(name) \ + values.describe(frame_no, fp() + frame::name##_offset, #name) + +void frame::describe_pd(FrameValues& values, int frame_no) { + for (int w = 0; w < frame::register_save_words; w++) { + values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1); + } + + if (is_interpreted_frame()) { + DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp); + DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp); + DESCRIBE_FP_OFFSET(interpreter_frame_padding); + DESCRIBE_FP_OFFSET(interpreter_frame_oop_temp); + } + + if (!is_compiled_frame()) { + if (frame::callee_aggregate_return_pointer_words != 0) { + values.describe(frame_no, sp() + frame::callee_aggregate_return_pointer_sp_offset, "callee_aggregate_return_pointer_word"); + } + for (int w = 0; w < frame::callee_register_argument_save_area_words; w++) { + values.describe(frame_no, sp() + frame::callee_register_argument_save_area_sp_offset + w, + err_msg("callee_register_argument_save_area_words %d", w)); + } + } +} + +#endif
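
The DESCRIBE_FP_OFFSET macro added above combines token pasting and stringizing, so a single argument names both the offset constant and its human-readable label. For example:

    // Expansion of one invocation of the macro defined above:
    DESCRIBE_FP_OFFSET(interpreter_frame_oop_temp);
    // becomes
    values.describe(frame_no, fp() + frame::interpreter_frame_oop_temp_offset,
                    "interpreter_frame_oop_temp");
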
--- a/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,7 @@ address generate_math_entry(AbstractInterpreter::MethodKind kind); address generate_empty_entry(void); address generate_accessor_entry(void); + address generate_Reference_get_entry(void); void lock_method(void); void save_native_result(void); void restore_native_result(void);
--- a/src/cpu/sparc/vm/interpreter_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/interpreter_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -407,6 +407,8 @@ case Interpreter::java_lang_math_abs : break; case Interpreter::java_lang_math_log : break; case Interpreter::java_lang_math_log10 : break; + case Interpreter::java_lang_ref_reference_get + : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; default : ShouldNotReachHere(); break; } @@ -421,25 +423,6 @@ return true; } -// This method tells the deoptimizer how big an interpreted frame must be: -int AbstractInterpreter::size_activation(methodOop method, - int tempcount, - int popframe_extra_args, - int moncount, - int callee_param_count, - int callee_locals, - bool is_top_frame) { - return layout_activation(method, - tempcount, - popframe_extra_args, - moncount, - callee_param_count, - callee_locals, - (frame*)NULL, - (frame*)NULL, - is_top_frame); -} - void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) { // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
--- a/src/cpu/sparc/vm/jni_sparc.h Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/jni_sparc.h Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -142,18 +142,8 @@ Register O2_form = O2_scratch; Register O3_adapter = O3_scratch; __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O2_form); - // load_heap_oop(Address(O2_form, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter); - // deal with old JDK versions: - __ add( Address(O2_form, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter); - __ cmp(O3_adapter, O2_form); - Label sorry_no_invoke_generic; - __ brx(Assembler::lessUnsigned, false, Assembler::pn, sorry_no_invoke_generic); - __ delayed()->nop(); - - __ load_heap_oop(Address(O3_adapter, 0), O3_adapter); - __ tst(O3_adapter); - __ brx(Assembler::zero, false, Assembler::pn, sorry_no_invoke_generic); - __ delayed()->nop(); + __ load_heap_oop(Address(O2_form, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter); + __ verify_oop(O3_adapter); __ st_ptr(O3_adapter, Address(O4_argbase, 1 * Interpreter::stackElementSize)); // As a trusted first argument, pass the type being called, so the adapter knows // the actual types of the arguments and return values. @@ -164,12 +154,6 @@ trace_method_handle(_masm, "invokeGeneric"); __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); - __ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available! - __ mov(O0_mtype, G5_method_type); // required by throw_WrongMethodType - // mov(G3_method_handle, G3_method_handle); // already in this register - __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch); - __ delayed()->nop(); - return entry_point; } @@ -350,8 +334,9 @@ #ifndef PRODUCT extern "C" void print_method_handle(oop mh); void trace_method_handle_stub(const char* adaptername, - oopDesc* mh) { - printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh); + oopDesc* mh, + intptr_t* saved_sp) { + tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp); print_method_handle(mh); } void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) { @@ -361,6 +346,7 @@ __ save_frame(16); __ set((intptr_t) adaptername, O0); __ mov(G3_method_handle, O1); + __ mov(I5_savedSP, O2); __ mov(G3_method_handle, L3); __ mov(Gargs, L4); __ mov(G5_method_type, L5); @@ -486,7 +472,7 @@ if (ek == _invokespecial_mh) { // Must load & check the first argument before entering the target method. __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch); - __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle); + __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle); __ null_check(G3_method_handle); __ verify_oop(G3_method_handle); } @@ -643,9 +629,10 @@ // Live at this point: // - G5_klass : klass required by the target method + // - O0_argslot : argslot index in vmarg; may be required in the failing path // - O1_scratch : argument klass to test // - G3_method_handle: adapter method handle - __ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done); + __ check_klass_subtype(O1_scratch, G5_klass, O2_scratch, O3_scratch, done); // If we get here, the type check failed! __ load_heap_oop(G3_amh_argument, O2_required); // required class
--- a/src/cpu/sparc/vm/nativeInst_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/nativeInst_sparc.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/nativeInst_sparc.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/relocInfo_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/relocInfo_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -763,6 +763,87 @@ return NULL; } +// Method entry for java.lang.ref.Reference.get. +address InterpreterGenerator::generate_Reference_get_entry(void) { +#ifndef SERIALGC + // Code: _aload_0, _getfield, _areturn + // parameter size = 1 + // + // The code that gets generated by this routine is split into 2 parts: + // 1. The "intrinsified" code for G1 (or any SATB based GC), + // 2. The slow path - which is an expansion of the regular method entry. + // + // Notes:- + // * In the G1 code we do not check whether we need to block for + // a safepoint. If G1 is enabled then we must execute the specialized + // code for Reference.get (except when the Reference object is null) + // so that we can log the value in the referent field with an SATB + // update buffer. + // If the code for the getfield template is modified so that the + // G1 pre-barrier code is executed when the current method is + // Reference.get() then going through the normal method entry + // will be fine. + // * The G1 code can, however, check the receiver object (the instance + // of java.lang.Reference) and jump to the slow path if null. If the + // Reference object is null then we obviously cannot fetch the referent + // and so we don't need to call the G1 pre-barrier. Thus we can use the + // regular method entry code to generate the NPE. + // + // This code is based on generate_accessor_enty. + + address entry = __ pc(); + + const int referent_offset = java_lang_ref_Reference::referent_offset; + guarantee(referent_offset > 0, "referent offset not initialized"); + + if (UseG1GC) { + Label slow_path; + + // In the G1 code we don't check if we need to reach a safepoint. We + // continue and the thread will safepoint at the next bytecode dispatch. + + // Check if local 0 != NULL + // If the receiver is null then it is OK to jump to the slow path. + __ ld_ptr(Gargs, G0, Otos_i ); // get local 0 + __ tst(Otos_i); // check if local 0 == NULL and go the slow path + __ brx(Assembler::zero, false, Assembler::pn, slow_path); + __ delayed()->nop(); + + + // Load the value of the referent field. + if (Assembler::is_simm13(referent_offset)) { + __ load_heap_oop(Otos_i, referent_offset, Otos_i); + } else { + __ set(referent_offset, G3_scratch); + __ load_heap_oop(Otos_i, G3_scratch, Otos_i); + } + + // Generate the G1 pre-barrier code to log the value of + // the referent field in an SATB buffer. Note with + // these parameters the pre-barrier does not generate + // the load of the previous value + + __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */, + Otos_i /* pre_val */, + G3_scratch /* tmp */, + true /* preserve_o_regs */); + + // _areturn + __ retl(); // return from leaf routine + __ delayed()->mov(O5_savedSP, SP); + + // Generate regular method entry + __ bind(slow_path); + (void) generate_normal_entry(false); + return entry; + } +#endif // SERIALGC + + // If G1 is not enabled then attempt to go through the accessor entry point + // Reference.get is an accessor + return generate_accessor_entry(); +} + // // Interpreter stub for calling a native method. 
(asm interpreter) // This sets up a somewhat different looking stack for calling the native method @@ -1542,6 +1623,7 @@ int tempcount, int popframe_extra_args, int moncount, + int caller_actual_parameters, int callee_param_count, int callee_local_count, frame* caller, @@ -1617,7 +1699,6 @@ popframe_extra_args; int local_words = method->max_locals() * Interpreter::stackElementWords; - int parm_words = method->size_of_parameters() * Interpreter::stackElementWords; NEEDS_CLEANUP; intptr_t* locals; if (caller->is_interpreted_frame()) { @@ -1625,6 +1706,7 @@ intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1; // Note that this computation means we replace size_of_parameters() values from the caller // interpreter frame's expression stack with our argument locals + int parm_words = caller_actual_parameters * Interpreter::stackElementWords; locals = Lesp_ptr + parm_words; int delta = local_words - parm_words; int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
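
The new template-interpreter entry above intrinsifies java.lang.ref.Reference.get when G1 is in use: it null-checks the receiver, loads the referent field, and passes the loaded value to g1_write_barrier_pre as pre_val (with obj == noreg, so the barrier emits no load of its own). Reduced to its shape, the generated path behaves like the sketch below; the helper declarations are placeholders, not HotSpot identifiers.

    // Illustrative shape of the intrinsified Reference.get entry.
    #include <cstddef>

    typedef void* oop;
    extern int  referent_offset;             // java_lang_ref_Reference::referent_offset
    extern void g1_pre_barrier(oop pre_val); // SATB enqueue of an already-loaded value
    extern oop  normal_entry(oop receiver);  // the regular (slow path) method entry

    oop Reference_get_entry(oop receiver) {
      if (receiver == NULL) {
        return normal_entry(receiver);       // slow path; it also raises the NPE
      }
      oop referent = *(oop*)((char*)receiver + referent_offset);
      // Record the referent in an SATB buffer so concurrent marking treats it
      // as live, even though Reference.get only reads the field.
      g1_pre_barrier(referent);
      return referent;
    }
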
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -57,7 +57,11 @@ case BarrierSet::G1SATBCT: case BarrierSet::G1SATBCTLogging: { - __ g1_write_barrier_pre( base, index, offset, tmp, /*preserve_o_regs*/true); + // Load and record the previous value. + __ g1_write_barrier_pre(base, index, offset, + noreg /* pre_val */, + tmp, true /*preserve_o_regs*/); + if (index == noreg ) { assert(Assembler::is_simm13(offset), "fix this code"); __ store_heap_oop(val, base, offset); @@ -3289,8 +3293,6 @@ /*virtual*/ false, /*vfinal*/ false, /*indy*/ true); __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore - __ verify_oop(G5_callsite); - // profile this call __ profile_call(O4); @@ -3303,8 +3305,10 @@ __ sll(Rret, LogBytesPerWord, Rret); __ ld_ptr(Rtemp, Rret, Rret); // get return address + __ verify_oop(G5_callsite); __ load_heap_oop(G5_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle); __ null_check(G3_method_handle); + __ verify_oop(G3_method_handle); // Adjust Rret first so Llast_SP can be same as Rret __ add(Rret, -frame::pc_return_offset, O7);
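
In the store helper above, the G1 case now calls g1_write_barrier_pre with pre_val == noreg, so the barrier itself loads and logs the value about to be overwritten before store_heap_oop performs the store; the matching post-barrier (the remembered-set update, not visible in this hunk) forms the other half of the G1 store path. As a reading aid, the overall pattern for a reference-field store under G1 is sketched below with placeholder helpers.

    // Sketch of the G1 barrier pattern around a reference store (placeholders).
    typedef void* oop;
    extern void pre_barrier(oop* field);               // SATB: log the old value
    extern void post_barrier(oop* field, oop new_val); // remembered set: dirty the card

    void store_reference_field(oop* field, oop new_val) {
      pre_barrier(field);           // records *field if marking is active
      *field = new_val;             // the actual store (store_heap_oop)
      post_barrier(field, new_val); // tracks cross-region references
    }
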
--- a/src/cpu/x86/vm/assembler_x86.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/assembler_x86.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -2317,7 +2317,7 @@ } void Assembler::prefetchr(Address src) { - NOT_LP64(assert(VM_Version::supports_3dnow(), "must support")); + NOT_LP64(assert(VM_Version::supports_3dnow_prefetch(), "must support")); InstructionMark im(this); prefetch_prefix(src); emit_byte(0x0D); @@ -2349,7 +2349,7 @@ } void Assembler::prefetchw(Address src) { - NOT_LP64(assert(VM_Version::supports_3dnow(), "must support")); + NOT_LP64(assert(VM_Version::supports_3dnow_prefetch(), "must support")); InstructionMark im(this); prefetch_prefix(src); emit_byte(0x0D); @@ -6039,6 +6039,43 @@ call_VM_leaf(entry_point, 3); } +void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { + pass_arg0(this, arg_0); + MacroAssembler::call_VM_leaf_base(entry_point, 1); +} + +void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { + + LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg")); + pass_arg1(this, arg_1); + pass_arg0(this, arg_0); + MacroAssembler::call_VM_leaf_base(entry_point, 2); +} + +void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { + LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg")); + LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); + pass_arg2(this, arg_2); + LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg")); + pass_arg1(this, arg_1); + pass_arg0(this, arg_0); + MacroAssembler::call_VM_leaf_base(entry_point, 3); +} + +void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { + LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg")); + LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg")); + LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg")); + pass_arg3(this, arg_3); + LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg")); + LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); + pass_arg2(this, arg_2); + LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg")); + pass_arg1(this, arg_1); + pass_arg0(this, arg_0); + MacroAssembler::call_VM_leaf_base(entry_point, 4); +} + void MacroAssembler::check_and_handle_earlyret(Register java_thread) { } @@ -6902,26 +6939,39 @@ #ifndef SERIALGC void MacroAssembler::g1_write_barrier_pre(Register obj, -#ifndef _LP64 + Register pre_val, Register thread, -#endif Register tmp, - Register tmp2, - bool tosca_live) { - LP64_ONLY(Register thread = r15_thread;) + bool tosca_live, + bool expand_call) { + + // If expand_call is true then we expand the call_VM_leaf macro + // directly to skip generating the check by + // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp. + +#ifdef _LP64 + assert(thread == r15_thread, "must be"); +#endif // _LP64 + + Label done; + Label runtime; + + assert(pre_val != noreg, "check this code"); + + if (obj != noreg) { + assert_different_registers(obj, pre_val, tmp); + assert(pre_val != rax, "check this code"); + } + Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active())); - Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index())); Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_buf())); - Label done; - Label runtime; - - // if (!marking_in_progress) goto done; + // Is marking active? 
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) { cmpl(in_progress, 0); } else { @@ -6930,65 +6980,92 @@ } jcc(Assembler::equal, done); - // if (x.f == NULL) goto done; -#ifdef _LP64 - load_heap_oop(tmp2, Address(obj, 0)); -#else - movptr(tmp2, Address(obj, 0)); -#endif - cmpptr(tmp2, (int32_t) NULL_WORD); + // Do we need to load the previous value? + if (obj != noreg) { + load_heap_oop(pre_val, Address(obj, 0)); + } + + // Is the previous value null? + cmpptr(pre_val, (int32_t) NULL_WORD); jcc(Assembler::equal, done); // Can we store original value in the thread's buffer? - -#ifdef _LP64 - movslq(tmp, index); - cmpq(tmp, 0); -#else - cmpl(index, 0); -#endif - jcc(Assembler::equal, runtime); -#ifdef _LP64 - subq(tmp, wordSize); - movl(index, tmp); - addq(tmp, buffer); -#else - subl(index, wordSize); - movl(tmp, buffer); - addl(tmp, index); -#endif - movptr(Address(tmp, 0), tmp2); + // Is index == 0? + // (The index field is typed as size_t.) + + movptr(tmp, index); // tmp := *index_adr + cmpptr(tmp, 0); // tmp == 0? + jcc(Assembler::equal, runtime); // If yes, goto runtime + + subptr(tmp, wordSize); // tmp := tmp - wordSize + movptr(index, tmp); // *index_adr := tmp + addptr(tmp, buffer); // tmp := tmp + *buffer_adr + + // Record the previous value + movptr(Address(tmp, 0), pre_val); jmp(done); + bind(runtime); // save the live input values if(tosca_live) push(rax); - push(obj); -#ifdef _LP64 - call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, r15_thread); -#else - push(thread); - call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, thread); - pop(thread); -#endif - pop(obj); + + if (obj != noreg && obj != rax) + push(obj); + + if (pre_val != rax) + push(pre_val); + + // Calling the runtime using the regular call_VM_leaf mechanism generates + // code (generated by InterpreterMacroAssembler::call_VM_leaf_base) + // that checks that *(ebp+frame::interpreter_frame_last_sp) == NULL. + // + // If we are generating the pre-barrier without a frame (e.g. in the + // intrinsified Reference.get() routine) then ebp might be pointing to + // the caller frame and so this check will most likely fail at runtime. + // + // Expanding the call directly bypasses the generation of the check. + // So when we do not have a full interpreter frame on the stack + // expand_call should be passed true. 
+ + NOT_LP64( push(thread); ) + + if (expand_call) { + LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); ) + pass_arg1(this, thread); + pass_arg0(this, pre_val); + MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2); + } else { + call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread); + } + + NOT_LP64( pop(thread); ) + + // restore the live input values + if (pre_val != rax) + pop(pre_val); + + if (obj != noreg && obj != rax) + pop(obj); + if(tosca_live) pop(rax); + bind(done); - } void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, -#ifndef _LP64 Register thread, -#endif Register tmp, Register tmp2) { - - LP64_ONLY(Register thread = r15_thread;) +#ifdef _LP64 + assert(thread == r15_thread, "must be"); +#endif // _LP64 + Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index())); Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_buf())); + BarrierSet* bs = Universe::heap()->barrier_set(); CardTableModRefBS* ct = (CardTableModRefBS*)bs; Label done; @@ -7067,7 +7144,6 @@ pop(store_addr); bind(done); - } #endif // SERIALGC @@ -7941,12 +8017,12 @@ #endif push(rax); // save rax, // addr may contain rsp so we will have to adjust it based on the push - // we just did + // we just did (and on 64 bit we do two pushes) // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which // stores rax into addr which is backwards of what was intended. if (addr.uses(rsp)) { lea(rax, addr); - pushptr(Address(rax, BytesPerWord)); + pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord)); } else { pushptr(addr); } @@ -8396,6 +8472,17 @@ movptr(dst, src); } +// Doesn't do verification; generates fixed-size code +void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) { +#ifdef _LP64 + if (UseCompressedOops) { + movl(dst, src); + decode_heap_oop_not_null(dst); + } else +#endif + movptr(dst, src); +} + void MacroAssembler::store_heap_oop(Address dst, Register src) { #ifdef _LP64 if (UseCompressedOops) {
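The buffer fast path emitted above, restated as C-style pseudocode (a sketch; the satb_* accessors are illustrative and the index counts down in bytes, as the PtrQueue comments in the diff note):

    // SATB queue fast path (sketch): try to record pre_val in the thread's
    // buffer; fall back to SharedRuntime::g1_wb_pre when the buffer is full.
    size_t index = satb_index(thread);                  // *index_adr
    if (index == 0) {
      g1_wb_pre_runtime(pre_val, thread);               // slow path, buffer is full
    } else {
      index -= wordSize;                                // claim one slot
      set_satb_index(thread, index);
      *(oop*)(satb_buffer(thread) + index) = pre_val;   // record previous value
    }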
--- a/src/cpu/x86/vm/assembler_x86.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/assembler_x86.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -234,6 +234,20 @@ a._disp += disp; return a; } + Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const { + Address a = (*this); + a._disp += disp.constant_or_zero() * scale_size(scale); + if (disp.is_register()) { + assert(!a.index()->is_valid(), "competing indexes"); + a._index = disp.as_register(); + a._scale = scale; + } + return a; + } + bool is_same_address(Address a) const { + // disregard _rspec + return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale; + } // The following two overloads are used in connection with the // ByteSize type (see sizes.hpp). They simplify the use of @@ -385,10 +399,18 @@ }; class ExternalAddress: public AddressLiteral { - - public: - - ExternalAddress(address target) : AddressLiteral(target, relocInfo::external_word_type){} + private: + static relocInfo::relocType reloc_for_target(address target) { + // Sometimes ExternalAddress is used for values which aren't + // exactly addresses, like the card table base. + // external_word_type can't be used for values in the first page + // so just skip the reloc in that case. + return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none; + } + + public: + + ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {} }; @@ -1445,6 +1467,7 @@ class MacroAssembler: public Assembler { friend class LIR_Assembler; friend class Runtime1; // as_Address() + protected: Address as_Address(AddressLiteral adr); @@ -1646,6 +1669,14 @@ void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); + // These always tightly bind to MacroAssembler::call_VM_leaf_base + // bypassing the virtual implementation + void super_call_VM_leaf(address entry_point); + void super_call_VM_leaf(address entry_point, Register arg_1); + void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2); + void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); + void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4); + // last Java Frame (fills frame anchor) void set_last_Java_frame(Register thread, Register last_java_sp, @@ -1666,21 +1697,22 @@ void store_check(Register obj); // store check for obj - register is destroyed afterwards void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed) +#ifndef SERIALGC + void g1_write_barrier_pre(Register obj, -#ifndef _LP64 + Register pre_val, Register thread, -#endif Register tmp, - Register tmp2, - bool tosca_live); + bool tosca_live, + bool expand_call); + void g1_write_barrier_post(Register store_addr, Register new_val, -#ifndef _LP64 Register thread, -#endif Register tmp, Register tmp2); +#endif // SERIALGC // split store_check(Register obj) to enhance instruction interleaving void store_check_part_1(Register obj); @@ -1701,6 +1733,7 @@ void store_klass(Register dst, Register src); void load_heap_oop(Register dst, Address src); + void load_heap_oop_not_null(Register dst, Address src); void store_heap_oop(Address dst, Register src); // Used for storing NULL. 
All other oop constants should be @@ -2010,6 +2043,10 @@ void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); } void addptr(Register dst, int32_t src); void addptr(Register dst, Register src); + void addptr(Register dst, RegisterOrConstant src) { + if (src.is_constant()) addptr(dst, (int) src.as_constant()); + else addptr(dst, src.as_register()); + } void andptr(Register dst, int32_t src); void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; } @@ -2071,7 +2108,10 @@ void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } void subptr(Register dst, int32_t src); void subptr(Register dst, Register src); - + void subptr(Register dst, RegisterOrConstant src) { + if (src.is_constant()) subptr(dst, (int) src.as_constant()); + else subptr(dst, src.as_register()); + } void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } @@ -2269,6 +2309,11 @@ void movptr(Address dst, Register src); + void movptr(Register dst, RegisterOrConstant src) { + if (src.is_constant()) movptr(dst, src.as_constant()); + else movptr(dst, src.as_register()); + } + #ifdef _LP64 // Generally the next two are only used for moving NULL // Although there are situations in initializing the mark word where
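The RegisterOrConstant overloads declared above let one emitter handle an offset that may be either an assembly-time constant or a register. A hedged usage sketch (function and variable names are made up for illustration):

    // Sketch: 'count' may be a constant or a register; the same code works for both.
    void access_element(MacroAssembler* masm, Register base, RegisterOrConstant count, Register val) {
      // plus_disp folds a constant displacement, or installs the register as a scaled index.
      Address slot = Address(base, 0).plus_disp(count, Address::times_8);
      masm->movptr(val, slot);     // load element number 'count' (8 bytes each)
      masm->addptr(base, count);   // addptr/subptr/movptr also take RegisterOrConstant directly
    }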
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -316,7 +316,9 @@ Register tmp2 = rbx; __ push(tmp); __ push(tmp2); - __ load_heap_oop(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes())); + // Load without verification to keep code size small. We need it because + // begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null. + __ load_heap_oop_not_null(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes())); __ get_thread(tmp); __ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc))); __ pop(tmp2); @@ -464,15 +466,19 @@ #ifndef SERIALGC void G1PreBarrierStub::emit_code(LIR_Assembler* ce) { - - // At this point we know that marking is in progress + // At this point we know that marking is in progress. + // If do_load() is true then we have to emit the + // load of the previous value; otherwise it has already + // been loaded into _pre_val. __ bind(_entry); assert(pre_val()->is_register(), "Precondition."); Register pre_val_reg = pre_val()->as_register(); - ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/); + if (do_load()) { + ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/); + } __ cmpptr(pre_val_reg, (int32_t) NULL_WORD); __ jcc(Assembler::equal, _continuation); @@ -482,6 +488,68 @@ } +void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) { + // At this point we know that offset == referent_offset. + // + // So we might have to emit: + // if (src == null) goto continuation. + // + // and we definitely have to emit: + // if (klass(src).reference_type == REF_NONE) goto continuation + // if (!marking_active) goto continuation + // if (pre_val == null) goto continuation + // call pre_barrier(pre_val) + // goto continuation + // + __ bind(_entry); + + assert(src()->is_register(), "sanity"); + Register src_reg = src()->as_register(); + + if (gen_src_check()) { + // The original src operand was not a constant. + // Generate src == null? + __ cmpptr(src_reg, (int32_t) NULL_WORD); + __ jcc(Assembler::equal, _continuation); + } + + // Generate src->_klass->_reference_type == REF_NONE)? + assert(tmp()->is_register(), "sanity"); + Register tmp_reg = tmp()->as_register(); + + __ load_klass(tmp_reg, src_reg); + + Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc)); + __ cmpl(ref_type_adr, REF_NONE); + __ jcc(Assembler::equal, _continuation); + + // Is marking active? + assert(thread()->is_register(), "precondition"); + Register thread_reg = thread()->as_pointer_register(); + + Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() + + PtrQueue::byte_offset_of_active())); + + if (in_bytes(PtrQueue::byte_width_of_active()) == 4) { + __ cmpl(in_progress, 0); + } else { + assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption"); + __ cmpb(in_progress, 0); + } + __ jcc(Assembler::equal, _continuation); + + // val == null? 
+ assert(val()->is_register(), "Precondition."); + Register val_reg = val()->as_register(); + + __ cmpptr(val_reg, (int32_t) NULL_WORD); + __ jcc(Assembler::equal, _continuation); + + ce->store_parameter(val()->as_register(), 0); + __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id))); + __ jmp(_continuation); +} + jbyte* G1PostBarrierStub::_byte_map_base = NULL; jbyte* G1PostBarrierStub::byte_map_base_slow() {
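Restating the new G1UnsafeGetObjSATBBarrierStub as structured code (a sketch of the checks it emits, with early returns standing in for the jumps to _continuation; helper names are illustrative):

    // Sketch of the stub's decision sequence for Unsafe.getObject on a Reference:
    void unsafe_get_obj_satb_barrier(oop src, oop val, JavaThread* thread) {
      if (src == NULL) return;                                  // only emitted when src may be null
      if (reference_type_of(src->klass()) == REF_NONE) return;  // not a java.lang.ref.Reference subclass
      if (!satb_marking_active(thread)) return;                 // concurrent marking not active
      if (val == NULL) return;                                  // nothing to record
      g1_pre_barrier_slow(val);                                 // Runtime1::g1_pre_barrier_slow_id
    }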
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1401,7 +1401,7 @@ default: ShouldNotReachHere(); break; } - } else if (VM_Version::supports_3dnow()) { + } else if (VM_Version::supports_3dnow_prefetch()) { __ prefetchr(from_addr); } } @@ -1424,7 +1424,7 @@ default: ShouldNotReachHere(); break; } - } else if (VM_Version::supports_3dnow()) { + } else if (VM_Version::supports_3dnow_prefetch()) { __ prefetchw(from_addr); } } @@ -3102,7 +3102,7 @@ BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; if (basic_type == T_ARRAY) basic_type = T_OBJECT; - // if we don't know anything or it's an object array, just go through the generic arraycopy + // if we don't know anything, just go through the generic arraycopy if (default_type == NULL) { Label done; // save outgoing arguments on stack in case call to System.arraycopy is needed @@ -3123,7 +3123,9 @@ store_parameter(src, 4); NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");) - address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy); + address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy); + + address copyfunc_addr = StubRoutines::generic_arraycopy(); // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint #ifdef _LP64 @@ -3141,11 +3143,29 @@ // Allocate abi space for args but be sure to keep stack aligned __ subptr(rsp, 6*wordSize); store_parameter(j_rarg4, 4); - __ call(RuntimeAddress(entry)); + if (copyfunc_addr == NULL) { // Use C version if stub was not generated + __ call(RuntimeAddress(C_entry)); + } else { +#ifndef PRODUCT + if (PrintC1Statistics) { + __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); + } +#endif + __ call(RuntimeAddress(copyfunc_addr)); + } __ addptr(rsp, 6*wordSize); #else __ mov(c_rarg4, j_rarg4); - __ call(RuntimeAddress(entry)); + if (copyfunc_addr == NULL) { // Use C version if stub was not generated + __ call(RuntimeAddress(C_entry)); + } else { +#ifndef PRODUCT + if (PrintC1Statistics) { + __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); + } +#endif + __ call(RuntimeAddress(copyfunc_addr)); + } #endif // _WIN64 #else __ push(length); @@ -3153,13 +3173,28 @@ __ push(dst); __ push(src_pos); __ push(src); - __ call_VM_leaf(entry, 5); // removes pushed parameter from the stack + + if (copyfunc_addr == NULL) { // Use C version if stub was not generated + __ call_VM_leaf(C_entry, 5); // removes pushed parameter from the stack + } else { +#ifndef PRODUCT + if (PrintC1Statistics) { + __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); + } +#endif + __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack + } #endif // _LP64 __ cmpl(rax, 0); __ jcc(Assembler::equal, *stub->continuation()); + if (copyfunc_addr != NULL) { + __ mov(tmp, rax); + __ xorl(tmp, -1); + } + // Reload values from the stack so they are where the stub // expects them. 
__ movptr (dst, Address(rsp, 0*BytesPerWord)); @@ -3167,6 +3202,12 @@ __ movptr (length, Address(rsp, 2*BytesPerWord)); __ movptr (src_pos, Address(rsp, 3*BytesPerWord)); __ movptr (src, Address(rsp, 4*BytesPerWord)); + + if (copyfunc_addr != NULL) { + __ subl(length, tmp); + __ addl(src_pos, tmp); + __ addl(dst_pos, tmp); + } __ jmp(*stub->entry()); __ bind(*stub->continuation()); @@ -3226,10 +3267,6 @@ __ testl(dst_pos, dst_pos); __ jcc(Assembler::less, *stub->entry()); } - if (flags & LIR_OpArrayCopy::length_positive_check) { - __ testl(length, length); - __ jcc(Assembler::less, *stub->entry()); - } if (flags & LIR_OpArrayCopy::src_range_check) { __ lea(tmp, Address(src_pos, length, Address::times_1, 0)); @@ -3242,15 +3279,190 @@ __ jcc(Assembler::above, *stub->entry()); } + if (flags & LIR_OpArrayCopy::length_positive_check) { + __ testl(length, length); + __ jcc(Assembler::less, *stub->entry()); + __ jcc(Assembler::zero, *stub->continuation()); + } + +#ifdef _LP64 + __ movl2ptr(src_pos, src_pos); //higher 32bits must be null + __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null +#endif + if (flags & LIR_OpArrayCopy::type_check) { - if (UseCompressedOops) { - __ movl(tmp, src_klass_addr); - __ cmpl(tmp, dst_klass_addr); + // We don't know the array types are compatible + if (basic_type != T_OBJECT) { + // Simple test for basic type arrays + if (UseCompressedOops) { + __ movl(tmp, src_klass_addr); + __ cmpl(tmp, dst_klass_addr); + } else { + __ movptr(tmp, src_klass_addr); + __ cmpptr(tmp, dst_klass_addr); + } + __ jcc(Assembler::notEqual, *stub->entry()); } else { - __ movptr(tmp, src_klass_addr); - __ cmpptr(tmp, dst_klass_addr); + // For object arrays, if src is a sub class of dst then we can + // safely do the copy. + Label cont, slow; + + __ push(src); + __ push(dst); + + __ load_klass(src, src); + __ load_klass(dst, dst); + + __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL); + + __ push(src); + __ push(dst); + __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); + __ pop(dst); + __ pop(src); + + __ cmpl(src, 0); + __ jcc(Assembler::notEqual, cont); + + __ bind(slow); + __ pop(dst); + __ pop(src); + + address copyfunc_addr = StubRoutines::checkcast_arraycopy(); + if (copyfunc_addr != NULL) { // use stub if available + // src is not a sub class of dst so we have to do a + // per-element check. + + int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; + if ((flags & mask) != mask) { + // Check that at least both of them object arrays. 
+ assert(flags & mask, "one of the two should be known to be an object array"); + + if (!(flags & LIR_OpArrayCopy::src_objarray)) { + __ load_klass(tmp, src); + } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { + __ load_klass(tmp, dst); + } + int lh_offset = klassOopDesc::header_size() * HeapWordSize + + Klass::layout_helper_offset_in_bytes(); + Address klass_lh_addr(tmp, lh_offset); + jint objArray_lh = Klass::array_layout_helper(T_OBJECT); + __ cmpl(klass_lh_addr, objArray_lh); + __ jcc(Assembler::notEqual, *stub->entry()); + } + +#ifndef _LP64 + // save caller save registers + store_parameter(rax, 2); + store_parameter(rcx, 1); + store_parameter(rdx, 0); + + __ movptr(tmp, dst_klass_addr); + __ movptr(tmp, Address(tmp, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); + __ push(tmp); + __ movl(tmp, Address(tmp, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc))); + __ push(tmp); + __ push(length); + __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); + __ push(tmp); + __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); + __ push(tmp); + + __ call_VM_leaf(copyfunc_addr, 5); +#else + __ movl2ptr(length, length); //higher 32bits must be null + + // save caller save registers: copy them to callee save registers + __ mov(rbx, rdx); + __ mov(r13, r8); + __ mov(r14, r9); +#ifndef _WIN64 + store_parameter(rsi, 1); + store_parameter(rcx, 0); + // on WIN64 other incoming parameters are in rdi and rsi saved + // across the call +#endif + + __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); + assert_different_registers(c_rarg0, dst, dst_pos, length); + __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); + assert_different_registers(c_rarg1, dst, length); + + __ mov(c_rarg2, length); + assert_different_registers(c_rarg2, dst); + +#ifdef _WIN64 + // Allocate abi space for args but be sure to keep stack aligned + __ subptr(rsp, 6*wordSize); + __ load_klass(c_rarg3, dst); + __ movptr(c_rarg3, Address(c_rarg3, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); + store_parameter(c_rarg3, 4); + __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc))); + __ call(RuntimeAddress(copyfunc_addr)); + __ addptr(rsp, 6*wordSize); +#else + __ load_klass(c_rarg4, dst); + __ movptr(c_rarg4, Address(c_rarg4, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); + __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc))); + __ call(RuntimeAddress(copyfunc_addr)); +#endif + +#endif + +#ifndef PRODUCT + if (PrintC1Statistics) { + Label failed; + __ testl(rax, rax); + __ jcc(Assembler::notZero, failed); + __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt)); + __ bind(failed); + } +#endif + + __ testl(rax, rax); + __ jcc(Assembler::zero, *stub->continuation()); + +#ifndef PRODUCT + if (PrintC1Statistics) { + __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt)); + } +#endif + + __ mov(tmp, rax); + + __ xorl(tmp, -1); + +#ifndef _LP64 + // restore caller save registers + assert_different_registers(tmp, rdx, rcx, rax); // result of stub will be lost + __ movptr(rdx, Address(rsp, 0*BytesPerWord)); + __ movptr(rcx, Address(rsp, 1*BytesPerWord)); + __ movptr(rax, Address(rsp, 2*BytesPerWord)); +#else + // restore caller save registers + __ mov(rdx, rbx); + __ 
mov(r8, r13); + __ mov(r9, r14); +#ifndef _WIN64 + assert_different_registers(tmp, rdx, r8, r9, rcx, rsi); // result of stub will be lost + __ movptr(rcx, Address(rsp, 0*BytesPerWord)); + __ movptr(rsi, Address(rsp, 1*BytesPerWord)); +#else + assert_different_registers(tmp, rdx, r8, r9); // result of stub will be lost +#endif +#endif + + __ subl(length, tmp); + __ addl(src_pos, tmp); + __ addl(dst_pos, tmp); + } + + __ jmp(*stub->entry()); + + __ bind(cont); + __ pop(dst); + __ pop(src); } - __ jcc(Assembler::notEqual, *stub->entry()); } #ifdef ASSERT @@ -3291,16 +3503,16 @@ } #endif - if (shift_amount > 0 && basic_type != T_OBJECT) { - __ shlptr(length, shift_amount); +#ifndef PRODUCT + if (PrintC1Statistics) { + __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type))); } +#endif #ifdef _LP64 assert_different_registers(c_rarg0, dst, dst_pos, length); - __ movl2ptr(src_pos, src_pos); //higher 32bits must be null __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); assert_different_registers(c_rarg1, length); - __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); __ mov(c_rarg2, length); @@ -3311,11 +3523,12 @@ store_parameter(tmp, 1); store_parameter(length, 2); #endif // _LP64 - if (basic_type == T_OBJECT) { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy), 0); - } else { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy), 0); - } + + bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; + bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; + const char *name; + address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); + __ call_VM_leaf(entry, 0); __ bind(*stub->continuation()); }
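The rax convention the new arraycopy code above relies on (implied by the xorl(tmp, -1) followed by the subl/addl adjustments): the stubs return 0 when everything was copied and the bitwise complement of the number of elements already copied when they stop early. A C-style sketch of the fixup before falling back to the slow path:

    // Sketch of the partial-copy fixup after a stub call (illustrative names):
    int result = call_arraycopy_stub(src, src_pos, dst, dst_pos, length);
    if (result != 0) {
      int copied = ~result;   // stub reports ~(elements copied) on early exit
      length  -= copied;      // only the tail still needs to be handled
      src_pos += copied;
      dst_pos += copied;
      // jump to the slow-path stub to copy the rest or raise the exception
    }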
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -326,7 +326,8 @@ if (obj_store) { // Needs GC write barriers. - pre_barrier(LIR_OprFact::address(array_addr), false, NULL); + pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */, + true /* do_load */, false /* patch */, NULL); __ move(value.result(), array_addr, null_check_info); // Seems to be a precise post_barrier(LIR_OprFact::address(array_addr), value.result()); @@ -794,7 +795,8 @@ if (type == objectType) { // Write-barrier needed for Object fields. // Do the pre-write barrier, if any. - pre_barrier(addr, false, NULL); + pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */, + true /* do_load */, false /* patch */, NULL); } LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience @@ -1339,7 +1341,8 @@ bool is_obj = (type == T_ARRAY || type == T_OBJECT); if (is_obj) { // Do the pre-write barrier, if any. - pre_barrier(LIR_OprFact::address(addr), false, NULL); + pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */, + true /* do_load */, false /* patch */, NULL); __ move(data, addr); assert(src->is_register(), "must be register"); // Seems to be a precise address
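Judging from the call sites above, the updated pre_barrier hook in the shared LIRGenerator presumably has the shape below (inferred, not quoted from the changeset):

    // Inferred declaration of the shared pre-barrier hook:
    //   addr_opr : address of the field, used only when do_load is true
    //   pre_val  : already-loaded previous value, used when do_load is false
    void pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                     bool do_load, bool patch, CodeEmitInfo* info);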
--- a/src/cpu/x86/vm/cppInterpreterGenerator_x86.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/cppInterpreterGenerator_x86.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,6 +34,7 @@ address generate_math_entry(AbstractInterpreter::MethodKind kind); address generate_empty_entry(void); address generate_accessor_entry(void); + address generate_Reference_get_entry(void); void lock_method(void); void generate_stack_overflow_check(void);
--- a/src/cpu/x86/vm/cppInterpreter_x86.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -936,6 +936,26 @@ } +address InterpreterGenerator::generate_Reference_get_entry(void) { +#ifndef SERIALGC + if (UseG1GC) { + // We need to generate a routine that generates code to: + // * load the value in the referent field + // * pass that value to the pre-barrier. + // + // In the case of G1 this will record the value of the + // referent in an SATB buffer if marking is active. + // This will cause concurrent marking to mark the referent + // field as live. + Unimplemented(); + } +#endif // SERIALGC + + // If G1 is not enabled then attempt to go through the accessor entry point + // Reference.get is an accessor + return generate_accessor_entry(); +} + // // C++ Interpreter stub for calling a native method. // This sets up a somewhat different looking stack for calling the native method @@ -2210,6 +2230,8 @@ case Interpreter::java_lang_math_log : // fall thru case Interpreter::java_lang_math_log10 : // fall thru case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break; + case Interpreter::java_lang_ref_reference_get + : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; default : ShouldNotReachHere(); break; } @@ -2317,14 +2339,15 @@ } int AbstractInterpreter::layout_activation(methodOop method, - int tempcount, // - int popframe_extra_args, - int moncount, - int callee_param_count, - int callee_locals, - frame* caller, - frame* interpreter_frame, - bool is_top_frame) { + int tempcount, // + int popframe_extra_args, + int moncount, + int caller_actual_parameters, + int callee_param_count, + int callee_locals, + frame* caller, + frame* interpreter_frame, + bool is_top_frame) { assert(popframe_extra_args == 0, "FIX ME"); // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
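In outline, the code the unimplemented G1 branch will eventually have to generate looks like the following (a sketch built on the g1_write_barrier_pre signature introduced in this changeset; receiver, referent_offset, thread and tmp are placeholders):

    // Sketch of a Reference.get entry under G1: load the referent, record it
    // in the SATB queue so concurrent marking treats it as live, then return it.
    __ load_heap_oop(rax, Address(receiver, referent_offset));
    __ g1_write_barrier_pre(noreg /* obj */, rax /* pre_val */,
                            thread, tmp,
                            true  /* tosca_live */,
                            true  /* expand_call: no interpreter frame here */);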
--- a/src/cpu/x86/vm/frame_x86.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/frame_x86.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -339,7 +339,6 @@ return fr; } - //------------------------------------------------------------------------------ // frame::verify_deopt_original_pc // @@ -361,6 +360,55 @@ } #endif +//------------------------------------------------------------------------------ +// frame::adjust_unextended_sp +void frame::adjust_unextended_sp() { + // If we are returning to a compiled MethodHandle call site, the + // saved_fp will in fact be a saved value of the unextended SP. The + // simplest way to tell whether we are returning to such a call site + // is as follows: + + nmethod* sender_nm = (_cb == NULL) ? NULL : _cb->as_nmethod_or_null(); + if (sender_nm != NULL) { + // If the sender PC is a deoptimization point, get the original + // PC. For MethodHandle call site the unextended_sp is stored in + // saved_fp. + if (sender_nm->is_deopt_mh_entry(_pc)) { + DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, _fp)); + _unextended_sp = _fp; + } + else if (sender_nm->is_deopt_entry(_pc)) { + DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp)); + } + else if (sender_nm->is_method_handle_return(_pc)) { + _unextended_sp = _fp; + } + } +} + +//------------------------------------------------------------------------------ +// frame::update_map_with_saved_link +void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) { + // The interpreter and compiler(s) always save EBP/RBP in a known + // location on entry. We must record where that location is + // so this if EBP/RBP was live on callout from c2 we can find + // the saved copy no matter what it called. + + // Since the interpreter always saves EBP/RBP if we record where it is then + // we don't have to always save EBP/RBP on entry and exit to c2 compiled + // code, on entry will be enough. + map->set_location(rbp->as_VMReg(), (address) link_addr); +#ifdef AMD64 + // this is weird "H" ought to be at a higher address however the + // oopMaps seems to have the "H" regs at the same address and the + // vanilla register. + // XXXX make this go away + if (true) { + map->set_location(rbp->as_VMReg()->next(), (address) link_addr); + } +#endif // AMD64 +} + //------------------------------------------------------------------------------ // frame::sender_for_interpreter_frame @@ -372,54 +420,13 @@ // This is the sp before any possible extension (adapter/locals). intptr_t* unextended_sp = interpreter_frame_sender_sp(); - // Stored FP. - intptr_t* saved_fp = link(); - - address sender_pc = this->sender_pc(); - CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc); - assert(sender_cb, "sanity"); - nmethod* sender_nm = sender_cb->as_nmethod_or_null(); - - if (sender_nm != NULL) { - // If the sender PC is a deoptimization point, get the original - // PC. For MethodHandle call site the unextended_sp is stored in - // saved_fp. 
- if (sender_nm->is_deopt_mh_entry(sender_pc)) { - DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp)); - unextended_sp = saved_fp; - } - else if (sender_nm->is_deopt_entry(sender_pc)) { - DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp)); - } - else if (sender_nm->is_method_handle_return(sender_pc)) { - unextended_sp = saved_fp; - } - } - - // The interpreter and compiler(s) always save EBP/RBP in a known - // location on entry. We must record where that location is - // so this if EBP/RBP was live on callout from c2 we can find - // the saved copy no matter what it called. - - // Since the interpreter always saves EBP/RBP if we record where it is then - // we don't have to always save EBP/RBP on entry and exit to c2 compiled - // code, on entry will be enough. #ifdef COMPILER2 if (map->update_map()) { - map->set_location(rbp->as_VMReg(), (address) addr_at(link_offset)); -#ifdef AMD64 - // this is weird "H" ought to be at a higher address however the - // oopMaps seems to have the "H" regs at the same address and the - // vanilla register. - // XXXX make this go away - if (true) { - map->set_location(rbp->as_VMReg()->next(), (address)addr_at(link_offset)); - } -#endif // AMD64 + update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset)); } #endif // COMPILER2 - return frame(sender_sp, unextended_sp, saved_fp, sender_pc); + return frame(sender_sp, unextended_sp, link(), sender_pc()); } @@ -427,6 +434,7 @@ // frame::sender_for_compiled_frame frame frame::sender_for_compiled_frame(RegisterMap* map) const { assert(map != NULL, "map must be set"); + assert(!is_ricochet_frame(), "caller must handle this"); // frame owned by optimizing compiler assert(_cb->frame_size() >= 0, "must have non-zero frame size"); @@ -438,31 +446,7 @@ // This is the saved value of EBP which may or may not really be an FP. // It is only an FP if the sender is an interpreter frame (or C1?). - intptr_t* saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset); - - // If we are returning to a compiled MethodHandle call site, the - // saved_fp will in fact be a saved value of the unextended SP. The - // simplest way to tell whether we are returning to such a call site - // is as follows: - CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc); - assert(sender_cb, "sanity"); - nmethod* sender_nm = sender_cb->as_nmethod_or_null(); - - if (sender_nm != NULL) { - // If the sender PC is a deoptimization point, get the original - // PC. For MethodHandle call site the unextended_sp is stored in - // saved_fp. - if (sender_nm->is_deopt_mh_entry(sender_pc)) { - DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp)); - unextended_sp = saved_fp; - } - else if (sender_nm->is_deopt_entry(sender_pc)) { - DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp)); - } - else if (sender_nm->is_method_handle_return(sender_pc)) { - unextended_sp = saved_fp; - } - } + intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset); if (map->update_map()) { // Tell GC to use argument oopmaps for some runtime stubs that need it. @@ -472,23 +456,15 @@ if (_cb->oop_maps() != NULL) { OopMapSet::update_register_map(this, map); } + // Since the prolog does the save and restore of EBP there is no oopmap // for it so we must fill in its location as if there was an oopmap entry // since if our caller was compiled code there could be live jvm state in it. 
- map->set_location(rbp->as_VMReg(), (address) (sender_sp - frame::sender_sp_offset)); -#ifdef AMD64 - // this is weird "H" ought to be at a higher address however the - // oopMaps seems to have the "H" regs at the same address and the - // vanilla register. - // XXXX make this go away - if (true) { - map->set_location(rbp->as_VMReg()->next(), (address) (sender_sp - frame::sender_sp_offset)); - } -#endif // AMD64 + update_map_with_saved_link(map, saved_fp_addr); } assert(sender_sp != sp(), "must have changed"); - return frame(sender_sp, unextended_sp, saved_fp, sender_pc); + return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc); } @@ -502,6 +478,7 @@ if (is_entry_frame()) return sender_for_entry_frame(map); if (is_interpreted_frame()) return sender_for_interpreter_frame(map); assert(_cb == CodeCache::find_blob(pc()),"Must be the same"); + if (is_ricochet_frame()) return sender_for_ricochet_frame(map); if (_cb != NULL) { return sender_for_compiled_frame(map); @@ -669,3 +646,23 @@ int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize); return &interpreter_frame_tos_address()[index]; } + +#ifdef ASSERT + +#define DESCRIBE_FP_OFFSET(name) \ + values.describe(frame_no, fp() + frame::name##_offset, #name) + +void frame::describe_pd(FrameValues& values, int frame_no) { + if (is_interpreted_frame()) { + DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp); + DESCRIBE_FP_OFFSET(interpreter_frame_last_sp); + DESCRIBE_FP_OFFSET(interpreter_frame_method); + DESCRIBE_FP_OFFSET(interpreter_frame_mdx); + DESCRIBE_FP_OFFSET(interpreter_frame_cache); + DESCRIBE_FP_OFFSET(interpreter_frame_locals); + DESCRIBE_FP_OFFSET(interpreter_frame_bcx); + DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp); + } + +} +#endif
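For readers unfamiliar with the macro, each DESCRIBE_FP_OFFSET(name) line above expands, per the #define right before it, to a call of this shape:

    // Expansion of DESCRIBE_FP_OFFSET(interpreter_frame_method):
    values.describe(frame_no,
                    fp() + frame::interpreter_frame_method_offset,
                    "interpreter_frame_method");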
--- a/src/cpu/x86/vm/frame_x86.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/frame_x86.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -164,6 +164,7 @@ // original sp we use that convention. intptr_t* _unextended_sp; + void adjust_unextended_sp(); intptr_t* ptr_at_addr(int offset) const { return (intptr_t*) addr_at(offset); @@ -197,6 +198,9 @@ // expression stack tos if we are nested in a java call intptr_t* interpreter_frame_last_sp() const; + // helper to update a map with callee-saved RBP + static void update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr); + #ifndef CC_INTERP // deoptimization support void interpreter_frame_set_last_sp(intptr_t* sp);
--- a/src/cpu/x86/vm/frame_x86.inline.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/frame_x86.inline.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -62,6 +62,7 @@ _pc = pc; assert(pc != NULL, "no pc?"); _cb = CodeCache::find_blob(pc); + adjust_unextended_sp(); address original_pc = nmethod::get_deopt_original_pc(this); if (original_pc != NULL) {
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -383,32 +383,6 @@ movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val); } -void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) { - MacroAssembler::call_VM_leaf_base(entry_point, 0); -} - - -void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1) { - push(arg_1); - MacroAssembler::call_VM_leaf_base(entry_point, 1); -} - - -void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2) { - push(arg_2); - push(arg_1); - MacroAssembler::call_VM_leaf_base(entry_point, 2); -} - - -void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) { - push(arg_3); - push(arg_2); - push(arg_1); - MacroAssembler::call_VM_leaf_base(entry_point, 3); -} - - void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() { // set sender sp lea(rsi, Address(rsp, wordSize));
--- a/src/cpu/x86/vm/interp_masm_x86_32.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -124,12 +124,6 @@ void load_ptr(int n, Register val); void store_ptr(int n, Register val); - // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls - void super_call_VM_leaf(address entry_point); - void super_call_VM_leaf(address entry_point, Register arg_1); - void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2); - void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); - // Generate a subtype check: branch to ok_is_subtype if sub_klass is // a subtype of super_klass. EAX holds the super_klass. Blows ECX // and EDI. Register sub_klass cannot be any of the above.
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -381,56 +381,6 @@ } -void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) { - MacroAssembler::call_VM_leaf_base(entry_point, 0); -} - - -void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, - Register arg_1) { - if (c_rarg0 != arg_1) { - mov(c_rarg0, arg_1); - } - MacroAssembler::call_VM_leaf_base(entry_point, 1); -} - - -void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, - Register arg_1, - Register arg_2) { - assert(c_rarg0 != arg_2, "smashed argument"); - assert(c_rarg1 != arg_1, "smashed argument"); - if (c_rarg0 != arg_1) { - mov(c_rarg0, arg_1); - } - if (c_rarg1 != arg_2) { - mov(c_rarg1, arg_2); - } - MacroAssembler::call_VM_leaf_base(entry_point, 2); -} - -void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, - Register arg_1, - Register arg_2, - Register arg_3) { - assert(c_rarg0 != arg_2, "smashed argument"); - assert(c_rarg0 != arg_3, "smashed argument"); - assert(c_rarg1 != arg_1, "smashed argument"); - assert(c_rarg1 != arg_3, "smashed argument"); - assert(c_rarg2 != arg_1, "smashed argument"); - assert(c_rarg2 != arg_2, "smashed argument"); - if (c_rarg0 != arg_1) { - mov(c_rarg0, arg_1); - } - if (c_rarg1 != arg_2) { - mov(c_rarg1, arg_2); - } - if (c_rarg2 != arg_3) { - mov(c_rarg2, arg_3); - } - MacroAssembler::call_VM_leaf_base(entry_point, 3); -} - void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() { // set sender sp lea(r13, Address(rsp, wordSize));
--- a/src/cpu/x86/vm/interp_masm_x86_64.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -136,13 +136,6 @@ void load_ptr(int n, Register val); void store_ptr(int n, Register val); - // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls - void super_call_VM_leaf(address entry_point); - void super_call_VM_leaf(address entry_point, Register arg_1); - void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2); - void super_call_VM_leaf(address entry_point, - Register arg_1, Register arg_2, Register arg_3); - // Generate a subtype check: branch to ok_is_subtype if sub_klass is // a subtype of super_klass. void gen_subtype_check( Register sub_klass, Label &ok_is_subtype );
--- a/src/cpu/x86/vm/interpreterGenerator_x86.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/interpreterGenerator_x86.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,7 @@ address generate_math_entry(AbstractInterpreter::MethodKind kind); address generate_empty_entry(void); address generate_accessor_entry(void); + address generate_Reference_get_entry(); void lock_method(void); void generate_stack_overflow_check(void);
--- a/src/cpu/x86/vm/interpreter_x86.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/interpreter_x86.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -26,7 +26,9 @@ #define CPU_X86_VM_INTERPRETER_X86_HPP public: - static Address::ScaleFactor stackElementScale() { return Address::times_4; } + static Address::ScaleFactor stackElementScale() { + return NOT_LP64(Address::times_4) LP64_ONLY(Address::times_8); + } // Offset from rsp (which points to the last stack element) static int expr_offset_in_bytes(int i) { return stackElementSize * i; }
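A small illustration of how the corrected scale pairs with expr_offset_in_bytes when addressing the expression stack (a sketch; i_reg stands for any register holding the element index):

    // Element 2 with a constant index vs. element i_reg with a register index:
    Address a_const(rsp, Interpreter::expr_offset_in_bytes(2));      // rsp + 2 * stackElementSize
    Address a_reg  (rsp, i_reg, Interpreter::stackElementScale());   // rsp + i_reg * (4 or 8)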
--- a/src/cpu/x86/vm/interpreter_x86_32.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/interpreter_x86_32.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -242,26 +242,6 @@ return entry_point; } - -// This method tells the deoptimizer how big an interpreted frame must be: -int AbstractInterpreter::size_activation(methodOop method, - int tempcount, - int popframe_extra_args, - int moncount, - int callee_param_count, - int callee_locals, - bool is_top_frame) { - return layout_activation(method, - tempcount, - popframe_extra_args, - moncount, - callee_param_count, - callee_locals, - (frame*) NULL, - (frame*) NULL, - is_top_frame); -} - void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) { // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
--- a/src/cpu/x86/vm/interpreter_x86_64.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/interpreter_x86_64.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -362,20 +362,6 @@ } -// This method tells the deoptimizer how big an interpreted frame must be: -int AbstractInterpreter::size_activation(methodOop method, - int tempcount, - int popframe_extra_args, - int moncount, - int callee_param_count, - int callee_locals, - bool is_top_frame) { - return layout_activation(method, - tempcount, popframe_extra_args, moncount, - callee_param_count, callee_locals, - (frame*) NULL, (frame*) NULL, is_top_frame); -} - void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) { // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
--- a/src/cpu/x86/vm/jni_x86.h Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/jni_x86.h Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/methodHandles_x86.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/methodHandles_x86.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -69,23 +69,475 @@ return me; } +// stack walking support + +frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) { + RicochetFrame* f = RicochetFrame::from_frame(fr); + if (map->update_map()) + frame::update_map_with_saved_link(map, &f->_sender_link); + return frame(f->extended_sender_sp(), f->exact_sender_sp(), f->sender_link(), f->sender_pc()); +} + +void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) { + RicochetFrame* f = RicochetFrame::from_frame(fr); + + // pick up the argument type descriptor: + Thread* thread = Thread::current(); + Handle cookie(thread, f->compute_saved_args_layout(true, true)); + + // process fixed part + blk->do_oop((oop*)f->saved_target_addr()); + blk->do_oop((oop*)f->saved_args_layout_addr()); + + // process variable arguments: + if (cookie.is_null()) return; // no arguments to describe + + // the cookie is actually the invokeExact method for my target + // his argument signature is what I'm interested in + assert(cookie->is_method(), ""); + methodHandle invoker(thread, methodOop(cookie())); + assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method"); + assert(!invoker->is_static(), "must have MH argument"); + int slot_count = invoker->size_of_parameters(); + assert(slot_count >= 1, "must include 'this'"); + intptr_t* base = f->saved_args_base(); + intptr_t* retval = NULL; + if (f->has_return_value_slot()) + retval = f->return_value_slot_addr(); + int slot_num = slot_count; + intptr_t* loc = &base[slot_num -= 1]; + //blk->do_oop((oop*) loc); // original target, which is irrelevant + int arg_num = 0; + for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) { + if (ss.at_return_type()) continue; + BasicType ptype = ss.type(); + if (ptype == T_ARRAY) ptype = T_OBJECT; // fold all refs to T_OBJECT + assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void"); + loc = &base[slot_num -= type2size[ptype]]; + bool is_oop = (ptype == T_OBJECT && loc != retval); + if (is_oop) blk->do_oop((oop*)loc); + arg_num += 1; + } + assert(slot_num == 0, "must have processed all the arguments"); +} + +oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) { + oop cookie = NULL; + if (read_cache) { + cookie = saved_args_layout(); + if (cookie != NULL) return cookie; + } + oop target = saved_target(); + oop mtype = java_lang_invoke_MethodHandle::type(target); + oop mtform = java_lang_invoke_MethodType::form(mtype); + cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform); + if (write_cache) { + (*saved_args_layout_addr()) = cookie; + } + return cookie; +} + +void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm, + // output params: + int* frame_size_in_words, + int* bounce_offset, + int* exception_offset) { + (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize; + + address start = __ pc(); + #ifdef ASSERT -static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, - const char* error_message) { + __ hlt(); __ hlt(); __ hlt(); + // here's a hint of something special: + __ push(MAGIC_NUMBER_1); + __ push(MAGIC_NUMBER_2); +#endif //ASSERT + __ hlt(); // not reached + + // A return PC has just been popped from the stack. + // Return values are in registers. 
+ // The ebp points into the RicochetFrame, which contains + // a cleanup continuation we must return to. + + (*bounce_offset) = __ pc() - start; + BLOCK_COMMENT("ricochet_blob.bounce"); + + if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); + trace_method_handle(_masm, "return/ricochet_blob.bounce"); + + __ jmp(frame_address(continuation_offset_in_bytes())); + __ hlt(); + DEBUG_ONLY(__ push(MAGIC_NUMBER_2)); + + (*exception_offset) = __ pc() - start; + BLOCK_COMMENT("ricochet_blob.exception"); + + // compare this to Interpreter::rethrow_exception_entry, which is parallel code + // for example, see TemplateInterpreterGenerator::generate_throw_exception + // Live registers in: + // rax: exception + // rdx: return address/pc that threw exception (ignored, always equal to bounce addr) + __ verify_oop(rax); + + // no need to empty_FPU_stack or reinit_heapbase, since caller frame will do the same if needed + + // Take down the frame. + + // Cf. InterpreterMacroAssembler::remove_activation. + leave_ricochet_frame(_masm, /*rcx_recv=*/ noreg, + saved_last_sp_register(), + /*sender_pc_reg=*/ rdx); + + // In between activations - previous activation type unknown yet + // compute continuation point - the continuation point expects the + // following registers set up: + // + // rax: exception + // rdx: return address/pc that threw exception + // rsp: expression stack of caller + // rbp: ebp of caller + __ push(rax); // save exception + __ push(rdx); // save return address + Register thread_reg = LP64_ONLY(r15_thread) NOT_LP64(rdi); + NOT_LP64(__ get_thread(thread_reg)); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, + SharedRuntime::exception_handler_for_return_address), + thread_reg, rdx); + __ mov(rbx, rax); // save exception handler + __ pop(rdx); // restore return address + __ pop(rax); // restore exception + __ jmp(rbx); // jump to exception + // handler of caller +} + +void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm, + Register rcx_recv, + Register rax_argv, + address return_handler, + Register rbx_temp) { + const Register saved_last_sp = saved_last_sp_register(); + Address rcx_mh_vmtarget( rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() ); + Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() ); + + // Push the RicochetFrame a word at a time. + // This creates something similar to an interpreter frame. + // Cf. TemplateInterpreterGenerator::generate_fixed_frame. 
+ BLOCK_COMMENT("push RicochetFrame {"); + DEBUG_ONLY(int rfo = (int) sizeof(RicochetFrame)); + assert((rfo -= wordSize) == RicochetFrame::sender_pc_offset_in_bytes(), ""); +#define RF_FIELD(push_value, name) \ + { push_value; \ + assert((rfo -= wordSize) == RicochetFrame::name##_offset_in_bytes(), ""); } + RF_FIELD(__ push(rbp), sender_link); + RF_FIELD(__ push(saved_last_sp), exact_sender_sp); // rsi/r13 + RF_FIELD(__ pushptr(rcx_amh_conversion), conversion); + RF_FIELD(__ push(rax_argv), saved_args_base); // can be updated if args are shifted + RF_FIELD(__ push((int32_t) NULL_WORD), saved_args_layout); // cache for GC layout cookie + if (UseCompressedOops) { + __ load_heap_oop(rbx_temp, rcx_mh_vmtarget); + RF_FIELD(__ push(rbx_temp), saved_target); + } else { + RF_FIELD(__ pushptr(rcx_mh_vmtarget), saved_target); + } + __ lea(rbx_temp, ExternalAddress(return_handler)); + RF_FIELD(__ push(rbx_temp), continuation); +#undef RF_FIELD + assert(rfo == 0, "fully initialized the RicochetFrame"); + // compute new frame pointer: + __ lea(rbp, Address(rsp, RicochetFrame::sender_link_offset_in_bytes())); + // Push guard word #1 in debug mode. + DEBUG_ONLY(__ push((int32_t) RicochetFrame::MAGIC_NUMBER_1)); + // For debugging, leave behind an indication of which stub built this frame. + DEBUG_ONLY({ Label L; __ call(L, relocInfo::none); __ bind(L); }); + BLOCK_COMMENT("} RicochetFrame"); +} + +void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm, + Register rcx_recv, + Register new_sp_reg, + Register sender_pc_reg) { + assert_different_registers(rcx_recv, new_sp_reg, sender_pc_reg); + const Register saved_last_sp = saved_last_sp_register(); + // Take down the frame. + // Cf. InterpreterMacroAssembler::remove_activation. + BLOCK_COMMENT("end_ricochet_frame {"); + // TO DO: If (exact_sender_sp - extended_sender_sp) > THRESH, compact the frame down. + // This will keep stack in bounds even with unlimited tailcalls, each with an adapter. + if (rcx_recv->is_valid()) + __ movptr(rcx_recv, RicochetFrame::frame_address(RicochetFrame::saved_target_offset_in_bytes())); + __ movptr(sender_pc_reg, RicochetFrame::frame_address(RicochetFrame::sender_pc_offset_in_bytes())); + __ movptr(saved_last_sp, RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes())); + __ movptr(rbp, RicochetFrame::frame_address(RicochetFrame::sender_link_offset_in_bytes())); + __ mov(rsp, new_sp_reg); + BLOCK_COMMENT("} end_ricochet_frame"); +} + +// Emit code to verify that RBP is pointing at a valid ricochet frame. +#ifdef ASSERT +enum { + ARG_LIMIT = 255, SLOP = 4, + // use this parameter for checking for garbage stack movements: + UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP) + // the slop defends against false alarms due to fencepost errors +}; + +void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) { + // The stack should look like this: + // ... keep1 | dest=42 | keep2 | RF | magic | handler | magic | recursive args | + // Check various invariants. 
+ verify_offsets(); + + Register rdi_temp = rdi; + Register rcx_temp = rcx; + { __ push(rdi_temp); __ push(rcx_temp); } +#define UNPUSH_TEMPS \ + { __ pop(rcx_temp); __ pop(rdi_temp); } + + Address magic_number_1_addr = RicochetFrame::frame_address(RicochetFrame::magic_number_1_offset_in_bytes()); + Address magic_number_2_addr = RicochetFrame::frame_address(RicochetFrame::magic_number_2_offset_in_bytes()); + Address continuation_addr = RicochetFrame::frame_address(RicochetFrame::continuation_offset_in_bytes()); + Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); + Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes()); + + Label L_bad, L_ok; + BLOCK_COMMENT("verify_clean {"); + // Magic numbers must check out: + __ cmpptr(magic_number_1_addr, (int32_t) MAGIC_NUMBER_1); + __ jcc(Assembler::notEqual, L_bad); + __ cmpptr(magic_number_2_addr, (int32_t) MAGIC_NUMBER_2); + __ jcc(Assembler::notEqual, L_bad); + + // Arguments pointer must look reasonable: + __ movptr(rcx_temp, saved_args_base_addr); + __ cmpptr(rcx_temp, rbp); + __ jcc(Assembler::below, L_bad); + __ subptr(rcx_temp, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize); + __ cmpptr(rcx_temp, rbp); + __ jcc(Assembler::above, L_bad); + + load_conversion_dest_type(_masm, rdi_temp, conversion_addr); + __ cmpl(rdi_temp, T_VOID); + __ jcc(Assembler::equal, L_ok); + __ movptr(rcx_temp, saved_args_base_addr); + load_conversion_vminfo(_masm, rdi_temp, conversion_addr); + __ cmpptr(Address(rcx_temp, rdi_temp, Interpreter::stackElementScale()), + (int32_t) RETURN_VALUE_PLACEHOLDER); + __ jcc(Assembler::equal, L_ok); + __ BIND(L_bad); + UNPUSH_TEMPS; + __ stop("damaged ricochet frame"); + __ BIND(L_ok); + UNPUSH_TEMPS; + BLOCK_COMMENT("} verify_clean"); + +#undef UNPUSH_TEMPS + +} +#endif //ASSERT + +void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) { + if (VerifyMethodHandles) + verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(), + "AMH argument is a Class"); + __ load_heap_oop(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes())); +} + +void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr) { + int bits = BitsPerByte; + int offset = (CONV_VMINFO_SHIFT / bits); + int shift = (CONV_VMINFO_SHIFT % bits); + __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset)); + assert(CONV_VMINFO_MASK == right_n_bits(bits - shift), "else change type of previous load"); + assert(shift == 0, "no shift needed"); +} + +void MethodHandles::load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr) { + int bits = BitsPerByte; + int offset = (CONV_DEST_TYPE_SHIFT / bits); + int shift = (CONV_DEST_TYPE_SHIFT % bits); + __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset)); + assert(CONV_TYPE_MASK == right_n_bits(bits - shift), "else change type of previous load"); + __ shrl(reg, shift); + DEBUG_ONLY(int conv_type_bits = (int) exact_log2(CONV_TYPE_MASK+1)); + assert((shift + conv_type_bits) == bits, "left justified in byte"); +} + +void MethodHandles::load_stack_move(MacroAssembler* _masm, + Register rdi_stack_move, + Register rcx_amh, + bool might_be_negative) { + BLOCK_COMMENT("load_stack_move"); + Address rcx_amh_conversion(rcx_amh, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes()); + __ movl(rdi_stack_move, rcx_amh_conversion); + __ 
sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT); +#ifdef _LP64 + if (might_be_negative) { + // clean high bits of stack motion register (was loaded as an int) + __ movslq(rdi_stack_move, rdi_stack_move); + } +#endif //_LP64 + if (VerifyMethodHandles) { + Label L_ok, L_bad; + int32_t stack_move_limit = 0x4000; // extra-large + __ cmpptr(rdi_stack_move, stack_move_limit); + __ jcc(Assembler::greaterEqual, L_bad); + __ cmpptr(rdi_stack_move, -stack_move_limit); + __ jcc(Assembler::greater, L_ok); + __ bind(L_bad); + __ stop("load_stack_move of garbage value"); + __ BIND(L_ok); + } +} + +#ifdef ASSERT +void MethodHandles::RicochetFrame::verify_offsets() { + // Check compatibility of this struct with the more generally used offsets of class frame: + int ebp_off = sender_link_offset_in_bytes(); // offset from struct base to local rbp value + assert(ebp_off + wordSize*frame::interpreter_frame_method_offset == saved_args_base_offset_in_bytes(), ""); + assert(ebp_off + wordSize*frame::interpreter_frame_last_sp_offset == conversion_offset_in_bytes(), ""); + assert(ebp_off + wordSize*frame::interpreter_frame_sender_sp_offset == exact_sender_sp_offset_in_bytes(), ""); + // These last two have to be exact: + assert(ebp_off + wordSize*frame::link_offset == sender_link_offset_in_bytes(), ""); + assert(ebp_off + wordSize*frame::return_addr_offset == sender_pc_offset_in_bytes(), ""); +} + +void MethodHandles::RicochetFrame::verify() const { + verify_offsets(); + assert(magic_number_1() == MAGIC_NUMBER_1, ""); + assert(magic_number_2() == MAGIC_NUMBER_2, ""); + if (!Universe::heap()->is_gc_active()) { + if (saved_args_layout() != NULL) { + assert(saved_args_layout()->is_method(), "must be valid oop"); + } + if (saved_target() != NULL) { + assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value"); + } + } + int conv_op = adapter_conversion_op(conversion()); + assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS || + conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS || + conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF, + "must be a sane conversion"); + if (has_return_value_slot()) { + assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, ""); + } +} +#endif //PRODUCT + +#ifdef ASSERT +void MethodHandles::verify_argslot(MacroAssembler* _masm, + Register argslot_reg, + const char* error_message) { // Verify that argslot lies within (rsp, rbp]. Label L_ok, L_bad; - BLOCK_COMMENT("{ verify_argslot"); + BLOCK_COMMENT("verify_argslot {"); __ cmpptr(argslot_reg, rbp); __ jccb(Assembler::above, L_bad); __ cmpptr(rsp, argslot_reg); __ jccb(Assembler::below, L_ok); __ bind(L_bad); __ stop(error_message); - __ bind(L_ok); + __ BIND(L_ok); BLOCK_COMMENT("} verify_argslot"); } -#endif + +void MethodHandles::verify_argslots(MacroAssembler* _masm, + RegisterOrConstant arg_slots, + Register arg_slot_base_reg, + bool negate_argslots, + const char* error_message) { + // Verify that [argslot..argslot+size) lies within (rsp, rbp). 
+ Label L_ok, L_bad; + Register rdi_temp = rdi; + BLOCK_COMMENT("verify_argslots {"); + __ push(rdi_temp); + if (negate_argslots) { + if (arg_slots.is_constant()) { + arg_slots = -1 * arg_slots.as_constant(); + } else { + __ movptr(rdi_temp, arg_slots); + __ negptr(rdi_temp); + arg_slots = rdi_temp; + } + } + __ lea(rdi_temp, Address(arg_slot_base_reg, arg_slots, Interpreter::stackElementScale())); + __ cmpptr(rdi_temp, rbp); + __ pop(rdi_temp); + __ jcc(Assembler::above, L_bad); + __ cmpptr(rsp, arg_slot_base_reg); + __ jcc(Assembler::below, L_ok); + __ bind(L_bad); + __ stop(error_message); + __ BIND(L_ok); + BLOCK_COMMENT("} verify_argslots"); +} +// Make sure that arg_slots has the same sign as the given direction. +// If (and only if) arg_slots is a assembly-time constant, also allow it to be zero. +void MethodHandles::verify_stack_move(MacroAssembler* _masm, + RegisterOrConstant arg_slots, int direction) { + bool allow_zero = arg_slots.is_constant(); + if (direction == 0) { direction = +1; allow_zero = true; } + assert(stack_move_unit() == -1, "else add extra checks here"); + if (arg_slots.is_register()) { + Label L_ok, L_bad; + BLOCK_COMMENT("verify_stack_move {"); + // testl(arg_slots.as_register(), -stack_move_unit() - 1); // no need + // jcc(Assembler::notZero, L_bad); + __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD); + if (direction > 0) { + __ jcc(allow_zero ? Assembler::less : Assembler::lessEqual, L_bad); + __ cmpptr(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE); + __ jcc(Assembler::less, L_ok); + } else { + __ jcc(allow_zero ? Assembler::greater : Assembler::greaterEqual, L_bad); + __ cmpptr(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE); + __ jcc(Assembler::greater, L_ok); + } + __ bind(L_bad); + if (direction > 0) + __ stop("assert arg_slots > 0"); + else + __ stop("assert arg_slots < 0"); + __ BIND(L_ok); + BLOCK_COMMENT("} verify_stack_move"); + } else { + intptr_t size = arg_slots.as_constant(); + if (direction < 0) size = -size; + assert(size >= 0, "correct direction of constant move"); + assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move"); + } +} + +void MethodHandles::verify_klass(MacroAssembler* _masm, + Register obj, KlassHandle klass, + const char* error_message) { + oop* klass_addr = klass.raw_value(); + assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() && + klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(), + "must be one of the SystemDictionaryHandles"); + Register temp = rdi; + Label L_ok, L_bad; + BLOCK_COMMENT("verify_klass {"); + __ verify_oop(obj); + __ testptr(obj, obj); + __ jcc(Assembler::zero, L_bad); + __ push(temp); + __ load_klass(temp, obj); + __ cmpptr(temp, ExternalAddress((address) klass_addr)); + __ jcc(Assembler::equal, L_ok); + intptr_t super_check_offset = klass->super_check_offset(); + __ movptr(temp, Address(temp, super_check_offset)); + __ cmpptr(temp, ExternalAddress((address) klass_addr)); + __ jcc(Assembler::equal, L_ok); + __ pop(temp); + __ bind(L_bad); + __ stop(error_message); + __ BIND(L_ok); + __ pop(temp); + BLOCK_COMMENT("} verify_klass"); +} +#endif //ASSERT // Code generation address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) { @@ -116,6 +568,9 @@ address entry_point = __ pc(); // fetch the MethodType from the method handle into rax (the 'check' register) + // FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list. 
+ // This would simplify several touchy bits of code. + // See 6984712: JSR 292 method handle calls need a clean argument base pointer { Register tem = rbx_method; for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) { @@ -128,17 +583,23 @@ __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp))); Register rdx_vmslots = rdx_temp; __ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp))); - __ movptr(rcx_recv, __ argument_address(rdx_vmslots)); + Address mh_receiver_slot_addr = __ argument_address(rdx_vmslots); + __ movptr(rcx_recv, mh_receiver_slot_addr); trace_method_handle(_masm, "invokeExact"); __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type); + + // Nobody uses the MH receiver slot after this. Make sure. + DEBUG_ONLY(__ movptr(mh_receiver_slot_addr, (int32_t)0x999999)); + __ jump_to_method_handle_entry(rcx_recv, rdi_temp); // for invokeGeneric (only), apply argument and result conversions on the fly __ bind(invoke_generic_slow_path); #ifdef ASSERT - { Label L; + if (VerifyMethodHandles) { + Label L; __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric); __ jcc(Assembler::equal, L); __ stop("bad methodOop::intrinsic_id"); @@ -150,22 +611,14 @@ // make room on the stack for another pointer: Register rcx_argslot = rcx_recv; __ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1)); - insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK, + insert_arg_slots(_masm, 2 * stack_move_unit(), rcx_argslot, rbx_temp, rdx_temp); // load up an adapter from the calling type (Java weaves this) - __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp))); Register rdx_adapter = rdx_temp; - // __ load_heap_oop(rdx_adapter, Address(rdx_temp, java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes())); - // deal with old JDK versions: - __ lea(rdi_temp, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp))); - __ cmpptr(rdi_temp, rdx_temp); - Label sorry_no_invoke_generic; - __ jcc(Assembler::below, sorry_no_invoke_generic); - - __ load_heap_oop(rdx_adapter, Address(rdi_temp, 0)); - __ testptr(rdx_adapter, rdx_adapter); - __ jcc(Assembler::zero, sorry_no_invoke_generic); + __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp))); + __ load_heap_oop(rdx_adapter, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp))); + __ verify_oop(rdx_adapter); __ movptr(Address(rcx_argslot, 1 * Interpreter::stackElementSize), rdx_adapter); // As a trusted first argument, pass the type being called, so the adapter knows // the actual types of the arguments and return values. @@ -176,49 +629,31 @@ trace_method_handle(_masm, "invokeGeneric"); __ jump_to_method_handle_entry(rcx, rdi_temp); - __ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available! 
- __ movptr(rcx_recv, Address(rcx_argslot, -1 * Interpreter::stackElementSize)); // recover original MH - __ push(rax_mtype); // required mtype - __ push(rcx_recv); // bad mh (1st stacked argument) - __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry())); + return entry_point; +} - return entry_point; +// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant. +static RegisterOrConstant constant(int value) { + return RegisterOrConstant(value); } // Helper to insert argument slots into the stack. -// arg_slots must be a multiple of stack_move_unit() and <= 0 +// arg_slots must be a multiple of stack_move_unit() and < 0 +// rax_argslot is decremented to point to the new (shifted) location of the argslot +// But, rdx_temp ends up holding the original value of rax_argslot. void MethodHandles::insert_arg_slots(MacroAssembler* _masm, RegisterOrConstant arg_slots, - int arg_mask, Register rax_argslot, - Register rbx_temp, Register rdx_temp, Register temp3_reg) { - assert(temp3_reg == noreg, "temp3 not required"); + Register rbx_temp, Register rdx_temp) { + // allow constant zero + if (arg_slots.is_constant() && arg_slots.as_constant() == 0) + return; assert_different_registers(rax_argslot, rbx_temp, rdx_temp, (!arg_slots.is_register() ? rsp : arg_slots.as_register())); - -#ifdef ASSERT - verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame"); - if (arg_slots.is_register()) { - Label L_ok, L_bad; - __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD); - __ jccb(Assembler::greater, L_bad); - __ testl(arg_slots.as_register(), -stack_move_unit() - 1); - __ jccb(Assembler::zero, L_ok); - __ bind(L_bad); - __ stop("assert arg_slots <= 0 and clear low bits"); - __ bind(L_ok); - } else { - assert(arg_slots.as_constant() <= 0, ""); - assert(arg_slots.as_constant() % -stack_move_unit() == 0, ""); - } -#endif //ASSERT - -#ifdef _LP64 - if (arg_slots.is_register()) { - // clean high bits of stack motion register (was loaded as an int) - __ movslq(arg_slots.as_register(), arg_slots.as_register()); - } -#endif + if (VerifyMethodHandles) + verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame"); + if (VerifyMethodHandles) + verify_stack_move(_masm, arg_slots, -1); // Make space on the stack for the inserted argument(s). // Then pull down everything shallower than rax_argslot. @@ -230,59 +665,39 @@ // argslot -= size; BLOCK_COMMENT("insert_arg_slots {"); __ mov(rdx_temp, rsp); // source pointer for copy - __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr)); + __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale())); { Label loop; __ BIND(loop); // pull one word down each time through the loop __ movptr(rbx_temp, Address(rdx_temp, 0)); - __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp); + __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp); __ addptr(rdx_temp, wordSize); __ cmpptr(rdx_temp, rax_argslot); - __ jccb(Assembler::less, loop); + __ jcc(Assembler::below, loop); } // Now move the argslot down, to point to the opened-up space. - __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr)); + __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale())); BLOCK_COMMENT("} insert_arg_slots"); } // Helper to remove argument slots from the stack. 
-// arg_slots must be a multiple of stack_move_unit() and >= 0 +// arg_slots must be a multiple of stack_move_unit() and > 0 void MethodHandles::remove_arg_slots(MacroAssembler* _masm, - RegisterOrConstant arg_slots, - Register rax_argslot, - Register rbx_temp, Register rdx_temp, Register temp3_reg) { - assert(temp3_reg == noreg, "temp3 not required"); + RegisterOrConstant arg_slots, + Register rax_argslot, + Register rbx_temp, Register rdx_temp) { + // allow constant zero + if (arg_slots.is_constant() && arg_slots.as_constant() == 0) + return; assert_different_registers(rax_argslot, rbx_temp, rdx_temp, (!arg_slots.is_register() ? rsp : arg_slots.as_register())); - -#ifdef ASSERT - // Verify that [argslot..argslot+size) lies within (rsp, rbp). - __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr)); - verify_argslot(_masm, rbx_temp, "deleted argument(s) must fall within current frame"); - if (arg_slots.is_register()) { - Label L_ok, L_bad; - __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD); - __ jccb(Assembler::less, L_bad); - __ testl(arg_slots.as_register(), -stack_move_unit() - 1); - __ jccb(Assembler::zero, L_ok); - __ bind(L_bad); - __ stop("assert arg_slots >= 0 and clear low bits"); - __ bind(L_ok); - } else { - assert(arg_slots.as_constant() >= 0, ""); - assert(arg_slots.as_constant() % -stack_move_unit() == 0, ""); - } -#endif //ASSERT - -#ifdef _LP64 - if (false) { // not needed, since register is positive - // clean high bits of stack motion register (was loaded as an int) - if (arg_slots.is_register()) - __ movslq(arg_slots.as_register(), arg_slots.as_register()); - } -#endif + if (VerifyMethodHandles) + verify_argslots(_masm, arg_slots, rax_argslot, false, + "deleted argument(s) must fall within current frame"); + if (VerifyMethodHandles) + verify_stack_move(_masm, arg_slots, +1); BLOCK_COMMENT("remove_arg_slots {"); // Pull up everything shallower than rax_argslot. @@ -299,19 +714,249 @@ __ BIND(loop); // pull one word up each time through the loop __ movptr(rbx_temp, Address(rdx_temp, 0)); - __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp); + __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp); __ addptr(rdx_temp, -wordSize); __ cmpptr(rdx_temp, rsp); - __ jccb(Assembler::greaterEqual, loop); + __ jcc(Assembler::aboveEqual, loop); } // Now move the argslot up, to point to the just-copied block. - __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr)); + __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale())); // And adjust the argslot address to point at the deletion point. - __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr)); + __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale())); BLOCK_COMMENT("} remove_arg_slots"); } +// Helper to copy argument slots to the top of the stack. +// The sequence starts with rax_argslot and is counted by slot_count +// slot_count must be a multiple of stack_move_unit() and >= 0 +// This function blows the temps but does not change rax_argslot. +void MethodHandles::push_arg_slots(MacroAssembler* _masm, + Register rax_argslot, + RegisterOrConstant slot_count, + int skip_words_count, + Register rbx_temp, Register rdx_temp) { + assert_different_registers(rax_argslot, rbx_temp, rdx_temp, + (!slot_count.is_register() ? 
rbp : slot_count.as_register()), + rsp); + assert(Interpreter::stackElementSize == wordSize, "else change this code"); + + if (VerifyMethodHandles) + verify_stack_move(_masm, slot_count, 0); + + // allow constant zero + if (slot_count.is_constant() && slot_count.as_constant() == 0) + return; + + BLOCK_COMMENT("push_arg_slots {"); + + Register rbx_top = rbx_temp; + + // There is at most 1 word to carry down with the TOS. + switch (skip_words_count) { + case 1: __ pop(rdx_temp); break; + case 0: break; + default: ShouldNotReachHere(); + } + + if (slot_count.is_constant()) { + for (int i = slot_count.as_constant() - 1; i >= 0; i--) { + __ pushptr(Address(rax_argslot, i * wordSize)); + } + } else { + Label L_plural, L_loop, L_break; + // Emit code to dynamically check for the common cases, zero and one slot. + __ cmpl(slot_count.as_register(), (int32_t) 1); + __ jccb(Assembler::greater, L_plural); + __ jccb(Assembler::less, L_break); + __ pushptr(Address(rax_argslot, 0)); + __ jmpb(L_break); + __ BIND(L_plural); + + // Loop for 2 or more: + // rbx = &rax[slot_count] + // while (rbx > rax) *(--rsp) = *(--rbx) + __ lea(rbx_top, Address(rax_argslot, slot_count, Address::times_ptr)); + __ BIND(L_loop); + __ subptr(rbx_top, wordSize); + __ pushptr(Address(rbx_top, 0)); + __ cmpptr(rbx_top, rax_argslot); + __ jcc(Assembler::above, L_loop); + __ bind(L_break); + } + switch (skip_words_count) { + case 1: __ push(rdx_temp); break; + case 0: break; + default: ShouldNotReachHere(); + } + BLOCK_COMMENT("} push_arg_slots"); +} + +// in-place movement; no change to rsp +// blows rax_temp, rdx_temp +void MethodHandles::move_arg_slots_up(MacroAssembler* _masm, + Register rbx_bottom, // invariant + Address top_addr, // can use rax_temp + RegisterOrConstant positive_distance_in_slots, + Register rax_temp, Register rdx_temp) { + BLOCK_COMMENT("move_arg_slots_up {"); + assert_different_registers(rbx_bottom, + rax_temp, rdx_temp, + positive_distance_in_slots.register_or_noreg()); + Label L_loop, L_break; + Register rax_top = rax_temp; + if (!top_addr.is_same_address(Address(rax_top, 0))) + __ lea(rax_top, top_addr); + // Detect empty (or broken) loop: +#ifdef ASSERT + if (VerifyMethodHandles) { + // Verify that &bottom < &top (non-empty interval) + Label L_ok, L_bad; + if (positive_distance_in_slots.is_register()) { + __ cmpptr(positive_distance_in_slots.as_register(), (int32_t) 0); + __ jcc(Assembler::lessEqual, L_bad); + } + __ cmpptr(rbx_bottom, rax_top); + __ jcc(Assembler::below, L_ok); + __ bind(L_bad); + __ stop("valid bounds (copy up)"); + __ BIND(L_ok); + } +#endif + __ cmpptr(rbx_bottom, rax_top); + __ jccb(Assembler::aboveEqual, L_break); + // work rax down to rbx, copying contiguous data upwards + // In pseudo-code: + // [rbx, rax) = &[bottom, top) + // while (--rax >= rbx) *(rax + distance) = *(rax + 0), rax--; + __ BIND(L_loop); + __ subptr(rax_top, wordSize); + __ movptr(rdx_temp, Address(rax_top, 0)); + __ movptr( Address(rax_top, positive_distance_in_slots, Address::times_ptr), rdx_temp); + __ cmpptr(rax_top, rbx_bottom); + __ jcc(Assembler::above, L_loop); + assert(Interpreter::stackElementSize == wordSize, "else change loop"); + __ bind(L_break); + BLOCK_COMMENT("} move_arg_slots_up"); +} + +// in-place movement; no change to rsp +// blows rax_temp, rdx_temp +void MethodHandles::move_arg_slots_down(MacroAssembler* _masm, + Address bottom_addr, // can use rax_temp + Register rbx_top, // invariant + RegisterOrConstant negative_distance_in_slots, + Register rax_temp, Register rdx_temp) { + 
BLOCK_COMMENT("move_arg_slots_down {"); + assert_different_registers(rbx_top, + negative_distance_in_slots.register_or_noreg(), + rax_temp, rdx_temp); + Label L_loop, L_break; + Register rax_bottom = rax_temp; + if (!bottom_addr.is_same_address(Address(rax_bottom, 0))) + __ lea(rax_bottom, bottom_addr); + // Detect empty (or broken) loop: +#ifdef ASSERT + assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, ""); + if (VerifyMethodHandles) { + // Verify that &bottom < &top (non-empty interval) + Label L_ok, L_bad; + if (negative_distance_in_slots.is_register()) { + __ cmpptr(negative_distance_in_slots.as_register(), (int32_t) 0); + __ jcc(Assembler::greaterEqual, L_bad); + } + __ cmpptr(rax_bottom, rbx_top); + __ jcc(Assembler::below, L_ok); + __ bind(L_bad); + __ stop("valid bounds (copy down)"); + __ BIND(L_ok); + } +#endif + __ cmpptr(rax_bottom, rbx_top); + __ jccb(Assembler::aboveEqual, L_break); + // work rax up to rbx, copying contiguous data downwards + // In pseudo-code: + // [rax, rbx) = &[bottom, top) + // while (rax < rbx) *(rax - distance) = *(rax + 0), rax++; + __ BIND(L_loop); + __ movptr(rdx_temp, Address(rax_bottom, 0)); + __ movptr( Address(rax_bottom, negative_distance_in_slots, Address::times_ptr), rdx_temp); + __ addptr(rax_bottom, wordSize); + __ cmpptr(rax_bottom, rbx_top); + __ jcc(Assembler::below, L_loop); + assert(Interpreter::stackElementSize == wordSize, "else change loop"); + __ bind(L_break); + BLOCK_COMMENT("} move_arg_slots_down"); +} + +// Copy from a field or array element to a stacked argument slot. +// is_element (ignored) says whether caller is loading an array element instead of an instance field. +void MethodHandles::move_typed_arg(MacroAssembler* _masm, + BasicType type, bool is_element, + Address slot_dest, Address value_src, + Register rbx_temp, Register rdx_temp) { + BLOCK_COMMENT(!is_element ? "move_typed_arg {" : "move_typed_arg { (array element)"); + if (type == T_OBJECT || type == T_ARRAY) { + __ load_heap_oop(rbx_temp, value_src); + __ movptr(slot_dest, rbx_temp); + } else if (type != T_VOID) { + int arg_size = type2aelembytes(type); + bool arg_is_signed = is_signed_subword_type(type); + int slot_size = (arg_size > wordSize) ? arg_size : wordSize; + __ load_sized_value( rdx_temp, value_src, arg_size, arg_is_signed, rbx_temp); + __ store_sized_value( slot_dest, rdx_temp, slot_size, rbx_temp); + } + BLOCK_COMMENT("} move_typed_arg"); +} + +void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type, + Address return_slot) { + BLOCK_COMMENT("move_return_value {"); + // Old versions of the JVM must clean the FPU stack after every return. +#ifndef _LP64 +#ifdef COMPILER2 + // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases + if ((type == T_FLOAT && UseSSE < 1) || (type == T_DOUBLE && UseSSE < 2)) { + for (int i = 1; i < 8; i++) { + __ ffree(i); + } + } else if (UseSSE < 2) { + __ empty_FPU_stack(); + } +#endif //COMPILER2 +#endif //!_LP64 + + // Look at the type and pull the value out of the corresponding register. + if (type == T_VOID) { + // nothing to do + } else if (type == T_OBJECT) { + __ movptr(return_slot, rax); + } else if (type == T_INT || is_subword_type(type)) { + // write the whole word, even if only 32 bits is significant + __ movptr(return_slot, rax); + } else if (type == T_LONG) { + // store the value by parts + // Note: We assume longs are continguous (if misaligned) on the interpreter stack. 
+ __ store_sized_value(return_slot, rax, BytesPerLong, rdx); + } else if (NOT_LP64((type == T_FLOAT && UseSSE < 1) || + (type == T_DOUBLE && UseSSE < 2) ||) + false) { + // Use old x86 FPU registers: + if (type == T_FLOAT) + __ fstp_s(return_slot); + else + __ fstp_d(return_slot); + } else if (type == T_FLOAT) { + __ movflt(return_slot, xmm0); + } else if (type == T_DOUBLE) { + __ movdbl(return_slot, xmm0); + } else { + ShouldNotReachHere(); + } + BLOCK_COMMENT("} move_return_value"); +} + + #ifndef PRODUCT extern "C" void print_method_handle(oop mh); void trace_method_handle_stub(const char* adaptername, @@ -321,48 +966,90 @@ intptr_t* saved_sp, intptr_t* saved_bp) { // called as a leaf from native code: do not block the JVM! + bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have rcx_mh intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset]; - intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset]; - printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n", - adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp); - if (last_sp != saved_sp && last_sp != NULL) - printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp); + intptr_t* base_sp = last_sp; + typedef MethodHandles::RicochetFrame RicochetFrame; + RicochetFrame* rfp = (RicochetFrame*)((address)saved_bp - RicochetFrame::sender_link_offset_in_bytes()); + if (!UseRicochetFrames || Universe::heap()->is_in((address) rfp->saved_args_base())) { + // Probably an interpreter frame. + base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset]; + } + intptr_t mh_reg = (intptr_t)mh; + const char* mh_reg_name = "rcx_mh"; + if (!has_mh) mh_reg_name = "rcx"; + tty->print_cr("MH %s %s="PTR_FORMAT" sp=("PTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="PTR_FORMAT, + adaptername, mh_reg_name, mh_reg, + (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp); if (Verbose) { - printf(" reg dump: "); + tty->print(" reg dump: "); int saved_regs_count = (entry_sp-1) - saved_regs; // 32 bit: rdi rsi rbp rsp; rbx rdx rcx (*) rax int i; for (i = 0; i <= saved_regs_count; i++) { - if (i > 0 && i % 4 == 0 && i != saved_regs_count) - printf("\n + dump: "); - printf(" %d: "INTPTR_FORMAT, i, saved_regs[i]); + if (i > 0 && i % 4 == 0 && i != saved_regs_count) { + tty->cr(); + tty->print(" + dump: "); + } + tty->print(" %d: "PTR_FORMAT, i, saved_regs[i]); } - printf("\n"); + tty->cr(); + if (last_sp != saved_sp && last_sp != NULL) + tty->print_cr("*** last_sp="PTR_FORMAT, (intptr_t)last_sp); int stack_dump_count = 16; if (stack_dump_count < (int)(saved_bp + 2 - saved_sp)) stack_dump_count = (int)(saved_bp + 2 - saved_sp); if (stack_dump_count > 64) stack_dump_count = 48; for (i = 0; i < stack_dump_count; i += 4) { - printf(" dump at SP[%d] "INTPTR_FORMAT": "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT"\n", - i, (intptr_t) &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]); + tty->print_cr(" dump at SP[%d] "PTR_FORMAT": "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT, + i, (intptr_t) &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]); } - print_method_handle(mh); + if (has_mh) + print_method_handle(mh); } } + +// The stub wraps the arguments in a struct on the stack to avoid +// dealing with 
the different calling conventions for passing 6 +// arguments. +struct MethodHandleStubArguments { + const char* adaptername; + oopDesc* mh; + intptr_t* saved_regs; + intptr_t* entry_sp; + intptr_t* saved_sp; + intptr_t* saved_bp; +}; +void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) { + trace_method_handle_stub(args->adaptername, + args->mh, + args->saved_regs, + args->entry_sp, + args->saved_sp, + args->saved_bp); +} + void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) { if (!TraceMethodHandles) return; BLOCK_COMMENT("trace_method_handle {"); __ push(rax); - __ lea(rax, Address(rsp, wordSize*6)); // entry_sp + __ lea(rax, Address(rsp, wordSize * NOT_LP64(6) LP64_ONLY(14))); // entry_sp __ pusha(); __ pusha(); - // arguments: - __ push(rbp); // interpreter frame pointer + __ mov(rbx, rsp); + __ enter(); + // incoming state: + // rcx: method handle + // r13 or rsi: saved sp + // To avoid calling convention issues, build a record on the stack and pass the pointer to that instead. + __ push(rbp); // saved_bp __ push(rsi); // saved_sp __ push(rax); // entry_sp + __ push(rbx); // pusha saved_regs __ push(rcx); // mh - __ push(rcx); + __ push(rcx); // adaptername __ movptr(Address(rsp, 0), (intptr_t) adaptername); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5); + __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp); + __ leave(); __ popa(); __ pop(rax); BLOCK_COMMENT("} trace_method_handle"); @@ -376,13 +1063,20 @@ |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST) |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM) |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM) + //OP_PRIM_TO_REF is below... |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS) |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS) |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS) |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS) - //|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG! + //OP_COLLECT_ARGS is below... + |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) + |(!UseRicochetFrames ? 0 : + java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 : + ((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS) + )) ); - // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS. } //------------------------------------------------------------------------------ @@ -391,6 +1085,8 @@ // Generate an "entry" field for a method handle. // This determines how the method handle will respond to calls. void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) { + MethodHandles::EntryKind ek_orig = ek_original_kind(ek); + // Here is the register state during an interpreted call, // as set up by generate_method_handle_interpreter_entry(): // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused) @@ -403,10 +1099,11 @@ const Register rax_argslot = rax; const Register rbx_temp = rbx; const Register rdx_temp = rdx; + const Register rdi_temp = rdi; // This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls) // and gen_c2i_adapter (from compiled calls): - const Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi); + const Register saved_last_sp = saved_last_sp_register(); // Argument registers for _raise_exception. 
// 32-bit: Pass first two oop/int args in registers ECX and EDX. @@ -439,6 +1136,13 @@ return; } +#ifdef ASSERT + __ push((int32_t) 0xEEEEEEEE); + __ push((int32_t) (intptr_t) entry_name(ek)); + LP64_ONLY(__ push((int32_t) high((intptr_t) entry_name(ek)))); + __ push((int32_t) 0x33333333); +#endif //ASSERT + address interp_entry = __ pc(); trace_method_handle(_masm, entry_name(ek)); @@ -554,7 +1258,6 @@ __ load_klass(rax_klass, rcx_recv); __ verify_oop(rax_klass); - Register rdi_temp = rdi; Register rbx_method = rbx_index; // get interface klass @@ -590,16 +1293,14 @@ case _bound_long_direct_mh: { bool direct_to_method = (ek >= _bound_ref_direct_mh); - BasicType arg_type = T_ILLEGAL; - int arg_mask = _INSERT_NO_MASK; - int arg_slots = -1; - get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots); + BasicType arg_type = ek_bound_mh_arg_type(ek); + int arg_slots = type2size[arg_type]; // make room for the new argument: __ movl(rax_argslot, rcx_bmh_vmargslot); __ lea(rax_argslot, __ argument_address(rax_argslot)); - insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, rax_argslot, rbx_temp, rdx_temp); + insert_arg_slots(_masm, arg_slots * stack_move_unit(), rax_argslot, rbx_temp, rdx_temp); // store bound argument into the new stack slot: __ load_heap_oop(rbx_temp, rcx_bmh_argument); @@ -607,9 +1308,10 @@ __ movptr(Address(rax_argslot, 0), rbx_temp); } else { Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type)); - const int arg_size = type2aelembytes(arg_type); - __ load_sized_value(rdx_temp, prim_value_addr, arg_size, is_signed_subword_type(arg_type), rbx_temp); - __ store_sized_value(Address(rax_argslot, 0), rdx_temp, arg_size, rbx_temp); + move_typed_arg(_masm, arg_type, false, + Address(rax_argslot, 0), + prim_value_addr, + rbx_temp, rdx_temp); } if (direct_to_method) { @@ -646,7 +1348,7 @@ // What class are we casting to? __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object! - __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes())); + load_klass_from_Class(_masm, rbx_klass); Label done; __ movptr(rdx_temp, vmarg); @@ -681,6 +1383,7 @@ case _adapter_prim_to_prim: case _adapter_ref_to_prim: + case _adapter_prim_to_ref: // handled completely by optimized cases __ stop("init_AdapterMethodHandle should not issue this"); break; @@ -732,8 +1435,7 @@ // Do the requested conversion and store the value. 
Register rbx_vminfo = rbx_temp; - __ movl(rbx_vminfo, rcx_amh_conversion); - assert(CONV_VMINFO_SHIFT == 0, "preshifted"); + load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion); // get the new MH: __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); @@ -771,7 +1473,7 @@ // on a little-endian machine we keep the first slot and add another after __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); - insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, + insert_arg_slots(_masm, stack_move_unit(), rax_argslot, rbx_temp, rdx_temp); Address vmarg1(rax_argslot, -Interpreter::stackElementSize); Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize); @@ -823,7 +1525,7 @@ __ movl(rax_argslot, rcx_amh_vmargslot); __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); if (ek == _adapter_opt_f2d) { - insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, + insert_arg_slots(_masm, stack_move_unit(), rax_argslot, rbx_temp, rdx_temp); } Address vmarg(rax_argslot, -Interpreter::stackElementSize); @@ -841,7 +1543,7 @@ #else //_LP64 if (ek == _adapter_opt_f2d) { __ fld_s(vmarg); // load float to ST0 - __ fstp_s(vmarg); // store single + __ fstp_d(vmarg); // store double } else { __ fld_d(vmarg); // load double to ST0 __ fstp_s(vmarg); // store single @@ -858,10 +1560,6 @@ } break; - case _adapter_prim_to_ref: - __ unimplemented(entry_name(ek)); // %%% FIXME: NYI - break; - case _adapter_swap_args: case _adapter_rot_args: // handled completely by optimized cases @@ -875,8 +1573,8 @@ case _adapter_opt_rot_2_up: case _adapter_opt_rot_2_down: { - int swap_bytes = 0, rotate = 0; - get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate); + int swap_slots = ek_adapter_opt_swap_slots(ek); + int rotate = ek_adapter_opt_swap_mode(ek); // 'argslot' is the position of the first argument to swap __ movl(rax_argslot, rcx_amh_vmargslot); @@ -884,83 +1582,69 @@ // 'vminfo' is the second Register rbx_destslot = rbx_temp; - __ movl(rbx_destslot, rcx_amh_conversion); - assert(CONV_VMINFO_SHIFT == 0, "preshifted"); - __ andl(rbx_destslot, CONV_VMINFO_MASK); + load_conversion_vminfo(_masm, rbx_destslot, rcx_amh_conversion); __ lea(rbx_destslot, __ argument_address(rbx_destslot)); - DEBUG_ONLY(verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame")); + if (VerifyMethodHandles) + verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame"); + assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here"); if (!rotate) { - for (int i = 0; i < swap_bytes; i += wordSize) { - __ movptr(rdx_temp, Address(rax_argslot , i)); - __ push(rdx_temp); - __ movptr(rdx_temp, Address(rbx_destslot, i)); - __ movptr(Address(rax_argslot, i), rdx_temp); - __ pop(rdx_temp); - __ movptr(Address(rbx_destslot, i), rdx_temp); + // simple swap + for (int i = 0; i < swap_slots; i++) { + __ movptr(rdi_temp, Address(rax_argslot, i * wordSize)); + __ movptr(rdx_temp, Address(rbx_destslot, i * wordSize)); + __ movptr(Address(rax_argslot, i * wordSize), rdx_temp); + __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp); } } else { - // push the first chunk, which is going to get overwritten - for (int i = swap_bytes; (i -= wordSize) >= 0; ) { - __ movptr(rdx_temp, Address(rax_argslot, i)); - __ push(rdx_temp); + // A rotate is actually pair of moves, with an "odd slot" (or pair) + // changing place with a series of other slots. 
+ // First, push the "odd slot", which is going to get overwritten + for (int i = swap_slots - 1; i >= 0; i--) { + // handle one with rdi_temp instead of a push: + if (i == 0) __ movptr(rdi_temp, Address(rax_argslot, i * wordSize)); + else __ pushptr( Address(rax_argslot, i * wordSize)); } - if (rotate > 0) { - // rotate upward - __ subptr(rax_argslot, swap_bytes); -#ifdef ASSERT - { - // Verify that argslot > destslot, by at least swap_bytes. - Label L_ok; - __ cmpptr(rax_argslot, rbx_destslot); - __ jccb(Assembler::aboveEqual, L_ok); - __ stop("source must be above destination (upward rotation)"); - __ bind(L_ok); - } -#endif + // Here is rotate > 0: + // (low mem) (high mem) + // | dest: more_slots... | arg: odd_slot :arg+1 | + // => + // | dest: odd_slot | dest+1: more_slots... :arg+1 | // work argslot down to destslot, copying contiguous data upwards // pseudo-code: // rax = src_addr - swap_bytes // rbx = dest_addr // while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--; - Label loop; - __ bind(loop); - __ movptr(rdx_temp, Address(rax_argslot, 0)); - __ movptr(Address(rax_argslot, swap_bytes), rdx_temp); - __ addptr(rax_argslot, -wordSize); - __ cmpptr(rax_argslot, rbx_destslot); - __ jccb(Assembler::aboveEqual, loop); + move_arg_slots_up(_masm, + rbx_destslot, + Address(rax_argslot, 0), + swap_slots, + rax_argslot, rdx_temp); } else { - __ addptr(rax_argslot, swap_bytes); -#ifdef ASSERT - { - // Verify that argslot < destslot, by at least swap_bytes. - Label L_ok; - __ cmpptr(rax_argslot, rbx_destslot); - __ jccb(Assembler::belowEqual, L_ok); - __ stop("source must be below destination (downward rotation)"); - __ bind(L_ok); - } -#endif + // Here is the other direction, rotate < 0: + // (low mem) (high mem) + // | arg: odd_slot | arg+1: more_slots... :dest+1 | + // => + // | arg: more_slots... 
| dest: odd_slot :dest+1 | // work argslot up to destslot, copying contiguous data downwards // pseudo-code: // rax = src_addr + swap_bytes // rbx = dest_addr // while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++; - Label loop; - __ bind(loop); - __ movptr(rdx_temp, Address(rax_argslot, 0)); - __ movptr(Address(rax_argslot, -swap_bytes), rdx_temp); - __ addptr(rax_argslot, wordSize); - __ cmpptr(rax_argslot, rbx_destslot); - __ jccb(Assembler::belowEqual, loop); + __ addptr(rbx_destslot, wordSize); + move_arg_slots_down(_masm, + Address(rax_argslot, swap_slots * wordSize), + rbx_destslot, + -swap_slots, + rax_argslot, rdx_temp); + + __ subptr(rbx_destslot, wordSize); } - // pop the original first chunk into the destination slot, now free - for (int i = 0; i < swap_bytes; i += wordSize) { - __ pop(rdx_temp); - __ movptr(Address(rbx_destslot, i), rdx_temp); + for (int i = 0; i < swap_slots; i++) { + if (i == 0) __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp); + else __ popptr(Address(rbx_destslot, i * wordSize)); } } @@ -976,53 +1660,22 @@ __ lea(rax_argslot, __ argument_address(rax_argslot)); // 'stack_move' is negative number of words to duplicate - Register rdx_stack_move = rdx_temp; - __ movl2ptr(rdx_stack_move, rcx_amh_conversion); - __ sarptr(rdx_stack_move, CONV_STACK_MOVE_SHIFT); - - int argslot0_num = 0; - Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num)); - assert(argslot0.base() == rsp, ""); - int pre_arg_size = argslot0.disp(); - assert(pre_arg_size % wordSize == 0, ""); - assert(pre_arg_size > 0, "must include PC"); - - // remember the old rsp+1 (argslot[0]) - Register rbx_oldarg = rbx_temp; - __ lea(rbx_oldarg, argslot0); + Register rdi_stack_move = rdi_temp; + load_stack_move(_masm, rdi_stack_move, rcx_recv, true); - // move rsp down to make room for dups - __ lea(rsp, Address(rsp, rdx_stack_move, Address::times_ptr)); - - // compute the new rsp+1 (argslot[0]) - Register rdx_newarg = rdx_temp; - __ lea(rdx_newarg, argslot0); - - __ push(rdi); // need a temp - // (preceding push must be done after arg addresses are taken!) - - // pull down the pre_arg_size data (PC) - for (int i = -pre_arg_size; i < 0; i += wordSize) { - __ movptr(rdi, Address(rbx_oldarg, i)); - __ movptr(Address(rdx_newarg, i), rdi); + if (VerifyMethodHandles) { + verify_argslots(_masm, rdi_stack_move, rax_argslot, true, + "copied argument(s) must fall within current frame"); } - // copy from rax_argslot[0...] down to new_rsp[1...] 
- // pseudo-code: - // rbx = old_rsp+1 - // rdx = new_rsp+1 - // rax = argslot - // while (rdx < rbx) *rdx++ = *rax++ - Label loop; - __ bind(loop); - __ movptr(rdi, Address(rax_argslot, 0)); - __ movptr(Address(rdx_newarg, 0), rdi); - __ addptr(rax_argslot, wordSize); - __ addptr(rdx_newarg, wordSize); - __ cmpptr(rdx_newarg, rbx_oldarg); - __ jccb(Assembler::less, loop); + // insert location is always the bottom of the argument list: + Address insert_location = __ argument_address(constant(0)); + int pre_arg_words = insert_location.disp() / wordSize; // return PC is pushed + assert(insert_location.base() == rsp, ""); - __ pop(rdi); // restore temp + __ negl(rdi_stack_move); + push_arg_slots(_masm, rax_argslot, rdi_stack_move, + pre_arg_words, rbx_temp, rdx_temp); __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ jump_to_method_handle_entry(rcx_recv, rdx_temp); @@ -1035,63 +1688,583 @@ __ movl(rax_argslot, rcx_amh_vmargslot); __ lea(rax_argslot, __ argument_address(rax_argslot)); - __ push(rdi); // need a temp // (must do previous push after argslot address is taken) // 'stack_move' is number of words to drop - Register rdi_stack_move = rdi; - __ movl2ptr(rdi_stack_move, rcx_amh_conversion); - __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT); + Register rdi_stack_move = rdi_temp; + load_stack_move(_masm, rdi_stack_move, rcx_recv, false); remove_arg_slots(_masm, rdi_stack_move, rax_argslot, rbx_temp, rdx_temp); - __ pop(rdi); // restore temp - __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ jump_to_method_handle_entry(rcx_recv, rdx_temp); } break; case _adapter_collect_args: - __ unimplemented(entry_name(ek)); // %%% FIXME: NYI - break; - + case _adapter_fold_args: case _adapter_spread_args: // handled completely by optimized cases __ stop("init_AdapterMethodHandle should not issue this"); break; + case _adapter_opt_collect_ref: + case _adapter_opt_collect_int: + case _adapter_opt_collect_long: + case _adapter_opt_collect_float: + case _adapter_opt_collect_double: + case _adapter_opt_collect_void: + case _adapter_opt_collect_0_ref: + case _adapter_opt_collect_1_ref: + case _adapter_opt_collect_2_ref: + case _adapter_opt_collect_3_ref: + case _adapter_opt_collect_4_ref: + case _adapter_opt_collect_5_ref: + case _adapter_opt_filter_S0_ref: + case _adapter_opt_filter_S1_ref: + case _adapter_opt_filter_S2_ref: + case _adapter_opt_filter_S3_ref: + case _adapter_opt_filter_S4_ref: + case _adapter_opt_filter_S5_ref: + case _adapter_opt_collect_2_S0_ref: + case _adapter_opt_collect_2_S1_ref: + case _adapter_opt_collect_2_S2_ref: + case _adapter_opt_collect_2_S3_ref: + case _adapter_opt_collect_2_S4_ref: + case _adapter_opt_collect_2_S5_ref: + case _adapter_opt_fold_ref: + case _adapter_opt_fold_int: + case _adapter_opt_fold_long: + case _adapter_opt_fold_float: + case _adapter_opt_fold_double: + case _adapter_opt_fold_void: + case _adapter_opt_fold_1_ref: + case _adapter_opt_fold_2_ref: + case _adapter_opt_fold_3_ref: + case _adapter_opt_fold_4_ref: + case _adapter_opt_fold_5_ref: + { + // Given a fresh incoming stack frame, build a new ricochet frame. + // On entry, TOS points at a return PC, and RBP is the callers frame ptr. + // RSI/R13 has the caller's exact stack pointer, which we must also preserve. + // RCX contains an AdapterMethodHandle of the indicated kind. + + // Relevant AMH fields: + // amh.vmargslot: + // points to the trailing edge of the arguments + // to filter, collect, or fold. For a boxing operation, + // it points just after the single primitive value. 
+ // amh.argument: + // recursively called MH, on |collect| arguments + // amh.vmtarget: + // final destination MH, on return value, etc. + // amh.conversion.dest: + // tells what is the type of the return value + // (not needed here, since dest is also derived from ek) + // amh.conversion.vminfo: + // points to the trailing edge of the return value + // when the vmtarget is to be called; this is + // equal to vmargslot + (retained ? |collect| : 0) + + // Pass 0 or more argument slots to the recursive target. + int collect_count_constant = ek_adapter_opt_collect_count(ek); + + // The collected arguments are copied from the saved argument list: + int collect_slot_constant = ek_adapter_opt_collect_slot(ek); + + assert(ek_orig == _adapter_collect_args || + ek_orig == _adapter_fold_args, ""); + bool retain_original_args = (ek_orig == _adapter_fold_args); + + // The return value is replaced (or inserted) at the 'vminfo' argslot. + // Sometimes we can compute this statically. + int dest_slot_constant = -1; + if (!retain_original_args) + dest_slot_constant = collect_slot_constant; + else if (collect_slot_constant >= 0 && collect_count_constant >= 0) + // We are preserving all the arguments, and the return value is prepended, + // so the return slot is to the left (above) the |collect| sequence. + dest_slot_constant = collect_slot_constant + collect_count_constant; + + // Replace all those slots by the result of the recursive call. + // The result type can be one of ref, int, long, float, double, void. + // In the case of void, nothing is pushed on the stack after return. + BasicType dest = ek_adapter_opt_collect_type(ek); + assert(dest == type2wfield[dest], "dest is a stack slot type"); + int dest_count = type2size[dest]; + assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size"); + + // Choose a return continuation. + EntryKind ek_ret = _adapter_opt_return_any; + if (dest != T_CONFLICT && OptimizeMethodHandles) { + switch (dest) { + case T_INT : ek_ret = _adapter_opt_return_int; break; + case T_LONG : ek_ret = _adapter_opt_return_long; break; + case T_FLOAT : ek_ret = _adapter_opt_return_float; break; + case T_DOUBLE : ek_ret = _adapter_opt_return_double; break; + case T_OBJECT : ek_ret = _adapter_opt_return_ref; break; + case T_VOID : ek_ret = _adapter_opt_return_void; break; + default : ShouldNotReachHere(); + } + if (dest == T_OBJECT && dest_slot_constant >= 0) { + EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant); + if (ek_try <= _adapter_opt_return_LAST && + ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) { + ek_ret = ek_try; + } + } + assert(ek_adapter_opt_return_type(ek_ret) == dest, ""); + } + + // Already pushed: ... keep1 | collect | keep2 | sender_pc | + // push(sender_pc); + + // Compute argument base: + Register rax_argv = rax_argslot; + __ lea(rax_argv, __ argument_address(constant(0))); + + // Push a few extra argument words, if we need them to store the return value. 
+ { + int extra_slots = 0; + if (retain_original_args) { + extra_slots = dest_count; + } else if (collect_count_constant == -1) { + extra_slots = dest_count; // collect_count might be zero; be generous + } else if (dest_count > collect_count_constant) { + extra_slots = (dest_count - collect_count_constant); + } else { + // else we know we have enough dead space in |collect| to repurpose for return values + } + DEBUG_ONLY(extra_slots += 1); + if (extra_slots > 0) { + __ pop(rbx_temp); // return value + __ subptr(rsp, (extra_slots * Interpreter::stackElementSize)); + // Push guard word #2 in debug mode. + DEBUG_ONLY(__ movptr(Address(rsp, 0), (int32_t) RicochetFrame::MAGIC_NUMBER_2)); + __ push(rbx_temp); + } + } + + RicochetFrame::enter_ricochet_frame(_masm, rcx_recv, rax_argv, + entry(ek_ret)->from_interpreted_entry(), rbx_temp); + + // Now pushed: ... keep1 | collect | keep2 | RF | + // some handy frame slots: + Address exact_sender_sp_addr = RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes()); + Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); + Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes()); + +#ifdef ASSERT + if (VerifyMethodHandles && dest != T_CONFLICT) { + BLOCK_COMMENT("verify AMH.conv.dest"); + load_conversion_dest_type(_masm, rbx_temp, conversion_addr); + Label L_dest_ok; + __ cmpl(rbx_temp, (int) dest); + __ jcc(Assembler::equal, L_dest_ok); + if (dest == T_INT) { + for (int bt = T_BOOLEAN; bt < T_INT; bt++) { + if (is_subword_type(BasicType(bt))) { + __ cmpl(rbx_temp, (int) bt); + __ jcc(Assembler::equal, L_dest_ok); + } + } + } + __ stop("bad dest in AMH.conv"); + __ BIND(L_dest_ok); + } +#endif //ASSERT + + // Find out where the original copy of the recursive argument sequence begins. + Register rax_coll = rax_argv; + { + RegisterOrConstant collect_slot = collect_slot_constant; + if (collect_slot_constant == -1) { + __ movl(rdi_temp, rcx_amh_vmargslot); + collect_slot = rdi_temp; + } + if (collect_slot_constant != 0) + __ lea(rax_coll, Address(rax_argv, collect_slot, Interpreter::stackElementScale())); + // rax_coll now points at the trailing edge of |collect| and leading edge of |keep2| + } + + // Replace the old AMH with the recursive MH. (No going back now.) + // In the case of a boxing call, the recursive call is to a 'boxer' method, + // such as Integer.valueOf or Long.valueOf. In the case of a filter + // or collect call, it will take one or more arguments, transform them, + // and return some result, to store back into argument_base[vminfo]. + __ load_heap_oop(rcx_recv, rcx_amh_argument); + if (VerifyMethodHandles) verify_method_handle(_masm, rcx_recv); + + // Push a space for the recursively called MH first: + __ push((int32_t)NULL_WORD); + + // Calculate |collect|, the number of arguments we are collecting. 
+  Register rdi_collect_count = rdi_temp;
+  RegisterOrConstant collect_count;
+  if (collect_count_constant >= 0) {
+    collect_count = collect_count_constant;
+  } else {
+    __ load_method_handle_vmslots(rdi_collect_count, rcx_recv, rdx_temp);
+    collect_count = rdi_collect_count;
+  }
+#ifdef ASSERT
+  if (VerifyMethodHandles && collect_count_constant >= 0) {
+    __ load_method_handle_vmslots(rbx_temp, rcx_recv, rdx_temp);
+    Label L_count_ok;
+    __ cmpl(rbx_temp, collect_count_constant);
+    __ jcc(Assembler::equal, L_count_ok);
+    __ stop("bad vminfo in AMH.conv");
+    __ BIND(L_count_ok);
+  }
+#endif //ASSERT
+
+  // copy |collect| slots directly to TOS:
+  push_arg_slots(_masm, rax_coll, collect_count, 0, rbx_temp, rdx_temp);
+  // Now pushed: ... keep1 | collect | keep2 | RF... | collect |
+  // rax_coll still points at the trailing edge of |collect| and leading edge of |keep2|
+
+  // If necessary, adjust the saved arguments to make room for the eventual return value.
+  // Normal adjustment: ... keep1 | +dest+ | -collect- | keep2 | RF... | collect |
+  // If retaining args: ... keep1 | +dest+ | collect | keep2 | RF... | collect |
+  // In the non-retaining case, this might move keep2 either up or down.
+  // We don't have to copy the whole | RF... collect | complex,
+  // but we must adjust RF.saved_args_base.
+  // Also, from now on, we will forget about the original copy of |collect|.
+  // If we are retaining it, we will treat it as part of |keep2|.
+  // For clarity we will define |keep3| = |collect|keep2| or |keep2|.
+
+  BLOCK_COMMENT("adjust trailing arguments {");
+  // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements.
+  int open_count = dest_count;
+  RegisterOrConstant close_count = collect_count_constant;
+  Register rdi_close_count = rdi_collect_count;
+  if (retain_original_args) {
+    close_count = constant(0);
+  } else if (collect_count_constant == -1) {
+    close_count = rdi_collect_count;
+  }
+
+  // How many slots need moving? This is simply dest_slot (0 => no |keep3|).
+  RegisterOrConstant keep3_count;
+  Register rsi_keep3_count = rsi; // can repair from RF.exact_sender_sp
+  if (dest_slot_constant >= 0) {
+    keep3_count = dest_slot_constant;
+  } else {
+    load_conversion_vminfo(_masm, rsi_keep3_count, conversion_addr);
+    keep3_count = rsi_keep3_count;
+  }
+#ifdef ASSERT
+  if (VerifyMethodHandles && dest_slot_constant >= 0) {
+    load_conversion_vminfo(_masm, rbx_temp, conversion_addr);
+    Label L_vminfo_ok;
+    __ cmpl(rbx_temp, dest_slot_constant);
+    __ jcc(Assembler::equal, L_vminfo_ok);
+    __ stop("bad vminfo in AMH.conv");
+    __ BIND(L_vminfo_ok);
+  }
+#endif //ASSERT
+
+  // tasks remaining:
+  bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0);
+  bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0));
+  bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant());
+
+  if (stomp_dest | fix_arg_base) {
+    // we will probably need an updated rax_argv value
+    if (collect_slot_constant >= 0) {
+      // rax_coll already holds the leading edge of |keep2|, so tweak it
+      assert(rax_coll == rax_argv, "elided a move");
+      if (collect_slot_constant != 0)
+        __ subptr(rax_argv, collect_slot_constant * Interpreter::stackElementSize);
+    } else {
+      // Just reload from RF.saved_args_base.
+      __ movptr(rax_argv, saved_args_base_addr);
+    }
+  }
+
+  // Old and new argument locations (based at slot 0).
+  // Net shift (&new_argv - &old_argv) is (close_count - open_count).
+  bool zero_open_count = (open_count == 0); // remember this bit of info
+  if (move_keep3 && fix_arg_base) {
+    // It will be easier to have everything in one register:
+    if (close_count.is_register()) {
+      // Deduct open_count from close_count register to get a clean +/- value.
+      __ subptr(close_count.as_register(), open_count);
+    } else {
+      close_count = close_count.as_constant() - open_count;
+    }
+    open_count = 0;
+  }
+  Address old_argv(rax_argv, 0);
+  Address new_argv(rax_argv, close_count, Interpreter::stackElementScale(),
+                   - open_count * Interpreter::stackElementSize);
+
+  // First decide if any actual data are to be moved.
+  // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change.
+  // (As it happens, all movements involve an argument list size change.)
+
+  // If there are variable parameters, use dynamic checks to skip around the whole mess.
+  Label L_done;
+  if (!keep3_count.is_constant()) {
+    __ testl(keep3_count.as_register(), keep3_count.as_register());
+    __ jcc(Assembler::zero, L_done);
+  }
+  if (!close_count.is_constant()) {
+    __ cmpl(close_count.as_register(), open_count);
+    __ jcc(Assembler::equal, L_done);
+  }
+
+  if (move_keep3 && fix_arg_base) {
+    bool emit_move_down = false, emit_move_up = false, emit_guard = false;
+    if (!close_count.is_constant()) {
+      emit_move_down = emit_guard = !zero_open_count;
+      emit_move_up = true;
+    } else if (open_count != close_count.as_constant()) {
+      emit_move_down = (open_count > close_count.as_constant());
+      emit_move_up = !emit_move_down;
+    }
+    Label L_move_up;
+    if (emit_guard) {
+      __ cmpl(close_count.as_register(), open_count);
+      __ jcc(Assembler::greater, L_move_up);
+    }
+
+    if (emit_move_down) {
+      // Move arguments down if |+dest+| > |-collect-|
+      // (This is rare, except when arguments are retained.)
+      // This opens space for the return value.
+      if (keep3_count.is_constant()) {
+        for (int i = 0; i < keep3_count.as_constant(); i++) {
+          __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize));
+          __ movptr( new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp);
+        }
+      } else {
+        Register rbx_argv_top = rbx_temp;
+        __ lea(rbx_argv_top, old_argv.plus_disp(keep3_count, Interpreter::stackElementScale()));
+        move_arg_slots_down(_masm,
+                            old_argv, // beginning of old argv
+                            rbx_argv_top, // end of old argv
+                            close_count, // distance to move down (must be negative)
+                            rax_argv, rdx_temp);
+        // Used argv as an iteration variable; reload from RF.saved_args_base.
+        __ movptr(rax_argv, saved_args_base_addr);
+      }
+    }
+
+    if (emit_guard) {
+      __ jmp(L_done); // assumes emit_move_up is true also
+      __ BIND(L_move_up);
+    }
+
+    if (emit_move_up) {
+
+      // Move arguments up if |+dest+| < |-collect-|
+      // (This is usual, except when |keep3| is empty.)
+      // This closes up the space occupied by the now-deleted collect values.
+ if (keep3_count.is_constant()) { + for (int i = keep3_count.as_constant() - 1; i >= 0; i--) { + __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize)); + __ movptr( new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp); + } + } else { + Address argv_top = old_argv.plus_disp(keep3_count, Interpreter::stackElementScale()); + move_arg_slots_up(_masm, + rax_argv, // beginning of old argv + argv_top, // end of old argv + close_count, // distance to move up (must be positive) + rbx_temp, rdx_temp); + } + } + } + __ BIND(L_done); + + if (fix_arg_base) { + // adjust RF.saved_args_base by adding (close_count - open_count) + if (!new_argv.is_same_address(Address(rax_argv, 0))) + __ lea(rax_argv, new_argv); + __ movptr(saved_args_base_addr, rax_argv); + } + + if (stomp_dest) { + // Stomp the return slot, so it doesn't hold garbage. + // This isn't strictly necessary, but it may help detect bugs. + int forty_two = RicochetFrame::RETURN_VALUE_PLACEHOLDER; + __ movptr(Address(rax_argv, keep3_count, Address::times_ptr), + (int32_t) forty_two); + // uses rsi_keep3_count + } + BLOCK_COMMENT("} adjust trailing arguments"); + + BLOCK_COMMENT("do_recursive_call"); + __ mov(saved_last_sp, rsp); // set rsi/r13 for callee + __ pushptr(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr()).addr()); + // The globally unique bounce address has two purposes: + // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame). + // 2. When returned to, it cuts back the stack and redirects control flow + // to the return handler. + // The return handler will further cut back the stack when it takes + // down the RF. Perhaps there is a way to streamline this further. + + // State during recursive call: + // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc | + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + + break; + } + + case _adapter_opt_return_ref: + case _adapter_opt_return_int: + case _adapter_opt_return_long: + case _adapter_opt_return_float: + case _adapter_opt_return_double: + case _adapter_opt_return_void: + case _adapter_opt_return_S0_ref: + case _adapter_opt_return_S1_ref: + case _adapter_opt_return_S2_ref: + case _adapter_opt_return_S3_ref: + case _adapter_opt_return_S4_ref: + case _adapter_opt_return_S5_ref: + { + BasicType dest_type_constant = ek_adapter_opt_return_type(ek); + int dest_slot_constant = ek_adapter_opt_return_slot(ek); + + if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); + + if (dest_slot_constant == -1) { + // The current stub is a general handler for this dest_type. + // It can be called from _adapter_opt_return_any below. + // Stash the address in a little table. 
+ assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob"); + address return_handler = __ pc(); + _adapter_return_handlers[dest_type_constant] = return_handler; + if (dest_type_constant == T_INT) { + // do the subword types too + for (int bt = T_BOOLEAN; bt < T_INT; bt++) { + if (is_subword_type(BasicType(bt)) && + _adapter_return_handlers[bt] == NULL) { + _adapter_return_handlers[bt] = return_handler; + } + } + } + } + + Register rbx_arg_base = rbx_temp; + assert_different_registers(rax, rdx, // possibly live return value registers + rdi_temp, rbx_arg_base); + + Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); + Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes()); + + __ movptr(rbx_arg_base, saved_args_base_addr); + RegisterOrConstant dest_slot = dest_slot_constant; + if (dest_slot_constant == -1) { + load_conversion_vminfo(_masm, rdi_temp, conversion_addr); + dest_slot = rdi_temp; + } + // Store the result back into the argslot. + // This code uses the interpreter calling sequence, in which the return value + // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop. + // There are certain irregularities with floating point values, which can be seen + // in TemplateInterpreterGenerator::generate_return_entry_for. + move_return_value(_masm, dest_type_constant, Address(rbx_arg_base, dest_slot, Interpreter::stackElementScale())); + + RicochetFrame::leave_ricochet_frame(_masm, rcx_recv, rbx_arg_base, rdx_temp); + __ push(rdx_temp); // repush the return PC + + // Load the final target and go. + if (VerifyMethodHandles) verify_method_handle(_masm, rcx_recv); + __ jump_to_method_handle_entry(rcx_recv, rdx_temp); + __ hlt(); // -------------------- + break; + } + + case _adapter_opt_return_any: + { + if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); + Register rdi_conv = rdi_temp; + assert_different_registers(rax, rdx, // possibly live return value registers + rdi_conv, rbx_temp); + + Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); + load_conversion_dest_type(_masm, rdi_conv, conversion_addr); + __ lea(rbx_temp, ExternalAddress((address) &_adapter_return_handlers[0])); + __ movptr(rbx_temp, Address(rbx_temp, rdi_conv, Address::times_ptr)); + +#ifdef ASSERT + { Label L_badconv; + __ testptr(rbx_temp, rbx_temp); + __ jccb(Assembler::zero, L_badconv); + __ jmp(rbx_temp); + __ bind(L_badconv); + __ stop("bad method handle return"); + } +#else //ASSERT + __ jmp(rbx_temp); +#endif //ASSERT + break; + } + case _adapter_opt_spread_0: - case _adapter_opt_spread_1: - case _adapter_opt_spread_more: + case _adapter_opt_spread_1_ref: + case _adapter_opt_spread_2_ref: + case _adapter_opt_spread_3_ref: + case _adapter_opt_spread_4_ref: + case _adapter_opt_spread_5_ref: + case _adapter_opt_spread_ref: + case _adapter_opt_spread_byte: + case _adapter_opt_spread_char: + case _adapter_opt_spread_short: + case _adapter_opt_spread_int: + case _adapter_opt_spread_long: + case _adapter_opt_spread_float: + case _adapter_opt_spread_double: { // spread an array out into a group of arguments - int length_constant = get_ek_adapter_opt_spread_info(ek); + int length_constant = ek_adapter_opt_spread_count(ek); + bool length_can_be_zero = (length_constant == 0); + if (length_constant < 0) { + // some adapters with variable length must handle the zero case + if (!OptimizeMethodHandles || + 
ek_adapter_opt_spread_type(ek) != T_OBJECT) + length_can_be_zero = true; + } // find the address of the array argument __ movl(rax_argslot, rcx_amh_vmargslot); __ lea(rax_argslot, __ argument_address(rax_argslot)); - // grab some temps - { __ push(rsi); __ push(rdi); } - // (preceding pushes must be done after argslot address is taken!) -#define UNPUSH_RSI_RDI \ - { __ pop(rdi); __ pop(rsi); } + // grab another temp + Register rsi_temp = rsi; + { if (rsi_temp == saved_last_sp) __ push(saved_last_sp); } + // (preceding push must be done after argslot address is taken!) +#define UNPUSH_RSI \ + { if (rsi_temp == saved_last_sp) __ pop(saved_last_sp); } // arx_argslot points both to the array and to the first output arg vmarg = Address(rax_argslot, 0); // Get the array value. - Register rsi_array = rsi; + Register rsi_array = rsi_temp; Register rdx_array_klass = rdx_temp; - BasicType elem_type = T_OBJECT; + BasicType elem_type = ek_adapter_opt_spread_type(ek); + int elem_slots = type2size[elem_type]; // 1 or 2 + int array_slots = 1; // array is always a T_OBJECT int length_offset = arrayOopDesc::length_offset_in_bytes(); int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type); __ movptr(rsi_array, vmarg); - Label skip_array_check; - if (length_constant == 0) { + + Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done; + if (length_can_be_zero) { + // handle the null pointer case, if zero is allowed + Label L_skip; + if (length_constant < 0) { + load_conversion_vminfo(_masm, rbx_temp, rcx_amh_conversion); + __ testl(rbx_temp, rbx_temp); + __ jcc(Assembler::notZero, L_skip); + } __ testptr(rsi_array, rsi_array); - __ jcc(Assembler::zero, skip_array_check); + __ jcc(Assembler::zero, L_array_is_empty); + __ bind(L_skip); } __ null_check(rsi_array, oopDesc::klass_offset_in_bytes()); __ load_klass(rdx_array_klass, rsi_array); @@ -1099,22 +2272,20 @@ // Check the array type. Register rbx_klass = rbx_temp; __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object! - __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes())); + load_klass_from_Class(_masm, rbx_klass); Label ok_array_klass, bad_array_klass, bad_array_length; - __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi, ok_array_klass); + __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi_temp, ok_array_klass); // If we get here, the type check failed! __ jmp(bad_array_klass); - __ bind(ok_array_klass); + __ BIND(ok_array_klass); // Check length. if (length_constant >= 0) { __ cmpl(Address(rsi_array, length_offset), length_constant); } else { Register rbx_vminfo = rbx_temp; - __ movl(rbx_vminfo, rcx_amh_conversion); - assert(CONV_VMINFO_SHIFT == 0, "preshifted"); - __ andl(rbx_vminfo, CONV_VMINFO_MASK); + load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion); __ cmpl(rbx_vminfo, Address(rsi_array, length_offset)); } __ jcc(Assembler::notEqual, bad_array_length); @@ -1126,90 +2297,104 @@ // Form a pointer to the end of the affected region. __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize)); // 'stack_move' is negative number of words to insert - Register rdi_stack_move = rdi; - __ movl2ptr(rdi_stack_move, rcx_amh_conversion); - __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT); + // This number already accounts for elem_slots. 
+ Register rdi_stack_move = rdi_temp; + load_stack_move(_masm, rdi_stack_move, rcx_recv, true); + __ cmpptr(rdi_stack_move, 0); + assert(stack_move_unit() < 0, "else change this comparison"); + __ jcc(Assembler::less, L_insert_arg_space); + __ jcc(Assembler::equal, L_copy_args); + // single argument case, with no array movement + __ BIND(L_array_is_empty); + remove_arg_slots(_masm, -stack_move_unit() * array_slots, + rax_argslot, rbx_temp, rdx_temp); + __ jmp(L_args_done); // no spreading to do + __ BIND(L_insert_arg_space); + // come here in the usual case, stack_move < 0 (2 or more spread arguments) Register rsi_temp = rsi_array; // spill this - insert_arg_slots(_masm, rdi_stack_move, -1, + insert_arg_slots(_masm, rdi_stack_move, rax_argslot, rbx_temp, rsi_temp); - // reload the array (since rsi was killed) - __ movptr(rsi_array, vmarg); - } else if (length_constant > 1) { - int arg_mask = 0; - int new_slots = (length_constant - 1); - for (int i = 0; i < new_slots; i++) { - arg_mask <<= 1; - arg_mask |= _INSERT_REF_MASK; - } - insert_arg_slots(_masm, new_slots * stack_move_unit(), arg_mask, + // reload the array since rsi was killed + // reload from rdx_argslot_limit since rax_argslot is now decremented + __ movptr(rsi_array, Address(rdx_argslot_limit, -Interpreter::stackElementSize)); + } else if (length_constant >= 1) { + int new_slots = (length_constant * elem_slots) - array_slots; + insert_arg_slots(_masm, new_slots * stack_move_unit(), rax_argslot, rbx_temp, rdx_temp); - } else if (length_constant == 1) { - // no stack resizing required } else if (length_constant == 0) { - remove_arg_slots(_masm, -stack_move_unit(), + __ BIND(L_array_is_empty); + remove_arg_slots(_masm, -stack_move_unit() * array_slots, rax_argslot, rbx_temp, rdx_temp); + } else { + ShouldNotReachHere(); } // Copy from the array to the new slots. // Note: Stack change code preserves integrity of rax_argslot pointer. // So even after slot insertions, rax_argslot still points to first argument. + // Beware: Arguments that are shallow on the stack are deep in the array, + // and vice versa. So a downward-growing stack (the usual) has to be copied + // elementwise in reverse order from the source array. + __ BIND(L_copy_args); if (length_constant == -1) { // [rax_argslot, rdx_argslot_limit) is the area we are inserting into. + // Array element [0] goes at rdx_argslot_limit[-wordSize]. 
Register rsi_source = rsi_array; __ lea(rsi_source, Address(rsi_array, elem0_offset)); + Register rdx_fill_ptr = rdx_argslot_limit; Label loop; - __ bind(loop); - __ movptr(rbx_temp, Address(rsi_source, 0)); - __ movptr(Address(rax_argslot, 0), rbx_temp); + __ BIND(loop); + __ addptr(rdx_fill_ptr, -Interpreter::stackElementSize * elem_slots); + move_typed_arg(_masm, elem_type, true, + Address(rdx_fill_ptr, 0), Address(rsi_source, 0), + rbx_temp, rdi_temp); __ addptr(rsi_source, type2aelembytes(elem_type)); - __ addptr(rax_argslot, Interpreter::stackElementSize); - __ cmpptr(rax_argslot, rdx_argslot_limit); - __ jccb(Assembler::less, loop); + __ cmpptr(rdx_fill_ptr, rax_argslot); + __ jcc(Assembler::above, loop); } else if (length_constant == 0) { - __ bind(skip_array_check); // nothing to copy } else { int elem_offset = elem0_offset; - int slot_offset = 0; + int slot_offset = length_constant * Interpreter::stackElementSize; for (int index = 0; index < length_constant; index++) { - __ movptr(rbx_temp, Address(rsi_array, elem_offset)); - __ movptr(Address(rax_argslot, slot_offset), rbx_temp); + slot_offset -= Interpreter::stackElementSize * elem_slots; // fill backward + move_typed_arg(_masm, elem_type, true, + Address(rax_argslot, slot_offset), Address(rsi_array, elem_offset), + rbx_temp, rdi_temp); elem_offset += type2aelembytes(elem_type); - slot_offset += Interpreter::stackElementSize; } } + __ BIND(L_args_done); // Arguments are spread. Move to next method handle. - UNPUSH_RSI_RDI; + UNPUSH_RSI; __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ jump_to_method_handle_entry(rcx_recv, rdx_temp); __ bind(bad_array_klass); - UNPUSH_RSI_RDI; + UNPUSH_RSI; assert(!vmarg.uses(rarg2_required), "must be different registers"); - __ movptr(rarg2_required, Address(rdx_array_klass, java_mirror_offset)); // required type - __ movptr(rarg1_actual, vmarg); // bad array - __ movl( rarg0_code, (int) Bytecodes::_aaload); // who is complaining? + __ load_heap_oop( rarg2_required, Address(rdx_array_klass, java_mirror_offset)); // required type + __ movptr( rarg1_actual, vmarg); // bad array + __ movl( rarg0_code, (int) Bytecodes::_aaload); // who is complaining? __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); __ bind(bad_array_length); - UNPUSH_RSI_RDI; + UNPUSH_RSI; assert(!vmarg.uses(rarg2_required), "must be different registers"); - __ mov (rarg2_required, rcx_recv); // AMH requiring a certain length - __ movptr(rarg1_actual, vmarg); // bad array - __ movl( rarg0_code, (int) Bytecodes::_arraylength); // who is complaining? + __ mov( rarg2_required, rcx_recv); // AMH requiring a certain length + __ movptr( rarg1_actual, vmarg); // bad array + __ movl( rarg0_code, (int) Bytecodes::_arraylength); // who is complaining? __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); +#undef UNPUSH_RSI -#undef UNPUSH_RSI_RDI + break; } - break; - case _adapter_flyby: - case _adapter_ricochet: - __ unimplemented(entry_name(ek)); // %%% FIXME: NYI - break; - - default: ShouldNotReachHere(); + default: + // do not require all platforms to recognize all adapter types + __ nop(); + return; } __ hlt();
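The spread adapters above replace the single array-reference slot with length * elem_slots element slots (inserting or removing stack slots as needed) and then copy the elements in reverse order, because slots that are shallow on the stack correspond to deep array indices. A rough standalone C++ sketch of that bookkeeping follows; it is not HotSpot code, the names are invented, and a std::vector stands in for the expression stack (index 0 is the shallowest slot, higher indices are deeper).

#include <cstdint>
#include <vector>

static void spread_array_slots(std::vector<int64_t>& slots,
                               size_t arg_index,                  // index of the array-reference slot
                               const std::vector<int64_t>& array,
                               int elem_slots) {                   // 1, or 2 for long/double (cf. type2size)
  const int array_slots = 1;                                       // the array reference itself
  const int new_slots   = int(array.size()) * elem_slots - array_slots;
  if (new_slots > 0)        // grow the stack: insert_arg_slots in the patch
    slots.insert(slots.begin() + arg_index, new_slots, 0);
  else if (new_slots < 0)   // shrink it: remove_arg_slots (the empty-array case)
    slots.erase(slots.begin() + arg_index, slots.begin() + arg_index + (-new_slots));
  // Fill backwards: array[0] lands in the deepest affected slot, matching the
  // "shallow on the stack are deep in the array" comment in the patch.
  size_t fill = arg_index + array.size() * elem_slots;
  for (size_t i = 0; i < array.size(); i++) {
    fill -= elem_slots;
    slots[fill] = array[i];
  }
}

With three one-slot elements this turns { ..., ARRAY, ... } into { ..., elem2, elem1, elem0, ... }, i.e. the element count, not the element order on the stack, is what the stack-move arithmetic has to account for.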
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/cpu/x86/vm/methodHandles_x86.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,292 @@ +/* + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +// Platform-specific definitions for method handles. +// These definitions are inlined into class MethodHandles. + +public: + +// The stack just after the recursive call from a ricochet frame +// looks something like this. Offsets are marked in words, not bytes. +// rsi (r13 on LP64) is part of the interpreter calling sequence +// which tells the callee where is my real rsp (for frame walking). +// (...lower memory addresses) +// rsp: [ return pc ] always the global RicochetBlob::bounce_addr +// rsp+1: [ recursive arg N ] +// rsp+2: [ recursive arg N-1 ] +// ... +// rsp+N: [ recursive arg 1 ] +// rsp+N+1: [ recursive method handle ] +// ... +// rbp-6: [ cleanup continuation pc ] <-- (struct RicochetFrame) +// rbp-5: [ saved target MH ] the MH we will call on the saved args +// rbp-4: [ saved args layout oop ] an int[] array which describes argument layout +// rbp-3: [ saved args pointer ] address of transformed adapter arg M (slot 0) +// rbp-2: [ conversion ] information about how the return value is used +// rbp-1: [ exact sender sp ] exact TOS (rsi/r13) of original sender frame +// rbp+0: [ saved sender fp ] (for original sender of AMH) +// rbp+1: [ saved sender pc ] (back to original sender of AMH) +// rbp+2: [ transformed adapter arg M ] <-- (extended TOS of original sender) +// rbp+3: [ transformed adapter arg M-1] +// ... +// rbp+M+1: [ transformed adapter arg 1 ] +// rbp+M+2: [ padding ] <-- (rbp + saved args base offset) +// ... [ optional padding] +// (higher memory addresses...) +// +// The arguments originally passed by the original sender +// are lost, and arbitrary amounts of stack motion might have +// happened due to argument transformation. +// (This is done by C2I/I2C adapters and non-direct method handles.) +// This is why there is an unpredictable amount of memory between +// the extended and exact TOS of the sender. +// The ricochet adapter itself will also (in general) perform +// transformations before the recursive call. +// +// The transformed and saved arguments, immediately above the saved +// return PC, are a well-formed method handle invocation ready to execute. +// When the GC needs to walk the stack, these arguments are described +// via the saved arg types oop, an int[] array with a private format. 
+// This array is derived from the type of the transformed adapter
+// method handle, which also sits at the base of the saved argument
+// bundle. Since the GC may not be able to fish out the int[]
+// array, it is pushed explicitly on the stack. This may be
+// an unnecessary expense.
+//
+// The following register conventions are significant at this point:
+// rsp the thread stack, as always; preserved by caller
+// rsi/r13 exact TOS of recursive frame (contents of [rbp-2])
+// rcx recursive method handle (contents of [rsp+N+1])
+// rbp preserved by caller (not used by caller)
+// Unless otherwise specified, all registers can be blown by the call.
+//
+// If this frame must be walked, the transformed adapter arguments
+// will be found with the help of the saved arguments descriptor.
+//
+// Therefore, the descriptor must match the referenced arguments.
+// The arguments must be followed by at least one word of padding,
+// which will be necessary to complete the final method handle call.
+// That word is not treated as holding an oop.
+//
+// The word pointed to by the return argument pointer is likewise not
+// treated as an oop, even if it points to a saved argument.
+// This allows the saved argument list to have a "hole" in it
+// to receive an oop from the recursive call.
+// (The hole might temporarily contain RETURN_VALUE_PLACEHOLDER.)
+//
+// When the recursive callee returns, RicochetBlob::bounce_addr will
+// immediately jump to the continuation stored in the RF.
+// This continuation will merge the recursive return value
+// into the saved argument list. At that point, the original
+// rsi, rbp, and rsp will be reloaded, the ricochet frame will
+// disappear, and the final target of the adapter method handle
+// will be invoked on the transformed argument list.
+
+class RicochetFrame {
+ friend class MethodHandles;
+
+ private:
+ intptr_t* _continuation; // what to do when control gets back here
+ oopDesc* _saved_target; // target method handle to invoke on saved_args
+ oopDesc* _saved_args_layout; // caching point for MethodTypeForm.vmlayout cookie
+ intptr_t* _saved_args_base; // base of pushed arguments (slot 0, arg N) (-3)
+ intptr_t _conversion; // misc.
information from original AdapterMethodHandle (-2) + intptr_t* _exact_sender_sp; // parallel to interpreter_frame_sender_sp (-1) + intptr_t* _sender_link; // *must* coincide with frame::link_offset (0) + address _sender_pc; // *must* coincide with frame::return_addr_offset (1) + + public: + intptr_t* continuation() const { return _continuation; } + oop saved_target() const { return _saved_target; } + oop saved_args_layout() const { return _saved_args_layout; } + intptr_t* saved_args_base() const { return _saved_args_base; } + intptr_t conversion() const { return _conversion; } + intptr_t* exact_sender_sp() const { return _exact_sender_sp; } + intptr_t* sender_link() const { return _sender_link; } + address sender_pc() const { return _sender_pc; } + + intptr_t* extended_sender_sp() const { return saved_args_base(); } + + intptr_t return_value_slot_number() const { + return adapter_conversion_vminfo(conversion()); + } + BasicType return_value_type() const { + return adapter_conversion_dest_type(conversion()); + } + bool has_return_value_slot() const { + return return_value_type() != T_VOID; + } + intptr_t* return_value_slot_addr() const { + assert(has_return_value_slot(), ""); + return saved_arg_slot_addr(return_value_slot_number()); + } + intptr_t* saved_target_slot_addr() const { + return saved_arg_slot_addr(saved_args_length()); + } + intptr_t* saved_arg_slot_addr(int slot) const { + assert(slot >= 0, ""); + return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) ); + } + + jint saved_args_length() const; + jint saved_arg_offset(int arg) const; + + // GC interface + oop* saved_target_addr() { return (oop*)&_saved_target; } + oop* saved_args_layout_addr() { return (oop*)&_saved_args_layout; } + + oop compute_saved_args_layout(bool read_cache, bool write_cache); + + // Compiler/assembler interface. + static int continuation_offset_in_bytes() { return offset_of(RicochetFrame, _continuation); } + static int saved_target_offset_in_bytes() { return offset_of(RicochetFrame, _saved_target); } + static int saved_args_layout_offset_in_bytes(){ return offset_of(RicochetFrame, _saved_args_layout); } + static int saved_args_base_offset_in_bytes() { return offset_of(RicochetFrame, _saved_args_base); } + static int conversion_offset_in_bytes() { return offset_of(RicochetFrame, _conversion); } + static int exact_sender_sp_offset_in_bytes() { return offset_of(RicochetFrame, _exact_sender_sp); } + static int sender_link_offset_in_bytes() { return offset_of(RicochetFrame, _sender_link); } + static int sender_pc_offset_in_bytes() { return offset_of(RicochetFrame, _sender_pc); } + + // This value is not used for much, but it apparently must be nonzero. + static int frame_size_in_bytes() { return sender_link_offset_in_bytes(); } + +#ifdef ASSERT + // The magic number is supposed to help find ricochet frames within the bytes of stack dumps. + enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E }; + static int magic_number_1_offset_in_bytes() { return -wordSize; } + static int magic_number_2_offset_in_bytes() { return sizeof(RicochetFrame); } + intptr_t magic_number_1() const { return *(intptr_t*)((address)this + magic_number_1_offset_in_bytes()); }; + intptr_t magic_number_2() const { return *(intptr_t*)((address)this + magic_number_2_offset_in_bytes()); }; +#endif //ASSERT + + enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) }; + + static void verify_offsets() NOT_DEBUG_RETURN; + void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc. 
+ void zap_arguments() NOT_DEBUG_RETURN; + + static void generate_ricochet_blob(MacroAssembler* _masm, + // output params: + int* frame_size_in_words, int* bounce_offset, int* exception_offset); + + static void enter_ricochet_frame(MacroAssembler* _masm, + Register rcx_recv, + Register rax_argv, + address return_handler, + Register rbx_temp); + static void leave_ricochet_frame(MacroAssembler* _masm, + Register rcx_recv, + Register new_sp_reg, + Register sender_pc_reg); + + static Address frame_address(int offset = 0) { + // The RicochetFrame is found by subtracting a constant offset from rbp. + return Address(rbp, - sender_link_offset_in_bytes() + offset); + } + + static RicochetFrame* from_frame(const frame& fr) { + address bp = (address) fr.fp(); + RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes()); + rf->verify(); + return rf; + } + + static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN; +}; + +// Additional helper methods for MethodHandles code generation: +public: + static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg); + static void load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr); + static void load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr); + + static void load_stack_move(MacroAssembler* _masm, + Register rdi_stack_move, + Register rcx_amh, + bool might_be_negative); + + static void insert_arg_slots(MacroAssembler* _masm, + RegisterOrConstant arg_slots, + Register rax_argslot, + Register rbx_temp, Register rdx_temp); + + static void remove_arg_slots(MacroAssembler* _masm, + RegisterOrConstant arg_slots, + Register rax_argslot, + Register rbx_temp, Register rdx_temp); + + static void push_arg_slots(MacroAssembler* _masm, + Register rax_argslot, + RegisterOrConstant slot_count, + int skip_words_count, + Register rbx_temp, Register rdx_temp); + + static void move_arg_slots_up(MacroAssembler* _masm, + Register rbx_bottom, // invariant + Address top_addr, // can use rax_temp + RegisterOrConstant positive_distance_in_slots, + Register rax_temp, Register rdx_temp); + + static void move_arg_slots_down(MacroAssembler* _masm, + Address bottom_addr, // can use rax_temp + Register rbx_top, // invariant + RegisterOrConstant negative_distance_in_slots, + Register rax_temp, Register rdx_temp); + + static void move_typed_arg(MacroAssembler* _masm, + BasicType type, bool is_element, + Address slot_dest, Address value_src, + Register rbx_temp, Register rdx_temp); + + static void move_return_value(MacroAssembler* _masm, BasicType type, + Address return_slot); + + static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, + const char* error_message) NOT_DEBUG_RETURN; + + static void verify_argslots(MacroAssembler* _masm, + RegisterOrConstant argslot_count, + Register argslot_reg, + bool negate_argslot, + const char* error_message) NOT_DEBUG_RETURN; + + static void verify_stack_move(MacroAssembler* _masm, + RegisterOrConstant arg_slots, + int direction) NOT_DEBUG_RETURN; + + static void verify_klass(MacroAssembler* _masm, + Register obj, KlassHandle klass, + const char* error_message = "wrong klass") NOT_DEBUG_RETURN; + + static void verify_method_handle(MacroAssembler* _masm, Register mh_reg) { + verify_klass(_masm, mh_reg, SystemDictionaryHandles::MethodHandle_klass(), + "reference is a MH"); + } + + static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN; + + static Register saved_last_sp_register() { + 
// Should be in sharedRuntime, not here. + return LP64_ONLY(r13) NOT_LP64(rsi); + }
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -2253,6 +2253,31 @@ return 0; } +//----------------------------generate_ricochet_blob--------------------------- +void SharedRuntime::generate_ricochet_blob() { + if (!EnableInvokeDynamic) return; // leave it as a null + + // allocate space for the code + ResourceMark rm; + // setup code generation tools + CodeBuffer buffer("ricochet_blob", 256, 256); + MacroAssembler* masm = new MacroAssembler(&buffer); + + int frame_size_in_words = -1, bounce_offset = -1, exception_offset = -1; + MethodHandles::RicochetFrame::generate_ricochet_blob(masm, &frame_size_in_words, &bounce_offset, &exception_offset); + + // ------------- + // make sure all code is generated + masm->flush(); + + // failed to generate? + if (frame_size_in_words < 0 || bounce_offset < 0 || exception_offset < 0) { + assert(false, "bad ricochet blob"); + return; + } + + _ricochet_blob = RicochetBlob::create(&buffer, bounce_offset, exception_offset, frame_size_in_words); +} //------------------------------generate_deopt_blob---------------------------- void SharedRuntime::generate_deopt_blob() { @@ -2996,6 +3021,8 @@ generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), true); + generate_ricochet_blob(); + generate_deopt_blob(); #ifdef COMPILER2 generate_uncommon_trap_blob();
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -2530,6 +2530,32 @@ } +//----------------------------generate_ricochet_blob--------------------------- +void SharedRuntime::generate_ricochet_blob() { + if (!EnableInvokeDynamic) return; // leave it as a null + + // allocate space for the code + ResourceMark rm; + // setup code generation tools + CodeBuffer buffer("ricochet_blob", 512, 512); + MacroAssembler* masm = new MacroAssembler(&buffer); + + int frame_size_in_words = -1, bounce_offset = -1, exception_offset = -1; + MethodHandles::RicochetFrame::generate_ricochet_blob(masm, &frame_size_in_words, &bounce_offset, &exception_offset); + + // ------------- + // make sure all code is generated + masm->flush(); + + // failed to generate? + if (frame_size_in_words < 0 || bounce_offset < 0 || exception_offset < 0) { + assert(false, "bad ricochet blob"); + return; + } + + _ricochet_blob = RicochetBlob::create(&buffer, bounce_offset, exception_offset, frame_size_in_words); +} + //------------------------------generate_deopt_blob---------------------------- void SharedRuntime::generate_deopt_blob() { // Allocate space for the code @@ -3205,6 +3231,8 @@ generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), true); + generate_ricochet_blob(); + generate_deopt_blob(); #ifdef COMPILER2
--- a/src/cpu/x86/vm/stubRoutines_x86_32.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/stubRoutines_x86_32.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -36,7 +36,7 @@ // MethodHandles adapters enum method_handles_platform_dependent_constants { - method_handles_adapters_code_size = 10000 + method_handles_adapters_code_size = 30000 DEBUG_ONLY(+ 10000) }; class x86 {
--- a/src/cpu/x86/vm/stubRoutines_x86_64.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/stubRoutines_x86_64.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -38,7 +38,7 @@ // MethodHandles adapters enum method_handles_platform_dependent_constants { - method_handles_adapters_code_size = 40000 + method_handles_adapters_code_size = 80000 DEBUG_ONLY(+ 120000) }; class x86 {
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -776,6 +776,98 @@ } +// Method entry for java.lang.ref.Reference.get. +address InterpreterGenerator::generate_Reference_get_entry(void) { +#ifndef SERIALGC + // Code: _aload_0, _getfield, _areturn + // parameter size = 1 + // + // The code that gets generated by this routine is split into 2 parts: + // 1. The "intrinsified" code for G1 (or any SATB based GC), + // 2. The slow path - which is an expansion of the regular method entry. + // + // Notes:- + // * In the G1 code we do not check whether we need to block for + // a safepoint. If G1 is enabled then we must execute the specialized + // code for Reference.get (except when the Reference object is null) + // so that we can log the value in the referent field with an SATB + // update buffer. + // If the code for the getfield template is modified so that the + // G1 pre-barrier code is executed when the current method is + // Reference.get() then going through the normal method entry + // will be fine. + // * The G1 code below can, however, check the receiver object (the instance + // of java.lang.Reference) and jump to the slow path if null. If the + // Reference object is null then we obviously cannot fetch the referent + // and so we don't need to call the G1 pre-barrier. Thus we can use the + // regular method entry code to generate the NPE. + // + // This code is based on generate_accessor_enty. + + // rbx,: methodOop + // rcx: receiver (preserve for slow entry into asm interpreter) + + // rsi: senderSP must preserved for slow path, set SP to it on fast path + + address entry = __ pc(); + + const int referent_offset = java_lang_ref_Reference::referent_offset; + guarantee(referent_offset > 0, "referent offset not initialized"); + + if (UseG1GC) { + Label slow_path; + + // Check if local 0 != NULL + // If the receiver is null then it is OK to jump to the slow path. + __ movptr(rax, Address(rsp, wordSize)); + __ testptr(rax, rax); + __ jcc(Assembler::zero, slow_path); + + // rax: local 0 (must be preserved across the G1 barrier call) + // + // rbx: method (at this point it's scratch) + // rcx: receiver (at this point it's scratch) + // rdx: scratch + // rdi: scratch + // + // rsi: sender sp + + // Preserve the sender sp in case the pre-barrier + // calls the runtime + __ push(rsi); + + // Load the value of the referent field. + const Address field_address(rax, referent_offset); + __ movptr(rax, field_address); + + // Generate the G1 pre-barrier code to log the value of + // the referent field in an SATB buffer. + __ get_thread(rcx); + __ g1_write_barrier_pre(noreg /* obj */, + rax /* pre_val */, + rcx /* thread */, + rbx /* tmp */, + true /* tosca_save */, + true /* expand_call */); + + // _areturn + __ pop(rsi); // get sender sp + __ pop(rdi); // get return address + __ mov(rsp, rsi); // set sp to sender sp + __ jmp(rdi); + + __ bind(slow_path); + (void) generate_normal_entry(false); + + return entry; + } +#endif // SERIALGC + + // If G1 is not enabled then attempt to go through the accessor entry point + // Reference.get is an accessor + return generate_accessor_entry(); +} + // // Interpreter stub for calling a native method. 
(asm interpreter) // This sets up a somewhat different looking stack for calling the native method @@ -1444,6 +1536,8 @@ case Interpreter::java_lang_math_log : // fall thru case Interpreter::java_lang_math_log10 : // fall thru case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break; + case Interpreter::java_lang_ref_reference_get + : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; default : ShouldNotReachHere(); break; } @@ -1495,6 +1589,7 @@ int tempcount, int popframe_extra_args, int moncount, + int caller_actual_parameters, int callee_param_count, int callee_locals, frame* caller,
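The generate_Reference_get_entry code above is easier to follow as straight-line logic: if the receiver is non-null, load the referent and hand its value to the G1 SATB pre-barrier before returning it; a null receiver falls through to the normal entry. A toy C++ model of that flow is sketched below; none of these names or globals exist in HotSpot, and the real barrier enqueues into a per-thread SATB queue rather than a global vector.

#include <cstddef>
#include <vector>

struct Reference { void* referent; };        // stand-in for java.lang.ref.Reference

static bool satb_marking_active = false;     // stand-in for the concurrent-marking flag
static std::vector<void*> satb_queue;        // stand-in for the per-thread SATB buffer

// Pre-barrier: log the value read from the referent field so that concurrent
// marking treats it as live even if the Reference is cleared afterwards.
static void g1_pre_barrier(void* pre_val) {
  if (satb_marking_active && pre_val != NULL)
    satb_queue.push_back(pre_val);
}

static void* reference_get(Reference* receiver) {
  if (receiver == NULL)
    return NULL;                             // real code jumps to the slow path (normal entry)
  void* value = receiver->referent;          // load the referent field
  g1_pre_barrier(value);                     // record it in the SATB buffer
  return value;                              // corresponds to the _areturn
}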
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -757,6 +757,95 @@ return entry_point; } +// Method entry for java.lang.ref.Reference.get. +address InterpreterGenerator::generate_Reference_get_entry(void) { +#ifndef SERIALGC + // Code: _aload_0, _getfield, _areturn + // parameter size = 1 + // + // The code that gets generated by this routine is split into 2 parts: + // 1. The "intrinsified" code for G1 (or any SATB based GC), + // 2. The slow path - which is an expansion of the regular method entry. + // + // Notes:- + // * In the G1 code we do not check whether we need to block for + // a safepoint. If G1 is enabled then we must execute the specialized + // code for Reference.get (except when the Reference object is null) + // so that we can log the value in the referent field with an SATB + // update buffer. + // If the code for the getfield template is modified so that the + // G1 pre-barrier code is executed when the current method is + // Reference.get() then going through the normal method entry + // will be fine. + // * The G1 code can, however, check the receiver object (the instance + // of java.lang.Reference) and jump to the slow path if null. If the + // Reference object is null then we obviously cannot fetch the referent + // and so we don't need to call the G1 pre-barrier. Thus we can use the + // regular method entry code to generate the NPE. + // + // This code is based on generate_accessor_enty. + // + // rbx: methodOop + + // r13: senderSP must preserve for slow path, set SP to it on fast path + + address entry = __ pc(); + + const int referent_offset = java_lang_ref_Reference::referent_offset; + guarantee(referent_offset > 0, "referent offset not initialized"); + + if (UseG1GC) { + Label slow_path; + // rbx: method + + // Check if local 0 != NULL + // If the receiver is null then it is OK to jump to the slow path. + __ movptr(rax, Address(rsp, wordSize)); + + __ testptr(rax, rax); + __ jcc(Assembler::zero, slow_path); + + // rax: local 0 + // rbx: method (but can be used as scratch now) + // rdx: scratch + // rdi: scratch + + // Generate the G1 pre-barrier code to log the value of + // the referent field in an SATB buffer. + + // Load the value of the referent field. + const Address field_address(rax, referent_offset); + __ load_heap_oop(rax, field_address); + + // Generate the G1 pre-barrier code to log the value of + // the referent field in an SATB buffer. + __ g1_write_barrier_pre(noreg /* obj */, + rax /* pre_val */, + r15_thread /* thread */, + rbx /* tmp */, + true /* tosca_live */, + true /* expand_call */); + + // _areturn + __ pop(rdi); // get return address + __ mov(rsp, r13); // set sp to sender sp + __ jmp(rdi); + __ ret(0); + + // generate a vanilla interpreter entry as the slow path + __ bind(slow_path); + (void) generate_normal_entry(false); + + return entry; + } +#endif // SERIALGC + + // If G1 is not enabled then attempt to go through the accessor entry point + // Reference.get is an accessor + return generate_accessor_entry(); +} + + // Interpreter stub for calling a native method. (asm interpreter) // This sets up a somewhat different looking stack for calling the // native method than the typical interpreter frame setup. 
@@ -1463,6 +1552,8 @@ case Interpreter::java_lang_math_log : // fall thru case Interpreter::java_lang_math_log10 : // fall thru case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break; + case Interpreter::java_lang_ref_reference_get + : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; default : ShouldNotReachHere(); break; } @@ -1512,6 +1603,7 @@ int tempcount, int popframe_extra_args, int moncount, + int caller_actual_parameters, int callee_param_count, int callee_locals, frame* caller,
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -140,7 +140,12 @@ } __ get_thread(rcx); __ save_bcp(); - __ g1_write_barrier_pre(rdx, rcx, rsi, rbx, val != noreg); + __ g1_write_barrier_pre(rdx /* obj */, + rbx /* pre_val */, + rcx /* thread */, + rsi /* tmp */, + val != noreg /* tosca_live */, + false /* expand_call */); // Do the actual store // noreg means NULL @@ -149,7 +154,11 @@ // No post barrier for NULL } else { __ movl(Address(rdx, 0), val); - __ g1_write_barrier_post(rdx, rax, rcx, rbx, rsi); + __ g1_write_barrier_post(rdx /* store_adr */, + val /* new_val */, + rcx /* thread */, + rbx /* tmp */, + rsi /* tmp2 */); } __ restore_bcp(); @@ -413,7 +422,7 @@ Label L_done, L_throw_exception; const Register con_klass_temp = rcx; // same as Rcache - __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes())); + __ load_klass(con_klass_temp, rax); __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr())); __ jcc(Assembler::notEqual, L_done); __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0); @@ -423,7 +432,7 @@ // Load the exception from the system-array which wraps it: __ bind(L_throw_exception); - __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); + __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); __ jump(ExternalAddress(Interpreter::throw_exception_entry())); __ bind(L_done); @@ -937,9 +946,9 @@ __ jcc(Assembler::zero, is_null); // Move subklass into EBX - __ movptr(rbx, Address(rax, oopDesc::klass_offset_in_bytes())); + __ load_klass(rbx, rax); // Move superklass into EAX - __ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes())); + __ load_klass(rax, rdx); __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes())); // Compress array+index*wordSize+12 into a single register. Frees ECX. __ lea(rdx, element_address); @@ -1992,7 +2001,7 @@ if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { assert(state == vtos, "only valid state"); __ movptr(rax, aaddress(0)); - __ movptr(rdi, Address(rax, oopDesc::klass_offset_in_bytes())); + __ load_klass(rdi, rax); __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc))); __ testl(rdi, JVM_ACC_HAS_FINALIZER); Label skip_register_finalizer; @@ -2939,7 +2948,7 @@ // get receiver klass __ null_check(recv, oopDesc::klass_offset_in_bytes()); // Keep recv in rcx for callee expects it there - __ movptr(rax, Address(recv, oopDesc::klass_offset_in_bytes())); + __ load_klass(rax, recv); __ verify_oop(rax); // profile this call @@ -3019,7 +3028,7 @@ // Get receiver klass into rdx - also a null check __ restore_locals(); // restore rdi - __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes())); + __ load_klass(rdx, rcx); __ verify_oop(rdx); // profile this call @@ -3074,6 +3083,7 @@ void TemplateTable::invokedynamic(int byte_no) { transition(vtos, vtos); + assert(byte_no == f1_oop, "use this argument"); if (!EnableInvokeDynamic) { // We should not encounter this bytecode if !EnableInvokeDynamic. 
@@ -3086,7 +3096,6 @@ return; } - assert(byte_no == f1_oop, "use this argument"); prepare_invoke(rax, rbx, byte_no); // rax: CallSite object (f1) @@ -3097,14 +3106,14 @@ Register rax_callsite = rax; Register rcx_method_handle = rcx; - if (ProfileInterpreter) { - // %%% should make a type profile for any invokedynamic that takes a ref argument - // profile this call - __ profile_call(rsi); - } - - __ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx))); + // %%% should make a type profile for any invokedynamic that takes a ref argument + // profile this call + __ profile_call(rsi); + + __ verify_oop(rax_callsite); + __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx))); __ null_check(rcx_method_handle); + __ verify_oop(rcx_method_handle); __ prepare_to_jump_from_interpreted(); __ jump_to_method_handle_entry(rcx_method_handle, rdx); } @@ -3249,7 +3258,7 @@ (int32_t)markOopDesc::prototype()); // header __ pop(rcx); // get saved klass back in the register. } - __ movptr(Address(rax, oopDesc::klass_offset_in_bytes()), rcx); // klass + __ store_klass(rax, rcx); // klass { SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0); @@ -3324,7 +3333,7 @@ __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc))); __ bind(resolved); - __ movptr(rbx, Address(rdx, oopDesc::klass_offset_in_bytes())); + __ load_klass(rbx, rdx); // Generate subtype check. Blows ECX. Resets EDI. Object in EDX. // Superklass in EAX. Subklass in EBX. @@ -3367,12 +3376,12 @@ __ push(atos); call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) ); __ pop_ptr(rdx); - __ movptr(rdx, Address(rdx, oopDesc::klass_offset_in_bytes())); + __ load_klass(rdx, rdx); __ jmp(resolved); // Get superklass in EAX and subklass in EDX __ bind(quicked); - __ movptr(rdx, Address(rax, oopDesc::klass_offset_in_bytes())); + __ load_klass(rdx, rax); __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc))); __ bind(resolved);
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -147,12 +147,21 @@ } else { __ leaq(rdx, obj); } - __ g1_write_barrier_pre(rdx, r8, rbx, val != noreg); + __ g1_write_barrier_pre(rdx /* obj */, + rbx /* pre_val */, + r15_thread /* thread */, + r8 /* tmp */, + val != noreg /* tosca_live */, + false /* expand_call */); if (val == noreg) { __ store_heap_oop_null(Address(rdx, 0)); } else { __ store_heap_oop(Address(rdx, 0), val); - __ g1_write_barrier_post(rdx, val, r8, rbx); + __ g1_write_barrier_post(rdx /* store_adr */, + val /* new_val */, + r15_thread /* thread */, + r8 /* tmp */, + rbx /* tmp2 */); } } @@ -427,7 +436,7 @@ Label L_done, L_throw_exception; const Register con_klass_temp = rcx; // same as cache const Register array_klass_temp = rdx; // same as index - __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes())); + __ load_klass(con_klass_temp, rax); __ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr())); __ cmpptr(con_klass_temp, Address(array_klass_temp, 0)); __ jcc(Assembler::notEqual, L_done); @@ -438,7 +447,7 @@ // Load the exception from the system-array which wraps it: __ bind(L_throw_exception); - __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); + __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); __ jump(ExternalAddress(Interpreter::throw_exception_entry())); __ bind(L_done); @@ -3128,7 +3137,6 @@ return; } - assert(byte_no == f1_oop, "use this argument"); prepare_invoke(rax, rbx, byte_no); // rax: CallSite object (f1) @@ -3139,14 +3147,14 @@ Register rax_callsite = rax; Register rcx_method_handle = rcx; - if (ProfileInterpreter) { - // %%% should make a type profile for any invokedynamic that takes a ref argument - // profile this call - __ profile_call(r13); - } - - __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx))); + // %%% should make a type profile for any invokedynamic that takes a ref argument + // profile this call + __ profile_call(r13); + + __ verify_oop(rax_callsite); + __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx))); __ null_check(rcx_method_handle); + __ verify_oop(rcx_method_handle); __ prepare_to_jump_from_interpreted(); __ jump_to_method_handle_entry(rcx_method_handle, rdx); }
--- a/src/cpu/x86/vm/vm_version_x86.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/vm_version_x86.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All Rights Reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -348,7 +348,7 @@ } char buf[256]; - jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", cores_per_cpu(), threads_per_core(), cpu_family(), _model, _stepping, (supports_cmov() ? ", cmov" : ""), @@ -363,8 +363,7 @@ (supports_sse4_2() ? ", sse4.2" : ""), (supports_popcnt() ? ", popcnt" : ""), (supports_mmx_ext() ? ", mmxext" : ""), - (supports_3dnow() ? ", 3dnow" : ""), - (supports_3dnow2() ? ", 3dnowext" : ""), + (supports_3dnow_prefetch() ? ", 3dnowpref" : ""), (supports_lzcnt() ? ", lzcnt": ""), (supports_sse4a() ? ", sse4a": ""), (supports_ht() ? ", ht": "")); @@ -442,12 +441,25 @@ } } - // On family 21 processors default is no sw prefetch - if ( cpu_family() == 21 ) { + // some defaults for AMD family 15h + if ( cpu_family() == 0x15 ) { + // On family 15h processors default is no sw prefetch if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) { AllocatePrefetchStyle = 0; } + // Also, if some other prefetch style is specified, default instruction type is PREFETCHW + if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) { + AllocatePrefetchInstr = 3; + } + // On family 15h processors use XMM and UnalignedLoadStores for Array Copy + if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) { + UseXMMForArrayCopy = true; + } + if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) { + UseUnalignedLoadStores = true; + } } + } if( is_intel() ) { // Intel cpus specific settings @@ -522,13 +534,13 @@ // set valid Prefetch instruction if( ReadPrefetchInstr < 0 ) ReadPrefetchInstr = 0; if( ReadPrefetchInstr > 3 ) ReadPrefetchInstr = 3; - if( ReadPrefetchInstr == 3 && !supports_3dnow() ) ReadPrefetchInstr = 0; - if( !supports_sse() && supports_3dnow() ) ReadPrefetchInstr = 3; + if( ReadPrefetchInstr == 3 && !supports_3dnow_prefetch() ) ReadPrefetchInstr = 0; + if( !supports_sse() && supports_3dnow_prefetch() ) ReadPrefetchInstr = 3; if( AllocatePrefetchInstr < 0 ) AllocatePrefetchInstr = 0; if( AllocatePrefetchInstr > 3 ) AllocatePrefetchInstr = 3; - if( AllocatePrefetchInstr == 3 && !supports_3dnow() ) AllocatePrefetchInstr=0; - if( !supports_sse() && supports_3dnow() ) AllocatePrefetchInstr = 3; + if( AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch() ) AllocatePrefetchInstr=0; + if( !supports_sse() && supports_3dnow_prefetch() ) AllocatePrefetchInstr = 3; // Allocation prefetch settings intx cache_line_size = L1_data_cache_line_size(); @@ -576,10 +588,10 @@ logical_processors_per_package()); tty->print_cr("UseSSE=%d",UseSSE); tty->print("Allocation: "); - if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow()) { + if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) { tty->print_cr("no prefetching"); } else { - if (UseSSE == 0 && supports_3dnow()) { + if (UseSSE == 0 && supports_3dnow_prefetch()) { tty->print("PREFETCHW"); } else if (UseSSE >= 1) { if (AllocatePrefetchInstr == 0) {
--- a/src/cpu/x86/vm/vm_version_x86.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/vm_version_x86.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All Rights Reserved. + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -188,7 +188,8 @@ CPU_FXSR = (1 << 2), CPU_HT = (1 << 3), CPU_MMX = (1 << 4), - CPU_3DNOW = (1 << 5), // 3DNow comes from cpuid 0x80000001 (EDX) + CPU_3DNOW_PREFETCH = (1 << 5), // Processor supports 3dnow prefetch and prefetchw instructions + // may not necessarily support other 3dnow instructions CPU_SSE = (1 << 6), CPU_SSE2 = (1 << 7), CPU_SSE3 = (1 << 8), // SSE3 comes from cpuid 1 (ECX) @@ -328,8 +329,9 @@ // AMD features. if (is_amd()) { - if (_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) - result |= CPU_3DNOW; + if ((_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) || + (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0)) + result |= CPU_3DNOW_PREFETCH; if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0) result |= CPU_LZCNT; if (_cpuid_info.ext_cpuid1_ecx.bits.sse4a != 0) @@ -446,9 +448,8 @@ // // AMD features // - static bool supports_3dnow() { return (_cpuFeatures & CPU_3DNOW) != 0; } + static bool supports_3dnow_prefetch() { return (_cpuFeatures & CPU_3DNOW_PREFETCH) != 0; } static bool supports_mmx_ext() { return is_amd() && _cpuid_info.ext_cpuid1_edx.bits.mmx_amd != 0; } - static bool supports_3dnow2() { return is_amd() && _cpuid_info.ext_cpuid1_edx.bits.tdnow2 != 0; } static bool supports_lzcnt() { return (_cpuFeatures & CPU_LZCNT) != 0; } static bool supports_sse4a() { return (_cpuFeatures & CPU_SSE4A) != 0; }
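The new CPU_3DNOW_PREFETCH feature folds two CPUID bits into one flag: prefetchw is considered usable if either the legacy 3DNow! bit (leaf 0x80000001, EDX bit 31) or the dedicated 3DNowPrefetch bit (same leaf, ECX bit 8) is set. A minimal standalone check along those lines is shown below (GCC/Clang only, and omitting the is_amd() vendor test the VM performs); it is an illustration, not the VM's detection code.

#include <cpuid.h>
#include <cstdio>

static bool supports_3dnow_prefetch() {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid(0x80000001u, &eax, &ebx, &ecx, &edx))
    return false;                            // extended leaf not available
  bool tdnow     = ((edx >> 31) & 1) != 0;   // legacy 3DNow! implies prefetch/prefetchw
  bool prefetchw = ((ecx >> 8)  & 1) != 0;   // 3DNowPrefetch: prefetch/prefetchw only
  return tdnow || prefetchw;
}

int main() {
  std::printf("prefetchw usable: %s\n", supports_3dnow_prefetch() ? "yes" : "no");
  return 0;
}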
--- a/src/cpu/x86/vm/x86_32.ad Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/x86/vm/x86_32.ad Thu Jun 02 18:59:50 2011 +0100 @@ -3423,7 +3423,7 @@ masm.movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2] // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes - if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) { + if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) { // prefetchw [eax + Offset(_owner)-2] masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2)); } @@ -3467,7 +3467,7 @@ masm.movptr(boxReg, tmpReg) ; // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes - if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) { + if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) { // prefetchw [eax + Offset(_owner)-2] masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2)); } @@ -3614,7 +3614,7 @@ // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html. masm.get_thread (boxReg) ; - if ((EmitSync & 4096) && VM_Version::supports_3dnow() && os::is_MP()) { + if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) { // prefetchw [ebx + Offset(_owner)-2] masm.prefetchw(Address(rbx, ObjectMonitor::owner_offset_in_bytes()-2)); } @@ -7333,7 +7333,7 @@ // Must be safe to execute with invalid address (cannot fault). instruct prefetchr0( memory mem ) %{ - predicate(UseSSE==0 && !VM_Version::supports_3dnow()); + predicate(UseSSE==0 && !VM_Version::supports_3dnow_prefetch()); match(PrefetchRead mem); ins_cost(0); size(0); @@ -7343,7 +7343,7 @@ %} instruct prefetchr( memory mem ) %{ - predicate(UseSSE==0 && VM_Version::supports_3dnow() || ReadPrefetchInstr==3); + predicate(UseSSE==0 && VM_Version::supports_3dnow_prefetch() || ReadPrefetchInstr==3); match(PrefetchRead mem); ins_cost(100); @@ -7387,7 +7387,7 @@ %} instruct prefetchw0( memory mem ) %{ - predicate(UseSSE==0 && !VM_Version::supports_3dnow()); + predicate(UseSSE==0 && !VM_Version::supports_3dnow_prefetch()); match(PrefetchWrite mem); ins_cost(0); size(0); @@ -7397,7 +7397,7 @@ %} instruct prefetchw( memory mem ) %{ - predicate(UseSSE==0 && VM_Version::supports_3dnow() || AllocatePrefetchInstr==3); + predicate(UseSSE==0 && VM_Version::supports_3dnow_prefetch() || AllocatePrefetchInstr==3); match( PrefetchWrite mem ); ins_cost(100); @@ -12989,6 +12989,53 @@ %} // ============================================================================ +// Counted Loop limit node which represents exact final iterator value. +// Note: the resulting value should fit into integer range since +// counted loops have limit check on overflow. +instruct loopLimit_eReg(eAXRegI limit, nadxRegI init, immI stride, eDXRegI limit_hi, nadxRegI tmp, eFlagsReg flags) %{ + match(Set limit (LoopLimit (Binary init limit) stride)); + effect(TEMP limit_hi, TEMP tmp, KILL flags); + ins_cost(300); + + format %{ "loopLimit $init,$limit,$stride # $limit = $init + $stride *( $limit - $init + $stride -1)/ $stride, kills $limit_hi" %} + ins_encode %{ + int strd = (int)$stride$$constant; + assert(strd != 1 && strd != -1, "sanity"); + int m1 = (strd > 0) ? 
1 : -1;
+ // Convert limit to long (EAX:EDX)
+ __ cdql();
+ // Convert init to long (init:tmp)
+ __ movl($tmp$$Register, $init$$Register);
+ __ sarl($tmp$$Register, 31);
+ // $limit - $init
+ __ subl($limit$$Register, $init$$Register);
+ __ sbbl($limit_hi$$Register, $tmp$$Register);
+ // + ($stride - 1)
+ if (strd > 0) {
+ __ addl($limit$$Register, (strd - 1));
+ __ adcl($limit_hi$$Register, 0);
+ __ movl($tmp$$Register, strd);
+ } else {
+ __ addl($limit$$Register, (strd + 1));
+ __ adcl($limit_hi$$Register, -1);
+ __ lneg($limit_hi$$Register, $limit$$Register);
+ __ movl($tmp$$Register, -strd);
+ }
+ // signed division: (EAX:EDX) / pos_stride
+ __ idivl($tmp$$Register);
+ if (strd < 0) {
+ // restore sign
+ __ negl($tmp$$Register);
+ }
+ // (EAX) * stride
+ __ mull($tmp$$Register);
+ // + init (ignore upper bits)
+ __ addl($limit$$Register, $init$$Register);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+// ============================================================================
// Branch Instructions
// Jump Table
instruct jumpXtnd(eRegI switch_val) %{
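The loopLimit node above computes the exact final value of a counted loop's induction variable; its format string spells out the math: limit' = init + stride * ceil((limit - init) / stride). A scalar C++ sketch of the same computation for a positive stride follows (the negative-stride case mirrors it, as the generated code does by negating); the 64-bit intermediate plays the role of the EAX:EDX pair.

#include <cassert>
#include <cstdio>

// Exact exit value of i for: for (int i = init; i < limit; i += stride),
// assuming stride > 1 and the loop is entered (limit > init).
static int exact_loop_limit(int init, int limit, int stride) {
  assert(stride > 1 && "strides of +/-1 are handled without this node");
  long long trips = ((long long)limit - init + stride - 1) / stride;  // ceiling division
  return (int)(init + stride * trips);
}

int main() {
  // i takes 0, 3, 6, 9 and the loop exits with i == 12.
  std::printf("%d\n", exact_loop_limit(0, 10, 3));
  return 0;
}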
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp Wed Jun 01 17:09:56 2011 +0100
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp Thu Jun 02 18:59:50 2011 +0100
@@ -1302,6 +1302,26 @@
 return generate_entry((address) CppInterpreter::accessor_entry);
 }
+address InterpreterGenerator::generate_Reference_get_entry(void) {
+#ifndef SERIALGC
+ if (UseG1GC) {
+ // We need a routine that generates code to:
+ // * load the value in the referent field
+ // * pass that value to the pre-barrier.
+ //
+ // In the case of G1 this will record the value of the
+ // referent in an SATB buffer if marking is active.
+ // This will cause concurrent marking to mark the referent
+ // field as live.
+ Unimplemented();
+ }
+#endif // SERIALGC
+
+ // If G1 is not enabled then attempt to go through the accessor entry point
+ // Reference.get is an accessor
+ return generate_accessor_entry();
+}
+
 address InterpreterGenerator::generate_native_entry(bool synchronized) {
 assert(synchronized == false, "should be");
@@ -1357,6 +1377,10 @@
 entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);
 break;
+ case Interpreter::java_lang_ref_reference_get:
+ entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry();
+ break;
+
 default:
 ShouldNotReachHere();
 }
@@ -1403,6 +1427,7 @@
 int tempcount,
 int popframe_extra_args,
 int moncount,
+ int caller_actual_parameters,
 int callee_param_count,
 int callee_locals,
 frame* caller,
--- a/src/cpu/zero/vm/interpreterGenerator_zero.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/zero/vm/interpreterGenerator_zero.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright 2007 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -37,6 +37,7 @@ address generate_math_entry(AbstractInterpreter::MethodKind kind); address generate_empty_entry(); address generate_accessor_entry(); + address generate_Reference_get_entry(); address generate_method_handle_entry(); #endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP
--- a/src/cpu/zero/vm/interpreter_zero.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/zero/vm/interpreter_zero.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -82,24 +82,6 @@ return true; } -int AbstractInterpreter::size_activation(methodOop method, - int tempcount, - int popframe_extra_args, - int moncount, - int callee_param_count, - int callee_locals, - bool is_top_frame) { - return layout_activation(method, - tempcount, - popframe_extra_args, - moncount, - callee_param_count, - callee_locals, - (frame*) NULL, - (frame*) NULL, - is_top_frame); -} - void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) { }
--- a/src/cpu/zero/vm/jni_zero.h Wed Jun 01 17:09:56 2011 +0100 +++ b/src/cpu/zero/vm/jni_zero.h Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright 2009 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. *
--- a/src/os/linux/vm/globals_linux.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os/linux/vm/globals_linux.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,19 +29,25 @@ // Defines Linux specific flags. They are not available on other platforms. // #define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \ - product(bool, UseOprofile, false, \ - "enable support for Oprofile profiler") \ - \ - product(bool, UseLinuxPosixThreadCPUClocks, true, \ - "enable fast Linux Posix clocks where available") -// NB: The default value of UseLinuxPosixThreadCPUClocks may be -// overridden in Arguments::parse_each_vm_init_arg. + product(bool, UseOprofile, false, \ + "enable support for Oprofile profiler") \ + \ + product(bool, UseLinuxPosixThreadCPUClocks, true, \ + "enable fast Linux Posix clocks where available") \ +/* NB: The default value of UseLinuxPosixThreadCPUClocks may be \ + overridden in Arguments::parse_each_vm_init_arg. */ \ + \ + product(bool, UseHugeTLBFS, false, \ + "Use MAP_HUGETLB for large pages") \ + \ + product(bool, UseSHM, false, \ + "Use SYSV shared memory for large pages") // // Defines Linux-specific default values. The flags are available on all // platforms, but they may have different default values on other platforms. // -define_pd_global(bool, UseLargePages, false); +define_pd_global(bool, UseLargePages, true); define_pd_global(bool, UseLargePagesIndividualAllocation, false); define_pd_global(bool, UseOSErrorReporting, false); define_pd_global(bool, UseThreadPriorities, true) ;
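The two new flags interact with UseLargePages, and with each other, in os::large_page_init() later in this changeset (os_linux.cpp). As a condensed, hedged restatement of that precedence (HugeTLBFS preferred, SysV SHM only as an explicit fallback), here is a standalone C++ sketch; the struct and function are illustrative, not HotSpot code.

    struct LargePageFlags {
      bool UseLargePages, UseHugeTLBFS, UseSHM;               // flag values
      bool lp_is_default, htlbfs_is_default, shm_is_default;  // FLAG_IS_DEFAULT(...)
    };

    // Sketch of the resolution order: an explicit -XX:+UseLargePages tries both
    // mechanisms, the built-in default tries only HugeTLBFS, and a working
    // HugeTLBFS always wins over SysV shared memory.
    static void resolve_large_page_flags(LargePageFlags& f, bool hugetlbfs_works) {
      if (!f.UseLargePages) { f.UseHugeTLBFS = f.UseSHM = false; return; }
      if (f.htlbfs_is_default && f.shm_is_default) {
        if (f.lp_is_default) f.UseHugeTLBFS = true;
        else                 f.UseHugeTLBFS = f.UseSHM = true;
      }
      f.UseHugeTLBFS = f.UseHugeTLBFS && hugetlbfs_works;
      if (f.UseHugeTLBFS) f.UseSHM = false;
      f.UseLargePages = f.UseHugeTLBFS || f.UseSHM;
    }

    int main() {
      LargePageFlags f = { true, false, false, true, true, true };  // all defaults
      resolve_large_page_flags(f, /*hugetlbfs_works=*/true);
      return f.UseHugeTLBFS && !f.UseSHM ? 0 : 1;
    }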
--- a/src/os/linux/vm/jvm_linux.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os/linux/vm/jvm_linux.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os/linux/vm/osThread_linux.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os/linux/vm/osThread_linux.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os/linux/vm/os_linux.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os/linux/vm/os_linux.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -2465,16 +2465,40 @@ return res != (uintptr_t) MAP_FAILED; } +// Define MAP_HUGETLB here so we can build HotSpot on old systems. +#ifndef MAP_HUGETLB +#define MAP_HUGETLB 0x40000 +#endif + +// Define MADV_HUGEPAGE here so we can build HotSpot on old systems. +#ifndef MADV_HUGEPAGE +#define MADV_HUGEPAGE 14 +#endif + bool os::commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) { + if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) { + int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; + uintptr_t res = + (uintptr_t) ::mmap(addr, size, prot, + MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB, + -1, 0); + return res != (uintptr_t) MAP_FAILED; + } + return commit_memory(addr, size, exec); } -void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } +void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { + if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) { + // We don't check the return value: madvise(MADV_HUGEPAGE) may not + // be supported or the memory may already be backed by huge pages. + ::madvise(addr, bytes, MADV_HUGEPAGE); + } +} void os::free_memory(char *addr, size_t bytes) { - ::mmap(addr, bytes, PROT_READ | PROT_WRITE, - MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0); + ::madvise(addr, bytes, MADV_DONTNEED); } void os::numa_make_global(char *addr, size_t bytes) { @@ -2648,45 +2672,39 @@ // writing thread stacks don't use growable mappings (i.e. those // creeated with MAP_GROWSDOWN), and aren't marked "[stack]", so this // only applies to the main thread. -static bool -get_stack_bounds(uintptr_t *bottom, uintptr_t *top) -{ - FILE *f = fopen("/proc/self/maps", "r"); - if (f == NULL) + +static +bool get_stack_bounds(uintptr_t *bottom, uintptr_t *top) { + + char buf[128]; + int fd, sz; + + if ((fd = ::open("/proc/self/maps", O_RDONLY)) < 0) { return false; - - while (!feof(f)) { - size_t dummy; - char *str = NULL; - ssize_t len = getline(&str, &dummy, f); - if (len == -1) { - fclose(f); - return false; - } - - if (len > 0 && str[len-1] == '\n') { - str[len-1] = 0; - len--; - } - - static const char *stack_str = "[stack]"; - if (len > (ssize_t)strlen(stack_str) - && (strcmp(str + len - strlen(stack_str), stack_str) == 0)) { - if (sscanf(str, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) { - uintptr_t sp = (uintptr_t)__builtin_frame_address(0); - if (sp >= *bottom && sp <= *top) { - free(str); - fclose(f); - return true; + } + + const char kw[] = "[stack]"; + const int kwlen = sizeof(kw)-1; + + // Address part of /proc/self/maps couldn't be more than 128 bytes + while ((sz = os::get_line_chars(fd, buf, sizeof(buf))) > 0) { + if (sz > kwlen && ::memcmp(buf+sz-kwlen, kw, kwlen) == 0) { + // Extract addresses + if (sscanf(buf, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) { + uintptr_t sp = (uintptr_t) __builtin_frame_address(0); + if (sp >= *bottom && sp <= *top) { + ::close(fd); + return true; + } } - } - } - free(str); - } - fclose(f); + } + } + + ::close(fd); return false; } + // If the (growable) stack mapping already extends beyond the point // where we're going to put our guard pages, truncate the mapping at // that point by munmap()ping it. 
This ensures that when we later @@ -2818,6 +2836,43 @@ return linux_mprotect(addr, size, PROT_READ|PROT_WRITE); } +bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) { + bool result = false; + void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE, + MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB, + -1, 0); + + if (p != (void *) -1) { + // We don't know if this really is a huge page or not. + FILE *fp = fopen("/proc/self/maps", "r"); + if (fp) { + while (!feof(fp)) { + char chars[257]; + long x = 0; + if (fgets(chars, sizeof(chars), fp)) { + if (sscanf(chars, "%lx-%*x", &x) == 1 + && x == (long)p) { + if (strstr (chars, "hugepage")) { + result = true; + break; + } + } + } + } + fclose(fp); + } + munmap (p, page_size); + if (result) + return true; + } + + if (warn) { + warning("HugeTLBFS is not supported by the operating system."); + } + + return result; +} + /* * Set the coredump_filter bits to include largepages in core dump (bit 6) * @@ -2859,8 +2914,22 @@ static size_t _large_page_size = 0; -bool os::large_page_init() { - if (!UseLargePages) return false; +void os::large_page_init() { + if (!UseLargePages) { + UseHugeTLBFS = false; + UseSHM = false; + return; + } + + if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) { + // If UseLargePages is specified on the command line try both methods, + // if it's default, then try only HugeTLBFS. + if (FLAG_IS_DEFAULT(UseLargePages)) { + UseHugeTLBFS = true; + } else { + UseHugeTLBFS = UseSHM = true; + } + } if (LargePageSizeInBytes) { _large_page_size = LargePageSizeInBytes; @@ -2905,20 +2974,24 @@ } } + // print a warning if any large page related flag is specified on command line + bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS); + const size_t default_page_size = (size_t)Linux::page_size(); if (_large_page_size > default_page_size) { _page_sizes[0] = _large_page_size; _page_sizes[1] = default_page_size; _page_sizes[2] = 0; } + UseHugeTLBFS = UseHugeTLBFS && + Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size); + + if (UseHugeTLBFS) + UseSHM = false; + + UseLargePages = UseHugeTLBFS || UseSHM; set_coredump_filter(); - - // Large page support is available on 2.6 or newer kernel, some vendors - // (e.g. Redhat) have backported it to their 2.4 based distributions. - // We optimistically assume the support is available. If later it turns out - // not true, VM will automatically switch to use regular page size. - return true; } #ifndef SHM_HUGETLB @@ -2928,7 +3001,7 @@ char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) { // "exec" is passed in but not used. Creating the shared image for // the code cache doesn't have an SHM_X executable permission to check. - assert(UseLargePages, "only for large pages"); + assert(UseLargePages && UseSHM, "only for SHM large pages"); key_t key = IPC_PRIVATE; char *addr; @@ -2995,16 +3068,15 @@ return _large_page_size; } -// Linux does not support anonymous mmap with large page memory. The only way -// to reserve large page memory without file backing is through SysV shared -// memory API. The entire memory region is committed and pinned upfront. -// Hopefully this will change in the future... +// HugeTLBFS allows application to commit large page memory on demand; +// with SysV SHM the entire memory region must be allocated as shared +// memory. 
bool os::can_commit_large_page_memory() { - return false; + return UseHugeTLBFS; } bool os::can_execute_large_page_memory() { - return false; + return UseHugeTLBFS; } // Reserve memory at an arbitrary address, only if that area is @@ -4044,7 +4116,7 @@ #endif } - FLAG_SET_DEFAULT(UseLargePages, os::large_page_init()); + os::large_page_init(); // initialize suspend/resume support - must do this before signal_sets_init() if (SR_initialize() != 0) { @@ -4096,6 +4168,23 @@ UseNUMA = false; } } + // With SHM large pages we cannot uncommit a page, so there's no way + // we can make the adaptive lgrp chunk resizing work. If the user specified + // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and + // disable adaptive resizing. + if (UseNUMA && UseLargePages && UseSHM) { + if (!FLAG_IS_DEFAULT(UseNUMA)) { + if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) { + UseLargePages = false; + } else { + warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing"); + UseAdaptiveSizePolicy = false; + UseAdaptiveNUMAChunkSizing = false; + } + } else { + UseNUMA = false; + } + } if (!UseNUMA && ForceNUMA) { UseNUMA = true; }
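The new Linux code paths above come down to two system calls: an anonymous mmap() with MAP_HUGETLB for explicitly reserved huge pages, and madvise(MADV_HUGEPAGE) as a best-effort hint for transparent huge pages. A hedged, standalone C++ sketch of that pattern follows; the 2 MB size is only an assumed huge-page size.

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>

    #ifndef MAP_HUGETLB
    #define MAP_HUGETLB 0x40000     // same build-on-old-systems fallback as the patch
    #endif
    #ifndef MADV_HUGEPAGE
    #define MADV_HUGEPAGE 14
    #endif

    int main() {
      const std::size_t sz = 2 * 1024 * 1024;            // assume a 2 MB huge page
      void* p = ::mmap(nullptr, sz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
      if (p == MAP_FAILED) {
        // No reserved huge pages available: fall back to small pages and ask
        // the kernel (best effort, result deliberately ignored) to use THP.
        p = ::mmap(nullptr, sz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { std::perror("mmap"); return 1; }
        ::madvise(p, sz, MADV_HUGEPAGE);
      }
      ::munmap(p, sz);
      return 0;
    }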
--- a/src/os/linux/vm/os_linux.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os/linux/vm/os_linux.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -86,6 +86,9 @@ static void rebuild_cpu_to_node_map(); static GrowableArray<int>* cpu_to_node() { return _cpu_to_node; } + + static bool hugetlbfs_sanity_check(bool warn, size_t page_size); + public: static void init_thread_fpu_state(); static int get_fpu_control_word();
--- a/src/os/linux/vm/os_linux.inline.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os/linux/vm/os_linux.inline.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os/linux/vm/thread_linux.inline.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os/linux/vm/thread_linux.inline.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os/solaris/dtrace/generateJvmOffsets.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os/solaris/dtrace/generateJvmOffsets.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os/solaris/dtrace/jhelper.d Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os/solaris/dtrace/jhelper.d Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os/solaris/dtrace/libjvm_db.c Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os/solaris/dtrace/libjvm_db.c Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os/solaris/vm/dtraceJSDT_solaris.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os/solaris/vm/dtraceJSDT_solaris.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os/solaris/vm/os_solaris.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os/solaris/vm/os_solaris.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -2826,7 +2826,9 @@ void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned."); assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned."); - Solaris::set_mpss_range(addr, bytes, alignment_hint); + if (UseLargePages && UseMPSS) { + Solaris::set_mpss_range(addr, bytes, alignment_hint); + } } // Tell the OS to make the range local to the first-touching LWP @@ -3334,11 +3336,11 @@ return true; } -bool os::large_page_init() { +void os::large_page_init() { if (!UseLargePages) { UseISM = false; UseMPSS = false; - return false; + return; } // print a warning if any large page related flag is specified on command line @@ -3359,7 +3361,6 @@ Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size); UseLargePages = UseISM || UseMPSS; - return UseLargePages; } bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) { @@ -4990,7 +4991,7 @@ #endif } - FLAG_SET_DEFAULT(UseLargePages, os::large_page_init()); + os::large_page_init(); // Check minimum allowable stack size for thread creation and to initialize // the java system classes, including StackOverflowError - depends on page @@ -5044,6 +5045,20 @@ UseNUMA = false; } } + // ISM is not compatible with the NUMA allocator - it always allocates + // pages round-robin across the lgroups. + if (UseNUMA && UseLargePages && UseISM) { + if (!FLAG_IS_DEFAULT(UseNUMA)) { + if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseISM)) { + UseLargePages = false; + } else { + warning("UseNUMA is not compatible with ISM large pages, disabling NUMA allocator"); + UseNUMA = false; + } + } else { + UseNUMA = false; + } + } if (!UseNUMA && ForceNUMA) { UseNUMA = true; }
--- a/src/os/windows/vm/os_windows.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os/windows/vm/os_windows.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -921,6 +921,8 @@ HINSTANCE dbghelp; EXCEPTION_POINTERS ep; MINIDUMP_EXCEPTION_INFORMATION mei; + MINIDUMP_EXCEPTION_INFORMATION* pmei; + HANDLE hProcess = GetCurrentProcess(); DWORD processId = GetCurrentProcessId(); HANDLE dumpFile; @@ -971,17 +973,22 @@ VMError::report_coredump_status("Failed to create file for dumping", false); return; } - - ep.ContextRecord = (PCONTEXT) contextRecord; - ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord; - - mei.ThreadId = GetCurrentThreadId(); - mei.ExceptionPointers = &ep; + if (exceptionRecord != NULL && contextRecord != NULL) { + ep.ContextRecord = (PCONTEXT) contextRecord; + ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord; + + mei.ThreadId = GetCurrentThreadId(); + mei.ExceptionPointers = &ep; + pmei = &mei; + } else { + pmei = NULL; + } + // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then. - if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, &mei, NULL, NULL) == false && - _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, &mei, NULL, NULL) == false) { + if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false && + _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) { VMError::report_coredump_status("Call to MiniDumpWriteDump() failed", false); } else { VMError::report_coredump_status(buffer, true); @@ -2755,8 +2762,8 @@ _hToken = NULL; } -bool os::large_page_init() { - if (!UseLargePages) return false; +void os::large_page_init() { + if (!UseLargePages) return; // print a warning if any large page related flag is specified on command line bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || @@ -2801,7 +2808,7 @@ } cleanup_after_large_page_init(); - return success; + UseLargePages = success; } // On win32, one cannot release just a part of reserved memory, it's an @@ -3554,7 +3561,7 @@ #endif } - FLAG_SET_DEFAULT(UseLargePages, os::large_page_init()); + os::large_page_init(); // Setup Windows Exceptions
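The point of the minidump change above is that MiniDumpWriteDump() accepts a NULL exception parameter, so the MINIDUMP_EXCEPTION_INFORMATION block is only built when a real EXCEPTION_POINTERS is on hand. A hedged, Windows-only C++ sketch of that calling pattern (error handling trimmed; link against dbghelp.lib):

    #include <windows.h>
    #include <dbghelp.h>

    // exc may legitimately be NULL, e.g. for a dump requested outside an
    // exception filter; in that case pass NULL instead of a dummy record.
    static BOOL write_minidump(HANDLE file, EXCEPTION_POINTERS* exc) {
      MINIDUMP_EXCEPTION_INFORMATION mei;
      MINIDUMP_EXCEPTION_INFORMATION* pmei = NULL;
      if (exc != NULL) {
        mei.ThreadId = GetCurrentThreadId();
        mei.ExceptionPointers = exc;
        mei.ClientPointers = FALSE;
        pmei = &mei;
      }
      return MiniDumpWriteDump(GetCurrentProcess(), GetCurrentProcessId(), file,
                               MiniDumpWithFullMemory, pmei, NULL, NULL);
    }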
--- a/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -93,7 +93,7 @@ inline void OrderAccess::store_fence(jbyte* p, jbyte v) { __asm__ volatile ( "xchgb (%2),%0" - : "=r" (v) + : "=q" (v) : "0" (v), "r" (p) : "memory"); } @@ -155,7 +155,7 @@ // Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile. inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { __asm__ volatile ( "xchgb (%2),%0" - : "=r" (v) + : "=q" (v) : "0" (v), "r" (p) : "memory"); }
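The constraint change above matters because xchgb operates on an 8-bit register, and on 32-bit x86 only eax/ebx/ecx/edx have byte subregisters; GCC's "q" constraint restricts allocation to exactly that set, whereas "r" could hand the asm esi or edi, which have no 8-bit form. A hedged standalone sketch of the corrected form (GCC or Clang on x86 assumed):

    #include <cstdint>

    // Atomically store v to *p with full-fence semantics via xchgb.
    // "=q" keeps v in a byte-addressable register (al/bl/cl/dl on 32-bit x86).
    static inline void store_fence_byte(volatile int8_t* p, int8_t v) {
      __asm__ volatile("xchgb (%2),%0"
                       : "=q"(v)
                       : "0"(v), "r"(p)
                       : "memory");
    }

    int main() {
      volatile int8_t flag = 0;
      store_fence_byte(&flag, 1);
      return flag == 1 ? 0 : 1;
    }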
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. *
--- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.s Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os_cpu/solaris_sparc/vm/solaris_sparc.s Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ !! -!! Copyright (c) 2005, 2008 Oracle and/or its affiliates. All rights reserved. +!! Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved. !! DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. !! !! This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java Thu Jun 02 18:59:50 2011 +0100 @@ -497,6 +497,9 @@ addAttr(rv, "TargetMachine", "MachineX64"); } + // We always want the /DEBUG option to get full symbol information in the pdb files + addAttr(rv, "GenerateDebugInformation", "true"); + return rv; } @@ -504,8 +507,7 @@ Vector getDebugLinkerFlags() { Vector rv = new Vector(); - // /DEBUG option - addAttr(rv, "GenerateDebugInformation", "true"); + // Empty now that /DEBUG option is used by all configs return rv; }
--- a/src/share/tools/hsdis/README Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/tools/hsdis/README Thu Jun 02 18:59:50 2011 +0100 @@ -1,4 +1,4 @@ -Copyright (c) 2008 Oracle and/or its affiliates. All rights reserved. +Copyright (c) 2008, Oracle and/or its affiliates. All rights reserved. DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. This code is free software; you can redistribute it and/or modify it
--- a/src/share/tools/hsdis/hsdis-demo.c Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/tools/hsdis/hsdis-demo.c Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/tools/hsdis/hsdis.c Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/tools/hsdis/hsdis.c Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/adlc/main.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/adlc/main.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/adlc/output_c.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/adlc/output_c.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/asm/assembler.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/asm/assembler.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/asm/assembler.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/asm/assembler.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/asm/codeBuffer.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/asm/codeBuffer.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_CodeStubs.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_CodeStubs.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -519,42 +519,126 @@ // Code stubs for Garbage-First barriers. class G1PreBarrierStub: public CodeStub { private: + bool _do_load; LIR_Opr _addr; LIR_Opr _pre_val; LIR_PatchCode _patch_code; CodeEmitInfo* _info; public: - // pre_val (a temporary register) must be a register; + // Version that _does_ generate a load of the previous value from addr. // addr (the address of the field to be read) must be a LIR_Address + // pre_val (a temporary register) must be a register; G1PreBarrierStub(LIR_Opr addr, LIR_Opr pre_val, LIR_PatchCode patch_code, CodeEmitInfo* info) : - _addr(addr), _pre_val(pre_val), _patch_code(patch_code), _info(info) + _addr(addr), _pre_val(pre_val), _do_load(true), + _patch_code(patch_code), _info(info) { assert(_pre_val->is_register(), "should be temporary register"); assert(_addr->is_address(), "should be the address of the field"); } + // Version that _does not_ generate load of the previous value; the + // previous value is assumed to have already been loaded into pre_val. + G1PreBarrierStub(LIR_Opr pre_val) : + _addr(LIR_OprFact::illegalOpr), _pre_val(pre_val), _do_load(false), + _patch_code(lir_patch_none), _info(NULL) + { + assert(_pre_val->is_register(), "should be a register"); + } + LIR_Opr addr() const { return _addr; } LIR_Opr pre_val() const { return _pre_val; } LIR_PatchCode patch_code() const { return _patch_code; } CodeEmitInfo* info() const { return _info; } + bool do_load() const { return _do_load; } virtual void emit_code(LIR_Assembler* e); virtual void visit(LIR_OpVisitState* visitor) { - // don't pass in the code emit info since it's processed in the fast - // path - if (_info != NULL) - visitor->do_slow_case(_info); - else + if (_do_load) { + // don't pass in the code emit info since it's processed in the fast + // path + if (_info != NULL) + visitor->do_slow_case(_info); + else + visitor->do_slow_case(); + + visitor->do_input(_addr); + visitor->do_temp(_pre_val); + } else { visitor->do_slow_case(); - visitor->do_input(_addr); - visitor->do_temp(_pre_val); + visitor->do_input(_pre_val); + } } #ifndef PRODUCT virtual void print_name(outputStream* out) const { out->print("G1PreBarrierStub"); } #endif // PRODUCT }; +// This G1 barrier code stub is used in Unsafe.getObject. +// It generates a sequence of guards around the SATB +// barrier code that are used to detect when we have +// the referent field of a Reference object. +// The first check is assumed to have been generated +// in the code generated for Unsafe.getObject(). + +class G1UnsafeGetObjSATBBarrierStub: public CodeStub { + private: + LIR_Opr _val; + LIR_Opr _src; + + LIR_Opr _tmp; + LIR_Opr _thread; + + bool _gen_src_check; + + public: + // A G1 barrier that is guarded by generated guards that determine whether + // val (which is the result of Unsafe.getObject() should be recorded in an + // SATB log buffer. We could be reading the referent field of a Reference object + // using Unsafe.getObject() and we need to record the referent. + // + // * val is the operand returned by the unsafe.getObject routine. + // * src is the base object + // * tmp is a temp used to load the klass of src, and then reference type + // * thread is the thread object. 
+ + G1UnsafeGetObjSATBBarrierStub(LIR_Opr val, LIR_Opr src, + LIR_Opr tmp, LIR_Opr thread, + bool gen_src_check) : + _val(val), _src(src), + _tmp(tmp), _thread(thread), + _gen_src_check(gen_src_check) + { + assert(_val->is_register(), "should have already been loaded"); + assert(_src->is_register(), "should have already been loaded"); + + assert(_tmp->is_register(), "should be a temporary register"); + } + + LIR_Opr val() const { return _val; } + LIR_Opr src() const { return _src; } + + LIR_Opr tmp() const { return _tmp; } + LIR_Opr thread() const { return _thread; } + + bool gen_src_check() const { return _gen_src_check; } + + virtual void emit_code(LIR_Assembler* e); + + virtual void visit(LIR_OpVisitState* visitor) { + visitor->do_slow_case(); + visitor->do_input(_val); + visitor->do_input(_src); + visitor->do_input(_thread); + + visitor->do_temp(_tmp); + } + +#ifndef PRODUCT + virtual void print_name(outputStream* out) const { out->print("G1UnsafeGetObjSATBBarrierStub"); } +#endif // PRODUCT +}; + class G1PostBarrierStub: public CodeStub { private: LIR_Opr _addr;
--- a/src/share/vm/c1/c1_Compilation.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_Compilation.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_Defs.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_Defs.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_FpuStackSim.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_FpuStackSim.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_FrameMap.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_FrameMap.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_FrameMap.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_FrameMap.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_GraphBuilder.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -2824,7 +2824,7 @@ int idx = 0; if (!method()->is_static()) { // we should always see the receiver - state->store_local(idx, new Local(objectType, idx)); + state->store_local(idx, new Local(method()->holder(), objectType, idx)); idx = 1; } @@ -2836,7 +2836,7 @@ // don't allow T_ARRAY to propagate into locals types if (basic_type == T_ARRAY) basic_type = T_OBJECT; ValueType* vt = as_ValueType(basic_type); - state->store_local(idx, new Local(vt, idx)); + state->store_local(idx, new Local(type, vt, idx)); idx += type->size(); } @@ -2913,6 +2913,46 @@ block()->set_end(end); break; } + + case vmIntrinsics::_Reference_get: + { + if (UseG1GC) { + // With java.lang.ref.reference.get() we must go through the + // intrinsic - when G1 is enabled - even when get() is the root + // method of the compile so that, if necessary, the value in + // the referent field of the reference object gets recorded by + // the pre-barrier code. + // Specifically, if G1 is enabled, the value in the referent + // field is recorded by the G1 SATB pre barrier. This will + // result in the referent being marked live and the reference + // object removed from the list of discovered references during + // reference processing. + + // Set up a stream so that appending instructions works properly. + ciBytecodeStream s(scope->method()); + s.reset_to_bci(0); + scope_data()->set_stream(&s); + s.next(); + + // setup the initial block state + _block = start_block; + _state = start_block->state()->copy_for_parsing(); + _last = start_block; + load_local(objectType, 0); + + // Emit the intrinsic node. + bool result = try_inline_intrinsics(scope->method()); + if (!result) BAILOUT("failed to inline intrinsic"); + method_return(apop()); + + // connect the begin and end blocks and we're all done. + BlockEnd* end = last()->as_BlockEnd(); + block()->set_end(end); + break; + } + // Otherwise, fall thru + } + default: scope_data()->add_to_work_list(start_block); iterate_all_blocks(); @@ -3150,6 +3190,15 @@ append_unsafe_CAS(callee); return true; + case vmIntrinsics::_Reference_get: + // It is only when G1 is enabled that we absolutely + // need to use the intrinsic version of Reference.get() + // so that the value in the referent field, if necessary, + // can be registered by the pre-barrier code. + if (!UseG1GC) return false; + preserves_state = true; + break; + default : return false; // do not inline } // create intrinsic node
--- a/src/share/vm/c1/c1_Instruction.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_Instruction.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -135,6 +135,33 @@ } +ciType* Local::exact_type() const { + ciType* type = declared_type(); + + // for primitive arrays, the declared type is the exact type + if (type->is_type_array_klass()) { + return type; + } else if (type->is_instance_klass()) { + ciInstanceKlass* ik = (ciInstanceKlass*)type; + if (ik->is_loaded() && ik->is_final() && !ik->is_interface()) { + return type; + } + } else if (type->is_obj_array_klass()) { + ciObjArrayKlass* oak = (ciObjArrayKlass*)type; + ciType* base = oak->base_element_type(); + if (base->is_instance_klass()) { + ciInstanceKlass* ik = base->as_instance_klass(); + if (ik->is_loaded() && ik->is_final()) { + return type; + } + } else if (base->is_primitive_type()) { + return type; + } + } + return NULL; +} + + ciType* LoadIndexed::exact_type() const { ciType* array_type = array()->exact_type(); if (array_type == NULL) { @@ -189,16 +216,21 @@ return ciTypeArrayKlass::make(elt_type()); } - ciType* NewObjectArray::exact_type() const { return ciObjArrayKlass::make(klass()); } +ciType* NewArray::declared_type() const { + return exact_type(); +} ciType* NewInstance::exact_type() const { return klass(); } +ciType* NewInstance::declared_type() const { + return exact_type(); +} ciType* CheckCast::declared_type() const { return klass(); @@ -349,6 +381,11 @@ if (state() != NULL) state()->values_do(f); } +ciType* Invoke::declared_type() const { + ciType *t = _target->signature()->return_type(); + assert(t->basic_type() != T_VOID, "need return value of void method?"); + return t; +} // Implementation of Contant intx Constant::hash() const { @@ -559,7 +596,7 @@ // of the inserted block, without recomputing the values of the other blocks // in the CFG. Therefore the value of "depth_first_number" in BlockBegin becomes meaningless. BlockBegin* BlockBegin::insert_block_between(BlockBegin* sux) { - BlockBegin* new_sux = new BlockBegin(-99); + BlockBegin* new_sux = new BlockBegin(end()->state()->bci()); // mark this block (special treatment when block order is computed) new_sux->set(critical_edge_split_flag);
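Local::exact_type() above promotes a declared type to an exact one only when no subtype can exist. As a hedged restatement of that rule over a toy type model (the struct and fields are illustrative, not the ci* classes):

    #include <cassert>

    struct TypeInfo {
      bool is_loaded;
      bool is_final;
      bool is_interface;
      bool is_primitive_array;   // e.g. int[]: element type fixed by the spec
    };

    // A declared type is also the exact runtime type when it cannot be
    // subclassed: primitive arrays, or loaded final non-interface classes
    // (and, by extension, arrays of such classes).
    static bool declared_type_is_exact(const TypeInfo& t) {
      if (t.is_primitive_array) return true;
      return t.is_loaded && t.is_final && !t.is_interface;
    }

    int main() {
      TypeInfo string_klass = { true, true,  false, false };  // java.lang.String is final
      TypeInfo object_klass = { true, false, false, false };  // java.lang.Object is not
      assert(declared_type_is_exact(string_klass));
      assert(!declared_type_is_exact(object_klass));
      return 0;
    }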
--- a/src/share/vm/c1/c1_Instruction.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_Instruction.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -621,16 +621,21 @@ LEAF(Local, Instruction) private: int _java_index; // the local index within the method to which the local belongs + ciType* _declared_type; public: // creation - Local(ValueType* type, int index) + Local(ciType* declared, ValueType* type, int index) : Instruction(type) , _java_index(index) + , _declared_type(declared) {} // accessors int java_index() const { return _java_index; } + ciType* declared_type() const { return _declared_type; } + ciType* exact_type() const; + // generic virtual void input_values_do(ValueVisitor* f) { /* no values */ } }; @@ -1146,6 +1151,8 @@ BasicTypeList* signature() const { return _signature; } ciMethod* target() const { return _target; } + ciType* declared_type() const; + // Returns false if target is not loaded bool target_is_final() const { return check_flag(TargetIsFinalFlag); } bool target_is_loaded() const { return check_flag(TargetIsLoadedFlag); } @@ -1187,6 +1194,7 @@ // generic virtual bool can_trap() const { return true; } ciType* exact_type() const; + ciType* declared_type() const; }; @@ -1208,6 +1216,8 @@ virtual bool needs_exception_state() const { return false; } + ciType* declared_type() const; + // generic virtual bool can_trap() const { return true; } virtual void input_values_do(ValueVisitor* f) { StateSplit::input_values_do(f); f->visit(&_length); } @@ -1397,6 +1407,7 @@ vmIntrinsics::ID _id; Values* _args; Value _recv; + int _nonnull_state; // mask identifying which args are nonnull public: // preserves_state can be set to true for Intrinsics @@ -1417,6 +1428,7 @@ , _id(id) , _args(args) , _recv(NULL) + , _nonnull_state(AllBits) { assert(args != NULL, "args must exist"); ASSERT_VALUES @@ -1442,6 +1454,23 @@ Value receiver() const { assert(has_receiver(), "must have receiver"); return _recv; } bool preserves_state() const { return check_flag(PreservesStateFlag); } + bool arg_needs_null_check(int i) { + if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) { + return is_set_nth_bit(_nonnull_state, i); + } + return true; + } + + void set_arg_needs_null_check(int i, bool check) { + if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) { + if (check) { + _nonnull_state |= nth_bit(i); + } else { + _nonnull_state &= ~(nth_bit(i)); + } + } + } + // generic virtual bool can_trap() const { return check_flag(CanTrapFlag); } virtual void input_values_do(ValueVisitor* f) {
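The _nonnull_state field added above keeps one "this argument still needs a null check" bit per intrinsic argument, starting from all-ones (AllBits). A hedged, standalone C++ sketch of the same bit bookkeeping with illustrative names:

    #include <cassert>
    #include <cstdint>

    struct NullCheckMask {
      uint32_t bits = ~0u;                    // AllBits: every argument starts checked

      bool needs_check(int i) const {
        if (i < 0 || i >= 32) return true;    // out of range: stay conservative
        return (bits >> i) & 1u;
      }
      void set_needs_check(int i, bool check) {
        if (i < 0 || i >= 32) return;
        if (check) bits |=  (1u << i);
        else       bits &= ~(1u << i);
      }
    };

    int main() {
      NullCheckMask m;
      m.set_needs_check(0, false);            // argument 0 proven non-null
      assert(!m.needs_check(0) && m.needs_check(2));
      return 0;
    }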
--- a/src/share/vm/c1/c1_InstructionPrinter.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_InstructionPrinter.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -132,17 +132,22 @@ if (value->is_null_object()) { output()->print("null"); } else if (!value->is_loaded()) { - output()->print("<unloaded object 0x%x>", value); + output()->print("<unloaded object " PTR_FORMAT ">", value); } else if (value->is_method()) { ciMethod* m = (ciMethod*)value; output()->print("<method %s.%s>", m->holder()->name()->as_utf8(), m->name()->as_utf8()); } else { - output()->print("<object 0x%x>", value->constant_encoding()); + output()->print("<object " PTR_FORMAT ">", value->constant_encoding()); } } else if (type->as_InstanceConstant() != NULL) { - output()->print("<instance 0x%x>", type->as_InstanceConstant()->value()->constant_encoding()); + ciInstance* value = type->as_InstanceConstant()->value(); + if (value->is_loaded()) { + output()->print("<instance " PTR_FORMAT ">", value->constant_encoding()); + } else { + output()->print("<unloaded instance " PTR_FORMAT ">", value); + } } else if (type->as_ArrayConstant() != NULL) { - output()->print("<array 0x%x>", type->as_ArrayConstant()->value()->constant_encoding()); + output()->print("<array " PTR_FORMAT ">", type->as_ArrayConstant()->value()->constant_encoding()); } else if (type->as_ClassConstant() != NULL) { ciInstanceKlass* klass = type->as_ClassConstant()->value(); if (!klass->is_loaded()) {
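The printer hunks above replace "0x%x" with PTR_FORMAT because on LP64 builds a pointer no longer fits in an unsigned int, so the old format silently truncated the printed address. A hedged standalone illustration using the standard PRIxPTR macro where HotSpot uses its own PTR_FORMAT:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
      int dummy = 0;
      uintptr_t p = reinterpret_cast<uintptr_t>(&dummy);
      std::printf("truncated : 0x%x\n", static_cast<unsigned>(p));  // may drop the upper 32 bits
      std::printf("full width: 0x%" PRIxPTR "\n", p);               // pointer-sized format
      return 0;
    }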
--- a/src/share/vm/c1/c1_LIR.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_LIR.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1215,7 +1215,11 @@ src_range_check = 1 << 5, dst_range_check = 1 << 6, type_check = 1 << 7, - all_flags = (1 << 8) - 1 + overlapping = 1 << 8, + unaligned = 1 << 9, + src_objarray = 1 << 10, + dst_objarray = 1 << 11, + all_flags = (1 << 12) - 1 }; LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
--- a/src/share/vm/c1/c1_LIRAssembler.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_LIRAssembler.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -836,6 +836,9 @@ _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size); } } + check_codespace(); + CHECK_BAILOUT(); + s.next(); } VerifyOops = v;
--- a/src/share/vm/c1/c1_LIRAssembler.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_LIRGenerator.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -706,6 +706,38 @@ } } +static Value maxvalue(IfOp* ifop) { + switch (ifop->cond()) { + case If::eql: return NULL; + case If::neq: return NULL; + case If::lss: // x < y ? x : y + case If::leq: // x <= y ? x : y + if (ifop->x() == ifop->tval() && + ifop->y() == ifop->fval()) return ifop->y(); + return NULL; + + case If::gtr: // x > y ? y : x + case If::geq: // x >= y ? y : x + if (ifop->x() == ifop->tval() && + ifop->y() == ifop->fval()) return ifop->y(); + return NULL; + + } +} + +static ciType* phi_declared_type(Phi* phi) { + ciType* t = phi->operand_at(0)->declared_type(); + if (t == NULL) { + return NULL; + } + for(int i = 1; i < phi->operand_count(); i++) { + if (t != phi->operand_at(i)->declared_type()) { + return NULL; + } + } + return t; +} + void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) { Instruction* src = x->argument_at(0); Instruction* src_pos = x->argument_at(1); @@ -715,12 +747,20 @@ // first try to identify the likely type of the arrays involved ciArrayKlass* expected_type = NULL; - bool is_exact = false; + bool is_exact = false, src_objarray = false, dst_objarray = false; { ciArrayKlass* src_exact_type = as_array_klass(src->exact_type()); ciArrayKlass* src_declared_type = as_array_klass(src->declared_type()); + Phi* phi; + if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) { + src_declared_type = as_array_klass(phi_declared_type(phi)); + } ciArrayKlass* dst_exact_type = as_array_klass(dst->exact_type()); ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type()); + if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) { + dst_declared_type = as_array_klass(phi_declared_type(phi)); + } + if (src_exact_type != NULL && src_exact_type == dst_exact_type) { // the types exactly match so the type is fully known is_exact = true; @@ -744,17 +784,60 @@ if (expected_type == NULL) expected_type = dst_exact_type; if (expected_type == NULL) expected_type = src_declared_type; if (expected_type == NULL) expected_type = dst_declared_type; + + src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass()); + dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass()); } // if a probable array type has been identified, figure out if any // of the required checks for a fast case can be elided. int flags = LIR_OpArrayCopy::all_flags; + + if (!src_objarray) + flags &= ~LIR_OpArrayCopy::src_objarray; + if (!dst_objarray) + flags &= ~LIR_OpArrayCopy::dst_objarray; + + if (!x->arg_needs_null_check(0)) + flags &= ~LIR_OpArrayCopy::src_null_check; + if (!x->arg_needs_null_check(2)) + flags &= ~LIR_OpArrayCopy::dst_null_check; + + if (expected_type != NULL) { - // try to skip null checks - if (src->as_NewArray() != NULL) + Value length_limit = NULL; + + IfOp* ifop = length->as_IfOp(); + if (ifop != NULL) { + // look for expressions like min(v, a.length) which ends up as + // x > y ? y : x or x >= y ? 
y : x + if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) && + ifop->x() == ifop->fval() && + ifop->y() == ifop->tval()) { + length_limit = ifop->y(); + } + } + + // try to skip null checks and range checks + NewArray* src_array = src->as_NewArray(); + if (src_array != NULL) { flags &= ~LIR_OpArrayCopy::src_null_check; - if (dst->as_NewArray() != NULL) + if (length_limit != NULL && + src_array->length() == length_limit && + is_constant_zero(src_pos)) { + flags &= ~LIR_OpArrayCopy::src_range_check; + } + } + + NewArray* dst_array = dst->as_NewArray(); + if (dst_array != NULL) { flags &= ~LIR_OpArrayCopy::dst_null_check; + if (length_limit != NULL && + dst_array->length() == length_limit && + is_constant_zero(dst_pos)) { + flags &= ~LIR_OpArrayCopy::dst_range_check; + } + } // check from incoming constant values if (positive_constant(src_pos)) @@ -788,6 +871,28 @@ } } + IntConstant* src_int = src_pos->type()->as_IntConstant(); + IntConstant* dst_int = dst_pos->type()->as_IntConstant(); + if (src_int && dst_int) { + int s_offs = src_int->value(); + int d_offs = dst_int->value(); + if (src_int->value() >= dst_int->value()) { + flags &= ~LIR_OpArrayCopy::overlapping; + } + if (expected_type != NULL) { + BasicType t = expected_type->element_type()->basic_type(); + int element_size = type2aelembytes(t); + if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) && + ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) { + flags &= ~LIR_OpArrayCopy::unaligned; + } + } + } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) { + // src and dest positions are the same, or dst is zero so assume + // nonoverlapping copy. + flags &= ~LIR_OpArrayCopy::overlapping; + } + if (src == dst) { // moving within a single array so no type checks are needed if (flags & LIR_OpArrayCopy::type_check) { @@ -1104,6 +1209,38 @@ set_no_result(x); } +// Examble: ref.get() +// Combination of LoadField and g1 pre-write barrier +void LIRGenerator::do_Reference_get(Intrinsic* x) { + + const int referent_offset = java_lang_ref_Reference::referent_offset; + guarantee(referent_offset > 0, "referent offset not initialized"); + + assert(x->number_of_arguments() == 1, "wrong type"); + + LIRItem reference(x->argument_at(0), this); + reference.load_item(); + + // need to perform the null check on the reference objecy + CodeEmitInfo* info = NULL; + if (x->needs_null_check()) { + info = state_for(x); + } + + LIR_Address* referent_field_adr = + new LIR_Address(reference.result(), referent_offset, T_OBJECT); + + LIR_Opr result = rlock_result(x); + + __ load(referent_field_adr, result, info); + + // Register the value in the referent field with the pre-barrier + pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */, + result /* pre_val */, + false /* do_load */, + false /* patch */, + NULL /* info */); +} // Example: object.getClass () void LIRGenerator::do_getClass(Intrinsic* x) { @@ -1246,13 +1383,14 @@ // Various barriers -void LIRGenerator::pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) { +void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, + bool do_load, bool patch, CodeEmitInfo* info) { // Do the pre-write barrier, if any. 
switch (_bs->kind()) { #ifndef SERIALGC case BarrierSet::G1SATBCT: case BarrierSet::G1SATBCTLogging: - G1SATBCardTableModRef_pre_barrier(addr_opr, patch, info); + G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info); break; #endif // SERIALGC case BarrierSet::CardTableModRef: @@ -1293,9 +1431,8 @@ //////////////////////////////////////////////////////////////////////// #ifndef SERIALGC -void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) { - if (G1DisablePreBarrier) return; - +void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, + bool do_load, bool patch, CodeEmitInfo* info) { // First we test whether marking is in progress. BasicType flag_type; if (in_bytes(PtrQueue::byte_width_of_active()) == 4) { @@ -1314,26 +1451,40 @@ // Read the marking-in-progress flag. LIR_Opr flag_val = new_register(T_INT); __ load(mark_active_flag_addr, flag_val); - - LIR_PatchCode pre_val_patch_code = - patch ? lir_patch_normal : lir_patch_none; - - LIR_Opr pre_val = new_register(T_OBJECT); - __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0)); - if (!addr_opr->is_address()) { - assert(addr_opr->is_register(), "must be"); - addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT)); + + LIR_PatchCode pre_val_patch_code = lir_patch_none; + + CodeStub* slow; + + if (do_load) { + assert(pre_val == LIR_OprFact::illegalOpr, "sanity"); + assert(addr_opr != LIR_OprFact::illegalOpr, "sanity"); + + if (patch) + pre_val_patch_code = lir_patch_normal; + + pre_val = new_register(T_OBJECT); + + if (!addr_opr->is_address()) { + assert(addr_opr->is_register(), "must be"); + addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT)); + } + slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info); + } else { + assert(addr_opr == LIR_OprFact::illegalOpr, "sanity"); + assert(pre_val->is_register(), "must be"); + assert(pre_val->type() == T_OBJECT, "must be an object"); + assert(info == NULL, "sanity"); + + slow = new G1PreBarrierStub(pre_val); } - CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, - info); + __ branch(lir_cond_notEqual, T_INT, slow); __ branch_destination(slow->continuation()); } void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) { - if (G1DisablePostBarrier) return; - // If the "new_val" is a constant NULL, no barrier is necessary. if (new_val->is_constant() && new_val->as_constant_ptr()->as_jobject() == NULL) return; @@ -1351,7 +1502,7 @@ if (addr->is_address()) { LIR_Address* address = addr->as_address_ptr(); - LIR_Opr ptr = new_register(T_OBJECT); + LIR_Opr ptr = new_pointer_register(); if (!address->index()->is_valid() && address->disp() == 0) { __ move(address->base(), ptr); } else { @@ -1403,7 +1554,9 @@ LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base); if (addr->is_address()) { LIR_Address* address = addr->as_address_ptr(); - LIR_Opr ptr = new_register(T_OBJECT); + // ptr cannot be an object because we use this barrier for array card marks + // and addr can point in the middle of an array. + LIR_Opr ptr = new_pointer_register(); if (!address->index()->is_valid() && address->disp() == 0) { __ move(address->base(), ptr); } else { @@ -1555,6 +1708,8 @@ if (is_oop) { // Do the pre-write barrier, if any. pre_barrier(LIR_OprFact::address(address), + LIR_OprFact::illegalOpr /* pre_val */, + true /* do_load*/, needs_patching, (info ? 
new CodeEmitInfo(info) : NULL)); } @@ -1984,9 +2139,144 @@ off.load_item(); src.load_item(); - LIR_Opr reg = reg = rlock_result(x, x->basic_type()); + LIR_Opr reg = rlock_result(x, x->basic_type()); get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile()); + +#ifndef SERIALGC + // We might be reading the value of the referent field of a + // Reference object in order to attach it back to the live + // object graph. If G1 is enabled then we need to record + // the value that is being returned in an SATB log buffer. + // + // We need to generate code similar to the following... + // + // if (offset == java_lang_ref_Reference::referent_offset) { + // if (src != NULL) { + // if (klass(src)->reference_type() != REF_NONE) { + // pre_barrier(..., reg, ...); + // } + // } + // } + // + // The first non-constant check of either the offset or + // the src operand will be done here; the remainder + // will take place in the generated code stub. + + if (UseG1GC && type == T_OBJECT) { + bool gen_code_stub = true; // Assume we need to generate the slow code stub. + bool gen_offset_check = true; // Assume the code stub has to generate the offset guard. + bool gen_source_check = true; // Assume the code stub has to check the src object for null. + + if (off.is_constant()) { + jlong off_con = (off.type()->is_int() ? + (jlong) off.get_jint_constant() : + off.get_jlong_constant()); + + + if (off_con != (jlong) java_lang_ref_Reference::referent_offset) { + // The constant offset is something other than referent_offset. + // We can skip generating/checking the remaining guards and + // skip generation of the code stub. + gen_code_stub = false; + } else { + // The constant offset is the same as referent_offset - + // we do not need to generate a runtime offset check. + gen_offset_check = false; + } + } + + // We don't need to generate stub if the source object is an array + if (gen_code_stub && src.type()->is_array()) { + gen_code_stub = false; + } + + if (gen_code_stub) { + // We still need to continue with the checks. + if (src.is_constant()) { + ciObject* src_con = src.get_jobject_constant(); + + if (src_con->is_null_object()) { + // The constant src object is null - We can skip + // generating the code stub. + gen_code_stub = false; + } else { + // Non-null constant source object. We still have to generate + // the slow stub - but we don't need to generate the runtime + // null object check. + gen_source_check = false; + } + } + } + + if (gen_code_stub) { + // Temoraries. + LIR_Opr src_klass = new_register(T_OBJECT); + + // Get the thread pointer for the pre-barrier + LIR_Opr thread = getThreadPointer(); + + CodeStub* stub; + + // We can have generate one runtime check here. Let's start with + // the offset check. + if (gen_offset_check) { + // if (offset == referent_offset) -> slow code stub + // If offset is an int then we can do the comparison with the + // referent_offset constant; otherwise we need to move + // referent_offset into a temporary register and generate + // a reg-reg compare. + + LIR_Opr referent_off; + + if (off.type()->is_int()) { + referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset); + } else { + assert(off.type()->is_long(), "what else?"); + referent_off = new_register(T_LONG); + __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off); + } + + __ cmp(lir_cond_equal, off.result(), referent_off); + + // Optionally generate "src == null" check. 
+ stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(), + src_klass, thread, + gen_source_check); + + __ branch(lir_cond_equal, as_BasicType(off.type()), stub); + } else { + if (gen_source_check) { + // offset is a const and equals referent offset + // if (source != null) -> slow code stub + __ cmp(lir_cond_notEqual, src.result(), LIR_OprFact::oopConst(NULL)); + + // Since we are generating the "if src == null" guard here, + // there is no need to generate the "src == null" check again. + stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(), + src_klass, thread, + false); + + __ branch(lir_cond_notEqual, T_OBJECT, stub); + } else { + // We have statically determined that offset == referent_offset + // && src != null so we unconditionally branch to code stub + // to perform the guards and record reg in the SATB log buffer. + + stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(), + src_klass, thread, + false); + + __ branch(lir_cond_always, T_ILLEGAL, stub); + } + } + + // Continuation point + __ branch_destination(stub->continuation()); + } + } +#endif // SERIALGC + if (x->is_volatile() && os::is_MP()) __ membar_acquire(); } @@ -2652,6 +2942,10 @@ do_AttemptUpdate(x); break; + case vmIntrinsics::_Reference_get: + do_Reference_get(x); + break; + default: ShouldNotReachHere(); break; } }
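Earlier in this c1_LIRGenerator.cpp hunk, arraycopy_helper() starts recognizing lengths of the form min(v, a.length) (an IfOp of shape x > y ? y : x) so the corresponding range check can be dropped when the position is a constant zero. A hedged, standalone C++ sketch of why that elision is sound; the loop is purely illustrative:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    // If the copy starts at position 0 and its length was clamped to the
    // array's own length, then pos + length <= array.length holds for every
    // requested n, so the per-call range check on that array is redundant.
    int main() {
      const std::size_t src_length = 16;
      for (std::size_t n = 0; n < 100; ++n) {
        std::size_t copy_len = std::min(n, src_length);   // Math.min(n, src.length)
        assert(0 + copy_len <= src_length);               // never out of bounds
      }
      return 0;
    }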
--- a/src/share/vm/c1/c1_LIRGenerator.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_LIRGenerator.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -246,6 +246,7 @@ void do_AttemptUpdate(Intrinsic* x); void do_NIOCheckIndex(Intrinsic* x); void do_FPIntrinsics(Intrinsic* x); + void do_Reference_get(Intrinsic* x); void do_UnsafePrefetch(UnsafePrefetch* x, bool is_store); @@ -260,13 +261,14 @@ // generic interface - void pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info); + void pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, bool do_load, bool patch, CodeEmitInfo* info); void post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val); // specific implementations // pre barriers - void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info); + void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, + bool do_load, bool patch, CodeEmitInfo* info); // post barriers
--- a/src/share/vm/c1/c1_LinearScan.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_LinearScan.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_LinearScan.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_LinearScan.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_MacroAssembler.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_MacroAssembler.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_Optimizer.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_Optimizer.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -252,26 +252,28 @@ Constant::CompareResult t_compare_res = x_tval_const->compare(cond, y_const); Constant::CompareResult f_compare_res = x_fval_const->compare(cond, y_const); - guarantee(t_compare_res != Constant::not_comparable && f_compare_res != Constant::not_comparable, "incomparable constants in IfOp"); - - Value new_tval = t_compare_res == Constant::cond_true ? tval : fval; - Value new_fval = f_compare_res == Constant::cond_true ? tval : fval; + // not_comparable here is a valid return in case we're comparing unloaded oop constants + if (t_compare_res != Constant::not_comparable && f_compare_res != Constant::not_comparable) { + Value new_tval = t_compare_res == Constant::cond_true ? tval : fval; + Value new_fval = f_compare_res == Constant::cond_true ? tval : fval; - _ifop_count++; - if (new_tval == new_fval) { - return new_tval; - } else { - return new IfOp(x_ifop->x(), x_ifop_cond, x_ifop->y(), new_tval, new_fval); + _ifop_count++; + if (new_tval == new_fval) { + return new_tval; + } else { + return new IfOp(x_ifop->x(), x_ifop_cond, x_ifop->y(), new_tval, new_fval); + } } } } else { Constant* x_const = x->as_Constant(); if (x_const != NULL) { // x and y are constants Constant::CompareResult x_compare_res = x_const->compare(cond, y_const); - guarantee(x_compare_res != Constant::not_comparable, "incomparable constants in IfOp"); - - _ifop_count++; - return x_compare_res == Constant::cond_true ? tval : fval; + // not_comparable here is a valid return in case we're comparing unloaded oop constants + if (x_compare_res != Constant::not_comparable) { + _ifop_count++; + return x_compare_res == Constant::cond_true ? tval : fval; + } } } } @@ -644,7 +646,7 @@ void NullCheckVisitor::do_InstanceOf (InstanceOf* x) {} void NullCheckVisitor::do_MonitorEnter (MonitorEnter* x) { nce()->handle_AccessMonitor(x); } void NullCheckVisitor::do_MonitorExit (MonitorExit* x) { nce()->handle_AccessMonitor(x); } -void NullCheckVisitor::do_Intrinsic (Intrinsic* x) { nce()->clear_last_explicit_null_check(); } +void NullCheckVisitor::do_Intrinsic (Intrinsic* x) { nce()->handle_Intrinsic(x); } void NullCheckVisitor::do_BlockBegin (BlockBegin* x) {} void NullCheckVisitor::do_Goto (Goto* x) {} void NullCheckVisitor::do_If (If* x) {} @@ -1023,6 +1025,12 @@ void NullCheckEliminator::handle_Intrinsic(Intrinsic* x) { if (!x->has_receiver()) { + if (x->id() == vmIntrinsics::_arraycopy) { + for (int i = 0; i < x->number_of_arguments(); i++) { + x->set_arg_needs_null_check(i, !set_contains(x->argument_at(i))); + } + } + // Be conservative clear_last_explicit_null_check(); return;
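The Optimizer change above relaxes IfOp folding so that a not_comparable result (for example from unloaded oop constants) simply suppresses the fold instead of tripping a guarantee. A standalone sketch of that rule, with enum names assumed to mirror Constant::CompareResult:

    #include <cstdio>

    enum CompareResult { cond_true, cond_false, not_comparable };
    enum FoldDecision  { NO_FOLD, FOLD_TVAL, FOLD_FVAL };

    // Fold "cond(x_ifop, y_const) ? tval : fval" only when both constant
    // comparisons are comparable; not_comparable leaves the expression alone.
    FoldDecision fold_ifop(CompareResult t_res, CompareResult f_res) {
      if (t_res == not_comparable || f_res == not_comparable) return NO_FOLD;
      FoldDecision new_t = (t_res == cond_true) ? FOLD_TVAL : FOLD_FVAL;
      FoldDecision new_f = (f_res == cond_true) ? FOLD_TVAL : FOLD_FVAL;
      return (new_t == new_f) ? new_t : NO_FOLD;  // unequal branches: C1 builds a new IfOp instead
    }

    int main() {
      std::printf("%d %d\n",
                  fold_ifop(cond_true, cond_true),        // 1 == FOLD_TVAL
                  fold_ifop(not_comparable, cond_true));  // 0 == NO_FOLD
      return 0;
    }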
--- a/src/share/vm/c1/c1_Runtime1.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_Runtime1.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -103,7 +103,10 @@ int Runtime1::_generic_arraycopy_cnt = 0; int Runtime1::_primitive_arraycopy_cnt = 0; int Runtime1::_oop_arraycopy_cnt = 0; +int Runtime1::_generic_arraycopystub_cnt = 0; int Runtime1::_arraycopy_slowcase_cnt = 0; +int Runtime1::_arraycopy_checkcast_cnt = 0; +int Runtime1::_arraycopy_checkcast_attempt_cnt = 0; int Runtime1::_new_type_array_slowcase_cnt = 0; int Runtime1::_new_object_array_slowcase_cnt = 0; int Runtime1::_new_instance_slowcase_cnt = 0; @@ -119,6 +122,32 @@ int Runtime1::_throw_incompatible_class_change_error_count = 0; int Runtime1::_throw_array_store_exception_count = 0; int Runtime1::_throw_count = 0; + +static int _byte_arraycopy_cnt = 0; +static int _short_arraycopy_cnt = 0; +static int _int_arraycopy_cnt = 0; +static int _long_arraycopy_cnt = 0; +static int _oop_arraycopy_cnt = 0; + +address Runtime1::arraycopy_count_address(BasicType type) { + switch (type) { + case T_BOOLEAN: + case T_BYTE: return (address)&_byte_arraycopy_cnt; + case T_CHAR: + case T_SHORT: return (address)&_short_arraycopy_cnt; + case T_FLOAT: + case T_INT: return (address)&_int_arraycopy_cnt; + case T_DOUBLE: + case T_LONG: return (address)&_long_arraycopy_cnt; + case T_ARRAY: + case T_OBJECT: return (address)&_oop_arraycopy_cnt; + default: + ShouldNotReachHere(); + return NULL; + } +} + + #endif // Simple helper to see if the caller of a runtime stub which @@ -997,9 +1026,21 @@ // first replace the tail, then the call #ifdef ARM if(stub_id == Runtime1::load_klass_patching_id && !VM_Version::supports_movw()) { + nmethod* nm = CodeCache::find_nmethod(instr_pc); + oop* oop_addr = NULL; + assert(nm != NULL, "invalid nmethod_pc"); + RelocIterator oops(nm, copy_buff, copy_buff + 1); + while (oops.next()) { + if (oops.type() == relocInfo::oop_type) { + oop_Relocation* r = oops.oop_reloc(); + oop_addr = r->oop_addr(); + break; + } + } + assert(oop_addr != NULL, "oop relocation must exist"); copy_buff -= *byte_count; NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff); - n_copy2->set_data((intx) (load_klass()), instr_pc); + n_copy2->set_pc_relative_offset((address)oop_addr, instr_pc); } #endif @@ -1229,9 +1270,17 @@ tty->print_cr(" _handle_wrong_method_cnt: %d", SharedRuntime::_wrong_method_ctr); tty->print_cr(" _ic_miss_cnt: %d", SharedRuntime::_ic_miss_ctr); tty->print_cr(" _generic_arraycopy_cnt: %d", _generic_arraycopy_cnt); + tty->print_cr(" _generic_arraycopystub_cnt: %d", _generic_arraycopystub_cnt); + tty->print_cr(" _byte_arraycopy_cnt: %d", _byte_arraycopy_cnt); + tty->print_cr(" _short_arraycopy_cnt: %d", _short_arraycopy_cnt); + tty->print_cr(" _int_arraycopy_cnt: %d", _int_arraycopy_cnt); + tty->print_cr(" _long_arraycopy_cnt: %d", _long_arraycopy_cnt); tty->print_cr(" _primitive_arraycopy_cnt: %d", _primitive_arraycopy_cnt); - tty->print_cr(" _oop_arraycopy_cnt: %d", _oop_arraycopy_cnt); + tty->print_cr(" _oop_arraycopy_cnt (C): %d", Runtime1::_oop_arraycopy_cnt); + tty->print_cr(" _oop_arraycopy_cnt (stub): %d", _oop_arraycopy_cnt); tty->print_cr(" _arraycopy_slowcase_cnt: %d", _arraycopy_slowcase_cnt); + tty->print_cr(" _arraycopy_checkcast_cnt: %d", _arraycopy_checkcast_cnt); + tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%d", _arraycopy_checkcast_attempt_cnt); tty->print_cr(" _new_type_array_slowcase_cnt: %d", _new_type_array_slowcase_cnt); tty->print_cr(" _new_object_array_slowcase_cnt: %d", 
_new_object_array_slowcase_cnt);
--- a/src/share/vm/c1/c1_Runtime1.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_Runtime1.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -94,7 +94,10 @@ static int _generic_arraycopy_cnt; static int _primitive_arraycopy_cnt; static int _oop_arraycopy_cnt; + static int _generic_arraycopystub_cnt; static int _arraycopy_slowcase_cnt; + static int _arraycopy_checkcast_cnt; + static int _arraycopy_checkcast_attempt_cnt; static int _new_type_array_slowcase_cnt; static int _new_object_array_slowcase_cnt; static int _new_instance_slowcase_cnt; @@ -174,7 +177,8 @@ static void trace_block_entry(jint block_id); #ifndef PRODUCT - static address throw_count_address() { return (address)&_throw_count; } + static address throw_count_address() { return (address)&_throw_count; } + static address arraycopy_count_address(BasicType type); #endif // directly accessible leaf routine
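The new arraycopy_count_address() above hands generated stubs the address of a per-type counter to increment in non-product builds. A standalone analogue, covering only a subset of the types and using illustrative names:

    #include <cstdio>

    enum BasicType { T_BYTE, T_INT, T_OBJECT };

    static int _byte_arraycopy_cnt = 0;
    static int _int_arraycopy_cnt  = 0;
    static int _oop_arraycopy_cnt  = 0;

    // Map an element type to the counter a generated stub should increment.
    int* arraycopy_count_address(BasicType type) {
      switch (type) {
        case T_BYTE:   return &_byte_arraycopy_cnt;
        case T_INT:    return &_int_arraycopy_cnt;
        case T_OBJECT: return &_oop_arraycopy_cnt;
      }
      return 0;
    }

    int main() {
      // A real stub emits a memory increment of the returned address;
      // here we just dereference it directly.
      ++*arraycopy_count_address(T_INT);
      ++*arraycopy_count_address(T_INT);
      std::printf("_int_arraycopy_cnt: %d\n", _int_arraycopy_cnt);  // prints 2
      return 0;
    }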
--- a/src/share/vm/c1/c1_globals.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/c1/c1_globals.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -232,14 +232,7 @@ } // compute size of arguments - int arg_size = target->arg_size(); - if (code == Bytecodes::_invokedynamic) { - assert(!target->is_static(), "receiver explicit in method"); - arg_size--; // implicit, not really on stack - } - if (!target->is_loaded() && code == Bytecodes::_invokestatic) { - arg_size--; - } + int arg_size = target->invoke_arg_size(code); int arg_base = MAX2(state._stack_height - arg_size, 0); // direct recursive calls are skipped if they can be bound statically without introducing
--- a/src/share/vm/ci/ciClassList.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciClassList.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciEnv.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciEnv.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -756,7 +756,7 @@ assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic"); bool is_resolved = cpool->cache()->main_entry_at(index)->is_resolved(bc); - if (is_resolved && (oop) cpool->cache()->secondary_entry_at(index)->f1() == NULL) + if (is_resolved && cpool->cache()->secondary_entry_at(index)->is_f1_null()) // FIXME: code generation could allow for null (unlinked) call site is_resolved = false; @@ -770,7 +770,7 @@ // Get the invoker methodOop from the constant pool. oop f1_value = cpool->cache()->main_entry_at(index)->f1(); - methodOop signature_invoker = methodOop(f1_value); + methodOop signature_invoker = (methodOop) f1_value; assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(), "correct result from LinkResolver::resolve_invokedynamic");
--- a/src/share/vm/ci/ciEnv.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciEnv.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciInstance.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciInstance.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -66,8 +66,8 @@ "invalid access"); VM_ENTRY_MARK; ciConstant result; - oop obj = get_oop(); - assert(obj != NULL, "bad oop"); + Handle obj = get_oop(); + assert(!obj.is_null(), "bad oop"); BasicType field_btype = field->type()->basic_type(); int offset = field->offset();
--- a/src/share/vm/ci/ciKlass.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciKlass.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciMethod.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciMethod.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -127,7 +127,24 @@ ciSignature* signature() const { return _signature; } ciType* return_type() const { return _signature->return_type(); } int arg_size_no_receiver() const { return _signature->size(); } - int arg_size() const { return _signature->size() + (_flags.is_static() ? 0 : 1); } + // Can only be used on loaded ciMethods + int arg_size() const { + check_is_loaded(); + return _signature->size() + (_flags.is_static() ? 0 : 1); + } + // Report the number of elements on stack when invoking this method. + // This is different than the regular arg_size because invokedynamic + // has an implicit receiver. + int invoke_arg_size(Bytecodes::Code code) const { + int arg_size = _signature->size(); + // Add a receiver argument, maybe: + if (code != Bytecodes::_invokestatic && + code != Bytecodes::_invokedynamic) { + arg_size++; + } + return arg_size; + } + // Method code and related information. address code() { if (_code == NULL) load_code(); return _code; } @@ -276,9 +293,9 @@ void print_short_name(outputStream* st = tty); methodOop get_method_handle_target() { - klassOop receiver_limit_oop = NULL; - int flags = 0; - return MethodHandles::decode_method(get_oop(), receiver_limit_oop, flags); + KlassHandle receiver_limit; int flags = 0; + methodHandle m = MethodHandles::decode_method(get_oop(), receiver_limit, flags); + return m(); + } };
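A quick worked example of the invoke_arg_size() rule above for a hypothetical method with signature (II)V, i.e. _signature->size() == 2 (standalone sketch, bytecode constants abbreviated):

    #include <cstdio>

    enum Code { _invokestatic, _invokedynamic, _invokevirtual, _invokeinterface, _invokespecial };

    // Same rule as invoke_arg_size() above: only non-static, non-dynamic invokes
    // push an explicit receiver on the caller's operand stack.
    int invoke_arg_size(Code code, int signature_size) {
      int arg_size = signature_size;
      if (code != _invokestatic && code != _invokedynamic) {
        arg_size++;  // receiver slot
      }
      return arg_size;
    }

    int main() {
      std::printf("static=%d dynamic=%d virtual=%d\n",
                  invoke_arg_size(_invokestatic, 2),    // 2
                  invoke_arg_size(_invokedynamic, 2),   // 2
                  invoke_arg_size(_invokevirtual, 2));  // 3
      return 0;
    }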
--- a/src/share/vm/ci/ciMethodData.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciMethodData.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -233,7 +233,10 @@ public: bool is_method_data() { return true; } - bool is_empty() { return _state == empty_state; } + + void set_mature() { _state = mature_state; } + + bool is_empty() { return _state == empty_state; } bool is_mature() { return _state == mature_state; } int creation_mileage() { return _orig.creation_mileage(); }
--- a/src/share/vm/ci/ciMethodHandle.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciMethodHandle.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "ci/ciClassList.hpp" #include "ci/ciInstance.hpp" +#include "ci/ciMethodData.hpp" #include "ci/ciMethodHandle.hpp" #include "ci/ciUtilities.hpp" #include "prims/methodHandleWalk.hpp" @@ -36,15 +37,42 @@ // ciMethodHandle::get_adapter // // Return an adapter for this MethodHandle. -ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const { +ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) const { VM_ENTRY_MARK; Handle h(get_oop()); methodHandle callee(_callee->get_methodOop()); // We catch all exceptions here that could happen in the method // handle compiler and stop the VM. - MethodHandleCompiler mhc(h, callee, is_invokedynamic, CATCH); - methodHandle m = mhc.compile(CATCH); - return CURRENT_ENV->get_object(m())->as_method(); + MethodHandleCompiler mhc(h, callee->name(), callee->signature(), _profile->count(), is_invokedynamic, THREAD); + if (!HAS_PENDING_EXCEPTION) { + methodHandle m = mhc.compile(THREAD); + if (!HAS_PENDING_EXCEPTION) { + return CURRENT_ENV->get_object(m())->as_method(); + } + } + if (PrintMiscellaneous && (Verbose || WizardMode)) { + tty->print("*** ciMethodHandle::get_adapter => "); + PENDING_EXCEPTION->print(); + tty->print("*** get_adapter (%s): ", is_invokedynamic ? "indy" : "mh"); ((ciObject*)this)->print(); //@@ + } + CLEAR_PENDING_EXCEPTION; + return NULL; +} + +// ------------------------------------------------------------------ +// ciMethodHandle::get_adapter +// +// Return an adapter for this MethodHandle. +ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const { + ciMethod* result = get_adapter_impl(is_invokedynamic); + if (result) { + // Fake up the MDO maturity. + ciMethodData* mdo = result->method_data(); + if (mdo != NULL && _caller->method_data() != NULL && _caller->method_data()->is_mature()) { + mdo->set_mature(); + } + } + return result; }
--- a/src/share/vm/ci/ciMethodHandle.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciMethodHandle.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_CI_CIMETHODHANDLE_HPP #define SHARE_VM_CI_CIMETHODHANDLE_HPP +#include "ci/ciCallProfile.hpp" #include "ci/ciInstance.hpp" #include "prims/methodHandles.hpp" @@ -33,32 +34,37 @@ // The class represents a java.lang.invoke.MethodHandle object. class ciMethodHandle : public ciInstance { private: - ciMethod* _callee; + ciMethod* _callee; + ciMethod* _caller; + ciCallProfile* _profile; // Return an adapter for this MethodHandle. - ciMethod* get_adapter(bool is_invokedynamic) const; + ciMethod* get_adapter_impl(bool is_invokedynamic) const; + ciMethod* get_adapter( bool is_invokedynamic) const; protected: void print_impl(outputStream* st); public: - ciMethodHandle(instanceHandle h_i) : ciInstance(h_i) {}; + ciMethodHandle(instanceHandle h_i) : + ciInstance(h_i), + _callee(NULL), + _caller(NULL), + _profile(NULL) + {} // What kind of ciObject is this? bool is_method_handle() const { return true; } - ciMethod* callee() const { return _callee; } - void set_callee(ciMethod* m) { _callee = m; } + void set_callee(ciMethod* m) { _callee = m; } + void set_caller(ciMethod* m) { _caller = m; } + void set_call_profile(ciCallProfile* profile) { _profile = profile; } // Return an adapter for a MethodHandle call. - ciMethod* get_method_handle_adapter() const { - return get_adapter(false); - } + ciMethod* get_method_handle_adapter() const { return get_adapter(false); } // Return an adapter for an invokedynamic call. - ciMethod* get_invokedynamic_adapter() const { - return get_adapter(true); - } + ciMethod* get_invokedynamic_adapter() const { return get_adapter(true); } }; #endif // SHARE_VM_CI_CIMETHODHANDLE_HPP
--- a/src/share/vm/ci/ciObjArrayKlass.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciObjArrayKlass.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciObject.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciObject.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -194,6 +194,16 @@ // ciObject::should_be_constant() bool ciObject::should_be_constant() { if (ScavengeRootsInCode >= 2) return true; // force everybody to be a constant + if (!JavaObjectsInPerm && !is_null_object()) { + // We want Strings and Classes to be embeddable by default since + // they used to be in the perm world. Not all Strings used to be + // embeddable but there's no easy way to distinguish the interned + // from the regular ones so just treat them all that way. + ciEnv* env = CURRENT_ENV; + if (klass() == env->String_klass() || klass() == env->Class_klass()) { + return true; + } + } return handle() == NULL || !is_scavengable(); }
--- a/src/share/vm/ci/ciObject.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciObject.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciObjectFactory.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciObjectFactory.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciSignature.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciSignature.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciSignature.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciSignature.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciSymbol.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciSymbol.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciSymbol.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/ciSymbol.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/compilerInterface.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/ci/compilerInterface.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/classFileError.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/classFileError.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/classFileParser.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/classFileParser.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -170,7 +170,6 @@ ShouldNotReachHere(); } break; - case JVM_CONSTANT_InvokeDynamicTrans : // this tag appears only in old classfiles case JVM_CONSTANT_InvokeDynamic : { if (_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) { @@ -186,14 +185,6 @@ cfs->guarantee_more(5, CHECK); // bsm_index, nt, tag/access_flags u2 bootstrap_specifier_index = cfs->get_u2_fast(); u2 name_and_type_index = cfs->get_u2_fast(); - if (tag == JVM_CONSTANT_InvokeDynamicTrans) { - if (!AllowTransitionalJSR292) - classfile_parse_error( - "This JVM does not support transitional InvokeDynamic tag %u in class file %s", - tag, CHECK); - cp->invoke_dynamic_trans_at_put(index, bootstrap_specifier_index, name_and_type_index); - break; - } if (_max_bootstrap_specifier_index < (int) bootstrap_specifier_index) _max_bootstrap_specifier_index = (int) bootstrap_specifier_index; // collect for later cp->invoke_dynamic_at_put(index, bootstrap_specifier_index, name_and_type_index); @@ -492,7 +483,6 @@ ref_index, CHECK_(nullHandle)); } break; - case JVM_CONSTANT_InvokeDynamicTrans : case JVM_CONSTANT_InvokeDynamic : { int name_and_type_ref_index = cp->invoke_dynamic_name_and_type_ref_index_at(index); @@ -501,14 +491,6 @@ "Invalid constant pool index %u in class file %s", name_and_type_ref_index, CHECK_(nullHandle)); - if (tag == JVM_CONSTANT_InvokeDynamicTrans) { - int bootstrap_method_ref_index = cp->invoke_dynamic_bootstrap_method_ref_index_at(index); - check_property(valid_cp_range(bootstrap_method_ref_index, length) && - cp->tag_at(bootstrap_method_ref_index).is_method_handle(), - "Invalid constant pool index %u in class file %s", - bootstrap_method_ref_index, - CHECK_(nullHandle)); - } // bootstrap specifier index must be checked later, when BootstrapMethods attr is available break; } @@ -578,6 +560,7 @@ } break; } + case JVM_CONSTANT_InvokeDynamic: case JVM_CONSTANT_Fieldref: case JVM_CONSTANT_Methodref: case JVM_CONSTANT_InterfaceMethodref: { @@ -2213,11 +2196,12 @@ TRAPS) { typeArrayHandle nullHandle; int length = methods()->length(); - // If JVMTI original method ordering is enabled we have to + // If JVMTI original method ordering or sharing is enabled we have to // remember the original class file ordering. // We temporarily use the vtable_index field in the methodOop to store the // class file index, so we can read in after calling qsort. - if (JvmtiExport::can_maintain_original_method_order()) { + // Put the method ordering in the shared archive. 
+ if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) { for (int index = 0; index < length; index++) { methodOop m = methodOop(methods->obj_at(index)); assert(!m->valid_vtable_index(), "vtable index should not be set"); @@ -2231,8 +2215,9 @@ methods_parameter_annotations(), methods_default_annotations()); - // If JVMTI original method ordering is enabled construct int array remembering the original ordering - if (JvmtiExport::can_maintain_original_method_order()) { + // If JVMTI original method ordering or sharing is enabled construct int + // array remembering the original ordering + if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) { typeArrayOop new_ordering = oopFactory::new_permanent_intArray(length, CHECK_(nullHandle)); typeArrayHandle method_ordering(THREAD, new_ordering); for (int index = 0; index < length; index++) { @@ -2783,7 +2768,6 @@ } } - if (AllowTransitionalJSR292 && word_sig_index == 0) return; if (word_sig_index == 0) THROW_MSG(vmSymbols::java_lang_VirtualMachineError(), "missing I or J signature (for vmentry) in java.lang.invoke.MethodHandle"); @@ -2823,7 +2807,6 @@ } } - if (AllowTransitionalJSR292 && !found_vmentry) return; if (!found_vmentry) THROW_MSG(vmSymbols::java_lang_VirtualMachineError(), "missing vmentry byte field in java.lang.invoke.MethodHandle"); @@ -3194,15 +3177,6 @@ if (EnableInvokeDynamic && class_name == vmSymbols::java_lang_invoke_MethodHandle() && class_loader.is_null()) { java_lang_invoke_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle)); } - if (AllowTransitionalJSR292 && - EnableInvokeDynamic && class_name == vmSymbols::java_dyn_MethodHandle() && class_loader.is_null()) { - java_lang_invoke_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle)); - } - if (AllowTransitionalJSR292 && - EnableInvokeDynamic && class_name == vmSymbols::sun_dyn_MethodHandleImpl() && class_loader.is_null()) { - // allow vmentry field in MethodHandleImpl also - java_lang_invoke_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle)); - } // Add a fake "discovered" field if it is not present // for compatibility with earlier jdk's.
--- a/src/share/vm/classfile/classFileStream.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/classFileStream.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/classLoader.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/classLoader.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/classLoader.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/classLoader.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/dictionary.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/dictionary.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/dictionary.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/dictionary.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/javaAssertions.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/javaAssertions.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/javaClasses.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/javaClasses.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -67,28 +67,6 @@ return ik->find_local_field(name_symbol, signature_symbol, fd); } -static bool find_hacked_field(instanceKlass* ik, - Symbol* name_symbol, Symbol* signature_symbol, - fieldDescriptor* fd, - bool allow_super = false) { - bool found = find_field(ik, name_symbol, signature_symbol, fd, allow_super); - if (!found && AllowTransitionalJSR292) { - Symbol* backup_sig = SystemDictionary::find_backup_signature(signature_symbol); - if (backup_sig != NULL) { - found = find_field(ik, name_symbol, backup_sig, fd, allow_super); - if (TraceMethodHandles) { - ResourceMark rm; - tty->print_cr("MethodHandles: %s.%s: backup for %s => %s%s", - ik->name()->as_C_string(), name_symbol->as_C_string(), - signature_symbol->as_C_string(), backup_sig->as_C_string(), - (found ? "" : " (NOT FOUND)")); - } - } - } - return found; -} -#define find_field find_hacked_field /* remove after AllowTransitionalJSR292 */ - // Helpful routine for computing field offsets at run time rather than hardcoding them static void compute_offset(int &dest_offset, @@ -1379,7 +1357,7 @@ }; -void java_lang_Throwable::fill_in_stack_trace(Handle throwable, TRAPS) { +void java_lang_Throwable::fill_in_stack_trace(Handle throwable, methodHandle method, TRAPS) { if (!StackTraceInThrowable) return; ResourceMark rm(THREAD); @@ -1396,6 +1374,16 @@ JavaThread* thread = (JavaThread*)THREAD; BacktraceBuilder bt(CHECK); + // If there is no Java frame just return the method that was being called + // with bci 0 + if (!thread->has_last_Java_frame()) { + if (max_depth >= 1 && method() != NULL) { + bt.push(method(), 0, CHECK); + set_backtrace(throwable(), bt.backtrace()); + } + return; + } + // Instead of using vframe directly, this version of fill_in_stack_trace // basically handles everything by hand. This significantly improved the // speed of this method call up to 28.5% on Solaris sparc. 27.1% on Windows. @@ -1453,32 +1441,41 @@ } } #ifdef ASSERT - assert(st_method() == method && st.bci() == bci, - "Wrong stack trace"); - st.next(); - // vframeStream::method isn't GC-safe so store off a copy - // of the methodOop in case we GC. - if (!st.at_end()) { - st_method = st.method(); - } + assert(st_method() == method && st.bci() == bci, + "Wrong stack trace"); + st.next(); + // vframeStream::method isn't GC-safe so store off a copy + // of the methodOop in case we GC. + if (!st.at_end()) { + st_method = st.method(); + } #endif + + // the format of the stacktrace will be: + // - 1 or more fillInStackTrace frames for the exception class (skipped) + // - 0 or more <init> methods for the exception class (skipped) + // - rest of the stack + if (!skip_fillInStackTrace_check) { - // check "fillInStackTrace" only once, so we negate the flag - // after the first time check. - skip_fillInStackTrace_check = true; - if (method->name() == vmSymbols::fillInStackTrace_name()) { + if ((method->name() == vmSymbols::fillInStackTrace_name() || + method->name() == vmSymbols::fillInStackTrace0_name()) && + throwable->is_a(method->method_holder())) { continue; } + else { + skip_fillInStackTrace_check = true; // gone past them all + } } - // skip <init> methods of the exceptions klass. If there is <init> methods - // that belongs to a superclass of the exception we are going to skipping - // them in stack trace. This is simlar to classic VM. 
if (!skip_throwableInit_check) { + assert(skip_fillInStackTrace_check, "logic error in backtrace filtering"); + + // skip <init> methods of the exception class and superclasses + // This is simlar to classic VM. if (method->name() == vmSymbols::object_initializer_name() && throwable->is_a(method->method_holder())) { continue; } else { - // if no "Throwable.init()" method found, we stop checking it next time. + // there are none or we've seen them all - either way stop checking skip_throwableInit_check = true; } } @@ -1490,7 +1487,7 @@ set_backtrace(throwable(), bt.backtrace()); } -void java_lang_Throwable::fill_in_stack_trace(Handle throwable) { +void java_lang_Throwable::fill_in_stack_trace(Handle throwable, methodHandle method) { // No-op if stack trace is disabled if (!StackTraceInThrowable) { return; @@ -1504,7 +1501,7 @@ PRESERVE_EXCEPTION_MARK; JavaThread* thread = JavaThread::active(); - fill_in_stack_trace(throwable, thread); + fill_in_stack_trace(throwable, method, thread); // ignore exceptions thrown during stack trace filling CLEAR_PENDING_EXCEPTION; } @@ -2333,7 +2330,6 @@ klassOop k = SystemDictionary::MethodHandle_klass(); if (k != NULL && EnableInvokeDynamic) { bool allow_super = false; - if (AllowTransitionalJSR292) allow_super = true; // temporary, to access java.dyn.MethodHandleImpl compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::java_lang_invoke_MethodType_signature(), allow_super); compute_offset(_vmtarget_offset, k, vmSymbols::vmtarget_name(), vmSymbols::object_signature(), allow_super); compute_offset(_vmentry_offset, k, vmSymbols::vmentry_name(), vmSymbols::machine_word_signature(), allow_super); @@ -2606,6 +2602,7 @@ // Support for java_lang_invoke_MethodTypeForm int java_lang_invoke_MethodTypeForm::_vmslots_offset; +int java_lang_invoke_MethodTypeForm::_vmlayout_offset; int java_lang_invoke_MethodTypeForm::_erasedType_offset; int java_lang_invoke_MethodTypeForm::_genericInvoker_offset; @@ -2613,6 +2610,7 @@ klassOop k = SystemDictionary::MethodTypeForm_klass(); if (k != NULL) { compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), true); + compute_optional_offset(_vmlayout_offset, k, vmSymbols::vmlayout_name(), vmSymbols::object_signature()); compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_lang_invoke_MethodType_signature(), true); compute_optional_offset(_genericInvoker_offset, k, vmSymbols::genericInvoker_name(), vmSymbols::java_lang_invoke_MethodHandle_signature(), true); if (_genericInvoker_offset == 0) _genericInvoker_offset = -1; // set to explicit "empty" value @@ -2621,9 +2619,31 @@ int java_lang_invoke_MethodTypeForm::vmslots(oop mtform) { assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); + assert(_vmslots_offset > 0, ""); return mtform->int_field(_vmslots_offset); } +oop java_lang_invoke_MethodTypeForm::vmlayout(oop mtform) { + assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); + assert(_vmlayout_offset > 0, ""); + return mtform->obj_field(_vmlayout_offset); +} + +oop java_lang_invoke_MethodTypeForm::init_vmlayout(oop mtform, oop cookie) { + assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); + oop previous = vmlayout(mtform); + if (previous != NULL) { + return previous; // someone else beat us to it + } + HeapWord* cookie_addr = (HeapWord*) mtform->obj_field_addr<oop>(_vmlayout_offset); + OrderAccess::storestore(); // make sure our copy is fully 
committed + previous = oopDesc::atomic_compare_exchange_oop(cookie, cookie_addr, previous); + if (previous != NULL) { + return previous; // someone else beat us to it + } + return cookie; +} + oop java_lang_invoke_MethodTypeForm::erasedType(oop mtform) { assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); return mtform->obj_field(_erasedType_offset);
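init_vmlayout() above publishes the layout cookie with an install-once compare-and-exchange, so every caller ends up seeing whichever value won the race. A minimal sketch of that pattern using std::atomic; the real code additionally issues an explicit storestore barrier and goes through the oop-aware atomic_compare_exchange_oop:

    #include <atomic>
    #include <cstdio>

    static std::atomic<const char*> _vmlayout(nullptr);

    // Install 'cookie' only if nothing has been published yet; always return
    // the value that ended up installed, whether ours or a racing thread's.
    const char* init_vmlayout(const char* cookie) {
      const char* previous = _vmlayout.load();
      if (previous != nullptr) return previous;                 // someone else beat us to it
      if (_vmlayout.compare_exchange_strong(previous, cookie)) {
        return cookie;                                          // we won the race
      }
      return previous;                                          // lost: keep the winner's value
    }

    int main() {
      std::printf("%s\n", init_vmlayout("layout-A"));  // layout-A
      std::printf("%s\n", init_vmlayout("layout-B"));  // still layout-A
      return 0;
    }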
--- a/src/share/vm/classfile/javaClasses.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/javaClasses.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -440,8 +440,8 @@ static void fill_in_stack_trace_of_preallocated_backtrace(Handle throwable); // Fill in current stack trace, can cause GC - static void fill_in_stack_trace(Handle throwable, TRAPS); - static void fill_in_stack_trace(Handle throwable); + static void fill_in_stack_trace(Handle throwable, methodHandle method, TRAPS); + static void fill_in_stack_trace(Handle throwable, methodHandle method = methodHandle()); // Programmatic access to stack trace static oop get_stack_trace_element(oop throwable, int index, TRAPS); static int get_stack_trace_depth(oop throwable, TRAPS); @@ -949,18 +949,19 @@ OP_CHECK_CAST = 0x2, // ref-to-ref conversion; requires a Class argument OP_PRIM_TO_PRIM = 0x3, // converts from one primitive to another OP_REF_TO_PRIM = 0x4, // unboxes a wrapper to produce a primitive - OP_PRIM_TO_REF = 0x5, // boxes a primitive into a wrapper (NYI) + OP_PRIM_TO_REF = 0x5, // boxes a primitive into a wrapper OP_SWAP_ARGS = 0x6, // swap arguments (vminfo is 2nd arg) OP_ROT_ARGS = 0x7, // rotate arguments (vminfo is displaced arg) OP_DUP_ARGS = 0x8, // duplicates one or more arguments (at TOS) OP_DROP_ARGS = 0x9, // remove one or more argument slots - OP_COLLECT_ARGS = 0xA, // combine one or more arguments into a varargs (NYI) + OP_COLLECT_ARGS = 0xA, // combine arguments using an auxiliary function OP_SPREAD_ARGS = 0xB, // expand in place a varargs array (of known size) - OP_FLYBY = 0xC, // operate first on reified argument list (NYI) - OP_RICOCHET = 0xD, // run an adapter chain on the return value (NYI) + OP_FOLD_ARGS = 0xC, // combine but do not remove arguments; prepend result + //OP_UNUSED_13 = 0xD, // unused code, perhaps for reified argument lists CONV_OP_LIMIT = 0xE, // limit of CONV_OP enumeration CONV_OP_MASK = 0xF00, // this nybble contains the conversion op field + CONV_TYPE_MASK = 0x0F, // fits T_ADDRESS and below CONV_VMINFO_MASK = 0x0FF, // LSB is reserved for JVM use CONV_VMINFO_SHIFT = 0, // position of bits in CONV_VMINFO_MASK CONV_OP_SHIFT = 8, // position of bits in CONV_OP_MASK @@ -1089,6 +1090,7 @@ private: static int _vmslots_offset; // number of argument slots needed + static int _vmlayout_offset; // object describing internal calling sequence static int _erasedType_offset; // erasedType = canonical MethodType static int _genericInvoker_offset; // genericInvoker = adapter for invokeGeneric @@ -1100,8 +1102,12 @@ static oop erasedType(oop mtform); static oop genericInvoker(oop mtform); + static oop vmlayout(oop mtform); + static oop init_vmlayout(oop mtform, oop cookie); + // Accessors for code generation: static int vmslots_offset_in_bytes() { return _vmslots_offset; } + static int vmlayout_offset_in_bytes() { return _vmlayout_offset; } static int erasedType_offset_in_bytes() { return _erasedType_offset; } static int genericInvoker_offset_in_bytes() { return _genericInvoker_offset; } };
--- a/src/share/vm/classfile/loaderConstraints.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/loaderConstraints.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/loaderConstraints.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/loaderConstraints.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/placeholders.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/placeholders.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/placeholders.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/placeholders.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/resolutionErrors.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/resolutionErrors.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/resolutionErrors.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/resolutionErrors.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/stackMapFrame.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/stackMapFrame.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -208,8 +208,10 @@ return true; } -bool StackMapFrame::is_assignable_to(const StackMapFrame* target, TRAPS) const { - if (_max_locals != target->max_locals() || _stack_size != target->stack_size()) { +bool StackMapFrame::is_assignable_to( + const StackMapFrame* target, bool is_exception_handler, TRAPS) const { + if (_max_locals != target->max_locals() || + _stack_size != target->stack_size()) { return false; } // Only need to compare type elements up to target->locals() or target->stack(). @@ -222,7 +224,7 @@ bool match_flags = (_flags | target->flags()) == target->flags(); return match_locals && match_stack && - (match_flags || has_flag_match_exception(target)); + (match_flags || (is_exception_handler && has_flag_match_exception(target))); } VerificationType StackMapFrame::pop_stack_ex(VerificationType type, TRAPS) {
--- a/src/share/vm/classfile/stackMapFrame.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/stackMapFrame.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -134,7 +134,8 @@ void copy_stack(const StackMapFrame* src); // Return true if this stack map frame is assignable to target. - bool is_assignable_to(const StackMapFrame* target, TRAPS) const; + bool is_assignable_to(const StackMapFrame* target, + bool is_exception_handler, TRAPS) const; // Push type into stack type array. inline void push_stack(VerificationType type, TRAPS) {
--- a/src/share/vm/classfile/stackMapTable.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/stackMapTable.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -98,10 +98,13 @@ bool result = true; StackMapFrame *stackmap_frame = _frame_array[frame_index]; if (match) { + // when checking handler target, match == true && update == false + bool is_exception_handler = !update; // Has direct control flow from last instruction, need to match the two // frames. result = frame->is_assignable_to( - stackmap_frame, CHECK_VERIFY_(frame->verifier(), false)); + stackmap_frame, is_exception_handler, + CHECK_VERIFY_(frame->verifier(), false)); } if (update) { // Use the frame in stackmap table as current frame
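The verifier change above only tolerates a flag mismatch when the target frame is an exception-handler entry, which the stack map table detects as match == true && update == false. A tiny sketch of the relaxed check, with the helper booleans assumed:

    #include <cstdio>

    // Relaxed frame-assignability check: a flag mismatch is only tolerated
    // when the target is an exception-handler entry.
    bool is_assignable(bool match_locals, bool match_stack, bool match_flags,
                       bool is_exception_handler, bool flag_match_exception) {
      return match_locals && match_stack &&
             (match_flags || (is_exception_handler && flag_match_exception));
    }

    int main() {
      std::printf("%d %d\n",
                  is_assignable(true, true, false, true,  true),   // 1: handler target, tolerated
                  is_assignable(true, true, false, false, true));  // 0: fall-through, rejected
      return 0;
    }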
--- a/src/share/vm/classfile/stackMapTable.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/stackMapTable.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/systemDictionary.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/systemDictionary.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1255,6 +1255,16 @@ methodHandle m(THREAD, methodOop(methods->obj_at(index2))); m()->link_method(m, CHECK_(nh)); } + if (JvmtiExport::has_redefined_a_class()) { + // Reinitialize vtable because RedefineClasses may have changed some + // entries in this vtable for super classes so the CDS vtable might + // point to old or obsolete entries. RedefineClasses doesn't fix up + // vtables in the shared system dictionary, only the main one. + // It also redefines the itable too so fix that too. + ResourceMark rm(THREAD); + ik->vtable()->initialize_vtable(false, CHECK_(nh)); + ik->itable()->initialize_itable(false, CHECK_(nh)); + } } if (TraceClassLoading) { @@ -1887,99 +1897,27 @@ 0 }; -Symbol* SystemDictionary::find_backup_symbol(Symbol* symbol, - const char* from_prefix, - const char* to_prefix) { - assert(AllowTransitionalJSR292, ""); // delete this subroutine - Symbol* backup_symbol = NULL; - size_t from_len = strlen(from_prefix); - if (strncmp((const char*) symbol->base(), from_prefix, from_len) != 0) - return NULL; - char buf[100]; - size_t to_len = strlen(to_prefix); - size_t tail_len = symbol->utf8_length() - from_len; - size_t new_len = to_len + tail_len; - guarantee(new_len < sizeof(buf), "buf too small"); - memcpy(buf, to_prefix, to_len); - memcpy(buf + to_len, symbol->base() + from_len, tail_len); - buf[new_len] = '\0'; - vmSymbols::SID backup_sid = vmSymbols::find_sid(buf); - if (backup_sid != vmSymbols::NO_SID) { - backup_symbol = vmSymbols::symbol_at(backup_sid); - } - return backup_symbol; -} - -Symbol* SystemDictionary::find_backup_class_name(Symbol* symbol) { - assert(AllowTransitionalJSR292, ""); // delete this subroutine - if (symbol == NULL) return NULL; - Symbol* backup_symbol = find_backup_symbol(symbol, "java/lang/invoke/", "java/dyn/"); // AllowTransitionalJSR292 ONLY - if (backup_symbol == NULL) - backup_symbol = find_backup_symbol(symbol, "java/dyn/", "sun/dyn/"); // AllowTransitionalJSR292 ONLY - return backup_symbol; -} - -Symbol* SystemDictionary::find_backup_signature(Symbol* symbol) { - assert(AllowTransitionalJSR292, ""); // delete this subroutine - if (symbol == NULL) return NULL; - return find_backup_symbol(symbol, "Ljava/lang/invoke/", "Ljava/dyn/"); -} - bool SystemDictionary::initialize_wk_klass(WKID id, int init_opt, TRAPS) { assert(id >= (int)FIRST_WKID && id < (int)WKID_LIMIT, "oob"); int info = wk_init_info[id - FIRST_WKID]; int sid = (info >> CEIL_LG_OPTION_LIMIT); Symbol* symbol = vmSymbols::symbol_at((vmSymbols::SID)sid); klassOop* klassp = &_well_known_klasses[id]; - bool pre_load = (init_opt < SystemDictionary::Opt); - bool try_load = true; + bool must_load = (init_opt < SystemDictionary::Opt); + bool try_load = true; if (init_opt == SystemDictionary::Opt_Kernel) { #ifndef KERNEL try_load = false; #endif //KERNEL } - Symbol* backup_symbol = NULL; // symbol to try if the current symbol fails - if (init_opt == SystemDictionary::Pre_JSR292) { - if (!EnableInvokeDynamic) try_load = false; // do not bother to load such classes - if (AllowTransitionalJSR292) { - backup_symbol = find_backup_class_name(symbol); - if (try_load && PreferTransitionalJSR292) { - while (backup_symbol != NULL) { - (*klassp) = resolve_or_null(backup_symbol, CHECK_0); // try backup early - if (TraceMethodHandles) { - ResourceMark rm; - tty->print_cr("MethodHandles: try backup first for %s => %s (%s)", - 
symbol->as_C_string(), backup_symbol->as_C_string(), - ((*klassp) == NULL) ? "no such class" : "backup load succeeded"); - } - if ((*klassp) != NULL) return true; - backup_symbol = find_backup_class_name(backup_symbol); // find next backup - } - } - } - } - if ((*klassp) != NULL) return true; - if (!try_load) return false; - while (symbol != NULL) { - bool must_load = (pre_load && (backup_symbol == NULL)); + if ((*klassp) == NULL && try_load) { if (must_load) { (*klassp) = resolve_or_fail(symbol, true, CHECK_0); // load required class } else { (*klassp) = resolve_or_null(symbol, CHECK_0); // load optional klass } - if ((*klassp) != NULL) return true; - // Go around again. Example of long backup sequence: - // java.lang.invoke.MemberName, java.dyn.MemberName, sun.dyn.MemberName, ONLY if AllowTransitionalJSR292 - if (TraceMethodHandles && (backup_symbol != NULL)) { - ResourceMark rm; - tty->print_cr("MethodHandles: backup for %s => %s", - symbol->as_C_string(), backup_symbol->as_C_string()); - } - symbol = backup_symbol; - if (AllowTransitionalJSR292) - backup_symbol = find_backup_class_name(symbol); } - return false; + return ((*klassp) != NULL); } void SystemDictionary::initialize_wk_klasses_until(WKID limit_id, WKID &start_id, TRAPS) { @@ -2408,9 +2346,7 @@ // Must create lots of stuff here, but outside of the SystemDictionary lock. if (THREAD->is_Compiler_thread()) return NULL; // do not attempt from within compiler - bool for_invokeGeneric = (name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name)); - if (AllowInvokeForInvokeGeneric && name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name)) - for_invokeGeneric = true; + bool for_invokeGeneric = (name_id != vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name)); bool found_on_bcp = false; Handle mt = find_method_handle_type(signature, accessing_klass, for_invokeGeneric, @@ -2425,8 +2361,15 @@ spe = invoke_method_table()->find_entry(index, hash, signature, name_id); if (spe == NULL) spe = invoke_method_table()->add_entry(index, hash, signature, name_id); - if (spe->property_oop() == NULL) + if (spe->property_oop() == NULL) { spe->set_property_oop(m()); + // Link m to his method type, if it is suitably generic. 
+ oop mtform = java_lang_invoke_MethodType::form(mt()); + if (mtform != NULL && mt() == java_lang_invoke_MethodTypeForm::erasedType(mtform) + && java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() > 0) { + java_lang_invoke_MethodTypeForm::init_vmlayout(mtform, m()); + } + } } else { non_cached_result = m; } @@ -2497,14 +2440,10 @@ JavaCallArguments args(Handle(THREAD, rt())); args.push_oop(pts()); JavaValue result(T_OBJECT); - Symbol* findMethodHandleType_signature = vmSymbols::findMethodHandleType_signature(); - if (AllowTransitionalJSR292 && SystemDictionaryHandles::MethodType_klass()->name() == vmSymbols::java_dyn_MethodType()) { - findMethodHandleType_signature = vmSymbols::findMethodHandleType_TRANS_signature(); - } JavaCalls::call_static(&result, SystemDictionary::MethodHandleNatives_klass(), vmSymbols::findMethodHandleType_name(), - findMethodHandleType_signature, + vmSymbols::findMethodHandleType_signature(), &args, CHECK_(empty)); Handle method_type(THREAD, (oop) result.get_jobject()); @@ -2512,14 +2451,10 @@ // call java.lang.invoke.MethodHandleNatives::notifyGenericMethodType(MethodType) -> void JavaCallArguments args(Handle(THREAD, method_type())); JavaValue no_result(T_VOID); - Symbol* notifyGenericMethodType_signature = vmSymbols::notifyGenericMethodType_signature(); - if (AllowTransitionalJSR292 && SystemDictionaryHandles::MethodType_klass()->name() == vmSymbols::java_dyn_MethodType()) { - notifyGenericMethodType_signature = vmSymbols::notifyGenericMethodType_TRANS_signature(); - } JavaCalls::call_static(&no_result, SystemDictionary::MethodHandleNatives_klass(), vmSymbols::notifyGenericMethodType_name(), - notifyGenericMethodType_signature, + vmSymbols::notifyGenericMethodType_signature(), &args, THREAD); if (HAS_PENDING_EXCEPTION) { // If the notification fails, just kill it. 
@@ -2568,14 +2503,10 @@ args.push_oop(name()); args.push_oop(type()); JavaValue result(T_OBJECT); - Symbol* linkMethodHandleConstant_signature = vmSymbols::linkMethodHandleConstant_signature(); - if (AllowTransitionalJSR292 && SystemDictionaryHandles::MethodHandle_klass()->name() == vmSymbols::java_dyn_MethodHandle()) { - linkMethodHandleConstant_signature = vmSymbols::linkMethodHandleConstant_TRANS_signature(); - } JavaCalls::call_static(&result, SystemDictionary::MethodHandleNatives_klass(), vmSymbols::linkMethodHandleConstant_name(), - linkMethodHandleConstant_signature, + vmSymbols::linkMethodHandleConstant_signature(), &args, CHECK_(empty)); return Handle(THREAD, (oop) result.get_jobject()); } @@ -2606,17 +2537,10 @@ args.push_oop(caller_mname()); args.push_int(caller_bci); JavaValue result(T_OBJECT); - Symbol* makeDynamicCallSite_signature = vmSymbols::makeDynamicCallSite_signature(); - if (AllowTransitionalJSR292 && SystemDictionaryHandles::MethodHandleNatives_klass()->name() == vmSymbols::sun_dyn_MethodHandleNatives()) { - makeDynamicCallSite_signature = vmSymbols::makeDynamicCallSite_TRANS_signature(); - } - if (AllowTransitionalJSR292 && SystemDictionaryHandles::MethodHandleNatives_klass()->name() == vmSymbols::java_dyn_MethodHandleNatives()) { - makeDynamicCallSite_signature = vmSymbols::makeDynamicCallSite_TRANS2_signature(); - } JavaCalls::call_static(&result, SystemDictionary::MethodHandleNatives_klass(), vmSymbols::makeDynamicCallSite_name(), - makeDynamicCallSite_signature, + vmSymbols::makeDynamicCallSite_signature(), &args, CHECK_(empty)); oop call_site_oop = (oop) result.get_jobject(); assert(call_site_oop->is_oop() @@ -2697,28 +2621,10 @@ argument_info_result = argument_info; // return argument_info to caller return bsm; } - // else null BSM; fall through - } else if (tag.is_name_and_type()) { - // JSR 292 EDR does not have JVM_CONSTANT_InvokeDynamic - // a bare name&type defaults its BSM to null, so fall through... } else { ShouldNotReachHere(); // verifier does not allow this } - // Fall through to pick up the per-class bootstrap method. - // This mechanism may go away in the PFD. - assert(AllowTransitionalJSR292, "else the verifier should have stopped us already"); - argument_info_result = empty; // return no argument_info to caller - oop bsm_oop = instanceKlass::cast(caller_method->method_holder())->bootstrap_method(); - if (bsm_oop != NULL) { - if (TraceMethodHandles) { - tty->print_cr("bootstrap method for "PTR_FORMAT" registered as "PTR_FORMAT":", - (intptr_t) caller_method(), (intptr_t) bsm_oop); - } - assert(bsm_oop->is_oop(), "must be sane"); - return Handle(THREAD, bsm_oop); - } - return empty; }
--- a/src/share/vm/classfile/systemDictionary.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/systemDictionary.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -146,7 +146,6 @@ /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \ template(MethodHandle_klass, java_lang_invoke_MethodHandle, Pre_JSR292) \ template(MemberName_klass, java_lang_invoke_MemberName, Pre_JSR292) \ - template(MethodHandleImpl_klass, sun_dyn_MethodHandleImpl, Opt) /* AllowTransitionalJSR292 ONLY */ \ template(MethodHandleNatives_klass, java_lang_invoke_MethodHandleNatives, Pre_JSR292) \ template(AdapterMethodHandle_klass, java_lang_invoke_AdapterMethodHandle, Pre_JSR292) \ template(BoundMethodHandle_klass, java_lang_invoke_BoundMethodHandle, Pre_JSR292) \ @@ -154,7 +153,6 @@ template(MethodType_klass, java_lang_invoke_MethodType, Pre_JSR292) \ template(MethodTypeForm_klass, java_lang_invoke_MethodTypeForm, Pre_JSR292) \ template(WrongMethodTypeException_klass, java_lang_invoke_WrongMethodTypeException, Pre_JSR292) \ - template(Linkage_klass, java_lang_invoke_Linkage, Opt) /* AllowTransitionalJSR292 ONLY */ \ template(CallSite_klass, java_lang_invoke_CallSite, Pre_JSR292) \ /* Note: MethodHandle must be first, and CallSite last in group */ \ \ @@ -422,8 +420,6 @@ initialize_wk_klasses_until((WKID) limit, start_id, THREAD); } - static Symbol* find_backup_symbol(Symbol* symbol, const char* from_prefix, const char* to_prefix); - public: #define WK_KLASS_DECLARE(name, ignore_symbol, option) \ static klassOop name() { return check_klass_##option(_well_known_klasses[WK_KLASS_ENUM_NAME(name)]); } @@ -445,9 +441,6 @@ static void load_abstract_ownable_synchronizer_klass(TRAPS); - static Symbol* find_backup_class_name(Symbol* class_name_symbol); - static Symbol* find_backup_signature(Symbol* signature_symbol); - private: // Tells whether ClassLoader.loadClassInternal is present static bool has_loadClassInternal() { return _has_loadClassInternal; }
--- a/src/share/vm/classfile/verificationType.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/verificationType.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/verificationType.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/verificationType.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/verifier.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/verifier.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1671,19 +1671,13 @@ VerificationType::long_type(), VerificationType::long2_type(), CHECK_VERIFY(this)); } else if (tag.is_method_handle()) { - Symbol* methodHandle_name = vmSymbols::java_lang_invoke_MethodHandle(); - if (AllowTransitionalJSR292 && !Universe::is_bootstrapping()) - methodHandle_name = SystemDictionaryHandles::MethodHandle_klass()->name(); current_frame->push_stack( VerificationType::reference_type( - methodHandle_name), CHECK_VERIFY(this)); + vmSymbols::java_lang_invoke_MethodHandle()), CHECK_VERIFY(this)); } else if (tag.is_method_type()) { - Symbol* methodType_name = vmSymbols::java_lang_invoke_MethodType(); - if (AllowTransitionalJSR292 && !Universe::is_bootstrapping()) - methodType_name = SystemDictionaryHandles::MethodType_klass()->name(); current_frame->push_stack( VerificationType::reference_type( - methodType_name), CHECK_VERIFY(this)); + vmSymbols::java_lang_invoke_MethodType()), CHECK_VERIFY(this)); } else { verify_error(bci, "Invalid index in ldc"); return; @@ -1950,8 +1944,7 @@ unsigned int types = (opcode == Bytecodes::_invokeinterface ? 1 << JVM_CONSTANT_InterfaceMethodref : opcode == Bytecodes::_invokedynamic - ? ((AllowTransitionalJSR292 ? 1 << JVM_CONSTANT_NameAndType : 0) - |1 << JVM_CONSTANT_InvokeDynamic) + ? 1 << JVM_CONSTANT_InvokeDynamic : 1 << JVM_CONSTANT_Methodref); verify_cp_type(index, cp, types, CHECK_VERIFY(this));
--- a/src/share/vm/classfile/verifier.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/verifier.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/vmSymbols.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/classfile/vmSymbols.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -245,44 +245,15 @@ template(java_lang_invoke_AdapterMethodHandle, "java/lang/invoke/AdapterMethodHandle") \ template(java_lang_invoke_BoundMethodHandle, "java/lang/invoke/BoundMethodHandle") \ template(java_lang_invoke_DirectMethodHandle, "java/lang/invoke/DirectMethodHandle") \ - /* temporary transitional public names from 6839872: */ \ - template(java_dyn_InvokeDynamic, "java/dyn/InvokeDynamic") /* AllowTransitionalJSR292 ONLY */ \ - template(java_dyn_Linkage, "java/dyn/Linkage") /* AllowTransitionalJSR292 ONLY */ \ - template(java_dyn_CallSite, "java/dyn/CallSite") /* AllowTransitionalJSR292 ONLY */ \ - template(java_dyn_MethodHandle, "java/dyn/MethodHandle") /* AllowTransitionalJSR292 ONLY */ \ - template(java_dyn_MethodType, "java/dyn/MethodType") /* AllowTransitionalJSR292 ONLY */ \ - template(java_dyn_WrongMethodTypeException, "java/dyn/WrongMethodTypeException") /* AllowTransitionalJSR292 ONLY */ \ - template(java_dyn_MethodType_signature, "Ljava/dyn/MethodType;") /* AllowTransitionalJSR292 ONLY */ \ - template(java_dyn_MethodHandle_signature, "Ljava/dyn/MethodHandle;") /* AllowTransitionalJSR292 ONLY */ \ - /* temporary transitional internal names from 6839872: */ \ - template(java_dyn_MethodTypeForm, "java/dyn/MethodTypeForm") /* AllowTransitionalJSR292 ONLY */ \ - template(java_dyn_MethodTypeForm_signature, "Ljava/dyn/MethodTypeForm;") /* AllowTransitionalJSR292 ONLY */ \ - template(java_dyn_MemberName, "java/dyn/MemberName") /* AllowTransitionalJSR292 ONLY */ \ - template(java_dyn_MethodHandleNatives, "java/dyn/MethodHandleNatives") /* AllowTransitionalJSR292 ONLY */ \ - template(java_dyn_AdapterMethodHandle, "java/dyn/AdapterMethodHandle") /* AllowTransitionalJSR292 ONLY */ \ - template(java_dyn_BoundMethodHandle, "java/dyn/BoundMethodHandle") /* AllowTransitionalJSR292 ONLY */ \ - template(java_dyn_DirectMethodHandle, "java/dyn/DirectMethodHandle") /* AllowTransitionalJSR292 ONLY */ \ - /* temporary transitional internal names from EDR: */ \ - template(sun_dyn_MemberName, "sun/dyn/MemberName") /* AllowTransitionalJSR292 ONLY */ \ - template(sun_dyn_MethodHandleImpl, "sun/dyn/MethodHandleImpl") /* AllowTransitionalJSR292 ONLY */ \ - template(sun_dyn_MethodHandleNatives, "sun/dyn/MethodHandleNatives") /* AllowTransitionalJSR292 ONLY */ \ - template(sun_dyn_AdapterMethodHandle, "sun/dyn/AdapterMethodHandle") /* AllowTransitionalJSR292 ONLY */ \ - template(sun_dyn_BoundMethodHandle, "sun/dyn/BoundMethodHandle") /* AllowTransitionalJSR292 ONLY */ \ - template(sun_dyn_DirectMethodHandle, "sun/dyn/DirectMethodHandle") /* AllowTransitionalJSR292 ONLY */ \ /* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */ \ template(findMethodHandleType_name, "findMethodHandleType") \ template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \ - template(findMethodHandleType_TRANS_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") /* AllowTransitionalJSR292 ONLY */ \ template(notifyGenericMethodType_name, "notifyGenericMethodType") \ template(notifyGenericMethodType_signature, "(Ljava/lang/invoke/MethodType;)V") \ - template(notifyGenericMethodType_TRANS_signature, "(Ljava/dyn/MethodType;)V") /* AllowTransitionalJSR292 ONLY */ \ template(linkMethodHandleConstant_name, "linkMethodHandleConstant") \ template(linkMethodHandleConstant_signature, 
"(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/lang/invoke/MethodHandle;") \ - template(linkMethodHandleConstant_TRANS_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/dyn/MethodHandle;") /* AllowTransitionalJSR292 ONLY */ \ template(makeDynamicCallSite_name, "makeDynamicCallSite") \ template(makeDynamicCallSite_signature, "(Ljava/lang/invoke/MethodHandle;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/Object;Ljava/lang/invoke/MemberName;I)Ljava/lang/invoke/CallSite;") \ - template(makeDynamicCallSite_TRANS_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Lsun/dyn/MemberName;I)Ljava/dyn/CallSite;") /* AllowTransitionalJSR292 ONLY */ \ - template(makeDynamicCallSite_TRANS2_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Ljava/dyn/MemberName;I)Ljava/dyn/CallSite;") /* AllowTransitionalJSR292 ONLY */ \ NOT_LP64( do_alias(machine_word_signature, int_signature) ) \ LP64_ONLY( do_alias(machine_word_signature, long_signature) ) \ \ @@ -330,6 +301,7 @@ template(dispatch_name, "dispatch") \ template(getSystemClassLoader_name, "getSystemClassLoader") \ template(fillInStackTrace_name, "fillInStackTrace") \ + template(fillInStackTrace0_name, "fillInStackTrace0") \ template(getCause_name, "getCause") \ template(initCause_name, "initCause") \ template(setProperty_name, "setProperty") \ @@ -369,6 +341,7 @@ template(vmtarget_name, "vmtarget") \ template(vmentry_name, "vmentry") \ template(vmslots_name, "vmslots") \ + template(vmlayout_name, "vmlayout") \ template(vmindex_name, "vmindex") \ template(vmargslot_name, "vmargslot") \ template(flags_name, "flags") \ @@ -421,6 +394,7 @@ template(void_signature, "V") \ template(byte_array_signature, "[B") \ template(char_array_signature, "[C") \ + template(int_array_signature, "[I") \ template(object_void_signature, "(Ljava/lang/Object;)V") \ template(object_int_signature, "(Ljava/lang/Object;)I") \ template(object_boolean_signature, "(Ljava/lang/Object;)Z") \ @@ -499,6 +473,13 @@ template(sun_management_ManagementFactory, "sun/management/ManagementFactory") \ template(sun_management_Sensor, "sun/management/Sensor") \ template(sun_management_Agent, "sun/management/Agent") \ + template(sun_management_GarbageCollectorImpl, "sun/management/GarbageCollectorImpl") \ + template(getGcInfoBuilder_name, "getGcInfoBuilder") \ + template(getGcInfoBuilder_signature, "()Lsun/management/GcInfoBuilder;") \ + template(com_sun_management_GcInfo, "com/sun/management/GcInfo") \ + template(com_sun_management_GcInfo_constructor_signature, "(Lsun/management/GcInfoBuilder;JJJ[Ljava/lang/management/MemoryUsage;[Ljava/lang/management/MemoryUsage;[Ljava/lang/Object;)V") \ + template(createGCNotification_name, "createGCNotification") \ + template(createGCNotification_signature, "(JLjava/lang/String;Ljava/lang/String;Ljava/lang/String;Lcom/sun/management/GcInfo;)V") \ template(createMemoryPoolMBean_name, "createMemoryPoolMBean") \ template(createMemoryManagerMBean_name, "createMemoryManagerMBean") \ template(createGarbageCollectorMBean_name, "createGarbageCollectorMBean") \ @@ -516,6 +497,7 @@ template(java_lang_management_MemoryPoolMXBean, "java/lang/management/MemoryPoolMXBean") \ template(java_lang_management_MemoryManagerMXBean, "java/lang/management/MemoryManagerMXBean") \ template(java_lang_management_GarbageCollectorMXBean,"java/lang/management/GarbageCollectorMXBean") \ + template(gcInfoBuilder_name, 
"gcInfoBuilder") \ template(createMemoryPool_name, "createMemoryPool") \ template(createMemoryManager_name, "createMemoryManager") \ template(createGarbageCollector_name, "createGarbageCollector") \ @@ -706,6 +688,10 @@ do_intrinsic(_checkIndex, java_nio_Buffer, checkIndex_name, int_int_signature, F_R) \ do_name( checkIndex_name, "checkIndex") \ \ + /* java/lang/ref/Reference */ \ + do_intrinsic(_Reference_get, java_lang_ref_Reference, get_name, void_object_signature, F_R) \ + \ + \ do_class(sun_misc_AtomicLongCSImpl, "sun/misc/AtomicLongCSImpl") \ do_intrinsic(_get_AtomicLong, sun_misc_AtomicLongCSImpl, get_name, void_long_signature, F_R) \ /* (symbols get_name and void_long_signature defined above) */ \ @@ -910,8 +896,6 @@ do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_object_array_object_signature, F_R) \ /* (symbols invoke_name and invoke_signature defined above) */ \ do_intrinsic(_checkSpreadArgument, java_lang_invoke_MethodHandleNatives, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) \ - do_intrinsic(_checkSpreadArgument_TRANS,sun_dyn_MethodHandleImpl, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) /* AllowTransitionalJSR292 ONLY */ \ - do_intrinsic(_checkSpreadArgument_TRANS2,java_dyn_MethodHandleNatives, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) /* AllowTransitionalJSR292 ONLY */ \ do_name( checkSpreadArgument_name, "checkSpreadArgument") \ do_name( checkSpreadArgument_signature, "(Ljava/lang/Object;I)V") \ do_intrinsic(_invokeExact, java_lang_invoke_MethodHandle, invokeExact_name, object_array_object_signature, F_RN) \
--- a/src/share/vm/code/codeBlob.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/code/codeBlob.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -152,6 +152,32 @@ } +void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) { + // Do not hold the CodeCache lock during name formatting. + assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub"); + + if (stub != NULL) { + char stub_id[256]; + assert(strlen(name1) + strlen(name2) < sizeof(stub_id), ""); + jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2); + if (PrintStubCode) { + tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub); + Disassembler::decode(stub->code_begin(), stub->code_end()); + } + Forte::register_stub(stub_id, stub->code_begin(), stub->code_end()); + + if (JvmtiExport::should_post_dynamic_code_generated()) { + const char* stub_name = name2; + if (name2[0] == '\0') stub_name = name1; + JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end()); + } + } + + // Track memory usage statistic after releasing CodeCache_lock + MemoryService::track_code_cache_memory_usage(); +} + + void CodeBlob::flush() { if (_oop_maps) { FREE_C_HEAP_ARRAY(unsigned char, _oop_maps); @@ -312,23 +338,7 @@ stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments); } - // Do not hold the CodeCache lock during name formatting. - if (stub != NULL) { - char stub_id[256]; - jio_snprintf(stub_id, sizeof(stub_id), "RuntimeStub - %s", stub_name); - if (PrintStubCode) { - tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, stub); - Disassembler::decode(stub->code_begin(), stub->code_end()); - } - Forte::register_stub(stub_id, stub->code_begin(), stub->code_end()); - - if (JvmtiExport::should_post_dynamic_code_generated()) { - JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end()); - } - } - - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); + trace_new_stub(stub, "RuntimeStub - ", stub_name); return stub; } @@ -340,6 +350,50 @@ return p; } +// operator new shared by all singletons: +void* SingletonBlob::operator new(size_t s, unsigned size) { + void* p = CodeCache::allocate(size); + if (!p) fatal("Initial size of CodeCache is too small"); + return p; +} + + +//---------------------------------------------------------------------------------------------------- +// Implementation of RicochetBlob + +RicochetBlob::RicochetBlob( + CodeBuffer* cb, + int size, + int bounce_offset, + int exception_offset, + int frame_size +) +: SingletonBlob("RicochetBlob", cb, sizeof(RicochetBlob), size, frame_size, (OopMapSet*) NULL) +{ + _bounce_offset = bounce_offset; + _exception_offset = exception_offset; +} + + +RicochetBlob* RicochetBlob::create( + CodeBuffer* cb, + int bounce_offset, + int exception_offset, + int frame_size) +{ + RicochetBlob* blob = NULL; + ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + unsigned int size = allocation_size(cb, sizeof(RicochetBlob)); + blob = new (size) 
RicochetBlob(cb, size, bounce_offset, exception_offset, frame_size); + } + + trace_new_stub(blob, "RicochetBlob"); + + return blob; +} + //---------------------------------------------------------------------------------------------------- // Implementation of DeoptimizationBlob @@ -386,34 +440,12 @@ frame_size); } - // Do not hold the CodeCache lock during name formatting. - if (blob != NULL) { - char blob_id[256]; - jio_snprintf(blob_id, sizeof(blob_id), "DeoptimizationBlob@" PTR_FORMAT, blob->code_begin()); - if (PrintStubCode) { - tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); - Disassembler::decode(blob->code_begin(), blob->code_end()); - } - Forte::register_stub(blob_id, blob->code_begin(), blob->code_end()); - - if (JvmtiExport::should_post_dynamic_code_generated()) { - JvmtiExport::post_dynamic_code_generated("DeoptimizationBlob", blob->code_begin(), blob->code_end()); - } - } - - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); + trace_new_stub(blob, "DeoptimizationBlob"); return blob; } -void* DeoptimizationBlob::operator new(size_t s, unsigned size) { - void* p = CodeCache::allocate(size); - if (!p) fatal("Initial size of CodeCache is too small"); - return p; -} - //---------------------------------------------------------------------------------------------------- // Implementation of UncommonTrapBlob @@ -441,33 +473,12 @@ blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size); } - // Do not hold the CodeCache lock during name formatting. - if (blob != NULL) { - char blob_id[256]; - jio_snprintf(blob_id, sizeof(blob_id), "UncommonTrapBlob@" PTR_FORMAT, blob->code_begin()); - if (PrintStubCode) { - tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); - Disassembler::decode(blob->code_begin(), blob->code_end()); - } - Forte::register_stub(blob_id, blob->code_begin(), blob->code_end()); - - if (JvmtiExport::should_post_dynamic_code_generated()) { - JvmtiExport::post_dynamic_code_generated("UncommonTrapBlob", blob->code_begin(), blob->code_end()); - } - } - - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); + trace_new_stub(blob, "UncommonTrapBlob"); return blob; } -void* UncommonTrapBlob::operator new(size_t s, unsigned size) { - void* p = CodeCache::allocate(size); - if (!p) fatal("Initial size of CodeCache is too small"); - return p; -} #endif // COMPILER2 @@ -498,33 +509,12 @@ blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size); } - // We do not need to hold the CodeCache lock during name formatting - if (blob != NULL) { - char blob_id[256]; - jio_snprintf(blob_id, sizeof(blob_id), "ExceptionBlob@" PTR_FORMAT, blob->code_begin()); - if (PrintStubCode) { - tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); - Disassembler::decode(blob->code_begin(), blob->code_end()); - } - Forte::register_stub(blob_id, blob->code_begin(), blob->code_end()); - - if (JvmtiExport::should_post_dynamic_code_generated()) { - JvmtiExport::post_dynamic_code_generated("ExceptionBlob", blob->code_begin(), blob->code_end()); - } - } - - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); + trace_new_stub(blob, "ExceptionBlob"); return blob; } -void* ExceptionBlob::operator new(size_t s, unsigned size) { - void* p = CodeCache::allocate(size); - if (!p) fatal("Initial size of CodeCache is too small"); - return p; -} #endif // COMPILER2 @@ -554,35 +544,12 @@ blob = new 
(size) SafepointBlob(cb, size, oop_maps, frame_size); } - // We do not need to hold the CodeCache lock during name formatting. - if (blob != NULL) { - char blob_id[256]; - jio_snprintf(blob_id, sizeof(blob_id), "SafepointBlob@" PTR_FORMAT, blob->code_begin()); - if (PrintStubCode) { - tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); - Disassembler::decode(blob->code_begin(), blob->code_end()); - } - Forte::register_stub(blob_id, blob->code_begin(), blob->code_end()); - - if (JvmtiExport::should_post_dynamic_code_generated()) { - JvmtiExport::post_dynamic_code_generated("SafepointBlob", blob->code_begin(), blob->code_end()); - } - } - - // Track memory usage statistic after releasing CodeCache_lock - MemoryService::track_code_cache_memory_usage(); + trace_new_stub(blob, "SafepointBlob"); return blob; } -void* SafepointBlob::operator new(size_t s, unsigned size) { - void* p = CodeCache::allocate(size); - if (!p) fatal("Initial size of CodeCache is too small"); - return p; -} - - //---------------------------------------------------------------------------------------------------- // Verification and printing
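The codeBlob.cpp hunks above replace five near-identical "format a name, optionally disassemble, register with Forte, post a JVMTI event, update memory stats" sequences with a single CodeBlob::trace_new_stub() helper. Below is a minimal standalone sketch of that refactoring pattern (duplicated post-construction bookkeeping pulled into one shared function). The names Stub and notify_new_stub and the printed messages are illustrative stand-ins, not HotSpot API.

#include <cstdio>
#include <string>

// Illustrative stand-in for a freshly generated code stub.
struct Stub {
    std::string name;
    const void* begin;
    const void* end;
};

// Shared bookkeeping that every stub-creating factory used to repeat inline:
// build the display name once, then hand it to each interested subsystem.
static void notify_new_stub(const Stub* stub, const char* name1, const char* name2 = "") {
    if (stub == nullptr) return;            // allocation may have failed
    char stub_id[256];
    std::snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    std::printf("decode   %s at %p..%p\n", stub_id, stub->begin, stub->end);   // ~PrintStubCode
    std::printf("register %s with profiler\n", stub_id);                        // ~Forte::register_stub
    std::printf("post dynamic-code-generated event for %s\n",
                name2[0] != '\0' ? name2 : name1);                               // ~JvmtiExport
}

int main() {
    static const char code[16] = {};
    Stub runtime_stub{"resolve_static_call", code, code + sizeof(code)};
    // Each factory now collapses to: allocate, then one call to the helper.
    notify_new_stub(&runtime_stub, "RuntimeStub - ", runtime_stub.name.c_str());
    Stub deopt_blob{"", code, code + sizeof(code)};
    notify_new_stub(&deopt_blob, "DeoptimizationBlob");
    return 0;
}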
--- a/src/share/vm/code/codeBlob.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/code/codeBlob.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -35,6 +35,7 @@ // Suptypes are: // nmethod : Compiled Java methods (include method that calls to native code) // RuntimeStub : Call to VM runtime methods +// RicochetBlob : Used for blocking MethodHandle adapters // DeoptimizationBlob : Used for deoptimizatation // ExceptionBlob : Used for stack unrolling // SafepointBlob : Used to handle illegal instruction exceptions @@ -95,12 +96,13 @@ void flush(); // Typing - virtual bool is_buffer_blob() const { return false; } - virtual bool is_nmethod() const { return false; } - virtual bool is_runtime_stub() const { return false; } - virtual bool is_deoptimization_stub() const { return false; } - virtual bool is_uncommon_trap_stub() const { return false; } - virtual bool is_exception_stub() const { return false; } + virtual bool is_buffer_blob() const { return false; } + virtual bool is_nmethod() const { return false; } + virtual bool is_runtime_stub() const { return false; } + virtual bool is_ricochet_stub() const { return false; } + virtual bool is_deoptimization_stub() const { return false; } + virtual bool is_uncommon_trap_stub() const { return false; } + virtual bool is_exception_stub() const { return false; } virtual bool is_safepoint_stub() const { return false; } virtual bool is_adapter_blob() const { return false; } virtual bool is_method_handles_adapter_blob() const { return false; } @@ -182,6 +184,9 @@ virtual void print_on(outputStream* st) const; virtual void print_value_on(outputStream* st) const; + // Deal with Disassembler, VTune, Forte, JvmtiExport, MemoryService. + static void trace_new_stub(CodeBlob* blob, const char* name1, const char* name2 = ""); + // Print the comment associated with offset on stream, if there is one virtual void print_block_comment(outputStream* stream, address block_begin) { intptr_t offset = (intptr_t)(block_begin - code_begin()); @@ -318,7 +323,11 @@ class SingletonBlob: public CodeBlob { friend class VMStructs; - public: + + protected: + void* operator new(size_t s, unsigned size); + + public: SingletonBlob( const char* name, CodeBuffer* cb, @@ -341,6 +350,50 @@ //---------------------------------------------------------------------------------------------------- +// RicochetBlob +// Holds an arbitrary argument list indefinitely while Java code executes recursively. 
+ +class RicochetBlob: public SingletonBlob { + friend class VMStructs; + private: + + int _bounce_offset; + int _exception_offset; + + // Creation support + RicochetBlob( + CodeBuffer* cb, + int size, + int bounce_offset, + int exception_offset, + int frame_size + ); + + public: + // Creation + static RicochetBlob* create( + CodeBuffer* cb, + int bounce_offset, + int exception_offset, + int frame_size + ); + + // Typing + bool is_ricochet_stub() const { return true; } + + // GC for args + void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ } + + address bounce_addr() const { return code_begin() + _bounce_offset; } + address exception_addr() const { return code_begin() + _exception_offset; } + bool returns_to_bounce_addr(address pc) const { + address bounce_pc = bounce_addr(); + return (pc == bounce_pc || (pc + frame::pc_return_offset) == bounce_pc); + } +}; + + +//---------------------------------------------------------------------------------------------------- // DeoptimizationBlob class DeoptimizationBlob: public SingletonBlob { @@ -363,8 +416,6 @@ int frame_size ); - void* operator new(size_t s, unsigned size); - public: // Creation static DeoptimizationBlob* create( @@ -378,7 +429,6 @@ // Typing bool is_deoptimization_stub() const { return true; } - const DeoptimizationBlob *as_deoptimization_stub() const { return this; } bool exception_address_is_unpack_entry(address pc) const { address unpack_pc = unpack(); return (pc == unpack_pc || (pc + frame::pc_return_offset) == unpack_pc); @@ -426,8 +476,6 @@ int frame_size ); - void* operator new(size_t s, unsigned size); - public: // Creation static UncommonTrapBlob* create( @@ -458,8 +506,6 @@ int frame_size ); - void* operator new(size_t s, unsigned size); - public: // Creation static ExceptionBlob* create( @@ -491,8 +537,6 @@ int frame_size ); - void* operator new(size_t s, unsigned size); - public: // Creation static SafepointBlob* create(
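The new RicochetBlob stores _bounce_offset and _exception_offset rather than absolute addresses, derives the addresses from code_begin() on demand, and accepts a return PC that sits frame::pc_return_offset bytes before the bounce point. A hedged sketch of that offset-plus-base idiom follows; BlobLike, kPcReturnOffset and the sample values are invented for illustration and are not HotSpot types.

#include <cassert>
#include <cstdint>

// Illustrative sketch: keep offsets into a code buffer and compute the
// interesting addresses from the base, so the entries stay valid wherever
// the buffer ends up in memory.
struct BlobLike {
    const uint8_t* code;        // base of the generated code
    int bounce_offset;          // offset of the "bounce" entry point
    int exception_offset;       // offset of the exception handler entry

    const uint8_t* bounce_addr()    const { return code + bounce_offset; }
    const uint8_t* exception_addr() const { return code + exception_offset; }

    // Assumed constant: how far a saved return PC sits before the target
    // on this (imaginary) platform; HotSpot uses frame::pc_return_offset.
    static constexpr int kPcReturnOffset = 4;

    bool returns_to_bounce_addr(const uint8_t* pc) const {
        const uint8_t* bounce_pc = bounce_addr();
        return pc == bounce_pc || pc + kPcReturnOffset == bounce_pc;
    }
};

int main() {
    static uint8_t buffer[64] = {};
    BlobLike blob{buffer, 16, 32};
    assert(blob.bounce_addr() == buffer + 16);
    assert(blob.returns_to_bounce_addr(buffer + 16));   // exact hit
    assert(blob.returns_to_bounce_addr(buffer + 12));   // pc + return offset hit
    assert(!blob.returns_to_bounce_addr(buffer + 32));
    return 0;
}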
--- a/src/share/vm/code/codeCache.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/code/codeCache.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -796,6 +796,7 @@ int nmethodCount = 0; int runtimeStubCount = 0; int adapterCount = 0; + int ricochetStubCount = 0; int deoptimizationStubCount = 0; int uncommonTrapStubCount = 0; int bufferBlobCount = 0; @@ -840,6 +841,8 @@ } } else if (cb->is_runtime_stub()) { runtimeStubCount++; + } else if (cb->is_ricochet_stub()) { + ricochetStubCount++; } else if (cb->is_deoptimization_stub()) { deoptimizationStubCount++; } else if (cb->is_uncommon_trap_stub()) { @@ -876,6 +879,7 @@ tty->print_cr("runtime_stubs: %d",runtimeStubCount); tty->print_cr("adapters: %d",adapterCount); tty->print_cr("buffer blobs: %d",bufferBlobCount); + tty->print_cr("ricochet_stubs: %d",ricochetStubCount); tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount); tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount); tty->print_cr("\nnmethod size distribution (non-zombie java)"); @@ -964,3 +968,14 @@ nof_blobs(), nof_nmethods(), nof_adapters(), unallocated_capacity(), largest_free_block()); } + +size_t CodeCache::largest_free_block() { + // This is called both with and without CodeCache_lock held so + // handle both cases. + if (CodeCache_lock->owned_by_self()) { + return _heap->largest_free_block(); + } else { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + return _heap->largest_free_block(); + } +}
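CodeCache::largest_free_block() is now reached from both locked and unlocked paths, so it takes CodeCache_lock only when the caller does not already own it. A self-contained sketch of that "lock unless already owned" idiom is below; OwnedLock, cache_lock and free_block are stand-ins for HotSpot's Mutex::owned_by_self() machinery, and the single-threaded demo glosses over the memory-ordering care a real implementation needs.

#include <cassert>
#include <cstddef>
#include <mutex>
#include <thread>

// Minimal stand-in for a lock that knows whether the current thread owns it.
class OwnedLock {
    std::mutex mtx_;
    std::thread::id owner_;
public:
    void lock()   { mtx_.lock(); owner_ = std::this_thread::get_id(); }
    void unlock() { owner_ = std::thread::id(); mtx_.unlock(); }
    bool owned_by_self() const { return owner_ == std::this_thread::get_id(); }
};

static OwnedLock cache_lock;
static size_t free_block = 4096;   // pretend heap statistic

// Called from both locked and unlocked contexts: only take the lock if the
// caller does not already hold it, mirroring the largest_free_block() change.
size_t largest_free_block() {
    if (cache_lock.owned_by_self()) {
        return free_block;
    } else {
        std::lock_guard<OwnedLock> guard(cache_lock);
        return free_block;
    }
}

int main() {
    assert(largest_free_block() == 4096);   // unlocked caller
    cache_lock.lock();
    assert(largest_free_block() == 4096);   // caller already holds the lock
    cache_lock.unlock();
    return 0;
}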
--- a/src/share/vm/code/codeCache.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/code/codeCache.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -160,7 +160,7 @@ static size_t capacity() { return _heap->capacity(); } static size_t max_capacity() { return _heap->max_capacity(); } static size_t unallocated_capacity() { return _heap->unallocated_capacity(); } - static size_t largest_free_block() { return _heap->largest_free_block(); } + static size_t largest_free_block(); static bool needs_flushing() { return largest_free_block() < CodeCacheFlushingMinimumFreeSpace; } static bool needs_cache_clean() { return _needs_cache_clean; }
--- a/src/share/vm/code/compiledIC.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/code/compiledIC.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/code/compiledIC.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/code/compiledIC.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/code/dependencies.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/code/dependencies.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/code/icBuffer.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/code/icBuffer.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/code/nmethod.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/code/nmethod.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1810,7 +1810,7 @@ void maybe_print(oop* p) { if (_print_nm == NULL) return; if (!_detected_scavenge_root) _print_nm->print_on(tty, "new scavenge root"); - tty->print_cr(""PTR_FORMAT"[offset=%d] detected non-perm oop "PTR_FORMAT" (found at "PTR_FORMAT")", + tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")", _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm), (intptr_t)(*p), (intptr_t)p); (*p)->print(); @@ -2311,7 +2311,7 @@ _nm->print_nmethod(true); _ok = false; } - tty->print_cr("*** non-perm oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)", + tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)", (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm)); (*p)->print(); } @@ -2324,7 +2324,7 @@ DebugScavengeRoot debug_scavenge_root(this); oops_do(&debug_scavenge_root); if (!debug_scavenge_root.ok()) - fatal("found an unadvertised bad non-perm oop in the code cache"); + fatal("found an unadvertised bad scavengable oop in the code cache"); } assert(scavenge_root_not_marked(), ""); }
--- a/src/share/vm/code/nmethod.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/code/nmethod.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -109,7 +109,7 @@ class nmethod : public CodeBlob { friend class VMStructs; friend class NMethodSweeper; - friend class CodeCache; // non-perm oops + friend class CodeCache; // scavengable oops private: // Shared fields for all nmethod's methodOop _method; @@ -466,17 +466,17 @@ bool is_at_poll_return(address pc); bool is_at_poll_or_poll_return(address pc); - // Non-perm oop support + // Scavengable oop support bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; } protected: - enum { npl_on_list = 0x01, npl_marked = 0x10 }; - void set_on_scavenge_root_list() { _scavenge_root_state = npl_on_list; } + enum { sl_on_list = 0x01, sl_marked = 0x10 }; + void set_on_scavenge_root_list() { _scavenge_root_state = sl_on_list; } void clear_on_scavenge_root_list() { _scavenge_root_state = 0; } // assertion-checking and pruning logic uses the bits of _scavenge_root_state #ifndef PRODUCT - void set_scavenge_root_marked() { _scavenge_root_state |= npl_marked; } - void clear_scavenge_root_marked() { _scavenge_root_state &= ~npl_marked; } - bool scavenge_root_not_marked() { return (_scavenge_root_state &~ npl_on_list) == 0; } + void set_scavenge_root_marked() { _scavenge_root_state |= sl_marked; } + void clear_scavenge_root_marked() { _scavenge_root_state &= ~sl_marked; } + bool scavenge_root_not_marked() { return (_scavenge_root_state &~ sl_on_list) == 0; } // N.B. there is no positive marked query, and we only use the not_marked query for asserts. #endif //PRODUCT nmethod* scavenge_root_link() const { return _scavenge_root_link; }
--- a/src/share/vm/code/relocInfo.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/code/relocInfo.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -472,20 +472,14 @@ return itr._rh; } - -static inline bool is_index(intptr_t index) { - return 0 < index && index < os::vm_page_size(); -} - - int32_t Relocation::runtime_address_to_index(address runtime_address) { - assert(!is_index((intptr_t)runtime_address), "must not look like an index"); + assert(!is_reloc_index((intptr_t)runtime_address), "must not look like an index"); if (runtime_address == NULL) return 0; StubCodeDesc* p = StubCodeDesc::desc_for(runtime_address); if (p != NULL && p->begin() == runtime_address) { - assert(is_index(p->index()), "there must not be too many stubs"); + assert(is_reloc_index(p->index()), "there must not be too many stubs"); return (int32_t)p->index(); } else { // Known "miscellaneous" non-stub pointers: @@ -506,7 +500,7 @@ address Relocation::index_to_runtime_address(int32_t index) { if (index == 0) return NULL; - if (is_index(index)) { + if (is_reloc_index(index)) { StubCodeDesc* p = StubCodeDesc::desc_for_index(index); assert(p != NULL, "there must be a stub for this index"); return p->begin(); @@ -634,7 +628,7 @@ #ifndef _LP64 p = pack_1_int_to(p, index); #else - if (is_index(index)) { + if (is_reloc_index(index)) { p = pack_2_ints_to(p, index, 0); } else { jlong t = (jlong) _target; @@ -642,7 +636,7 @@ int32_t hi = high(t); p = pack_2_ints_to(p, lo, hi); DEBUG_ONLY(jlong t1 = jlong_from(hi, lo)); - assert(!is_index(t1) && (address) t1 == _target, "not symmetric"); + assert(!is_reloc_index(t1) && (address) t1 == _target, "not symmetric"); } #endif /* _LP64 */ dest->set_locs_end((relocInfo*) p); @@ -656,7 +650,7 @@ int32_t lo, hi; unpack_2_ints(lo, hi); jlong t = jlong_from(hi, lo);; - if (is_index(t)) { + if (is_reloc_index(t)) { _target = index_to_runtime_address(t); } else { _target = (address) t;
--- a/src/share/vm/code/relocInfo.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/code/relocInfo.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -703,6 +703,10 @@ assert(datalen()==0 || type()==relocInfo::none, "no data here"); } + static bool is_reloc_index(intptr_t index) { + return 0 < index && index < os::vm_page_size(); + } + protected: // Helper functions for pack_data_to() and unpack_data(). @@ -1127,6 +1131,12 @@ return rh; } + // Some address looking values aren't safe to treat as relocations + // and should just be treated as constants. + static bool can_be_relocated(address target) { + return target != NULL && !is_reloc_index((intptr_t)target); + } + private: address _target; // address in runtime
--- a/src/share/vm/code/vmreg.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/code/vmreg.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/compiler/compileBroker.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/compiler/compileBroker.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -976,6 +976,15 @@ return; } + // If the requesting thread is holding the pending list lock + // then we just return. We can't risk blocking while holding + // the pending list lock or a 3-way deadlock may occur + // between the reference handler thread, a GC (instigated + // by a compiler thread), and compiled method registration. + if (instanceRefKlass::owns_pending_list_lock(JavaThread::current())) { + return; + } + // Outputs from the following MutexLocker block: CompileTask* task = NULL; bool blocking = false; @@ -1304,17 +1313,8 @@ // Should the current thread be blocked until this compilation request // has been fulfilled? bool CompileBroker::is_compile_blocking(methodHandle method, int osr_bci) { - if (!BackgroundCompilation) { - Symbol* class_name = method->method_holder()->klass_part()->name(); - if (class_name->starts_with("java/lang/ref/Reference", 23)) { - // The reference handler thread can dead lock with the GC if compilation is blocking, - // so we avoid blocking compiles for anything in the java.lang.ref.Reference class, - // including inner classes such as ReferenceHandler. - return false; - } - return true; - } - return false; + assert(!instanceRefKlass::owns_pending_list_lock(JavaThread::current()), "possible deadlock"); + return !BackgroundCompilation; } @@ -1736,8 +1736,14 @@ UseInterpreter = true; if (UseCompiler || AlwaysCompileLoopMethods ) { if (xtty != NULL) { + stringStream s; + // Dump code cache state into a buffer before locking the tty, + // because log_state() will use locks causing lock conflicts. + CodeCache::log_state(&s); + // Lock to prevent tearing + ttyLocker ttyl; xtty->begin_elem("code_cache_full"); - CodeCache::log_state(xtty); + xtty->print(s.as_string()); xtty->stamp(); xtty->end_elem(); }
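Two locking refinements appear in the compileBroker.cpp hunks above: a thread that owns the reference pending-list lock now declines to block on compilation, and the "code_cache_full" record is first formatted into a stringStream so that CodeCache::log_state()'s own locking never happens while the tty lock is held. The sketch below illustrates the second idea (format into a local buffer, then take the output lock only for the final write); the names report_full and gather_state and the two mutexes are assumptions, not HotSpot code.

#include <iostream>
#include <mutex>
#include <sstream>
#include <string>

std::mutex stats_lock;   // taken by the state-gathering code
std::mutex out_lock;     // protects the shared output stream (the "tty" here)

// Stand-in for CodeCache::log_state(): needs its own lock to read statistics.
std::string gather_state() {
    std::lock_guard<std::mutex> guard(stats_lock);
    std::ostringstream ss;
    ss << "total_blobs='42' free='1234'";
    return ss.str();
}

// Format first, lock last: gather_state() runs with only stats_lock held,
// and out_lock is held just long enough to emit one untorn record.
void report_full() {
    std::string state = gather_state();          // may take other locks
    std::lock_guard<std::mutex> guard(out_lock); // now only the output lock
    std::cout << "<code_cache_full " << state << "/>\n";
}

int main() {
    report_full();
    return 0;
}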
--- a/src/share/vm/compiler/compileLog.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/compiler/compileLog.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/compiler/compilerOracle.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/compiler/compilerOracle.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/compiler/compilerOracle.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/compiler/compilerOracle.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/compiler/disassembler.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/compiler/disassembler.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -283,10 +283,10 @@ st->print("Stub::%s", desc->name()); if (desc->begin() != adr) st->print("%+d 0x%p",adr - desc->begin(), adr); - else if (WizardMode) st->print(" " INTPTR_FORMAT, adr); + else if (WizardMode) st->print(" " PTR_FORMAT, adr); return; } - st->print("Stub::<unknown> " INTPTR_FORMAT, adr); + st->print("Stub::<unknown> " PTR_FORMAT, adr); return; } @@ -314,8 +314,8 @@ } } - // Fall through to a simple numeral. - st->print(INTPTR_FORMAT, (intptr_t)adr); + // Fall through to a simple (hexadecimal) numeral. + st->print(PTR_FORMAT, adr); } void decode_env::print_insn_labels() { @@ -326,7 +326,7 @@ cb->print_block_comment(st, p); } if (_print_pc) { - st->print(" " INTPTR_FORMAT ": ", (intptr_t) p); + st->print(" " PTR_FORMAT ": ", p); } } @@ -432,7 +432,7 @@ void Disassembler::decode(CodeBlob* cb, outputStream* st) { if (!load_library()) return; decode_env env(cb, st); - env.output()->print_cr("Decoding CodeBlob " INTPTR_FORMAT, cb); + env.output()->print_cr("Decoding CodeBlob " PTR_FORMAT, cb); env.decode_instructions(cb->code_begin(), cb->code_end()); } @@ -446,7 +446,7 @@ void Disassembler::decode(nmethod* nm, outputStream* st) { if (!load_library()) return; decode_env env(nm, st); - env.output()->print_cr("Decoding compiled method " INTPTR_FORMAT ":", nm); + env.output()->print_cr("Decoding compiled method " PTR_FORMAT ":", nm); env.output()->print_cr("Code:"); #ifdef SHARK @@ -478,9 +478,9 @@ int offset = 0; for (address p = nm->consts_begin(); p < nm->consts_end(); p += 4, offset += 4) { if ((offset % 8) == 0) { - env.output()->print_cr(" " INTPTR_FORMAT " (offset: %4d): " PTR32_FORMAT " " PTR64_FORMAT, (intptr_t) p, offset, *((int32_t*) p), *((int64_t*) p)); + env.output()->print_cr(" " PTR_FORMAT " (offset: %4d): " PTR32_FORMAT " " PTR64_FORMAT, p, offset, *((int32_t*) p), *((int64_t*) p)); } else { - env.output()->print_cr(" " INTPTR_FORMAT " (offset: %4d): " PTR32_FORMAT, (intptr_t) p, offset, *((int32_t*) p)); + env.output()->print_cr(" " PTR_FORMAT " (offset: %4d): " PTR32_FORMAT, p, offset, *((int32_t*) p)); } } }
--- a/src/share/vm/compiler/disassembler.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/compiler/disassembler.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1963,10 +1963,21 @@ // Iteration support, mostly delegated from a CMS generation void CompactibleFreeListSpace::save_marks() { - // mark the "end" of the used space at the time of this call; + assert(Thread::current()->is_VM_thread(), + "Global variable should only be set when single-threaded"); + // Mark the "end" of the used space at the time of this call; // note, however, that promoted objects from this point // on are tracked in the _promoInfo below. set_saved_mark_word(unallocated_block()); +#ifdef ASSERT + // Check the sanity of save_marks() etc. + MemRegion ur = used_region(); + MemRegion urasm = used_region_at_save_marks(); + assert(ur.contains(urasm), + err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")" + " should contain [" PTR_FORMAT "," PTR_FORMAT ")", + ur.start(), ur.end(), urasm.start(), urasm.end())); +#endif // inform allocator that promotions should be tracked. assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); _promoInfo.startTrackingPromotions();
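save_marks() now checks, in debug builds only, that the space's used region contains the region recorded at the save point, and reports both pairs of bounds when it does not. A small sketch of that style of guarded invariant check with a descriptive message follows; Region and the NDEBUG guard are placeholders for HotSpot's MemRegion and ASSERT/err_msg machinery.

#include <cassert>
#include <cstdio>

// Placeholder for HotSpot's MemRegion: a half-open [start, end) address range.
struct Region {
    const char* start;
    const char* end;
    bool contains(const Region& other) const {
        return start <= other.start && other.end <= end;
    }
};

void save_marks(const Region& used, const Region& used_at_save_marks) {
#ifndef NDEBUG
    // Debug-only sanity check whose message shows both ranges,
    // in the spirit of the err_msg() added in the hunk above.
    if (!used.contains(used_at_save_marks)) {
        std::fprintf(stderr,
                     "Error at save_marks(): [%p,%p) should contain [%p,%p)\n",
                     (const void*)used.start, (const void*)used.end,
                     (const void*)used_at_save_marks.start,
                     (const void*)used_at_save_marks.end);
        assert(false && "save_marks() invariant violated");
    }
#endif
    // ... the real work of recording the mark would go here ...
}

int main() {
    static char heap[128];
    save_marks(Region{heap, heap + 128}, Region{heap, heap + 64});  // fine
    return 0;
}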
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -2026,7 +2026,7 @@ } { - TraceCMSMemoryManagerStats(); + TraceCMSMemoryManagerStats tmms(gch->gc_cause()); } GenMarkSweep::invoke_at_safepoint(_cmsGen->level(), ref_processor(), clear_all_soft_refs); @@ -3189,10 +3189,9 @@ } void CMSCollector::setup_cms_unloading_and_verification_state() { - const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC + const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC || VerifyBeforeExit; - const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings - | SharedHeap::SO_CodeCache; + const int rso = SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; if (should_unload_classes()) { // Should unload classes this cycle remove_root_scanning_option(rso); // Shrink the root set appropriately @@ -3480,7 +3479,7 @@ void CMSCollector::checkpointRootsInitial(bool asynch) { assert(_collectorState == InitialMarking, "Wrong collector state"); check_correct_thread_executing(); - TraceCMSMemoryManagerStats tms(_collectorState); + TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause()); ReferenceProcessor* rp = ref_processor(); SpecializationStats::clear(); @@ -4859,7 +4858,8 @@ // world is stopped at this checkpoint assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped"); - TraceCMSMemoryManagerStats tms(_collectorState); + TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause()); + verify_work_stacks_empty(); verify_overflow_empty(); @@ -5994,7 +5994,7 @@ verify_work_stacks_empty(); verify_overflow_empty(); increment_sweep_count(); - TraceCMSMemoryManagerStats tms(_collectorState); + TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause()); _inter_sweep_timer.stop(); _inter_sweep_estimate.sample(_inter_sweep_timer.seconds()); @@ -9236,11 +9236,12 @@ return res; } -TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase): TraceMemoryManagerStats() { +TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() { switch (phase) { case CMSCollector::InitialMarking: initialize(true /* fullGC */ , + cause /* cause of the GC */, true /* recordGCBeginTime */, true /* recordPreGCUsage */, false /* recordPeakUsage */, @@ -9252,6 +9253,7 @@ case CMSCollector::FinalMarking: initialize(true /* fullGC */ , + cause /* cause of the GC */, false /* recordGCBeginTime */, false /* recordPreGCUsage */, false /* recordPeakUsage */, @@ -9263,6 +9265,7 @@ case CMSCollector::Sweeping: initialize(true /* fullGC */ , + cause /* cause of the GC */, false /* recordGCBeginTime */, false /* recordPreGCUsage */, true /* recordPeakUsage */, @@ -9278,8 +9281,9 @@ } // when bailing out of cms in concurrent mode failure -TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(): TraceMemoryManagerStats() { +TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(GCCause::Cause cause): TraceMemoryManagerStats() { initialize(true /* fullGC */ , + cause /* cause of the GC */, true /* recordGCBeginTime */, true /* recordPreGCUsage */, true /* recordPeakUsage */,
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1895,8 +1895,8 @@ class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats { public: - TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase); - TraceCMSMemoryManagerStats(); + TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause); + TraceCMSMemoryManagerStats(GCCause::Cause cause); };
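One concurrentMarkSweepGeneration.cpp hunk replaces "TraceCMSMemoryManagerStats();", which constructs and immediately destroys an unnamed temporary, with a named local (tmms) whose destructor runs at the end of the enclosing block. The sketch below reproduces that pitfall with a generic RAII tracer; ScopedTrace and its output are illustrative only, assuming the intent is that the stats object should bracket the work.

#include <iostream>

// A generic RAII helper: "begin" in the constructor, "end" in the destructor.
struct ScopedTrace {
    explicit ScopedTrace(const char* what) : what_(what) {
        std::cout << "begin " << what_ << '\n';
    }
    ~ScopedTrace() { std::cout << "end   " << what_ << '\n'; }
    const char* what_;
};

void do_work() { std::cout << "  ...work...\n"; }

int main() {
    {
        ScopedTrace("temporary");   // bug pattern: destroyed on this same line,
        do_work();                  // so the work is not covered by the trace
    }
    {
        ScopedTrace t("named");     // fix: a named local lives to the end of
        do_work();                  // the block, bracketing the work
    }
    return 0;
}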
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -262,39 +262,18 @@ for (int i = 0; i < _numMarkedRegions; i++) { assert(_markedRegions.at(i) != NULL, "Should be true by sorting!"); _markedRegions.at(i)->set_sort_index(i); - if (G1PrintRegionLivenessInfo > 0) { - if (i == 0) gclog_or_tty->print_cr("Sorted marked regions:"); - if (i < G1PrintRegionLivenessInfo || - (_numMarkedRegions-i) < G1PrintRegionLivenessInfo) { - HeapRegion* hr = _markedRegions.at(i); - size_t u = hr->used(); - gclog_or_tty->print_cr(" Region %d: %d used, %d max live, %5.2f%%.", - i, u, hr->max_live_bytes(), - 100.0*(float)hr->max_live_bytes()/(float)u); - } + } + if (G1PrintRegionLivenessInfo) { + G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting"); + for (int i = 0; i < _numMarkedRegions; ++i) { + HeapRegion* r = _markedRegions.at(i); + cl.doHeapRegion(r); } } - if (G1PolicyVerbose > 1) - printSortedHeapRegions(); assert(verify(), "should now be sorted"); } void -printHeapRegion(HeapRegion *hr) { - if (hr->isHumongous()) - gclog_or_tty->print("H: "); - if (hr->in_collection_set()) - gclog_or_tty->print("CS: "); - gclog_or_tty->print_cr("Region " PTR_FORMAT " (%s%s) " - "[" PTR_FORMAT ", " PTR_FORMAT"] " - "Used: " SIZE_FORMAT "K, garbage: " SIZE_FORMAT "K.", - hr, hr->is_young() ? "Y " : " ", - hr->is_marked()? "M1" : "M0", - hr->bottom(), hr->end(), - hr->used()/K, hr->garbage_bytes()/K); -} - -void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) { assert(!hr->isHumongous(), "Humongous regions shouldn't be added to the collection set"); @@ -351,27 +330,9 @@ void CollectionSetChooser::updateAfterFullCollection() { - G1CollectedHeap* g1h = G1CollectedHeap::heap(); clearMarkedHeapRegions(); } -void -CollectionSetChooser::printSortedHeapRegions() { - gclog_or_tty->print_cr("Printing %d Heap Regions sorted by amount of known garbage", - _numMarkedRegions); - - DEBUG_ONLY(int marked_count = 0;) - for (int i = 0; i < _markedRegions.length(); i++) { - HeapRegion* r = _markedRegions.at(i); - if (r != NULL) { - printHeapRegion(r); - DEBUG_ONLY(marked_count++;) - } - } - assert(marked_count == _numMarkedRegions, "must be"); - gclog_or_tty->print_cr("Done sorted heap region print"); -} - void CollectionSetChooser::removeRegion(HeapRegion *hr) { int si = hr->sort_index(); assert(si == -1 || hr->is_marked(), "Sort index not valid.");
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -100,8 +100,6 @@ CollectionSetChooser(); - void printSortedHeapRegions(); - void sortMarkedHeapRegions(); void fillCache(); bool addRegionToCache(void);
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -31,23 +31,31 @@ #include "gc_implementation/g1/heapRegionSeq.inline.hpp" #include "memory/space.inline.hpp" #include "runtime/atomic.hpp" +#include "runtime/java.hpp" #include "utilities/copy.hpp" // Possible sizes for the card counts cache: odd primes that roughly double in size. // (See jvmtiTagMap.cpp). -int ConcurrentG1Refine::_cc_cache_sizes[] = { - 16381, 32771, 76831, 150001, 307261, - 614563, 1228891, 2457733, 4915219, 9830479, - 19660831, 39321619, 78643219, 157286461, -1 + +#define MAX_SIZE ((size_t) -1) + +size_t ConcurrentG1Refine::_cc_cache_sizes[] = { + 16381, 32771, 76831, 150001, 307261, + 614563, 1228891, 2457733, 4915219, 9830479, + 19660831, 39321619, 78643219, 157286461, MAX_SIZE }; ConcurrentG1Refine::ConcurrentG1Refine() : _card_counts(NULL), _card_epochs(NULL), - _n_card_counts(0), _max_n_card_counts(0), + _n_card_counts(0), _max_cards(0), _max_n_card_counts(0), _cache_size_index(0), _expand_card_counts(false), _hot_cache(NULL), _def_use_cache(false), _use_cache(false), - _n_periods(0), + // We initialize the epochs of the array to 0. By initializing + // _n_periods to 1 and not 0 we automatically invalidate all the + // entries on the array. Otherwise we might accidentally think that + // we claimed a card that was in fact never set (see CR7033292). + _n_periods(1), _threads(NULL), _n_threads(0) { @@ -98,27 +106,44 @@ void ConcurrentG1Refine::init() { if (G1ConcRSLogCacheSize > 0) { _g1h = G1CollectedHeap::heap(); - _max_n_card_counts = - (unsigned) (_g1h->max_capacity() >> CardTableModRefBS::card_shift); + + _max_cards = _g1h->max_capacity() >> CardTableModRefBS::card_shift; + _max_n_card_counts = _max_cards * G1MaxHotCardCountSizePercent / 100; size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1; - guarantee(_max_n_card_counts < max_card_num, "card_num representation"); + guarantee(_max_cards < max_card_num, "card_num representation"); - int desired = _max_n_card_counts / InitialCacheFraction; - for (_cache_size_index = 0; - _cc_cache_sizes[_cache_size_index] >= 0; _cache_size_index++) { - if (_cc_cache_sizes[_cache_size_index] >= desired) break; - } - _cache_size_index = MAX2(0, (_cache_size_index - 1)); + // We need _n_card_counts to be less than _max_n_card_counts here + // so that the expansion call (below) actually allocates the + // _counts and _epochs arrays. + assert(_n_card_counts == 0, "pre-condition"); + assert(_max_n_card_counts > 0, "pre-condition"); - int initial_size = _cc_cache_sizes[_cache_size_index]; - if (initial_size < 0) initial_size = _max_n_card_counts; + // Find the index into cache size array that is of a size that's + // large enough to hold desired_sz. + size_t desired_sz = _max_cards / InitialCacheFraction; + int desired_sz_index = 0; + while (_cc_cache_sizes[desired_sz_index] < desired_sz) { + desired_sz_index += 1; + assert(desired_sz_index < MAX_CC_CACHE_INDEX, "invariant"); + } + assert(desired_sz_index < MAX_CC_CACHE_INDEX, "invariant"); - // Make sure we don't go bigger than we will ever need - _n_card_counts = MIN2((unsigned) initial_size, _max_n_card_counts); + // If the desired_sz value is between two sizes then + // _cc_cache_sizes[desired_sz_index-1] < desired_sz <= _cc_cache_sizes[desired_sz_index] + // we will start with the lower size in the optimistic expectation that + // we will not need to expand up. 
Note desired_sz_index could also be 0. + if (desired_sz_index > 0 && + _cc_cache_sizes[desired_sz_index] > desired_sz) { + desired_sz_index -= 1; + } - _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts); - _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts); + if (!expand_card_count_cache(desired_sz_index)) { + // Allocation was unsuccessful - exit + vm_exit_during_initialization("Could not reserve enough space for card count cache"); + } + assert(_n_card_counts > 0, "post-condition"); + assert(_cache_size_index == desired_sz_index, "post-condition"); Copy::fill_to_bytes(&_card_counts[0], _n_card_counts * sizeof(CardCountCacheEntry)); @@ -163,10 +188,13 @@ ConcurrentG1Refine::~ConcurrentG1Refine() { if (G1ConcRSLogCacheSize > 0) { + // Please see the comment in allocate_card_count_cache + // for why we call os::malloc() and os::free() directly. assert(_card_counts != NULL, "Logic"); - FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts); + os::free(_card_counts); assert(_card_epochs != NULL, "Logic"); - FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs); + os::free(_card_epochs); + assert(_hot_cache != NULL, "Logic"); FREE_C_HEAP_ARRAY(jbyte*, _hot_cache); } @@ -382,29 +410,93 @@ } } -void ConcurrentG1Refine::expand_card_count_cache() { - if (_n_card_counts < _max_n_card_counts) { - int new_idx = _cache_size_index+1; - int new_size = _cc_cache_sizes[new_idx]; - if (new_size < 0) new_size = _max_n_card_counts; +// The arrays used to hold the card counts and the epochs must have +// a 1:1 correspondence. Hence they are allocated and freed together +// Returns true if the allocations of both the counts and epochs +// were successful; false otherwise. +bool ConcurrentG1Refine::allocate_card_count_cache(size_t n, + CardCountCacheEntry** counts, + CardEpochCacheEntry** epochs) { + // We call the allocation/free routines directly for the counts + // and epochs arrays. The NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY + // macros call AllocateHeap and FreeHeap respectively. + // AllocateHeap will call vm_exit_out_of_memory in the event + // of an allocation failure and abort the JVM. With the + // _counts/epochs arrays we only need to abort the JVM if the + // initial allocation of these arrays fails. + // + // Additionally AllocateHeap/FreeHeap do some tracing of + // allocate/free calls so calling one without calling the + // other can cause inconsistencies in the tracing. So we + // call neither. 
- // Make sure we don't go bigger than we will ever need - new_size = MIN2((unsigned) new_size, _max_n_card_counts); + assert(*counts == NULL, "out param"); + assert(*epochs == NULL, "out param"); + + size_t counts_size = n * sizeof(CardCountCacheEntry); + size_t epochs_size = n * sizeof(CardEpochCacheEntry); + + *counts = (CardCountCacheEntry*) os::malloc(counts_size); + if (*counts == NULL) { + // allocation was unsuccessful + return false; + } + + *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size); + if (*epochs == NULL) { + // allocation was unsuccessful - free counts array + assert(*counts != NULL, "must be"); + os::free(*counts); + *counts = NULL; + return false; + } - // Expand the card count and card epoch tables - if (new_size > (int)_n_card_counts) { - // We can just free and allocate a new array as we're - // not interested in preserving the contents - assert(_card_counts != NULL, "Logic!"); - assert(_card_epochs != NULL, "Logic!"); - FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts); - FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs); - _n_card_counts = new_size; - _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts); - _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts); - _cache_size_index = new_idx; + // We successfully allocated both counts and epochs + return true; +} + +// Returns true if the card counts/epochs cache was +// successfully expanded; false otherwise. +bool ConcurrentG1Refine::expand_card_count_cache(int cache_size_idx) { + // Can we expand the card count and epoch tables? + if (_n_card_counts < _max_n_card_counts) { + assert(cache_size_idx >= 0 && cache_size_idx < MAX_CC_CACHE_INDEX, "oob"); + + size_t cache_size = _cc_cache_sizes[cache_size_idx]; + // Make sure we don't go bigger than we will ever need + cache_size = MIN2(cache_size, _max_n_card_counts); + + // Should we expand the card count and card epoch tables? + if (cache_size > _n_card_counts) { + // We have been asked to allocate new, larger, arrays for + // the card counts and the epochs. Attempt the allocation + // of both before we free the existing arrays in case + // the allocation is unsuccessful... + CardCountCacheEntry* counts = NULL; + CardEpochCacheEntry* epochs = NULL; + + if (allocate_card_count_cache(cache_size, &counts, &epochs)) { + // Allocation was successful. + // We can just free the old arrays; we're + // not interested in preserving the contents + if (_card_counts != NULL) os::free(_card_counts); + if (_card_epochs != NULL) os::free(_card_epochs); + + // Cache the size of the arrays and the index that got us there. + _n_card_counts = cache_size; + _cache_size_index = cache_size_idx; + + _card_counts = counts; + _card_epochs = epochs; + + // We successfully allocated/expanded the caches. + return true; + } } } + + // We did not successfully expand the caches. + return false; } void ConcurrentG1Refine::clear_and_record_card_counts() { @@ -415,10 +507,16 @@ #endif if (_expand_card_counts) { - expand_card_count_cache(); + int new_idx = _cache_size_index + 1; + + if (expand_card_count_cache(new_idx)) { + // Allocation was successful and _n_card_counts has + // been updated to the new size. We only need to clear + // the epochs so we don't read a bogus epoch value + // when inserting a card into the hot card cache. + Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry)); + } _expand_card_counts = false; - // Only need to clear the epochs. 
- Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry)); } int this_epoch = (int) _n_periods;
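The rewritten ConcurrentG1Refine code above replaces the abort-on-failure NEW_C_HEAP_ARRAY calls with direct os::malloc()/os::free() so that a failed expansion of the card count cache can be reported to the caller instead of terminating the VM. The essential pattern is: allocate both parallel arrays before touching the old ones, roll back the first allocation if the second fails, and only free the old arrays once both new ones are in hand. Below is a minimal standalone C++ sketch of that pattern using std::malloc; the struct and function names are illustrative and not part of the changeset.

// Sketch of the allocate-both-or-roll-back pattern behind
// allocate_card_count_cache()/expand_card_count_cache(); illustrative only.
#include <cstddef>
#include <cstdlib>
#include <cstring>

struct CountEntry { unsigned count; };
struct EpochEntry { unsigned long long epoch; };

// Allocate two parallel arrays of n entries. On failure both out-parameters
// stay NULL and false is returned.
static bool allocate_cache(size_t n, CountEntry** counts, EpochEntry** epochs) {
  *counts = (CountEntry*) std::malloc(n * sizeof(CountEntry));
  if (*counts == NULL) {
    return false;                       // nothing to roll back
  }
  *epochs = (EpochEntry*) std::malloc(n * sizeof(EpochEntry));
  if (*epochs == NULL) {
    std::free(*counts);                 // roll back the first allocation
    *counts = NULL;
    return false;
  }
  return true;
}

// Grow the cache to new_size entries (contents are not preserved). Returns
// false, keeping the old arrays intact, if the new arrays cannot be allocated.
static bool expand_cache(size_t new_size, size_t* cur_size,
                         CountEntry** cur_counts, EpochEntry** cur_epochs) {
  CountEntry* counts = NULL;
  EpochEntry* epochs = NULL;
  if (!allocate_cache(new_size, &counts, &epochs)) {
    return false;                       // caller keeps using the smaller cache
  }
  std::memset(counts, 0, new_size * sizeof(CountEntry));
  std::memset(epochs, 0, new_size * sizeof(EpochEntry));
  std::free(*cur_counts);
  std::free(*cur_epochs);
  *cur_counts = counts;
  *cur_epochs = epochs;
  *cur_size   = new_size;
  return true;
}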
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -94,7 +94,7 @@ } CardEpochCacheEntry; julong make_epoch_entry(unsigned int card_num, unsigned int epoch) { - assert(0 <= card_num && card_num < _max_n_card_counts, "Bounds"); + assert(0 <= card_num && card_num < _max_cards, "Bounds"); assert(0 <= epoch && epoch <= _n_periods, "must be"); return ((julong) card_num << card_num_shift) | epoch; @@ -117,15 +117,24 @@ CardEpochCacheEntry* _card_epochs; // The current number of buckets in the card count cache - unsigned _n_card_counts; + size_t _n_card_counts; + + // The number of cards for the entire reserved heap + size_t _max_cards; - // The max number of buckets required for the number of - // cards for the entire reserved heap - unsigned _max_n_card_counts; + // The max number of buckets for the card counts and epochs caches. + // This is the maximum that the counts and epochs will grow to. + // It is specified as a fraction or percentage of _max_cards using + // G1MaxHotCardCountSizePercent. + size_t _max_n_card_counts; // Possible sizes of the cache: odd primes that roughly double in size. // (See jvmtiTagMap.cpp). - static int _cc_cache_sizes[]; + enum { + MAX_CC_CACHE_INDEX = 15 // maximum index into the cache size array. + }; + + static size_t _cc_cache_sizes[MAX_CC_CACHE_INDEX]; // The index in _cc_cache_sizes corresponding to the size of // _card_counts. @@ -147,12 +156,23 @@ CardTableModRefBS* _ct_bs; G1CollectedHeap* _g1h; - // Expands the array that holds the card counts to the next size up - void expand_card_count_cache(); + // Helper routine for expand_card_count_cache(). + // The arrays used to hold the card counts and the epochs must have + // a 1:1 correspondence. Hence they are allocated and freed together. + // Returns true if the allocations of both the counts and epochs + // were successful; false otherwise. + bool allocate_card_count_cache(size_t n, + CardCountCacheEntry** counts, + CardEpochCacheEntry** epochs); + + // Expands the arrays that hold the card counts and epochs + // to the cache size at index. Returns true if the expansion/ + // allocation was successful; false otherwise. + bool expand_card_count_cache(int index); // hash a given key (index of card_ptr) with the specified size - static unsigned int hash(size_t key, int size) { - return (unsigned int) key % size; + static unsigned int hash(size_t key, size_t size) { + return (unsigned int) (key % size); } // hash a given key (index of card_ptr) @@ -160,11 +180,11 @@ return hash(key, _n_card_counts); } - unsigned ptr_2_card_num(jbyte* card_ptr) { - return (unsigned) (card_ptr - _ct_bot); + unsigned int ptr_2_card_num(jbyte* card_ptr) { + return (unsigned int) (card_ptr - _ct_bot); } - jbyte* card_num_2_ptr(unsigned card_num) { + jbyte* card_num_2_ptr(unsigned int card_num) { return (jbyte*) (_ct_bot + card_num); }
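The header change fixes the cache size table at MAX_CC_CACHE_INDEX size_t entries and hashes a card index with a plain modulo over the current table size. The following is a small, self-contained sketch of how an initial size is picked from a table of odd primes that roughly double (stepping down one entry when the desired size falls strictly between two table values, as the init() code above does) and how a card index is hashed into a bucket. Only the prime values are taken from the changeset; the surrounding names and functions are illustrative.

// Illustrative sketch of size-table selection and modulo hashing for the
// card count cache; only the prime values come from the changeset.
#include <cstddef>
#include <cassert>

static const int    kMaxIndex = 15;
static const size_t kCacheSizes[kMaxIndex] = {
  16381, 32771, 76831, 150001, 307261,
  614563, 1228891, 2457733, 4915219, 9830479,
  19660831, 39321619, 78643219, 157286461, (size_t) -1 };

// Find the first entry >= desired, then step down one entry when desired
// falls strictly between two sizes: start small and expand only if needed.
static int pick_size_index(size_t desired) {
  int idx = 0;
  while (kCacheSizes[idx] < desired) {
    idx += 1;
    assert(idx < kMaxIndex && "desired size exceeds the table");
  }
  if (idx > 0 && kCacheSizes[idx] > desired) {
    idx -= 1;
  }
  return idx;
}

// Map a card index into a table of 'size' buckets.
static unsigned int hash_card(size_t card_index, size_t size) {
  return (unsigned int) (card_index % size);
}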
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -826,6 +826,14 @@ void ConcurrentMark::checkpointRootsInitialPost() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); + // If we force an overflow during remark, the remark operation will + // actually abort and we'll restart concurrent marking. If we always + // force an oveflow during remark we'll never actually complete the + // marking phase. So, we initilize this here, at the start of the + // cycle, so that at the remaining overflow number will decrease at + // every remark and we'll eventually not need to cause one. + force_overflow_stw()->init(); + // For each region note start of marking. NoteStartOfMarkHRClosure startcl; g1h->heap_region_iterate(&startcl); @@ -893,27 +901,37 @@ } /* - Notice that in the next two methods, we actually leave the STS - during the barrier sync and join it immediately afterwards. If we - do not do this, this then the following deadlock can occur: one - thread could be in the barrier sync code, waiting for the other - thread to also sync up, whereas another one could be trying to - yield, while also waiting for the other threads to sync up too. - - Because the thread that does the sync barrier has left the STS, it - is possible to be suspended for a Full GC or an evacuation pause - could occur. This is actually safe, since the entering the sync - barrier is one of the last things do_marking_step() does, and it - doesn't manipulate any data structures afterwards. -*/ + * Notice that in the next two methods, we actually leave the STS + * during the barrier sync and join it immediately afterwards. If we + * do not do this, the following deadlock can occur: one thread could + * be in the barrier sync code, waiting for the other thread to also + * sync up, whereas another one could be trying to yield, while also + * waiting for the other threads to sync up too. + * + * Note, however, that this code is also used during remark and in + * this case we should not attempt to leave / enter the STS, otherwise + * we'll either hit an asseert (debug / fastdebug) or deadlock + * (product). So we should only leave / enter the STS if we are + * operating concurrently. + * + * Because the thread that does the sync barrier has left the STS, it + * is possible to be suspended for a Full GC or an evacuation pause + * could occur. This is actually safe, since the entering the sync + * barrier is one of the last things do_marking_step() does, and it + * doesn't manipulate any data structures afterwards. + */ void ConcurrentMark::enter_first_sync_barrier(int task_num) { if (verbose_low()) gclog_or_tty->print_cr("[%d] entering first barrier", task_num); - ConcurrentGCThread::stsLeave(); + if (concurrent()) { + ConcurrentGCThread::stsLeave(); + } _first_overflow_barrier_sync.enter(); - ConcurrentGCThread::stsJoin(); + if (concurrent()) { + ConcurrentGCThread::stsJoin(); + } // at this point everyone should have synced up and not be doing any // more work @@ -923,7 +941,12 @@ // let task 0 do this if (task_num == 0) { // task 0 is responsible for clearing the global data structures - clear_marking_state(); + // We should be here because of an overflow. During STW we should + // not clear the overflow flag since we rely on it being true when + // we exit this method to abort the pause and restart concurent + // marking. 
+ clear_marking_state(concurrent() /* clear_overflow */); + force_overflow()->update(); if (PrintGC) { gclog_or_tty->date_stamp(PrintGCDateStamps); @@ -940,15 +963,45 @@ if (verbose_low()) gclog_or_tty->print_cr("[%d] entering second barrier", task_num); - ConcurrentGCThread::stsLeave(); + if (concurrent()) { + ConcurrentGCThread::stsLeave(); + } _second_overflow_barrier_sync.enter(); - ConcurrentGCThread::stsJoin(); + if (concurrent()) { + ConcurrentGCThread::stsJoin(); + } // at this point everything should be re-initialised and ready to go if (verbose_low()) gclog_or_tty->print_cr("[%d] leaving second barrier", task_num); } +#ifndef PRODUCT +void ForceOverflowSettings::init() { + _num_remaining = G1ConcMarkForceOverflow; + _force = false; + update(); +} + +void ForceOverflowSettings::update() { + if (_num_remaining > 0) { + _num_remaining -= 1; + _force = true; + } else { + _force = false; + } +} + +bool ForceOverflowSettings::should_force() { + if (_force) { + _force = false; + return true; + } else { + return false; + } +} +#endif // !PRODUCT + void ConcurrentMark::grayRoot(oop p) { HeapWord* addr = (HeapWord*) p; // We can't really check against _heap_start and _heap_end, since it @@ -1117,6 +1170,7 @@ _restart_for_overflow = false; size_t active_workers = MAX2((size_t) 1, parallel_marking_threads()); + force_overflow_conc()->init(); set_phase(active_workers, true /* concurrent */); CMConcurrentMarkingTask markingTask(this, cmThread()); @@ -1204,7 +1258,6 @@ g1p->record_concurrent_mark_remark_end(); } - #define CARD_BM_TEST_MODE 0 class CalcLiveObjectsClosure: public HeapRegionClosure { @@ -1726,6 +1779,11 @@ } _total_counting_time += this_final_counting_time; + if (G1PrintRegionLivenessInfo) { + G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking"); + _g1h->heap_region_iterate(&cl); + } + // Install newly created mark bitMap as "prev". swapMarkBitMaps(); @@ -1841,7 +1899,7 @@ while (!_cleanup_list.is_empty()) { HeapRegion* hr = _cleanup_list.remove_head(); assert(hr != NULL, "the list was not empty"); - hr->rem_set()->clear(); + hr->par_clear(); tmp_free_list.add_as_tail(hr); // Instead of adding one region at a time to the secondary_free_list, @@ -2699,12 +2757,16 @@ } -void ConcurrentMark::clear_marking_state() { +void ConcurrentMark::clear_marking_state(bool clear_overflow) { _markStack.setEmpty(); _markStack.clear_overflow(); _regionStack.setEmpty(); _regionStack.clear_overflow(); - clear_has_overflown(); + if (clear_overflow) { + clear_has_overflown(); + } else { + assert(has_overflown(), "pre-condition"); + } _finger = _heap_start; for (int i = 0; i < (int)_max_task_num; ++i) { @@ -2992,6 +3054,28 @@ _should_gray_objects = true; } +// Resets the region fields of active CMTasks whose values point +// into the collection set. +void ConcurrentMark::reset_active_task_region_fields_in_cset() { + assert(SafepointSynchronize::is_at_safepoint(), "should be in STW"); + assert(parallel_marking_threads() <= _max_task_num, "sanity"); + + for (int i = 0; i < (int)parallel_marking_threads(); i += 1) { + CMTask* task = _tasks[i]; + HeapWord* task_finger = task->finger(); + if (task_finger != NULL) { + assert(_g1h->is_in_g1_reserved(task_finger), "not in heap"); + HeapRegion* finger_region = _g1h->heap_region_containing(task_finger); + if (finger_region->in_collection_set()) { + // The task's current region is in the collection set. + // This region will be evacuated in the current GC and + // the region fields in the task will be stale. 
+ task->giveup_current_region(); + } + } + } +} + // abandon current marking iteration due to a Full GC void ConcurrentMark::abort() { // Clear all marks to force marking thread to do nothing @@ -3199,8 +3283,12 @@ CMTask* task) : _g1h(g1h), _cm(cm), _task(task) { - _ref_processor = g1h->ref_processor(); - assert(_ref_processor != NULL, "should not be NULL"); + assert(_ref_processor == NULL, "should be initialized to NULL"); + + if (G1UseConcMarkReferenceProcessing) { + _ref_processor = g1h->ref_processor(); + assert(_ref_processor != NULL, "should not be NULL"); + } } }; @@ -4271,6 +4359,15 @@ } } + // If we are about to wrap up and go into termination, check if we + // should raise the overflow flag. + if (do_termination && !has_aborted()) { + if (_cm->force_overflow()->should_force()) { + _cm->set_has_overflown(); + regular_clock_call(); + } + } + // We still haven't aborted. Now, let's try to get into the // termination protocol. if (do_termination && !has_aborted()) { @@ -4423,3 +4520,175 @@ _marking_step_diffs_ms.add(0.5); } + +// These are formatting macros that are used below to ensure +// consistent formatting. The *_H_* versions are used to format the +// header for a particular value and they should be kept consistent +// with the corresponding macro. Also note that most of the macros add +// the necessary white space (as a prefix) which makes them a bit +// easier to compose. + +// All the output lines are prefixed with this string to be able to +// identify them easily in a large log file. +#define G1PPRL_LINE_PREFIX "###" + +#define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT +#ifdef _LP64 +#define G1PPRL_ADDR_BASE_H_FORMAT " %37s" +#else // _LP64 +#define G1PPRL_ADDR_BASE_H_FORMAT " %21s" +#endif // _LP64 + +// For per-region info +#define G1PPRL_TYPE_FORMAT " %-4s" +#define G1PPRL_TYPE_H_FORMAT " %4s" +#define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9) +#define G1PPRL_BYTE_H_FORMAT " %9s" +#define G1PPRL_DOUBLE_FORMAT " %14.1f" +#define G1PPRL_DOUBLE_H_FORMAT " %14s" + +// For summary info +#define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT +#define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT +#define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB" +#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%" + +G1PrintRegionLivenessInfoClosure:: +G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name) + : _out(out), + _total_used_bytes(0), _total_capacity_bytes(0), + _total_prev_live_bytes(0), _total_next_live_bytes(0), + _hum_used_bytes(0), _hum_capacity_bytes(0), + _hum_prev_live_bytes(0), _hum_next_live_bytes(0) { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + MemRegion g1_committed = g1h->g1_committed(); + MemRegion g1_reserved = g1h->g1_reserved(); + double now = os::elapsedTime(); + + // Print the header of the output. 
+ _out->cr(); + _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); + _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" + G1PPRL_SUM_ADDR_FORMAT("committed") + G1PPRL_SUM_ADDR_FORMAT("reserved") + G1PPRL_SUM_BYTE_FORMAT("region-size"), + g1_committed.start(), g1_committed.end(), + g1_reserved.start(), g1_reserved.end(), + HeapRegion::GrainBytes); + _out->print_cr(G1PPRL_LINE_PREFIX); + _out->print_cr(G1PPRL_LINE_PREFIX + G1PPRL_TYPE_H_FORMAT + G1PPRL_ADDR_BASE_H_FORMAT + G1PPRL_BYTE_H_FORMAT + G1PPRL_BYTE_H_FORMAT + G1PPRL_BYTE_H_FORMAT + G1PPRL_DOUBLE_H_FORMAT, + "type", "address-range", + "used", "prev-live", "next-live", "gc-eff"); +} + +// It takes as a parameter a reference to one of the _hum_* fields, it +// deduces the corresponding value for a region in a humongous region +// series (either the region size, or what's left if the _hum_* field +// is < the region size), and updates the _hum_* field accordingly. +size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { + size_t bytes = 0; + // The > 0 check is to deal with the prev and next live bytes which + // could be 0. + if (*hum_bytes > 0) { + bytes = MIN2((size_t) HeapRegion::GrainBytes, *hum_bytes); + *hum_bytes -= bytes; + } + return bytes; +} + +// It deduces the values for a region in a humongous region series +// from the _hum_* fields and updates those accordingly. It assumes +// that that _hum_* fields have already been set up from the "starts +// humongous" region and we visit the regions in address order. +void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, + size_t* capacity_bytes, + size_t* prev_live_bytes, + size_t* next_live_bytes) { + assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); + *used_bytes = get_hum_bytes(&_hum_used_bytes); + *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); + *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); + *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); +} + +bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { + const char* type = ""; + HeapWord* bottom = r->bottom(); + HeapWord* end = r->end(); + size_t capacity_bytes = r->capacity(); + size_t used_bytes = r->used(); + size_t prev_live_bytes = r->live_bytes(); + size_t next_live_bytes = r->next_live_bytes(); + double gc_eff = r->gc_efficiency(); + if (r->used() == 0) { + type = "FREE"; + } else if (r->is_survivor()) { + type = "SURV"; + } else if (r->is_young()) { + type = "EDEN"; + } else if (r->startsHumongous()) { + type = "HUMS"; + + assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && + _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, + "they should have been zeroed after the last time we used them"); + // Set up the _hum_* fields. + _hum_capacity_bytes = capacity_bytes; + _hum_used_bytes = used_bytes; + _hum_prev_live_bytes = prev_live_bytes; + _hum_next_live_bytes = next_live_bytes; + get_hum_bytes(&used_bytes, &capacity_bytes, + &prev_live_bytes, &next_live_bytes); + end = bottom + HeapRegion::GrainWords; + } else if (r->continuesHumongous()) { + type = "HUMC"; + get_hum_bytes(&used_bytes, &capacity_bytes, + &prev_live_bytes, &next_live_bytes); + assert(end == bottom + HeapRegion::GrainWords, "invariant"); + } else { + type = "OLD"; + } + + _total_used_bytes += used_bytes; + _total_capacity_bytes += capacity_bytes; + _total_prev_live_bytes += prev_live_bytes; + _total_next_live_bytes += next_live_bytes; + + // Print a line for this particular region. 
+ _out->print_cr(G1PPRL_LINE_PREFIX + G1PPRL_TYPE_FORMAT + G1PPRL_ADDR_BASE_FORMAT + G1PPRL_BYTE_FORMAT + G1PPRL_BYTE_FORMAT + G1PPRL_BYTE_FORMAT + G1PPRL_DOUBLE_FORMAT, + type, bottom, end, + used_bytes, prev_live_bytes, next_live_bytes, gc_eff); + + return false; +} + +G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { + // Print the footer of the output. + _out->print_cr(G1PPRL_LINE_PREFIX); + _out->print_cr(G1PPRL_LINE_PREFIX + " SUMMARY" + G1PPRL_SUM_MB_FORMAT("capacity") + G1PPRL_SUM_MB_PERC_FORMAT("used") + G1PPRL_SUM_MB_PERC_FORMAT("prev-live") + G1PPRL_SUM_MB_PERC_FORMAT("next-live"), + bytes_to_mb(_total_capacity_bytes), + bytes_to_mb(_total_used_bytes), + perc(_total_used_bytes, _total_capacity_bytes), + bytes_to_mb(_total_prev_live_bytes), + perc(_total_prev_live_bytes, _total_capacity_bytes), + bytes_to_mb(_total_next_live_bytes), + perc(_total_next_live_bytes, _total_capacity_bytes)); + _out->cr(); +}
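One detail of the new G1PrintRegionLivenessInfoClosure above is how it handles humongous region series: the used/capacity/prev-live/next-live totals are all recorded on the "starts humongous" region, so the closure stashes them in the _hum_* fields and hands them out one region's worth at a time as the "continues humongous" regions are visited in address order. A minimal standalone sketch of that dole-out step follows; the fixed region size and the names are assumptions for illustration only.

// Sketch of the humongous-series accounting in get_hum_bytes(): totals are
// captured at the "starts humongous" region and consumed one region's worth
// at a time. kRegionBytes stands in for HeapRegion::GrainBytes.
#include <cstddef>
#include <algorithm>

static const size_t kRegionBytes = 1024 * 1024;

// Take at most one region's worth out of the remaining series total.
static size_t take_hum_bytes(size_t* remaining) {
  size_t bytes = 0;
  if (*remaining > 0) {            // prev/next live bytes can legally be zero
    bytes = std::min(kRegionBytes, *remaining);
    *remaining -= bytes;
  }
  return bytes;
}

// Example: a 2.5-region object contributes kRegionBytes to each of the first
// two regions of the series and the remaining half region to the third,
// after which the running total has drained to zero.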
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -316,6 +316,19 @@ void setEmpty() { _index = 0; clear_overflow(); } }; +class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC { +private: +#ifndef PRODUCT + uintx _num_remaining; + bool _force; +#endif // !defined(PRODUCT) + +public: + void init() PRODUCT_RETURN; + void update() PRODUCT_RETURN; + bool should_force() PRODUCT_RETURN_( return false; ); +}; + // this will enable a variety of different statistics per GC task #define _MARKING_STATS_ 0 // this will enable the higher verbose levels @@ -462,6 +475,9 @@ WorkGang* _parallel_workers; + ForceOverflowSettings _force_overflow_conc; + ForceOverflowSettings _force_overflow_stw; + void weakRefsWork(bool clear_all_soft_refs); void swapMarkBitMaps(); @@ -470,7 +486,7 @@ // task local ones; should be called during initial mark. void reset(); // It resets all the marking data structures. - void clear_marking_state(); + void clear_marking_state(bool clear_overflow = true); // It should be called to indicate which phase we're in (concurrent // mark or remark) and how many threads are currently active. @@ -547,6 +563,22 @@ void enter_first_sync_barrier(int task_num); void enter_second_sync_barrier(int task_num); + ForceOverflowSettings* force_overflow_conc() { + return &_force_overflow_conc; + } + + ForceOverflowSettings* force_overflow_stw() { + return &_force_overflow_stw; + } + + ForceOverflowSettings* force_overflow() { + if (concurrent()) { + return force_overflow_conc(); + } else { + return force_overflow_stw(); + } + } + public: // Manipulation of the global mark stack. // Notice that the first mark_stack_push is CAS-based, whereas the @@ -777,10 +809,19 @@ // It indicates that a new collection set is being chosen. void newCSet(); + // It registers a collection set heap region with CM. This is used // to determine whether any heap regions are located above the finger. void registerCSetRegion(HeapRegion* hr); + // Resets the region fields of any active CMTask whose region fields + // are in the collection set (i.e. the region currently claimed by + // the CMTask will be evacuated and may be used, subsequently, as + // an alloc region). When this happens the region fields in the CMTask + // are stale and, hence, should be cleared causing the worker thread + // to claim a new region. + void reset_active_task_region_fields_in_cset(); + // Registers the maximum region-end associated with a set of // regions with CM. Again this is used to determine whether any // heap regions are located above the finger. @@ -1007,9 +1048,6 @@ void setup_for_region(HeapRegion* hr); // it brings up-to-date the limit of the region void update_region_limit(); - // it resets the local fields after a task has finished scanning a - // region - void giveup_current_region(); // called when either the words scanned or the refs visited limit // has been reached @@ -1062,6 +1100,11 @@ // exit the termination protocol after it's entered it. virtual bool should_exit_termination(); + // Resets the local region fields after a task has finished scanning a + // region; or when they have become stale as a result of the region + // being evacuated. + void giveup_current_region(); + HeapWord* finger() { return _finger; } bool has_aborted() { return _has_aborted; } @@ -1149,4 +1192,54 @@ #endif // _MARKING_STATS_ }; +// Class that's used to to print out per-region liveness +// information. 
It's currently used at the end of marking and also +// after we sort the old regions at the end of the cleanup operation. +class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure { +private: + outputStream* _out; + + // Accumulators for these values. + size_t _total_used_bytes; + size_t _total_capacity_bytes; + size_t _total_prev_live_bytes; + size_t _total_next_live_bytes; + + // These are set up when we come across a "stars humongous" region + // (as this is where most of this information is stored, not in the + // subsequent "continues humongous" regions). After that, for every + // region in a given humongous region series we deduce the right + // values for it by simply subtracting the appropriate amount from + // these fields. All these values should reach 0 after we've visited + // the last region in the series. + size_t _hum_used_bytes; + size_t _hum_capacity_bytes; + size_t _hum_prev_live_bytes; + size_t _hum_next_live_bytes; + + static double perc(size_t val, size_t total) { + if (total == 0) { + return 0.0; + } else { + return 100.0 * ((double) val / (double) total); + } + } + + static double bytes_to_mb(size_t val) { + return (double) val / (double) M; + } + + // See the .cpp file. + size_t get_hum_bytes(size_t* hum_bytes); + void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes, + size_t* prev_live_bytes, size_t* next_live_bytes); + +public: + // The header and footer are printed in the constructor and + // destructor respectively. + G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name); + virtual bool doHeapRegion(HeapRegion* r); + ~G1PrintRegionLivenessInfoClosure(); +}; + #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
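ForceOverflowSettings, declared above and compiled out in product builds, is a small fault-injection hook for the remark overflow path: it is re-armed with G1ConcMarkForceOverflow forced overflows at the start of each cycle, decrements that budget on every update(), and should_force() fires at most once per arming. A hedged standalone sketch of the same countdown behaviour (the class and member names here are illustrative, not the HotSpot ones):

// Standalone sketch of a countdown fault-injection hook in the spirit of
// ForceOverflowSettings; not the HotSpot class itself.
#include <cassert>

class ForcedFailure {
  unsigned _num_remaining;
  bool     _armed;
public:
  // Called at the start of a cycle with the number of failures to inject.
  void init(unsigned budget) {
    _num_remaining = budget;
    _armed = false;
    update();
  }

  // Called after each forced failure; arms the next one while budget remains.
  void update() {
    if (_num_remaining > 0) {
      _num_remaining -= 1;
      _armed = true;
    } else {
      _armed = false;
    }
  }

  // Consumes the armed state, so each update() forces at most one failure.
  bool should_force() {
    if (_armed) {
      _armed = false;
      return true;
    }
    return false;
  }
};

// With a budget of 2, the first two checks after init()/update() are forced:
//   ForcedFailure f;  f.init(2);
//   assert(f.should_force());  f.update();
//   assert(f.should_force());  f.update();
//   assert(!f.should_force());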
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1AllocRegion.inline.hpp" +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" + +G1CollectedHeap* G1AllocRegion::_g1h = NULL; +HeapRegion* G1AllocRegion::_dummy_region = NULL; + +void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) { + assert(_dummy_region == NULL, "should be set once"); + assert(dummy_region != NULL, "pre-condition"); + assert(dummy_region->free() == 0, "pre-condition"); + + // Make sure that any allocation attempt on this region will fail + // and will not trigger any asserts. + assert(allocate(dummy_region, 1, false) == NULL, "should fail"); + assert(par_allocate(dummy_region, 1, false) == NULL, "should fail"); + assert(allocate(dummy_region, 1, true) == NULL, "should fail"); + assert(par_allocate(dummy_region, 1, true) == NULL, "should fail"); + + _g1h = g1h; + _dummy_region = dummy_region; +} + +void G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region, + bool bot_updates) { + assert(alloc_region != NULL && alloc_region != _dummy_region, + "pre-condition"); + + // Other threads might still be trying to allocate using a CAS out + // of the region we are trying to retire, as they can do so without + // holding the lock. So, we first have to make sure that noone else + // can allocate out of it by doing a maximal allocation. Even if our + // CAS attempt fails a few times, we'll succeed sooner or later + // given that failed CAS attempts mean that the region is getting + // closed to being full. + size_t free_word_size = alloc_region->free() / HeapWordSize; + + // This is the minimum free chunk we can turn into a dummy + // object. If the free space falls below this, then noone can + // allocate in this region anyway (all allocation requests will be + // of a size larger than this) so we won't have to perform the dummy + // allocation. + size_t min_word_size_to_fill = CollectedHeap::min_fill_size(); + + while (free_word_size >= min_word_size_to_fill) { + HeapWord* dummy = par_allocate(alloc_region, free_word_size, bot_updates); + if (dummy != NULL) { + // If the allocation was successful we should fill in the space. 
+ CollectedHeap::fill_with_object(dummy, free_word_size); + alloc_region->set_pre_dummy_top(dummy); + break; + } + + free_word_size = alloc_region->free() / HeapWordSize; + // It's also possible that someone else beats us to the + // allocation and they fill up the region. In that case, we can + // just get out of the loop. + } + assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill, + "post-condition"); +} + +void G1AllocRegion::retire(bool fill_up) { + assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly")); + + trace("retiring"); + HeapRegion* alloc_region = _alloc_region; + if (alloc_region != _dummy_region) { + // We never have to check whether the active region is empty or not, + // and potentially free it if it is, given that it's guaranteed that + // it will never be empty. + assert(!alloc_region->is_empty(), + ar_ext_msg(this, "the alloc region should never be empty")); + + if (fill_up) { + fill_up_remaining_space(alloc_region, _bot_updates); + } + + assert(alloc_region->used() >= _used_bytes_before, + ar_ext_msg(this, "invariant")); + size_t allocated_bytes = alloc_region->used() - _used_bytes_before; + retire_region(alloc_region, allocated_bytes); + _used_bytes_before = 0; + _alloc_region = _dummy_region; + } + trace("retired"); +} + +HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size, + bool force) { + assert(_alloc_region == _dummy_region, ar_ext_msg(this, "pre-condition")); + assert(_used_bytes_before == 0, ar_ext_msg(this, "pre-condition")); + + trace("attempting region allocation"); + HeapRegion* new_alloc_region = allocate_new_region(word_size, force); + if (new_alloc_region != NULL) { + new_alloc_region->reset_pre_dummy_top(); + // Need to do this before the allocation + _used_bytes_before = new_alloc_region->used(); + HeapWord* result = allocate(new_alloc_region, word_size, _bot_updates); + assert(result != NULL, ar_ext_msg(this, "the allocation should succeeded")); + + OrderAccess::storestore(); + // Note that we first perform the allocation and then we store the + // region in _alloc_region. This is the reason why an active region + // can never be empty. + _alloc_region = new_alloc_region; + trace("region allocation successful"); + return result; + } else { + trace("region allocation failed"); + return NULL; + } + ShouldNotReachHere(); +} + +void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) { + msg->append("[%s] %s b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT, + _name, message, BOOL_TO_STR(_bot_updates), + _alloc_region, _used_bytes_before); +} + +void G1AllocRegion::init() { + trace("initializing"); + assert(_alloc_region == NULL && _used_bytes_before == 0, + ar_ext_msg(this, "pre-condition")); + assert(_dummy_region != NULL, "should have been set"); + _alloc_region = _dummy_region; + trace("initialized"); +} + +HeapRegion* G1AllocRegion::release() { + trace("releasing"); + HeapRegion* alloc_region = _alloc_region; + retire(false /* fill_up */); + assert(_alloc_region == _dummy_region, "post-condition of retire()"); + _alloc_region = NULL; + trace("released"); + return (alloc_region == _dummy_region) ? NULL : alloc_region; +} + +#if G1_ALLOC_REGION_TRACING +void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) { + // All the calls to trace that set either just the size or the size + // and the result are considered part of level 2 tracing and are + // skipped during level 1 tracing. 
+ if ((word_size == 0 && result == NULL) || (G1_ALLOC_REGION_TRACING > 1)) { + const size_t buffer_length = 128; + char hr_buffer[buffer_length]; + char rest_buffer[buffer_length]; + + HeapRegion* alloc_region = _alloc_region; + if (alloc_region == NULL) { + jio_snprintf(hr_buffer, buffer_length, "NULL"); + } else if (alloc_region == _dummy_region) { + jio_snprintf(hr_buffer, buffer_length, "DUMMY"); + } else { + jio_snprintf(hr_buffer, buffer_length, + HR_FORMAT, HR_FORMAT_PARAMS(alloc_region)); + } + + if (G1_ALLOC_REGION_TRACING > 1) { + if (result != NULL) { + jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT" "PTR_FORMAT, + word_size, result); + } else if (word_size != 0) { + jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT, word_size); + } else { + jio_snprintf(rest_buffer, buffer_length, ""); + } + } else { + jio_snprintf(rest_buffer, buffer_length, ""); + } + + tty->print_cr("[%s] %s : %s %s", _name, hr_buffer, str, rest_buffer); + } +} +#endif // G1_ALLOC_REGION_TRACING + +G1AllocRegion::G1AllocRegion(const char* name, + bool bot_updates) + : _name(name), _bot_updates(bot_updates), + _alloc_region(NULL), _used_bytes_before(0) { } +
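The retirement path in g1AllocRegion.cpp above relies on fill_up_remaining_space(): before an actively shared region is retired, its remaining free space is claimed with a maximal CAS allocation (and formatted as a dummy object) so that no concurrent lock-free allocator can still slip an allocation into it. Below is a self-contained C++11 sketch of that claiming loop over an atomic bump pointer; the real code operates on HeapRegion and calls CollectedHeap::fill_with_object(), so everything here is illustrative.

// C++11 sketch of the "fill up the rest so no one else can allocate" step.
#include <atomic>
#include <cstddef>

struct Region {
  std::atomic<size_t> top;   // index of the next free word
  size_t              end;   // one past the last word

  size_t free_words() const { return end - top.load(); }

  // Lock-free bump allocation: returns the start index, or (size_t)-1.
  size_t par_allocate(size_t words) {
    size_t cur = top.load();
    do {
      if (end - cur < words) {
        return (size_t) -1;
      }
    } while (!top.compare_exchange_weak(cur, cur + words));
    return cur;
  }
};

static const size_t kMinFillWords = 2;   // stand-in for min_fill_size()

// Keep trying to claim everything that is left; a failed CAS means another
// thread allocated, which only brings the region closer to being full.
static void fill_up_remaining_space(Region* r) {
  size_t free_words = r->free_words();
  while (free_words >= kMinFillWords) {
    if (r->par_allocate(free_words) != (size_t) -1) {
      // The real code formats the claimed range as a dummy object here.
      break;
    }
    free_words = r->free_words();
  }
}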
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP + +#include "gc_implementation/g1/heapRegion.hpp" + +class G1CollectedHeap; + +// 0 -> no tracing, 1 -> basic tracing, 2 -> basic + allocation tracing +#define G1_ALLOC_REGION_TRACING 0 + +class ar_ext_msg; + +// A class that holds a region that is active in satisfying allocation +// requests, potentially issued in parallel. When the active region is +// full it will be retired it replaced with a new one. The +// implementation assumes that fast-path allocations will be lock-free +// and a lock will need to be taken when the active region needs to be +// replaced. + +class G1AllocRegion VALUE_OBJ_CLASS_SPEC { + friend class ar_ext_msg; + +private: + // The active allocating region we are currently allocating out + // of. The invariant is that if this object is initialized (i.e., + // init() has been called and release() has not) then _alloc_region + // is either an active allocating region or the dummy region (i.e., + // it can never be NULL) and this object can be used to satisfy + // allocation requests. If this object is not initialized + // (i.e. init() has not been called or release() has been called) + // then _alloc_region is NULL and this object should not be used to + // satisfy allocation requests (it was done this way to force the + // correct use of init() and release()). + HeapRegion* _alloc_region; + + // When we set up a new active region we save its used bytes in this + // field so that, when we retire it, we can calculate how much space + // we allocated in it. + size_t _used_bytes_before; + + // Specifies whether the allocate calls will do BOT updates or not. + bool _bot_updates; + + // Useful for debugging and tracing. + const char* _name; + + // A dummy region (i.e., it's been allocated specially for this + // purpose and it is not part of the heap) that is full (i.e., top() + // == end()). When we don't have a valid active region we make + // _alloc_region point to this. This allows us to skip checking + // whether the _alloc_region is NULL or not. + static HeapRegion* _dummy_region; + + // Some of the methods below take a bot_updates parameter. Its value + // should be the same as the _bot_updates field. 
The idea is that + // the parameter will be a constant for a particular alloc region + // and, given that these methods will be hopefully inlined, the + // compiler should compile out the test. + + // Perform a non-MT-safe allocation out of the given region. + static inline HeapWord* allocate(HeapRegion* alloc_region, + size_t word_size, + bool bot_updates); + + // Perform a MT-safe allocation out of the given region. + static inline HeapWord* par_allocate(HeapRegion* alloc_region, + size_t word_size, + bool bot_updates); + + // Ensure that the region passed as a parameter has been filled up + // so that noone else can allocate out of it any more. + static void fill_up_remaining_space(HeapRegion* alloc_region, + bool bot_updates); + + // Retire the active allocating region. If fill_up is true then make + // sure that the region is full before we retire it so that noone + // else can allocate out of it. + void retire(bool fill_up); + + // Allocate a new active region and use it to perform a word_size + // allocation. The force parameter will be passed on to + // G1CollectedHeap::allocate_new_alloc_region() and tells it to try + // to allocate a new region even if the max has been reached. + HeapWord* new_alloc_region_and_allocate(size_t word_size, bool force); + + void fill_in_ext_msg(ar_ext_msg* msg, const char* message); + +protected: + // For convenience as subclasses use it. + static G1CollectedHeap* _g1h; + + virtual HeapRegion* allocate_new_region(size_t word_size, bool force) = 0; + virtual void retire_region(HeapRegion* alloc_region, + size_t allocated_bytes) = 0; + + G1AllocRegion(const char* name, bool bot_updates); + +public: + static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region); + + HeapRegion* get() const { + // Make sure that the dummy region does not escape this class. + return (_alloc_region == _dummy_region) ? NULL : _alloc_region; + } + + // The following two are the building blocks for the allocation method. + + // First-level allocation: Should be called without holding a + // lock. It will try to allocate lock-free out of the active region, + // or return NULL if it was unable to. + inline HeapWord* attempt_allocation(size_t word_size, bool bot_updates); + + // Second-level allocation: Should be called while holding a + // lock. It will try to first allocate lock-free out of the active + // region or, if it's unable to, it will try to replace the active + // alloc region with a new one. We require that the caller takes the + // appropriate lock before calling this so that it is easier to make + // it conform to its locking protocol. + inline HeapWord* attempt_allocation_locked(size_t word_size, + bool bot_updates); + + // Should be called to allocate a new region even if the max of this + // type of regions has been reached. Should only be called if other + // allocation attempts have failed and we are not holding a valid + // active region. + inline HeapWord* attempt_allocation_force(size_t word_size, + bool bot_updates); + + // Should be called before we start using this object. + void init(); + + // Should be called when we want to release the active region which + // is returned after it's been retired. 
+ HeapRegion* release(); + +#if G1_ALLOC_REGION_TRACING + void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL); +#else // G1_ALLOC_REGION_TRACING + void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL) { } +#endif // G1_ALLOC_REGION_TRACING +}; + +class ar_ext_msg : public err_msg { +public: + ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("") { + alloc_region->fill_in_ext_msg(this, message); + } +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP
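A notable design point in the G1AllocRegion header above is that _alloc_region is never NULL while the object is initialized: between init() and release() it points either at a real region or at a shared dummy region that is permanently full, so the lock-free fast path can skip a NULL check and simply let the allocation fail. A small illustrative sketch of that sentinel pattern (single-threaded, with made-up names) is:

// Sketch of the "dummy full region instead of NULL" pattern: the fast path
// never tests for NULL, it just allocates and lets the full sentinel fail.
#include <cstddef>

struct SimpleRegion {
  size_t top, end;                           // word indices; top == end => full
  size_t allocate(size_t words) {            // single-threaded variant
    if (end - top < words) return (size_t) -1;
    size_t result = top;
    top += words;
    return result;
  }
};

class AllocRegionHandle {
  static SimpleRegion _dummy;                // permanently full sentinel
  SimpleRegion*       _cur;
public:
  AllocRegionHandle() : _cur(&_dummy) { }
  void install(SimpleRegion* r) { _cur = r; }
  void release()                { _cur = &_dummy; }

  // Fast path: no NULL check; with the dummy installed this simply fails.
  size_t attempt_allocation(size_t words) { return _cur->allocate(words); }
};

SimpleRegion AllocRegionHandle::_dummy = { 0, 0 };   // free() == 0, as required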
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP + +#include "gc_implementation/g1/g1AllocRegion.hpp" + +inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region, + size_t word_size, + bool bot_updates) { + assert(alloc_region != NULL, err_msg("pre-condition")); + + if (!bot_updates) { + return alloc_region->allocate_no_bot_updates(word_size); + } else { + return alloc_region->allocate(word_size); + } +} + +inline HeapWord* G1AllocRegion::par_allocate(HeapRegion* alloc_region, + size_t word_size, + bool bot_updates) { + assert(alloc_region != NULL, err_msg("pre-condition")); + assert(!alloc_region->is_empty(), err_msg("pre-condition")); + + if (!bot_updates) { + return alloc_region->par_allocate_no_bot_updates(word_size); + } else { + return alloc_region->par_allocate(word_size); + } +} + +inline HeapWord* G1AllocRegion::attempt_allocation(size_t word_size, + bool bot_updates) { + assert(bot_updates == _bot_updates, ar_ext_msg(this, "pre-condition")); + + HeapRegion* alloc_region = _alloc_region; + assert(alloc_region != NULL, ar_ext_msg(this, "not initialized properly")); + + HeapWord* result = par_allocate(alloc_region, word_size, bot_updates); + if (result != NULL) { + trace("alloc", word_size, result); + return result; + } + trace("alloc failed", word_size); + return NULL; +} + +inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size, + bool bot_updates) { + // First we have to tedo the allocation, assuming we're holding the + // appropriate lock, in case another thread changed the region while + // we were waiting to get the lock. 
+ HeapWord* result = attempt_allocation(word_size, bot_updates); + if (result != NULL) { + return result; + } + + retire(true /* fill_up */); + result = new_alloc_region_and_allocate(word_size, false /* force */); + if (result != NULL) { + trace("alloc locked (second attempt)", word_size, result); + return result; + } + trace("alloc locked failed", word_size); + return NULL; +} + +inline HeapWord* G1AllocRegion::attempt_allocation_force(size_t word_size, + bool bot_updates) { + assert(bot_updates == _bot_updates, ar_ext_msg(this, "pre-condition")); + assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly")); + + trace("forcing alloc"); + HeapWord* result = new_alloc_region_and_allocate(word_size, true /* force */); + if (result != NULL) { + trace("alloc forced", word_size, result); + return result; + } + trace("alloc forced failed", word_size); + return NULL; +} + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP
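Taken together, attempt_allocation() and attempt_allocation_locked() above form a two-level protocol: try a lock-free CAS allocation first; only if that fails take the lock, redo the lock-free attempt (another thread may have installed a fresh region while we waited), and otherwise retire the old region and publish a new one, allocating out of it before it becomes visible. The following is a hedged, standalone C++11 sketch of that protocol; BumpRegion, new_region() and the 1024-word region size are assumptions for illustration, not the HotSpot API.

// C++11 sketch of the two-level allocation protocol: a lock-free first
// attempt, then a locked retry that may retire the region and install a
// fresh one (allocated into before it is published).
#include <atomic>
#include <mutex>
#include <cstddef>

struct BumpRegion {
  std::atomic<size_t> top;
  size_t              end;

  size_t par_allocate(size_t words) {          // lock-free bump allocation
    size_t cur = top.load();
    do {
      if (end - cur < words) {
        return (size_t) -1;
      }
    } while (!top.compare_exchange_weak(cur, cur + words));
    return cur;
  }
};

class TwoLevelAllocator {
  std::atomic<BumpRegion*> _region;            // never NULL in this sketch
  std::mutex               _lock;

  static BumpRegion* new_region() {            // fresh 1024-word region
    BumpRegion* r = new BumpRegion();
    r->top.store(0);
    r->end = 1024;
    return r;
  }

public:
  TwoLevelAllocator() { _region.store(new_region()); }

  // First level: no lock taken, just a CAS into the current region.
  size_t attempt_allocation(size_t words) {
    return _region.load(std::memory_order_acquire)->par_allocate(words);
  }

  // Second level: called while holding the allocation lock conceptually;
  // here the lock is taken internally for simplicity.
  size_t attempt_allocation_locked(size_t words) {
    std::lock_guard<std::mutex> guard(_lock);
    // Redo the allocation under the lock; another thread may have already
    // replaced the region while we were waiting for the lock.
    size_t result = attempt_allocation(words);
    if (result != (size_t) -1) {
      return result;
    }
    // Retire the old region (the real code fills its remaining space) and
    // allocate out of the new one *before* publishing it, mirroring the
    // storestore ordering comment in new_alloc_region_and_allocate().
    BumpRegion* fresh = new_region();
    result = fresh->par_allocate(words);
    _region.store(fresh, std::memory_order_release);   // old region leaks here
    return result;
  }
};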
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -28,6 +28,7 @@ #include "gc_implementation/g1/concurrentG1Refine.hpp" #include "gc_implementation/g1/concurrentG1RefineThread.hpp" #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" +#include "gc_implementation/g1/g1AllocRegion.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1CollectorPolicy.hpp" #include "gc_implementation/g1/g1MarkSweep.hpp" @@ -427,6 +428,37 @@ _cmThread->stop(); } +#ifdef ASSERT +// A region is added to the collection set as it is retired +// so an address p can point to a region which will be in the +// collection set but has not yet been retired. This method +// therefore is only accurate during a GC pause after all +// regions have been retired. It is used for debugging +// to check if an nmethod has references to objects that can +// be move during a partial collection. Though it can be +// inaccurate, it is sufficient for G1 because the conservative +// implementation of is_scavengable() for G1 will indicate that +// all nmethods must be scanned during a partial collection. +bool G1CollectedHeap::is_in_partial_collection(const void* p) { + HeapRegion* hr = heap_region_containing(p); + return hr != NULL && hr->in_collection_set(); +} +#endif + +// Returns true if the reference points to an object that +// can move in an incremental collecction. +bool G1CollectedHeap::is_scavengable(const void* p) { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + G1CollectorPolicy* g1p = g1h->g1_policy(); + HeapRegion* hr = heap_region_containing(p); + if (hr == NULL) { + // perm gen (or null) + return false; + } else { + return !hr->isHumongous(); + } +} + void G1CollectedHeap::check_ct_logs_at_safepoint() { DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); @@ -517,8 +549,7 @@ return NULL; } -HeapRegion* G1CollectedHeap::new_region_work(size_t word_size, - bool do_expand) { +HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) { assert(!isHumongous(word_size) || word_size <= (size_t) HeapRegion::GrainWords, "the only time we use this to allocate a humongous region is " @@ -566,7 +597,7 @@ size_t word_size) { HeapRegion* alloc_region = NULL; if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { - alloc_region = new_region_work(word_size, true /* do_expand */); + alloc_region = new_region(word_size, true /* do_expand */); if (purpose == GCAllocForSurvived && alloc_region != NULL) { alloc_region->set_survivor(); } @@ -587,7 +618,7 @@ // Only one region to allocate, no need to go through the slower // path. The caller will attempt the expasion if this fails, so // let's not try to expand here too. - HeapRegion* hr = new_region_work(word_size, false /* do_expand */); + HeapRegion* hr = new_region(word_size, false /* do_expand */); if (hr != NULL) { first = hr->hrs_index(); } else { @@ -788,407 +819,12 @@ return result; } -void -G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) { - // Other threads might still be trying to allocate using CASes out - // of the region we are retiring, as they can do so without holding - // the Heap_lock. So we first have to make sure that noone else can - // allocate in it by doing a maximal allocation. 
Even if our CAS - // attempt fails a few times, we'll succeed sooner or later given - // that a failed CAS attempt mean that the region is getting closed - // to being full (someone else succeeded in allocating into it). - size_t free_word_size = cur_alloc_region->free() / HeapWordSize; - - // This is the minimum free chunk we can turn into a dummy - // object. If the free space falls below this, then noone can - // allocate in this region anyway (all allocation requests will be - // of a size larger than this) so we won't have to perform the dummy - // allocation. - size_t min_word_size_to_fill = CollectedHeap::min_fill_size(); - - while (free_word_size >= min_word_size_to_fill) { - HeapWord* dummy = - cur_alloc_region->par_allocate_no_bot_updates(free_word_size); - if (dummy != NULL) { - // If the allocation was successful we should fill in the space. - CollectedHeap::fill_with_object(dummy, free_word_size); - break; - } - - free_word_size = cur_alloc_region->free() / HeapWordSize; - // It's also possible that someone else beats us to the - // allocation and they fill up the region. In that case, we can - // just get out of the loop - } - assert(cur_alloc_region->free() / HeapWordSize < min_word_size_to_fill, - "sanity"); - - retire_cur_alloc_region_common(cur_alloc_region); - assert(_cur_alloc_region == NULL, "post-condition"); -} - -// See the comment in the .hpp file about the locking protocol and -// assumptions of this method (and other related ones). -HeapWord* -G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size, - bool at_safepoint, - bool do_dirtying, - bool can_expand) { - assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); - assert(_cur_alloc_region == NULL, - "replace_cur_alloc_region_and_allocate() should only be called " - "after retiring the previous current alloc region"); - assert(SafepointSynchronize::is_at_safepoint() == at_safepoint, - "at_safepoint and is_at_safepoint() should be a tautology"); - assert(!can_expand || g1_policy()->can_expand_young_list(), - "we should not call this method with can_expand == true if " - "we are not allowed to expand the young gen"); - - if (can_expand || !g1_policy()->is_young_list_full()) { - HeapRegion* new_cur_alloc_region = new_alloc_region(word_size); - if (new_cur_alloc_region != NULL) { - assert(new_cur_alloc_region->is_empty(), - "the newly-allocated region should be empty, " - "as right now we only allocate new regions out of the free list"); - g1_policy()->update_region_num(true /* next_is_young */); - set_region_short_lived_locked(new_cur_alloc_region); - - assert(!new_cur_alloc_region->isHumongous(), - "Catch a regression of this bug."); - - // We need to ensure that the stores to _cur_alloc_region and, - // subsequently, to top do not float above the setting of the - // young type. - OrderAccess::storestore(); - - // Now, perform the allocation out of the region we just - // allocated. Note that noone else can access that region at - // this point (as _cur_alloc_region has not been updated yet), - // so we can just go ahead and do the allocation without any - // atomics (and we expect this allocation attempt to - // suceeded). Given that other threads can attempt an allocation - // with a CAS and without needing the Heap_lock, if we assigned - // the new region to _cur_alloc_region before first allocating - // into it other threads might have filled up the new region - // before we got a chance to do the allocation ourselves. 
In - // that case, we would have needed to retire the region, grab a - // new one, and go through all this again. Allocating out of the - // new region before assigning it to _cur_alloc_region avoids - // all this. - HeapWord* result = - new_cur_alloc_region->allocate_no_bot_updates(word_size); - assert(result != NULL, "we just allocate out of an empty region " - "so allocation should have been successful"); - assert(is_in(result), "result should be in the heap"); - - // Now make sure that the store to _cur_alloc_region does not - // float above the store to top. - OrderAccess::storestore(); - _cur_alloc_region = new_cur_alloc_region; - - if (!at_safepoint) { - Heap_lock->unlock(); - } - - // do the dirtying, if necessary, after we release the Heap_lock - if (do_dirtying) { - dirty_young_block(result, word_size); - } - return result; - } - } - - assert(_cur_alloc_region == NULL, "we failed to allocate a new current " - "alloc region, it should still be NULL"); - assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); - return NULL; -} - -// See the comment in the .hpp file about the locking protocol and -// assumptions of this method (and other related ones). -HeapWord* -G1CollectedHeap::attempt_allocation_slow(size_t word_size) { - assert_heap_locked_and_not_at_safepoint(); - assert(!isHumongous(word_size), "attempt_allocation_slow() should not be " - "used for humongous allocations"); - - // We should only reach here when we were unable to allocate - // otherwise. So, we should have not active current alloc region. - assert(_cur_alloc_region == NULL, "current alloc region should be NULL"); - - // We will loop while succeeded is false, which means that we tried - // to do a collection, but the VM op did not succeed. So, when we - // exit the loop, either one of the allocation attempts was - // successful, or we succeeded in doing the VM op but which was - // unable to allocate after the collection. - for (int try_count = 1; /* we'll return or break */; try_count += 1) { - bool succeeded = true; - - // Every time we go round the loop we should be holding the Heap_lock. - assert_heap_locked(); - - if (GC_locker::is_active_and_needs_gc()) { - // We are locked out of GC because of the GC locker. We can - // allocate a new region only if we can expand the young gen. - - if (g1_policy()->can_expand_young_list()) { - // Yes, we are allowed to expand the young gen. Let's try to - // allocate a new current alloc region. - HeapWord* result = - replace_cur_alloc_region_and_allocate(word_size, - false, /* at_safepoint */ - true, /* do_dirtying */ - true /* can_expand */); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - } - // We could not expand the young gen further (or we could but we - // failed to allocate a new region). We'll stall until the GC - // locker forces a GC. - - // If this thread is not in a jni critical section, we stall - // the requestor until the critical section has cleared and - // GC allowed. When the critical section clears, a GC is - // initiated by the last thread exiting the critical section; so - // we retry the allocation sequence from the beginning of the loop, - // rather than causing more, now probably unnecessary, GC attempts. - JavaThread* jthr = JavaThread::current(); - assert(jthr != NULL, "sanity"); - if (jthr->in_critical()) { - if (CheckJNICalls) { - fatal("Possible deadlock due to allocating while" - " in jni critical section"); - } - // We are returning NULL so the protocol is that we're still - // holding the Heap_lock. 
- assert_heap_locked(); - return NULL; - } - - Heap_lock->unlock(); - GC_locker::stall_until_clear(); - - // No need to relock the Heap_lock. We'll fall off to the code - // below the else-statement which assumes that we are not - // holding the Heap_lock. - } else { - // We are not locked out. So, let's try to do a GC. The VM op - // will retry the allocation before it completes. - - // Read the GC count while holding the Heap_lock - unsigned int gc_count_before = SharedHeap::heap()->total_collections(); - - Heap_lock->unlock(); - - HeapWord* result = - do_collection_pause(word_size, gc_count_before, &succeeded); - assert_heap_not_locked(); - if (result != NULL) { - assert(succeeded, "the VM op should have succeeded"); - - // Allocations that take place on VM operations do not do any - // card dirtying and we have to do it here. - dirty_young_block(result, word_size); - return result; - } - } - - // Both paths that get us here from above unlock the Heap_lock. - assert_heap_not_locked(); - - // We can reach here when we were unsuccessful in doing a GC, - // because another thread beat us to it, or because we were locked - // out of GC due to the GC locker. In either case a new alloc - // region might be available so we will retry the allocation. - HeapWord* result = attempt_allocation(word_size); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - - // So far our attempts to allocate failed. The only time we'll go - // around the loop and try again is if we tried to do a GC and the - // VM op that we tried to schedule was not successful because - // another thread beat us to it. If that happened it's possible - // that by the time we grabbed the Heap_lock again and tried to - // allocate other threads filled up the young generation, which - // means that the allocation attempt after the GC also failed. So, - // it's worth trying to schedule another GC pause. - if (succeeded) { - break; - } - - // Give a warning if we seem to be looping forever. - if ((QueuedAllocationWarningCount > 0) && - (try_count % QueuedAllocationWarningCount == 0)) { - warning("G1CollectedHeap::attempt_allocation_slow() " - "retries %d times", try_count); - } - } - - assert_heap_locked(); - return NULL; -} - -// See the comment in the .hpp file about the locking protocol and -// assumptions of this method (and other related ones). -HeapWord* -G1CollectedHeap::attempt_allocation_humongous(size_t word_size, - bool at_safepoint) { - // This is the method that will allocate a humongous object. All - // allocation paths that attempt to allocate a humongous object - // should eventually reach here. Currently, the only paths are from - // mem_allocate() and attempt_allocation_at_safepoint(). - assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); - assert(isHumongous(word_size), "attempt_allocation_humongous() " - "should only be used for humongous allocations"); - assert(SafepointSynchronize::is_at_safepoint() == at_safepoint, - "at_safepoint and is_at_safepoint() should be a tautology"); - - HeapWord* result = NULL; - - // We will loop while succeeded is false, which means that we tried - // to do a collection, but the VM op did not succeed. So, when we - // exit the loop, either one of the allocation attempts was - // successful, or we succeeded in doing the VM op but which was - // unable to allocate after the collection. 
- for (int try_count = 1; /* we'll return or break */; try_count += 1) { - bool succeeded = true; - - // Given that humongous objects are not allocated in young - // regions, we'll first try to do the allocation without doing a - // collection hoping that there's enough space in the heap. - result = humongous_obj_allocate(word_size); - assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), - "catch a regression of this bug."); - if (result != NULL) { - if (!at_safepoint) { - // If we're not at a safepoint, unlock the Heap_lock. - Heap_lock->unlock(); - } - return result; - } - - // If we failed to allocate the humongous object, we should try to - // do a collection pause (if we're allowed) in case it reclaims - // enough space for the allocation to succeed after the pause. - if (!at_safepoint) { - // Read the GC count while holding the Heap_lock - unsigned int gc_count_before = SharedHeap::heap()->total_collections(); - - // If we're allowed to do a collection we're not at a - // safepoint, so it is safe to unlock the Heap_lock. - Heap_lock->unlock(); - - result = do_collection_pause(word_size, gc_count_before, &succeeded); - assert_heap_not_locked(); - if (result != NULL) { - assert(succeeded, "the VM op should have succeeded"); - return result; - } - - // If we get here, the VM operation either did not succeed - // (i.e., another thread beat us to it) or it succeeded but - // failed to allocate the object. - - // If we're allowed to do a collection we're not at a - // safepoint, so it is safe to lock the Heap_lock. - Heap_lock->lock(); - } - - assert(result == NULL, "otherwise we should have exited the loop earlier"); - - // So far our attempts to allocate failed. The only time we'll go - // around the loop and try again is if we tried to do a GC and the - // VM op that we tried to schedule was not successful because - // another thread beat us to it. That way it's possible that some - // space was freed up by the thread that successfully scheduled a - // GC. So it's worth trying to allocate again. - if (succeeded) { - break; - } - - // Give a warning if we seem to be looping forever. - if ((QueuedAllocationWarningCount > 0) && - (try_count % QueuedAllocationWarningCount == 0)) { - warning("G1CollectedHeap::attempt_allocation_humongous " - "retries %d times", try_count); - } - } - - assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); - return NULL; -} - -HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, - bool expect_null_cur_alloc_region) { - assert_at_safepoint(true /* should_be_vm_thread */); - assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region, - err_msg("the current alloc region was unexpectedly found " - "to be non-NULL, cur alloc region: "PTR_FORMAT" " - "expect_null_cur_alloc_region: %d word_size: "SIZE_FORMAT, - _cur_alloc_region, expect_null_cur_alloc_region, word_size)); - - if (!isHumongous(word_size)) { - if (!expect_null_cur_alloc_region) { - HeapRegion* cur_alloc_region = _cur_alloc_region; - if (cur_alloc_region != NULL) { - // We are at a safepoint so no reason to use the MT-safe version. - HeapWord* result = cur_alloc_region->allocate_no_bot_updates(word_size); - if (result != NULL) { - assert(is_in(result), "result should be in the heap"); - - // We will not do any dirtying here. This is guaranteed to be - // called during a safepoint and the thread that scheduled the - // pause will do the dirtying if we return a non-NULL result. 
- return result; - } - - retire_cur_alloc_region_common(cur_alloc_region); - } - } - - assert(_cur_alloc_region == NULL, - "at this point we should have no cur alloc region"); - return replace_cur_alloc_region_and_allocate(word_size, - true, /* at_safepoint */ - false /* do_dirtying */, - false /* can_expand */); - } else { - return attempt_allocation_humongous(word_size, - true /* at_safepoint */); - } - - ShouldNotReachHere(); -} - HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) { assert_heap_not_locked_and_not_at_safepoint(); - assert(!isHumongous(word_size), "we do not allow TLABs of humongous size"); - - // First attempt: Try allocating out of the current alloc region - // using a CAS. If that fails, take the Heap_lock and retry the - // allocation, potentially replacing the current alloc region. - HeapWord* result = attempt_allocation(word_size); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - - // Second attempt: Go to the slower path where we might try to - // schedule a collection. - result = attempt_allocation_slow(word_size); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - - assert_heap_locked(); - // Need to unlock the Heap_lock before returning. - Heap_lock->unlock(); - return NULL; + assert(!isHumongous(word_size), "we do not allow humongous TLABs"); + + unsigned int dummy_gc_count_before; + return attempt_allocation(word_size, &dummy_gc_count_before); } HeapWord* @@ -1200,48 +836,18 @@ assert(!is_tlab, "mem_allocate() this should not be called directly " "to allocate TLABs"); - // Loop until the allocation is satisified, - // or unsatisfied after GC. + // Loop until the allocation is satisified, or unsatisfied after GC. for (int try_count = 1; /* we'll return */; try_count += 1) { unsigned int gc_count_before; - { - if (!isHumongous(word_size)) { - // First attempt: Try allocating out of the current alloc region - // using a CAS. If that fails, take the Heap_lock and retry the - // allocation, potentially replacing the current alloc region. - HeapWord* result = attempt_allocation(word_size); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - - assert_heap_locked(); - - // Second attempt: Go to the slower path where we might try to - // schedule a collection. - result = attempt_allocation_slow(word_size); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - } else { - // attempt_allocation_humongous() requires the Heap_lock to be held. - Heap_lock->lock(); - - HeapWord* result = attempt_allocation_humongous(word_size, - false /* at_safepoint */); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - } - - assert_heap_locked(); - // Read the gc count while the heap lock is held. - gc_count_before = SharedHeap::heap()->total_collections(); - - // Release the Heap_lock before attempting the collection. - Heap_lock->unlock(); + + HeapWord* result = NULL; + if (!isHumongous(word_size)) { + result = attempt_allocation(word_size, &gc_count_before); + } else { + result = attempt_allocation_humongous(word_size, &gc_count_before); + } + if (result != NULL) { + return result; } // Create the garbage collection operation... @@ -1249,7 +855,6 @@ // ...and get the VM thread to execute it. VMThread::execute(&op); - assert_heap_not_locked(); if (op.prologue_succeeded() && op.pause_succeeded()) { // If the operation was successful we'll return the result even // if it is NULL. 
If the allocation attempt failed immediately @@ -1275,21 +880,207 @@ } ShouldNotReachHere(); + return NULL; } -void G1CollectedHeap::abandon_cur_alloc_region() { +HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size, + unsigned int *gc_count_before_ret) { + // Make sure you read the note in attempt_allocation_humongous(). + + assert_heap_not_locked_and_not_at_safepoint(); + assert(!isHumongous(word_size), "attempt_allocation_slow() should not " + "be called for humongous allocation requests"); + + // We should only get here after the first-level allocation attempt + // (attempt_allocation()) failed to allocate. + + // We will loop until a) we manage to successfully perform the + // allocation or b) we successfully schedule a collection which + // fails to perform the allocation. b) is the only case when we'll + // return NULL. + HeapWord* result = NULL; + for (int try_count = 1; /* we'll return */; try_count += 1) { + bool should_try_gc; + unsigned int gc_count_before; + + { + MutexLockerEx x(Heap_lock); + + result = _mutator_alloc_region.attempt_allocation_locked(word_size, + false /* bot_updates */); + if (result != NULL) { + return result; + } + + // If we reach here, attempt_allocation_locked() above failed to + // allocate a new region. So the mutator alloc region should be NULL. + assert(_mutator_alloc_region.get() == NULL, "only way to get here"); + + if (GC_locker::is_active_and_needs_gc()) { + if (g1_policy()->can_expand_young_list()) { + result = _mutator_alloc_region.attempt_allocation_force(word_size, + false /* bot_updates */); + if (result != NULL) { + return result; + } + } + should_try_gc = false; + } else { + // Read the GC count while still holding the Heap_lock. + gc_count_before = SharedHeap::heap()->total_collections(); + should_try_gc = true; + } + } + + if (should_try_gc) { + bool succeeded; + result = do_collection_pause(word_size, gc_count_before, &succeeded); + if (result != NULL) { + assert(succeeded, "only way to get back a non-NULL result"); + return result; + } + + if (succeeded) { + // If we get here we successfully scheduled a collection which + // failed to allocate. No point in trying to allocate + // further. We'll just return NULL. + MutexLockerEx x(Heap_lock); + *gc_count_before_ret = SharedHeap::heap()->total_collections(); + return NULL; + } + } else { + GC_locker::stall_until_clear(); + } + + // We can reach here if we were unsuccessful in scheduling a + // collection (because another thread beat us to it) or if we were + // stalled due to the GC locker. In either case we should retry the + // allocation attempt in case another thread successfully + // performed a collection and reclaimed enough space. We do the + // first attempt (without holding the Heap_lock) here and the + // follow-on attempt will be at the start of the next loop + // iteration (after taking the Heap_lock). + result = _mutator_alloc_region.attempt_allocation(word_size, + false /* bot_updates */); + if (result != NULL) { + return result; + } + + // Give a warning if we seem to be looping forever. + if ((QueuedAllocationWarningCount > 0) && + (try_count % QueuedAllocationWarningCount == 0)) { + warning("G1CollectedHeap::attempt_allocation_slow() " + "retries %d times", try_count); + } + } + + ShouldNotReachHere(); + return NULL; +} + +HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size, + unsigned int * gc_count_before_ret) { + // The structure of this method has a lot of similarities to + // attempt_allocation_slow().
The reason these two were not merged + into a single one is that such a method would require several "if + allocation is not humongous do this, otherwise do that" + conditional paths which would obscure its flow. In fact, an early + version of this code did use a unified method which was harder to + follow and, as a result, it had subtle bugs that were hard to + track down. So keeping these two methods separate allows each to + be more readable. It will be good to keep these two in sync as + much as possible. + + assert_heap_not_locked_and_not_at_safepoint(); + assert(isHumongous(word_size), "attempt_allocation_humongous() " + "should only be called for humongous allocations"); + + // We will loop until a) we manage to successfully perform the + // allocation or b) we successfully schedule a collection which + // fails to perform the allocation. b) is the only case when we'll + // return NULL. + HeapWord* result = NULL; + for (int try_count = 1; /* we'll return */; try_count += 1) { + bool should_try_gc; + unsigned int gc_count_before; + + { + MutexLockerEx x(Heap_lock); + + // Given that humongous objects are not allocated in young + // regions, we'll first try to do the allocation without doing a + // collection hoping that there's enough space in the heap. + result = humongous_obj_allocate(word_size); + if (result != NULL) { + return result; + } + + if (GC_locker::is_active_and_needs_gc()) { + should_try_gc = false; + } else { + // Read the GC count while still holding the Heap_lock. + gc_count_before = SharedHeap::heap()->total_collections(); + should_try_gc = true; + } + } + + if (should_try_gc) { + // If we failed to allocate the humongous object, we should try to + // do a collection pause (if we're allowed) in case it reclaims + // enough space for the allocation to succeed after the pause. + + bool succeeded; + result = do_collection_pause(word_size, gc_count_before, &succeeded); + if (result != NULL) { + assert(succeeded, "only way to get back a non-NULL result"); + return result; + } + + if (succeeded) { + // If we get here we successfully scheduled a collection which + // failed to allocate. No point in trying to allocate + // further. We'll just return NULL. + MutexLockerEx x(Heap_lock); + *gc_count_before_ret = SharedHeap::heap()->total_collections(); + return NULL; + } + } else { + GC_locker::stall_until_clear(); + } + + // We can reach here if we were unsuccessful in scheduling a + // collection (because another thread beat us to it) or if we were + // stalled due to the GC locker. In either case we should retry the + // allocation attempt in case another thread successfully + // performed a collection and reclaimed enough space. Give a + // warning if we seem to be looping forever.
+ + if ((QueuedAllocationWarningCount > 0) && + (try_count % QueuedAllocationWarningCount == 0)) { + warning("G1CollectedHeap::attempt_allocation_humongous() " + "retries %d times", try_count); + } + } + + ShouldNotReachHere(); + return NULL; +} + +HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, + bool expect_null_mutator_alloc_region) { assert_at_safepoint(true /* should_be_vm_thread */); - - HeapRegion* cur_alloc_region = _cur_alloc_region; - if (cur_alloc_region != NULL) { - assert(!cur_alloc_region->is_empty(), - "the current alloc region can never be empty"); - assert(cur_alloc_region->is_young(), - "the current alloc region should be young"); - - retire_cur_alloc_region_common(cur_alloc_region); - } - assert(_cur_alloc_region == NULL, "post-condition"); + assert(_mutator_alloc_region.get() == NULL || + !expect_null_mutator_alloc_region, + "the current alloc region was unexpectedly found to be non-NULL"); + + if (!isHumongous(word_size)) { + return _mutator_alloc_region.attempt_allocation_locked(word_size, + false /* bot_updates */); + } else { + return humongous_obj_allocate(word_size); + } + + ShouldNotReachHere(); } void G1CollectedHeap::abandon_gc_alloc_regions() { @@ -1401,7 +1192,8 @@ TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty); - TraceMemoryManagerStats tms(true /* fullGC */); + TraceCollectorStats tcs(g1mm()->full_collection_counters()); + TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); double start = os::elapsedTime(); g1_policy()->record_full_collection_start(); @@ -1417,8 +1209,8 @@ if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification + gclog_or_tty->print(" VerifyBeforeGC:"); prepare_for_verify(); - gclog_or_tty->print(" VerifyBeforeGC:"); Universe::verify(true); } @@ -1439,9 +1231,8 @@ concurrent_mark()->abort(); // Make sure we'll choose a new allocation region afterwards. - abandon_cur_alloc_region(); + release_mutator_alloc_region(); abandon_gc_alloc_regions(); - assert(_cur_alloc_region == NULL, "Invariant."); g1_rem_set()->cleanupHRRS(); tear_down_region_lists(); @@ -1547,6 +1338,8 @@ // evacuation pause. clear_cset_fast_test(); + init_mutator_alloc_region(); + double end = os::elapsedTime(); g1_policy()->record_full_collection_end(); @@ -1578,6 +1371,7 @@ if (PrintHeapAtGC) { Universe::print_heap_after_gc(); } + g1mm()->update_counters(); return true; } @@ -1720,8 +1514,9 @@ *succeeded = true; // Let's attempt the allocation first. 
- HeapWord* result = attempt_allocation_at_safepoint(word_size, - false /* expect_null_cur_alloc_region */); + HeapWord* result = + attempt_allocation_at_safepoint(word_size, + false /* expect_null_mutator_alloc_region */); if (result != NULL) { assert(*succeeded, "sanity"); return result; @@ -1748,7 +1543,7 @@ // Retry the allocation result = attempt_allocation_at_safepoint(word_size, - true /* expect_null_cur_alloc_region */); + true /* expect_null_mutator_alloc_region */); if (result != NULL) { assert(*succeeded, "sanity"); return result; @@ -1765,7 +1560,7 @@ // Retry the allocation once more result = attempt_allocation_at_safepoint(word_size, - true /* expect_null_cur_alloc_region */); + true /* expect_null_mutator_alloc_region */); if (result != NULL) { assert(*succeeded, "sanity"); return result; @@ -1796,7 +1591,7 @@ if (expand(expand_bytes)) { verify_region_sets_optional(); return attempt_allocation_at_safepoint(word_size, - false /* expect_null_cur_alloc_region */); + false /* expect_null_mutator_alloc_region */); } return NULL; } @@ -1940,7 +1735,6 @@ _evac_failure_scan_stack(NULL) , _mark_in_progress(false), _cg1r(NULL), _summary_bytes_used(0), - _cur_alloc_region(NULL), _refine_cte_cl(NULL), _full_collection(false), _free_list("Master Free List"), @@ -2099,7 +1893,6 @@ _g1_max_committed = _g1_committed; _hrs = new HeapRegionSeq(_expansion_regions); guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); - guarantee(_cur_alloc_region == NULL, "from constructor"); // 6843694 - ensure that the maximum region index can fit // in the remembered set structures. @@ -2195,6 +1988,26 @@ // Do later initialization work for concurrent refinement. _cg1r->init(); + // Here we allocate the dummy full region that is required by the + // G1AllocRegion class. If we don't pass an address in the reserved + // space here, lots of asserts fire. + MemRegion mr(_g1_reserved.start(), HeapRegion::GrainWords); + HeapRegion* dummy_region = new HeapRegion(_bot_shared, mr, true); + // We'll re-use the same region whether the alloc region will + // require BOT updates or not and, if it doesn't, then a non-young + // region will complain that it cannot support allocations without + // BOT updates. So we'll tag the dummy region as young to avoid that. + dummy_region->set_young(); + // Make sure it's full. + dummy_region->set_top(dummy_region->end()); + G1AllocRegion::setup(this, dummy_region); + + init_mutator_alloc_region(); + + // Do create of the monitoring and management support so that + // values in the heap have been properly initialized. + _g1mm = new G1MonitoringSupport(this, &_g1_storage); + return JNI_OK; } @@ -2261,7 +2074,7 @@ "Should be owned on this thread's behalf."); size_t result = _summary_bytes_used; // Read only once in case it is set to NULL concurrently - HeapRegion* hr = _cur_alloc_region; + HeapRegion* hr = _mutator_alloc_region.get(); if (hr != NULL) result += hr->used(); return result; @@ -2324,13 +2137,11 @@ // to free(), resulting in a SIGSEGV. Note that this doesn't appear // to be a problem in the optimized build, since the two loads of the // current allocation region field are optimized away. - HeapRegion* car = _cur_alloc_region; - - // FIXME: should iterate over all regions? 
- if (car == NULL) { + HeapRegion* hr = _mutator_alloc_region.get(); + if (hr == NULL) { return 0; } - return car->free(); + return hr->free(); } bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { @@ -2339,6 +2150,28 @@ (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); } +#ifndef PRODUCT +void G1CollectedHeap::allocate_dummy_regions() { + // Let's fill up most of the region + size_t word_size = HeapRegion::GrainWords - 1024; + // And as a result the region we'll allocate will be humongous. + guarantee(isHumongous(word_size), "sanity"); + + for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) { + // Let's use the existing mechanism for the allocation + HeapWord* dummy_obj = humongous_obj_allocate(word_size); + if (dummy_obj != NULL) { + MemRegion mr(dummy_obj, word_size); + CollectedHeap::fill_with_object(mr); + } else { + // If we can't allocate once, we probably cannot allocate + // again. Let's get out of the loop. + break; + } + } +} +#endif // !PRODUCT + void G1CollectedHeap::increment_full_collections_completed(bool concurrent) { MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); @@ -2781,16 +2614,12 @@ // since we can't allow tlabs to grow big enough to accomodate // humongous objects. - // We need to store the cur alloc region locally, since it might change - // between when we test for NULL and when we use it later. - ContiguousSpace* cur_alloc_space = _cur_alloc_region; + HeapRegion* hr = _mutator_alloc_region.get(); size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; - - if (cur_alloc_space == NULL) { + if (hr == NULL) { return max_tlab_size; } else { - return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize), - max_tlab_size); + return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size); } } @@ -3007,17 +2836,26 @@ bool silent, bool use_prev_marking) { if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { - if (!silent) { gclog_or_tty->print("roots "); } + if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); } VerifyRootsClosure rootsCl(use_prev_marking); CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false); - process_strong_roots(true, // activate StrongRootsScope - false, - SharedHeap::SO_AllClasses, + // We apply the relevant closures to all the oops in the + // system dictionary, the string table and the code cache. + const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; + process_strong_roots(true, // activate StrongRootsScope + true, // we set "collecting perm gen" to true, + // so we don't reset the dirty cards in the perm gen. + SharedHeap::ScanningOption(so), // roots scanning options &rootsCl, &blobsCl, &rootsCl); + // Since we used "collecting_perm_gen" == true above, we will not have + // checked the refs from perm into the G1-collected heap. We check those + // references explicitly below. Whether the relevant cards are dirty + // is checked further below in the rem set verification. 
+ if (!silent) { gclog_or_tty->print("Permgen roots "); } + perm_gen()->oop_iterate(&rootsCl); bool failures = rootsCl.failures(); - rem_set()->invalidate(perm_gen()->used_region(), false); if (!silent) { gclog_or_tty->print("HeapRegionSets "); } verify_region_sets(); if (!silent) { gclog_or_tty->print("HeapRegions "); } @@ -3364,6 +3202,7 @@ } verify_region_sets_optional(); + verify_dirty_young_regions(); { // This call will decide whether this pause is an initial-mark @@ -3393,7 +3232,8 @@ TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty); - TraceMemoryManagerStats tms(false /* fullGC */); + TraceCollectorStats tcs(g1mm()->incremental_collection_counters()); + TraceMemoryManagerStats tms(false /* fullGC */, gc_cause()); // If the secondary_free_list is not empty, append it to the // free_list. No need to wait for the cleanup operation to finish; @@ -3425,8 +3265,8 @@ if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification + gclog_or_tty->print(" VerifyBeforeGC:"); prepare_for_verify(); - gclog_or_tty->print(" VerifyBeforeGC:"); Universe::verify(false); } @@ -3442,7 +3282,7 @@ // Forget the current alloc region (we might even choose it to be part // of the collection set!). - abandon_cur_alloc_region(); + release_mutator_alloc_region(); // The elapsed time induced by the start time below deliberately elides // the possible verification above. @@ -3483,8 +3323,9 @@ // progress, this will be zero. _cm->set_oops_do_bound(); - if (mark_in_progress()) + if (mark_in_progress()) { concurrent_mark()->newCSet(); + } #if YOUNG_LIST_VERBOSE gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); @@ -3494,6 +3335,16 @@ g1_policy()->choose_collection_set(target_pause_time_ms); + // We have chosen the complete collection set. If marking is + // active then, we clear the region fields of any of the + // concurrent marking tasks whose region fields point into + // the collection set as these values will become stale. This + // will cause the owning marking threads to claim a new region + // when marking restarts. + if (mark_in_progress()) { + concurrent_mark()->reset_active_task_region_fields_in_cset(); + } + // Nothing to do if we were unable to choose a collection set. 
#if G1_REM_SET_LOGGING gclog_or_tty->print_cr("\nAfter pause, heap:"); @@ -3567,12 +3418,16 @@ doConcurrentMark(); } + allocate_dummy_regions(); + #if YOUNG_LIST_VERBOSE gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); _young_list->print(); g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); #endif // YOUNG_LIST_VERBOSE + init_mutator_alloc_region(); + double end_time_sec = os::elapsedTime(); double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; g1_policy()->record_pause_time_ms(pause_time_ms); @@ -3628,6 +3483,8 @@ if (PrintHeapAtGC) { Universe::print_heap_after_gc(); } + g1mm()->update_counters(); + if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) && (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { @@ -3655,6 +3512,15 @@ return gclab_word_size; } +void G1CollectedHeap::init_mutator_alloc_region() { + assert(_mutator_alloc_region.get() == NULL, "pre-condition"); + _mutator_alloc_region.init(); +} + +void G1CollectedHeap::release_mutator_alloc_region() { + _mutator_alloc_region.release(); + assert(_mutator_alloc_region.get() == NULL, "post-condition"); +} void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); @@ -3879,7 +3745,7 @@ if (r->is_empty()) { // We didn't actually allocate anything in it; let's just put // it back on the free list. - _free_list.add_as_tail(r); + _free_list.add_as_head(r); } else if (_retain_gc_alloc_region[ap] && !totally) { // retain it so that we can use it at the beginning of the next GC _retained_gc_alloc_regions[ap] = r; @@ -4151,6 +4017,9 @@ oop G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop old) { + assert(obj_in_cs(old), + err_msg("obj: "PTR_FORMAT" should still be in the CSet", + (HeapWord*) old)); markOop m = old->mark(); oop forward_ptr = old->forward_to_atomic(old); if (forward_ptr == NULL) { @@ -4173,7 +4042,13 @@ } return old; } else { - // Someone else had a place to copy it. + // Forward-to-self failed. Either someone else managed to allocate + // space for this object (old != forward_ptr) or they beat us in + // self-forwarding it (old == forward_ptr). 
+ assert(old == forward_ptr || !obj_in_cs(forward_ptr), + err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" " + "should not be in the CSet", + (HeapWord*) old, (HeapWord*) forward_ptr)); return forward_ptr; } } @@ -4484,11 +4359,10 @@ T heap_oop = oopDesc::load_heap_oop(p); if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop(heap_oop); - assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)), - "shouldn't still be in the CSet if evacuation didn't fail."); HeapWord* addr = (HeapWord*)obj; - if (_g1->is_in_g1_reserved(addr)) + if (_g1->is_in_g1_reserved(addr)) { _cm->grayRoot(oop(addr)); + } } } @@ -5013,7 +4887,7 @@ *pre_used += hr->used(); hr->hr_clear(par, true /* clear_space */); - free_list->add_as_tail(hr); + free_list->add_as_head(hr); } void G1CollectedHeap::free_humongous_region(HeapRegion* hr, @@ -5065,7 +4939,7 @@ } if (free_list != NULL && !free_list->is_empty()) { MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); - _free_list.add_as_tail(free_list); + _free_list.add_as_head(free_list); } if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) { MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag); @@ -5137,22 +5011,52 @@ #ifndef PRODUCT class G1VerifyCardTableCleanup: public HeapRegionClosure { + G1CollectedHeap* _g1h; CardTableModRefBS* _ct_bs; public: - G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) - : _ct_bs(ct_bs) - { } - virtual bool doHeapRegion(HeapRegion* r) - { - MemRegion mr(r->bottom(), r->end()); + G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs) + : _g1h(g1h), _ct_bs(ct_bs) { } + virtual bool doHeapRegion(HeapRegion* r) { if (r->is_survivor()) { - _ct_bs->verify_dirty_region(mr); + _g1h->verify_dirty_region(r); } else { - _ct_bs->verify_clean_region(mr); + _g1h->verify_not_dirty_region(r); } return false; } }; + +void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) { + // All of the region should be clean. + CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); + MemRegion mr(hr->bottom(), hr->end()); + ct_bs->verify_not_dirty_region(mr); +} + +void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) { + // We cannot guarantee that [bottom(),end()] is dirty. Threads + // dirty allocated blocks as they allocate them. The thread that + // retires each region and replaces it with a new one will do a + // maximal allocation to fill in [pre_dummy_top(),end()] but will + // not dirty that area (one less thing to have to do while holding + // a lock). So we can only verify that [bottom(),pre_dummy_top()] + // is dirty. 
+ CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set(); + MemRegion mr(hr->bottom(), hr->pre_dummy_top()); + ct_bs->verify_dirty_region(mr); +} + +void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) { + CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set(); + for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) { + verify_dirty_region(hr); + } +} + +void G1CollectedHeap::verify_dirty_young_regions() { + verify_dirty_young_list(_young_list->first_region()); + verify_dirty_young_list(_young_list->first_survivor_region()); +} #endif void G1CollectedHeap::cleanUpCardTable() { @@ -5188,7 +5092,7 @@ g1_policy()->record_clear_ct_time( elapsed * 1000.0); #ifndef PRODUCT if (G1VerifyCTCleanup || VerifyAfterGC) { - G1VerifyCardTableCleanup cleanup_verifier(ct_bs); + G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs); heap_region_iterate(&cleanup_verifier); } #endif @@ -5500,6 +5404,45 @@ } } +HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size, + bool force) { + assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); + assert(!force || g1_policy()->can_expand_young_list(), + "if force is true we should be able to expand the young list"); + if (force || !g1_policy()->is_young_list_full()) { + HeapRegion* new_alloc_region = new_region(word_size, + false /* do_expand */); + if (new_alloc_region != NULL) { + g1_policy()->update_region_num(true /* next_is_young */); + set_region_short_lived_locked(new_alloc_region); + g1mm()->update_eden_counters(); + return new_alloc_region; + } + } + return NULL; +} + +void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region, + size_t allocated_bytes) { + assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); + assert(alloc_region->is_young(), "all mutator alloc regions should be young"); + + g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); + _summary_bytes_used += allocated_bytes; +} + +HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size, + bool force) { + return _g1h->new_mutator_alloc_region(word_size, force); +} + +void MutatorAllocRegion::retire_region(HeapRegion* alloc_region, + size_t allocated_bytes) { + _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes); +} + +// Heap region set verification + class VerifyRegionListsClosure : public HeapRegionClosure { private: HumongousRegionSet* _humongous_set;
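The new attempt_allocation() / attempt_allocation_slow() pair introduced in this file implements a two-level protocol: a lock-free attempt against the current mutator alloc region first, then a retry under the Heap_lock that may install a fresh region, force young-list expansion, schedule a pause, or stall on the GC locker. The following is a minimal, self-contained sketch of that shape only; every type and name in it (Region, heap_lock, regions_left, and so on) is invented for illustration and is not HotSpot code.

// Self-contained sketch (all names invented, not HotSpot code) of the
// two-level shape of the new mutator allocation path: a lock-free
// bump-pointer attempt first, then a locked slow path that retries and
// replaces the active region.
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <mutex>

struct Region {
  std::atomic<size_t> top{0};
  size_t capacity;
  explicit Region(size_t cap) : capacity(cap) {}
  // Analogue of the CAS-based first-level attempt: succeeds only if the
  // region still has room, without taking any lock.
  bool allocate(size_t words) {
    size_t cur = top.load();
    while (cur + words <= capacity) {
      if (top.compare_exchange_weak(cur, cur + words)) return true;
    }
    return false;
  }
};

int main() {
  std::mutex heap_lock;       // plays the role of Heap_lock
  Region* current = new Region(8);
  size_t regions_left = 2;    // pretend the heap can hand out two more regions

  auto attempt_allocation = [&](size_t words) -> bool {
    if (current->allocate(words)) return true;   // first level: no lock
    std::lock_guard<std::mutex> x(heap_lock);    // second level: under the lock
    if (current->allocate(words)) return true;   // region may have been refilled
    if (regions_left == 0) return false;         // here the real code would try a GC
    --regions_left;
    delete current;
    current = new Region(8);                     // retire and replace the region
    return current->allocate(words);
  };

  for (int i = 0; i < 5; ++i) {
    std::printf("allocation %d -> %s\n", i, attempt_allocation(6) ? "ok" : "fail");
  }
  delete current;
  return 0;
}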
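The gc_count_before_ret value threaded through the new allocation methods exists so that mem_allocate() can hand the collection VM operation a count that was read while the Heap_lock was held; the operation's prologue can then drop the request if another collection has already run. Below is a tiny standalone sketch of that stale-request check; the names (collect_if_count_matches, total_collections) are invented and only mirror the idea.

// Tiny standalone sketch (invented names) of the "stale collection
// request" check that the gc_count_before value enables: a request is
// dropped if another collection already ran since the count was read
// under the lock.
#include <cstdio>
#include <mutex>

static std::mutex heap_lock;           // plays the role of Heap_lock
static unsigned total_collections = 0;

// Returns false if the request was stale (someone else collected first),
// which is roughly what a failed VM-op prologue means for the caller.
bool collect_if_count_matches(unsigned gc_count_before) {
  std::lock_guard<std::mutex> x(heap_lock);
  if (gc_count_before != total_collections) {
    return false;
  }
  ++total_collections;                 // "perform" the collection
  return true;
}

int main() {
  unsigned gc_count_before;
  {
    std::lock_guard<std::mutex> x(heap_lock);
    gc_count_before = total_collections;   // read while holding the lock
  }
  std::printf("first request: %s\n",
              collect_if_count_matches(gc_count_before) ? "ran" : "stale");
  std::printf("second request (same old count): %s\n",
              collect_if_count_matches(gc_count_before) ? "ran" : "stale");
  return 0;
}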
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -26,8 +26,11 @@ #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP #include "gc_implementation/g1/concurrentMark.hpp" +#include "gc_implementation/g1/g1AllocRegion.hpp" #include "gc_implementation/g1/g1RemSet.hpp" +#include "gc_implementation/g1/g1MonitoringSupport.hpp" #include "gc_implementation/g1/heapRegionSets.hpp" +#include "gc_implementation/shared/hSpaceCounters.hpp" #include "gc_implementation/parNew/parGCAllocBuffer.hpp" #include "memory/barrierSet.hpp" #include "memory/memRegion.hpp" @@ -56,6 +59,7 @@ class ConcurrentMark; class ConcurrentMarkThread; class ConcurrentG1Refine; +class GenerationCounters; typedef OverflowTaskQueue<StarTask> RefToScanQueue; typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet; @@ -128,6 +132,15 @@ void print(); }; +class MutatorAllocRegion : public G1AllocRegion { +protected: + virtual HeapRegion* allocate_new_region(size_t word_size, bool force); + virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); +public: + MutatorAllocRegion() + : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { } +}; + class RefineCardTableEntryClosure; class G1CollectedHeap : public SharedHeap { friend class VM_G1CollectForAllocation; @@ -135,6 +148,7 @@ friend class VM_G1CollectFull; friend class VM_G1IncCollectionPause; friend class VMStructs; + friend class MutatorAllocRegion; // Closures used in implementation. friend class G1ParCopyHelper; @@ -197,12 +211,15 @@ // The sequence of all heap regions in the heap. HeapRegionSeq* _hrs; - // The region from which normal-sized objects are currently being - // allocated. May be NULL. - HeapRegion* _cur_alloc_region; + // Alloc region used to satisfy mutator allocation requests. + MutatorAllocRegion _mutator_alloc_region; - // Postcondition: cur_alloc_region == NULL. - void abandon_cur_alloc_region(); + // It resets the mutator alloc region before new allocations can take place. + void init_mutator_alloc_region(); + + // It releases the mutator alloc region. + void release_mutator_alloc_region(); + void abandon_gc_alloc_regions(); // The to-space memory regions into which objects are being copied during @@ -222,6 +239,9 @@ // current collection. HeapRegion* _gc_alloc_region_list; + // Helper for monitoring and management support. + G1MonitoringSupport* _g1mm; + // Determines PLAB size for a particular allocation purpose. static size_t desired_plab_sz(GCAllocPurpose purpose); @@ -284,6 +304,14 @@ // started is maintained in _total_full_collections in CollectedHeap. volatile unsigned int _full_collections_completed; + // This is a non-product method that is helpful for testing. It is + // called at the end of a GC and artificially expands the heap by + // allocating a number of dead regions. This way we can induce very + // frequent marking cycles and stress the cleanup / concurrent + // cleanup code more (as all the regions that will be allocated by + // this method will be found dead by the marking cycle). + void allocate_dummy_regions() PRODUCT_RETURN; + // These are macros so that, if the assert fires, we get the correct // line number, file, etc. @@ -360,27 +388,21 @@ G1CollectorPolicy* _g1_policy; // This is the second level of trying to allocate a new region. 
If - // new_region_work didn't find a region in the free_list, this call - // will check whether there's anything available in the - // secondary_free_list and/or wait for more regions to appear in that - // list, if _free_regions_coming is set. + // new_region() didn't find a region on the free_list, this call will + // check whether there's anything available on the + // secondary_free_list and/or wait for more regions to appear on + // that list, if _free_regions_coming is set. HeapRegion* new_region_try_secondary_free_list(); // Try to allocate a single non-humongous HeapRegion sufficient for // an allocation of the given word_size. If do_expand is true, // attempt to expand the heap if necessary to satisfy the allocation // request. - HeapRegion* new_region_work(size_t word_size, bool do_expand); + HeapRegion* new_region(size_t word_size, bool do_expand); - // Try to allocate a new region to be used for allocation by a - // mutator thread. Attempt to expand the heap if no region is + // Try to allocate a new region to be used for allocation by + // a GC thread. It will try to expand the heap if no region is // available. - HeapRegion* new_alloc_region(size_t word_size) { - return new_region_work(word_size, false /* do_expand */); - } - - // Try to allocate a new region to be used for allocation by a GC - // thread. Attempt to expand the heap if no region is available. HeapRegion* new_gc_alloc_region(int purpose, size_t word_size); // Attempt to satisfy a humongous allocation request of the given @@ -415,10 +437,6 @@ // * All non-TLAB allocation requests should go to mem_allocate() // and mem_allocate() should never be called with is_tlab == true. // - // * If the GC locker is active we currently stall until we can - // allocate a new young region. This will be changed in the - // near future (see CR 6994056). - // // * If either call cannot satisfy the allocation request using the // current allocating region, they will try to get a new one. If // this fails, they will attempt to do an evacuation pause and @@ -441,122 +459,38 @@ bool is_tlab, /* expected to be false */ bool* gc_overhead_limit_was_exceeded); - // The following methods, allocate_from_cur_allocation_region(), - // attempt_allocation(), attempt_allocation_locked(), - // replace_cur_alloc_region_and_allocate(), - // attempt_allocation_slow(), and attempt_allocation_humongous() - // have very awkward pre- and post-conditions with respect to - // locking: - // - // If they are called outside a safepoint they assume the caller - // holds the Heap_lock when it calls them. However, on exit they - // will release the Heap_lock if they return a non-NULL result, but - // keep holding the Heap_lock if they return a NULL result. The - // reason for this is that we need to dirty the cards that span - // allocated blocks on young regions to avoid having to take the - // slow path of the write barrier (for performance reasons we don't - // update RSets for references whose source is a young region, so we - // don't need to look at dirty cards on young regions). But, doing - // this card dirtying while holding the Heap_lock can be a - // scalability bottleneck, especially given that some allocation - // requests might be of non-trivial size (and the larger the region - // size is, the fewer allocations requests will be considered - // humongous, as the humongous size limit is a fraction of the - // region size). 
So, when one of these calls succeeds in allocating - // a block it does the card dirtying after it releases the Heap_lock - // which is why it will return without holding it. - // - // The above assymetry is the reason why locking / unlocking is done - // explicitly (i.e., with Heap_lock->lock() and - // Heap_lock->unlocked()) instead of using MutexLocker and - // MutexUnlocker objects. The latter would ensure that the lock is - // unlocked / re-locked at every possible exit out of the basic - // block. However, we only want that action to happen in selected - // places. - // - // Further, if the above methods are called during a safepoint, then - // naturally there's no assumption about the Heap_lock being held or - // there's no attempt to unlock it. The parameter at_safepoint - // indicates whether the call is made during a safepoint or not (as - // an optimization, to avoid reading the global flag with - // SafepointSynchronize::is_at_safepoint()). - // - // The methods share these parameters: - // - // * word_size : the size of the allocation request in words - // * at_safepoint : whether the call is done at a safepoint; this - // also determines whether a GC is permitted - // (at_safepoint == false) or not (at_safepoint == true) - // * do_dirtying : whether the method should dirty the allocated - // block before returning - // - // They all return either the address of the block, if they - // successfully manage to allocate it, or NULL. + // The following three methods take a gc_count_before_ret + // parameter which is used to return the GC count if the method + // returns NULL. Given that we are required to read the GC count + // while holding the Heap_lock, and these paths will take the + // Heap_lock at some point, it's easier to get them to read the GC + // count while holding the Heap_lock before they return NULL instead + // of the caller (namely: mem_allocate()) having to also take the + // Heap_lock just to read the GC count. + + // First-level mutator allocation attempt: try to allocate out of + // the mutator alloc region without taking the Heap_lock. This + // should only be used for non-humongous allocations. + inline HeapWord* attempt_allocation(size_t word_size, + unsigned int* gc_count_before_ret); - // It tries to satisfy an allocation request out of the current - // alloc region, which is passed as a parameter. It assumes that the - // caller has checked that the current alloc region is not NULL. - // Given that the caller has to check the current alloc region for - // at least NULL, it might as well pass it as the first parameter so - // that the method doesn't have to read it from the - // _cur_alloc_region field again. It is called from both - // attempt_allocation() and attempt_allocation_locked() and the - // with_heap_lock parameter indicates whether the caller was holding - // the heap lock when it called it or not. - inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region, - size_t word_size, - bool with_heap_lock); - - // First-level of allocation slow path: it attempts to allocate out - // of the current alloc region in a lock-free manner using a CAS. If - // that fails it takes the Heap_lock and calls - // attempt_allocation_locked() for the second-level slow path. - inline HeapWord* attempt_allocation(size_t word_size); - - // Second-level of allocation slow path: while holding the Heap_lock - // it tries to allocate out of the current alloc region and, if that - // fails, tries to allocate out of a new current alloc region. 
- inline HeapWord* attempt_allocation_locked(size_t word_size); + // Second-level mutator allocation attempt: take the Heap_lock and + // retry the allocation attempt, potentially scheduling a GC + // pause. This should only be used for non-humongous allocations. + HeapWord* attempt_allocation_slow(size_t word_size, + unsigned int* gc_count_before_ret); - // It assumes that the current alloc region has been retired and - // tries to allocate a new one. If it's successful, it performs the - // allocation out of the new current alloc region and updates - // _cur_alloc_region. Normally, it would try to allocate a new - // region if the young gen is not full, unless can_expand is true in - // which case it would always try to allocate a new region. - HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size, - bool at_safepoint, - bool do_dirtying, - bool can_expand); - - // Third-level of allocation slow path: when we are unable to - // allocate a new current alloc region to satisfy an allocation - // request (i.e., when attempt_allocation_locked() fails). It will - // try to do an evacuation pause, which might stall due to the GC - // locker, and retry the allocation attempt when appropriate. - HeapWord* attempt_allocation_slow(size_t word_size); + // Takes the Heap_lock and attempts a humongous allocation. It can + // potentially schedule a GC pause. + HeapWord* attempt_allocation_humongous(size_t word_size, + unsigned int* gc_count_before_ret); - // The method that tries to satisfy a humongous allocation - // request. If it cannot satisfy it it will try to do an evacuation - // pause to perhaps reclaim enough space to be able to satisfy the - // allocation request afterwards. - HeapWord* attempt_allocation_humongous(size_t word_size, - bool at_safepoint); - - // It does the common work when we are retiring the current alloc region. - inline void retire_cur_alloc_region_common(HeapRegion* cur_alloc_region); - - // It retires the current alloc region, which is passed as a - // parameter (since, typically, the caller is already holding on to - // it). It sets _cur_alloc_region to NULL. - void retire_cur_alloc_region(HeapRegion* cur_alloc_region); - - // It attempts to do an allocation immediately before or after an - // evacuation pause and can only be called by the VM thread. It has - // slightly different assumptions that the ones before (i.e., - // assumes that the current alloc region has been retired). + // Allocation attempt that should be called during safepoints (e.g., + // at the end of a successful GC). expect_null_mutator_alloc_region + // specifies whether the mutator alloc region is expected to be NULL + // or not. HeapWord* attempt_allocation_at_safepoint(size_t word_size, - bool expect_null_cur_alloc_region); + bool expect_null_mutator_alloc_region); // It dirties the cards that cover the block so that so that the post // write barrier never queues anything when updating objects on this @@ -583,6 +517,12 @@ // GC pause. void retire_alloc_region(HeapRegion* alloc_region, bool par); + // These two methods are the "callbacks" from the G1AllocRegion class. 
+ + HeapRegion* new_mutator_alloc_region(size_t word_size, bool force); + void retire_mutator_alloc_region(HeapRegion* alloc_region, + size_t allocated_bytes); + // - if explicit_gc is true, the GC is for a System.gc() or a heap // inspection request and should collect the entire heap // - if clear_all_soft_refs is true, all soft references should be @@ -616,6 +556,9 @@ HeapWord* expand_and_allocate(size_t word_size); public: + + G1MonitoringSupport* g1mm() { return _g1mm; } + // Expand the garbage-first heap by at least the given size (in bytes!). // Returns true if the heap was expanded by the requested amount; // false otherwise. @@ -1027,6 +970,11 @@ // The number of regions available for "regular" expansion. size_t expansion_regions() { return _expansion_regions; } + void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN; + void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN; + void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN; + void verify_dirty_young_regions() PRODUCT_RETURN; + // verify_region_sets() performs verification over the region // lists. It will be compiled in the product code to be used when // necessary (i.e., during heap verification). @@ -1061,7 +1009,7 @@ } void append_secondary_free_list() { - _free_list.add_as_tail(&_secondary_free_list); + _free_list.add_as_head(&_secondary_free_list); } void append_secondary_free_list_if_not_empty_with_lock() { @@ -1128,7 +1076,13 @@ return _g1_reserved.contains(p); } - // Returns a MemRegion that corresponds to the space that has been + // Returns a MemRegion that corresponds to the space that has been + // reserved for the heap + MemRegion g1_reserved() { + return _g1_reserved; + } + + // Returns a MemRegion that corresponds to the space that has been // committed in the heap MemRegion g1_committed() { return _g1_committed; @@ -1300,6 +1254,12 @@ return hr != NULL && hr->is_young(); } +#ifdef ASSERT + virtual bool is_in_partial_collection(const void* p); +#endif + + virtual bool is_scavengable(const void* addr); + // We don't need barriers for initializing stores to objects // in the young gen: for the SATB pre-barrier, there is no // pre-value that needs to be remembered; for the remembered-set
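MutatorAllocRegion, declared in this header, only overrides the two G1AllocRegion callbacks (allocate_new_region() and retire_region()); the base class keeps the current region and the allocation logic. The sketch below shows that division of labour in a self-contained form; the class and member names are invented stand-ins, not the real G1AllocRegion interface.

// Simplified stand-in (invented names) for the split between
// G1AllocRegion and MutatorAllocRegion: the base class owns the current
// region and the retry logic, the subclass only supplies how to obtain a
// new region and how to retire a used one.
#include <cstdio>

struct Region {
  int used = 0;
  int capacity = 4;
  bool allocate(int words) {
    if (used + words > capacity) return false;
    used += words;
    return true;
  }
};

class AllocRegionBase {
protected:
  Region* _current = nullptr;
  // The two "callbacks"; the real class declares allocate_new_region()
  // and retire_region() and MutatorAllocRegion overrides them.
  virtual Region* allocate_new_region() = 0;
  virtual void retire_region(Region* r) = 0;
public:
  virtual ~AllocRegionBase() { delete _current; }
  bool attempt_allocation(int words) {
    if (_current != nullptr && _current->allocate(words)) return true;
    if (_current != nullptr) retire_region(_current);
    _current = allocate_new_region();
    return _current != nullptr && _current->allocate(words);
  }
};

class MutatorAllocRegionSketch : public AllocRegionBase {
  int _regions_left = 2;
protected:
  Region* allocate_new_region() override {
    return _regions_left-- > 0 ? new Region() : nullptr;
  }
  void retire_region(Region* r) override {
    std::printf("retiring region with %d words used\n", r->used);
    delete r;
  }
};

int main() {
  MutatorAllocRegionSketch mar;
  for (int i = 0; i < 5; ++i) {
    std::printf("allocation %d -> %s\n", i, mar.attempt_allocation(3) ? "ok" : "fail");
  }
  return 0;
}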
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -27,6 +27,7 @@ #include "gc_implementation/g1/concurrentMark.hpp" #include "gc_implementation/g1/g1CollectedHeap.hpp" +#include "gc_implementation/g1/g1AllocRegion.inline.hpp" #include "gc_implementation/g1/g1CollectorPolicy.hpp" #include "gc_implementation/g1/heapRegionSeq.inline.hpp" #include "utilities/taskqueue.hpp" @@ -59,131 +60,23 @@ return r != NULL && r->in_collection_set(); } -// See the comment in the .hpp file about the locking protocol and -// assumptions of this method (and other related ones). inline HeapWord* -G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region, - size_t word_size, - bool with_heap_lock) { - assert_not_at_safepoint(); - assert(with_heap_lock == Heap_lock->owned_by_self(), - "with_heap_lock and Heap_lock->owned_by_self() should be a tautology"); - assert(cur_alloc_region != NULL, "pre-condition of the method"); - assert(cur_alloc_region->is_young(), - "we only support young current alloc regions"); - assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() " - "should not be used for humongous allocations"); - assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug."); - - assert(!cur_alloc_region->is_empty(), - err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty", - cur_alloc_region->bottom(), cur_alloc_region->end())); - HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size); - if (result != NULL) { - assert(is_in(result), "result should be in the heap"); - - if (with_heap_lock) { - Heap_lock->unlock(); - } - assert_heap_not_locked(); - // Do the dirtying after we release the Heap_lock. - dirty_young_block(result, word_size); - return result; - } - - if (with_heap_lock) { - assert_heap_locked(); - } else { - assert_heap_not_locked(); - } - return NULL; -} - -// See the comment in the .hpp file about the locking protocol and -// assumptions of this method (and other related ones). -inline HeapWord* -G1CollectedHeap::attempt_allocation(size_t word_size) { +G1CollectedHeap::attempt_allocation(size_t word_size, + unsigned int* gc_count_before_ret) { assert_heap_not_locked_and_not_at_safepoint(); - assert(!isHumongous(word_size), "attempt_allocation() should not be called " - "for humongous allocation requests"); - - HeapRegion* cur_alloc_region = _cur_alloc_region; - if (cur_alloc_region != NULL) { - HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region, - word_size, - false /* with_heap_lock */); - assert_heap_not_locked(); - if (result != NULL) { - return result; - } - } + assert(!isHumongous(word_size), "attempt_allocation() should not " + "be called for humongous allocation requests"); - // Our attempt to allocate lock-free failed as the current - // allocation region is either NULL or full. So, we'll now take the - // Heap_lock and retry. 
- Heap_lock->lock(); - - HeapWord* result = attempt_allocation_locked(word_size); - if (result != NULL) { - assert_heap_not_locked(); - return result; + HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size, + false /* bot_updates */); + if (result == NULL) { + result = attempt_allocation_slow(word_size, gc_count_before_ret); } - - assert_heap_locked(); - return NULL; -} - -inline void -G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) { - assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); - assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region, - "pre-condition of the call"); - assert(cur_alloc_region->is_young(), - "we only support young current alloc regions"); - - // The region is guaranteed to be young - g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region); - _summary_bytes_used += cur_alloc_region->used(); - _cur_alloc_region = NULL; -} - -inline HeapWord* -G1CollectedHeap::attempt_allocation_locked(size_t word_size) { - assert_heap_locked_and_not_at_safepoint(); - assert(!isHumongous(word_size), "attempt_allocation_locked() " - "should not be called for humongous allocation requests"); - - // First, reread the current alloc region and retry the allocation - // in case somebody replaced it while we were waiting to get the - // Heap_lock. - HeapRegion* cur_alloc_region = _cur_alloc_region; - if (cur_alloc_region != NULL) { - HeapWord* result = allocate_from_cur_alloc_region( - cur_alloc_region, word_size, - true /* with_heap_lock */); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - - // We failed to allocate out of the current alloc region, so let's - // retire it before getting a new one. - retire_cur_alloc_region(cur_alloc_region); + assert_heap_not_locked(); + if (result != NULL) { + dirty_young_block(result, word_size); } - - assert_heap_locked(); - // Try to get a new region and allocate out of it - HeapWord* result = replace_cur_alloc_region_and_allocate(word_size, - false, /* at_safepoint */ - true, /* do_dirtying */ - false /* can_expand */); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - - assert_heap_locked(); - return NULL; + return result; } // It dirties the cards that cover the block so that so that the post
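The trailing comment above belongs to dirty_young_block(), which the reworked attempt_allocation() now calls after a successful allocation, once the Heap_lock has been dropped: it dirties every card spanned by the freshly allocated block so the post write barrier never has to queue anything for stores into that block. Below is a small standalone illustration of that card-marking step; the card size, table layout and names are simplified stand-ins rather than the real CardTableModRefBS.

// Standalone illustration (simplified sizes and names) of the card
// dirtying step: mark every card spanned by the newly allocated block as
// dirty so the post write barrier has nothing left to do for it.
#include <cstddef>
#include <cstdio>
#include <vector>

const size_t kCardShift = 9;   // 512-byte cards, as in the real card table

void dirty_block(std::vector<char>& cards, size_t start, size_t size_in_bytes) {
  size_t first_card = start >> kCardShift;
  size_t last_card  = (start + size_in_bytes - 1) >> kCardShift;
  for (size_t c = first_card; c <= last_card; ++c) {
    cards[c] = 0;              // 0 stands for "dirty" in this sketch
  }
}

int main() {
  std::vector<char> cards(16, 1);                 // 1 stands for "clean"
  dirty_block(cards, /* start = */ 700, /* size = */ 2048);
  for (size_t c = 0; c < cards.size(); ++c) {
    std::printf("card %zu: %s\n", c, cards[c] == 0 ? "dirty" : "clean");
  }
  return 0;
}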
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -307,6 +307,7 @@ _par_last_termination_times_ms = new double[_parallel_gc_threads]; _par_last_termination_attempts = new double[_parallel_gc_threads]; _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads]; + _par_last_gc_worker_times_ms = new double[_parallel_gc_threads]; // start conservatively _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis; @@ -911,6 +912,7 @@ _par_last_termination_times_ms[i] = -1234.0; _par_last_termination_attempts[i] = -1234.0; _par_last_gc_worker_end_times_ms[i] = -1234.0; + _par_last_gc_worker_times_ms[i] = -1234.0; } #endif @@ -1063,8 +1065,7 @@ void G1CollectorPolicy::print_par_stats(int level, const char* str, - double* data, - bool summary) { + double* data) { double min = data[0], max = data[0]; double total = 0.0; LineBuffer buf(level); @@ -1078,20 +1079,15 @@ total += val; buf.append(" %3.1lf", val); } - if (summary) { - buf.append_and_print_cr(""); - double avg = total / (double) ParallelGCThreads; - buf.append(" "); - buf.append("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf", - avg, min, max); - } - buf.append_and_print_cr("]"); + buf.append_and_print_cr(""); + double avg = total / (double) ParallelGCThreads; + buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]", + avg, min, max, max - min); } void G1CollectorPolicy::print_par_sizes(int level, const char* str, - double* data, - bool summary) { + double* data) { double min = data[0], max = data[0]; double total = 0.0; LineBuffer buf(level); @@ -1105,14 +1101,10 @@ total += val; buf.append(" %d", (int) val); } - if (summary) { - buf.append_and_print_cr(""); - double avg = total / (double) ParallelGCThreads; - buf.append(" "); - buf.append("Sum: %d, Avg: %d, Min: %d, Max: %d", - (int)total, (int)avg, (int)min, (int)max); - } - buf.append_and_print_cr("]"); + buf.append_and_print_cr(""); + double avg = total / (double) ParallelGCThreads; + buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]", + (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min); } void G1CollectorPolicy::print_stats (int level, @@ -1421,22 +1413,22 @@ } if (parallel) { print_stats(1, "Parallel Time", _cur_collection_par_time_ms); - print_par_stats(2, "GC Worker Start Time", - _par_last_gc_worker_start_times_ms, false); + print_par_stats(2, "GC Worker Start Time", _par_last_gc_worker_start_times_ms); print_par_stats(2, "Update RS", _par_last_update_rs_times_ms); - print_par_sizes(3, "Processed Buffers", - _par_last_update_rs_processed_buffers, true); - print_par_stats(2, "Ext Root Scanning", - _par_last_ext_root_scan_times_ms); - print_par_stats(2, "Mark Stack Scanning", - _par_last_mark_stack_scan_times_ms); + print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers); + print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms); + print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms); print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms); print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms); print_par_stats(2, "Termination", _par_last_termination_times_ms); - print_par_sizes(3, "Termination Attempts", - _par_last_termination_attempts, true); - print_par_stats(2, "GC Worker End Time", - _par_last_gc_worker_end_times_ms, false); + print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts); + 
print_par_stats(2, "GC Worker End Time", _par_last_gc_worker_end_times_ms); + + for (int i = 0; i < _parallel_gc_threads; i++) { + _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i]; + } + print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms); + print_stats(2, "Other", parallel_other_time); print_stats(1, "Clear CT", _cur_clear_ct_time_ms); } else {
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -182,6 +182,7 @@ double* _par_last_termination_times_ms; double* _par_last_termination_attempts; double* _par_last_gc_worker_end_times_ms; + double* _par_last_gc_worker_times_ms; // indicates that we are in young GC mode bool _in_young_gc_mode; @@ -569,11 +570,8 @@ void print_stats(int level, const char* str, double value); void print_stats(int level, const char* str, int value); - void print_par_stats(int level, const char* str, double* data) { - print_par_stats(level, str, data, true); - } - void print_par_stats(int level, const char* str, double* data, bool summary); - void print_par_sizes(int level, const char* str, double* data, bool summary); + void print_par_stats(int level, const char* str, double* data); + void print_par_sizes(int level, const char* str, double* data); void check_other_times(int level, NumberSeq* other_times_ms,
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1MonitoringSupport.hpp" +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" +#include "gc_implementation/g1/g1CollectorPolicy.hpp" + +G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h, + VirtualSpace* g1_storage_addr) : + _g1h(g1h), + _incremental_collection_counters(NULL), + _full_collection_counters(NULL), + _non_young_collection_counters(NULL), + _old_space_counters(NULL), + _young_collection_counters(NULL), + _eden_counters(NULL), + _from_counters(NULL), + _to_counters(NULL), + _g1_storage_addr(g1_storage_addr) +{ + // Counters for GC collections + // + // name "collector.0". In a generational collector this would be the + // young generation collection. + _incremental_collection_counters = + new CollectorCounters("G1 incremental collections", 0); + // name "collector.1". In a generational collector this would be the + // old generation collection. + _full_collection_counters = + new CollectorCounters("G1 stop-the-world full collections", 1); + + // timer sampling for all counters supporting sampling only update the + // used value. See the take_sample() method. G1 requires both used and + // capacity updated so sampling is not currently used. It might + // be sufficient to update all counters in take_sample() even though + // take_sample() only returns "used". When sampling was used, there + // were some anomolous values emitted which may have been the consequence + // of not updating all values simultaneously (i.e., see the calculation done + // in eden_space_used(), is it possbile that the values used to + // calculate either eden_used or survivor_used are being updated by + // the collector when the sample is being done?). + const bool sampled = false; + + // "Generation" and "Space" counters. + // + // name "generation.1" This is logically the old generation in + // generational GC terms. The "1, 1" parameters are for + // the n-th generation (=1) with 1 space. + // Counters are created from minCapacity, maxCapacity, and capacity + _non_young_collection_counters = + new GenerationCounters("whole heap", 1, 1, _g1_storage_addr); + + // name "generation.1.space.0" + // Counters are created from maxCapacity, capacity, initCapacity, + // and used. 
+ _old_space_counters = new HSpaceCounters("space", 0, + _g1h->max_capacity(), _g1h->capacity(), _non_young_collection_counters); + + // Young collection set + // name "generation.0". This is logically the young generation. + // The "0, 3" are paremeters for the n-th genertaion (=0) with 3 spaces. + // See _non_young_collection_counters for additional counters + _young_collection_counters = new GenerationCounters("young", 0, 3, NULL); + + // Replace "max_heap_byte_size() with maximum young gen size for + // g1Collectedheap + // name "generation.0.space.0" + // See _old_space_counters for additional counters + _eden_counters = new HSpaceCounters("eden", 0, + _g1h->max_capacity(), eden_space_committed(), + _young_collection_counters); + + // name "generation.0.space.1" + // See _old_space_counters for additional counters + // Set the arguments to indicate that this survivor space is not used. + _from_counters = new HSpaceCounters("s0", 1, (long) 0, (long) 0, + _young_collection_counters); + + // name "generation.0.space.2" + // See _old_space_counters for additional counters + _to_counters = new HSpaceCounters("s1", 2, + _g1h->max_capacity(), + survivor_space_committed(), + _young_collection_counters); +} + +size_t G1MonitoringSupport::overall_committed() { + return g1h()->capacity(); +} + +size_t G1MonitoringSupport::overall_used() { + return g1h()->used_unlocked(); +} + +size_t G1MonitoringSupport::eden_space_committed() { + return MAX2(eden_space_used(), (size_t) HeapRegion::GrainBytes); +} + +size_t G1MonitoringSupport::eden_space_used() { + size_t young_list_length = g1h()->young_list()->length(); + size_t eden_used = young_list_length * HeapRegion::GrainBytes; + size_t survivor_used = survivor_space_used(); + eden_used = subtract_up_to_zero(eden_used, survivor_used); + return eden_used; +} + +size_t G1MonitoringSupport::survivor_space_committed() { + return MAX2(survivor_space_used(), + (size_t) HeapRegion::GrainBytes); +} + +size_t G1MonitoringSupport::survivor_space_used() { + size_t survivor_num = g1h()->g1_policy()->recorded_survivor_regions(); + size_t survivor_used = survivor_num * HeapRegion::GrainBytes; + return survivor_used; +} + +size_t G1MonitoringSupport::old_space_committed() { + size_t committed = overall_committed(); + size_t eden_committed = eden_space_committed(); + size_t survivor_committed = survivor_space_committed(); + committed = subtract_up_to_zero(committed, eden_committed); + committed = subtract_up_to_zero(committed, survivor_committed); + committed = MAX2(committed, (size_t) HeapRegion::GrainBytes); + return committed; +} + +// See the comment near the top of g1MonitoringSupport.hpp for +// an explanation of these calculations for "used" and "capacity". 
+size_t G1MonitoringSupport::old_space_used() { + size_t used = overall_used(); + size_t eden_used = eden_space_used(); + size_t survivor_used = survivor_space_used(); + used = subtract_up_to_zero(used, eden_used); + used = subtract_up_to_zero(used, survivor_used); + return used; +} + +void G1MonitoringSupport::update_counters() { + if (UsePerfData) { + eden_counters()->update_capacity(eden_space_committed()); + eden_counters()->update_used(eden_space_used()); + to_counters()->update_capacity(survivor_space_committed()); + to_counters()->update_used(survivor_space_used()); + old_space_counters()->update_capacity(old_space_committed()); + old_space_counters()->update_used(old_space_used()); + non_young_collection_counters()->update_all(); + } +} + +void G1MonitoringSupport::update_eden_counters() { + if (UsePerfData) { + eden_counters()->update_capacity(eden_space_committed()); + eden_counters()->update_used(eden_space_used()); + } +}
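As the functions above show, the per-pool "used" figures are derived from only three sampled inputs (overall used bytes, young-region count and survivor-region count) rather than by walking regions. A standalone sketch of that derivation, with an illustrative region size and made-up counts:

    #include <cstddef>
    #include <cstdio>

    // Saturating subtraction, mirroring subtract_up_to_zero() in the patch.
    static size_t sub_to_zero(size_t x, size_t y) { return x > y ? x - y : 0; }

    int main() {
      const size_t region_size   = 1 * 1024 * 1024;  // hypothetical 1 MB regions
      const size_t overall_used  = 500 * region_size;
      const size_t young_regions = 40;               // eden + survivors
      const size_t surv_regions  = 8;

      size_t survivor_used = surv_regions * region_size;
      size_t eden_used     = sub_to_zero(young_regions * region_size, survivor_used);
      size_t old_used      = sub_to_zero(sub_to_zero(overall_used, eden_used),
                                         survivor_used);

      std::printf("eden=%zu survivor=%zu old=%zu bytes\n",
                  eden_used, survivor_used, old_used);
      return 0;
    }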
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP + +#include "gc_implementation/shared/hSpaceCounters.hpp" + +class G1CollectedHeap; +class G1SpaceMonitoringSupport; + +// Class for monitoring logical spaces in G1. +// G1 defines a set of regions as a young +// collection (analogous to a young generation). +// The young collection is a logical generation +// with no fixed chunk (see space.hpp) reflecting +// the address space for the generation. In addition +// to the young collection there is its complement +// the non-young collection that is simply the regions +// not in the young collection. The non-young collection +// is treated here as a logical old generation only +// because the monitoring tools expect a generational +// heap. The monitoring tools expect that a Space +// (see space.hpp) exists that describe the +// address space of young collection and non-young +// collection and such a view is provided here. +// +// This class provides interfaces to access +// the value of variables for the young collection +// that include the "capacity" and "used" of the +// young collection along with constant values +// for the minimum and maximum capacities for +// the logical spaces. Similarly for the non-young +// collection. +// +// Also provided are counters for G1 concurrent collections +// and stop-the-world full heap collecitons. +// +// Below is a description of how "used" and "capactiy" +// (or committed) is calculated for the logical spaces. +// +// 1) The used space calculation for a pool is not necessarily +// independent of the others. We can easily get from G1 the overall +// used space in the entire heap, the number of regions in the young +// generation (includes both eden and survivors), and the number of +// survivor regions. So, from that we calculate: +// +// survivor_used = survivor_num * region_size +// eden_used = young_region_num * region_size - survivor_used +// old_gen_used = overall_used - eden_used - survivor_used +// +// Note that survivor_used and eden_used are upper bounds. To get the +// actual value we would have to iterate over the regions and add up +// ->used(). But that'd be expensive. So, we'll accept some lack of +// accuracy for those two. 
But, we have to be careful when calculating +// old_gen_used, in case we subtract from overall_used more then the +// actual number and our result goes negative. +// +// 2) Calculating the used space is straightforward, as described +// above. However, how do we calculate the committed space, given that +// we allocate space for the eden, survivor, and old gen out of the +// same pool of regions? One way to do this is to use the used value +// as also the committed value for the eden and survivor spaces and +// then calculate the old gen committed space as follows: +// +// old_gen_committed = overall_committed - eden_committed - survivor_committed +// +// Maybe a better way to do that would be to calculate used for eden +// and survivor as a sum of ->used() over their regions and then +// calculate committed as region_num * region_size (i.e., what we use +// to calculate the used space now). This is something to consider +// in the future. +// +// 3) Another decision that is again not straightforward is what is +// the max size that each memory pool can grow to. One way to do this +// would be to use the committed size for the max for the eden and +// survivors and calculate the old gen max as follows (basically, it's +// a similar pattern to what we use for the committed space, as +// described above): +// +// old_gen_max = overall_max - eden_max - survivor_max +// +// Unfortunately, the above makes the max of each pool fluctuate over +// time and, even though this is allowed according to the spec, it +// broke several assumptions in the M&M framework (there were cases +// where used would reach a value greater than max). So, for max we +// use -1, which means "undefined" according to the spec. +// +// 4) Now, there is a very subtle issue with all the above. The +// framework will call get_memory_usage() on the three pools +// asynchronously. As a result, each call might get a different value +// for, say, survivor_num which will yield inconsistent values for +// eden_used, survivor_used, and old_gen_used (as survivor_num is used +// in the calculation of all three). This would normally be +// ok. However, it's possible that this might cause the sum of +// eden_used, survivor_used, and old_gen_used to go over the max heap +// size and this seems to sometimes cause JConsole (and maybe other +// clients) to get confused. There's not a really an easy / clean +// solution to this problem, due to the asynchrounous nature of the +// framework. + +class G1MonitoringSupport : public CHeapObj { + G1CollectedHeap* _g1h; + VirtualSpace* _g1_storage_addr; + + // jstat performance counters + // incremental collections both fully and partially young + CollectorCounters* _incremental_collection_counters; + // full stop-the-world collections + CollectorCounters* _full_collection_counters; + // young collection set counters. The _eden_counters, + // _from_counters, and _to_counters are associated with + // this "generational" counter. + GenerationCounters* _young_collection_counters; + // non-young collection set counters. The _old_space_counters + // below are associated with this "generational" counter. + GenerationCounters* _non_young_collection_counters; + // Counters for the capacity and used for + // the whole heap + HSpaceCounters* _old_space_counters; + // the young collection + HSpaceCounters* _eden_counters; + // the survivor collection (only one, _to_counters, is actively used) + HSpaceCounters* _from_counters; + HSpaceCounters* _to_counters; + + // It returns x - y if x > y, 0 otherwise. 
+ // As described in the comment above, some of the inputs to the + // calculations we have to do are obtained concurrently and hence + // may be inconsistent with each other. So, this provides a + // defensive way of performing the subtraction and avoids the value + // going negative (which would mean a very large result, given that + // the parameters are size_t). + static size_t subtract_up_to_zero(size_t x, size_t y) { + if (x > y) { + return x - y; + } else { + return 0; + } + } + + public: + G1MonitoringSupport(G1CollectedHeap* g1h, VirtualSpace* g1_storage_addr); + + G1CollectedHeap* g1h() { return _g1h; } + VirtualSpace* g1_storage_addr() { return _g1_storage_addr; } + + // Performance Counter accessors + void update_counters(); + void update_eden_counters(); + + CollectorCounters* incremental_collection_counters() { + return _incremental_collection_counters; + } + CollectorCounters* full_collection_counters() { + return _full_collection_counters; + } + GenerationCounters* non_young_collection_counters() { + return _non_young_collection_counters; + } + HSpaceCounters* old_space_counters() { return _old_space_counters; } + HSpaceCounters* eden_counters() { return _eden_counters; } + HSpaceCounters* from_counters() { return _from_counters; } + HSpaceCounters* to_counters() { return _to_counters; } + + // Monitoring support used by + // MemoryService + // jstat counters + size_t overall_committed(); + size_t overall_used(); + + size_t eden_space_committed(); + size_t eden_space_used(); + + size_t survivor_space_committed(); + size_t survivor_space_used(); + + size_t old_space_committed(); + size_t old_space_used(); +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
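Because the inputs are sampled concurrently, a term sampled later can exceed one sampled earlier; with plain size_t arithmetic the subtraction would wrap to a huge value, which is exactly what the clamp avoids. A tiny standalone illustration with made-up numbers:

    #include <cstddef>
    #include <cstdio>

    static size_t sub_to_zero(size_t x, size_t y) { return x > y ? x - y : 0; }

    int main() {
      size_t overall_used = 100;  // sampled first
      size_t eden_used    = 110;  // sampled later, after the young list grew
      std::printf("raw:     %zu\n", overall_used - eden_used);        // wraps to a huge value
      std::printf("clamped: %zu\n", sub_to_zero(overall_used, eden_used));  // 0
      return 0;
    }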
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -157,7 +157,6 @@ void set_try_claimed() { _try_claimed = true; } void scanCard(size_t index, HeapRegion *r) { - _cards_done++; DirtyCardToOopClosure* cl = r->new_dcto_closure(_oc, CardTableModRefBS::Precise, @@ -168,17 +167,14 @@ HeapWord* card_start = _bot_shared->address_for_index(index); HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words; Space *sp = SharedHeap::heap()->space_containing(card_start); - MemRegion sm_region; - if (ParallelGCThreads > 0) { - // first find the used area - sm_region = sp->used_region_at_save_marks(); - } else { - // The closure is not idempotent. We shouldn't look at objects - // allocated during the GC. - sm_region = sp->used_region_at_save_marks(); - } + MemRegion sm_region = sp->used_region_at_save_marks(); MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end)); - if (!mr.is_empty()) { + if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) { + // We make the card as "claimed" lazily (so races are possible + // but they're benign), which reduces the number of duplicate + // scans (the rsets of the regions in the cset can intersect). + _ct_bs->set_card_claimed(index); + _cards_done++; cl->do_MemRegion(mr); } } @@ -199,6 +195,9 @@ HeapRegionRemSet* hrrs = r->rem_set(); if (hrrs->iter_is_complete()) return false; // All done. if (!_try_claimed && !hrrs->claim_iter()) return false; + // If we ever free the collection set concurrently, we should also + // clear the card table concurrently therefore we won't need to + // add regions of the collection set to the dirty cards region. _g1h->push_dirty_cards_region(r); // If we didn't return above, then // _try_claimed || r->claim_iter() @@ -230,15 +229,10 @@ _g1h->push_dirty_cards_region(card_region); } - // If the card is dirty, then we will scan it during updateRS. - if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) { - // We make the card as "claimed" lazily (so races are possible but they're benign), - // which reduces the number of duplicate scans (the rsets of the regions in the cset - // can intersect). - if (!_ct_bs->is_card_claimed(card_index)) { - _ct_bs->set_card_claimed(card_index); - scanCard(card_index, card_region); - } + // If the card is dirty, then we will scan it during updateRS. + if (!card_region->in_collection_set() && + !_ct_bs->is_card_dirty(card_index)) { + scanCard(card_index, card_region); } } if (!_try_claimed) { @@ -246,8 +240,6 @@ } return false; } - // Set all cards back to clean. - void cleanup() {_g1h->cleanUpCardTable();} size_t cards_done() { return _cards_done;} size_t cards_looked_up() { return _cards;} }; @@ -566,8 +558,9 @@ update_rs_cl.set_region(r); HeapWord* stop_point = r->oops_on_card_seq_iterate_careful(scanRegion, - &filter_then_update_rs_cset_oop_cl, - false /* filter_young */); + &filter_then_update_rs_cset_oop_cl, + false /* filter_young */, + NULL /* card_ptr */); // Since this is performed in the event of an evacuation failure, we // we shouldn't see a non-null stop point @@ -735,12 +728,6 @@ (OopClosure*)&mux : (OopClosure*)&update_rs_oop_cl)); - // Undirty the card. - *card_ptr = CardTableModRefBS::clean_card_val(); - // We must complete this write before we do any of the reads below. - OrderAccess::storeload(); - // And process it, being careful of unallocated portions of TLAB's. 
- // The region for the current card may be a young region. The // current card may have been a card that was evicted from the // card cache. When the card was inserted into the cache, we had @@ -749,7 +736,7 @@ // and tagged as young. // // We wish to filter out cards for such a region but the current - // thread, if we're running conucrrently, may "see" the young type + // thread, if we're running concurrently, may "see" the young type // change at any time (so an earlier "is_young" check may pass or // fail arbitrarily). We tell the iteration code to perform this // filtering when it has been determined that there has been an actual @@ -759,7 +746,8 @@ HeapWord* stop_point = r->oops_on_card_seq_iterate_careful(dirtyRegion, &filter_then_update_rs_oop_cl, - filter_young); + filter_young, + card_ptr); // If stop_point is non-null, then we encountered an unallocated region // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
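The claim bit in the change above is set without an atomic operation, so two workers can occasionally both see a card as unclaimed and both scan it; the race only costs duplicate work. A single-threaded standalone model (not HotSpot code) of that check-then-set shape:

    #include <cstdio>
    #include <vector>

    // In the real code the claim flag lives in the card table and the race is
    // tolerated; this single-threaded model only shows the check-then-set shape.
    struct CardTable {
      std::vector<bool> claimed;
      explicit CardTable(size_t cards) : claimed(cards, false) {}
      bool is_card_claimed(size_t i) const { return claimed[i]; }
      void set_card_claimed(size_t i) { claimed[i] = true; }
    };

    static void maybe_scan(CardTable& ct, size_t index, int worker) {
      if (!ct.is_card_claimed(index)) {   // racy read in the real code
        ct.set_card_claimed(index);       // racy, benign write
        std::printf("worker %d scans card %zu\n", worker, index);
      }
    }

    int main() {
      CardTable ct(16);
      maybe_scan(ct, 3, 0);
      maybe_scan(ct, 3, 1);  // normally filtered out as already claimed
      return 0;
    }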
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,7 +47,9 @@ void G1SATBCardTableModRefBS::enqueue(oop pre_val) { - assert(pre_val->is_oop_or_null(true), "Error"); + // Nulls should have been already filtered. + assert(pre_val->is_oop(true), "Error"); + if (!JavaThread::satb_mark_queue_set().is_active()) return; Thread* thr = Thread::current(); if (thr->is_Java_thread()) { @@ -59,20 +61,6 @@ } } -// When we know the current java thread: -template <class T> void -G1SATBCardTableModRefBS::write_ref_field_pre_static(T* field, - oop new_val, - JavaThread* jt) { - if (!JavaThread::satb_mark_queue_set().is_active()) return; - T heap_oop = oopDesc::load_heap_oop(field); - if (!oopDesc::is_null(heap_oop)) { - oop pre_val = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(pre_val->is_oop(true /* ignore mark word */), "Error"); - jt->satb_mark_queue().enqueue(pre_val); - } -} - template <class T> void G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) { if (!JavaThread::satb_mark_queue_set().is_active()) return;
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,12 +37,11 @@ // snapshot-at-the-beginning marking. class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS { -private: +public: // Add "pre_val" to a set of objects that may have been disconnected from the // pre-marking object graph. static void enqueue(oop pre_val); -public: G1SATBCardTableModRefBS(MemRegion whole_heap, int max_covered_regions); @@ -61,10 +60,6 @@ } } - // When we know the current java thread: - template <class T> static void write_ref_field_pre_static(T* field, oop newVal, - JavaThread* jt); - // We export this to make it available in cases where the static // type of the barrier set is known. Note that it is non-virtual. template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
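enqueue() now assumes its callers have already filtered out null previous values, which the remaining inline_write_ref_field_pre() path does. A standalone sketch of the overall SATB pre-barrier shape (record the old value of a reference field before overwriting it); all names here are illustrative stand-ins, not the HotSpot types:

    #include <cstdio>
    #include <vector>

    struct Object {};                                  // stand-in for an oop
    static std::vector<Object*> satb_queue;            // stand-in for the SATB buffers
    static bool marking_active = true;                 // stand-in for the queue set's "active" flag

    static void enqueue(Object* pre_val) {
      // Callers have already filtered out nulls (matching the tightened assert).
      if (!marking_active) return;
      satb_queue.push_back(pre_val);
    }

    static void write_ref_field_pre(Object** field) {
      Object* pre_val = *field;
      if (pre_val != nullptr) {   // null filtering happens at the caller, not in enqueue()
        enqueue(pre_val);
      }
    }

    static void write_ref_field(Object** field, Object* new_val) {
      write_ref_field_pre(field);  // SATB pre-barrier: record the old value
      *field = new_val;            // the actual reference store
    }

    int main() {
      Object obj;
      Object* field = &obj;
      write_ref_field(&field, nullptr);
      std::printf("queued %zu pre-value(s)\n", satb_queue.size());
      return 0;
    }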
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -89,8 +89,9 @@ "The number of discovered reference objects to process before " \ "draining concurrent marking work queues.") \ \ - develop(bool, G1SATBBarrierPrintNullPreVals, false, \ - "If true, count frac of ptr writes with null pre-vals.") \ + experimental(bool, G1UseConcMarkReferenceProcessing, true, \ + "If true, enable reference discovery during concurrent " \ + "marking and reference processing at the end of remark.") \ \ product(intx, G1SATBBufferSize, 1*K, \ "Number of entries in an SATB log buffer.") \ @@ -138,19 +139,13 @@ develop(bool, G1RSCountHisto, false, \ "If true, print a histogram of RS occupancies after each pause") \ \ - develop(intx, G1PrintRegionLivenessInfo, 0, \ - "When > 0, print the occupancies of the <n> best and worst" \ - "regions.") \ + product(bool, G1PrintRegionLivenessInfo, false, \ + "Prints the liveness information for all regions in the heap " \ + "at the end of a marking cycle.") \ \ develop(bool, G1PrintParCleanupStats, false, \ "When true, print extra stats about parallel cleanup.") \ \ - develop(bool, G1DisablePreBarrier, false, \ - "Disable generation of pre-barrier (i.e., marking barrier) ") \ - \ - develop(bool, G1DisablePostBarrier, false, \ - "Disable generation of post-barrier (i.e., RS barrier) ") \ - \ product(intx, G1UpdateBufferSize, 256, \ "Size of an update buffer") \ \ @@ -193,6 +188,10 @@ develop(intx, G1ConcRSHotCardLimit, 4, \ "The threshold that defines (>=) a hot card.") \ \ + develop(intx, G1MaxHotCardCountSizePercent, 25, \ + "The maximum size of the hot card count cache as a " \ + "percentage of the number of cards for the maximum heap.") \ + \ develop(bool, G1PrintOopAppls, false, \ "When true, print applications of closures to external locs.") \ \ @@ -301,13 +300,22 @@ develop(uintx, G1StressConcRegionFreeingDelayMillis, 0, \ "Artificial delay during concurrent region freeing") \ \ + develop(uintx, G1DummyRegionsPerGC, 0, \ + "The number of dummy regions G1 will allocate at the end of " \ + "each evacuation pause in order to artificially fill up the " \ + "heap and stress the marking implementation.") \ + \ develop(bool, ReduceInitialCardMarksForG1, false, \ "When ReduceInitialCardMarks is true, this flag setting " \ " controls whether G1 allows the RICM optimization") \ \ develop(bool, G1ExitOnExpansionFailure, false, \ "Raise a fatal VM exit out of memory failure in the event " \ - " that heap expansion fails due to running out of swap.") + " that heap expansion fails due to running out of swap.") \ + \ + develop(uintx, G1ConcMarkForceOverflow, 0, \ + "The number of times we'll force an overflow during " \ + "concurrent marking") G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -360,6 +360,7 @@ set_young_index_in_cset(-1); uninstall_surv_rate_group(); set_young_type(NotYoung); + reset_pre_dummy_top(); if (!par) { // If this is parallel, this will be done later. @@ -375,6 +376,17 @@ if (clear_space) clear(SpaceDecorator::Mangle); } +void HeapRegion::par_clear() { + assert(used() == 0, "the region should have been already cleared"); + assert(capacity() == (size_t) HeapRegion::GrainBytes, + "should be back to normal"); + HeapRegionRemSet* hrrs = rem_set(); + hrrs->clear(); + CardTableModRefBS* ct_bs = + (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set(); + ct_bs->clear(MemRegion(bottom(), end())); +} + // <PREDICTION> void HeapRegion::calc_gc_efficiency() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); @@ -599,7 +611,15 @@ HeapRegion:: oops_on_card_seq_iterate_careful(MemRegion mr, FilterOutOfRegionClosure* cl, - bool filter_young) { + bool filter_young, + jbyte* card_ptr) { + // Currently, we should only have to clean the card if filter_young + // is true and vice versa. + if (filter_young) { + assert(card_ptr != NULL, "pre-condition"); + } else { + assert(card_ptr == NULL, "pre-condition"); + } G1CollectedHeap* g1h = G1CollectedHeap::heap(); // If we're within a stop-world GC, then we might look at a card in a @@ -625,6 +645,15 @@ assert(!is_young(), "check value of filter_young"); + // We can only clean the card here, after we make the decision that + // the card is not young. And we only clean the card if we have been + // asked to (i.e., card_ptr != NULL). + if (card_ptr != NULL) { + *card_ptr = CardTableModRefBS::clean_card_val(); + // We must complete this write before we do any of the reads below. + OrderAccess::storeload(); + } + // We used to use "block_start_careful" here. But we're actually happy // to update the BOT while we do this... HeapWord* cur = block_start(mr.start()); @@ -923,11 +952,11 @@ ContiguousSpace::set_saved_mark(); OrderAccess::storestore(); _gc_time_stamp = curr_gc_time_stamp; - // The following fence is to force a flush of the writes above, but - // is strictly not needed because when an allocating worker thread - // calls set_saved_mark() it does so under the ParGCRareEvent_lock; - // when the lock is released, the write will be flushed. - // OrderAccess::fence(); + // No need to do another barrier to flush the writes above. If + // this is called in parallel with other threads trying to + // allocate into the region, the caller should call this while + // holding a lock and when the lock is released the writes will be + // flushed. } }
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -149,6 +149,13 @@ G1BlockOffsetArrayContigSpace _offsets; Mutex _par_alloc_lock; volatile unsigned _gc_time_stamp; + // When we need to retire an allocation region, while other threads + // are also concurrently trying to allocate into it, we typically + // allocate a dummy object at the end of the region to ensure that + // no more allocations can take place in it. However, sometimes we + // want to know where the end of the last "real" object we allocated + // into the region was and this is what this keeps track. + HeapWord* _pre_dummy_top; public: // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be @@ -163,6 +170,17 @@ virtual void set_saved_mark(); void reset_gc_time_stamp() { _gc_time_stamp = 0; } + // See the comment above in the declaration of _pre_dummy_top for an + // explanation of what it is. + void set_pre_dummy_top(HeapWord* pre_dummy_top) { + assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition"); + _pre_dummy_top = pre_dummy_top; + } + HeapWord* pre_dummy_top() { + return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top; + } + void reset_pre_dummy_top() { _pre_dummy_top = NULL; } + virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); virtual void clear(bool mangle_space); @@ -380,13 +398,16 @@ // The number of bytes marked live in the region in the last marking phase. size_t marked_bytes() { return _prev_marked_bytes; } + size_t live_bytes() { + return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes(); + } + // The number of bytes counted in the next marking. size_t next_marked_bytes() { return _next_marked_bytes; } // The number of bytes live wrt the next marking. size_t next_live_bytes() { - return (top() - next_top_at_mark_start()) - * HeapWordSize - + next_marked_bytes(); + return + (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes(); } // A lower bound on the amount of garbage bytes in the region. @@ -563,6 +584,7 @@ // Reset HR stuff to default values. void hr_clear(bool par, bool clear_space); + void par_clear(); void initialize(MemRegion mr, bool clear_space, bool mangle_space); @@ -781,12 +803,16 @@ HeapWord* object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl); - // In this version - if filter_young is true and the region - // is a young region then we skip the iteration. + // filter_young: if true and the region is a young region then we + // skip the iteration. + // card_ptr: if not NULL, and we decide that the card is not young + // and we iterate over it, we'll clean the card before we start the + // iteration. HeapWord* oops_on_card_seq_iterate_careful(MemRegion mr, FilterOutOfRegionClosure* cl, - bool filter_young); + bool filter_young, + jbyte* card_ptr); // A version of block start that is guaranteed to find *some* block // boundary at or before "p", but does not object iteration, and may
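The _pre_dummy_top comment above describes retiring a region that other threads may still be allocating into by filling its tail with a dummy object while remembering where real allocation ended. A simplified standalone sketch of that idea over a bump-pointer region (the CAS loop and names are illustrative, and this retire path ignores the race the real code has to handle):

    #include <atomic>
    #include <cstddef>
    #include <cstdio>

    struct Region {
      std::atomic<size_t> top{0};   // bump pointer, in words
      size_t end = 1024;            // region capacity, in words
      size_t pre_dummy_top = 0;     // end of the last "real" object

      bool par_allocate(size_t words, size_t* result) {
        size_t cur = top.load();
        while (cur + words <= end) {
          if (top.compare_exchange_weak(cur, cur + words)) { *result = cur; return true; }
        }
        return false;
      }

      void retire() {
        size_t cur = top.load();
        pre_dummy_top = cur;          // remember the real allocation high-water mark
        size_t filler = end - cur;    // a dummy object consumes the remainder
        if (filler > 0) {
          size_t dummy_start;
          par_allocate(filler, &dummy_start);  // after this, top == end: no more allocation
        }
      }
    };

    int main() {
      Region r;
      size_t p;
      r.par_allocate(100, &p);
      r.retire();
      std::printf("pre_dummy_top=%zu top=%zu\n", r.pre_dummy_top, r.top.load());
      return 0;
    }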
--- a/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -38,15 +38,8 @@ // this is used for larger LAB allocations only. inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) { MutexLocker x(&_par_alloc_lock); - // This ought to be just "allocate", because of the lock above, but that - // ContiguousSpace::allocate asserts that either the allocating thread - // holds the heap lock or it is the VM thread and we're at a safepoint. - // The best I (dld) could figure was to put a field in ContiguousSpace - // meaning "locking at safepoint taken care of", and set/reset that - // here. But this will do for now, especially in light of the comment - // above. Perhaps in the future some lock-free manner of keeping the - // coordination. - HeapWord* res = ContiguousSpace::par_allocate(size); + // Given that we take the lock no need to use par_allocate() here. + HeapWord* res = ContiguousSpace::allocate(size); if (res != NULL) { _offsets.alloc_block(res, size); }
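Since _par_alloc_lock already serializes callers, the plain bump allocation suffices and the CAS-based par_allocate() is reserved for lock-free use. A minimal standalone sketch of that pattern, with illustrative names:

    #include <cstddef>
    #include <mutex>

    // When a mutex already serializes callers, a plain (non-atomic) bump
    // allocation is enough; a CAS-based variant is only needed for lock-free use.
    struct Space {
      size_t top = 0;
      size_t end = 1024;
      std::mutex par_alloc_lock;

      size_t allocate(size_t words) {            // assumes the caller holds the lock
        if (top + words > end) return size_t(-1);
        size_t res = top;
        top += words;
        return res;
      }
      size_t par_allocate(size_t words) {        // takes the lock, then plain allocate()
        std::lock_guard<std::mutex> x(par_alloc_lock);
        return allocate(words);
      }
    };

    int main() {
      Space sp;
      return (sp.par_allocate(16) == 0 && sp.par_allocate(32) == 16) ? 0 : 1;
    }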
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -261,6 +261,45 @@ msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail()); } +void HeapRegionLinkedList::add_as_head(HeapRegionLinkedList* from_list) { + hrs_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(from_list); + + verify_optional(); + from_list->verify_optional(); + + if (from_list->is_empty()) return; + +#ifdef ASSERT + HeapRegionLinkedListIterator iter(from_list); + while (iter.more_available()) { + HeapRegion* hr = iter.get_next(); + // In set_containing_set() we check that we either set the value + // from NULL to non-NULL or vice versa to catch bugs. So, we have + // to NULL it first before setting it to the value. + hr->set_containing_set(NULL); + hr->set_containing_set(this); + } +#endif // ASSERT + + if (_head != NULL) { + assert(length() > 0 && _tail != NULL, hrs_ext_msg(this, "invariant")); + from_list->_tail->set_next(_head); + } else { + assert(length() == 0 && _head == NULL, hrs_ext_msg(this, "invariant")); + _tail = from_list->_tail; + } + _head = from_list->_head; + + _length += from_list->length(); + _region_num += from_list->region_num(); + _total_used_bytes += from_list->total_used_bytes(); + from_list->clear(); + + verify_optional(); + from_list->verify_optional(); +} + void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) { hrs_assert_mt_safety_ok(this); hrs_assert_mt_safety_ok(from_list);
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -277,6 +277,10 @@ } public: + // It adds hr to the list as the new head. The region should not be + // a member of another set. + inline void add_as_head(HeapRegion* hr); + // It adds hr to the list as the new tail. The region should not be // a member of another set. inline void add_as_tail(HeapRegion* hr); @@ -290,6 +294,11 @@ // It moves the regions from from_list to this list and empties // from_list. The new regions will appear in the same order as they + // were in from_list and be linked in the beginning of this list. + void add_as_head(HeapRegionLinkedList* from_list); + + // It moves the regions from from_list to this list and empties + // from_list. The new regions will appear in the same order as they // were in from_list and be linked in the end of this list. void add_as_tail(HeapRegionLinkedList* from_list);
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -110,6 +110,23 @@ //////////////////// HeapRegionLinkedList //////////////////// +inline void HeapRegionLinkedList::add_as_head(HeapRegion* hr) { + hrs_assert_mt_safety_ok(this); + assert((length() == 0 && _head == NULL && _tail == NULL) || + (length() > 0 && _head != NULL && _tail != NULL), + hrs_ext_msg(this, "invariant")); + // add_internal() will verify the region. + add_internal(hr); + + // Now link the region. + if (_head != NULL) { + hr->set_next(_head); + } else { + _tail = hr; + } + _head = hr; +} + inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) { hrs_assert_mt_safety_ok(this); assert((length() == 0 && _head == NULL && _tail == NULL) ||
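add_as_head() splices a whole donor list onto the front in constant time by linking the donor's tail to the current head. The same splice for a minimal singly linked list with head/tail bookkeeping, as a standalone sketch:

    #include <cstdio>

    struct Node { int id; Node* next; };

    struct List {
      Node* head = nullptr;
      Node* tail = nullptr;
      int length = 0;

      void add_as_head(List* from) {
        if (from->head == nullptr) return;    // nothing to move
        if (head != nullptr) {
          from->tail->next = head;            // link donor tail to our old head
        } else {
          tail = from->tail;                  // we were empty: adopt donor tail
        }
        head = from->head;
        length += from->length;
        from->head = from->tail = nullptr;    // donor list ends up empty
        from->length = 0;
      }
    };

    int main() {
      Node a{1, nullptr}, b{2, nullptr}, c{3, nullptr};
      List dst, src;
      a.next = &b;  src.head = &a; src.tail = &b; src.length = 2;
      dst.head = dst.tail = &c; dst.length = 1;
      dst.add_as_head(&src);                  // dst is now 1 -> 2 -> 3
      for (Node* n = dst.head; n != nullptr; n = n->next) std::printf("%d ", n->id);
      std::printf("\n");
      return 0;
    }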
--- a/src/share/vm/gc_implementation/g1/heapRegionSets.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/heapRegionSets.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2010 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,49 +29,48 @@ #include "memory/sharedHeap.hpp" #include "memory/space.inline.hpp" #include "memory/universe.hpp" +#include "oops/oop.inline.hpp" #include "runtime/java.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/virtualspace.hpp" -void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr, - DirtyCardToOopClosure* dcto_cl, - MemRegionClosure* cl, - bool clear, - int n_threads) { - if (n_threads > 0) { - assert((n_threads == 1 && ParallelGCThreads == 0) || - n_threads <= (int)ParallelGCThreads, - "# worker threads != # requested!"); - // Make sure the LNC array is valid for the space. - jbyte** lowest_non_clean; - uintptr_t lowest_non_clean_base_chunk_index; - size_t lowest_non_clean_chunk_size; - get_LNC_array_for_space(sp, lowest_non_clean, - lowest_non_clean_base_chunk_index, - lowest_non_clean_chunk_size); +void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr, + OopsInGenClosure* cl, + CardTableRS* ct, + int n_threads) { + assert(n_threads > 0, "Error: expected n_threads > 0"); + assert((n_threads == 1 && ParallelGCThreads == 0) || + n_threads <= (int)ParallelGCThreads, + "# worker threads != # requested!"); + // Make sure the LNC array is valid for the space. + jbyte** lowest_non_clean; + uintptr_t lowest_non_clean_base_chunk_index; + size_t lowest_non_clean_chunk_size; + get_LNC_array_for_space(sp, lowest_non_clean, + lowest_non_clean_base_chunk_index, + lowest_non_clean_chunk_size); - int n_strides = n_threads * StridesPerThread; - SequentialSubTasksDone* pst = sp->par_seq_tasks(); - pst->set_n_threads(n_threads); - pst->set_n_tasks(n_strides); + int n_strides = n_threads * ParGCStridesPerThread; + SequentialSubTasksDone* pst = sp->par_seq_tasks(); + pst->set_n_threads(n_threads); + pst->set_n_tasks(n_strides); - int stride = 0; - while (!pst->is_task_claimed(/* reference */ stride)) { - process_stride(sp, mr, stride, n_strides, dcto_cl, cl, clear, - lowest_non_clean, - lowest_non_clean_base_chunk_index, - lowest_non_clean_chunk_size); - } - if (pst->all_tasks_completed()) { - // Clear lowest_non_clean array for next time. - intptr_t first_chunk_index = addr_to_chunk_index(mr.start()); - uintptr_t last_chunk_index = addr_to_chunk_index(mr.last()); - for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) { - intptr_t ind = ch - lowest_non_clean_base_chunk_index; - assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size, - "Bounds error"); - lowest_non_clean[ind] = NULL; - } + int stride = 0; + while (!pst->is_task_claimed(/* reference */ stride)) { + process_stride(sp, mr, stride, n_strides, cl, ct, + lowest_non_clean, + lowest_non_clean_base_chunk_index, + lowest_non_clean_chunk_size); + } + if (pst->all_tasks_completed()) { + // Clear lowest_non_clean array for next time. 
+ intptr_t first_chunk_index = addr_to_chunk_index(mr.start()); + uintptr_t last_chunk_index = addr_to_chunk_index(mr.last()); + for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) { + intptr_t ind = ch - lowest_non_clean_base_chunk_index; + assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size, + "Bounds error"); + lowest_non_clean[ind] = NULL; } } } @@ -81,14 +80,13 @@ process_stride(Space* sp, MemRegion used, jint stride, int n_strides, - DirtyCardToOopClosure* dcto_cl, - MemRegionClosure* cl, - bool clear, + OopsInGenClosure* cl, + CardTableRS* ct, jbyte** lowest_non_clean, uintptr_t lowest_non_clean_base_chunk_index, size_t lowest_non_clean_chunk_size) { - // We don't have to go downwards here; it wouldn't help anyway, - // because of parallelism. + // We go from higher to lower addresses here; it wouldn't help that much + // because of the strided parallelism pattern used here. // Find the first card address of the first chunk in the stride that is // at least "bottom" of the used region. @@ -101,25 +99,35 @@ if ((uintptr_t)stride >= start_chunk_stride_num) { chunk_card_start = (jbyte*)(start_card + (stride - start_chunk_stride_num) * - CardsPerStrideChunk); + ParGCCardsPerStrideChunk); } else { // Go ahead to the next chunk group boundary, then to the requested stride. chunk_card_start = (jbyte*)(start_card + (n_strides - start_chunk_stride_num + stride) * - CardsPerStrideChunk); + ParGCCardsPerStrideChunk); } while (chunk_card_start < end_card) { - // We don't have to go downwards here; it wouldn't help anyway, - // because of parallelism. (We take care with "min_done"; see below.) + // Even though we go from lower to higher addresses below, the + // strided parallelism can interleave the actual processing of the + // dirty pages in various ways. For a specific chunk within this + // stride, we take care to avoid double scanning or missing a card + // by suitably initializing the "min_done" field in process_chunk_boundaries() + // below, together with the dirty region extension accomplished in + // DirtyCardToOopClosure::do_MemRegion(). + jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk; // Invariant: chunk_mr should be fully contained within the "used" region. - jbyte* chunk_card_end = chunk_card_start + CardsPerStrideChunk; MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start), chunk_card_end >= end_card ? used.end() : addr_for(chunk_card_end)); assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)"); assert(used.contains(chunk_mr), "chunk_mr should be subset of used"); + DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), + cl->gen_boundary()); + ClearNoncleanCardWrapper clear_cl(dcto_cl, ct); + + // Process the chunk. process_chunk_boundaries(sp, dcto_cl, @@ -129,13 +137,30 @@ lowest_non_clean_base_chunk_index, lowest_non_clean_chunk_size); - non_clean_card_iterate_work(chunk_mr, cl, clear); + // We want the LNC array updates above in process_chunk_boundaries + // to be visible before any of the card table value changes as a + // result of the dirty card iteration below. + OrderAccess::storestore(); + + // We do not call the non_clean_card_iterate_serial() version because + // we want to clear the cards: clear_cl here does the work of finding + // contiguous dirty ranges of cards to process and clear. + clear_cl.do_MemRegion(chunk_mr); // Find the next chunk of the stride. 
- chunk_card_start += CardsPerStrideChunk * n_strides; + chunk_card_start += ParGCCardsPerStrideChunk * n_strides; } } + +// If you want a talkative process_chunk_boundaries, +// then #define NOISY(x) x +#ifdef NOISY +#error "Encountered a global preprocessor flag, NOISY, which might clash with local definition to follow" +#else +#define NOISY(x) +#endif + void CardTableModRefBS:: process_chunk_boundaries(Space* sp, @@ -146,126 +171,232 @@ uintptr_t lowest_non_clean_base_chunk_index, size_t lowest_non_clean_chunk_size) { - // We must worry about the chunk boundaries. + // We must worry about non-array objects that cross chunk boundaries, + // because such objects are both precisely and imprecisely marked: + // .. if the head of such an object is dirty, the entire object + // needs to be scanned, under the interpretation that this + // was an imprecise mark + // .. if the head of such an object is not dirty, we can assume + // precise marking and it's efficient to scan just the dirty + // cards. + // In either case, each scanned reference must be scanned precisely + // once so as to avoid cloning of a young referent. For efficiency, + // our closures depend on this property and do not protect against + // double scans. - // First, set our max_to_do: - HeapWord* max_to_do = NULL; uintptr_t cur_chunk_index = addr_to_chunk_index(chunk_mr.start()); cur_chunk_index = cur_chunk_index - lowest_non_clean_base_chunk_index; + NOISY(tty->print_cr("===========================================================================");) + NOISY(tty->print_cr(" process_chunk_boundary: Called with [" PTR_FORMAT "," PTR_FORMAT ")", + chunk_mr.start(), chunk_mr.end());) + + // First, set "our" lowest_non_clean entry, which would be + // used by the thread scanning an adjoining left chunk with + // a non-array object straddling the mutual boundary. + // Find the object that spans our boundary, if one exists. + // first_block is the block possibly straddling our left boundary. + HeapWord* first_block = sp->block_start(chunk_mr.start()); + assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()), + "First chunk should always have a co-initial block"); + // Does the block straddle the chunk's left boundary, and is it + // a non-array object? + if (first_block < chunk_mr.start() // first block straddles left bdry + && sp->block_is_obj(first_block) // first block is an object + && !(oop(first_block)->is_objArray() // first block is not an array (arrays are precisely dirtied) + || oop(first_block)->is_typeArray())) { + // Find our least non-clean card, so that a left neighbour + // does not scan an object straddling the mutual boundary + // too far to the right, and attempt to scan a portion of + // that object twice. + jbyte* first_dirty_card = NULL; + jbyte* last_card_of_first_obj = + byte_for(first_block + sp->block_size(first_block) - 1); + jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start()); + jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last()); + jbyte* last_card_to_check = + (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk, + (intptr_t) last_card_of_first_obj); + // Note that this does not need to go beyond our last card + // if our first object completely straddles this chunk. 
+ for (jbyte* cur = first_card_of_cur_chunk; + cur <= last_card_to_check; cur++) { + jbyte val = *cur; + if (card_will_be_scanned(val)) { + first_dirty_card = cur; break; + } else { + assert(!card_may_have_been_dirty(val), "Error"); + } + } + if (first_dirty_card != NULL) { + NOISY(tty->print_cr(" LNC: Found a dirty card at " PTR_FORMAT " in current chunk", + first_dirty_card);) + assert(0 <= cur_chunk_index && cur_chunk_index < lowest_non_clean_chunk_size, + "Bounds error."); + assert(lowest_non_clean[cur_chunk_index] == NULL, + "Write exactly once : value should be stable hereafter for this round"); + lowest_non_clean[cur_chunk_index] = first_dirty_card; + } NOISY(else { + tty->print_cr(" LNC: Found no dirty card in current chunk; leaving LNC entry NULL"); + // In the future, we could have this thread look for a non-NULL value to copy from its + // right neighbour (up to the end of the first object). + if (last_card_of_cur_chunk < last_card_of_first_obj) { + tty->print_cr(" LNC: BEWARE!!! first obj straddles past right end of chunk:\n" + " might be efficient to get value from right neighbour?"); + } + }) + } else { + // In this case we can help our neighbour by just asking them + // to stop at our first card (even though it may not be dirty). + NOISY(tty->print_cr(" LNC: first block is not a non-array object; setting LNC to first card of current chunk");) + assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter"); + jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start()); + lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk; + } + NOISY(tty->print_cr(" process_chunk_boundary: lowest_non_clean[" INTPTR_FORMAT "] = " PTR_FORMAT + " which corresponds to the heap address " PTR_FORMAT, + cur_chunk_index, lowest_non_clean[cur_chunk_index], + (lowest_non_clean[cur_chunk_index] != NULL) + ? addr_for(lowest_non_clean[cur_chunk_index]) + : NULL);) + NOISY(tty->print_cr("---------------------------------------------------------------------------");) + + // Next, set our own max_to_do, which will strictly/exclusively bound + // the highest address that we will scan past the right end of our chunk. + HeapWord* max_to_do = NULL; if (chunk_mr.end() < used.end()) { - // This is not the last chunk in the used region. What is the last - // object? - HeapWord* last_block = sp->block_start(chunk_mr.end()); + // This is not the last chunk in the used region. + // What is our last block? We check the first block of + // the next (right) chunk rather than strictly check our last block + // because it's potentially more efficient to do so. + HeapWord* const last_block = sp->block_start(chunk_mr.end()); assert(last_block <= chunk_mr.end(), "In case this property changes."); - if (last_block == chunk_mr.end() - || !sp->block_is_obj(last_block)) { + if ((last_block == chunk_mr.end()) // our last block does not straddle boundary + || !sp->block_is_obj(last_block) // last_block isn't an object + || oop(last_block)->is_objArray() // last_block is an array (precisely marked) + || oop(last_block)->is_typeArray()) { max_to_do = chunk_mr.end(); - + NOISY(tty->print_cr(" process_chunk_boundary: Last block on this card is not a non-array object;\n" + " max_to_do left at " PTR_FORMAT, max_to_do);) } else { - // It is an object and starts before the end of the current chunk. + assert(last_block < chunk_mr.end(), "Tautology"); + // It is a non-array object that straddles the right boundary of this chunk. 
// last_obj_card is the card corresponding to the start of the last object // in the chunk. Note that the last object may not start in // the chunk. - jbyte* last_obj_card = byte_for(last_block); - if (!card_may_have_been_dirty(*last_obj_card)) { - // The card containing the head is not dirty. Any marks in + jbyte* const last_obj_card = byte_for(last_block); + const jbyte val = *last_obj_card; + if (!card_will_be_scanned(val)) { + assert(!card_may_have_been_dirty(val), "Error"); + // The card containing the head is not dirty. Any marks on // subsequent cards still in this chunk must have been made - // precisely; we can cap processing at the end. + // precisely; we can cap processing at the end of our chunk. max_to_do = chunk_mr.end(); + NOISY(tty->print_cr(" process_chunk_boundary: Head of last object on this card is not dirty;\n" + " max_to_do left at " PTR_FORMAT, + max_to_do);) } else { // The last object must be considered dirty, and extends onto the // following chunk. Look for a dirty card in that chunk that will // bound our processing. jbyte* limit_card = NULL; - size_t last_block_size = sp->block_size(last_block); - jbyte* last_card_of_last_obj = + const size_t last_block_size = sp->block_size(last_block); + jbyte* const last_card_of_last_obj = byte_for(last_block + last_block_size - 1); - jbyte* first_card_of_next_chunk = byte_for(chunk_mr.end()); + jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end()); // This search potentially goes a long distance looking - // for the next card that will be scanned. For example, - // an object that is an array of primitives will not - // have any cards covering regions interior to the array - // that will need to be scanned. The scan can be terminated - // at the last card of the next chunk. That would leave - // limit_card as NULL and would result in "max_to_do" - // being set with the LNC value or with the end - // of the last block. - jbyte* last_card_of_next_chunk = first_card_of_next_chunk + - CardsPerStrideChunk; - assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) - == CardsPerStrideChunk, "last card of next chunk may be wrong"); - jbyte* last_card_to_check = (jbyte*) MIN2(last_card_of_last_obj, - last_card_of_next_chunk); + // for the next card that will be scanned, terminating + // at the end of the last_block, if no earlier dirty card + // is found. 
+ assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk, + "last card of next chunk may be wrong"); for (jbyte* cur = first_card_of_next_chunk; - cur <= last_card_to_check; cur++) { - if (card_will_be_scanned(*cur)) { + cur <= last_card_of_last_obj; cur++) { + const jbyte val = *cur; + if (card_will_be_scanned(val)) { + NOISY(tty->print_cr(" Found a non-clean card " PTR_FORMAT " with value 0x%x", + cur, (int)val);) limit_card = cur; break; + } else { + assert(!card_may_have_been_dirty(val), "Error: card can't be skipped"); } } - assert(0 <= cur_chunk_index+1 && - cur_chunk_index+1 < lowest_non_clean_chunk_size, + if (limit_card != NULL) { + max_to_do = addr_for(limit_card); + assert(limit_card != NULL && max_to_do != NULL, "Error"); + NOISY(tty->print_cr(" process_chunk_boundary: Found a dirty card at " PTR_FORMAT + " max_to_do set at " PTR_FORMAT " which is before end of last block in chunk: " + PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT, + limit_card, max_to_do, last_block, last_block_size, (last_block+last_block_size));) + } else { + // The following is a pessimistic value, because it's possible + // that a dirty card on a subsequent chunk has been cleared by + // the time we get to look at it; we'll correct for that further below, + // using the LNC array which records the least non-clean card + // before cards were cleared in a particular chunk. + limit_card = last_card_of_last_obj; + max_to_do = last_block + last_block_size; + assert(limit_card != NULL && max_to_do != NULL, "Error"); + NOISY(tty->print_cr(" process_chunk_boundary: Found no dirty card before end of last block in chunk\n" + " Setting limit_card to " PTR_FORMAT + " and max_to_do " PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT, + limit_card, last_block, last_block_size, max_to_do);) + } + assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size, "Bounds error."); - // LNC for the next chunk - jbyte* lnc_card = lowest_non_clean[cur_chunk_index+1]; - if (limit_card == NULL) { - limit_card = lnc_card; - } - if (limit_card != NULL) { + // It is possible that a dirty card for the last object may have been + // cleared before we had a chance to examine it. In that case, the value + // will have been logged in the LNC for that chunk. + // We need to examine as many chunks to the right as this object + // covers. 
+ const uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1) + - lowest_non_clean_base_chunk_index; + DEBUG_ONLY(const uintptr_t last_chunk_index = addr_to_chunk_index(used.last()) + - lowest_non_clean_base_chunk_index;) + assert(last_chunk_index_to_check <= last_chunk_index, + err_msg("Out of bounds: last_chunk_index_to_check " INTPTR_FORMAT + " exceeds last_chunk_index " INTPTR_FORMAT, + last_chunk_index_to_check, last_chunk_index)); + for (uintptr_t lnc_index = cur_chunk_index + 1; + lnc_index <= last_chunk_index_to_check; + lnc_index++) { + jbyte* lnc_card = lowest_non_clean[lnc_index]; if (lnc_card != NULL) { - limit_card = (jbyte*)MIN2((intptr_t)limit_card, - (intptr_t)lnc_card); - } - max_to_do = addr_for(limit_card); - } else { - max_to_do = last_block + last_block_size; + // we can stop at the first non-NULL entry we find + if (lnc_card <= limit_card) { + NOISY(tty->print_cr(" process_chunk_boundary: LNC card " PTR_FORMAT " is lower than limit_card " PTR_FORMAT, + " max_to_do will be lowered to " PTR_FORMAT " from " PTR_FORMAT, + lnc_card, limit_card, addr_for(lnc_card), max_to_do);) + limit_card = lnc_card; + max_to_do = addr_for(limit_card); + assert(limit_card != NULL && max_to_do != NULL, "Error"); + } + // In any case, we break now + break; + } // else continue to look for a non-NULL entry if any } + assert(limit_card != NULL && max_to_do != NULL, "Error"); } + assert(max_to_do != NULL, "OOPS 1 !"); } - assert(max_to_do != NULL, "OOPS!"); + assert(max_to_do != NULL, "OOPS 2!"); } else { max_to_do = used.end(); + NOISY(tty->print_cr(" process_chunk_boundary: Last chunk of this space;\n" + " max_to_do left at " PTR_FORMAT, + max_to_do);) } + assert(max_to_do != NULL, "OOPS 3!"); // Now we can set the closure we're using so it doesn't to beyond // max_to_do. dcto_cl->set_min_done(max_to_do); #ifndef PRODUCT dcto_cl->set_last_bottom(max_to_do); #endif + NOISY(tty->print_cr("===========================================================================\n");) +} - // Now we set *our" lowest_non_clean entry. - // Find the object that spans our boundary, if one exists. - // Nothing to do on the first chunk. - if (chunk_mr.start() > used.start()) { - // first_block is the block possibly spanning the chunk start - HeapWord* first_block = sp->block_start(chunk_mr.start()); - // Does the block span the start of the chunk and is it - // an object? - if (first_block < chunk_mr.start() && - sp->block_is_obj(first_block)) { - jbyte* first_dirty_card = NULL; - jbyte* last_card_of_first_obj = - byte_for(first_block + sp->block_size(first_block) - 1); - jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start()); - jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last()); - jbyte* last_card_to_check = - (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk, - (intptr_t) last_card_of_first_obj); - for (jbyte* cur = first_card_of_cur_chunk; - cur <= last_card_to_check; cur++) { - if (card_will_be_scanned(*cur)) { - first_dirty_card = cur; break; - } - } - if (first_dirty_card != NULL) { - assert(0 <= cur_chunk_index && - cur_chunk_index < lowest_non_clean_chunk_size, - "Bounds error."); - lowest_non_clean[cur_chunk_index] = first_dirty_card; - } - } - } -} +#undef NOISY void CardTableModRefBS:: @@ -282,8 +413,8 @@ // LNC array for the covered region. Any later expansion can't affect // the used_at_save_marks region. 
// (I observed a bug in which the first thread to execute this would - // resize, and then it would cause "expand_and_allocates" that would - // Increase the number of chunks in the covered region. Then a second + // resize, and then it would cause "expand_and_allocate" that would + // increase the number of chunks in the covered region. Then a second // thread would come and execute this, see that the size didn't match, // and free and allocate again. So the first thread would be using a // freed "_lowest_non_clean" array.)
--- a/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -77,7 +77,23 @@ if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); if ((HeapWord*)obj < _boundary) { - assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?"); +#ifndef PRODUCT + if (_g->to()->is_in_reserved(obj)) { + tty->print_cr("Scanning field (" PTR_FORMAT ") twice?", p); + GenCollectedHeap* gch = (GenCollectedHeap*)Universe::heap(); + Space* sp = gch->space_containing(p); + oop obj = oop(sp->block_start(p)); + assert((HeapWord*)obj < (HeapWord*)p, "Error"); + tty->print_cr("Object: " PTR_FORMAT, obj); + tty->print_cr("-------"); + obj->print(); + tty->print_cr("-----"); + tty->print_cr("Heap:"); + tty->print_cr("-----"); + gch->print(); + ShouldNotReachHere(); + } +#endif // OK, we need to ensure that it is copied. // We read the klass and mark in this order, so that we can reliably // get the size of the object: if the mark we read is not a
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -339,6 +339,21 @@ return false; } +bool ParallelScavengeHeap::is_scavengable(const void* addr) { + return is_in_young((oop)addr); +} + +#ifdef ASSERT +// Don't implement this by using is_in_young(). This method is used +// in some cases to check that is_in_young() is correct. +bool ParallelScavengeHeap::is_in_partial_collection(const void *p) { + assert(is_in_reserved(p) || p == NULL, + "Does not work if address is non-null and outside of the heap"); + // The order of the generations is perm (low addr), old, young (high addr) + return p >= old_gen()->reserved().end(); +} +#endif + // There are two levels of allocation policy here. // // When an allocation request fails, the requesting thread must invoke a VM
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -127,6 +127,12 @@ // collection. virtual bool is_maximal_no_gc() const; + // Return true if the reference points to an object that + // can be moved in a partial collection. For currently implemented + // generational collectors that means during a collection of + // the young gen. + virtual bool is_scavengable(const void* addr); + // Does this heap support heap inspection? (+PrintClassHistogram) bool supports_heap_inspection() const { return true; } @@ -143,6 +149,10 @@ return perm_gen()->reserved().contains(p); } +#ifdef ASSERT + virtual bool is_in_partial_collection(const void *p); +#endif + bool is_permanent(const void *p) const { // committed part return perm_gen()->is_in(p); }
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -51,7 +51,12 @@ } inline bool ParallelScavengeHeap::is_in_young(oop p) { - return young_gen()->is_in_reserved(p); + // Assumes that the old gen address range is lower than that of the young gen. + const void* loc = (void*) p; + bool result = ((HeapWord*)p) >= young_gen()->reserved().start(); + assert(result == young_gen()->is_in_reserved(p), + err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p)); + return result; } inline bool ParallelScavengeHeap::is_in_old_or_perm(oop p) {
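An illustrative sketch (not from the changeset): the new is_in_young() above leans entirely on the generation layout, perm at the lowest addresses, then old, then young, so for a pointer already known to be inside the heap the membership test collapses to one address comparison, with the debug-only assert cross-checking against the slower reserved-region test. A minimal standalone C++ sketch of that pattern, using hypothetical names rather than HotSpot's types:

  #include <cassert>
  #include <cstdint>

  // Hypothetical heap layout: [old_lo, young_lo) is the old gen and
  // [young_lo, heap_hi) is the young gen, i.e. young sits at higher addresses.
  struct LayoutHeap {
    uintptr_t old_lo, young_lo, heap_hi;

    // Layout-independent reference test.
    bool is_in_young_slow(uintptr_t p) const {
      return p >= young_lo && p < heap_hi;
    }

    // Fast path: valid only for p already known to lie in [old_lo, heap_hi).
    bool is_in_young(uintptr_t p) const {
      bool result = (p >= young_lo);
      assert(result == is_in_young_slow(p));   // cross-check, as in the hunk
      return result;
    }
  };

The same layout assumption is what lets is_in_partial_collection() earlier in this changeset answer "is p in the part of the heap being collected" with a single compare against the old gen's reserved end.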
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -173,7 +173,7 @@ TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty); TraceCollectorStats tcs(counters()); - TraceMemoryManagerStats tms(true /* Full GC */); + TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); if (TraceGen1Time) accumulated_time()->start();
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -176,10 +176,6 @@ object_mark_sweep()->compact(ZapUnusedHeapArea); } -void PSOldGen::move_and_update(ParCompactionManager* cm) { - PSParallelCompact::move_and_update(cm, PSParallelCompact::old_space_id); -} - size_t PSOldGen::contiguous_available() const { return object_space()->free_in_bytes() + virtual_space()->uncommitted_size(); } @@ -228,6 +224,12 @@ const size_t alignment = virtual_space()->alignment(); size_t aligned_bytes = align_size_up(bytes, alignment); size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment); + + if (UseNUMA) { + // With NUMA we use round-robin page allocation for the old gen. Expand by at least + // providing a page per lgroup. Alignment is larger or equal to the page size. + aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num()); + } if (aligned_bytes == 0){ // The alignment caused the number of bytes to wrap. An expand_by(0) will // return true with the implication that an expansion was done when it
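An illustrative sketch (not from the changeset): with UseNUMA the old gen hands out pages round-robin across locality groups, so the hunk above raises the minimum expansion delta to one alignment-sized chunk per lgroup. A small standalone sketch of that arithmetic, with hypothetical helper names:

  #include <algorithm>
  #include <cstddef>

  // Round sz up to a multiple of a power-of-two alignment.
  static size_t align_up(size_t sz, size_t alignment) {
    return (sz + alignment - 1) & ~(alignment - 1);
  }

  // Minimum expansion delta: the configured minimum rounded to the alignment,
  // and under NUMA at least one alignment-sized chunk per locality group.
  static size_t min_expand_delta(size_t min_heap_delta, size_t alignment,
                                 bool use_numa, size_t numa_groups) {
    size_t delta = align_up(min_heap_delta, alignment);
    if (use_numa) {
      delta = std::max(delta, alignment * numa_groups);
    }
    return delta;
  }

For example, with a 64 KB alignment and four lgroups the minimum delta becomes 256 KB, so every locality group can receive at least one aligned chunk of the expansion.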
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -143,9 +143,6 @@ void adjust_pointers(); void compact(); - // Parallel old - virtual void move_and_update(ParCompactionManager* cm); - // Size info size_t capacity_in_bytes() const { return object_space()->capacity_in_bytes(); } size_t used_in_bytes() const { return object_space()->used_in_bytes(); }
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -2057,7 +2057,7 @@ TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty); TraceCollectorStats tcs(counters()); - TraceMemoryManagerStats tms(true /* Full GC */); + TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); if (TraceGen1Time) accumulated_time()->start(); @@ -2104,11 +2104,7 @@ // klasses are used in the update of an object? compact_perm(vmthread_cm); - if (UseParallelOldGCCompacting) { - compact(); - } else { - compact_serial(vmthread_cm); - } + compact(); // Reset the mark bitmap, summary data, and do other bookkeeping. Must be // done before resizing. @@ -2582,18 +2578,16 @@ // each thread? if (total_dense_prefix_regions > 0) { uint tasks_for_dense_prefix = 1; - if (UseParallelDensePrefixUpdate) { - if (total_dense_prefix_regions <= - (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) { - // Don't over partition. This assumes that - // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value - // so there are not many regions to process. - tasks_for_dense_prefix = parallel_gc_threads; - } else { - // Over partition - tasks_for_dense_prefix = parallel_gc_threads * - PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING; - } + if (total_dense_prefix_regions <= + (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) { + // Don't over partition. This assumes that + // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value + // so there are not many regions to process. + tasks_for_dense_prefix = parallel_gc_threads; + } else { + // Over partition + tasks_for_dense_prefix = parallel_gc_threads * + PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING; } size_t regions_per_thread = total_dense_prefix_regions / tasks_for_dense_prefix; @@ -2733,21 +2727,6 @@ } #endif // #ifdef ASSERT -void PSParallelCompact::compact_serial(ParCompactionManager* cm) { - EventMark m("5 compact serial"); - TraceTime tm("compact serial", print_phases(), true, gclog_or_tty); - - ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); - - PSYoungGen* young_gen = heap->young_gen(); - PSOldGen* old_gen = heap->old_gen(); - - old_gen->start_array()->reset(); - old_gen->move_and_update(cm); - young_gen->move_and_update(cm); -} - void PSParallelCompact::follow_weak_klass_links() { // All klasses on the revisit stack are marked at this point. @@ -3530,11 +3509,8 @@ "Object liveness is wrong."); return ParMarkBitMap::incomplete; } - assert(UseParallelOldGCDensePrefix || - (HeapMaximumCompactionInterval > 1) || - (MarkSweepAlwaysCompactCount > 1) || - (forwarding_ptr == new_pointer), - "Calculation of new location is incorrect"); + assert(HeapMaximumCompactionInterval > 1 || MarkSweepAlwaysCompactCount > 1 || + forwarding_ptr == new_pointer, "new location is incorrect"); return ParMarkBitMap::incomplete; }
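An illustrative sketch (not from the changeset): with UseParallelOldGCCompacting removed, updating the dense prefix is always partitioned among the GC threads; the only remaining decision in the hunk above is whether to over-partition so that threads finishing early can pick up more work. A tiny standalone sketch of that decision, with hypothetical names:

  #include <cstddef>

  // Number of tasks used to update the dense prefix: one per thread when there
  // are few regions, otherwise over-partition so finished threads keep busy.
  // Returns 0 when there is no dense prefix to process at all.
  static unsigned dense_prefix_tasks(size_t total_regions, unsigned gc_threads,
                                     unsigned over_partition_factor) {
    if (total_regions == 0) return 0;
    if (total_regions <= (size_t)gc_threads * over_partition_factor) {
      return gc_threads;                       // don't over-partition a small prefix
    }
    return gc_threads * over_partition_factor; // over-partition a large prefix
  }

The regions are then divided evenly, roughly total_regions / tasks per task, which is what the surrounding (unchanged) code computes as regions_per_thread.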
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1027,9 +1027,6 @@ ParallelTaskTerminator* terminator_ptr, uint parallel_gc_threads); - // For debugging only - compacts the old gen serially - static void compact_serial(ParCompactionManager* cm); - // If objects are left in eden after a collection, try to move the boundary // and absorb them into the old gen. Returns true if eden was emptied. static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
--- a/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -121,12 +121,6 @@ } } - - -void PSPermGen::move_and_update(ParCompactionManager* cm) { - PSParallelCompact::move_and_update(cm, PSParallelCompact::perm_space_id); -} - void PSPermGen::precompact() { // Reset start array first. _start_array.reset();
--- a/src/share/vm/gc_implementation/parallelScavenge/psPermGen.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPermGen.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,9 +51,6 @@ // MarkSweep code virtual void precompact(); - // Parallel old - virtual void move_and_update(ParCompactionManager* cm); - virtual const char* name() const { return "PSPermGen"; } };
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -322,7 +322,7 @@ TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty); TraceCollectorStats tcs(counters()); - TraceMemoryManagerStats tms(false /* not full GC */); + TraceMemoryManagerStats tms(false /* not full GC */,gc_cause); if (TraceGen0Time) accumulated_time()->start();
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -792,12 +792,6 @@ to_mark_sweep()->compact(false); } -void PSYoungGen::move_and_update(ParCompactionManager* cm) { - PSParallelCompact::move_and_update(cm, PSParallelCompact::eden_space_id); - PSParallelCompact::move_and_update(cm, PSParallelCompact::from_space_id); - PSParallelCompact::move_and_update(cm, PSParallelCompact::to_space_id); -} - void PSYoungGen::print() const { print_on(tty); } void PSYoungGen::print_on(outputStream* st) const { st->print(" %-15s", "PSYoungGen");
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -127,9 +127,6 @@ void adjust_pointers(); void compact(); - // Parallel Old - void move_and_update(ParCompactionManager* cm); - // Called during/after gc void swap_spaces();
--- a/src/share/vm/gc_implementation/shared/allocationStats.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/shared/allocationStats.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -76,7 +76,7 @@ _beforeSweep = 0; _coalBirths = 0; _coalDeaths = 0; - _splitBirths = split_birth? 1 : 0; + _splitBirths = (split_birth ? 1 : 0); _splitDeaths = 0; _returnedBytes = 0; }
--- a/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/gcUtil.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/shared/gcUtil.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/generationCounters.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/shared/generationCounters.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,15 +51,18 @@ cname = PerfDataManager::counter_name(_name_space, "minCapacity"); PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, + _virtual_space == NULL ? 0 : _virtual_space->committed_size(), CHECK); cname = PerfDataManager::counter_name(_name_space, "maxCapacity"); PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, + _virtual_space == NULL ? 0 : _virtual_space->reserved_size(), CHECK); cname = PerfDataManager::counter_name(_name_space, "capacity"); _current_size = PerfDataManager::create_variable(SUN_GC, cname, - PerfData::U_Bytes, + PerfData::U_Bytes, + _virtual_space == NULL ? 0 : _virtual_space->committed_size(), CHECK); } }
--- a/src/share/vm/gc_implementation/shared/generationCounters.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/shared/generationCounters.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,10 +61,11 @@ } virtual void update_all() { - _current_size->set_value(_virtual_space->committed_size()); + _current_size->set_value(_virtual_space == NULL ? 0 : + _virtual_space->committed_size()); } const char* name_space() const { return _name_space; } + }; - #endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GENERATIONCOUNTERS_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/hSpaceCounters.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/shared/hSpaceCounters.hpp" +#include "memory/generation.hpp" +#include "memory/resourceArea.hpp" + +HSpaceCounters::HSpaceCounters(const char* name, + int ordinal, + size_t max_size, + size_t initial_capacity, + GenerationCounters* gc) { + + if (UsePerfData) { + EXCEPTION_MARK; + ResourceMark rm; + + const char* cns = + PerfDataManager::name_space(gc->name_space(), "space", ordinal); + + _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1); + strcpy(_name_space, cns); + + const char* cname = PerfDataManager::counter_name(_name_space, "name"); + PerfDataManager::create_string_constant(SUN_GC, cname, name, CHECK); + + cname = PerfDataManager::counter_name(_name_space, "maxCapacity"); + PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, + (jlong)max_size, CHECK); + + cname = PerfDataManager::counter_name(_name_space, "capacity"); + _capacity = PerfDataManager::create_variable(SUN_GC, cname, + PerfData::U_Bytes, + initial_capacity, CHECK); + + cname = PerfDataManager::counter_name(_name_space, "used"); + _used = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, + (jlong) 0, CHECK); + + cname = PerfDataManager::counter_name(_name_space, "initCapacity"); + PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, + initial_capacity, CHECK); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP + +#ifndef SERIALGC +#include "gc_implementation/shared/generationCounters.hpp" +#include "memory/generation.hpp" +#include "runtime/perfData.hpp" +#endif + +// A HSpaceCounter is a holder class for performance counters +// that track a collections (logical spaces) in a heap; + +class HeapSpaceUsedHelper; +class G1SpaceMonitoringSupport; + +class HSpaceCounters: public CHeapObj { + friend class VMStructs; + + private: + PerfVariable* _capacity; + PerfVariable* _used; + + // Constant PerfData types don't need to retain a reference. + // However, it's a good idea to document them here. + + char* _name_space; + + public: + + HSpaceCounters(const char* name, int ordinal, size_t max_size, + size_t initial_capacity, GenerationCounters* gc); + + ~HSpaceCounters() { + if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space); + } + + inline void update_capacity(size_t v) { + _capacity->set_value(v); + } + + inline void update_used(size_t v) { + _used->set_value(v); + } + + debug_only( + // for security reasons, we do not allow arbitrary reads from + // the counters as they may live in shared memory. + jlong used() { + return _used->get_value(); + } + jlong capacity() { + return _used->get_value(); + } + ) + + inline void update_all(size_t capacity, size_t used) { + update_capacity(capacity); + update_used(used); + } + + const char* name_space() const { return _name_space; } +}; +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_implementation/shared/markSweep.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_interface/collectedHeap.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_interface/collectedHeap.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_interface/collectedHeap.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/gc_interface/collectedHeap.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -269,6 +269,13 @@ // space). If you need the more conservative answer use is_permanent(). virtual bool is_in_permanent(const void *p) const = 0; + +#ifdef ASSERT + // Returns true if "p" is in the part of the + // heap being collected. + virtual bool is_in_partial_collection(const void *p) = 0; +#endif + bool is_in_permanent_or_null(const void *p) const { return p == NULL || is_in_permanent(p); } @@ -284,11 +291,7 @@ // An object is scavengable if its location may move during a scavenge. // (A scavenge is a GC which is not a full GC.) - // Currently, this just means it is not perm (and not null). - // This could change if we rethink what's in perm-gen. - bool is_scavengable(const void *p) const { - return !is_in_permanent_or_null(p); - } + virtual bool is_scavengable(const void *p) = 0; // Returns "TRUE" if "p" is a method oop in the // current heap, with high probability. This predicate
--- a/src/share/vm/interpreter/abstractInterpreter.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/abstractInterpreter.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -104,6 +104,7 @@ java_lang_math_sqrt, // implementation of java.lang.Math.sqrt (x) java_lang_math_log, // implementation of java.lang.Math.log (x) java_lang_math_log10, // implementation of java.lang.Math.log10 (x) + java_lang_ref_reference_get, // implementation of java.lang.ref.Reference.get() number_of_method_entries, invalid = -1 }; @@ -140,7 +141,7 @@ // Method activation static MethodKind method_kind(methodHandle m); static address entry_for_kind(MethodKind k) { assert(0 <= k && k < number_of_method_entries, "illegal kind"); return _entry_table[k]; } - static address entry_for_method(methodHandle m) { return _entry_table[method_kind(m)]; } + static address entry_for_method(methodHandle m) { return entry_for_kind(method_kind(m)); } static void print_method_kind(MethodKind kind) PRODUCT_RETURN; @@ -174,19 +175,32 @@ int temps, int popframe_args, int monitors, + int caller_actual_parameters, int callee_params, int callee_locals, - bool is_top_frame); + bool is_top_frame) { + return layout_activation(method, + temps, + popframe_args, + monitors, + caller_actual_parameters, + callee_params, + callee_locals, + (frame*)NULL, + (frame*)NULL, + is_top_frame); + } static int layout_activation(methodOop method, - int temps, - int popframe_args, - int monitors, - int callee_params, - int callee_locals, - frame* caller, - frame* interpreter_frame, - bool is_top_frame); + int temps, + int popframe_args, + int monitors, + int caller_actual_parameters, + int callee_params, + int callee_locals, + frame* caller, + frame* interpreter_frame, + bool is_top_frame); // Runtime support static bool is_not_reached( methodHandle method, int bci);
--- a/src/share/vm/interpreter/bytecodeInterpreter.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/bytecodeInterpreter.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/bytecodeTracer.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/bytecodeTracer.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -203,11 +203,14 @@ if (value == NULL) { st->print_cr(" NULL"); } else if (java_lang_String::is_instance(value)) { - EXCEPTION_MARK; - Handle h_value (THREAD, value); - Symbol* sym = java_lang_String::as_symbol(h_value, CATCH); - print_symbol(sym, st); - sym->decrement_refcount(); + char buf[40]; + int len = java_lang_String::utf8_length(value); + java_lang_String::as_utf8_string(value, buf, sizeof(buf)); + if (len >= (int)sizeof(buf)) { + st->print_cr(" %s...[%d]", buf, len); + } else { + st->print_cr(" %s", buf); + } } else { st->print_cr(" " PTR_FORMAT, (intptr_t) value); } @@ -345,7 +348,6 @@ break; case JVM_CONSTANT_NameAndType: case JVM_CONSTANT_InvokeDynamic: - case JVM_CONSTANT_InvokeDynamicTrans: has_klass = false; break; default:
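An illustrative sketch (not from the changeset): the new string-constant printing above goes through a fixed 40-byte stack buffer and appends "...[length]" when the constant did not fit, which avoids the exception machinery and Symbol refcounting of the old path. A standalone sketch of the same truncating print, with hypothetical names:

  #include <cstdio>
  #include <cstring>

  // Print s through a fixed-size buffer, marking truncation with "...[len]".
  static void print_truncated(const char* s) {
    char buf[40];
    int len = (int) std::strlen(s);
    std::snprintf(buf, sizeof(buf), "%s", s);   // copies at most 39 chars + NUL
    if (len >= (int) sizeof(buf)) {
      std::printf(" %s...[%d]\n", buf, len);
    } else {
      std::printf(" %s\n", buf);
    }
  }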
--- a/src/share/vm/interpreter/cppInterpreter.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/cppInterpreter.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -125,6 +125,7 @@ method_entry(java_lang_math_sqrt ); method_entry(java_lang_math_log ); method_entry(java_lang_math_log10 ); + method_entry(java_lang_ref_reference_get); Interpreter::_native_entry_begin = Interpreter::code()->code_end(); method_entry(native); method_entry(native_synchronized);
--- a/src/share/vm/interpreter/cppInterpreter.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/cppInterpreter.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/cppInterpreterGenerator.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/cppInterpreterGenerator.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/interpreter.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/interpreter.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -208,12 +208,6 @@ return empty; } - // Accessor method? - if (m->is_accessor()) { - assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1"); - return accessor; - } - // Special intrinsic method? // Note: This test must come _after_ the test for native methods, // otherwise we will run into problems with JDK 1.2, see also @@ -227,6 +221,15 @@ case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ; case vmIntrinsics::_dlog : return java_lang_math_log ; case vmIntrinsics::_dlog10: return java_lang_math_log10; + + case vmIntrinsics::_Reference_get: + return java_lang_ref_reference_get; + } + + // Accessor method? + if (m->is_accessor()) { + assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1"); + return accessor; } // Note: for now: zero locals for all non-empty methods
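An illustrative sketch (not from the changeset): the point of moving the accessor test below the intrinsic test above is ordering. java.lang.ref.Reference.get() has exactly the shape of a field accessor, so if the accessor check ran first the method would get the generic accessor entry and the dedicated java_lang_ref_reference_get entry would never be selected. A minimal sketch of why the classification order matters, with hypothetical names:

  enum MethodKind { kReferenceGet, kAccessor, kNormal };

  // A method can satisfy both predicates; the more specific classification
  // (the Reference.get intrinsic) must be tested first or it is never reached.
  static MethodKind classify(bool is_reference_get_intrinsic, bool is_accessor) {
    if (is_reference_get_intrinsic) return kReferenceGet;  // checked before accessor
    if (is_accessor)                return kAccessor;
    return kNormal;
  }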
--- a/src/share/vm/interpreter/interpreter.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/interpreter.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/interpreterGenerator.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/interpreterGenerator.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/interpreterRuntime.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -369,10 +369,7 @@ } // create exception - Symbol* java_lang_invoke_WrongMethodTypeException = vmSymbols::java_lang_invoke_WrongMethodTypeException(); - if (AllowTransitionalJSR292) - java_lang_invoke_WrongMethodTypeException = SystemDictionaryHandles::WrongMethodTypeException_klass()->name(); - THROW_MSG(java_lang_invoke_WrongMethodTypeException, message); + THROW_MSG(vmSymbols::java_lang_invoke_WrongMethodTypeException(), message); } IRT_END
--- a/src/share/vm/interpreter/linkResolver.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/linkResolver.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -221,9 +221,7 @@ // Make sure the Java part of the runtime has been booted up. klassOop natives = SystemDictionary::MethodHandleNatives_klass(); if (natives == NULL || instanceKlass::cast(natives)->is_not_initialized()) { - Symbol* natives_name = vmSymbols::java_lang_invoke_MethodHandleNatives(); - if (natives != NULL && AllowTransitionalJSR292) natives_name = Klass::cast(natives)->name(); - SystemDictionary::resolve_or_fail(natives_name, + SystemDictionary::resolve_or_fail(vmSymbols::java_lang_invoke_MethodHandleNatives(), Handle(), Handle(), true, @@ -329,6 +327,7 @@ // 1. check if klass is not interface if (resolved_klass->is_interface()) { + ResourceMark rm(THREAD); char buf[200]; jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected", Klass::cast(resolved_klass())->external_name()); THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); @@ -415,6 +414,7 @@ // check if klass is interface if (!resolved_klass->is_interface()) { + ResourceMark rm(THREAD); char buf[200]; jio_snprintf(buf, sizeof(buf), "Found class %s, but interface was expected", Klass::cast(resolved_klass())->external_name()); THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); @@ -536,6 +536,7 @@ // check for errors if (is_static != fd.is_static()) { + ResourceMark rm(THREAD); char msg[200]; jio_snprintf(msg, sizeof(msg), "Expected %s field %s.%s", is_static ? "static" : "non-static", Klass::cast(resolved_klass())->external_name(), fd.name()->as_C_string()); THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), msg); @@ -633,6 +634,7 @@ // check if static if (!resolved_method->is_static()) { + ResourceMark rm(THREAD); char buf[200]; jio_snprintf(buf, sizeof(buf), "Expected static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), resolved_method->name(), @@ -673,6 +675,7 @@ // check if not static if (resolved_method->is_static()) { + ResourceMark rm(THREAD); char buf[200]; jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", @@ -719,6 +722,7 @@ // check if not static if (sel_method->is_static()) { + ResourceMark rm(THREAD); char buf[200]; jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), resolved_method->name(), @@ -759,6 +763,7 @@ // check if not static if (resolved_method->is_static()) { + ResourceMark rm(THREAD); char buf[200]; jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), resolved_method->name(), @@ -875,6 +880,7 @@ // check if receiver klass implements the resolved interface if (!recv_klass->is_subtype_of(resolved_klass())) { + ResourceMark rm(THREAD); char buf[200]; jio_snprintf(buf, sizeof(buf), "Class %s does not implement the requested interface %s", (Klass::cast(recv_klass()))->external_name(),
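An illustrative sketch (not from the changeset): each hunk above inserts a ResourceMark just before building an exception message, because helpers such as external_name() and name_and_sig_as_C_string() hand back strings allocated in the current thread's resource area; the mark releases those allocations when the scope exits instead of letting them accumulate. A minimal standalone sketch of the RAII pattern involved, using a hypothetical arena rather than HotSpot's ResourceArea:

  #include <cstddef>

  // Hypothetical bump-pointer arena standing in for a thread's resource area.
  struct Arena {
    char buffer[4096];
    size_t top = 0;
    char* alloc(size_t n) { char* p = buffer + top; top += n; return p; }  // no overflow check: sketch only
  };

  // RAII mark: allocations made after the mark are released when it goes out of scope.
  struct ArenaMark {
    Arena& arena;
    size_t saved_top;
    explicit ArenaMark(Arena& a) : arena(a), saved_top(a.top) {}
    ~ArenaMark() { arena.top = saved_top; }
  };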
--- a/src/share/vm/interpreter/linkResolver.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/linkResolver.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/rewriter.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/rewriter.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -52,7 +52,6 @@ case JVM_CONSTANT_MethodHandle : // fall through case JVM_CONSTANT_MethodType : // fall through case JVM_CONSTANT_InvokeDynamic : // fall through - case JVM_CONSTANT_InvokeDynamicTrans: // fall through add_cp_cache_entry(i); break; } @@ -62,7 +61,6 @@ "all cp cache indexes fit in a u2"); _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0); - _have_invoke_dynamic |= ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamicTrans)) != 0); } @@ -81,16 +79,10 @@ if (pool_index >= 0 && _pool->tag_at(pool_index).is_invoke_dynamic()) { int bsm_index = _pool->invoke_dynamic_bootstrap_method_ref_index_at(pool_index); - if (bsm_index != 0) { - assert(_pool->tag_at(bsm_index).is_method_handle(), "must be a MH constant"); - // There is a CP cache entry holding the BSM for these calls. - int bsm_cache_index = cp_entry_to_cp_cache(bsm_index); - cache->entry_at(i)->initialize_bootstrap_method_index_in_cache(bsm_cache_index); - } else { - // There is no CP cache entry holding the BSM for these calls. - // We will need to look for a class-global BSM, later. - guarantee(AllowTransitionalJSR292, ""); - } + assert(_pool->tag_at(bsm_index).is_method_handle(), "must be a MH constant"); + // There is a CP cache entry holding the BSM for these calls. + int bsm_cache_index = cp_entry_to_cp_cache(bsm_index); + cache->entry_at(i)->initialize_bootstrap_method_index_in_cache(bsm_cache_index); } } }
--- a/src/share/vm/interpreter/templateInterpreter.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/templateInterpreter.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -372,6 +372,7 @@ method_entry(java_lang_math_sqrt ) method_entry(java_lang_math_log ) method_entry(java_lang_math_log10) + method_entry(java_lang_ref_reference_get) // all native method kinds (must be one contiguous block) Interpreter::_native_entry_begin = Interpreter::code()->code_end();
--- a/src/share/vm/interpreter/templateInterpreter.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/templateInterpreter.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/templateInterpreterGenerator.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/templateInterpreterGenerator.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/templateTable.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/interpreter/templateTable.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/allocation.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/allocation.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -44,6 +44,14 @@ return (void *) AllocateHeap(size, "CHeapObj-new"); } +void* CHeapObj::operator new (size_t size, const std::nothrow_t& nothrow_constant) { + char* p = (char*) os::malloc(size); +#ifdef ASSERT + if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p); +#endif + return p; +} + void CHeapObj::operator delete(void* p){ FreeHeap(p); }
--- a/src/share/vm/memory/allocation.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/allocation.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -34,6 +34,8 @@ #include "opto/c2_globals.hpp" #endif +#include <new> + #define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1) #define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1)) #define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK) @@ -99,6 +101,7 @@ class CHeapObj ALLOCATION_SUPER_CLASS_SPEC { public: void* operator new(size_t size); + void* operator new (size_t size, const std::nothrow_t& nothrow_constant); void operator delete(void* p); void* new_array(size_t size); };
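An illustrative sketch (not from the changeset): the overload added above lets callers request a C-heap allocation that reports failure by returning NULL, whereas the ordinary operator new path treats allocation failure as fatal to the VM. A standalone sketch of the pattern and its placement-syntax usage, with hypothetical class names:

  #include <cstddef>
  #include <cstdlib>
  #include <new>

  // Hypothetical stand-in for CHeapObj: the ordinary operator new treats
  // failure as fatal, while the nothrow overload returns NULL so the caller
  // can back off instead of the process exiting.
  struct CHeapLike {
    void* operator new(size_t size) {
      void* p = std::malloc(size);
      if (p == NULL) std::abort();              // stands in for a fatal VM exit
      return p;
    }
    void* operator new(size_t size, const std::nothrow_t&) throw() {
      return std::malloc(size);                 // NULL on failure, caller checks
    }
    void operator delete(void* p) { std::free(p); }
  };

  struct Probe : CHeapLike { int payload[16]; };

  // Usage:  Probe* p = new (std::nothrow) Probe();
  //         if (p == NULL) { /* recover instead of aborting */ }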
--- a/src/share/vm/memory/barrierSet.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/barrierSet.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/blockOffsetTable.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/blockOffsetTable.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -541,20 +541,33 @@ // to go back by. size_t n_cards_back = entry_to_cards_back(offset); q -= (N_words * n_cards_back); - assert(q >= _sp->bottom(), "Went below bottom!"); + assert(q >= _sp->bottom(), + err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT, + q, _sp->bottom())); + assert(q < _sp->end(), + err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT, + q, _sp->end())); index -= n_cards_back; offset = _array->offset_array(index); } assert(offset < N_words, "offset too large"); index--; q -= offset; + assert(q >= _sp->bottom(), + err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT, + q, _sp->bottom())); + assert(q < _sp->end(), + err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT, + q, _sp->end())); HeapWord* n = q; while (n <= addr) { debug_only(HeapWord* last = q); // for debugging q = n; n += _sp->block_size(n); - assert(n > q, err_msg("Looping at: " INTPTR_FORMAT, n)); + assert(n > q, + err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT " _sp = [" PTR_FORMAT "," PTR_FORMAT ")", + n, last, _sp->bottom(), _sp->end())); } assert(q <= addr, err_msg("wrong order for current (" INTPTR_FORMAT ") <= arg (" INTPTR_FORMAT ")", q, addr)); assert(addr <= n, err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")", addr, n));
--- a/src/share/vm/memory/cardTableModRefBS.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/cardTableModRefBS.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -455,36 +455,40 @@ return true; } - -void CardTableModRefBS::non_clean_card_iterate(Space* sp, - MemRegion mr, - DirtyCardToOopClosure* dcto_cl, - MemRegionClosure* cl, - bool clear) { +void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp, + MemRegion mr, + OopsInGenClosure* cl, + CardTableRS* ct) { if (!mr.is_empty()) { int n_threads = SharedHeap::heap()->n_par_threads(); if (n_threads > 0) { #ifndef SERIALGC - par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads); + non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads); #else // SERIALGC fatal("Parallel gc not supported here."); #endif // SERIALGC } else { - non_clean_card_iterate_work(mr, cl, clear); + // We do not call the non_clean_card_iterate_serial() version below because + // we want to clear the cards (which non_clean_card_iterate_serial() does not + // do for us): clear_cl here does the work of finding contiguous dirty ranges + // of cards to process and clear. + + DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), + cl->gen_boundary()); + ClearNoncleanCardWrapper clear_cl(dcto_cl, ct); + + clear_cl.do_MemRegion(mr); } } } -// NOTE: For this to work correctly, it is important that -// we look for non-clean cards below (so as to catch those -// marked precleaned), rather than look explicitly for dirty -// cards (and miss those marked precleaned). In that sense, -// the name precleaned is currently somewhat of a misnomer. -void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr, - MemRegionClosure* cl, - bool clear) { - // Figure out whether we have to worry about parallelism. - bool is_par = (SharedHeap::heap()->n_par_threads() > 1); +// The iterator itself is not MT-aware, but +// MT-aware callers and closures can use this to +// accomplish dirty card iteration in parallel. The +// iterator itself does not clear the dirty cards, or +// change their values in any manner. +void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr, + MemRegionClosure* cl) { for (int i = 0; i < _cur_covered_regions; i++) { MemRegion mri = mr.intersection(_covered[i]); if (mri.word_size() > 0) { @@ -506,22 +510,6 @@ MemRegion cur_cards(addr_for(cur_entry), non_clean_cards * card_size_in_words); MemRegion dirty_region = cur_cards.intersection(mri); - if (clear) { - for (size_t i = 0; i < non_clean_cards; i++) { - // Clean the dirty cards (but leave the other non-clean - // alone.) If parallel, do the cleaning atomically. 
- jbyte cur_entry_val = cur_entry[i]; - if (card_is_dirty_wrt_gen_iter(cur_entry_val)) { - if (is_par) { - jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val); - assert(res != clean_card, - "Dirty card mysteriously cleaned"); - } else { - cur_entry[i] = clean_card; - } - } - } - } cl->do_MemRegion(dirty_region); } cur_entry = next_entry; @@ -530,22 +518,6 @@ } } -void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp, - OopClosure* cl, - bool clear, - bool before_save_marks) { - // Note that dcto_cl is resource-allocated, so there is no - // corresponding "delete". - DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision()); - MemRegion used_mr; - if (before_save_marks) { - used_mr = sp->used_region_at_save_marks(); - } else { - used_mr = sp->used_region(); - } - non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear); -} - void CardTableModRefBS::dirty_MemRegion(MemRegion mr) { assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start"); assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); @@ -593,9 +565,8 @@ memset(first, dirty_card, last-first); } -// NOTES: -// (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate() -// iterates over dirty cards ranges in increasing address order. +// Unlike several other card table methods, dirty_card_iterate() +// iterates over dirty cards ranges in increasing address order. void CardTableModRefBS::dirty_card_iterate(MemRegion mr, MemRegionClosure* cl) { for (int i = 0; i < _cur_covered_regions; i++) { @@ -685,43 +656,37 @@ } #ifndef PRODUCT -class GuaranteeNotModClosure: public MemRegionClosure { - CardTableModRefBS* _ct; -public: - GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {} - void do_MemRegion(MemRegion mr) { - jbyte* entry = _ct->byte_for(mr.start()); - guarantee(*entry != CardTableModRefBS::clean_card, - "Dirty card in region that should be clean"); +void CardTableModRefBS::verify_region(MemRegion mr, + jbyte val, bool val_equals) { + jbyte* start = byte_for(mr.start()); + jbyte* end = byte_for(mr.last()); + bool failures = false; + for (jbyte* curr = start; curr <= end; ++curr) { + jbyte curr_val = *curr; + bool failed = (val_equals) ? (curr_val != val) : (curr_val == val); + if (failed) { + if (!failures) { + tty->cr(); + tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]"); + tty->print_cr("== %sexpecting value: %d", + (val_equals) ? "" : "not ", val); + failures = true; + } + tty->print_cr("== card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], " + "val: %d", curr, addr_for(curr), + (HeapWord*) (((size_t) addr_for(curr)) + card_size), + (int) curr_val); + } } -}; - -void CardTableModRefBS::verify_clean_region(MemRegion mr) { - GuaranteeNotModClosure blk(this); - non_clean_card_iterate_work(mr, &blk, false); + guarantee(!failures, "there should not have been any failures"); } -// To verify a MemRegion is entirely dirty this closure is passed to -// dirty_card_iterate. If the region is dirty do_MemRegion will be -// invoked only once with a MemRegion equal to the one being -// verified. 
-class GuaranteeDirtyClosure: public MemRegionClosure { - CardTableModRefBS* _ct; - MemRegion _mr; - bool _result; -public: - GuaranteeDirtyClosure(CardTableModRefBS* ct, MemRegion mr) - : _ct(ct), _mr(mr), _result(false) {} - void do_MemRegion(MemRegion mr) { - _result = _mr.equals(mr); - } - bool result() const { return _result; } -}; +void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) { + verify_region(mr, dirty_card, false /* val_equals */); +} void CardTableModRefBS::verify_dirty_region(MemRegion mr) { - GuaranteeDirtyClosure blk(this, mr); - dirty_card_iterate(mr, &blk); - guarantee(blk.result(), "Non-dirty cards in region that should be dirty"); + verify_region(mr, dirty_card, true /* val_equals */); } #endif
--- a/src/share/vm/memory/cardTableModRefBS.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/cardTableModRefBS.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,6 +44,7 @@ class Generation; class OopsInGenClosure; class DirtyCardToOopClosure; +class ClearNoncleanCardWrapper; class CardTableModRefBS: public ModRefBarrierSet { // Some classes get to look at some private stuff. @@ -149,7 +150,9 @@ // Mapping from address to card marking array entry jbyte* byte_for(const void* p) const { assert(_whole_heap.contains(p), - "out of bounds access to card marking array"); + err_msg("Attempt to access p = "PTR_FORMAT" out of bounds of " + " card marking array's _whole_heap = ["PTR_FORMAT","PTR_FORMAT")", + p, _whole_heap.start(), _whole_heap.end())); jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift]; assert(result >= _byte_map && result < _byte_map + _byte_map_size, "out of bounds accessor for card marking array"); @@ -165,25 +168,27 @@ // Iterate over the portion of the card-table which covers the given // region mr in the given space and apply cl to any dirty sub-regions - // of mr. cl and dcto_cl must either be the same closure or cl must - // wrap dcto_cl. Both are required - neither may be NULL. Also, dcto_cl - // may be modified. Note that this function will operate in a parallel - // mode if worker threads are available. - void non_clean_card_iterate(Space* sp, MemRegion mr, - DirtyCardToOopClosure* dcto_cl, - MemRegionClosure* cl, - bool clear); + // of mr. Dirty cards are _not_ cleared by the iterator method itself, + // but closures may arrange to do so on their own should they so wish. + void non_clean_card_iterate_serial(MemRegion mr, MemRegionClosure* cl); - // Utility function used to implement the other versions below. - void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl, - bool clear); + // A variant of the above that will operate in a parallel mode if + // worker threads are available, and clear the dirty cards as it + // processes them. + // XXX ??? MemRegionClosure above vs OopsInGenClosure below XXX + // XXX some new_dcto_cl's take OopClosure's, plus as above there are + // some MemRegionClosures. Clean this up everywhere. XXX + void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr, + OopsInGenClosure* cl, CardTableRS* ct); - void par_non_clean_card_iterate_work(Space* sp, MemRegion mr, - DirtyCardToOopClosure* dcto_cl, - MemRegionClosure* cl, - bool clear, - int n_threads); + private: + // Work method used to implement non_clean_card_iterate_possibly_parallel() + // above in the parallel case. + void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr, + OopsInGenClosure* cl, CardTableRS* ct, + int n_threads); + protected: // Dirty the bytes corresponding to "mr" (not all of which must be // covered.) void dirty_MemRegion(MemRegion mr); @@ -194,11 +199,6 @@ // *** Support for parallel card scanning. - enum SomeConstantsForParallelism { - StridesPerThread = 2, - CardsPerStrideChunk = 256 - }; - // This is an array, one element per covered region of the card table. // Each entry is itself an array, with one element per chunk in the // covered region. 
Each entry of these arrays is the lowest non-clean @@ -231,7 +231,7 @@ // covers the given address. uintptr_t addr_to_chunk_index(const void* addr) { uintptr_t card = (uintptr_t) byte_for(addr); - return card / CardsPerStrideChunk; + return card / ParGCCardsPerStrideChunk; } // Apply cl, which must either itself apply dcto_cl or be dcto_cl, @@ -239,9 +239,8 @@ void process_stride(Space* sp, MemRegion used, jint stride, int n_strides, - DirtyCardToOopClosure* dcto_cl, - MemRegionClosure* cl, - bool clear, + OopsInGenClosure* cl, + CardTableRS* ct, jbyte** lowest_non_clean, uintptr_t lowest_non_clean_base_chunk_index, size_t lowest_non_clean_chunk_size); @@ -382,6 +381,11 @@ return (addr_for(pcard) == p); } + HeapWord* align_to_card_boundary(HeapWord* p) { + jbyte* pcard = byte_for(p + card_size_in_words - 1); + return addr_for(pcard); + } + // The kinds of precision a CardTableModRefBS may offer. enum PrecisionStyle { Precise, @@ -397,9 +401,6 @@ virtual void invalidate(MemRegion mr, bool whole_heap = false); void clear(MemRegion mr); void dirty(MemRegion mr); - void mod_oop_in_space_iterate(Space* sp, OopClosure* cl, - bool clear = false, - bool before_save_marks = false); // *** Card-table-RemSet-specific things. @@ -410,18 +411,15 @@ // *decreasing* address order. (This order aids with imprecise card // marking, where a dirty card may cause scanning, and summarization // marking, of objects that extend onto subsequent cards.) - // If "clear" is true, the card is (conceptually) marked unmodified before - // applying the closure. - void mod_card_iterate(MemRegionClosure* cl, bool clear = false) { - non_clean_card_iterate_work(_whole_heap, cl, clear); + void mod_card_iterate(MemRegionClosure* cl) { + non_clean_card_iterate_serial(_whole_heap, cl); } // Like the "mod_cards_iterate" above, except only invokes the closure // for cards within the MemRegion "mr" (which is required to be // card-aligned and sized.) - void mod_card_iterate(MemRegion mr, MemRegionClosure* cl, - bool clear = false) { - non_clean_card_iterate_work(mr, cl, clear); + void mod_card_iterate(MemRegion mr, MemRegionClosure* cl) { + non_clean_card_iterate_serial(mr, cl); } static uintx ct_max_alignment_constraint(); @@ -455,14 +453,18 @@ size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte)); HeapWord* result = (HeapWord*) (delta << card_shift); assert(_whole_heap.contains(result), - "out of bounds accessor from card marking array"); + err_msg("Returning result = "PTR_FORMAT" out of bounds of " + " card marking array's _whole_heap = ["PTR_FORMAT","PTR_FORMAT")", + result, _whole_heap.start(), _whole_heap.end())); return result; } // Mapping from address to card marking array index. 
size_t index_for(void* p) { assert(_whole_heap.contains(p), - "out of bounds access to card marking array"); + err_msg("Attempt to access p = "PTR_FORMAT" out of bounds of " + " card marking array's _whole_heap = ["PTR_FORMAT","PTR_FORMAT")", + p, _whole_heap.start(), _whole_heap.end())); return byte_for(p) - _byte_map; } @@ -473,11 +475,14 @@ void verify(); void verify_guard(); - void verify_clean_region(MemRegion mr) PRODUCT_RETURN; + // val_equals -> it will check that all cards covered by mr equal val + // !val_equals -> it will check that all cards covered by mr do not equal val + void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN; + void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN; void verify_dirty_region(MemRegion mr) PRODUCT_RETURN; static size_t par_chunk_heapword_alignment() { - return CardsPerStrideChunk * card_size_in_words; + return ParGCCardsPerStrideChunk * card_size_in_words; } }; @@ -498,4 +503,5 @@ void set_CTRS(CardTableRS* rs) { _rs = rs; } }; + #endif // SHARE_VM_MEMORY_CARDTABLEMODREFBS_HPP
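byte_for(), addr_for(), index_for() and the new align_to_card_boundary() above all come down to shift arithmetic on card_shift. Below is a standalone sketch of that mapping, assuming the usual 512-byte card (card_shift == 9); heap_base, the helper names and main() are illustrative, not the HotSpot declarations.

#include <cstdint>
#include <cstdio>

const int    card_shift = 9;                      // 512-byte cards (typical default)
const size_t card_size  = size_t(1) << card_shift;

// heap_base is assumed card-aligned in this toy model.
size_t card_index_for(uintptr_t heap_base, uintptr_t p) {
  return (p - heap_base) >> card_shift;           // which card covers p
}

uintptr_t card_start(uintptr_t heap_base, size_t index) {
  return heap_base + (index << card_shift);       // first address covered by that card
}

uintptr_t align_up_to_card(uintptr_t p) {
  return (p + card_size - 1) & ~(card_size - 1);  // round p up to a card boundary
}

int main() {
  uintptr_t base = 0x100000;
  uintptr_t p    = base + 1300;                                        // 1300/512 == card 2
  printf("card index = %zu\n", card_index_for(base, p));               // 2
  printf("card start = %#lx\n", (unsigned long)card_start(base, 2));   // base + 1024
  printf("aligned    = %#lx\n", (unsigned long)align_up_to_card(p));   // base + 1536
  return 0;
}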
--- a/src/share/vm/memory/cardTableRS.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/cardTableRS.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -105,107 +105,111 @@ g->younger_refs_iterate(blk); } -class ClearNoncleanCardWrapper: public MemRegionClosure { - MemRegionClosure* _dirty_card_closure; - CardTableRS* _ct; - bool _is_par; -private: - // Clears the given card, return true if the corresponding card should be - // processed. - bool clear_card(jbyte* entry) { - if (_is_par) { - while (true) { - // In the parallel case, we may have to do this several times. - jbyte entry_val = *entry; - assert(entry_val != CardTableRS::clean_card_val(), - "We shouldn't be looking at clean cards, and this should " - "be the only place they get cleaned."); - if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val) - || _ct->is_prev_youngergen_card_val(entry_val)) { - jbyte res = - Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val); - if (res == entry_val) { - break; - } else { - assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card, - "The CAS above should only fail if another thread did " - "a GC write barrier."); - } - } else if (entry_val == - CardTableRS::cur_youngergen_and_prev_nonclean_card) { - // Parallelism shouldn't matter in this case. Only the thread - // assigned to scan the card should change this value. - *entry = _ct->cur_youngergen_card_val(); - break; - } else { - assert(entry_val == _ct->cur_youngergen_card_val(), - "Should be the only possibility."); - // In this case, the card was clean before, and become - // cur_youngergen only because of processing of a promoted object. - // We don't have to look at the card. - return false; - } +inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) { + if (_is_par) { + return clear_card_parallel(entry); + } else { + return clear_card_serial(entry); + } +} + +inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) { + while (true) { + // In the parallel case, we may have to do this several times. + jbyte entry_val = *entry; + assert(entry_val != CardTableRS::clean_card_val(), + "We shouldn't be looking at clean cards, and this should " + "be the only place they get cleaned."); + if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val) + || _ct->is_prev_youngergen_card_val(entry_val)) { + jbyte res = + Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val); + if (res == entry_val) { + break; + } else { + assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card, + "The CAS above should only fail if another thread did " + "a GC write barrier."); } - return true; + } else if (entry_val == + CardTableRS::cur_youngergen_and_prev_nonclean_card) { + // Parallelism shouldn't matter in this case. Only the thread + // assigned to scan the card should change this value. 
+ *entry = _ct->cur_youngergen_card_val(); + break; } else { - jbyte entry_val = *entry; - assert(entry_val != CardTableRS::clean_card_val(), - "We shouldn't be looking at clean cards, and this should " - "be the only place they get cleaned."); - assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card, - "This should be possible in the sequential case."); - *entry = CardTableRS::clean_card_val(); - return true; + assert(entry_val == _ct->cur_youngergen_card_val(), + "Should be the only possibility."); + // In this case, the card was clean before, and become + // cur_youngergen only because of processing of a promoted object. + // We don't have to look at the card. + return false; } } + return true; +} -public: - ClearNoncleanCardWrapper(MemRegionClosure* dirty_card_closure, - CardTableRS* ct) : + +inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) { + jbyte entry_val = *entry; + assert(entry_val != CardTableRS::clean_card_val(), + "We shouldn't be looking at clean cards, and this should " + "be the only place they get cleaned."); + assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card, + "This should be possible in the sequential case."); + *entry = CardTableRS::clean_card_val(); + return true; +} + +ClearNoncleanCardWrapper::ClearNoncleanCardWrapper( + DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct) : _dirty_card_closure(dirty_card_closure), _ct(ct) { _is_par = (SharedHeap::heap()->n_par_threads() > 0); +} + +void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) { + assert(mr.word_size() > 0, "Error"); + assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned"); + // mr.end() may not necessarily be card aligned. + jbyte* cur_entry = _ct->byte_for(mr.last()); + const jbyte* limit = _ct->byte_for(mr.start()); + HeapWord* end_of_non_clean = mr.end(); + HeapWord* start_of_non_clean = end_of_non_clean; + while (cur_entry >= limit) { + HeapWord* cur_hw = _ct->addr_for(cur_entry); + if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) { + // Continue the dirty range by opening the + // dirty window one card to the left. + start_of_non_clean = cur_hw; + } else { + // We hit a "clean" card; process any non-empty + // "dirty" range accumulated so far. + if (start_of_non_clean < end_of_non_clean) { + const MemRegion mrd(start_of_non_clean, end_of_non_clean); + _dirty_card_closure->do_MemRegion(mrd); + } + // Reset the dirty window, while continuing to look + // for the next dirty card that will start a + // new dirty window. + end_of_non_clean = cur_hw; + start_of_non_clean = cur_hw; + } + // Note that "cur_entry" leads "start_of_non_clean" in + // its leftward excursion after this point + // in the loop and, when we hit the left end of "mr", + // will point off of the left end of the card-table + // for "mr". + cur_entry--; } - void do_MemRegion(MemRegion mr) { - // We start at the high end of "mr", walking backwards - // while accumulating a contiguous dirty range of cards in - // [start_of_non_clean, end_of_non_clean) which we then - // process en masse. - HeapWord* end_of_non_clean = mr.end(); - HeapWord* start_of_non_clean = end_of_non_clean; - jbyte* entry = _ct->byte_for(mr.last()); - const jbyte* first_entry = _ct->byte_for(mr.start()); - while (entry >= first_entry) { - HeapWord* cur = _ct->addr_for(entry); - if (!clear_card(entry)) { - // We hit a clean card; process any non-empty - // dirty range accumulated so far. 
- if (start_of_non_clean < end_of_non_clean) { - MemRegion mr2(start_of_non_clean, end_of_non_clean); - _dirty_card_closure->do_MemRegion(mr2); - } - // Reset the dirty window while continuing to - // look for the next dirty window to process. - end_of_non_clean = cur; - start_of_non_clean = end_of_non_clean; - } - // Open the left end of the window one card to the left. - start_of_non_clean = cur; - // Note that "entry" leads "start_of_non_clean" in - // its leftward excursion after this point - // in the loop and, when we hit the left end of "mr", - // will point off of the left end of the card-table - // for "mr". - entry--; - } - // If the first card of "mr" was dirty, we will have - // been left with a dirty window, co-initial with "mr", - // which we now process. - if (start_of_non_clean < end_of_non_clean) { - MemRegion mr2(start_of_non_clean, end_of_non_clean); - _dirty_card_closure->do_MemRegion(mr2); - } + // If the first card of "mr" was dirty, we will have + // been left with a dirty window, co-initial with "mr", + // which we now process. + if (start_of_non_clean < end_of_non_clean) { + const MemRegion mrd(start_of_non_clean, end_of_non_clean); + _dirty_card_closure->do_MemRegion(mrd); } -}; +} + // clean (by dirty->clean before) ==> cur_younger_gen // dirty ==> cur_youngergen_and_prev_nonclean_card // precleaned ==> cur_youngergen_and_prev_nonclean_card @@ -242,12 +246,35 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl) { - DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, _ct_bs->precision(), - cl->gen_boundary()); - ClearNoncleanCardWrapper clear_cl(dcto_cl, this); - - _ct_bs->non_clean_card_iterate(sp, sp->used_region_at_save_marks(), - dcto_cl, &clear_cl, false); + const MemRegion urasm = sp->used_region_at_save_marks(); +#ifdef ASSERT + // Convert the assertion check to a warning if we are running + // CMS+ParNew until related bug is fixed. + MemRegion ur = sp->used_region(); + assert(ur.contains(urasm) || (UseConcMarkSweepGC && UseParNewGC), + err_msg("Did you forget to call save_marks()? " + "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in " + "[" PTR_FORMAT ", " PTR_FORMAT ")", + urasm.start(), urasm.end(), ur.start(), ur.end())); + // In the case of CMS+ParNew, issue a warning + if (!ur.contains(urasm)) { + assert(UseConcMarkSweepGC && UseParNewGC, "Tautology: see assert above"); + warning("CMS+ParNew: Did you forget to call save_marks()? 
" + "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in " + "[" PTR_FORMAT ", " PTR_FORMAT ")", + urasm.start(), urasm.end(), ur.start(), ur.end()); + MemRegion ur2 = sp->used_region(); + MemRegion urasm2 = sp->used_region_at_save_marks(); + if (!ur.equals(ur2)) { + warning("CMS+ParNew: Flickering used_region()!!"); + } + if (!urasm.equals(urasm2)) { + warning("CMS+ParNew: Flickering used_region_at_save_marks()!!"); + } + ShouldNotReachHere(); + } +#endif + _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this); } void CardTableRS::clear_into_younger(Generation* gen, bool clear_perm) { @@ -318,17 +345,28 @@ protected: template <class T> void do_oop_work(T* p) { HeapWord* jp = (HeapWord*)p; - if (jp >= _begin && jp < _end) { - oop obj = oopDesc::load_decode_heap_oop(p); - guarantee(obj == NULL || - (HeapWord*)p < _boundary || - (HeapWord*)obj >= _boundary, - "pointer on clean card crosses boundary"); - } + assert(jp >= _begin && jp < _end, + err_msg("Error: jp " PTR_FORMAT " should be within " + "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")", + _begin, _end)); + oop obj = oopDesc::load_decode_heap_oop(p); + guarantee(obj == NULL || (HeapWord*)obj >= _boundary, + err_msg("pointer " PTR_FORMAT " at " PTR_FORMAT " on " + "clean card crosses boundary" PTR_FORMAT, + (HeapWord*)obj, jp, _boundary)); } + public: VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) : - _boundary(b), _begin(begin), _end(end) {} + _boundary(b), _begin(begin), _end(end) { + assert(b <= begin, + err_msg("Error: boundary " PTR_FORMAT " should be at or below begin " PTR_FORMAT, + b, begin)); + assert(begin <= end, + err_msg("Error: begin " PTR_FORMAT " should be strictly below end " PTR_FORMAT, + begin, end)); + } + virtual void do_oop(oop* p) { VerifyCleanCardClosure::do_oop_work(p); } virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); } }; @@ -392,13 +430,14 @@ } } // Now traverse objects until end. - HeapWord* cur = start_block; - VerifyCleanCardClosure verify_blk(gen_boundary, begin, end); - while (cur < end) { - if (s->block_is_obj(cur) && s->obj_is_alive(cur)) { - oop(cur)->oop_iterate(&verify_blk); + if (begin < end) { + MemRegion mr(begin, end); + VerifyCleanCardClosure verify_blk(gen_boundary, begin, end); + for (HeapWord* cur = start_block; cur < end; cur += s->block_size(cur)) { + if (s->block_is_obj(cur) && s->obj_is_alive(cur)) { + oop(cur)->oop_iterate(&verify_blk, mr); + } } - cur += s->block_size(cur); } cur_entry = first_dirty; } else {
--- a/src/share/vm/memory/cardTableRS.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/cardTableRS.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,6 @@ class Space; class OopsInGenClosure; -class DirtyCardToOopClosure; // This kind of "GenRemSet" uses a card table both as shared data structure // for a mod ref barrier set and for the rem set information. @@ -166,4 +165,21 @@ }; +class ClearNoncleanCardWrapper: public MemRegionClosure { + DirtyCardToOopClosure* _dirty_card_closure; + CardTableRS* _ct; + bool _is_par; +private: + // Clears the given card, return true if the corresponding card should be + // processed. + inline bool clear_card(jbyte* entry); + // Work methods called by the clear_card() + inline bool clear_card_serial(jbyte* entry); + inline bool clear_card_parallel(jbyte* entry); + +public: + ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct); + void do_MemRegion(MemRegion mr); +}; + #endif // SHARE_VM_MEMORY_CARDTABLERS_HPP
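ClearNoncleanCardWrapper::do_MemRegion() in the cardTableRS.cpp hunks above scans the card table right-to-left, clears each non-clean card it accepts, and coalesces runs of them into one maximal dirty window before handing that window to the dirty-card closure, with a final flush for a window that reaches the left end of mr. Below is a stripped-down standalone sketch of that accumulation pattern over a plain byte array; the always-clear policy and the process callback stand in for the real clear_card() and DirtyCardToOopClosure.

#include <cstdio>

const signed char clean_card = -1;   // illustrative encoding

// Process [lo, hi) right-to-left, reporting each maximal run of non-clean cards once.
void walk_non_clean(signed char* cards, int lo, int hi,
                    void (*process)(int start, int end)) {
  int end_of_non_clean   = hi;   // exclusive right end of the current window
  int start_of_non_clean = hi;   // inclusive left end of the current window
  for (int cur = hi - 1; cur >= lo; --cur) {
    if (cards[cur] != clean_card) {
      cards[cur] = clean_card;             // clear as we go, like clear_card()
      start_of_non_clean = cur;            // grow the window one card leftward
    } else {
      if (start_of_non_clean < end_of_non_clean) {
        process(start_of_non_clean, end_of_non_clean);   // flush the accumulated window
      }
      end_of_non_clean = start_of_non_clean = cur;       // reset the window
    }
  }
  if (start_of_non_clean < end_of_non_clean) {
    process(start_of_non_clean, end_of_non_clean);       // window co-initial with the region
  }
}

int main() {
  signed char cards[] = { 0, -1, 0, 0, -1, 0 };   // dirty, clean, dirty, dirty, clean, dirty
  walk_non_clean(cards, 0, 6,
                 [](int s, int e) { printf("dirty window [%d,%d)\n", s, e); });
  return 0;                                        // prints [5,6), [2,4), [0,1)
}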
--- a/src/share/vm/memory/classify.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/classify.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/collectorPolicy.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/collectorPolicy.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -265,8 +265,6 @@ MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); always_do_update_barrier = UseConcMarkSweepGC; - BlockOffsetArrayUseUnallocatedBlock = - BlockOffsetArrayUseUnallocatedBlock || ParallelGCThreads > 0; // Check validity of heap flags assert(OldSize % min_alignment() == 0, "old space alignment");
--- a/src/share/vm/memory/compactingPermGenGen.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/compactingPermGenGen.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/dump.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/dump.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -623,24 +623,48 @@ } }; -// Itable indices are calculated based on methods array order -// (see klassItable::compute_itable_index()). Must reinitialize +// Vtable and Itable indices are calculated based on methods array +// order (see klassItable::compute_itable_index()). Must reinitialize // after ALL methods of ALL classes have been reordered. // We assume that since checkconstraints is false, this method // cannot throw an exception. An exception here would be // problematic since this is the VMThread, not a JavaThread. -class ReinitializeItables: public ObjectClosure { +class ReinitializeTables: public ObjectClosure { private: Thread* _thread; public: - ReinitializeItables(Thread* thread) : _thread(thread) {} + ReinitializeTables(Thread* thread) : _thread(thread) {} + + // Initialize super vtable first, check if already initialized to avoid + // quadradic behavior. The vtable is cleared in remove_unshareable_info. + void reinitialize_vtables(klassOop k) { + if (k->blueprint()->oop_is_instanceKlass()) { + instanceKlass* ik = instanceKlass::cast(k); + if (ik->vtable()->is_initialized()) return; + if (ik->super() != NULL) { + reinitialize_vtables(ik->super()); + } + ik->vtable()->initialize_vtable(false, _thread); + } + } void do_object(oop obj) { if (obj->blueprint()->oop_is_instanceKlass()) { instanceKlass* ik = instanceKlass::cast((klassOop)obj); + ResourceMark rm(_thread); ik->itable()->initialize_itable(false, _thread); + reinitialize_vtables((klassOop)obj); +#ifdef ASSERT + ik->vtable()->verify(tty, true); +#endif // ASSERT + } else if (obj->blueprint()->oop_is_arrayKlass()) { + // The vtable for array klasses are that of its super class, + // ie. java.lang.Object. + arrayKlass* ak = arrayKlass::cast((klassOop)obj); + if (ak->vtable()->is_initialized()) return; + ak->vtable()->initialize_vtable(false, _thread); } } }; @@ -1205,9 +1229,9 @@ gen->ro_space()->object_iterate(&sort); gen->rw_space()->object_iterate(&sort); - ReinitializeItables reinit_itables(THREAD); - gen->ro_space()->object_iterate(&reinit_itables); - gen->rw_space()->object_iterate(&reinit_itables); + ReinitializeTables reinit_tables(THREAD); + gen->ro_space()->object_iterate(&reinit_tables); + gen->rw_space()->object_iterate(&reinit_tables); tty->print_cr("done. "); tty->cr();
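ReinitializeTables above rebuilds itables and vtables after the shared-archive method reordering; reinitialize_vtables() recurses to the superclass first and returns early when a vtable is already initialized, so shared ancestors are rebuilt once rather than once per subclass, avoiding the quadratic behavior the comment warns about. A toy sketch of that parents-first, skip-if-done recursion follows; the Klass struct and init_vtable() are placeholders, not the HotSpot types.

#include <cstdio>

struct Klass {
  const char* name;
  Klass*      super;          // NULL for the root
  bool        vtable_done;    // stands in for klassVtable::is_initialized()
};

void init_vtable(Klass* k) {  // stands in for initialize_vtable(false, thread)
  printf("initializing vtable of %s\n", k->name);
  k->vtable_done = true;
}

void reinitialize_vtables(Klass* k) {
  if (k == NULL || k->vtable_done) return;   // already rebuilt: skip the whole chain
  reinitialize_vtables(k->super);            // the super's vtable must be valid first
  init_vtable(k);
}

int main() {
  Klass object = { "Object", NULL, false };
  Klass a      = { "A", &object, false };
  Klass b      = { "B", &a, false };
  reinitialize_vtables(&b);    // initializes Object, A, B in that order
  reinitialize_vtables(&a);    // no output: already done
  return 0;
}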
--- a/src/share/vm/memory/genCollectedHeap.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/genCollectedHeap.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -537,7 +537,7 @@ // Timer for individual generations. Last argument is false: no CR TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty); TraceCollectorStats tcs(_gens[i]->counters()); - TraceMemoryManagerStats tmms(_gens[i]->kind()); + TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause()); size_t prev_used = _gens[i]->used(); _gens[i]->stat_record()->invocations++; @@ -711,15 +711,6 @@ _gen_process_strong_tasks->set_n_threads(t); } -class AssertIsPermClosure: public OopClosure { -public: - void do_oop(oop* p) { - assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm."); - } - void do_oop(narrowOop* p) { ShouldNotReachHere(); } -}; -static AssertIsPermClosure assert_is_perm_closure; - void GenCollectedHeap:: gen_process_strong_roots(int level, bool younger_gens_as_roots, @@ -962,6 +953,13 @@ } } +bool GenCollectedHeap::is_in_young(oop p) { + bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start(); + assert(result == _gens[0]->is_in_reserved(p), + err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p)); + return result; +} + // Returns "TRUE" iff "p" points into the allocated area of the heap. bool GenCollectedHeap::is_in(const void* p) const { #ifndef ASSERT @@ -984,10 +982,16 @@ return false; } -// Returns "TRUE" iff "p" points into the allocated area of the heap. -bool GenCollectedHeap::is_in_youngest(void* p) { - return _gens[0]->is_in(p); +#ifdef ASSERT +// Don't implement this by using is_in_young(). This method is used +// in some cases to check that is_in_young() is correct. +bool GenCollectedHeap::is_in_partial_collection(const void* p) { + assert(is_in_reserved(p) || p == NULL, + "Does not work if address is non-null and outside of the heap"); + // The order of the generations is young (low addr), old, perm (high addr) + return p < _gens[_n_gens - 2]->reserved().end() && p != NULL; } +#endif void GenCollectedHeap::oop_iterate(OopClosure* cl) { for (int i = 0; i < _n_gens; i++) {
--- a/src/share/vm/memory/genCollectedHeap.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/genCollectedHeap.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -216,8 +216,18 @@ } } - // Returns "TRUE" iff "p" points into the youngest generation. - bool is_in_youngest(void* p); + // Returns true if the reference is to an object in the reserved space + // for the young generation. + // Assumes the the young gen address range is less than that of the old gen. + bool is_in_young(oop p); + +#ifdef ASSERT + virtual bool is_in_partial_collection(const void* p); +#endif + + virtual bool is_scavengable(const void* addr) { + return is_in_young((oop)addr); + } // Iteration functions. void oop_iterate(OopClosure* cl); @@ -283,7 +293,7 @@ // "Check can_elide_initializing_store_barrier() for this collector"); // but unfortunately the flag UseSerialGC need not necessarily always // be set when DefNew+Tenured are being used. - return is_in_youngest((void*)new_obj); + return is_in_young(new_obj); } // Can a compiler elide a store barrier when it writes @@ -427,13 +437,13 @@ // explicitly mark reachable objects in younger generations, to avoid // excess storage retention.) If "collecting_perm_gen" is false, then // roots that may only contain references to permGen objects are not - // scanned. The "so" argument determines which of the roots + // scanned; instead, the older_gens closure is applied to all outgoing + // references in the perm gen. The "so" argument determines which of the roots // the closure is applied to: // "SO_None" does none; // "SO_AllClasses" applies the closure to all entries in the SystemDictionary; // "SO_SystemClasses" to all the "system" classes and loaders; - // "SO_Symbols_and_Strings" applies the closure to all entries in - // SymbolsTable and StringTable. + // "SO_Strings" applies the closure to all entries in the StringTable. void gen_process_strong_roots(int level, bool younger_gens_as_roots, // The remaining arguments are in an order
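GenCollectedHeap::is_in_young() in the two hunks above is a pure address comparison: the young generation is reserved at lower addresses than the old generation, so any heap pointer below the old gen's reserved start is young, and the assert cross-checks the fast test against the young gen's reserved range. A small standalone sketch of that layout assumption follows; the GenLayout struct and the addresses are made up for illustration.

#include <cassert>
#include <cstdint>

// Reserved layout, low to high: [young_start, young_end)[old_start, old_end).
struct GenLayout {
  uintptr_t young_start, young_end;   // young_end == old_start in this toy layout
  uintptr_t old_start,   old_end;
};

// Precondition: p is an address inside the reserved heap.
bool is_in_young(const GenLayout& h, uintptr_t p) {
  bool result = p < h.old_start;                               // the fast address check
  assert(result == (p >= h.young_start && p < h.young_end));   // cross-check, as in the diff
  return result;
}

int main() {
  GenLayout h = { 0x1000, 0x2000, 0x2000, 0x8000 };
  assert( is_in_young(h, 0x1800));
  assert(!is_in_young(h, 0x3000));
  return 0;
}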
--- a/src/share/vm/memory/genMarkSweep.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/genMarkSweep.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/genOopClosures.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/genOopClosures.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -175,7 +175,7 @@ protected: template <class T> inline void do_oop_work(T* p) { oop obj = oopDesc::load_decode_heap_oop(p); - guarantee(obj->is_oop_or_null(), "invalid oop"); + guarantee(obj->is_oop_or_null(), err_msg("invalid oop: " INTPTR_FORMAT, (oopDesc*) obj)); } public: virtual void do_oop(oop* p);
--- a/src/share/vm/memory/heap.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/heap.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/heapInspection.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/heapInspection.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/iterator.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/iterator.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/modRefBarrierSet.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/modRefBarrierSet.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -88,15 +88,6 @@ assert(false, "can't call"); } - // Invoke "cl->do_oop" on (the address of) every possibly-modifed - // reference field in objects in "sp". If "clear" is "true", the oops - // are no longer considered possibly modified after application of the - // closure. If' "before_save_marks" is true, oops in objects allocated - // after the last call to "save_marks" on "sp" will not be considered. - virtual void mod_oop_in_space_iterate(Space* sp, OopClosure* cl, - bool clear = false, - bool before_save_marks = false) = 0; - // Causes all refs in "mr" to be assumed to be modified. If "whole_heap" // is true, the caller asserts that the entire heap is being invalidated, // which may admit an optimized implementation for some barriers. @@ -109,12 +100,6 @@ // Pass along the argument to the superclass. ModRefBarrierSet(int max_covered_regions) : BarrierSet(max_covered_regions) {} - -#ifndef PRODUCT - // Verifies that the given region contains no modified references. - virtual void verify_clean_region(MemRegion mr) = 0; -#endif - }; #endif // SHARE_VM_MEMORY_MODREFBARRIERSET_HPP
--- a/src/share/vm/memory/restore.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/restore.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/serialize.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/serialize.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/sharedHeap.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/sharedHeap.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,7 +46,6 @@ SH_PS_Management_oops_do, SH_PS_SystemDictionary_oops_do, SH_PS_jvmti_oops_do, - SH_PS_SymbolTable_oops_do, SH_PS_StringTable_oops_do, SH_PS_CodeCache_oops_do, // Leave this one last. @@ -103,6 +102,17 @@ }; static AssertIsPermClosure assert_is_perm_closure; +#ifdef ASSERT +class AssertNonScavengableClosure: public OopClosure { +public: + virtual void do_oop(oop* p) { + assert(!Universe::heap()->is_in_partial_collection(*p), + "Referent should not be scavengable."); } + virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } +}; +static AssertNonScavengableClosure assert_is_non_scavengable_closure; +#endif + void SharedHeap::change_strong_roots_parity() { // Also set the new collection parity. assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2, @@ -161,13 +171,9 @@ if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) { if (so & SO_AllClasses) { SystemDictionary::oops_do(roots); - } else - if (so & SO_SystemClasses) { - SystemDictionary::always_strong_oops_do(roots); - } - } - - if (!_process_strong_tasks->is_task_claimed(SH_PS_SymbolTable_oops_do)) { + } else if (so & SO_SystemClasses) { + SystemDictionary::always_strong_oops_do(roots); + } } if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) { @@ -201,9 +207,10 @@ CodeCache::scavenge_root_nmethods_do(code_roots); } } - // Verify if the code cache contents are in the perm gen - NOT_PRODUCT(CodeBlobToOopClosure assert_code_is_perm(&assert_is_perm_closure, /*do_marking=*/ false)); - NOT_PRODUCT(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_perm)); + // Verify that the code cache contents are not subject to + // movement by a scavenging collection. + DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false)); + DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable)); } if (!collecting_perm_gen) {
--- a/src/share/vm/memory/sharedHeap.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/sharedHeap.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -192,9 +192,8 @@ SO_None = 0x0, SO_AllClasses = 0x1, SO_SystemClasses = 0x2, - SO_Symbols = 0x4, - SO_Strings = 0x8, - SO_CodeCache = 0x10 + SO_Strings = 0x4, + SO_CodeCache = 0x8 }; FlexibleWorkGang* workers() const { return _workers; } @@ -208,14 +207,13 @@ // Invoke the "do_oop" method the closure "roots" on all root locations. // If "collecting_perm_gen" is false, then roots that may only contain - // references to permGen objects are not scanned. If true, the - // "perm_gen" closure is applied to all older-to-younger refs in the + // references to permGen objects are not scanned; instead, in that case, + // the "perm_blk" closure is applied to all outgoing refs in the // permanent generation. The "so" argument determines which of roots // the closure is applied to: // "SO_None" does none; // "SO_AllClasses" applies the closure to all entries in the SystemDictionary; // "SO_SystemClasses" to all the "system" classes and loaders; - // "SO_Symbols" applies the closure to all entries in SymbolsTable; // "SO_Strings" applies the closure to all entries in StringTable; // "SO_CodeCache" applies the closure to all elements of the CodeCache. void process_strong_roots(bool activate_scope,
--- a/src/share/vm/memory/space.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/space.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -97,6 +97,14 @@ } } +// We get called with "mr" representing the dirty region +// that we want to process. Because of imprecise marking, +// we may need to extend the incoming "mr" to the right, +// and scan more. However, because we may already have +// scanned some of that extended region, we may need to +// trim its right-end back some so we do not scan what +// we (or another worker thread) may already have scanned +// or planning to scan. void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) { // Some collectors need to do special things whenever their dirty @@ -148,7 +156,7 @@ // e.g. the dirty card region is entirely in a now free object // -- something that could happen with a concurrent sweeper. bottom = MIN2(bottom, top); - mr = MemRegion(bottom, top); + MemRegion extended_mr = MemRegion(bottom, top); assert(bottom <= top && (_precision != CardTableModRefBS::ObjHeadPreciseArray || _min_done == NULL || @@ -156,8 +164,8 @@ "overlap!"); // Walk the region if it is not empty; otherwise there is nothing to do. - if (!mr.is_empty()) { - walk_mem_region(mr, bottom_obj, top); + if (!extended_mr.is_empty()) { + walk_mem_region(extended_mr, bottom_obj, top); } // An idempotent closure might be applied in any order, so we don't @@ -818,9 +826,14 @@ // This version requires locking. inline HeapWord* ContiguousSpace::allocate_impl(size_t size, HeapWord* const end_value) { + // In G1 there are places where a GC worker can allocates into a + // region using this serial allocation code without being prone to a + // race with other GC workers (we ensure that no other GC worker can + // access the same region at the same time). So the assert below is + // too strong in the case of G1. assert(Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && - Thread::current()->is_VM_thread()), + (Thread::current()->is_VM_thread() || UseG1GC)), "not locked"); HeapWord* obj = top(); if (pointer_delta(end_value, obj) >= size) {
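The allocate_impl() hunk at the end of the space.cpp diff relaxes the locking assert (a G1 worker may allocate serially into a region it owns) around what is otherwise a plain bump-pointer allocation: compare the remaining room against the request, then advance top. The rest of the function lies outside this hunk, so the following is only a standalone sketch of that bump-pointer scheme, with a toy word type standing in for HeapWord*.

#include <cstddef>

typedef unsigned long HeapWord_t;   // toy stand-in for a heap word

// Toy bump-pointer space: allocation just advances 'top' toward 'end'.
struct ToySpace {
  HeapWord_t* bottom;
  HeapWord_t* top;
  HeapWord_t* end;

  // Serial path: the caller must guarantee exclusion (heap lock, safepoint VM
  // thread, or a worker that owns the region, per the comment in the diff).
  HeapWord_t* allocate_impl(size_t size_in_words) {
    if (size_t(end - top) >= size_in_words) {   // pointer_delta(end, top) >= size
      HeapWord_t* obj = top;
      top += size_in_words;                     // bump the pointer
      return obj;
    }
    return NULL;                                // space exhausted
  }
};

int main() {
  HeapWord_t backing[16];
  ToySpace s = { backing, backing, backing + 16 };
  HeapWord_t* a = s.allocate_impl(10);          // succeeds
  HeapWord_t* b = s.allocate_impl(10);          // fails: only 6 words remain
  return (a != NULL && b == NULL) ? 0 : 1;
}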
--- a/src/share/vm/memory/universe.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/memory/universe.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/arrayKlass.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/arrayKlass.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/arrayOop.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/arrayOop.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/constantPoolKlass.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/constantPoolKlass.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -245,13 +245,13 @@ } oop* addr; addr = cp->tags_addr(); - blk->do_oop(addr); + if (mr.contains(addr)) blk->do_oop(addr); addr = cp->cache_addr(); - blk->do_oop(addr); + if (mr.contains(addr)) blk->do_oop(addr); addr = cp->operands_addr(); - blk->do_oop(addr); + if (mr.contains(addr)) blk->do_oop(addr); addr = cp->pool_holder_addr(); - blk->do_oop(addr); + if (mr.contains(addr)) blk->do_oop(addr); return size; } @@ -285,10 +285,9 @@ void constantPoolKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { assert(obj->is_constantPool(), "should be constant pool"); constantPoolOop cp = (constantPoolOop) obj; - if (cp->tags() != NULL && - (!JavaObjectsInPerm || (EnableInvokeDynamic && cp->has_pseudo_string()))) { + if (cp->tags() != NULL) { for (int i = 1; i < cp->length(); ++i) { - if (cp->tag_at(i).is_string()) { + if (cp->is_pointer_entry(i)) { oop* base = cp->obj_at_addr_raw(i); if (PSScavenge::should_scavenge(base)) { pm->claim_or_forward_depth(base); @@ -342,6 +341,11 @@ anObj->print_value_on(st); st->print(" {0x%lx}", (address)anObj); break; + case JVM_CONSTANT_Object : + anObj = cp->object_at(index); + anObj->print_value_on(st); + st->print(" {0x%lx}", (address)anObj); + break; case JVM_CONSTANT_Integer : st->print("%d", cp->int_at(index)); break; @@ -381,7 +385,6 @@ case JVM_CONSTANT_MethodType : st->print("signature_index=%d", cp->method_type_index_at(index)); break; - case JVM_CONSTANT_InvokeDynamicTrans : case JVM_CONSTANT_InvokeDynamic : { st->print("bootstrap_method_index=%d", cp->invoke_dynamic_bootstrap_method_ref_index_at(index)); @@ -433,23 +436,21 @@ guarantee(cp->is_perm(), "should be in permspace"); if (!cp->partially_loaded()) { for (int i = 0; i< cp->length(); i++) { + constantTag tag = cp->tag_at(i); CPSlot entry = cp->slot_at(i); - if (cp->tag_at(i).is_klass()) { + if (tag.is_klass()) { if (entry.is_oop()) { guarantee(entry.get_oop()->is_perm(), "should be in permspace"); guarantee(entry.get_oop()->is_klass(), "should be klass"); } - } - if (cp->tag_at(i).is_unresolved_klass()) { + } else if (tag.is_unresolved_klass()) { if (entry.is_oop()) { guarantee(entry.get_oop()->is_perm(), "should be in permspace"); guarantee(entry.get_oop()->is_klass(), "should be klass"); } - } - if (cp->tag_at(i).is_symbol()) { + } else if (tag.is_symbol()) { guarantee(entry.get_symbol()->refcount() != 0, "should have nonzero reference count"); - } - if (cp->tag_at(i).is_unresolved_string()) { + } else if (tag.is_unresolved_string()) { if (entry.is_oop()) { guarantee(entry.get_oop()->is_perm(), "should be in permspace"); guarantee(entry.get_oop()->is_instance(), "should be instance"); @@ -457,8 +458,7 @@ else { guarantee(entry.get_symbol()->refcount() != 0, "should have nonzero reference count"); } - } - if (cp->tag_at(i).is_string()) { + } else if (tag.is_string()) { if (!cp->has_pseudo_string()) { if (entry.is_oop()) { guarantee(!JavaObjectsInPerm || entry.get_oop()->is_perm(), @@ -468,8 +468,11 @@ } else { // can be non-perm, can be non-instance (array) } + } else if (tag.is_object()) { + assert(entry.get_oop()->is_oop(), "should be some valid oop"); + } else { + assert(!cp->is_pointer_entry(i), "unhandled oop type in constantPoolKlass::verify_on"); } - // FIXME: verify JSR 292 tags JVM_CONSTANT_MethodHandle, etc. 
} guarantee(cp->tags()->is_perm(), "should be in permspace"); guarantee(cp->tags()->is_typeArray(), "should be type array");
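In the constantPoolKlass hunk above, the iteration now guards each header slot with if (mr.contains(addr)) before invoking the closure, so a region-bounded scan only visits pointer fields whose addresses actually fall inside the region being processed. A minimal sketch of that region-filtered field iteration follows; Region, Pool and visit() are illustrative stand-ins for MemRegion, the constant pool and the closure.

#include <cstdint>
#include <cstdio>

struct Region {                      // half-open address range, like MemRegion
  const void* start;
  const void* end;
  bool contains(const void* p) const {
    return (uintptr_t)p >= (uintptr_t)start && (uintptr_t)p < (uintptr_t)end;
  }
};

struct Pool {                        // toy object with a few pointer fields
  void* tags;
  void* cache;
  void* holder;
};

// Apply 'visit' only to fields whose addresses lie inside 'mr'.
void iterate_fields_in(Pool* p, const Region& mr, void (*visit)(void**)) {
  void** addr;
  addr = &p->tags;   if (mr.contains(addr)) visit(addr);
  addr = &p->cache;  if (mr.contains(addr)) visit(addr);
  addr = &p->holder; if (mr.contains(addr)) visit(addr);
}

int main() {
  Pool pool = { 0, 0, 0 };
  Region first_two = { &pool.tags, &pool.holder };   // excludes the last field
  iterate_fields_in(&pool, first_two,
                    [](void** a) { printf("visited field at %p\n", (void*)a); });
  return 0;
}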
--- a/src/share/vm/oops/constantPoolOop.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/constantPoolOop.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -284,17 +284,13 @@ if (constantPoolCacheOopDesc::is_secondary_index(which)) { // Invokedynamic index. int pool_index = cache()->main_entry_at(which)->constant_pool_index(); - if (!AllowTransitionalJSR292 || tag_at(pool_index).is_invoke_dynamic()) - pool_index = invoke_dynamic_name_and_type_ref_index_at(pool_index); + pool_index = invoke_dynamic_name_and_type_ref_index_at(pool_index); assert(tag_at(pool_index).is_name_and_type(), ""); return pool_index; } // change byte-ordering and go via cache i = remap_instruction_operand_from_cache(which); } else { - if (AllowTransitionalJSR292 && tag_at(which).is_name_and_type()) - // invokedynamic index is a simple name-and-type - return which; if (tag_at(which).is_invoke_dynamic()) { int pool_index = invoke_dynamic_name_and_type_ref_index_at(which); assert(tag_at(pool_index).is_name_and_type(), ""); @@ -953,7 +949,6 @@ } break; case JVM_CONSTANT_InvokeDynamic: - case JVM_CONSTANT_InvokeDynamicTrans: { int k1 = invoke_dynamic_bootstrap_method_ref_index_at(index1); int k2 = cp2->invoke_dynamic_bootstrap_method_ref_index_at(index2); @@ -1227,13 +1222,6 @@ to_cp->method_handle_index_at_put(to_i, k1, k2); } break; - case JVM_CONSTANT_InvokeDynamicTrans: - { - int k1 = from_cp->invoke_dynamic_bootstrap_method_ref_index_at(from_i); - int k2 = from_cp->invoke_dynamic_name_and_type_ref_index_at(from_i); - to_cp->invoke_dynamic_trans_at_put(to_i, k1, k2); - } break; - case JVM_CONSTANT_InvokeDynamic: { int k1 = from_cp->invoke_dynamic_bootstrap_specifier_index(from_i); @@ -1459,7 +1447,6 @@ return 5; case JVM_CONSTANT_InvokeDynamic: - case JVM_CONSTANT_InvokeDynamicTrans: // u1 tag, u2 bsm, u2 nt return 5; @@ -1674,7 +1661,6 @@ DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1)); break; } - case JVM_CONSTANT_InvokeDynamicTrans: case JVM_CONSTANT_InvokeDynamic: { *bytes = tag; idx1 = extract_low_short_from_int(*int_at_addr(idx));
--- a/src/share/vm/oops/constantPoolOop.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/constantPoolOop.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -244,12 +244,6 @@ *int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_specifier_index; } - void invoke_dynamic_trans_at_put(int which, int bootstrap_method_index, int name_and_type_index) { - tag_at_put(which, JVM_CONSTANT_InvokeDynamicTrans); - *int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_method_index; - assert(AllowTransitionalJSR292, ""); - } - // Temporary until actual use void unresolved_string_at_put(int which, Symbol* s) { release_tag_at_put(which, JVM_CONSTANT_UnresolvedString); @@ -570,15 +564,11 @@ }; int invoke_dynamic_bootstrap_method_ref_index_at(int which) { assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool"); - if (tag_at(which).value() == JVM_CONSTANT_InvokeDynamicTrans) - return extract_low_short_from_int(*int_at_addr(which)); int op_base = invoke_dynamic_operand_base(which); return operands()->short_at(op_base + _indy_bsm_offset); } int invoke_dynamic_argument_count_at(int which) { assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool"); - if (tag_at(which).value() == JVM_CONSTANT_InvokeDynamicTrans) - return 0; int op_base = invoke_dynamic_operand_base(which); int argc = operands()->short_at(op_base + _indy_argc_offset); DEBUG_ONLY(int end_offset = op_base + _indy_argv_offset + argc;
--- a/src/share/vm/oops/cpCacheKlass.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/cpCacheKlass.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/cpCacheOop.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/cpCacheOop.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -104,7 +104,7 @@ void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL); bool success = (result == NULL); if (success) { - update_barrier_set(f1_addr, f1); + update_barrier_set((void*) f1_addr, f1); } } @@ -185,7 +185,7 @@ this->print(tty, 0); } assert(method->can_be_statically_bound(), "must be a MH invoker method"); - assert(AllowTransitionalJSR292 || _f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized"); + assert(_f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized"); // SystemDictionary::find_method_handle_invoke only caches // methods which signature classes are on the boot classpath, // otherwise the newly created method is returned. To avoid @@ -275,21 +275,23 @@ return (int) bsm_cache_index; } -void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, - methodHandle signature_invoker) { +void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, methodHandle signature_invoker) { assert(is_secondary_entry(), ""); + // NOTE: it's important that all other values are set before f1 is + // set since some users short circuit on f1 being set + // (i.e. non-null) and that may result in uninitialized values for + // other racing threads (e.g. flags). int param_size = signature_invoker->size_of_parameters(); assert(param_size >= 1, "method argument size must include MH.this"); - param_size -= 1; // do not count MH.this; it is not stacked for invokedynamic - if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) { - // racing threads might be trying to install their own favorites - set_f1(call_site()); - } + param_size -= 1; // do not count MH.this; it is not stacked for invokedynamic bool is_final = true; assert(signature_invoker->is_final_method(), "is_final"); - set_flags(as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size); + int flags = as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size; + assert(_flags == 0 || _flags == flags, "flags should be the same"); + set_flags(flags); // do not do set_bytecode on a secondary CP cache entry //set_bytecode_1(Bytecodes::_invokedynamic); + set_f1_if_null_atomic(call_site()); // This must be the last one to set (see NOTE above)! }
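The set_dynamic_call() hunk above reorders the stores because some readers short-circuit on f1 being non-null: every other field (parameter size, flags) must be written before f1, and f1 itself is installed at most once via set_f1_if_null_atomic(). Below is a standalone sketch of that publish-last pattern using std::atomic; the Entry layout and field names are illustrative, not the ConstantPoolCacheEntry layout.

#include <atomic>
#include <cstdio>

struct Entry {
  int flags;                         // must be written before 'payload' is published
  std::atomic<void*> payload;        // plays the role of f1: non-null means "ready"

  Entry() : flags(0), payload(nullptr) {}

  // Writer: fill in everything else, then publish 'payload' last, and only once.
  void publish(int f, void* p) {
    flags = f;                                          // ordinary store first
    void* expected = nullptr;
    payload.compare_exchange_strong(expected, p,        // set-if-null, like the CAS on f1
                                    std::memory_order_release);
  }

  // Reader: once 'payload' is seen non-null, the release/acquire pairing
  // guarantees the earlier store to 'flags' is visible as well.
  bool ready(int* out_flags) const {
    void* p = payload.load(std::memory_order_acquire);
    if (p == nullptr) return false;
    *out_flags = flags;
    return true;
  }
};

int main() {
  Entry e;
  int f = 0;
  printf("before publish: ready = %d\n", e.ready(&f));                 // 0
  int site = 42;                                                       // stand-in payload
  e.publish(7, &site);
  printf("after publish:  ready = %d, flags = %d\n", e.ready(&f), f);  // 1, 7
  return 0;
}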
--- a/src/share/vm/oops/generateOopMap.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/generateOopMap.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/instanceKlass.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/instanceKlass.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -191,8 +191,6 @@ typeArrayOop _inner_classes; // Implementors of this interface (not valid if it overflows) klassOop _implementors[implementors_limit]; - // invokedynamic bootstrap method (a java.lang.invoke.MethodHandle) - oop _bootstrap_method; // AllowTransitionalJSR292 ONLY // Annotations for this class, or null if none. typeArrayOop _class_annotations; // Annotation objects (byte arrays) for fields, or null if no annotations. @@ -403,6 +401,8 @@ ReferenceType reference_type() const { return _reference_type; } void set_reference_type(ReferenceType t) { _reference_type = t; } + static int reference_type_offset_in_bytes() { return offset_of(instanceKlass, _reference_type); } + // find local field, returns true if found bool find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const; // find field in direct superinterfaces, returns the interface in which the field is defined @@ -526,10 +526,6 @@ u2 method_index) { _enclosing_method_class_index = class_index; _enclosing_method_method_index = method_index; } - // JSR 292 support - oop bootstrap_method() const { return _bootstrap_method; } // AllowTransitionalJSR292 ONLY - void set_bootstrap_method(oop mh) { oop_store(&_bootstrap_method, mh); } - // jmethodID support static jmethodID get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h); @@ -793,7 +789,6 @@ oop* adr_signers() const { return (oop*)&this->_signers;} oop* adr_inner_classes() const { return (oop*)&this->_inner_classes;} oop* adr_implementors() const { return (oop*)&this->_implementors[0];} - oop* adr_bootstrap_method() const { return (oop*)&this->_bootstrap_method;} // AllowTransitionalJSR292 ONLY oop* adr_methods_jmethod_ids() const { return (oop*)&this->_methods_jmethod_ids;} oop* adr_methods_cached_itable_indices() const { return (oop*)&this->_methods_cached_itable_indices;} oop* adr_class_annotations() const { return (oop*)&this->_class_annotations;}
--- a/src/share/vm/oops/instanceKlassKlass.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/instanceKlassKlass.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -105,7 +105,6 @@ MarkSweep::mark_and_push(ik->adr_protection_domain()); MarkSweep::mark_and_push(ik->adr_host_klass()); MarkSweep::mark_and_push(ik->adr_signers()); - MarkSweep::mark_and_push(ik->adr_bootstrap_method()); MarkSweep::mark_and_push(ik->adr_class_annotations()); MarkSweep::mark_and_push(ik->adr_fields_annotations()); MarkSweep::mark_and_push(ik->adr_methods_annotations()); @@ -142,7 +141,6 @@ PSParallelCompact::mark_and_push(cm, ik->adr_protection_domain()); PSParallelCompact::mark_and_push(cm, ik->adr_host_klass()); PSParallelCompact::mark_and_push(cm, ik->adr_signers()); - PSParallelCompact::mark_and_push(cm, ik->adr_bootstrap_method()); PSParallelCompact::mark_and_push(cm, ik->adr_class_annotations()); PSParallelCompact::mark_and_push(cm, ik->adr_fields_annotations()); PSParallelCompact::mark_and_push(cm, ik->adr_methods_annotations()); @@ -185,7 +183,6 @@ for (int i = 0; i < instanceKlass::implementors_limit; i++) { blk->do_oop(&ik->adr_implementors()[i]); } - blk->do_oop(ik->adr_bootstrap_method()); blk->do_oop(ik->adr_class_annotations()); blk->do_oop(ik->adr_fields_annotations()); blk->do_oop(ik->adr_methods_annotations()); @@ -239,8 +236,6 @@ for (int i = 0; i < instanceKlass::implementors_limit; i++) { if (mr.contains(&adr[i])) blk->do_oop(&adr[i]); } - adr = ik->adr_bootstrap_method(); - if (mr.contains(adr)) blk->do_oop(adr); adr = ik->adr_class_annotations(); if (mr.contains(adr)) blk->do_oop(adr); adr = ik->adr_fields_annotations(); @@ -281,7 +276,6 @@ for (int i = 0; i < instanceKlass::implementors_limit; i++) { MarkSweep::adjust_pointer(&ik->adr_implementors()[i]); } - MarkSweep::adjust_pointer(ik->adr_bootstrap_method()); MarkSweep::adjust_pointer(ik->adr_class_annotations()); MarkSweep::adjust_pointer(ik->adr_fields_annotations()); MarkSweep::adjust_pointer(ik->adr_methods_annotations()); @@ -317,11 +311,6 @@ pm->claim_or_forward_depth(sg_addr); } - oop* bsm_addr = ik->adr_bootstrap_method(); - if (PSScavenge::should_scavenge(bsm_addr)) { - pm->claim_or_forward_depth(bsm_addr); - } - klassKlass::oop_push_contents(pm, obj); } @@ -420,7 +409,6 @@ ik->set_breakpoints(NULL); ik->init_previous_versions(); ik->set_generic_signature(NULL); - ik->set_bootstrap_method(NULL); ik->release_set_methods_jmethod_ids(NULL); ik->release_set_methods_cached_itable_indices(NULL); ik->set_class_annotations(NULL); @@ -542,11 +530,6 @@ } // pvw is cleaned up } // rm is cleaned up - if (ik->bootstrap_method() != NULL) { - st->print(BULLET"bootstrap method: "); - ik->bootstrap_method()->print_value_on(st); - st->cr(); - } if (ik->generic_signature() != NULL) { st->print(BULLET"generic signature: "); ik->generic_signature()->print_value_on(st); @@ -707,7 +690,8 @@ guarantee(method_ordering->is_perm(), "should be in permspace"); guarantee(method_ordering->is_typeArray(), "should be type array"); int length = method_ordering->length(); - if (JvmtiExport::can_maintain_original_method_order()) { + if (JvmtiExport::can_maintain_original_method_order() || + (UseSharedSpaces && length != 0)) { guarantee(length == methods->length(), "invalid method ordering length"); jlong sum = 0; for (j = 0; j < length; j++) {
--- a/src/share/vm/oops/instanceRefKlass.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/instanceRefKlass.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -397,7 +397,7 @@ if (referent != NULL) { guarantee(referent->is_oop(), "referent field heap failed"); - if (gch != NULL && !gch->is_in_youngest(obj)) { + if (gch != NULL && !gch->is_in_young(obj)) { // We do a specific remembered set check here since the referent // field is not part of the oop mask and therefore skipped by the // regular verify code. @@ -415,7 +415,7 @@ if (next != NULL) { guarantee(next->is_oop(), "next field verify failed"); guarantee(next->is_instanceRef(), "next field verify failed"); - if (gch != NULL && !gch->is_in_youngest(obj)) { + if (gch != NULL && !gch->is_in_young(obj)) { // We do a specific remembered set check here since the next field is // not part of the oop mask and therefore skipped by the regular // verify code.
--- a/src/share/vm/oops/klass.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/klass.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -453,6 +453,14 @@ ik->unlink_class(); } } + // Clear the Java vtable if the oop has one. + // The vtable isn't shareable because it's in the wrong order wrt the methods + // once the method names get moved and resorted. + klassVtable* vt = vtable(); + if (vt != NULL) { + assert(oop_is_instance() || oop_is_array(), "nothing else has vtable"); + vt->clear_vtable(); + } set_subklass(NULL); set_next_sibling(NULL); }
--- a/src/share/vm/oops/klassVtable.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/klassVtable.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -645,6 +645,15 @@ } } +// CDS/RedefineClasses support - clear vtables so they can be reinitialized +void klassVtable::clear_vtable() { + for (int i = 0; i < _length; i++) table()[i].clear(); +} + +bool klassVtable::is_initialized() { + return _length == 0 || table()[0].method() != NULL; +} + // Garbage collection void klassVtable::oop_follow_contents() {
--- a/src/share/vm/oops/klassVtable.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/klassVtable.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -75,7 +75,15 @@ void initialize_vtable(bool checkconstraints, TRAPS); // initialize vtable of a new klass - // conputes vtable length (in words) and the number of miranda methods + // CDS/RedefineClasses support - clear vtables so they can be reinitialized + // at dump time. Clearing gives us an easy way to tell if the vtable has + // already been reinitialized at dump time (see dump.cpp). Vtables can + // be initialized at run time by RedefineClasses so dumping the right order + // is necessary. + void clear_vtable(); + bool is_initialized(); + + // computes vtable length (in words) and the number of miranda methods static void compute_vtable_size_and_num_mirandas(int &vtable_length, int &num_miranda_methods, klassOop super, objArrayOop methods, AccessFlags class_flags, Handle classloader,
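clear_vtable() and is_initialized() above implement a simple convention: dump time nulls every entry, and a vtable counts as initialized again once its first slot holds a method (an empty vtable is trivially initialized). A toy sketch of that convention follows; ToyVtable is a placeholder, not klassVtable.

#include <cstddef>
#include <vector>

struct Method;                        // opaque, never dereferenced here

struct ToyVtable {
  std::vector<Method*> table;

  void clear_vtable() {               // dump-time reset, as in the hunk above
    for (size_t i = 0; i < table.size(); ++i) table[i] = NULL;
  }
  bool is_initialized() const {       // the first slot doubles as the "done" marker
    return table.empty() || table[0] != NULL;
  }
};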
--- a/src/share/vm/oops/markOop.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/markOop.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/methodDataOop.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/methodDataOop.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1194,7 +1194,7 @@ // Whole-method sticky bits and flags public: enum { - _trap_hist_limit = 16, // decoupled from Deoptimization::Reason_LIMIT + _trap_hist_limit = 17, // decoupled from Deoptimization::Reason_LIMIT _trap_hist_mask = max_jubyte, _extra_data_count = 4 // extra DataLayout headers, for trap history }; // Public flag values
--- a/src/share/vm/oops/methodOop.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/methodOop.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -852,11 +852,11 @@ bool methodOopDesc::is_method_handle_invoke_name(vmSymbols::SID name_sid) { switch (name_sid) { case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name): - case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name): + case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name): return true; } - if ((AllowTransitionalJSR292 || AllowInvokeForInvokeGeneric) - && name_sid == vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name)) + if (AllowInvokeGeneric + && name_sid == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name)) return true; return false; } @@ -921,6 +921,10 @@ tty->cr(); } + // invariant: cp->symbol_at_put is preceded by a refcount increment (more usually a lookup) + name->increment_refcount(); + signature->increment_refcount(); + constantPoolHandle cp; { constantPoolOop cp_oop = oopFactory::new_constantPool(_imcp_limit, IsSafeConc, CHECK_(empty)); @@ -1092,7 +1096,6 @@ if (name_id == vmSymbols::NO_SID) return; vmSymbols::SID sig_id = vmSymbols::find_sid(signature()); if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle) - && !(klass_id == vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_MethodHandle) && AllowTransitionalJSR292) && sig_id == vmSymbols::NO_SID) return; jshort flags = access_flags().as_short(); @@ -1118,20 +1121,17 @@ break; // Signature-polymorphic methods: MethodHandle.invoke*, InvokeDynamic.*. - case vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_MethodHandle): // AllowTransitionalJSR292 ONLY case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle): if (is_static() || !is_native()) break; switch (name_id) { case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name): + if (!AllowInvokeGeneric) break; + case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name): id = vmIntrinsics::_invokeGeneric; break; case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name): id = vmIntrinsics::_invokeExact; break; - case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name): - if (AllowInvokeForInvokeGeneric) id = vmIntrinsics::_invokeGeneric; - else if (AllowTransitionalJSR292) id = vmIntrinsics::_invokeExact; - break; } break; case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_InvokeDynamic):
--- a/src/share/vm/oops/symbol.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/symbol.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/symbol.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/symbol.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/typeArrayOop.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/oops/typeArrayOop.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/buildOopMap.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/buildOopMap.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/bytecodeInfo.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/bytecodeInfo.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -89,7 +89,7 @@ } // positive filter: should send be inlined? returns NULL, if yes, or rejection msg -const char* InlineTree::shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const { +const char* InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const { // Allows targeted inlining if(callee_method->should_inline()) { *wci_result = *(WarmCallInfo::always_hot()); @@ -102,8 +102,7 @@ // positive filter: should send be inlined? returns NULL (--> yes) // or rejection msg - int max_size = C->max_inline_size(); - int size = callee_method->code_size(); + int size = callee_method->code_size(); // Check for too many throws (and not too huge) if(callee_method->interpreter_throwout_count() > InlineThrowCount && @@ -120,18 +119,36 @@ return NULL; // size and frequency are represented in a new way } + int default_max_inline_size = C->max_inline_size(); + int inline_small_code_size = InlineSmallCode / 4; + int max_inline_size = default_max_inline_size; + int call_site_count = method()->scale_count(profile.count()); int invoke_count = method()->interpreter_invocation_count(); - assert( invoke_count != 0, "Require invokation count greater than zero"); - int freq = call_site_count/invoke_count; + + // Bytecoded method handle adapters do not have interpreter + // profiling data but only made up MDO data. Get the counter from + // there. + if (caller_method->is_method_handle_adapter()) { + assert(method()->method_data_or_null(), "must have an MDO"); + ciMethodData* mdo = method()->method_data(); + ciProfileData* mha_profile = mdo->bci_to_data(caller_bci); + assert(mha_profile, "must exist"); + CounterData* cd = mha_profile->as_CounterData(); + invoke_count = cd->count(); + call_site_count = invoke_count; // use the same value + } + + assert(invoke_count != 0, "require invocation count greater than zero"); + int freq = call_site_count / invoke_count; // bump the max size if the call is frequent if ((freq >= InlineFrequencyRatio) || (call_site_count >= InlineFrequencyCount) || is_init_with_ea(callee_method, caller_method, C)) { - max_size = C->freq_inline_size(); - if (size <= max_size && TraceFrequencyInlining) { + max_inline_size = C->freq_inline_size(); + if (size <= max_inline_size && TraceFrequencyInlining) { CompileTask::print_inline_indent(inline_depth()); tty->print_cr("Inlined frequent method (freq=%d count=%d):", freq, call_site_count); CompileTask::print_inline_indent(inline_depth()); @@ -141,11 +158,11 @@ } else { // Not hot. Check for medium-sized pre-existing nmethod at cold sites. if (callee_method->has_compiled_code() && - callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode/4) + callee_method->instructions_size(CompLevel_full_optimization) > inline_small_code_size) return "already compiled into a medium method"; } - if (size > max_size) { - if (max_size > C->max_inline_size()) + if (size > max_inline_size) { + if (max_inline_size > default_max_inline_size) return "hot method too big"; return "too big"; } @@ -154,7 +171,7 @@ // negative filter: should send NOT be inlined? 
returns NULL, ok to inline, or rejection msg -const char* InlineTree::shouldNotInline(ciMethod *callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const { +const char* InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const { // negative filter: should send NOT be inlined? returns NULL (--> inline) or rejection msg if (!UseOldInlining) { const char* fail = NULL; @@ -269,14 +286,13 @@ } const char *msg = NULL; - if ((msg = shouldInline(callee_method, caller_method, caller_bci, - profile, wci_result)) != NULL) { + msg = should_inline(callee_method, caller_method, caller_bci, profile, wci_result); + if (msg != NULL) return msg; - } - if ((msg = shouldNotInline(callee_method, caller_method, - wci_result)) != NULL) { + + msg = should_not_inline(callee_method, caller_method, wci_result); + if (msg != NULL) return msg; - } if (InlineAccessors && callee_method->is_accessor()) { // accessor methods are not subject to any of the following limits. @@ -310,13 +326,14 @@ return "inlining too deep"; } - // We need to detect recursive inlining of method handle targets: if - // the current method is a method handle adapter and one of the - // callers is the same method as the callee, we bail out if - // MaxRecursiveInlineLevel is hit. - if (method()->is_method_handle_adapter()) { + // detect direct and indirect recursive inlining + { + // count the current method and the callee + int inline_level = (method() == callee_method) ? 1 : 0; + if (inline_level > MaxRecursiveInlineLevel) + return "recursively inlining too deep"; + // count callers of current method and callee JVMState* jvms = caller_jvms(); - int inline_level = 0; while (jvms != NULL && jvms->has_method()) { if (jvms->method() == callee_method) { inline_level++; @@ -327,10 +344,6 @@ } } - if (method() == callee_method && inline_depth() > MaxRecursiveInlineLevel) { - return "recursively inlining too deep"; - } - int size = callee_method->code_size(); if (UseOldInlining && ClipInlining @@ -376,7 +389,6 @@ return true; } -#ifndef PRODUCT //------------------------------print_inlining--------------------------------- // Really, the failure_msg can be a success message also. void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const { @@ -388,7 +400,6 @@ tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count()); } } -#endif //------------------------------ok_to_inline----------------------------------- WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallProfile& profile, WarmCallInfo* initial_wci) { @@ -497,9 +508,8 @@ new_depth_adjust -= 1; // don't count method handle calls from java.lang.invoke implem } if (new_depth_adjust != 0 && PrintInlining) { - stringStream nm1; caller_jvms->method()->print_name(&nm1); - stringStream nm2; callee_method->print_name(&nm2); - tty->print_cr("discounting inlining depth from %s to %s", nm1.base(), nm2.base()); + CompileTask::print_inline_indent(inline_depth()); + tty->print_cr(" \\-> discounting inline depth"); } if (new_depth_adjust != 0 && C->log()) { int id1 = C->log()->identify(caller_jvms->method());
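The reworked should_inline() keeps the shape "bump the size budget when the call site is hot". A stand-alone sketch of that decision with purely illustrative thresholds (none of the numbers below are the product defaults; only the comparison structure mirrors the hunk above):

    #include <cstdio>

    // Returns the inline size budget for a call site, mirroring the freq /
    // call_site_count test in should_inline(). All parameters are supplied by
    // the caller here, whereas the VM reads them from profiling data and flags.
    int choose_max_inline_size(int call_site_count, int invoke_count,
                               int default_max, int freq_max,
                               int freq_ratio, int freq_count) {
      int freq = call_site_count / invoke_count;   // site invocations per caller invocation
      if (freq >= freq_ratio || call_site_count >= freq_count) {
        return freq_max;                           // hot site: allow a larger callee
      }
      return default_max;                          // otherwise keep the normal budget
    }

    int main() {
      // A site hit 400 times in a caller that ran 10 times gives freq = 40, which
      // clears an illustrative ratio of 20, so the larger budget (175) is used.
      printf("budget = %d\n", choose_max_inline_size(400, 10, 35, 175, 20, 100));
      return 0;
    }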
--- a/src/share/vm/opto/c2_globals.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/c2_globals.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -183,6 +183,21 @@ develop(bool, TraceLoopOpts, false, \ "Trace executed loop optimizations") \ \ + diagnostic(bool, LoopLimitCheck, true, \ + "Generate a loop limits check for overflow") \ + \ + develop(bool, TraceLoopLimitCheck, false, \ + "Trace generation of loop limits checks") \ + \ + diagnostic(bool, RangeLimitCheck, true, \ + "Additional overflow checks during range check elimination") \ + \ + develop(bool, TraceRangeLimitCheck, false, \ + "Trace additional overflow checks in RCE") \ + \ + diagnostic(bool, UnrollLimitCheck, true, \ + "Additional overflow checks during loop unroll") \ + \ product(bool, OptimizeFill, false, \ "convert fill/copy loops into intrinsic") \ \
--- a/src/share/vm/opto/c2compiler.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/c2compiler.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/callGenerator.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/callGenerator.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -978,31 +978,19 @@ return head; } -WarmCallInfo* WarmCallInfo::_always_hot = NULL; -WarmCallInfo* WarmCallInfo::_always_cold = NULL; +WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(), + WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE()); +WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(), + WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE()); WarmCallInfo* WarmCallInfo::always_hot() { - if (_always_hot == NULL) { - static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0}; - WarmCallInfo* ci = (WarmCallInfo*) bits; - ci->_profit = ci->_count = MAX_VALUE(); - ci->_work = ci->_size = MIN_VALUE(); - _always_hot = ci; - } - assert(_always_hot->is_hot(), "must always be hot"); - return _always_hot; + assert(_always_hot.is_hot(), "must always be hot"); + return &_always_hot; } WarmCallInfo* WarmCallInfo::always_cold() { - if (_always_cold == NULL) { - static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0}; - WarmCallInfo* ci = (WarmCallInfo*) bits; - ci->_profit = ci->_count = MIN_VALUE(); - ci->_work = ci->_size = MAX_VALUE(); - _always_cold = ci; - } - assert(_always_cold->is_cold(), "must always be cold"); - return _always_cold; + assert(_always_cold.is_cold(), "must always be cold"); + return &_always_cold; }
--- a/src/share/vm/opto/callGenerator.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/callGenerator.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -215,8 +215,20 @@ WarmCallInfo* next() const { return _next; } void set_next(WarmCallInfo* n) { _next = n; } - static WarmCallInfo* _always_hot; - static WarmCallInfo* _always_cold; + static WarmCallInfo _always_hot; + static WarmCallInfo _always_cold; + + // Constructor intitialization of always_hot and always_cold + WarmCallInfo(float c, float p, float w, float s) { + _call = NULL; + _hot_cg = NULL; + _next = NULL; + _count = c; + _profit = p; + _work = w; + _size = s; + _heat = 0; + } public: // Because WarmInfo objects live over the entire lifetime of the
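Taken together with the callGenerator.cpp hunk above, the change swaps a lazily built sentinel (carved out of a raw static buffer) for an object constructed during static initialization through a private constructor. A stand-alone sketch of that pattern with toy fields (not the VM's WarmCallInfo):

    // Only the class itself can construct sentinels; callers get a stable address.
    class ToySentinel {
      float _count, _profit, _work, _size;
      ToySentinel(float c, float p, float w, float s)
        : _count(c), _profit(p), _work(w), _size(s) {}
      static ToySentinel _always_hot;
     public:
      static ToySentinel* always_hot() { return &_always_hot; }
      bool is_hot() const { return _count > 0.0f; }   // toy predicate
    };

    // Built once during static initialization, so no lazy-init check is needed.
    ToySentinel ToySentinel::_always_hot(1.0e30f, 1.0e30f, -1.0e30f, -1.0e30f);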
--- a/src/share/vm/opto/cfgnode.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/cfgnode.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1351,9 +1351,17 @@ static void split_once(PhaseIterGVN *igvn, Node *phi, Node *val, Node *n, Node *newn) { igvn->hash_delete(n); // Remove from hash before hacking edges + Node* predicate_proj = NULL; uint j = 1; - for( uint i = phi->req()-1; i > 0; i-- ) { - if( phi->in(i) == val ) { // Found a path with val? + for (uint i = phi->req()-1; i > 0; i--) { + if (phi->in(i) == val) { // Found a path with val? + if (n->is_Region()) { + Node* proj = PhaseIdealLoop::find_predicate(n->in(i)); + if (proj != NULL) { + assert(predicate_proj == NULL, "only one predicate entry expected"); + predicate_proj = proj; + } + } // Add to NEW Region/Phi, no DU info newn->set_req( j++, n->in(i) ); // Remove from OLD Region/Phi @@ -1364,6 +1372,12 @@ // Register the new node but do not transform it. Cannot transform until the // entire Region/Phi conglomerate has been hacked as a single huge transform. igvn->register_new_node_with_optimizer( newn ); + + // Clone loop predicates + if (predicate_proj != NULL) { + newn = igvn->clone_loop_predicates(predicate_proj, newn, !n->is_CountedLoop()); + } + // Now I can point to the new node. n->add_req(newn); igvn->_worklist.push(n);
--- a/src/share/vm/opto/chaitin.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/chaitin.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/classes.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/classes.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -156,6 +156,7 @@ macro(LogD) macro(Log10D) macro(Loop) +macro(LoopLimit) macro(Mach) macro(MachProj) macro(MaxI)
--- a/src/share/vm/opto/compile.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/compile.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -629,7 +629,7 @@ initial_gvn()->transform_no_reclaim(top()); // Set up tf(), start(), and find a CallGenerator. - CallGenerator* cg; + CallGenerator* cg = NULL; if (is_osr_compilation()) { const TypeTuple *domain = StartOSRNode::osr_domain(); const TypeTuple *range = TypeTuple::make_range(method()->signature()); @@ -644,9 +644,24 @@ StartNode* s = new (this, 2) StartNode(root(), tf()->domain()); initial_gvn()->set_type_bottom(s); init_start(s); - float past_uses = method()->interpreter_invocation_count(); - float expected_uses = past_uses; - cg = CallGenerator::for_inline(method(), expected_uses); + if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && UseG1GC) { + // With java.lang.ref.reference.get() we must go through the + // intrinsic when G1 is enabled - even when get() is the root + // method of the compile - so that, if necessary, the value in + // the referent field of the reference object gets recorded by + // the pre-barrier code. + // Specifically, if G1 is enabled, the value in the referent + // field is recorded by the G1 SATB pre barrier. This will + // result in the referent being marked live and the reference + // object removed from the list of discovered references during + // reference processing. + cg = find_intrinsic(method(), false); + } + if (cg == NULL) { + float past_uses = method()->interpreter_invocation_count(); + float expected_uses = past_uses; + cg = CallGenerator::for_inline(method(), expected_uses); + } } if (failing()) return; if (cg == NULL) { @@ -1632,7 +1647,6 @@ igvn.replace_node(n, n->in(1)); } assert(predicate_count()==0, "should be clean!"); - igvn.optimize(); } //------------------------------Optimize--------------------------------------- @@ -1689,7 +1703,7 @@ if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) { { TracePhase t2("idealLoop", &_t_idealLoop, true); - PhaseIdealLoop ideal_loop( igvn, true, UseLoopPredicate); + PhaseIdealLoop ideal_loop( igvn, true ); loop_opts_cnt--; if (major_progress()) print_method("PhaseIdealLoop 1", 2); if (failing()) return; @@ -1697,7 +1711,7 @@ // Loop opts pass if partial peeling occurred in previous pass if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) { TracePhase t3("idealLoop", &_t_idealLoop, true); - PhaseIdealLoop ideal_loop( igvn, false, UseLoopPredicate); + PhaseIdealLoop ideal_loop( igvn, false ); loop_opts_cnt--; if (major_progress()) print_method("PhaseIdealLoop 2", 2); if (failing()) return; @@ -1705,7 +1719,7 @@ // Loop opts pass for loop-unrolling before CCP if(major_progress() && (loop_opts_cnt > 0)) { TracePhase t4("idealLoop", &_t_idealLoop, true); - PhaseIdealLoop ideal_loop( igvn, false, UseLoopPredicate); + PhaseIdealLoop ideal_loop( igvn, false ); loop_opts_cnt--; if (major_progress()) print_method("PhaseIdealLoop 3", 2); } @@ -1743,21 +1757,13 @@ // peeling, unrolling, etc. if(loop_opts_cnt > 0) { debug_only( int cnt = 0; ); - bool loop_predication = UseLoopPredicate; while(major_progress() && (loop_opts_cnt > 0)) { TracePhase t2("idealLoop", &_t_idealLoop, true); assert( cnt++ < 40, "infinite cycle in loop optimization" ); - PhaseIdealLoop ideal_loop( igvn, true, loop_predication); + PhaseIdealLoop ideal_loop( igvn, true); loop_opts_cnt--; if (major_progress()) print_method("PhaseIdealLoop iterations", 2); if (failing()) return; - // Perform loop predication optimization during first iteration after CCP. 
- // After that switch it off and cleanup unused loop predicates. - if (loop_predication) { - loop_predication = false; - cleanup_loop_predicates(igvn); - if (failing()) return; - } } } @@ -2050,6 +2056,52 @@ // Note that OffsetBot and OffsetTop are very negative. } +// Eliminate trivially redundant StoreCMs and accumulate their +// precedence edges. +static void eliminate_redundant_card_marks(Node* n) { + assert(n->Opcode() == Op_StoreCM, "expected StoreCM"); + if (n->in(MemNode::Address)->outcnt() > 1) { + // There are multiple users of the same address so it might be + // possible to eliminate some of the StoreCMs + Node* mem = n->in(MemNode::Memory); + Node* adr = n->in(MemNode::Address); + Node* val = n->in(MemNode::ValueIn); + Node* prev = n; + bool done = false; + // Walk the chain of StoreCMs eliminating ones that match. As + // long as it's a chain of single users then the optimization is + // safe. Eliminating partially redundant StoreCMs would require + // cloning copies down the other paths. + while (mem->Opcode() == Op_StoreCM && mem->outcnt() == 1 && !done) { + if (adr == mem->in(MemNode::Address) && + val == mem->in(MemNode::ValueIn)) { + // redundant StoreCM + if (mem->req() > MemNode::OopStore) { + // Hasn't been processed by this code yet. + n->add_prec(mem->in(MemNode::OopStore)); + } else { + // Already converted to precedence edge + for (uint i = mem->req(); i < mem->len(); i++) { + // Accumulate any precedence edges + if (mem->in(i) != NULL) { + n->add_prec(mem->in(i)); + } + } + // Everything above this point has been processed. + done = true; + } + // Eliminate the previous StoreCM + prev->set_req(MemNode::Memory, mem->in(MemNode::Memory)); + assert(mem->outcnt() == 0, "should be dead"); + mem->disconnect_inputs(NULL); + } else { + prev = mem; + } + mem = prev->in(MemNode::Memory); + } + } +} + //------------------------------final_graph_reshaping_impl---------------------- // Implement items 1-5 from final_graph_reshaping below. static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) { @@ -2176,9 +2228,19 @@ frc.inc_float_count(); goto handle_mem; + case Op_StoreCM: + { + // Convert OopStore dependence into precedence edge + Node* prec = n->in(MemNode::OopStore); + n->del_req(MemNode::OopStore); + n->add_prec(prec); + eliminate_redundant_card_marks(n); + } + + // fall through + case Op_StoreB: case Op_StoreC: - case Op_StoreCM: case Op_StorePConditional: case Op_StoreI: case Op_StoreL:
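The StoreCM walk added above removes idempotent work: when two card marks share the same address and value nodes, the later one stores the same byte into the same card and can be dropped as long as the memory chain between them is a straight line of single users. A stand-alone source-level analogue with a fabricated card-table layout (card size and dirty value are illustrative, not the VM's constants):

    #include <cstdint>

    static uint8_t card_table[1u << 20];
    static const int card_shift = 9;       // illustrative 512-byte cards
    static const uint8_t dirty = 0;        // illustrative "dirty" encoding

    // Imprecise card mark keyed on the object base, as used for plain field stores.
    inline void card_mark(const void* obj) {
      card_table[reinterpret_cast<uintptr_t>(obj) >> card_shift] = dirty;
    }

    struct Pair { void* a; void* b; };

    void store_both(Pair* p, void* x, void* y) {
      p->a = x; card_mark(p);   // first mark of p's card: required
      p->b = y; card_mark(p);   // same address, same value: the redundant mark
                                // that eliminate_redundant_card_marks folds away
    }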
--- a/src/share/vm/opto/compile.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/compile.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -489,6 +489,9 @@ // remove the opaque nodes that protect the predicates so that the unused checks and // uncommon traps will be eliminated from the graph. void cleanup_loop_predicates(PhaseIterGVN &igvn); + bool is_predicate_opaq(Node * n) { + return _predicate_opaqs->contains(n); + } // Compilation environment. Arena* comp_arena() { return &_comp_arena; }
--- a/src/share/vm/opto/doCall.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/doCall.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -62,7 +62,11 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float prof_factor) { - CallGenerator* cg; + CallGenerator* cg; + ciMethod* caller = jvms->method(); + int bci = jvms->bci(); + Bytecodes::Code bytecode = caller->java_code_at_bci(bci); + guarantee(call_method != NULL, "failed method resolution"); // Dtrace currently doesn't work unless all calls are vanilla if (env()->dtrace_method_probes()) { @@ -72,7 +76,7 @@ // Note: When we get profiling during stage-1 compiles, we want to pull // from more specific profile data which pertains to this inlining. // Right now, ignore the information in jvms->caller(), and do method[bci]. - ciCallProfile profile = jvms->method()->call_profile_at_bci(jvms->bci()); + ciCallProfile profile = caller->call_profile_at_bci(bci); // See how many times this site has been invoked. int site_count = profile.count(); @@ -115,7 +119,7 @@ // MethodHandle.invoke* are native methods which obviously don't // have bytecodes and so normal inlining fails. if (call_method->is_method_handle_invoke()) { - if (jvms->method()->java_code_at_bci(jvms->bci()) != Bytecodes::_invokedynamic) { + if (bytecode != Bytecodes::_invokedynamic) { GraphKit kit(jvms); Node* n = kit.argument(0); @@ -124,16 +128,19 @@ ciObject* const_oop = oop_ptr->const_oop(); ciMethodHandle* method_handle = const_oop->as_method_handle(); - // Set the actually called method to have access to the class - // and signature in the MethodHandleCompiler. + // Set the callee to have access to the class and signature in + // the MethodHandleCompiler. method_handle->set_callee(call_method); + method_handle->set_caller(caller); + method_handle->set_call_profile(&profile); // Get an adapter for the MethodHandle. ciMethod* target_method = method_handle->get_method_handle_adapter(); - - CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor); - if (hit_cg != NULL && hit_cg->is_inline()) - return hit_cg; + if (target_method != NULL) { + CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor); + if (hit_cg != NULL && hit_cg->is_inline()) + return hit_cg; + } } return CallGenerator::for_direct_call(call_method); @@ -146,17 +153,20 @@ ciCallSite* call_site = str.get_call_site(); ciMethodHandle* method_handle = call_site->get_target(); - // Set the actually called method to have access to the class - // and signature in the MethodHandleCompiler. + // Set the callee to have access to the class and signature in + // the MethodHandleCompiler. method_handle->set_callee(call_method); + method_handle->set_caller(caller); + method_handle->set_call_profile(&profile); // Get an adapter for the MethodHandle. 
ciMethod* target_method = method_handle->get_invokedynamic_adapter(); - - CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor); - if (hit_cg != NULL && hit_cg->is_inline()) { - CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method); - return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor); + if (target_method != NULL) { + CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor); + if (hit_cg != NULL && hit_cg->is_inline()) { + CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method); + return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor); + } } // If something failed, generate a normal dynamic call.
--- a/src/share/vm/opto/escape.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/escape.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -594,7 +594,7 @@ // // Create a new version of orig_phi if necessary. Returns either the newly -// created phi or an existing phi. Sets create_new to indicate wheter a new +// created phi or an existing phi. Sets create_new to indicate whether a new // phi was created. Cache the last newly created phi in the node map. // PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn, bool &new_created) { @@ -649,7 +649,7 @@ } // -// Return a new version of Memory Phi "orig_phi" with the inputs having the +// Return a new version of Memory Phi "orig_phi" with the inputs having the // specified alias index. // PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn) { @@ -828,11 +828,15 @@ break; // hit one of our sentinels if (result->is_Mem()) { const Type *at = phase->type(result->in(MemNode::Address)); - if (at != Type::TOP) { - assert (at->isa_ptr() != NULL, "pointer type required."); - int idx = C->get_alias_index(at->is_ptr()); - if (idx == alias_idx) - break; + if (at == Type::TOP) + break; // Dead + assert (at->isa_ptr() != NULL, "pointer type required."); + int idx = C->get_alias_index(at->is_ptr()); + if (idx == alias_idx) + break; // Found + if (!is_instance && (at->isa_oopptr() == NULL || + !at->is_oopptr()->is_known_instance())) { + break; // Do not skip store to general memory slice. } result = result->in(MemNode::Memory); } @@ -902,13 +906,13 @@ PhiNode *mphi = result->as_Phi(); assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); const TypePtr *t = mphi->adr_type(); - if (C->get_alias_index(t) != alias_idx) { - // Create a new Phi with the specified alias index type. - result = split_memory_phi(mphi, alias_idx, orig_phis, phase); - } else if (!is_instance) { + if (!is_instance) { // Push all non-instance Phis on the orig_phis worklist to update inputs // during Phase 4 if needed. orig_phis.append_if_missing(mphi); + } else if (C->get_alias_index(t) != alias_idx) { + // Create a new Phi with the specified alias index type. + result = split_memory_phi(mphi, alias_idx, orig_phis, phase); } } // the result is either MemNode, PhiNode, InitializeNode. @@ -1433,7 +1437,10 @@ // Update the memory inputs of MemNodes with the value we computed // in Phase 2 and move stores memory users to corresponding memory slices. -#ifdef ASSERT + + // Disable memory split verification code until the fix for 6984348. + // Currently it produces false negative results since it does not cover all cases. +#if 0 // ifdef ASSERT visited.Reset(); Node_Stack old_mems(arena, _compile->unique() >> 2); #endif @@ -1443,7 +1450,7 @@ Node *n = ptnode_adr(i)->_node; assert(n != NULL, "sanity"); if (n->is_Mem()) { -#ifdef ASSERT +#if 0 // ifdef ASSERT Node* old_mem = n->in(MemNode::Memory); if (!visited.test_set(old_mem->_idx)) { old_mems.push(old_mem, old_mem->outcnt()); @@ -1465,13 +1472,13 @@ } } } -#ifdef ASSERT +#if 0 // ifdef ASSERT // Verify that memory was split correctly while (old_mems.is_nonempty()) { Node* old_mem = old_mems.node(); uint old_cnt = old_mems.index(); old_mems.pop(); - assert(old_cnt = old_mem->outcnt(), "old mem could be lost"); + assert(old_cnt == old_mem->outcnt(), "old mem could be lost"); } #endif }
--- a/src/share/vm/opto/gcm.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/gcm.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/graphKit.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/graphKit.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1033,14 +1033,10 @@ iter.reset_to_bci(bci()); iter.next(); ciMethod* method = iter.get_method(ignore); - inputs = method->arg_size_no_receiver(); - // Add a receiver argument, maybe: - if (code != Bytecodes::_invokestatic && - code != Bytecodes::_invokedynamic) - inputs += 1; // (Do not use ciMethod::arg_size(), because // it might be an unloaded method, which doesn't // know whether it is static or not.) + inputs = method->invoke_arg_size(code); int size = method->return_type()->size(); depth = size - inputs; } @@ -1457,19 +1453,22 @@ } -void GraphKit::pre_barrier(Node* ctl, +void GraphKit::pre_barrier(bool do_load, + Node* ctl, Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type, + Node* pre_val, BasicType bt) { + BarrierSet* bs = Universe::heap()->barrier_set(); set_control(ctl); switch (bs->kind()) { case BarrierSet::G1SATBCT: case BarrierSet::G1SATBCTLogging: - g1_write_barrier_pre(obj, adr, adr_idx, val, val_type, bt); + g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt); break; case BarrierSet::CardTableModRef: @@ -1532,7 +1531,11 @@ uint adr_idx = C->get_alias_index(adr_type); assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" ); - pre_barrier(control(), obj, adr, adr_idx, val, val_type, bt); + pre_barrier(true /* do_load */, + control(), obj, adr, adr_idx, val, val_type, + NULL /* pre_val */, + bt); + Node* store = store_to_memory(control(), adr, val, bt, adr_idx); post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise); return store; @@ -2950,8 +2953,7 @@ //---------------------------set_output_for_allocation------------------------- Node* GraphKit::set_output_for_allocation(AllocateNode* alloc, - const TypeOopPtr* oop_type, - bool raw_mem_only) { + const TypeOopPtr* oop_type) { int rawidx = Compile::AliasIdxRaw; alloc->set_req( TypeFunc::FramePtr, frameptr() ); add_safepoint_edges(alloc); @@ -2975,7 +2977,7 @@ rawoop)->as_Initialize(); assert(alloc->initialization() == init, "2-way macro link must work"); assert(init ->allocation() == alloc, "2-way macro link must work"); - if (ReduceFieldZeroing && !raw_mem_only) { + { // Extract memory strands which may participate in the new object's // initialization, and source them from the new InitializeNode. // This will allow us to observe initializations when they occur, @@ -3036,11 +3038,9 @@ // the type to a constant. // The optional arguments are for specialized use by intrinsics: // - If 'extra_slow_test' if not null is an extra condition for the slow-path. -// - If 'raw_mem_only', do not cast the result to an oop. // - If 'return_size_val', report the the total object size to the caller. 
Node* GraphKit::new_instance(Node* klass_node, Node* extra_slow_test, - bool raw_mem_only, // affect only raw memory Node* *return_size_val) { // Compute size in doublewords // The size is always an integral number of doublewords, represented @@ -3111,7 +3111,7 @@ size, klass_node, initial_slow_test); - return set_output_for_allocation(alloc, oop_type, raw_mem_only); + return set_output_for_allocation(alloc, oop_type); } //-------------------------------new_array------------------------------------- @@ -3121,7 +3121,6 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) Node* length, // number of array elements int nargs, // number of arguments to push back for uncommon trap - bool raw_mem_only, // affect only raw memory Node* *return_size_val) { jint layout_con = Klass::_lh_neutral_value; Node* layout_val = get_layout_helper(klass_node, layout_con); @@ -3266,7 +3265,7 @@ ary_type = ary_type->is_aryptr()->cast_to_size(length_type); } - Node* javaoop = set_output_for_allocation(alloc, ary_type, raw_mem_only); + Node* javaoop = set_output_for_allocation(alloc, ary_type); // Cast length on remaining path to be as narrow as possible if (map()->find_edge(length) >= 0) { @@ -3379,16 +3378,25 @@ if (UseLoopPredicate) { add_predicate_impl(Deoptimization::Reason_predicate, nargs); } + // loop's limit check predicate should be near the loop. + if (LoopLimitCheck) { + add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs); + } } //----------------------------- store barriers ---------------------------- #define __ ideal. void GraphKit::sync_kit(IdealKit& ideal) { + set_all_memory(__ merged_memory()); + set_i_o(__ i_o()); + set_control(__ ctrl()); +} + +void GraphKit::final_sync(IdealKit& ideal) { // Final sync IdealKit and graphKit. __ drain_delay_transform(); - set_all_memory(__ merged_memory()); - set_control(__ ctrl()); + sync_kit(ideal); } // vanilla/CMS post barrier @@ -3435,7 +3443,7 @@ // (Else it's an array (or unknown), and we want more precise card marks.) assert(adr != NULL, ""); - IdealKit ideal(gvn(), control(), merged_memory(), true); + IdealKit ideal(this, true); // Convert the pointer to an int prior to doing math on it Node* cast = __ CastPX(__ ctrl(), adr); @@ -3450,9 +3458,22 @@ // Get the alias_index for raw card-mark memory int adr_type = Compile::AliasIdxRaw; + Node* zero = __ ConI(0); // Dirty card value + BasicType bt = T_BYTE; + + if (UseCondCardMark) { + // The classic GC reference write barrier is typically implemented + // as a store into the global card mark table. Unfortunately + // unconditional stores can result in false sharing and excessive + // coherence traffic as well as false transactional aborts. + // UseCondCardMark enables MP "polite" conditional card mark + // stores. In theory we could relax the load from ctrl() to + // no_ctrl, but that doesn't buy much latitude. + Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type); + __ if_then(card_val, BoolTest::ne, zero); + } + // Smash zero into card - Node* zero = __ ConI(0); - BasicType bt = T_BYTE; if( !UseConcMarkSweepGC ) { __ store(__ ctrl(), card_adr, zero, bt, adr_type); } else { @@ -3460,18 +3481,41 @@ __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type); } + if (UseCondCardMark) { + __ end_if(); + } + // Final sync IdealKit and GraphKit. 
- sync_kit(ideal); + final_sync(ideal); } // G1 pre/post barriers -void GraphKit::g1_write_barrier_pre(Node* obj, +void GraphKit::g1_write_barrier_pre(bool do_load, + Node* obj, Node* adr, uint alias_idx, Node* val, const TypeOopPtr* val_type, + Node* pre_val, BasicType bt) { - IdealKit ideal(gvn(), control(), merged_memory(), true); + + // Some sanity checks + // Note: val is unused in this routine. + + if (do_load) { + // We need to generate the load of the previous value + assert(obj != NULL, "must have a base"); + assert(adr != NULL, "where are loading from?"); + assert(pre_val == NULL, "loaded already?"); + assert(val_type != NULL, "need a type"); + } else { + // In this case both val_type and alias_idx are unused. + assert(pre_val != NULL, "must be loaded already"); + assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here"); + } + assert(bt == T_OBJECT, "or we shouldn't be here"); + + IdealKit ideal(this, true); Node* tls = __ thread(); // ThreadLocalStorage @@ -3492,32 +3536,28 @@ PtrQueue::byte_offset_of_index()); const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 652 PtrQueue::byte_offset_of_buf()); + // Now the actual pointers into the thread - - // set_control( ctl); - Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset)); Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset)); Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset)); // Now some of the values - Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw); // if (!marking) __ if_then(marking, BoolTest::ne, zero); { Node* index = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw); - const Type* t1 = adr->bottom_type(); - const Type* t2 = val->bottom_type(); - - Node* orig = __ load(no_ctrl, adr, val_type, bt, alias_idx); - // if (orig != NULL) - __ if_then(orig, BoolTest::ne, null()); { - Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw); - + if (do_load) { // load original value // alias_idx correct?? + pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx); + } + + // if (pre_val != NULL) + __ if_then(pre_val, BoolTest::ne, null()); { + Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw); // is the queue for this thread full? __ if_then(index, BoolTest::ne, zero, likely); { @@ -3531,10 +3571,9 @@ next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) ); #endif - // Now get the buffer location we will log the original value into and store it + // Now get the buffer location we will log the previous value into and store it Node *log_addr = __ AddP(no_base, buffer, next_indexX); - __ store(__ ctrl(), log_addr, orig, T_OBJECT, Compile::AliasIdxRaw); - + __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw); // update the index __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw); @@ -3542,13 +3581,13 @@ // logging buffer is full, call the runtime const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type(); - __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, tls); + __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls); } __ end_if(); // (!index) - } __ end_if(); // (orig != NULL) + } __ end_if(); // (pre_val != NULL) } __ end_if(); // (!marking) // Final sync IdealKit and GraphKit. 
- sync_kit(ideal); + final_sync(ideal); } // @@ -3614,7 +3653,7 @@ // (Else it's an array (or unknown), and we want more precise card marks.) assert(adr != NULL, ""); - IdealKit ideal(gvn(), control(), merged_memory(), true); + IdealKit ideal(this, true); Node* tls = __ thread(); // ThreadLocalStorage @@ -3688,6 +3727,6 @@ } // Final sync IdealKit and GraphKit. - sync_kit(ideal); + final_sync(ideal); } #undef __
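The UseCondCardMark comment in the hunk above is about store traffic on the card table itself: the conditional barrier loads the card first and only dirties it when needed, so repeated writes to hot objects stop invalidating a cache line that other processors also touch. A stand-alone sketch of the two variants under the same fabricated card-table layout as the earlier sketch:

    #include <cstdint>

    static uint8_t card_table[1u << 20];
    static const int card_shift = 9;        // illustrative card size
    static const uint8_t dirty = 0;         // illustrative "dirty" encoding

    // Unconditional barrier: always store, even when the card is already dirty.
    inline void post_barrier(const void* obj) {
      card_table[reinterpret_cast<uintptr_t>(obj) >> card_shift] = dirty;
    }

    // UseCondCardMark-style barrier: test first, store only on a clean card.
    inline void cond_post_barrier(const void* obj) {
      uint8_t* card = &card_table[reinterpret_cast<uintptr_t>(obj) >> card_shift];
      if (*card != dirty) {
        *card = dirty;          // the store, and its coherence traffic, is now rare
      }
    }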
--- a/src/share/vm/opto/graphKit.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/graphKit.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -544,8 +544,10 @@ BasicType bt); // For the few case where the barriers need special help - void pre_barrier(Node* ctl, Node* obj, Node* adr, uint adr_idx, - Node* val, const TypeOopPtr* val_type, BasicType bt); + void pre_barrier(bool do_load, Node* ctl, + Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type, + Node* pre_val, + BasicType bt); void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx, Node* val, BasicType bt, bool use_precise); @@ -662,18 +664,22 @@ && Universe::heap()->can_elide_tlab_store_barriers()); } + // Sync Ideal and Graph kits. void sync_kit(IdealKit& ideal); + void final_sync(IdealKit& ideal); // vanilla/CMS post barrier void write_barrier_post(Node *store, Node* obj, Node* adr, uint adr_idx, Node* val, bool use_precise); // G1 pre/post barriers - void g1_write_barrier_pre(Node* obj, + void g1_write_barrier_pre(bool do_load, + Node* obj, Node* adr, uint alias_idx, Node* val, const TypeOopPtr* val_type, + Node* pre_val, BasicType bt); void g1_write_barrier_post(Node* store, @@ -767,15 +773,13 @@ // implementation of object creation Node* set_output_for_allocation(AllocateNode* alloc, - const TypeOopPtr* oop_type, - bool raw_mem_only); + const TypeOopPtr* oop_type); Node* get_layout_helper(Node* klass_node, jint& constant_value); Node* new_instance(Node* klass_node, Node* slow_test = NULL, - bool raw_mem_only = false, Node* *return_size_val = NULL); Node* new_array(Node* klass_node, Node* count_val, int nargs, - bool raw_mem_only = false, Node* *return_size_val = NULL); + Node* *return_size_val = NULL); // Handy for making control flow IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
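The new pre_barrier/g1_write_barrier_pre signatures carry a do_load flag and an optional pre_val so one entry point covers both the normal case (the barrier loads the previous value itself) and the Reference.get/Unsafe case further down (the caller already loaded it). A stand-alone toy of that split, with made-up globals standing in for the per-thread SATB queue and the marking flag:

    #include <cassert>
    #include <vector>

    static std::vector<const void*> satb_queue;   // stand-in for the SATB log buffer
    static bool marking_active = false;           // stand-in for the "marking" flag

    void satb_pre_barrier(bool do_load, const void* const* field, const void* pre_val) {
      if (!marking_active) return;                // no-op outside concurrent marking
      if (do_load) {
        assert(field != nullptr && pre_val == nullptr);
        pre_val = *field;                         // load the about-to-be-overwritten value
      } else {
        assert(pre_val != nullptr);               // caller supplied the previously loaded value
      }
      if (pre_val != nullptr) {
        satb_queue.push_back(pre_val);            // log it so concurrent marking keeps it live
      }
    }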
--- a/src/share/vm/opto/idealGraphPrinter.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/idealGraphPrinter.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -615,6 +615,7 @@ } } +#ifdef ASSERT if (node->debug_orig() != NULL) { stringStream dorigStream; Node* dorig = node->debug_orig(); @@ -629,6 +630,7 @@ } print_prop("debug_orig", dorigStream.as_string()); } +#endif if (_chaitin && _chaitin != (PhaseChaitin *)0xdeadbeef) { buffer[0] = 0;
--- a/src/share/vm/opto/idealKit.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/idealKit.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,15 +38,16 @@ const uint IdealKit::first_var = TypeFunc::Parms + 1; //----------------------------IdealKit----------------------------------------- -IdealKit::IdealKit(PhaseGVN &gvn, Node* control, Node* mem, bool delay_all_transforms, bool has_declarations) : - _gvn(gvn), C(gvn.C) { - _initial_ctrl = control; - _initial_memory = mem; +IdealKit::IdealKit(GraphKit* gkit, bool delay_all_transforms, bool has_declarations) : + _gvn(gkit->gvn()), C(gkit->C) { + _initial_ctrl = gkit->control(); + _initial_memory = gkit->merged_memory(); + _initial_i_o = gkit->i_o(); _delay_all_transforms = delay_all_transforms; _var_ct = 0; _cvstate = NULL; // We can go memory state free or else we need the entire memory state - assert(mem == NULL || mem->Opcode() == Op_MergeMem, "memory must be pre-split"); + assert(_initial_memory == NULL || _initial_memory->Opcode() == Op_MergeMem, "memory must be pre-split"); int init_size = 5; _pending_cvstates = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0); _delay_transform = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0); @@ -56,6 +57,13 @@ } } +//----------------------------sync_kit----------------------------------------- +void IdealKit::sync_kit(GraphKit* gkit) { + set_all_memory(gkit->merged_memory()); + set_i_o(gkit->i_o()); + set_ctrl(gkit->control()); +} + //-------------------------------if_then------------------------------------- // Create: if(left relop right) // / \ @@ -156,16 +164,14 @@ // onto the stack. void IdealKit::loop(GraphKit* gkit, int nargs, IdealVariable& iv, Node* init, BoolTest::mask relop, Node* limit, float prob, float cnt) { assert((state() & (BlockS|LoopS|IfThenS|ElseS)), "bad state for new loop"); - - // Sync IdealKit and graphKit. - gkit->set_all_memory(this->merged_memory()); - gkit->set_control(this->ctrl()); - // Add loop predicate. - gkit->add_predicate(nargs); - // Update IdealKit memory. - this->set_all_memory(gkit->merged_memory()); - this->set_ctrl(gkit->control()); - + if (UseLoopPredicate) { + // Sync IdealKit and graphKit. + gkit->sync_kit(*this); + // Add loop predicate. + gkit->add_predicate(nargs); + // Update IdealKit memory. 
+ sync_kit(gkit); + } set(iv, init); Node* head = make_label(1); bind(head); @@ -280,6 +286,7 @@ _cvstate = new_cvstate(); // initialize current cvstate set_ctrl(_initial_ctrl); // initialize control in current cvstate set_all_memory(_initial_memory);// initialize memory in current cvstate + set_i_o(_initial_i_o); // initialize i_o in current cvstate DEBUG_ONLY(_state->push(BlockS)); } @@ -421,6 +428,9 @@ // Get the region for the join state Node* join_region = join->in(TypeFunc::Control); assert(join_region != NULL, "join region must exist"); + if (join->in(TypeFunc::I_O) == NULL ) { + join->set_req(TypeFunc::I_O, merging->in(TypeFunc::I_O)); + } if (join->in(TypeFunc::Memory) == NULL ) { join->set_req(TypeFunc::Memory, merging->in(TypeFunc::Memory)); return; @@ -467,6 +477,20 @@ mms.set_memory(phi); } } + + Node* join_io = join->in(TypeFunc::I_O); + Node* merging_io = merging->in(TypeFunc::I_O); + if (join_io != merging_io) { + PhiNode* phi; + if (join_io->is_Phi() && join_io->as_Phi()->region() == join_region) { + phi = join_io->as_Phi(); + } else { + phi = PhiNode::make(join_region, join_io, Type::ABIO); + phi = (PhiNode*) delay_transform(phi); + join->set_req(TypeFunc::I_O, phi); + } + phi->set_req(slot, merging_io); + } } @@ -477,7 +501,8 @@ const char *leaf_name, Node* parm0, Node* parm1, - Node* parm2) { + Node* parm2, + Node* parm3) { // We only handle taking in RawMem and modifying RawMem const TypePtr* adr_type = TypeRawPtr::BOTTOM; @@ -498,6 +523,7 @@ if (parm0 != NULL) call->init_req(TypeFunc::Parms+0, parm0); if (parm1 != NULL) call->init_req(TypeFunc::Parms+1, parm1); if (parm2 != NULL) call->init_req(TypeFunc::Parms+2, parm2); + if (parm3 != NULL) call->init_req(TypeFunc::Parms+3, parm3); // Node *c = _gvn.transform(call); call = (CallNode *) _gvn.transform(call); @@ -516,3 +542,51 @@ assert(C->alias_type(call->adr_type()) == C->alias_type(adr_type), "call node must be constructed correctly"); } + + +void IdealKit::make_leaf_call_no_fp(const TypeFunc *slow_call_type, + address slow_call, + const char *leaf_name, + const TypePtr* adr_type, + Node* parm0, + Node* parm1, + Node* parm2, + Node* parm3) { + + // We only handle taking in RawMem and modifying RawMem + uint adr_idx = C->get_alias_index(adr_type); + + // Slow-path leaf call + int size = slow_call_type->domain()->cnt(); + CallNode *call = (CallNode*)new (C, size) CallLeafNoFPNode( slow_call_type, slow_call, leaf_name, adr_type); + + // Set fixed predefined input arguments + call->init_req( TypeFunc::Control, ctrl() ); + call->init_req( TypeFunc::I_O , top() ) ; // does no i/o + // Narrow memory as only memory input + call->init_req( TypeFunc::Memory , memory(adr_idx)); + call->init_req( TypeFunc::FramePtr, top() /* frameptr() */ ); + call->init_req( TypeFunc::ReturnAdr, top() ); + + if (parm0 != NULL) call->init_req(TypeFunc::Parms+0, parm0); + if (parm1 != NULL) call->init_req(TypeFunc::Parms+1, parm1); + if (parm2 != NULL) call->init_req(TypeFunc::Parms+2, parm2); + if (parm3 != NULL) call->init_req(TypeFunc::Parms+3, parm3); + + // Node *c = _gvn.transform(call); + call = (CallNode *) _gvn.transform(call); + Node *c = call; // dbx gets confused with call call->dump() + + // Slow leaf call has no side-effects, sets few values + + set_ctrl(transform( new (C, 1) ProjNode(call,TypeFunc::Control) )); + + // Make memory for the call + Node* mem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) ); + + // Set the RawPtr memory state only. 
+ set_memory(mem, adr_idx); + + assert(C->alias_type(call->adr_type()) == C->alias_type(adr_type), + "call node must be constructed correctly"); +}
--- a/src/share/vm/opto/idealKit.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/idealKit.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -108,6 +108,7 @@ bool _delay_all_transforms; // flag forcing all transforms to be delayed Node* _initial_ctrl; // saves initial control until variables declared Node* _initial_memory; // saves initial memory until variables declared + Node* _initial_i_o; // saves initial i_o until variables declared PhaseGVN& gvn() const { return _gvn; } // Create a new cvstate filled with nulls @@ -142,17 +143,21 @@ Node* memory(uint alias_idx); public: - IdealKit(PhaseGVN &gvn, Node* control, Node* memory, bool delay_all_transforms = false, bool has_declarations = false); + IdealKit(GraphKit* gkit, bool delay_all_transforms = false, bool has_declarations = false); ~IdealKit() { stop(); drain_delay_transform(); } + void sync_kit(GraphKit* gkit); + // Control Node* ctrl() { return _cvstate->in(TypeFunc::Control); } void set_ctrl(Node* ctrl) { _cvstate->set_req(TypeFunc::Control, ctrl); } Node* top() { return C->top(); } MergeMemNode* merged_memory() { return _cvstate->in(TypeFunc::Memory)->as_MergeMem(); } void set_all_memory(Node* mem) { _cvstate->set_req(TypeFunc::Memory, mem); } + Node* i_o() { return _cvstate->in(TypeFunc::I_O); } + void set_i_o(Node* c) { _cvstate->set_req(TypeFunc::I_O, c); } void set(IdealVariable& v, Node* rhs) { _cvstate->set_req(first_var + v.id(), rhs); } Node* value(IdealVariable& v) { return _cvstate->in(first_var + v.id()); } void dead(IdealVariable& v) { set(v, (Node*)NULL); } @@ -239,7 +244,18 @@ const char *leaf_name, Node* parm0, Node* parm1 = NULL, - Node* parm2 = NULL); + Node* parm2 = NULL, + Node* parm3 = NULL); + + void make_leaf_call_no_fp(const TypeFunc *slow_call_type, + address slow_call, + const char *leaf_name, + const TypePtr* adr_type, + Node* parm0, + Node* parm1, + Node* parm2, + Node* parm3); + }; #endif // SHARE_VM_OPTO_IDEALKIT_HPP
--- a/src/share/vm/opto/ifnode.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/ifnode.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -27,6 +27,7 @@ #include "opto/addnode.hpp" #include "opto/cfgnode.hpp" #include "opto/connode.hpp" +#include "opto/loopnode.hpp" #include "opto/phaseX.hpp" #include "opto/runtime.hpp" #include "opto/subnode.hpp" @@ -222,22 +223,36 @@ // Make a region merging constants and a region merging the rest uint req_c = 0; + Node* predicate_proj = NULL; for (uint ii = 1; ii < r->req(); ii++) { - if( phi->in(ii) == con1 ) { + if (phi->in(ii) == con1) { req_c++; } + Node* proj = PhaseIdealLoop::find_predicate(r->in(ii)); + if (proj != NULL) { + assert(predicate_proj == NULL, "only one predicate entry expected"); + predicate_proj = proj; + } } + Node* predicate_c = NULL; + Node* predicate_x = NULL; + bool counted_loop = r->is_CountedLoop(); + Node *region_c = new (igvn->C, req_c + 1) RegionNode(req_c + 1); Node *phi_c = con1; uint len = r->req(); - Node *region_x = new (igvn->C, len - req_c + 1) RegionNode(len - req_c + 1); + Node *region_x = new (igvn->C, len - req_c) RegionNode(len - req_c); Node *phi_x = PhiNode::make_blank(region_x, phi); for (uint i = 1, i_c = 1, i_x = 1; i < len; i++) { - if( phi->in(i) == con1 ) { + if (phi->in(i) == con1) { region_c->init_req( i_c++, r ->in(i) ); + if (r->in(i) == predicate_proj) + predicate_c = predicate_proj; } else { region_x->init_req( i_x, r ->in(i) ); phi_x ->init_req( i_x++, phi->in(i) ); + if (r->in(i) == predicate_proj) + predicate_x = predicate_proj; } } @@ -277,8 +292,20 @@ // Make the true/false arms Node *iff_c_t = phase->transform(new (igvn->C, 1) IfTrueNode (iff_c)); Node *iff_c_f = phase->transform(new (igvn->C, 1) IfFalseNode(iff_c)); + if (predicate_c != NULL) { + assert(predicate_x == NULL, "only one predicate entry expected"); + // Clone loop predicates to each path + iff_c_t = igvn->clone_loop_predicates(predicate_c, iff_c_t, !counted_loop); + iff_c_f = igvn->clone_loop_predicates(predicate_c, iff_c_f, !counted_loop); + } Node *iff_x_t = phase->transform(new (igvn->C, 1) IfTrueNode (iff_x)); Node *iff_x_f = phase->transform(new (igvn->C, 1) IfFalseNode(iff_x)); + if (predicate_x != NULL) { + assert(predicate_c == NULL, "only one predicate entry expected"); + // Clone loop predicates to each path + iff_x_t = igvn->clone_loop_predicates(predicate_x, iff_x_t, !counted_loop); + iff_x_f = igvn->clone_loop_predicates(predicate_x, iff_x_f, !counted_loop); + } // Merge the TRUE paths Node *region_s = new (igvn->C, 3) RegionNode(3); @@ -519,6 +546,7 @@ Node *new_bol = gvn->transform( new (gvn->C, 2) BoolNode( new_cmp, bol->as_Bool()->_test._test ) ); igvn->hash_delete( iff ); iff->set_req_X( 1, new_bol, igvn ); + igvn->_worklist.push( iff ); } //------------------------------up_one_dom-------------------------------------
--- a/src/share/vm/opto/lcm.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/lcm.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -688,20 +688,22 @@ } ready_cnt[n->_idx] = local; // Count em up - // A few node types require changing a required edge to a precedence edge - // before allocation. +#ifdef ASSERT if( UseConcMarkSweepGC || UseG1GC ) { if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) { - // Note: Required edges with an index greater than oper_input_base - // are not supported by the allocator. - // Note2: Can only depend on unmatched edge being last, - // can not depend on its absolute position. - Node *oop_store = n->in(n->req() - 1); - n->del_req(n->req() - 1); - n->add_prec(oop_store); - assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark"); + // Check the precedence edges + for (uint prec = n->req(); prec < n->len(); prec++) { + Node* oop_store = n->in(prec); + if (oop_store != NULL) { + assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark"); + } + } } } +#endif + + // A few node types require changing a required edge to a precedence edge + // before allocation. if( n->is_Mach() && n->req() > TypeFunc::Parms && (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire || n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
--- a/src/share/vm/opto/library_call.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/library_call.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -166,6 +166,10 @@ // This returns Type::AnyPtr, RawPtr, or OopPtr. int classify_unsafe_addr(Node* &base, Node* &offset); Node* make_unsafe_address(Node* base, Node* offset); + // Helper for inline_unsafe_access. + // Generates the guards that check whether the result of + // Unsafe.getObject should be recorded in an SATB log buffer. + void insert_g1_pre_barrier(Node* base_oop, Node* offset, Node* pre_val); bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile); bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static); bool inline_unsafe_allocate(); @@ -240,6 +244,8 @@ bool inline_numberOfTrailingZeros(vmIntrinsics::ID id); bool inline_bitCount(vmIntrinsics::ID id); bool inline_reverseBytes(vmIntrinsics::ID id); + + bool inline_reference_get(); }; @@ -336,6 +342,14 @@ if (!UsePopCountInstruction) return NULL; break; + case vmIntrinsics::_Reference_get: + // It is only when G1 is enabled that we absolutely + // need to use the intrinsic version of Reference.get() + // so that the value in the referent field, if necessary, + // can be registered by the pre-barrier code. + if (!UseG1GC) return NULL; + break; + default: assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility"); assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?"); @@ -387,6 +401,7 @@ tty->print_cr("Intrinsic %s", str); } #endif + if (kit.try_to_inline()) { if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, kit.bci(), is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)"); @@ -402,11 +417,19 @@ } if (PrintIntrinsics) { - tty->print("Did not inline intrinsic %s%s at bci:%d in", + if (jvms->has_method()) { + // Not a root compile. + tty->print("Did not inline intrinsic %s%s at bci:%d in", + vmIntrinsics::name_at(intrinsic_id()), + (is_virtual() ? " (virtual)" : ""), kit.bci()); + kit.caller()->print_short_name(tty); + tty->print_cr(" (%d bytes)", kit.caller()->code_size()); + } else { + // Root compile + tty->print("Did not generate intrinsic %s%s at bci:%d in", vmIntrinsics::name_at(intrinsic_id()), (is_virtual() ? " (virtual)" : ""), kit.bci()); - kit.caller()->print_short_name(tty); - tty->print_cr(" (%d bytes)", kit.caller()->code_size()); + } } C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed); return NULL; @@ -418,6 +441,14 @@ const bool is_native_ptr = true; const bool is_static = true; + if (!jvms()->has_method()) { + // Root JVMState has a null method. + assert(map()->memory()->Opcode() == Op_Parm, ""); + // Insert the memory aliasing node + set_all_memory(reset_memory()); + } + assert(merged_memory(), ""); + switch (intrinsic_id()) { case vmIntrinsics::_hashCode: return inline_native_hashcode(intrinsic()->is_virtual(), !is_static); @@ -658,6 +689,9 @@ case vmIntrinsics::_getCallerClass: return inline_native_Reflection_getCallerClass(); + case vmIntrinsics::_Reference_get: + return inline_reference_get(); + default: // If you get here, it may be that someone has added a new intrinsic // to the list in vmSymbols.hpp without implementing it here. 
@@ -833,12 +867,10 @@ Node* str1_offset = make_load(no_ctrl, str1_offseta, TypeInt::INT, T_INT, string_type->add_offset(offset_offset)); Node* str1_start = array_element_address(str1_value, str1_offset, T_CHAR); - // Pin loads from String::equals() argument since it could be NULL. - Node* str2_ctrl = (opcode == Op_StrEquals) ? control() : no_ctrl; Node* str2_valuea = basic_plus_adr(str2, str2, value_offset); - Node* str2_value = make_load(str2_ctrl, str2_valuea, value_type, T_OBJECT, string_type->add_offset(value_offset)); + Node* str2_value = make_load(no_ctrl, str2_valuea, value_type, T_OBJECT, string_type->add_offset(value_offset)); Node* str2_offseta = basic_plus_adr(str2, str2, offset_offset); - Node* str2_offset = make_load(str2_ctrl, str2_offseta, TypeInt::INT, T_INT, string_type->add_offset(offset_offset)); + Node* str2_offset = make_load(no_ctrl, str2_offseta, TypeInt::INT, T_INT, string_type->add_offset(offset_offset)); Node* str2_start = array_element_address(str2_value, str2_offset, T_CHAR); Node* result = NULL; @@ -978,14 +1010,15 @@ if (!stopped()) { // Properly cast the argument to String argument = _gvn.transform(new (C, 2) CheckCastPPNode(control(), argument, string_type)); + // This path is taken only when argument's type is String:NotNull. + argument = cast_not_null(argument, false); // Get counts for string and argument Node* receiver_cnta = basic_plus_adr(receiver, receiver, count_offset); receiver_cnt = make_load(no_ctrl, receiver_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset)); - // Pin load from argument string since it could be NULL. Node* argument_cnta = basic_plus_adr(argument, argument, count_offset); - argument_cnt = make_load(control(), argument_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset)); + argument_cnt = make_load(no_ctrl, argument_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset)); // Check for receiver count != argument count Node* cmp = _gvn.transform( new(C, 3) CmpINode(receiver_cnt, argument_cnt) ); @@ -1120,7 +1153,7 @@ const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin)); const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot); - IdealKit kit(gvn(), control(), merged_memory(), false, true); + IdealKit kit(this, false, true); #define __ kit. Node* zero = __ ConI(0); Node* one = __ ConI(1); @@ -1171,7 +1204,7 @@ __ bind(return_); // Final sync IdealKit and GraphKit. - sync_kit(kit); + final_sync(kit); Node* result = __ value(rtn); #undef __ C->set_has_loops(true); @@ -2076,6 +2109,106 @@ const static BasicType T_ADDRESS_HOLDER = T_LONG; +// Helper that guards and inserts a G1 pre-barrier. +void LibraryCallKit::insert_g1_pre_barrier(Node* base_oop, Node* offset, Node* pre_val) { + assert(UseG1GC, "should not call this otherwise"); + + // We could be accessing the referent field of a reference object. If so, when G1 + // is enabled, we need to log the value in the referent field in an SATB buffer. + // This routine performs some compile time filters and generates suitable + // runtime filters that guard the pre-barrier code. + + // Some compile time checks. + + // If offset is a constant, is it java_lang_ref_Reference::_reference_offset? 
+ const TypeX* otype = offset->find_intptr_t_type(); + if (otype != NULL && otype->is_con() && + otype->get_con() != java_lang_ref_Reference::referent_offset) { + // Constant offset but not the reference_offset so just return + return; + } + + // We only need to generate the runtime guards for instances. + const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr(); + if (btype != NULL) { + if (btype->isa_aryptr()) { + // Array type so nothing to do + return; + } + + const TypeInstPtr* itype = btype->isa_instptr(); + if (itype != NULL) { + // Can the klass of base_oop be statically determined + // to be _not_ a sub-class of Reference? + ciKlass* klass = itype->klass(); + if (klass->is_subtype_of(env()->Reference_klass()) && + !env()->Reference_klass()->is_subtype_of(klass)) { + return; + } + } + } + + // The compile time filters did not reject base_oop/offset so + // we need to generate the following runtime filters + // + // if (offset == java_lang_ref_Reference::_reference_offset) { + // if (base != null) { + // if (klass(base)->reference_type() != REF_NONE)) { + // pre_barrier(_, pre_val, ...); + // } + // } + // } + + float likely = PROB_LIKELY(0.999); + float unlikely = PROB_UNLIKELY(0.999); + + IdealKit ideal(this); +#define __ ideal. + + const int reference_type_offset = instanceKlass::reference_type_offset_in_bytes() + + sizeof(oopDesc); + + Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset); + + __ if_then(offset, BoolTest::eq, referent_off, unlikely); { + __ if_then(base_oop, BoolTest::ne, null(), likely); { + + // Update graphKit memory and control from IdealKit. + sync_kit(ideal); + + Node* ref_klass_con = makecon(TypeKlassPtr::make(env()->Reference_klass())); + Node* is_instof = gen_instanceof(base_oop, ref_klass_con); + + // Update IdealKit memory and control from graphKit. + __ sync_kit(this); + + Node* one = __ ConI(1); + + __ if_then(is_instof, BoolTest::eq, one, unlikely); { + + // Update graphKit from IdeakKit. + sync_kit(ideal); + + // Use the pre-barrier to record the value in the referent field + pre_barrier(false /* do_load */, + __ ctrl(), + NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */, + pre_val /* pre_val */, + T_OBJECT); + + // Update IdealKit from graphKit. + __ sync_kit(this); + + } __ end_if(); // _ref_type != ref_none + } __ end_if(); // base != NULL + } __ end_if(); // offset == referent_offset + + // Final sync IdealKit and GraphKit. + final_sync(ideal); +#undef __ +} + + // Interpret Unsafe.fieldOffset cookies correctly: extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset); @@ -2152,9 +2285,11 @@ // Build address expression. See the code in inline_unsafe_prefetch. Node *adr; Node *heap_base_oop = top(); + Node* offset = top(); + if (!is_native_ptr) { // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset - Node* offset = pop_pair(); + offset = pop_pair(); // The base is either a Java object or a value produced by Unsafe.staticFieldBase Node* base = pop(); // We currently rely on the cookies produced by Unsafe.xxxFieldOffset @@ -2195,6 +2330,13 @@ // or Compile::must_alias will throw a diagnostic assert.) bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM); + // If we are reading the value of the referent field of a Reference + // object (either by using Unsafe directly or through reflection) + // then, if G1 is enabled, we need to record the referent in an + // SATB log buffer using the pre-barrier mechanism. 
+ bool need_read_barrier = UseG1GC && !is_native_ptr && !is_store && + offset != top() && heap_base_oop != top(); + if (!is_store && type == T_OBJECT) { // Attempt to infer a sharper value type from the offset and base type. ciKlass* sharpened_klass = NULL; @@ -2278,8 +2420,13 @@ case T_SHORT: case T_INT: case T_FLOAT: + push(p); + break; case T_OBJECT: - push( p ); + if (need_read_barrier) { + insert_g1_pre_barrier(heap_base_oop, offset, p); + } + push(p); break; case T_ADDRESS: // Cast to an int type. @@ -2318,22 +2465,20 @@ // of it. So we need to emit code to conditionally do the proper type of // store. - IdealKit ideal(gvn(), control(), merged_memory()); + IdealKit ideal(this); #define __ ideal. // QQQ who knows what probability is here?? __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); { // Sync IdealKit and graphKit. - set_all_memory( __ merged_memory()); - set_control(__ ctrl()); + sync_kit(ideal); Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type); // Update IdealKit memory. - __ set_all_memory(merged_memory()); - __ set_ctrl(control()); + __ sync_kit(this); } __ else_(); { __ store(__ ctrl(), adr, val, type, alias_type->index(), is_volatile); } __ end_if(); // Final sync IdealKit and GraphKit. - sync_kit(ideal); + final_sync(ideal); #undef __ } } @@ -2536,7 +2681,10 @@ case T_OBJECT: // reference stores need a store barrier. // (They don't if CAS fails, but it isn't worth checking.) - pre_barrier(control(), base, adr, alias_idx, newval, value_type->make_oopptr(), T_OBJECT); + pre_barrier(true /* do_load*/, + control(), base, adr, alias_idx, newval, value_type->make_oopptr(), + NULL /* pre_val*/, + T_OBJECT); #ifdef _LP64 if (adr->bottom_type()->is_ptr_to_narrowoop()) { Node *newval_enc = _gvn.transform(new (C, 2) EncodePNode(newval, newval->bottom_type()->make_narrowoop())); @@ -3378,8 +3526,7 @@ Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) ); Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length); - const bool raw_mem_only = true; - newcopy = new_array(klass_node, length, 0, raw_mem_only); + newcopy = new_array(klass_node, length, 0); // Generate a direct call to the right arraycopy function(s). // We know the copy is disjoint but we might not know if the @@ -4176,8 +4323,6 @@ const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; int raw_adr_idx = Compile::AliasIdxRaw; - const bool raw_mem_only = true; - Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL); if (array_ctl != NULL) { @@ -4186,8 +4331,7 @@ set_control(array_ctl); Node* obj_length = load_array_length(obj); Node* obj_size = NULL; - Node* alloc_obj = new_array(obj_klass, obj_length, 0, - raw_mem_only, &obj_size); + Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size); if (!use_ReduceInitialCardMarks()) { // If it is an oop array, it requires very special treatment, @@ -4259,7 +4403,7 @@ // It's an instance, and it passed the slow-path tests. 
PreserveJVMState pjvms(this); Node* obj_size = NULL; - Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size); + Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size); copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks()); @@ -4294,81 +4438,6 @@ return true; } - -// constants for computing the copy function -enum { - COPYFUNC_UNALIGNED = 0, - COPYFUNC_ALIGNED = 1, // src, dest aligned to HeapWordSize - COPYFUNC_CONJOINT = 0, - COPYFUNC_DISJOINT = 2 // src != dest, or transfer can descend -}; - -// Note: The condition "disjoint" applies also for overlapping copies -// where an descending copy is permitted (i.e., dest_offset <= src_offset). -static address -select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) { - int selector = - (aligned ? COPYFUNC_ALIGNED : COPYFUNC_UNALIGNED) + - (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT); - -#define RETURN_STUB(xxx_arraycopy) { \ - name = #xxx_arraycopy; \ - return StubRoutines::xxx_arraycopy(); } - -#define RETURN_STUB_PARM(xxx_arraycopy, parm) { \ - name = #xxx_arraycopy; \ - return StubRoutines::xxx_arraycopy(parm); } - - switch (t) { - case T_BYTE: - case T_BOOLEAN: - switch (selector) { - case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jbyte_arraycopy); - case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jbyte_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jbyte_disjoint_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jbyte_disjoint_arraycopy); - } - case T_CHAR: - case T_SHORT: - switch (selector) { - case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jshort_arraycopy); - case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jshort_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jshort_disjoint_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jshort_disjoint_arraycopy); - } - case T_INT: - case T_FLOAT: - switch (selector) { - case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jint_arraycopy); - case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jint_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jint_disjoint_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jint_disjoint_arraycopy); - } - case T_DOUBLE: - case T_LONG: - switch (selector) { - case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jlong_arraycopy); - case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jlong_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jlong_disjoint_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jlong_disjoint_arraycopy); - } - case T_ARRAY: - case T_OBJECT: - switch (selector) { - case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized); - case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB_PARM(arrayof_oop_arraycopy, dest_uninitialized); - case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB_PARM(oop_disjoint_arraycopy, dest_uninitialized); - case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB_PARM(arrayof_oop_disjoint_arraycopy, dest_uninitialized); - } - default: - ShouldNotReachHere(); - return NULL; - } - -#undef RETURN_STUB -#undef RETURN_STUB_PARM -} - //------------------------------basictype2arraycopy---------------------------- address LibraryCallKit::basictype2arraycopy(BasicType t, Node* src_offset, @@ -4401,7 +4470,7 
@@ disjoint = true; } - return select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized); + return StubRoutines::select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized); } @@ -5312,3 +5381,44 @@ copyfunc_addr, copyfunc_name, adr_type, src_start, dest_start, copy_length XTOP); } + +//----------------------------inline_reference_get---------------------------- + +bool LibraryCallKit::inline_reference_get() { + const int nargs = 1; // self + + guarantee(java_lang_ref_Reference::referent_offset > 0, + "should have already been set"); + + int referent_offset = java_lang_ref_Reference::referent_offset; + + // Restore the stack and pop off the argument + _sp += nargs; + Node *reference_obj = pop(); + + // Null check on self without removing any arguments. + _sp += nargs; + reference_obj = do_null_check(reference_obj, T_OBJECT); + _sp -= nargs; + + if (stopped()) return true; + + Node *adr = basic_plus_adr(reference_obj, reference_obj, referent_offset); + + ciInstanceKlass* klass = env()->Object_klass(); + const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass); + + Node* no_ctrl = NULL; + Node *result = make_load(no_ctrl, adr, object_type, T_OBJECT); + + // Use the pre-barrier to record the value in the referent field + pre_barrier(false /* do_load */, + control(), + NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */, + result /* pre_val */, + T_OBJECT); + + push(result); + return true; +} +
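The intrinsics added above for Unsafe object reads and for Reference.get() exist so that, when G1 is in use, the value loaded from java.lang.ref.Reference.referent can be handed to the SATB pre-barrier. A minimal sketch of the guard shape that insert_g1_pre_barrier emits, written as plain Java for illustration only (REFERENT_OFFSET and satbEnqueue are hypothetical stand-ins, not HotSpot names):

    import java.lang.ref.Reference;

    // Illustration of the guard shape described in insert_g1_pre_barrier (not HotSpot code).
    class ReferentBarrierSketch {
        static final long REFERENT_OFFSET = 16L;  // hypothetical stand-in for java_lang_ref_Reference::referent_offset

        static void maybePreBarrier(Object base, long offset, Object preVal) {
            if (offset == REFERENT_OFFSET) {           // offset == referent_offset?
                if (base != null) {                    // base != null?
                    if (base instanceof Reference) {   // klass(base)->reference_type() != REF_NONE?
                        satbEnqueue(preVal);           // pre_barrier(..., pre_val, ...)
                    }
                }
            }
        }

        static void satbEnqueue(Object val) {
            // stand-in for the G1 SATB log-buffer enqueue done by the real pre-barrier
        }
    }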
--- a/src/share/vm/opto/locknode.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/locknode.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/opto/loopPredicate.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,1024 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "opto/loopnode.hpp" +#include "opto/addnode.hpp" +#include "opto/callnode.hpp" +#include "opto/connode.hpp" +#include "opto/loopnode.hpp" +#include "opto/mulnode.hpp" +#include "opto/rootnode.hpp" +#include "opto/subnode.hpp" + +/* + * The general idea of Loop Predication is to insert a predicate on the entry + * path to a loop, and raise a uncommon trap if the check of the condition fails. + * The condition checks are promoted from inside the loop body, and thus + * the checks inside the loop could be eliminated. Currently, loop predication + * optimization has been applied to remove array range check and loop invariant + * checks (such as null checks). +*/ + +//-------------------------------is_uncommon_trap_proj---------------------------- +// Return true if proj is the form of "proj->[region->..]call_uct" +bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason) { + int path_limit = 10; + assert(proj, "invalid argument"); + Node* out = proj; + for (int ct = 0; ct < path_limit; ct++) { + out = out->unique_ctrl_out(); + if (out == NULL) + return false; + if (out->is_CallStaticJava()) { + int req = out->as_CallStaticJava()->uncommon_trap_request(); + if (req != 0) { + Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req); + if (trap_reason == reason || reason == Deoptimization::Reason_none) { + return true; + } + } + return false; // don't do further after call + } + if (out->Opcode() != Op_Region) + return false; + } + return false; +} + +//-------------------------------is_uncommon_trap_if_pattern------------------------- +// Return true for "if(test)-> proj -> ... +// | +// V +// other_proj->[region->..]call_uct" +// +// "must_reason_predicate" means the uct reason must be Reason_predicate +bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, Deoptimization::DeoptReason reason) { + Node *in0 = proj->in(0); + if (!in0->is_If()) return false; + // Variation of a dead If node. 
+ if (in0->outcnt() < 2) return false; + IfNode* iff = in0->as_If(); + + // we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate + if (reason != Deoptimization::Reason_none) { + if (iff->in(1)->Opcode() != Op_Conv2B || + iff->in(1)->in(1)->Opcode() != Op_Opaque1) { + return false; + } + } + + ProjNode* other_proj = iff->proj_out(1-proj->_con)->as_Proj(); + if (is_uncommon_trap_proj(other_proj, reason)) { + assert(reason == Deoptimization::Reason_none || + Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list"); + return true; + } + return false; +} + +//-------------------------------register_control------------------------- +void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred) { + assert(n->is_CFG(), "must be control node"); + _igvn.register_new_node_with_optimizer(n); + loop->_body.push(n); + set_loop(n, loop); + // When called from beautify_loops() idom is not constructed yet. + if (_idom != NULL) { + set_idom(n, pred, dom_depth(pred)); + } +} + +//------------------------------create_new_if_for_predicate------------------------ +// create a new if above the uct_if_pattern for the predicate to be promoted. +// +// before after +// ---------- ---------- +// ctrl ctrl +// | | +// | | +// v v +// iff new_iff +// / \ / \ +// / \ / \ +// v v v v +// uncommon_proj cont_proj if_uct if_cont +// \ | | | | +// \ | | | | +// v v v | v +// rgn loop | iff +// | | / \ +// | | / \ +// v | v v +// uncommon_trap | uncommon_proj cont_proj +// \ \ | | +// \ \ | | +// v v v v +// rgn loop +// | +// | +// v +// uncommon_trap +// +// +// We will create a region to guard the uct call if there is no one there. +// The true projecttion (if_cont) of the new_iff is returned. +// This code is also used to clone predicates to clonned loops. +ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, + Deoptimization::DeoptReason reason) { + assert(is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!"); + IfNode* iff = cont_proj->in(0)->as_If(); + + ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con); + Node *rgn = uncommon_proj->unique_ctrl_out(); + assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct"); + + uint proj_index = 1; // region's edge corresponding to uncommon_proj + if (!rgn->is_Region()) { // create a region to guard the call + assert(rgn->is_Call(), "must be call uct"); + CallNode* call = rgn->as_Call(); + IdealLoopTree* loop = get_loop(call); + rgn = new (C, 1) RegionNode(1); + rgn->add_req(uncommon_proj); + register_control(rgn, loop, uncommon_proj); + _igvn.hash_delete(call); + call->set_req(0, rgn); + // When called from beautify_loops() idom is not constructed yet. + if (_idom != NULL) { + set_idom(call, rgn, dom_depth(rgn)); + } + } else { + // Find region's edge corresponding to uncommon_proj + for (; proj_index < rgn->req(); proj_index++) + if (rgn->in(proj_index) == uncommon_proj) break; + assert(proj_index < rgn->req(), "sanity"); + } + + Node* entry = iff->in(0); + if (new_entry != NULL) { + // Clonning the predicate to new location. 
+ entry = new_entry; + } + // Create new_iff + IdealLoopTree* lp = get_loop(entry); + IfNode *new_iff = iff->clone()->as_If(); + new_iff->set_req(0, entry); + register_control(new_iff, lp, entry); + Node *if_cont = new (C, 1) IfTrueNode(new_iff); + Node *if_uct = new (C, 1) IfFalseNode(new_iff); + if (cont_proj->is_IfFalse()) { + // Swap + Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp; + } + register_control(if_cont, lp, new_iff); + register_control(if_uct, get_loop(rgn), new_iff); + + // if_uct to rgn + _igvn.hash_delete(rgn); + rgn->add_req(if_uct); + // When called from beautify_loops() idom is not constructed yet. + if (_idom != NULL) { + Node* ridom = idom(rgn); + Node* nrdom = dom_lca(ridom, new_iff); + set_idom(rgn, nrdom, dom_depth(rgn)); + } + + // If rgn has phis add new edges which has the same + // value as on original uncommon_proj pass. + assert(rgn->in(rgn->req() -1) == if_uct, "new edge should be last"); + bool has_phi = false; + for (DUIterator_Fast imax, i = rgn->fast_outs(imax); i < imax; i++) { + Node* use = rgn->fast_out(i); + if (use->is_Phi() && use->outcnt() > 0) { + assert(use->in(0) == rgn, ""); + _igvn.hash_delete(use); + use->add_req(use->in(proj_index)); + _igvn._worklist.push(use); + has_phi = true; + } + } + assert(!has_phi || rgn->req() > 3, "no phis when region is created"); + + if (new_entry == NULL) { + // Attach if_cont to iff + _igvn.hash_delete(iff); + iff->set_req(0, if_cont); + if (_idom != NULL) { + set_idom(iff, if_cont, dom_depth(iff)); + } + } + return if_cont->as_Proj(); +} + +//------------------------------create_new_if_for_predicate------------------------ +// Create a new if below new_entry for the predicate to be cloned (IGVN optimization) +ProjNode* PhaseIterGVN::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, + Deoptimization::DeoptReason reason) { + assert(new_entry != 0, "only used for clone predicate"); + assert(PhaseIdealLoop::is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!"); + IfNode* iff = cont_proj->in(0)->as_If(); + + ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con); + Node *rgn = uncommon_proj->unique_ctrl_out(); + assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct"); + + uint proj_index = 1; // region's edge corresponding to uncommon_proj + if (!rgn->is_Region()) { // create a region to guard the call + assert(rgn->is_Call(), "must be call uct"); + CallNode* call = rgn->as_Call(); + rgn = new (C, 1) RegionNode(1); + register_new_node_with_optimizer(rgn); + rgn->add_req(uncommon_proj); + hash_delete(call); + call->set_req(0, rgn); + } else { + // Find region's edge corresponding to uncommon_proj + for (; proj_index < rgn->req(); proj_index++) + if (rgn->in(proj_index) == uncommon_proj) break; + assert(proj_index < rgn->req(), "sanity"); + } + + // Create new_iff in new location. + IfNode *new_iff = iff->clone()->as_If(); + new_iff->set_req(0, new_entry); + + register_new_node_with_optimizer(new_iff); + Node *if_cont = new (C, 1) IfTrueNode(new_iff); + Node *if_uct = new (C, 1) IfFalseNode(new_iff); + if (cont_proj->is_IfFalse()) { + // Swap + Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp; + } + register_new_node_with_optimizer(if_cont); + register_new_node_with_optimizer(if_uct); + + // if_uct to rgn + hash_delete(rgn); + rgn->add_req(if_uct); + + // If rgn has phis add corresponding new edges which has the same + // value as on original uncommon_proj pass. 
+ assert(rgn->in(rgn->req() -1) == if_uct, "new edge should be last"); + bool has_phi = false; + for (DUIterator_Fast imax, i = rgn->fast_outs(imax); i < imax; i++) { + Node* use = rgn->fast_out(i); + if (use->is_Phi() && use->outcnt() > 0) { + hash_delete(use); + use->add_req(use->in(proj_index)); + _worklist.push(use); + has_phi = true; + } + } + assert(!has_phi || rgn->req() > 3, "no phis when region is created"); + + return if_cont->as_Proj(); +} + +//--------------------------clone_predicate----------------------- +ProjNode* PhaseIdealLoop::clone_predicate(ProjNode* predicate_proj, Node* new_entry, + Deoptimization::DeoptReason reason, + PhaseIdealLoop* loop_phase, + PhaseIterGVN* igvn) { + ProjNode* new_predicate_proj; + if (loop_phase != NULL) { + new_predicate_proj = loop_phase->create_new_if_for_predicate(predicate_proj, new_entry, reason); + } else { + new_predicate_proj = igvn->create_new_if_for_predicate(predicate_proj, new_entry, reason); + } + IfNode* iff = new_predicate_proj->in(0)->as_If(); + Node* ctrl = iff->in(0); + + // Match original condition since predicate's projections could be swapped. + assert(predicate_proj->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be"); + Node* opq = new (igvn->C, 2) Opaque1Node(igvn->C, predicate_proj->in(0)->in(1)->in(1)->in(1)); + igvn->C->add_predicate_opaq(opq); + + Node* bol = new (igvn->C, 2) Conv2BNode(opq); + if (loop_phase != NULL) { + loop_phase->register_new_node(opq, ctrl); + loop_phase->register_new_node(bol, ctrl); + } else { + igvn->register_new_node_with_optimizer(opq); + igvn->register_new_node_with_optimizer(bol); + } + igvn->hash_delete(iff); + iff->set_req(1, bol); + return new_predicate_proj; +} + +//--------------------------move_predicate----------------------- +// Cut predicate from old place and move it to new. +ProjNode* PhaseIdealLoop::move_predicate(ProjNode* predicate_proj, Node* new_entry, + Deoptimization::DeoptReason reason, + PhaseIdealLoop* loop_phase, + PhaseIterGVN* igvn) { + assert(new_entry != NULL, "must be"); + assert(predicate_proj->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be"); + IfNode* iff = predicate_proj->in(0)->as_If(); + Node* old_entry = iff->in(0); + + // Cut predicate from old place. + Node* old = predicate_proj; + igvn->_worklist.push(old); + for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin;) { + Node* use = old->last_out(i); // for each use... + igvn->hash_delete(use); + igvn->_worklist.push(use); + // Update use-def info + uint uses_found = 0; + for (uint j = 0; j < use->req(); j++) { + if (use->in(j) == old) { + use->set_req(j, old_entry); + uses_found++; + if (loop_phase != NULL) { + if (use->is_CFG()) { + // When called from beautify_loops() idom is not constructed yet. + if (loop_phase->_idom != NULL) + loop_phase->set_idom(use, old_entry, loop_phase->dom_depth(use)); + } else { + loop_phase->set_ctrl(use, old_entry); + } + } + } + } + i -= uses_found; // we deleted 1 or more copies of this edge + } + + // Move predicate. + igvn->hash_delete(iff); + iff->set_req(0, new_entry); + igvn->_worklist.push(iff); + + if (loop_phase != NULL) { + // Fix up idom and ctrl. + loop_phase->set_ctrl(iff->in(1), new_entry); + loop_phase->set_ctrl(iff->in(1)->in(1), new_entry); + // When called from beautify_loops() idom is not constructed yet. 
+ if (loop_phase->_idom != NULL) + loop_phase->set_idom(iff, new_entry, loop_phase->dom_depth(iff)); + } + + return predicate_proj; +} + +//--------------------------clone_loop_predicates----------------------- +// Interface from IGVN +Node* PhaseIterGVN::clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) { + return PhaseIdealLoop::clone_loop_predicates(old_entry, new_entry, false, clone_limit_check, NULL, this); +} +Node* PhaseIterGVN::move_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) { + return PhaseIdealLoop::clone_loop_predicates(old_entry, new_entry, true, clone_limit_check, NULL, this); +} + +// Interface from PhaseIdealLoop +Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) { + return clone_loop_predicates(old_entry, new_entry, false, clone_limit_check, this, &this->_igvn); +} +Node* PhaseIdealLoop::move_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check) { + return clone_loop_predicates(old_entry, new_entry, true, clone_limit_check, this, &this->_igvn); +} + +// Clone loop predicates to cloned loops (peeled, unswitched, split_if). +Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry, + bool move_predicates, + bool clone_limit_check, + PhaseIdealLoop* loop_phase, + PhaseIterGVN* igvn) { +#ifdef ASSERT + if (new_entry == NULL || !(new_entry->is_Proj() || new_entry->is_Region() || new_entry->is_SafePoint())) { + if (new_entry != NULL) + new_entry->dump(); + assert(false, "not IfTrue, IfFalse, Region or SafePoint"); + } +#endif + // Search original predicates + Node* entry = old_entry; + ProjNode* limit_check_proj = NULL; + if (LoopLimitCheck) { + limit_check_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); + if (limit_check_proj != NULL) { + entry = entry->in(0)->in(0); + } + } + if (UseLoopPredicate) { + ProjNode* predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); + if (predicate_proj != NULL) { // right pattern that can be used by loop predication + if (move_predicates) { + new_entry = move_predicate(predicate_proj, new_entry, + Deoptimization::Reason_predicate, + loop_phase, igvn); + assert(new_entry == predicate_proj, "old predicate fall through projection"); + } else { + // clone predicate + new_entry = clone_predicate(predicate_proj, new_entry, + Deoptimization::Reason_predicate, + loop_phase, igvn); + assert(new_entry != NULL && new_entry->is_Proj(), "IfTrue or IfFalse after clone predicate"); + } + if (TraceLoopPredicate) { + tty->print_cr("Loop Predicate %s: ", move_predicates ? "moved" : "cloned"); + debug_only( new_entry->in(0)->dump(); ) + } + } + } + if (limit_check_proj != NULL && clone_limit_check) { + // Clone loop limit check last to insert it before loop. + // Don't clone a limit check which was already finalized + // for this counted loop (only one limit check is needed). + if (move_predicates) { + new_entry = move_predicate(limit_check_proj, new_entry, + Deoptimization::Reason_loop_limit_check, + loop_phase, igvn); + assert(new_entry == limit_check_proj, "old limit check fall through projection"); + } else { + new_entry = clone_predicate(limit_check_proj, new_entry, + Deoptimization::Reason_loop_limit_check, + loop_phase, igvn); + assert(new_entry != NULL && new_entry->is_Proj(), "IfTrue or IfFalse after clone limit check"); + } + if (TraceLoopLimitCheck) { + tty->print_cr("Loop Limit Check %s: ", move_predicates ? 
"moved" : "cloned"); + debug_only( new_entry->in(0)->dump(); ) + } + } + return new_entry; +} + +//--------------------------eliminate_loop_predicates----------------------- +void PhaseIdealLoop::eliminate_loop_predicates(Node* entry) { + if (LoopLimitCheck) { + Node* predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); + if (predicate != NULL) { + entry = entry->in(0)->in(0); + } + } + if (UseLoopPredicate) { + ProjNode* predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); + if (predicate_proj != NULL) { // right pattern that can be used by loop predication + Node* n = entry->in(0)->in(1)->in(1); + assert(n->Opcode()==Op_Opaque1, "must be"); + // Remove Opaque1 node from predicates list. + // IGVN will remove this predicate check. + _igvn.replace_node(n, n->in(1)); + } + } +} + +//--------------------------skip_loop_predicates------------------------------ +// Skip related predicates. +Node* PhaseIdealLoop::skip_loop_predicates(Node* entry) { + Node* predicate = NULL; + if (LoopLimitCheck) { + predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); + if (predicate != NULL) { + entry = entry->in(0)->in(0); + } + } + if (UseLoopPredicate) { + predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); + if (predicate != NULL) { // right pattern that can be used by loop predication + IfNode* iff = entry->in(0)->as_If(); + ProjNode* uncommon_proj = iff->proj_out(1 - entry->as_Proj()->_con); + Node* rgn = uncommon_proj->unique_ctrl_out(); + assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct"); + entry = entry->in(0)->in(0); + while (entry != NULL && entry->is_Proj() && entry->in(0)->is_If()) { + uncommon_proj = entry->in(0)->as_If()->proj_out(1 - entry->as_Proj()->_con); + if (uncommon_proj->unique_ctrl_out() != rgn) + break; + entry = entry->in(0)->in(0); + } + } + } + return entry; +} + +//--------------------------find_predicate_insertion_point------------------- +// Find a good location to insert a predicate +ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) { + if (start_c == NULL || !start_c->is_Proj()) + return NULL; + if (is_uncommon_trap_if_pattern(start_c->as_Proj(), reason)) { + return start_c->as_Proj(); + } + return NULL; +} + +//--------------------------find_predicate------------------------------------ +// Find a predicate +Node* PhaseIdealLoop::find_predicate(Node* entry) { + Node* predicate = NULL; + if (LoopLimitCheck) { + predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); + if (predicate != NULL) { // right pattern that can be used by loop predication + return entry; + } + } + if (UseLoopPredicate) { + predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); + if (predicate != NULL) { // right pattern that can be used by loop predication + return entry; + } + } + return NULL; +} + +//------------------------------Invariance----------------------------------- +// Helper class for loop_predication_impl to compute invariance on the fly and +// clone invariants. 
+class Invariance : public StackObj { + VectorSet _visited, _invariant; + Node_Stack _stack; + VectorSet _clone_visited; + Node_List _old_new; // map of old to new (clone) + IdealLoopTree* _lpt; + PhaseIdealLoop* _phase; + + // Helper function to set up the invariance for invariance computation + // If n is a known invariant, set up directly. Otherwise, look up the + // the possibility to push n onto the stack for further processing. + void visit(Node* use, Node* n) { + if (_lpt->is_invariant(n)) { // known invariant + _invariant.set(n->_idx); + } else if (!n->is_CFG()) { + Node *n_ctrl = _phase->ctrl_or_self(n); + Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG + if (_phase->is_dominator(n_ctrl, u_ctrl)) { + _stack.push(n, n->in(0) == NULL ? 1 : 0); + } + } + } + + // Compute invariance for "the_node" and (possibly) all its inputs recursively + // on the fly + void compute_invariance(Node* n) { + assert(_visited.test(n->_idx), "must be"); + visit(n, n); + while (_stack.is_nonempty()) { + Node* n = _stack.node(); + uint idx = _stack.index(); + if (idx == n->req()) { // all inputs are processed + _stack.pop(); + // n is invariant if it's inputs are all invariant + bool all_inputs_invariant = true; + for (uint i = 0; i < n->req(); i++) { + Node* in = n->in(i); + if (in == NULL) continue; + assert(_visited.test(in->_idx), "must have visited input"); + if (!_invariant.test(in->_idx)) { // bad guy + all_inputs_invariant = false; + break; + } + } + if (all_inputs_invariant) { + _invariant.set(n->_idx); // I am a invariant too + } + } else { // process next input + _stack.set_index(idx + 1); + Node* m = n->in(idx); + if (m != NULL && !_visited.test_set(m->_idx)) { + visit(n, m); + } + } + } + } + + // Helper function to set up _old_new map for clone_nodes. + // If n is a known invariant, set up directly ("clone" of n == n). + // Otherwise, push n onto the stack for real cloning. + void clone_visit(Node* n) { + assert(_invariant.test(n->_idx), "must be invariant"); + if (_lpt->is_invariant(n)) { // known invariant + _old_new.map(n->_idx, n); + } else { // to be cloned + assert(!n->is_CFG(), "should not see CFG here"); + _stack.push(n, n->in(0) == NULL ? 1 : 0); + } + } + + // Clone "n" and (possibly) all its inputs recursively + void clone_nodes(Node* n, Node* ctrl) { + clone_visit(n); + while (_stack.is_nonempty()) { + Node* n = _stack.node(); + uint idx = _stack.index(); + if (idx == n->req()) { // all inputs processed, clone n! 
+ _stack.pop(); + // clone invariant node + Node* n_cl = n->clone(); + _old_new.map(n->_idx, n_cl); + _phase->register_new_node(n_cl, ctrl); + for (uint i = 0; i < n->req(); i++) { + Node* in = n_cl->in(i); + if (in == NULL) continue; + n_cl->set_req(i, _old_new[in->_idx]); + } + } else { // process next input + _stack.set_index(idx + 1); + Node* m = n->in(idx); + if (m != NULL && !_clone_visited.test_set(m->_idx)) { + clone_visit(m); // visit the input + } + } + } + } + + public: + Invariance(Arena* area, IdealLoopTree* lpt) : + _lpt(lpt), _phase(lpt->_phase), + _visited(area), _invariant(area), _stack(area, 10 /* guess */), + _clone_visited(area), _old_new(area) + {} + + // Map old to n for invariance computation and clone + void map_ctrl(Node* old, Node* n) { + assert(old->is_CFG() && n->is_CFG(), "must be"); + _old_new.map(old->_idx, n); // "clone" of old is n + _invariant.set(old->_idx); // old is invariant + _clone_visited.set(old->_idx); + } + + // Driver function to compute invariance + bool is_invariant(Node* n) { + if (!_visited.test_set(n->_idx)) + compute_invariance(n); + return (_invariant.test(n->_idx) != 0); + } + + // Driver function to clone invariant + Node* clone(Node* n, Node* ctrl) { + assert(ctrl->is_CFG(), "must be"); + assert(_invariant.test(n->_idx), "must be an invariant"); + if (!_clone_visited.test(n->_idx)) + clone_nodes(n, ctrl); + return _old_new[n->_idx]; + } +}; + +//------------------------------is_range_check_if ----------------------------------- +// Returns true if the predicate of iff is in "scale*iv + offset u< load_range(ptr)" format +// Note: this function is particularly designed for loop predication. We require load_range +// and offset to be loop invariant computed on the fly by "invar" +bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const { + if (!is_loop_exit(iff)) { + return false; + } + if (!iff->in(1)->is_Bool()) { + return false; + } + const BoolNode *bol = iff->in(1)->as_Bool(); + if (bol->_test._test != BoolTest::lt) { + return false; + } + if (!bol->in(1)->is_Cmp()) { + return false; + } + const CmpNode *cmp = bol->in(1)->as_Cmp(); + if (cmp->Opcode() != Op_CmpU) { + return false; + } + Node* range = cmp->in(2); + if (range->Opcode() != Op_LoadRange) { + const TypeInt* tint = phase->_igvn.type(range)->isa_int(); + if (tint == NULL || tint->empty() || tint->_lo < 0) { + // Allow predication on positive values that aren't LoadRanges. + // This allows optimization of loops where the length of the + // array is a known value and doesn't need to be loaded back + // from the array. + return false; + } + } + if (!invar.is_invariant(range)) { + return false; + } + Node *iv = _head->as_CountedLoop()->phi(); + int scale = 0; + Node *offset = NULL; + if (!phase->is_scaled_iv_plus_offset(cmp->in(1), iv, &scale, &offset)) { + return false; + } + if (offset && !invar.is_invariant(offset)) { // offset must be invariant + return false; + } + return true; +} + +//------------------------------rc_predicate----------------------------------- +// Create a range check predicate +// +// for (i = init; i < limit; i += stride) { +// a[scale*i+offset] +// } +// +// Compute max(scale*i + offset) for init <= i < limit and build the predicate +// as "max(scale*i + offset) u< a.length". 
+// +// There are two cases for max(scale*i + offset): +// (1) stride*scale > 0 +// max(scale*i + offset) = scale*(limit-stride) + offset +// (2) stride*scale < 0 +// max(scale*i + offset) = scale*init + offset +BoolNode* PhaseIdealLoop::rc_predicate(IdealLoopTree *loop, Node* ctrl, + int scale, Node* offset, + Node* init, Node* limit, Node* stride, + Node* range, bool upper) { + stringStream* predString = NULL; + if (TraceLoopPredicate) { + predString = new stringStream(); + predString->print("rc_predicate "); + } + + Node* max_idx_expr = init; + int stride_con = stride->get_int(); + if ((stride_con > 0) == (scale > 0) == upper) { + if (LoopLimitCheck) { + // With LoopLimitCheck limit is not exact. + // Calculate exact limit here. + // Note, counted loop's test is '<' or '>'. + limit = exact_limit(loop); + max_idx_expr = new (C, 3) SubINode(limit, stride); + register_new_node(max_idx_expr, ctrl); + if (TraceLoopPredicate) predString->print("(limit - stride) "); + } else { + max_idx_expr = new (C, 3) SubINode(limit, stride); + register_new_node(max_idx_expr, ctrl); + if (TraceLoopPredicate) predString->print("(limit - stride) "); + } + } else { + if (TraceLoopPredicate) predString->print("init "); + } + + if (scale != 1) { + ConNode* con_scale = _igvn.intcon(scale); + max_idx_expr = new (C, 3) MulINode(max_idx_expr, con_scale); + register_new_node(max_idx_expr, ctrl); + if (TraceLoopPredicate) predString->print("* %d ", scale); + } + + if (offset && (!offset->is_Con() || offset->get_int() != 0)){ + max_idx_expr = new (C, 3) AddINode(max_idx_expr, offset); + register_new_node(max_idx_expr, ctrl); + if (TraceLoopPredicate) + if (offset->is_Con()) predString->print("+ %d ", offset->get_int()); + else predString->print("+ offset "); + } + + CmpUNode* cmp = new (C, 3) CmpUNode(max_idx_expr, range); + register_new_node(cmp, ctrl); + BoolNode* bol = new (C, 2) BoolNode(cmp, BoolTest::lt); + register_new_node(bol, ctrl); + + if (TraceLoopPredicate) { + predString->print_cr("<u range"); + tty->print(predString->as_string()); + } + return bol; +} + +//------------------------------ loop_predication_impl-------------------------- +// Insert loop predicates for null checks and range checks +bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) { + if (!UseLoopPredicate) return false; + + if (!loop->_head->is_Loop()) { + // Could be a simple region when irreducible loops are present. + return false; + } + LoopNode* head = loop->_head->as_Loop(); + + if (head->unique_ctrl_out()->Opcode() == Op_NeverBranch) { + // do nothing for infinite loops + return false; + } + + CountedLoopNode *cl = NULL; + if (head->is_CountedLoop()) { + cl = head->as_CountedLoop(); + // do nothing for iteration-splitted loops + if (!cl->is_normal_loop()) return false; + } + + Node* entry = head->in(LoopNode::EntryControl); + ProjNode *predicate_proj = NULL; + // Loop limit check predicate should be near the loop. 
+ if (LoopLimitCheck) { + predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); + if (predicate_proj != NULL) + entry = predicate_proj->in(0)->in(0); + } + + predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); + if (!predicate_proj) { +#ifndef PRODUCT + if (TraceLoopPredicate) { + tty->print("missing predicate:"); + loop->dump_head(); + head->dump(1); + } +#endif + return false; + } + ConNode* zero = _igvn.intcon(0); + set_ctrl(zero, C->root()); + + ResourceArea *area = Thread::current()->resource_area(); + Invariance invar(area, loop); + + // Create list of if-projs such that a newer proj dominates all older + // projs in the list, and they all dominate loop->tail() + Node_List if_proj_list(area); + Node *current_proj = loop->tail(); //start from tail + while (current_proj != head) { + if (loop == get_loop(current_proj) && // still in the loop ? + current_proj->is_Proj() && // is a projection ? + current_proj->in(0)->Opcode() == Op_If) { // is a if projection ? + if_proj_list.push(current_proj); + } + current_proj = idom(current_proj); + } + + bool hoisted = false; // true if at least one proj is promoted + while (if_proj_list.size() > 0) { + // Following are changed to nonnull when a predicate can be hoisted + ProjNode* new_predicate_proj = NULL; + + ProjNode* proj = if_proj_list.pop()->as_Proj(); + IfNode* iff = proj->in(0)->as_If(); + + if (!is_uncommon_trap_if_pattern(proj, Deoptimization::Reason_none)) { + if (loop->is_loop_exit(iff)) { + // stop processing the remaining projs in the list because the execution of them + // depends on the condition of "iff" (iff->in(1)). + break; + } else { + // Both arms are inside the loop. There are two cases: + // (1) there is one backward branch. In this case, any remaining proj + // in the if_proj list post-dominates "iff". So, the condition of "iff" + // does not determine the execution the remining projs directly, and we + // can safely continue. + // (2) both arms are forwarded, i.e. a diamond shape. In this case, "proj" + // does not dominate loop->tail(), so it can not be in the if_proj list. + continue; + } + } + + Node* test = iff->in(1); + if (!test->is_Bool()){ //Conv2B, ... + continue; + } + BoolNode* bol = test->as_Bool(); + if (invar.is_invariant(bol)) { + // Invariant test + new_predicate_proj = create_new_if_for_predicate(predicate_proj, NULL, + Deoptimization::Reason_predicate); + Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0); + BoolNode* new_predicate_bol = invar.clone(bol, ctrl)->as_Bool(); + + // Negate test if necessary + bool negated = false; + if (proj->_con != predicate_proj->_con) { + new_predicate_bol = new (C, 2) BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate()); + register_new_node(new_predicate_bol, ctrl); + negated = true; + } + IfNode* new_predicate_iff = new_predicate_proj->in(0)->as_If(); + _igvn.hash_delete(new_predicate_iff); + new_predicate_iff->set_req(1, new_predicate_bol); +#ifndef PRODUCT + if (TraceLoopPredicate) { + tty->print("Predicate invariant if%s: %d ", negated ? 
" negated" : "", new_predicate_iff->_idx); + loop->dump_head(); + } else if (TraceLoopOpts) { + tty->print("Predicate IC "); + loop->dump_head(); + } +#endif + } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) { + assert(proj->_con == predicate_proj->_con, "must match"); + + // Range check for counted loops + const Node* cmp = bol->in(1)->as_Cmp(); + Node* idx = cmp->in(1); + assert(!invar.is_invariant(idx), "index is variant"); + Node* rng = cmp->in(2); + assert(rng->Opcode() == Op_LoadRange || _igvn.type(rng)->is_int() >= 0, "must be"); + assert(invar.is_invariant(rng), "range must be invariant"); + int scale = 1; + Node* offset = zero; + bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset); + assert(ok, "must be index expression"); + + Node* init = cl->init_trip(); + Node* limit = cl->limit(); + Node* stride = cl->stride(); + + // Build if's for the upper and lower bound tests. The + // lower_bound test will dominate the upper bound test and all + // cloned or created nodes will use the lower bound test as + // their declared control. + ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate); + ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate); + assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate"); + Node *ctrl = lower_bound_proj->in(0)->as_If()->in(0); + + // Perform cloning to keep Invariance state correct since the + // late schedule will place invariant things in the loop. + rng = invar.clone(rng, ctrl); + if (offset && offset != zero) { + assert(invar.is_invariant(offset), "offset must be loop invariant"); + offset = invar.clone(offset, ctrl); + } + + // Test the lower bound + Node* lower_bound_bol = rc_predicate(loop, ctrl, scale, offset, init, limit, stride, rng, false); + IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If(); + _igvn.hash_delete(lower_bound_iff); + lower_bound_iff->set_req(1, lower_bound_bol); + if (TraceLoopPredicate) tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx); + + // Test the upper bound + Node* upper_bound_bol = rc_predicate(loop, ctrl, scale, offset, init, limit, stride, rng, true); + IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If(); + _igvn.hash_delete(upper_bound_iff); + upper_bound_iff->set_req(1, upper_bound_bol); + if (TraceLoopPredicate) tty->print_cr("upper bound check if: %d", lower_bound_iff->_idx); + + // Fall through into rest of the clean up code which will move + // any dependent nodes onto the upper bound test. + new_predicate_proj = upper_bound_proj; + +#ifndef PRODUCT + if (TraceLoopOpts && !TraceLoopPredicate) { + tty->print("Predicate RC "); + loop->dump_head(); + } +#endif + } else { + // Loop variant check (for example, range check in non-counted loop) + // with uncommon trap. 
+ continue; + } + assert(new_predicate_proj != NULL, "sanity"); + // Success - attach condition (new_predicate_bol) to predicate if + invar.map_ctrl(proj, new_predicate_proj); // so that invariance test can be appropriate + + // Eliminate the old If in the loop body + dominated_by( new_predicate_proj, iff, proj->_con != new_predicate_proj->_con ); + + hoisted = true; + C->set_major_progress(); + } // end while + +#ifndef PRODUCT + // Report that loop predication was actually performed + // for this loop + if (TraceLoopPredicate && hoisted) { + tty->print("Loop Predication Performed:"); + loop->dump_head(); + } +#endif + + return hoisted; +} + +//------------------------------loop_predication-------------------------------- +// Driver routine for the loop predication optimization +bool IdealLoopTree::loop_predication( PhaseIdealLoop *phase) { + bool hoisted = false; + // Recursively promote predicates + if (_child) { + hoisted = _child->loop_predication( phase); + } + + // self + if (!_irreducible && !tail()->is_top()) { + hoisted |= phase->loop_predication_impl(this); + } + + if (_next) { //sibling + hoisted |= _next->loop_predication( phase); + } + + return hoisted; +}
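Loop predication, as implemented in loopPredicate.cpp above, hoists loop-invariant null and range checks out of the loop body and replaces them with predicates at the loop entry that fire an uncommon trap (deoptimization back to the interpreter) when they fail. A minimal Java sketch of the effect for the simplest range-check case (scale = 1, offset = 0, stride = 1); the eager throw is only a stand-in for the uncommon trap, whereas the real VM re-executes the loop in the interpreter so the exception is still raised at the correct iteration:

    // Illustration only: the observable effect of hoisting a range check out of a counted loop.
    class LoopPredicationSketch {
        // Before predication: the implicit bounds check runs on every iteration.
        static int sumBefore(int[] a, int n) {
            int s = 0;
            for (int i = 0; i < n; i++) {
                s += a[i];                 // checked: i u< a.length, each iteration
            }
            return s;
        }

        // After predication (conceptual): rc_predicate builds "max(scale*i + offset) u< a.length";
        // with scale = 1, offset = 0, stride = 1 the maximum index is limit - stride = n - 1.
        static int sumAfter(int[] a, int n) {
            if (n > 0 && !(n - 1 < a.length)) {
                throw new ArrayIndexOutOfBoundsException(); // stand-in for the hoisted predicate's uncommon trap
            }
            int s = 0;
            for (int i = 0; i < n; i++) {
                s += a[i];                 // bounds check eliminated from the loop body
            }
            return s;
        }
    }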
--- a/src/share/vm/opto/loopTransform.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/loopTransform.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,6 +63,46 @@ } } +//------------------------------compute_exact_trip_count----------------------- +// Compute loop exact trip count if possible. Do not recalculate trip count for +// split loops (pre-main-post) which have their limits and inits behind Opaque node. +void IdealLoopTree::compute_exact_trip_count( PhaseIdealLoop *phase ) { + if (!_head->as_Loop()->is_valid_counted_loop()) { + return; + } + CountedLoopNode* cl = _head->as_CountedLoop(); + // Trip count may become nonexact for iteration split loops since + // RCE modifies limits. Note, _trip_count value is not reset since + // it is used to limit unrolling of main loop. + cl->set_nonexact_trip_count(); + + // Loop's test should be part of loop. + if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)))) + return; // Infinite loop + +#ifdef ASSERT + BoolTest::mask bt = cl->loopexit()->test_trip(); + assert(bt == BoolTest::lt || bt == BoolTest::gt || + (bt == BoolTest::ne && !LoopLimitCheck), "canonical test is expected"); +#endif + + Node* init_n = cl->init_trip(); + Node* limit_n = cl->limit(); + if (init_n != NULL && init_n->is_Con() && + limit_n != NULL && limit_n->is_Con()) { + // Use longs to avoid integer overflow. + int stride_con = cl->stride_con(); + long init_con = cl->init_trip()->get_int(); + long limit_con = cl->limit()->get_int(); + int stride_m = stride_con - (stride_con > 0 ? 1 : -1); + long trip_count = (limit_con - init_con + stride_m)/stride_con; + if (trip_count > 0 && (julong)trip_count < (julong)max_juint) { + // Set exact trip count. + cl->set_exact_trip_count((uint)trip_count); + } + } +} + //------------------------------compute_profile_trip_cnt---------------------------- // Compute loop trip count from profile data as // (backedge_count + loop_exit_count) / loop_exit_count @@ -301,6 +341,132 @@ // peeled-loop backedge has 2 users. // Step 3: Cut the backedge on the clone (so its not a loop) and remove the // extra backedge user. 
+// +// orig +// +// stmt1 +// | +// v +// loop predicate +// | +// v +// loop<----+ +// | | +// stmt2 | +// | | +// v | +// if ^ +// / \ | +// / \ | +// v v | +// false true | +// / \ | +// / ----+ +// | +// v +// exit +// +// +// after clone loop +// +// stmt1 +// | +// v +// loop predicate +// / \ +// clone / \ orig +// / \ +// / \ +// v v +// +---->loop clone loop<----+ +// | | | | +// | stmt2 clone stmt2 | +// | | | | +// | v v | +// ^ if clone If ^ +// | / \ / \ | +// | / \ / \ | +// | v v v v | +// | true false false true | +// | / \ / \ | +// +---- \ / ----+ +// \ / +// 1v v2 +// region +// | +// v +// exit +// +// +// after peel and predicate move +// +// stmt1 +// / +// / +// clone / orig +// / +// / +----------+ +// / | | +// / loop predicate | +// / | | +// v v | +// TOP-->loop clone loop<----+ | +// | | | | +// stmt2 clone stmt2 | | +// | | | ^ +// v v | | +// if clone If ^ | +// / \ / \ | | +// / \ / \ | | +// v v v v | | +// true false false true | | +// | \ / \ | | +// | \ / ----+ ^ +// | \ / | +// | 1v v2 | +// v region | +// | | | +// | v | +// | exit | +// | | +// +--------------->-----------------+ +// +// +// final graph +// +// stmt1 +// | +// v +// stmt2 clone +// | +// v +// if clone +// / | +// / | +// v v +// false true +// | | +// | v +// | loop predicate +// | | +// | v +// | loop<----+ +// | | | +// | stmt2 | +// | | | +// | v | +// v if ^ +// | / \ | +// | / \ | +// | v v | +// | false true | +// | | \ | +// v v --+ +// region +// | +// v +// exit +// void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) { C->set_major_progress(); @@ -315,9 +481,10 @@ loop->dump_head(); } #endif - Node *h = loop->_head; - if (h->is_CountedLoop()) { - CountedLoopNode *cl = h->as_CountedLoop(); + Node* head = loop->_head; + bool counted_loop = head->is_CountedLoop(); + if (counted_loop) { + CountedLoopNode *cl = head->as_CountedLoop(); assert(cl->trip_count() > 0, "peeling a fully unrolled loop"); cl->set_trip_count(cl->trip_count() - 1); if (cl->is_main_loop()) { @@ -330,11 +497,11 @@ #endif } } + Node* entry = head->in(LoopNode::EntryControl); // Step 1: Clone the loop body. The clone becomes the peeled iteration. // The pre-loop illegally has 2 control users (old & new loops). - clone_loop( loop, old_new, dom_depth(loop->_head) ); - + clone_loop( loop, old_new, dom_depth(head) ); // Step 2: Make the old-loop fall-in edges point to the peeled iteration. // Do this by making the old-loop fall-in edges act as if they came @@ -342,12 +509,15 @@ // backedges) and then map to the new peeled iteration. This leaves // the pre-loop with only 1 user (the new peeled iteration), but the // peeled-loop backedge has 2 users. - for (DUIterator_Fast jmax, j = loop->_head->fast_outs(jmax); j < jmax; j++) { - Node* old = loop->_head->fast_out(j); - if( old->in(0) == loop->_head && old->req() == 3 && - (old->is_Loop() || old->is_Phi()) ) { - Node *new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx]; - if( !new_exit_value ) // Backedge value is ALSO loop invariant? 
+ Node* new_exit_value = old_new[head->in(LoopNode::LoopBackControl)->_idx]; + new_exit_value = move_loop_predicates(entry, new_exit_value, !counted_loop); + _igvn.hash_delete(head); + head->set_req(LoopNode::EntryControl, new_exit_value); + for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) { + Node* old = head->fast_out(j); + if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) { + new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx]; + if (!new_exit_value ) // Backedge value is ALSO loop invariant? // Then loop body backedge value remains the same. new_exit_value = old->in(LoopNode::LoopBackControl); _igvn.hash_delete(old); @@ -358,12 +528,12 @@ // Step 3: Cut the backedge on the clone (so its not a loop) and remove the // extra backedge user. - Node *nnn = old_new[loop->_head->_idx]; - _igvn.hash_delete(nnn); - nnn->set_req(LoopNode::LoopBackControl, C->top()); - for (DUIterator_Fast j2max, j2 = nnn->fast_outs(j2max); j2 < j2max; j2++) { - Node* use = nnn->fast_out(j2); - if( use->in(0) == nnn && use->req() == 3 && use->is_Phi() ) { + Node* new_head = old_new[head->_idx]; + _igvn.hash_delete(new_head); + new_head->set_req(LoopNode::LoopBackControl, C->top()); + for (DUIterator_Fast j2max, j2 = new_head->fast_outs(j2max); j2 < j2max; j2++) { + Node* use = new_head->fast_out(j2); + if (use->in(0) == new_head && use->req() == 3 && use->is_Phi()) { _igvn.hash_delete(use); use->set_req(LoopNode::LoopBackControl, C->top()); } @@ -371,15 +541,15 @@ // Step 4: Correct dom-depth info. Set to loop-head depth. - int dd = dom_depth(loop->_head); - set_idom(loop->_head, loop->_head->in(1), dd); + int dd = dom_depth(head); + set_idom(head, head->in(1), dd); for (uint j3 = 0; j3 < loop->_body.size(); j3++) { Node *old = loop->_body.at(j3); Node *nnn = old_new[old->_idx]; if (!has_ctrl(nnn)) set_idom(nnn, idom(nnn), dd-1); // While we're at it, remove any SafePoints from the peeled code - if( old->Opcode() == Op_SafePoint ) { + if (old->Opcode() == Op_SafePoint) { Node *nnn = old_new[old->_idx]; lazy_replace(nnn,nnn->in(TypeFunc::Control)); } @@ -392,34 +562,26 @@ loop->record_for_igvn(); } +#define EMPTY_LOOP_SIZE 7 // number of nodes in an empty loop + //------------------------------policy_maximally_unroll------------------------ -// Return exact loop trip count, or 0 if not maximally unrolling +// Calculate exact loop trip count and return true if loop can be maximally +// unrolled. bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const { CountedLoopNode *cl = _head->as_CountedLoop(); assert(cl->is_normal_loop(), ""); - - Node *init_n = cl->init_trip(); - Node *limit_n = cl->limit(); + if (!cl->is_valid_counted_loop()) + return false; // Malformed counted loop - // Non-constant bounds - if (init_n == NULL || !init_n->is_Con() || - limit_n == NULL || !limit_n->is_Con() || - // protect against stride not being a constant - !cl->stride_is_con()) { + if (!cl->has_exact_trip_count()) { + // Trip count is not exact. return false; } - int init = init_n->get_int(); - int limit = limit_n->get_int(); - int span = limit - init; - int stride = cl->stride_con(); - if (init >= limit || stride > span) { - // return a false (no maximally unroll) and the regular unroll/peel - // route will make a small mess which CCP will fold away. - return false; - } - uint trip_count = span/stride; // trip_count can be greater than 2 Gig. 
- assert( (int)trip_count*stride == span, "must divide evenly" ); + uint trip_count = cl->trip_count(); + // Note, max_juint is used to indicate unknown trip count. + assert(trip_count > 1, "one iteration loop should be optimized out already"); + assert(trip_count < max_juint, "exact trip_count should be less than max_uint."); // Real policy: if we maximally unroll, does it get too big? // Allow the unrolled mess to get larger than standard loop @@ -427,17 +589,28 @@ uint body_size = _body.size(); uint unroll_limit = (uint)LoopUnrollLimit * 4; assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits"); - cl->set_trip_count(trip_count); if (trip_count > unroll_limit || body_size > unroll_limit) { return false; } - // Currently we don't have policy to optimize one iteration loops. - // Maximally unrolling transformation is used for that: - // it is peeled and the original loop become non reachable (dead). - if (trip_count == 1) + // Fully unroll a loop with few iterations regardless next + // conditions since following loop optimizations will split + // such loop anyway (pre-main-post). + if (trip_count <= 3) return true; + // Take into account that after unroll conjoined heads and tails will fold, + // otherwise policy_unroll() may allow more unrolling than max unrolling. + uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count; + uint tst_body_size = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE; + if (body_size != tst_body_size) // Check for int overflow + return false; + if (new_body_size > unroll_limit || + // Unrolling can result in a large amount of node construction + new_body_size >= MaxNodeLimit - phase->C->unique()) { + return false; + } + // Do not unroll a loop with String intrinsics code. // String intrinsics are large and have loops. for (uint k = 0; k < _body.size(); k++) { @@ -452,20 +625,12 @@ } // switch } - if (body_size <= unroll_limit) { - uint new_body_size = body_size * trip_count; - if (new_body_size <= unroll_limit && - body_size == new_body_size / trip_count && - // Unrolling can result in a large amount of node construction - new_body_size < MaxNodeLimit - phase->C->unique()) { - return true; // maximally unroll - } - } - - return false; // Do not maximally unroll + return true; // Do maximally unroll } +#define MAX_UNROLL 16 // maximum number of unrolls for main loop + //------------------------------policy_unroll---------------------------------- // Return TRUE or FALSE if the loop should be unrolled or not. Unroll if // the loop is a CountedLoop and the body is small enough. @@ -474,13 +639,18 @@ CountedLoopNode *cl = _head->as_CountedLoop(); assert(cl->is_normal_loop() || cl->is_main_loop(), ""); - // protect against stride not being a constant - if (!cl->stride_is_con()) return false; + if (!cl->is_valid_counted_loop()) + return false; // Malformed counted loop - // protect against over-unrolling - if (cl->trip_count() <= 1) return false; + // Protect against over-unrolling. + // After split at least one iteration will be executed in pre-loop. + if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false; int future_unroll_ct = cl->unrolled_count() * 2; + if (future_unroll_ct > MAX_UNROLL) return false; + + // Check for initial stride being a small enough constant + if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false; // Don't unroll if the next round of unrolling would push us // over the expected trip count of the loop. 
One is subtracted @@ -506,6 +676,7 @@ Node *init_n = cl->init_trip(); Node *limit_n = cl->limit(); + int stride_con = cl->stride_con(); // Non-constant bounds. // Protect against over-unrolling when init or/and limit are not constant // (so that trip_count's init value is maxint) but iv range is known. @@ -515,7 +686,7 @@ if (phi != NULL) { assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi."); const TypeInt* iv_type = phase->_igvn.type(phi)->is_int(); - int next_stride = cl->stride_con() * 2; // stride after this unroll + int next_stride = stride_con * 2; // stride after this unroll if (next_stride > 0) { if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow iv_type->_lo + next_stride > iv_type->_hi) { @@ -530,15 +701,19 @@ } } + // After unroll limit will be adjusted: new_limit = limit-stride. + // Bailout if adjustment overflow. + const TypeInt* limit_type = phase->_igvn.type(limit_n)->is_int(); + if (stride_con > 0 && ((limit_type->_hi - stride_con) >= limit_type->_hi) || + stride_con < 0 && ((limit_type->_lo - stride_con) <= limit_type->_lo)) + return false; // overflow + // Adjust body_size to determine if we unroll or not uint body_size = _body.size(); - // Key test to unroll CaffeineMark's Logic test - int xors_in_loop = 0; // Also count ModL, DivL and MulL which expand mightly for (uint k = 0; k < _body.size(); k++) { Node* n = _body.at(k); switch (n->Opcode()) { - case Op_XorI: xors_in_loop++; break; // CaffeineMark's Logic test case Op_ModL: body_size += 30; break; case Op_DivL: body_size += 30; break; case Op_MulL: body_size += 10; break; @@ -555,14 +730,10 @@ // Check for being too big if (body_size > (uint)LoopUnrollLimit) { - if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true; - // Normal case: loop too big + // Normal case: loop too big return false; } - // Check for stride being a small enough constant - if (abs(cl->stride_con()) > (1<<3)) return false; - // Unroll once! (Each trip will soon do double iterations) return true; } @@ -581,28 +752,31 @@ // Return TRUE or FALSE if the loop should be range-check-eliminated. // Actually we do iteration-splitting, a more powerful form of RCE. bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const { - if( !RangeCheckElimination ) return false; + if (!RangeCheckElimination) return false; CountedLoopNode *cl = _head->as_CountedLoop(); // If we unrolled with no intention of doing RCE and we later // changed our minds, we got no pre-loop. Either we need to // make a new pre-loop, or we gotta disallow RCE. - if( cl->is_main_no_pre_loop() ) return false; // Disallowed for now. + if (cl->is_main_no_pre_loop()) return false; // Disallowed for now. Node *trip_counter = cl->phi(); // Check loop body for tests of trip-counter plus loop-invariant vs // loop-invariant. - for( uint i = 0; i < _body.size(); i++ ) { + for (uint i = 0; i < _body.size(); i++) { Node *iff = _body[i]; - if( iff->Opcode() == Op_If ) { // Test? + if (iff->Opcode() == Op_If) { // Test? 
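// Sketch of the kind of test the scan below is looking for (hypothetical
// Java-level source, illustration only):
//
//   for (int i = init; i < limit; i += stride) {
//     ... a[scale*i + offset] ...   // implicit check: (scale*i + offset) u< a.length
//   }
//
// i.e. an If whose condition compares a linear function of the trip counter
// (scale*i + offset) against something loop invariant; array range checks
// show up as an unsigned CmpU/lt against the array length. Tests using
// BoolTest::ne (rejected a few lines below) cannot be widened into an
// iteration range, so they are not RCE candidates.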
// Comparing trip+off vs limit Node *bol = iff->in(1); - if( bol->req() != 2 ) continue; // dead constant test + if (bol->req() != 2) continue; // dead constant test if (!bol->is_Bool()) { assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only"); continue; } + if (bol->as_Bool()->_test._test == BoolTest::ne) + continue; // not RC + Node *cmp = bol->in(1); Node *rc_exp = cmp->in(1); @@ -898,6 +1072,7 @@ // negative stride use > if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) { + assert(!LoopLimitCheck, "only canonical tests (lt or gt) are expected"); BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt; // Modify pre loop end condition @@ -924,6 +1099,9 @@ main_head->set_main_loop(); if( peel_only ) main_head->set_main_no_pre_loop(); + // Subtract a trip count for the pre-loop. + main_head->set_trip_count(main_head->trip_count() - 1); + // It's difficult to be precise about the trip-counts // for the pre/post loops. They are usually very short, // so guess that 4 trips is a reasonable value. @@ -956,7 +1134,11 @@ tty->print("Unrolling "); loop->dump_head(); } else if (TraceLoopOpts) { - tty->print("Unroll %d ", loop_head->unrolled_count()*2); + if (loop_head->trip_count() < (uint)LoopUnrollLimit) { + tty->print("Unroll %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count()); + } else { + tty->print("Unroll %d ", loop_head->unrolled_count()*2); + } loop->dump_head(); } #endif @@ -971,7 +1153,8 @@ Node *stride = loop_head->stride(); Node *opaq = NULL; - if( adjust_min_trip ) { // If not maximally unrolling, need adjustment + if (adjust_min_trip) { // If not maximally unrolling, need adjustment + // Search for zero-trip guard. assert( loop_head->is_main_loop(), "" ); assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" ); Node *iff = ctrl->in(0); @@ -981,63 +1164,210 @@ Node *cmp = bol->in(1); assert( cmp->Opcode() == Op_CmpI, "" ); opaq = cmp->in(2); - // Occasionally it's possible for a pre-loop Opaque1 node to be + // Occasionally it's possible for a zero-trip guard Opaque1 node to be // optimized away and then another round of loop opts attempted. // We can not optimize this particular loop in that case. - if( opaq->Opcode() != Op_Opaque1 ) - return; // Cannot find pre-loop! Bail out! + if (opaq->Opcode() != Op_Opaque1) + return; // Cannot find zero-trip guard! Bail out! + // Zero-trip test uses an 'opaque' node which is not shared. + assert(opaq->outcnt() == 1 && opaq->in(1) == limit, ""); } C->set_major_progress(); - // Adjust max trip count. The trip count is intentionally rounded - // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll, - // the main, unrolled, part of the loop will never execute as it is protected - // by the min-trip test. See bug 4834191 for a case where we over-unrolled - // and later determined that part of the unrolled loop was dead. - loop_head->set_trip_count(loop_head->trip_count() / 2); + Node* new_limit = NULL; + if (UnrollLimitCheck) { + int stride_con = stride->get_int(); + int stride_p = (stride_con > 0) ? stride_con : -stride_con; + uint old_trip_count = loop_head->trip_count(); + // Verify that unroll policy result is still valid. + assert(old_trip_count > 1 && + (!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity"); - // Double the count of original iterations in the unrolled loop body. 
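// Shape of the zero-trip guard being matched above (sketch; "init_value" is a
// placeholder for whatever trip value enters the main loop):
//
//   CmpI(init_value, Opaque1(limit))
//        |
//   Bool                       // "will the main loop run at least once?"
//        |
//   If -> IfTrue/IfFalse -> main-loop entry control (ctrl)
//
// The limit is hidden behind an Opaque1 node precisely so the guard is not
// shared with anything else and its input can be edited directly when the
// main-loop limit changes; if an earlier pass already optimized the Opaque1
// away, do_unroll() cannot adjust the guard safely and bails out.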
- loop_head->double_unrolled_count(); + // Adjust loop limit to keep valid iterations number after unroll. + // Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride + // which may overflow. + if (!adjust_min_trip) { + assert(old_trip_count > 1 && (old_trip_count & 1) == 0, + "odd trip count for maximally unroll"); + // Don't need to adjust limit for maximally unroll since trip count is even. + } else if (loop_head->has_exact_trip_count() && init->is_Con()) { + // Loop's limit is constant. Loop's init could be constant when pre-loop + // become peeled iteration. + long init_con = init->get_int(); + // We can keep old loop limit if iterations count stays the same: + // old_trip_count == new_trip_count * 2 + // Note: since old_trip_count >= 2 then new_trip_count >= 1 + // so we also don't need to adjust zero trip test. + long limit_con = limit->get_int(); + // (stride_con*2) not overflow since stride_con <= 8. + int new_stride_con = stride_con * 2; + int stride_m = new_stride_con - (stride_con > 0 ? 1 : -1); + long trip_count = (limit_con - init_con + stride_m)/new_stride_con; + // New trip count should satisfy next conditions. + assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity"); + uint new_trip_count = (uint)trip_count; + adjust_min_trip = (old_trip_count != new_trip_count*2); + } + + if (adjust_min_trip) { + // Step 2: Adjust the trip limit if it is called for. + // The adjustment amount is -stride. Need to make sure if the + // adjustment underflows or overflows, then the main loop is skipped. + Node* cmp = loop_end->cmp_node(); + assert(cmp->in(2) == limit, "sanity"); + assert(opaq != NULL && opaq->in(1) == limit, "sanity"); + + // Verify that policy_unroll result is still valid. + const TypeInt* limit_type = _igvn.type(limit)->is_int(); + assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) || + stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity"); - // ----------- - // Step 2: Cut back the trip counter for an unroll amount of 2. - // Loop will normally trip (limit - init)/stride_con. Since it's a - // CountedLoop this is exact (stride divides limit-init exactly). - // We are going to double the loop body, so we want to knock off any - // odd iteration: (trip_cnt & ~1). Then back compute a new limit. - Node *span = new (C, 3) SubINode( limit, init ); - register_new_node( span, ctrl ); - Node *trip = new (C, 3) DivINode( 0, span, stride ); - register_new_node( trip, ctrl ); - Node *mtwo = _igvn.intcon(-2); - set_ctrl(mtwo, C->root()); - Node *rond = new (C, 3) AndINode( trip, mtwo ); - register_new_node( rond, ctrl ); - Node *spn2 = new (C, 3) MulINode( rond, stride ); - register_new_node( spn2, ctrl ); - Node *lim2 = new (C, 3) AddINode( spn2, init ); - register_new_node( lim2, ctrl ); + if (limit->is_Con()) { + // The check in policy_unroll and the assert above guarantee + // no underflow if limit is constant. + new_limit = _igvn.intcon(limit->get_int() - stride_con); + set_ctrl(new_limit, C->root()); + } else { + // Limit is not constant. + if (loop_head->unrolled_count() == 1) { // only for first unroll + // Separate limit by Opaque node in case it is an incremented + // variable from previous loop to avoid using pre-incremented + // value which could increase register pressure. + // Otherwise reorg_offsets() optimization will create a separate + // Opaque node for each use of trip-counter and as result + // zero trip guard limit will be different from loop limit. 
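// Worked example for the constant-bounds case handled above (numbers made up):
//
//   init = 0, limit = 10, stride = 1   =>  old_trip_count = 10
//   new_stride = 2*stride = 2,  stride_m = new_stride - 1 = 1
//   new_trip_count = (10 - 0 + 1) / 2 = 5
//   old_trip_count == 2*new_trip_count, so the existing limit already yields
//   an even number of double-width iterations and adjust_min_trip is cleared.
//
//   With limit = 9 instead: new_trip_count = (9 + 1) / 2 = 5, but 9 != 2*5,
//   so adjust_min_trip stays set and the limit is adjusted by -stride below.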
+ assert(has_ctrl(opaq), "should have it"); + Node* opaq_ctrl = get_ctrl(opaq); + limit = new (C, 2) Opaque2Node( C, limit ); + register_new_node( limit, opaq_ctrl ); + } + if (stride_con > 0 && ((limit_type->_lo - stride_con) < limit_type->_lo) || + stride_con < 0 && ((limit_type->_hi - stride_con) > limit_type->_hi)) { + // No underflow. + new_limit = new (C, 3) SubINode(limit, stride); + } else { + // (limit - stride) may underflow. + // Clamp the adjustment value with MININT or MAXINT: + // + // new_limit = limit-stride + // if (stride > 0) + // new_limit = (limit < new_limit) ? MININT : new_limit; + // else + // new_limit = (limit > new_limit) ? MAXINT : new_limit; + // + BoolTest::mask bt = loop_end->test_trip(); + assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected"); + Node* adj_max = _igvn.intcon((stride_con > 0) ? min_jint : max_jint); + set_ctrl(adj_max, C->root()); + Node* old_limit = NULL; + Node* adj_limit = NULL; + Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL; + if (loop_head->unrolled_count() > 1 && + limit->is_CMove() && limit->Opcode() == Op_CMoveI && + limit->in(CMoveNode::IfTrue) == adj_max && + bol->as_Bool()->_test._test == bt && + bol->in(1)->Opcode() == Op_CmpI && + bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) { + // Loop was unrolled before. + // Optimize the limit to avoid nested CMove: + // use original limit as old limit. + old_limit = bol->in(1)->in(1); + // Adjust previous adjusted limit. + adj_limit = limit->in(CMoveNode::IfFalse); + adj_limit = new (C, 3) SubINode(adj_limit, stride); + } else { + old_limit = limit; + adj_limit = new (C, 3) SubINode(limit, stride); + } + assert(old_limit != NULL && adj_limit != NULL, ""); + register_new_node( adj_limit, ctrl ); // adjust amount + Node* adj_cmp = new (C, 3) CmpINode(old_limit, adj_limit); + register_new_node( adj_cmp, ctrl ); + Node* adj_bool = new (C, 2) BoolNode(adj_cmp, bt); + register_new_node( adj_bool, ctrl ); + new_limit = new (C, 4) CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT); + } + register_new_node(new_limit, ctrl); + } + assert(new_limit != NULL, ""); + // Replace in loop test. + _igvn.hash_delete(cmp); + cmp->set_req(2, new_limit); - // Hammer in the new limit - Node *ctrl2 = loop_end->in(0); - Node *cmp2 = new (C, 3) CmpINode( loop_head->incr(), lim2 ); - register_new_node( cmp2, ctrl2 ); - Node *bol2 = new (C, 2) BoolNode( cmp2, loop_end->test_trip() ); - register_new_node( bol2, ctrl2 ); - _igvn.hash_delete(loop_end); - loop_end->set_req(CountedLoopEndNode::TestValue, bol2); + // Step 3: Find the min-trip test guaranteed before a 'main' loop. + // Make it a 1-trip test (means at least 2 trips). + + // Guard test uses an 'opaque' node which is not shared. Hence I + // can edit it's inputs directly. Hammer in the new limit for the + // minimum-trip guard. + assert(opaq->outcnt() == 1, ""); + _igvn.hash_delete(opaq); + opaq->set_req(1, new_limit); + } + + // Adjust max trip count. The trip count is intentionally rounded + // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll, + // the main, unrolled, part of the loop will never execute as it is protected + // by the min-trip test. See bug 4834191 for a case where we over-unrolled + // and later determined that part of the unrolled loop was dead. + loop_head->set_trip_count(old_trip_count / 2); + + // Double the count of original iterations in the unrolled loop body. 
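// The CMove built above is a saturating "limit - stride"; in plain C terms
// (sketch only, ignoring that signed overflow is formally undefined in C):
//
//   int new_limit = limit - stride;
//   if (stride > 0 && new_limit > limit) new_limit = INT_MIN;  // subtraction wrapped
//   if (stride < 0 && new_limit < limit) new_limit = INT_MAX;  // subtraction wrapped
//
// Clamping to MININT/MAXINT keeps both the zero-trip guard and the exit test
// conservative: if the adjusted limit is not representable, the main loop is
// simply skipped rather than run with a wrong bound.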
+ loop_head->double_unrolled_count(); + + } else { // LoopLimitCheck + + // Adjust max trip count. The trip count is intentionally rounded + // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll, + // the main, unrolled, part of the loop will never execute as it is protected + // by the min-trip test. See bug 4834191 for a case where we over-unrolled + // and later determined that part of the unrolled loop was dead. + loop_head->set_trip_count(loop_head->trip_count() / 2); + + // Double the count of original iterations in the unrolled loop body. + loop_head->double_unrolled_count(); - // Step 3: Find the min-trip test guaranteed before a 'main' loop. - // Make it a 1-trip test (means at least 2 trips). - if( adjust_min_trip ) { - // Guard test uses an 'opaque' node which is not shared. Hence I - // can edit it's inputs directly. Hammer in the new limit for the - // minimum-trip guard. - assert( opaq->outcnt() == 1, "" ); - _igvn.hash_delete(opaq); - opaq->set_req(1, lim2); - } + // ----------- + // Step 2: Cut back the trip counter for an unroll amount of 2. + // Loop will normally trip (limit - init)/stride_con. Since it's a + // CountedLoop this is exact (stride divides limit-init exactly). + // We are going to double the loop body, so we want to knock off any + // odd iteration: (trip_cnt & ~1). Then back compute a new limit. + Node *span = new (C, 3) SubINode( limit, init ); + register_new_node( span, ctrl ); + Node *trip = new (C, 3) DivINode( 0, span, stride ); + register_new_node( trip, ctrl ); + Node *mtwo = _igvn.intcon(-2); + set_ctrl(mtwo, C->root()); + Node *rond = new (C, 3) AndINode( trip, mtwo ); + register_new_node( rond, ctrl ); + Node *spn2 = new (C, 3) MulINode( rond, stride ); + register_new_node( spn2, ctrl ); + new_limit = new (C, 3) AddINode( spn2, init ); + register_new_node( new_limit, ctrl ); + + // Hammer in the new limit + Node *ctrl2 = loop_end->in(0); + Node *cmp2 = new (C, 3) CmpINode( loop_head->incr(), new_limit ); + register_new_node( cmp2, ctrl2 ); + Node *bol2 = new (C, 2) BoolNode( cmp2, loop_end->test_trip() ); + register_new_node( bol2, ctrl2 ); + _igvn.hash_delete(loop_end); + loop_end->set_req(CountedLoopEndNode::TestValue, bol2); + + // Step 3: Find the min-trip test guaranteed before a 'main' loop. + // Make it a 1-trip test (means at least 2 trips). + if( adjust_min_trip ) { + assert( new_limit != NULL, "" ); + // Guard test uses an 'opaque' node which is not shared. Hence I + // can edit it's inputs directly. Hammer in the new limit for the + // minimum-trip guard. + assert( opaq->outcnt() == 1, "" ); + _igvn.hash_delete(opaq); + opaq->set_req(1, new_limit); + } + } // LoopLimitCheck // --------- // Step 4: Clone the loop body. Move it inside the loop. This loop body @@ -1093,6 +1423,7 @@ void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) { CountedLoopNode *cl = loop->_head->as_CountedLoop(); + assert(cl->has_exact_trip_count(), "trip count is not exact"); assert(cl->trip_count() > 0, ""); #ifndef PRODUCT if (TraceLoopOpts) { @@ -1109,6 +1440,7 @@ // Now its tripping an even number of times remaining. Double loop body. // Do not adjust pre-guards; they are not needed and do not exist. 
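// Worked example of the round-down limit computation in the branch above
// (made-up numbers):
//
//   init = 0, limit = 15, stride = 1
//   span = limit - init            = 15
//   trip = span / stride           = 15
//   rond = trip & -2               = 14      // knock off the odd iteration
//   new_limit = rond*stride + init = 14
//
// The body is then doubled, so the main loop runs 7 double-width trips; any
// iteration dropped by the rounding is left for the post-loop to pick up (for
// maximal unrolling the trip count is already even, so nothing is dropped).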
if (cl->trip_count() > 0) { + assert((cl->trip_count() & 1) == 0, "missed peeling"); do_unroll(loop, old_new, false); } } @@ -1121,23 +1453,31 @@ return _phase->dom_lca_internal(ctrl, backedge) == ctrl; } +//------------------------------adjust_limit----------------------------------- +// Helper function for add_constraint(). +Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl) { + // Compute "I :: (limit-offset)/scale" + Node *con = new (C, 3) SubINode(rc_limit, offset); + register_new_node(con, pre_ctrl); + Node *X = new (C, 3) DivINode(0, con, scale); + register_new_node(X, pre_ctrl); + + // Adjust loop limit + loop_limit = (stride_con > 0) + ? (Node*)(new (C, 3) MinINode(loop_limit, X)) + : (Node*)(new (C, 3) MaxINode(loop_limit, X)); + register_new_node(loop_limit, pre_ctrl); + return loop_limit; +} + //------------------------------add_constraint--------------------------------- -// Constrain the main loop iterations so the condition: -// scale_con * I + offset < limit +// Constrain the main loop iterations so the conditions: +// low_limit <= scale_con * I + offset < upper_limit // always holds true. That is, either increase the number of iterations in // the pre-loop or the post-loop until the condition holds true in the main // loop. Stride, scale, offset and limit are all loop invariant. Further, // stride and scale are constants (offset and limit often are). -void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) { - - // Compute "I :: (limit-offset)/scale_con" - Node *con = new (C, 3) SubINode( limit, offset ); - register_new_node( con, pre_ctrl ); - Node *scale = _igvn.intcon(scale_con); - set_ctrl(scale, C->root()); - Node *X = new (C, 3) DivINode( 0, con, scale ); - register_new_node( X, pre_ctrl ); - +void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) { // For positive stride, the pre-loop limit always uses a MAX function // and the main loop a MIN function. For negative stride these are // reversed. @@ -1146,48 +1486,120 @@ // pre-loop must check for underflow and the post-loop for overflow. // Negative stride*scale reverses this; pre-loop checks for overflow and // post-loop for underflow. - if( stride_con*scale_con > 0 ) { - // Compute I < (limit-offset)/scale_con - // Adjust main-loop last iteration to be MIN/MAX(main_loop,X) - *main_limit = (stride_con > 0) - ? (Node*)(new (C, 3) MinINode( *main_limit, X )) - : (Node*)(new (C, 3) MaxINode( *main_limit, X )); - register_new_node( *main_limit, pre_ctrl ); + + Node *scale = _igvn.intcon(scale_con); + set_ctrl(scale, C->root()); + + if ((stride_con^scale_con) >= 0) { // Use XOR to avoid overflow + // The overflow limit: scale*I+offset < upper_limit + // For main-loop compute + // ( if (scale > 0) /* and stride > 0 */ + // I < (upper_limit-offset)/scale + // else /* scale < 0 and stride < 0 */ + // I > (upper_limit-offset)/scale + // ) + // + // (upper_limit-offset) may overflow or underflow. + // But it is fine since main loop will either have + // less iterations or will be skipped in such case. + *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl); - } else { - // Compute (limit-offset)/scale_con + SGN(-scale_con) <= I - // Add the negation of the main-loop constraint to the pre-loop. 
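// adjust_limit() above, written out in scalar terms (sketch):
//
//   X = (rc_limit - offset) / scale;             // iteration bound implied by the range check
//   loop_limit = (stride_con > 0) ? MIN2(loop_limit, X)   // positive stride: clamp from above
//                                 : MAX2(loop_limit, X);  // negative stride: clamp from below
//
// Each call contributes one such clamp, so after all range checks have been
// processed the pre- and main-loop limits are the tightest bounds implied by
// the checks removed from the main body.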
- // See footnote [++] below for a derivation of the limit expression. - Node *incr = _igvn.intcon(scale_con > 0 ? -1 : 1); - set_ctrl(incr, C->root()); - Node *adj = new (C, 3) AddINode( X, incr ); - register_new_node( adj, pre_ctrl ); - *pre_limit = (scale_con > 0) - ? (Node*)new (C, 3) MinINode( *pre_limit, adj ) - : (Node*)new (C, 3) MaxINode( *pre_limit, adj ); - register_new_node( *pre_limit, pre_ctrl ); + // The underflow limit: low_limit <= scale*I+offset. + // For pre-loop compute + // NOT(scale*I+offset >= low_limit) + // scale*I+offset < low_limit + // ( if (scale > 0) /* and stride > 0 */ + // I < (low_limit-offset)/scale + // else /* scale < 0 and stride < 0 */ + // I > (low_limit-offset)/scale + // ) + + if (low_limit->get_int() == -max_jint) { + if (!RangeLimitCheck) return; + // We need this guard when scale*pre_limit+offset >= limit + // due to underflow. So we need execute pre-loop until + // scale*I+offset >= min_int. But (min_int-offset) will + // underflow when offset > 0 and X will be > original_limit + // when stride > 0. To avoid it we replace positive offset with 0. + // + // Also (min_int+1 == -max_int) is used instead of min_int here + // to avoid problem with scale == -1 (min_int/(-1) == min_int). + Node* shift = _igvn.intcon(31); + set_ctrl(shift, C->root()); + Node* sign = new (C, 3) RShiftINode(offset, shift); + register_new_node(sign, pre_ctrl); + offset = new (C, 3) AndINode(offset, sign); + register_new_node(offset, pre_ctrl); + } else { + assert(low_limit->get_int() == 0, "wrong low limit for range check"); + // The only problem we have here when offset == min_int + // since (0-min_int) == min_int. It may be fine for stride > 0 + // but for stride < 0 X will be < original_limit. To avoid it + // max(pre_limit, original_limit) is used in do_range_check(). + } + // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond); + *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl); -// [++] Here's the algebra that justifies the pre-loop limit expression: -// -// NOT( scale_con * I + offset < limit ) -// == -// scale_con * I + offset >= limit -// == -// SGN(scale_con) * I >= (limit-offset)/|scale_con| -// == -// (limit-offset)/|scale_con| <= I * SGN(scale_con) -// == -// (limit-offset)/|scale_con|-1 < I * SGN(scale_con) -// == -// ( if (scale_con > 0) /*common case*/ -// (limit-offset)/scale_con - 1 < I -// else -// (limit-offset)/scale_con + 1 > I -// ) -// ( if (scale_con > 0) /*common case*/ -// (limit-offset)/scale_con + SGN(-scale_con) < I -// else -// (limit-offset)/scale_con + SGN(-scale_con) > I + } else { // stride_con*scale_con < 0 + // For negative stride*scale pre-loop checks for overflow and + // post-loop for underflow. + // + // The overflow limit: scale*I+offset < upper_limit + // For pre-loop compute + // NOT(scale*I+offset < upper_limit) + // scale*I+offset >= upper_limit + // scale*I+offset+1 > upper_limit + // ( if (scale < 0) /* and stride > 0 */ + // I < (upper_limit-(offset+1))/scale + // else /* scale > 0 and stride < 0 */ + // I > (upper_limit-(offset+1))/scale + // ) + // + // (upper_limit-offset-1) may underflow or overflow. + // To avoid it min(pre_limit, original_limit) is used + // in do_range_check() for stride > 0 and max() for < 0. 
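// The RShiftI/AndI pair used above is a branch-free form of (sketch):
//
//   int sign   = offset >> 31;    // -1 if offset < 0, else 0
//   int masked = offset & sign;   // == (offset < 0) ? offset : 0
//
// A positive offset is replaced by 0 before computing (low_limit - offset)/scale,
// so the subtraction cannot run past min_int; a negative offset is passed
// through unchanged. The same trick is applied to (offset+1) in the negative
// stride*scale case below.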
+ Node *one = _igvn.intcon(1); + set_ctrl(one, C->root()); + + Node *plus_one = new (C, 3) AddINode(offset, one); + register_new_node( plus_one, pre_ctrl ); + // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond); + *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl); + + if (low_limit->get_int() == -max_jint) { + if (!RangeLimitCheck) return; + // We need this guard when scale*main_limit+offset >= limit + // due to underflow. So we need execute main-loop while + // scale*I+offset+1 > min_int. But (min_int-offset-1) will + // underflow when (offset+1) > 0 and X will be < main_limit + // when scale < 0 (and stride > 0). To avoid it we replace + // positive (offset+1) with 0. + // + // Also (min_int+1 == -max_int) is used instead of min_int here + // to avoid problem with scale == -1 (min_int/(-1) == min_int). + Node* shift = _igvn.intcon(31); + set_ctrl(shift, C->root()); + Node* sign = new (C, 3) RShiftINode(plus_one, shift); + register_new_node(sign, pre_ctrl); + plus_one = new (C, 3) AndINode(plus_one, sign); + register_new_node(plus_one, pre_ctrl); + } else { + assert(low_limit->get_int() == 0, "wrong low limit for range check"); + // The only problem we have here when offset == max_int + // since (max_int+1) == min_int and (0-min_int) == min_int. + // But it is fine since main loop will either have + // less iterations or will be skipped in such case. + } + // The underflow limit: low_limit <= scale*I+offset. + // For main-loop compute + // scale*I+offset+1 > low_limit + // ( if (scale < 0) /* and stride > 0 */ + // I < (low_limit-(offset+1))/scale + // else /* scale > 0 and stride < 0 */ + // I > (low_limit-(offset+1))/scale + // ) + + *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl); } } @@ -1318,7 +1730,7 @@ Node *cmpzm = bolzm->in(1); assert(cmpzm->is_Cmp(), ""); Node *opqzm = cmpzm->in(2); - // Can not optimize a loop if pre-loop Opaque1 node is optimized + // Can not optimize a loop if zero-trip Opaque1 node is optimized // away and then another round of loop opts attempted. if (opqzm->Opcode() != Op_Opaque1) return; @@ -1353,8 +1765,11 @@ int stride_con = cl->stride_con(); Node *zero = _igvn.intcon(0); Node *one = _igvn.intcon(1); + // Use symmetrical int range [-max_jint,max_jint] + Node *mini = _igvn.intcon(-max_jint); set_ctrl(zero, C->root()); set_ctrl(one, C->root()); + set_ctrl(mini, C->root()); // Range checks that do not dominate the loop backedge (ie. // conditionally executed) can lengthen the pre loop limit beyond @@ -1429,7 +1844,12 @@ if( offset_c == ctrl ) { continue; // Don't rce this check but continue looking for other candidates. } - +#ifdef ASSERT + if (TraceRangeLimitCheck) { + tty->print_cr("RC bool node%s", flip ? " flipped:" : ":"); + bol->dump(2); + } +#endif // At this point we have the expression as: // scale_con * trip_counter + offset :: limit // where scale_con, offset and limit are loop invariant. Trip_counter @@ -1440,17 +1860,11 @@ // Adjust pre and main loop limits to guard the correct iteration set if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests if( b_test._test == BoolTest::lt ) { // Range checks always use lt - // The overflow limit: scale*I+offset < limit - add_constraint( stride_con, scale_con, offset, limit, pre_ctrl, &pre_limit, &main_limit ); - // The underflow limit: 0 <= scale*I+offset. 
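// A source-level picture of what do_range_check() is arranging here
// (illustrative sketch; hypothetical Java-ish code, names made up):
//
//   for (int i = init; i < limit; i++)
//     sum += a[i + off];                   // every access range-checked
//
// becomes, after pre/main/post splitting plus the constraints added above:
//
//   for (i = init; i < pre_limit;  i++) sum += a[i + off];  // pre-loop, checks kept
//   for (        ; i < main_limit; i++) sum += a[i + off];  // main loop: 0 <= i+off < a.length
//                                                           // holds by construction, checks dropped
//   for (        ; i < limit;      i++) sum += a[i + off];  // post-loop, checks kept
//
// pre_limit and main_limit are exactly the values being tightened through
// add_constraint()/adjust_limit() above.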
- // Some math yields: -scale*I-(offset+1) < 0 - Node *plus_one = new (C, 3) AddINode( offset, one ); - register_new_node( plus_one, pre_ctrl ); - Node *neg_offset = new (C, 3) SubINode( zero, plus_one ); - register_new_node( neg_offset, pre_ctrl ); - add_constraint( stride_con, -scale_con, neg_offset, zero, pre_ctrl, &pre_limit, &main_limit ); + // The underflow and overflow limits: 0 <= scale*I+offset < limit + add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit ); if (!conditional_rc) { - conditional_rc = !loop->dominates_backedge(iff); + // (0-offset)/scale could be outside of loop iterations range. + conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck; } } else { #ifndef PRODUCT @@ -1461,21 +1875,33 @@ } } else { // Otherwise work on normal compares switch( b_test._test ) { - case BoolTest::ge: // Convert X >= Y to -X <= -Y + case BoolTest::gt: + // Fall into GE case + case BoolTest::ge: + // Convert (I*scale+offset) >= Limit to (I*(-scale)+(-offset)) <= -Limit scale_con = -scale_con; offset = new (C, 3) SubINode( zero, offset ); register_new_node( offset, pre_ctrl ); limit = new (C, 3) SubINode( zero, limit ); register_new_node( limit, pre_ctrl ); // Fall into LE case - case BoolTest::le: // Convert X <= Y to X < Y+1 - limit = new (C, 3) AddINode( limit, one ); - register_new_node( limit, pre_ctrl ); + case BoolTest::le: + if (b_test._test != BoolTest::gt) { + // Convert X <= Y to X < Y+1 + limit = new (C, 3) AddINode( limit, one ); + register_new_node( limit, pre_ctrl ); + } // Fall into LT case case BoolTest::lt: - add_constraint( stride_con, scale_con, offset, limit, pre_ctrl, &pre_limit, &main_limit ); + // The underflow and overflow limits: MIN_INT <= scale*I+offset < limit + // Note: (MIN_INT+1 == -MAX_INT) is used instead of MIN_INT here + // to avoid problem with scale == -1: MIN_INT/(-1) == MIN_INT. + add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit ); if (!conditional_rc) { - conditional_rc = !loop->dominates_backedge(iff); + // ((MIN_INT+1)-offset)/scale could be outside of loop iterations range. + // Note: negative offset is replaced with 0 but (MIN_INT+1)/scale could + // still be outside of loop range. + conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck; } break; default: @@ -1526,7 +1952,8 @@ // Note:: we are making the main loop limit no longer precise; // need to round up based on stride. - if( stride_con != 1 && stride_con != -1 ) { // Cutout for common case + cl->set_nonexact_trip_count(); + if (!LoopLimitCheck && stride_con != 1 && stride_con != -1) { // Cutout for common case // "Standard" round-up logic: ([main_limit-init+(y-1)]/y)*y+init // Hopefully, compiler will optimize for powers of 2. Node *ctrl = get_ctrl(main_limit); @@ -1631,7 +2058,7 @@ // have on the last iteration. This will break the loop. bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) { // Minimum size must be empty loop - if (_body.size() > 7/*number of nodes in an empty loop*/) + if (_body.size() > EMPTY_LOOP_SIZE) return false; if (!_head->is_CountedLoop()) @@ -1658,8 +2085,19 @@ // main and post loops have explicitly created zero trip guard bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop(); if (needs_guard) { + // Skip guard if values not overlap. 
+ const TypeInt* init_t = phase->_igvn.type(cl->init_trip())->is_int(); + const TypeInt* limit_t = phase->_igvn.type(cl->limit())->is_int(); + int stride_con = cl->stride_con(); + if (stride_con > 0) { + needs_guard = (init_t->_hi >= limit_t->_lo); + } else { + needs_guard = (init_t->_lo <= limit_t->_hi); + } + } + if (needs_guard) { // Check for an obvious zero trip guard. - Node* inctrl = cl->in(LoopNode::EntryControl); + Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl)); if (inctrl->Opcode() == Op_IfTrue) { // The test should look like just the backedge of a CountedLoop Node* iff = inctrl->in(0); @@ -1695,19 +2133,68 @@ // iteration. Then the CountedLoopEnd will collapse (backedge never // taken) and all loop-invariant uses of the exit values will be correct. Node *phi = cl->phi(); - Node *final = new (phase->C, 3) SubINode( cl->limit(), cl->stride() ); + Node *exact_limit = phase->exact_limit(this); + if (exact_limit != cl->limit()) { + // We also need to replace the original limit to collapse loop exit. + Node* cmp = cl->loopexit()->cmp_node(); + assert(cl->limit() == cmp->in(2), "sanity"); + phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist + phase->_igvn.hash_delete(cmp); + cmp->set_req(2, exact_limit); + phase->_igvn._worklist.push(cmp); // put cmp on worklist + } + // Note: the final value after increment should not overflow since + // counted loop has limit check predicate. + Node *final = new (phase->C, 3) SubINode( exact_limit, cl->stride() ); phase->register_new_node(final,cl->in(LoopNode::EntryControl)); phase->_igvn.replace_node(phi,final); phase->C->set_major_progress(); return true; } +//------------------------------policy_do_one_iteration_loop------------------- +// Convert one iteration loop into normal code. +bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) { + if (!_head->as_Loop()->is_valid_counted_loop()) + return false; // Only for counted loop + + CountedLoopNode *cl = _head->as_CountedLoop(); + if (!cl->has_exact_trip_count() || cl->trip_count() != 1) { + return false; + } + +#ifndef PRODUCT + if(TraceLoopOpts) { + tty->print("OneIteration "); + this->dump_head(); + } +#endif + + Node *init_n = cl->init_trip(); +#ifdef ASSERT + // Loop boundaries should be constant since trip count is exact. + assert(init_n->get_int() + cl->stride_con() >= cl->limit()->get_int(), "should be one iteration"); +#endif + // Replace the phi at loop head with the value of the init_trip. + // Then the CountedLoopEnd will collapse (backedge will not be taken) + // and all loop-invariant uses of the exit values will be correct. + phase->_igvn.replace_node(cl->phi(), cl->init_trip()); + phase->C->set_major_progress(); + return true; +} //============================================================================= //------------------------------iteration_split_impl--------------------------- bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) { + // Compute exact loop trip count if possible. + compute_exact_trip_count(phase); + + // Convert one iteration loop into normal code. + if (policy_do_one_iteration_loop(phase)) + return true; + // Check and remove empty loops (spam micro-benchmarks) - if( policy_do_remove_empty_loop(phase) ) + if (policy_do_remove_empty_loop(phase)) return true; // Here we removed an empty loop bool should_peel = policy_peeling(phase); // Should we peel? @@ -1716,40 +2203,40 @@ // Non-counted loops may be peeled; exactly 1 iteration is peeled. 
// This removes loop-invariant tests (usually null checks). - if( !_head->is_CountedLoop() ) { // Non-counted loop + if (!_head->is_CountedLoop()) { // Non-counted loop if (PartialPeelLoop && phase->partial_peel(this, old_new)) { // Partial peel succeeded so terminate this round of loop opts return false; } - if( should_peel ) { // Should we peel? + if (should_peel) { // Should we peel? #ifndef PRODUCT if (PrintOpto) tty->print_cr("should_peel"); #endif phase->do_peeling(this,old_new); - } else if( should_unswitch ) { + } else if (should_unswitch) { phase->do_unswitching(this, old_new); } return true; } CountedLoopNode *cl = _head->as_CountedLoop(); - if( !cl->loopexit() ) return true; // Ignore various kinds of broken loops + if (!cl->loopexit()) return true; // Ignore various kinds of broken loops // Do nothing special to pre- and post- loops - if( cl->is_pre_loop() || cl->is_post_loop() ) return true; + if (cl->is_pre_loop() || cl->is_post_loop()) return true; // Compute loop trip count from profile data compute_profile_trip_cnt(phase); // Before attempting fancy unrolling, RCE or alignment, see if we want // to completely unroll this loop or do loop unswitching. - if( cl->is_normal_loop() ) { + if (cl->is_normal_loop()) { if (should_unswitch) { phase->do_unswitching(this, old_new); return true; } bool should_maximally_unroll = policy_maximally_unroll(phase); - if( should_maximally_unroll ) { + if (should_maximally_unroll) { // Here we did some unrolling and peeling. Eventually we will // completely unroll this loop and it will no longer be a loop. phase->do_maximally_unroll(this,old_new); @@ -1757,6 +2244,12 @@ } } + // Skip next optimizations if running low on nodes. Note that + // policy_unswitching and policy_maximally_unroll have this check. + uint nodes_left = MaxNodeLimit - phase->C->unique(); + if ((2 * _body.size()) > nodes_left) { + return true; + } // Counted loops may be peeled, may need some iterations run up // front for RCE, and may want to align loop refs to a cache @@ -1787,14 +2280,14 @@ // If we have any of these conditions (RCE, alignment, unrolling) met, then // we switch to the pre-/main-/post-loop model. This model also covers // peeling. - if( should_rce || should_align || should_unroll ) { - if( cl->is_normal_loop() ) // Convert to 'pre/main/post' loops + if (should_rce || should_align || should_unroll) { + if (cl->is_normal_loop()) // Convert to 'pre/main/post' loops phase->insert_pre_post_loops(this,old_new, !may_rce_align); // Adjust the pre- and main-loop limits to let the pre and post loops run // with full checks, but the main-loop with no checks. Remove said // checks from the main body. - if( should_rce ) + if (should_rce) phase->do_range_check(this,old_new); // Double loop body for unrolling. Adjust the minimum-trip test (will do @@ -1802,16 +2295,16 @@ // an even number of trips). If we are peeling, we might enable some RCE // and we'd rather unroll the post-RCE'd loop SO... do not unroll if // peeling. - if( should_unroll && !should_peel ) - phase->do_unroll(this,old_new, true); + if (should_unroll && !should_peel) + phase->do_unroll(this,old_new, true); // Adjust the pre-loop limits to align the main body // iterations. 
- if( should_align ) + if (should_align) Unimplemented(); } else { // Else we have an unchanged counted loop - if( should_peel ) // Might want to peel but do nothing else + if (should_peel) // Might want to peel but do nothing else phase->do_peeling(this,old_new); } return true; @@ -1861,651 +2354,8 @@ return true; } -//-------------------------------is_uncommon_trap_proj---------------------------- -// Return true if proj is the form of "proj->[region->..]call_uct" -bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason) { - int path_limit = 10; - assert(proj, "invalid argument"); - Node* out = proj; - for (int ct = 0; ct < path_limit; ct++) { - out = out->unique_ctrl_out(); - if (out == NULL || out->is_Root() || out->is_Start()) - return false; - if (out->is_CallStaticJava()) { - int req = out->as_CallStaticJava()->uncommon_trap_request(); - if (req != 0) { - Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req); - if (trap_reason == reason || reason == Deoptimization::Reason_none) { - return true; - } - } - return false; // don't do further after call - } - } - return false; -} -//-------------------------------is_uncommon_trap_if_pattern------------------------- -// Return true for "if(test)-> proj -> ... -// | -// V -// other_proj->[region->..]call_uct" -// -// "must_reason_predicate" means the uct reason must be Reason_predicate -bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, Deoptimization::DeoptReason reason) { - Node *in0 = proj->in(0); - if (!in0->is_If()) return false; - // Variation of a dead If node. - if (in0->outcnt() < 2) return false; - IfNode* iff = in0->as_If(); - - // we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate - if (reason != Deoptimization::Reason_none) { - if (iff->in(1)->Opcode() != Op_Conv2B || - iff->in(1)->in(1)->Opcode() != Op_Opaque1) { - return false; - } - } - - ProjNode* other_proj = iff->proj_out(1-proj->_con)->as_Proj(); - return is_uncommon_trap_proj(other_proj, reason); -} - -//-------------------------------register_control------------------------- -void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred) { - assert(n->is_CFG(), "must be control node"); - _igvn.register_new_node_with_optimizer(n); - loop->_body.push(n); - set_loop(n, loop); - // When called from beautify_loops() idom is not constructed yet. - if (_idom != NULL) { - set_idom(n, pred, dom_depth(pred)); - } -} - -//------------------------------create_new_if_for_predicate------------------------ -// create a new if above the uct_if_pattern for the predicate to be promoted. -// -// before after -// ---------- ---------- -// ctrl ctrl -// | | -// | | -// v v -// iff new_iff -// / \ / \ -// / \ / \ -// v v v v -// uncommon_proj cont_proj if_uct if_cont -// \ | | | | -// \ | | | | -// v v v | v -// rgn loop | iff -// | | / \ -// | | / \ -// v | v v -// uncommon_trap | uncommon_proj cont_proj -// \ \ | | -// \ \ | | -// v v v v -// rgn loop -// | -// | -// v -// uncommon_trap -// -// -// We will create a region to guard the uct call if there is no one there. -// The true projecttion (if_cont) of the new_iff is returned. -// This code is also used to clone predicates to clonned loops. 
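// Shape recognized by is_uncommon_trap_proj() above (sketch):
//
//   proj --(unique control successors, at most ~10 hops)--> CallStaticJava
//          where uncommon_trap_request() != 0 and the decoded DeoptReason
//          matches the requested reason (or Reason_none was asked for)
//
// Taking such a projection can only end in an uncommon trap, which is what
// lets the other projection of the If be treated as the "stay in compiled
// code" path when predicates are created or cloned below.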
-ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, - Deoptimization::DeoptReason reason) { - assert(is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!"); - IfNode* iff = cont_proj->in(0)->as_If(); - - ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con); - Node *rgn = uncommon_proj->unique_ctrl_out(); - assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct"); - - if (!rgn->is_Region()) { // create a region to guard the call - assert(rgn->is_Call(), "must be call uct"); - CallNode* call = rgn->as_Call(); - IdealLoopTree* loop = get_loop(call); - rgn = new (C, 1) RegionNode(1); - rgn->add_req(uncommon_proj); - register_control(rgn, loop, uncommon_proj); - _igvn.hash_delete(call); - call->set_req(0, rgn); - // When called from beautify_loops() idom is not constructed yet. - if (_idom != NULL) { - set_idom(call, rgn, dom_depth(rgn)); - } - } - - Node* entry = iff->in(0); - if (new_entry != NULL) { - // Clonning the predicate to new location. - entry = new_entry; - } - // Create new_iff - IdealLoopTree* lp = get_loop(entry); - IfNode *new_iff = new (C, 2) IfNode(entry, NULL, iff->_prob, iff->_fcnt); - register_control(new_iff, lp, entry); - Node *if_cont = new (C, 1) IfTrueNode(new_iff); - Node *if_uct = new (C, 1) IfFalseNode(new_iff); - if (cont_proj->is_IfFalse()) { - // Swap - Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp; - } - register_control(if_cont, lp, new_iff); - register_control(if_uct, get_loop(rgn), new_iff); - - // if_uct to rgn - _igvn.hash_delete(rgn); - rgn->add_req(if_uct); - // When called from beautify_loops() idom is not constructed yet. - if (_idom != NULL) { - Node* ridom = idom(rgn); - Node* nrdom = dom_lca(ridom, new_iff); - set_idom(rgn, nrdom, dom_depth(rgn)); - } - // rgn must have no phis - assert(!rgn->as_Region()->has_phi(), "region must have no phis"); - - if (new_entry == NULL) { - // Attach if_cont to iff - _igvn.hash_delete(iff); - iff->set_req(0, if_cont); - if (_idom != NULL) { - set_idom(iff, if_cont, dom_depth(iff)); - } - } - return if_cont->as_Proj(); -} - -//--------------------------find_predicate_insertion_point------------------- -// Find a good location to insert a predicate -ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) { - if (start_c == NULL || !start_c->is_Proj()) - return NULL; - if (is_uncommon_trap_if_pattern(start_c->as_Proj(), reason)) { - return start_c->as_Proj(); - } - return NULL; -} - -//--------------------------find_predicate------------------------------------ -// Find a predicate -Node* PhaseIdealLoop::find_predicate(Node* entry) { - Node* predicate = NULL; - if (UseLoopPredicate) { - predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); - if (predicate != NULL) { // right pattern that can be used by loop predication - assert(entry->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be"); - return entry; - } - } - return NULL; -} - -//------------------------------Invariance----------------------------------- -// Helper class for loop_predication_impl to compute invariance on the fly and -// clone invariants. 
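// The invariance rule the class below implements, written recursively for
// clarity (sketch only; the real code uses an explicit Node_Stack instead of
// recursion, caches results in bit-vectors, and additionally requires a
// candidate node's control to dominate the use before considering it):
//
//   bool invariant(Node* n) {
//     if (lpt->is_invariant(n)) return true;    // already known loop invariant
//     if (n->is_CFG())          return false;   // control inside the loop
//     for (uint i = 0; i < n->req(); i++) {
//       Node* in = n->in(i);
//       if (in != NULL && !invariant(in)) return false;
//     }
//     return true;                              // invariant iff all inputs are
//   }
//
// Invariant expressions found this way are then cloned above the loop so the
// hoisted predicate has legal inputs.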
-class Invariance : public StackObj { - VectorSet _visited, _invariant; - Node_Stack _stack; - VectorSet _clone_visited; - Node_List _old_new; // map of old to new (clone) - IdealLoopTree* _lpt; - PhaseIdealLoop* _phase; - - // Helper function to set up the invariance for invariance computation - // If n is a known invariant, set up directly. Otherwise, look up the - // the possibility to push n onto the stack for further processing. - void visit(Node* use, Node* n) { - if (_lpt->is_invariant(n)) { // known invariant - _invariant.set(n->_idx); - } else if (!n->is_CFG()) { - Node *n_ctrl = _phase->ctrl_or_self(n); - Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG - if (_phase->is_dominator(n_ctrl, u_ctrl)) { - _stack.push(n, n->in(0) == NULL ? 1 : 0); - } - } - } - - // Compute invariance for "the_node" and (possibly) all its inputs recursively - // on the fly - void compute_invariance(Node* n) { - assert(_visited.test(n->_idx), "must be"); - visit(n, n); - while (_stack.is_nonempty()) { - Node* n = _stack.node(); - uint idx = _stack.index(); - if (idx == n->req()) { // all inputs are processed - _stack.pop(); - // n is invariant if it's inputs are all invariant - bool all_inputs_invariant = true; - for (uint i = 0; i < n->req(); i++) { - Node* in = n->in(i); - if (in == NULL) continue; - assert(_visited.test(in->_idx), "must have visited input"); - if (!_invariant.test(in->_idx)) { // bad guy - all_inputs_invariant = false; - break; - } - } - if (all_inputs_invariant) { - _invariant.set(n->_idx); // I am a invariant too - } - } else { // process next input - _stack.set_index(idx + 1); - Node* m = n->in(idx); - if (m != NULL && !_visited.test_set(m->_idx)) { - visit(n, m); - } - } - } - } - - // Helper function to set up _old_new map for clone_nodes. - // If n is a known invariant, set up directly ("clone" of n == n). - // Otherwise, push n onto the stack for real cloning. - void clone_visit(Node* n) { - assert(_invariant.test(n->_idx), "must be invariant"); - if (_lpt->is_invariant(n)) { // known invariant - _old_new.map(n->_idx, n); - } else{ // to be cloned - assert (!n->is_CFG(), "should not see CFG here"); - _stack.push(n, n->in(0) == NULL ? 1 : 0); - } - } - - // Clone "n" and (possibly) all its inputs recursively - void clone_nodes(Node* n, Node* ctrl) { - clone_visit(n); - while (_stack.is_nonempty()) { - Node* n = _stack.node(); - uint idx = _stack.index(); - if (idx == n->req()) { // all inputs processed, clone n! 
- _stack.pop(); - // clone invariant node - Node* n_cl = n->clone(); - _old_new.map(n->_idx, n_cl); - _phase->register_new_node(n_cl, ctrl); - for (uint i = 0; i < n->req(); i++) { - Node* in = n_cl->in(i); - if (in == NULL) continue; - n_cl->set_req(i, _old_new[in->_idx]); - } - } else { // process next input - _stack.set_index(idx + 1); - Node* m = n->in(idx); - if (m != NULL && !_clone_visited.test_set(m->_idx)) { - clone_visit(m); // visit the input - } - } - } - } - - public: - Invariance(Arena* area, IdealLoopTree* lpt) : - _lpt(lpt), _phase(lpt->_phase), - _visited(area), _invariant(area), _stack(area, 10 /* guess */), - _clone_visited(area), _old_new(area) - {} - - // Map old to n for invariance computation and clone - void map_ctrl(Node* old, Node* n) { - assert(old->is_CFG() && n->is_CFG(), "must be"); - _old_new.map(old->_idx, n); // "clone" of old is n - _invariant.set(old->_idx); // old is invariant - _clone_visited.set(old->_idx); - } - - // Driver function to compute invariance - bool is_invariant(Node* n) { - if (!_visited.test_set(n->_idx)) - compute_invariance(n); - return (_invariant.test(n->_idx) != 0); - } - - // Driver function to clone invariant - Node* clone(Node* n, Node* ctrl) { - assert(ctrl->is_CFG(), "must be"); - assert(_invariant.test(n->_idx), "must be an invariant"); - if (!_clone_visited.test(n->_idx)) - clone_nodes(n, ctrl); - return _old_new[n->_idx]; - } -}; - -//------------------------------is_range_check_if ----------------------------------- -// Returns true if the predicate of iff is in "scale*iv + offset u< load_range(ptr)" format -// Note: this function is particularly designed for loop predication. We require load_range -// and offset to be loop invariant computed on the fly by "invar" -bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const { - if (!is_loop_exit(iff)) { - return false; - } - if (!iff->in(1)->is_Bool()) { - return false; - } - const BoolNode *bol = iff->in(1)->as_Bool(); - if (bol->_test._test != BoolTest::lt) { - return false; - } - if (!bol->in(1)->is_Cmp()) { - return false; - } - const CmpNode *cmp = bol->in(1)->as_Cmp(); - if (cmp->Opcode() != Op_CmpU ) { - return false; - } - Node* range = cmp->in(2); - if (range->Opcode() != Op_LoadRange) { - const TypeInt* tint = phase->_igvn.type(range)->isa_int(); - if (!OptimizeFill || tint == NULL || tint->empty() || tint->_lo < 0) { - // Allow predication on positive values that aren't LoadRanges. - // This allows optimization of loops where the length of the - // array is a known value and doesn't need to be loaded back - // from the array. - return false; - } - } - if (!invar.is_invariant(range)) { - return false; - } - Node *iv = _head->as_CountedLoop()->phi(); - int scale = 0; - Node *offset = NULL; - if (!phase->is_scaled_iv_plus_offset(cmp->in(1), iv, &scale, &offset)) { - return false; - } - if(offset && !invar.is_invariant(offset)) { // offset must be invariant - return false; - } - return true; -} - -//------------------------------rc_predicate----------------------------------- -// Create a range check predicate -// -// for (i = init; i < limit; i += stride) { -// a[scale*i+offset] -// } -// -// Compute max(scale*i + offset) for init <= i < limit and build the predicate -// as "max(scale*i + offset) u< a.length". 
-// -// There are two cases for max(scale*i + offset): -// (1) stride*scale > 0 -// max(scale*i + offset) = scale*(limit-stride) + offset -// (2) stride*scale < 0 -// max(scale*i + offset) = scale*init + offset -BoolNode* PhaseIdealLoop::rc_predicate(Node* ctrl, - int scale, Node* offset, - Node* init, Node* limit, Node* stride, - Node* range, bool upper) { - DEBUG_ONLY(ttyLocker ttyl); - if (TraceLoopPredicate) tty->print("rc_predicate "); - - Node* max_idx_expr = init; - int stride_con = stride->get_int(); - if ((stride_con > 0) == (scale > 0) == upper) { - max_idx_expr = new (C, 3) SubINode(limit, stride); - register_new_node(max_idx_expr, ctrl); - if (TraceLoopPredicate) tty->print("(limit - stride) "); - } else { - if (TraceLoopPredicate) tty->print("init "); - } - - if (scale != 1) { - ConNode* con_scale = _igvn.intcon(scale); - max_idx_expr = new (C, 3) MulINode(max_idx_expr, con_scale); - register_new_node(max_idx_expr, ctrl); - if (TraceLoopPredicate) tty->print("* %d ", scale); - } - - if (offset && (!offset->is_Con() || offset->get_int() != 0)){ - max_idx_expr = new (C, 3) AddINode(max_idx_expr, offset); - register_new_node(max_idx_expr, ctrl); - if (TraceLoopPredicate) - if (offset->is_Con()) tty->print("+ %d ", offset->get_int()); - else tty->print("+ offset "); - } - - CmpUNode* cmp = new (C, 3) CmpUNode(max_idx_expr, range); - register_new_node(cmp, ctrl); - BoolNode* bol = new (C, 2) BoolNode(cmp, BoolTest::lt); - register_new_node(bol, ctrl); - - if (TraceLoopPredicate) tty->print_cr("<u range"); - return bol; -} - -//------------------------------ loop_predication_impl-------------------------- -// Insert loop predicates for null checks and range checks -bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) { - if (!UseLoopPredicate) return false; - - if (!loop->_head->is_Loop()) { - // Could be a simple region when irreducible loops are present. - return false; - } - - if (loop->_head->unique_ctrl_out()->Opcode() == Op_NeverBranch) { - // do nothing for infinite loops - return false; - } - - CountedLoopNode *cl = NULL; - if (loop->_head->is_CountedLoop()) { - cl = loop->_head->as_CountedLoop(); - // do nothing for iteration-splitted loops - if (!cl->is_normal_loop()) return false; - } - - LoopNode *lpn = loop->_head->as_Loop(); - Node* entry = lpn->in(LoopNode::EntryControl); - - ProjNode *predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); - if (!predicate_proj) { -#ifndef PRODUCT - if (TraceLoopPredicate) { - tty->print("missing predicate:"); - loop->dump_head(); - lpn->dump(1); - } -#endif - return false; - } - ConNode* zero = _igvn.intcon(0); - set_ctrl(zero, C->root()); - - ResourceArea *area = Thread::current()->resource_area(); - Invariance invar(area, loop); - - // Create list of if-projs such that a newer proj dominates all older - // projs in the list, and they all dominate loop->tail() - Node_List if_proj_list(area); - LoopNode *head = loop->_head->as_Loop(); - Node *current_proj = loop->tail(); //start from tail - while ( current_proj != head ) { - if (loop == get_loop(current_proj) && // still in the loop ? - current_proj->is_Proj() && // is a projection ? - current_proj->in(0)->Opcode() == Op_If) { // is a if projection ? 
- if_proj_list.push(current_proj); - } - current_proj = idom(current_proj); - } - - bool hoisted = false; // true if at least one proj is promoted - while (if_proj_list.size() > 0) { - // Following are changed to nonnull when a predicate can be hoisted - ProjNode* new_predicate_proj = NULL; - - ProjNode* proj = if_proj_list.pop()->as_Proj(); - IfNode* iff = proj->in(0)->as_If(); - - if (!is_uncommon_trap_if_pattern(proj, Deoptimization::Reason_none)) { - if (loop->is_loop_exit(iff)) { - // stop processing the remaining projs in the list because the execution of them - // depends on the condition of "iff" (iff->in(1)). - break; - } else { - // Both arms are inside the loop. There are two cases: - // (1) there is one backward branch. In this case, any remaining proj - // in the if_proj list post-dominates "iff". So, the condition of "iff" - // does not determine the execution the remining projs directly, and we - // can safely continue. - // (2) both arms are forwarded, i.e. a diamond shape. In this case, "proj" - // does not dominate loop->tail(), so it can not be in the if_proj list. - continue; - } - } - - Node* test = iff->in(1); - if (!test->is_Bool()){ //Conv2B, ... - continue; - } - BoolNode* bol = test->as_Bool(); - if (invar.is_invariant(bol)) { - // Invariant test - new_predicate_proj = create_new_if_for_predicate(predicate_proj, NULL, - Deoptimization::Reason_predicate); - Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0); - BoolNode* new_predicate_bol = invar.clone(bol, ctrl)->as_Bool(); - - // Negate test if necessary - bool negated = false; - if (proj->_con != predicate_proj->_con) { - new_predicate_bol = new (C, 2) BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate()); - register_new_node(new_predicate_bol, ctrl); - negated = true; - } - IfNode* new_predicate_iff = new_predicate_proj->in(0)->as_If(); - _igvn.hash_delete(new_predicate_iff); - new_predicate_iff->set_req(1, new_predicate_bol); -#ifndef PRODUCT - if (TraceLoopPredicate) { - tty->print("Predicate invariant if%s: %d ", negated ? " negated" : "", new_predicate_iff->_idx); - loop->dump_head(); - } else if (TraceLoopOpts) { - tty->print("Predicate IC "); - loop->dump_head(); - } -#endif - } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) { - assert(proj->_con == predicate_proj->_con, "must match"); - - // Range check for counted loops - const Node* cmp = bol->in(1)->as_Cmp(); - Node* idx = cmp->in(1); - assert(!invar.is_invariant(idx), "index is variant"); - assert(cmp->in(2)->Opcode() == Op_LoadRange || OptimizeFill, "must be"); - Node* rng = cmp->in(2); - assert(invar.is_invariant(rng), "range must be invariant"); - int scale = 1; - Node* offset = zero; - bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset); - assert(ok, "must be index expression"); - - Node* init = cl->init_trip(); - Node* limit = cl->limit(); - Node* stride = cl->stride(); - - // Build if's for the upper and lower bound tests. The - // lower_bound test will dominate the upper bound test and all - // cloned or created nodes will use the lower bound test as - // their declared control. 
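// The two predicate conditions built below, in scalar terms (sketch; this
// assumes stride and scale have the same sign, otherwise rc_predicate()
// swaps which loop endpoint is used):
//
//   lower bound:  (unsigned)(scale*init           + offset) < a.length
//   upper bound:  (unsigned)(scale*(limit-stride) + offset) < a.length
//
// Each unsigned compare checks both "< length" and ">= 0" at once. All inputs
// are loop invariant, so both tests can sit above the loop; if either fails
// at runtime the uncommon trap path deoptimizes instead of executing the loop
// without its checks.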
- ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate); - ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate); - assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate"); - Node *ctrl = lower_bound_proj->in(0)->as_If()->in(0); - - // Perform cloning to keep Invariance state correct since the - // late schedule will place invariant things in the loop. - rng = invar.clone(rng, ctrl); - if (offset && offset != zero) { - assert(invar.is_invariant(offset), "offset must be loop invariant"); - offset = invar.clone(offset, ctrl); - } - - // Test the lower bound - Node* lower_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, false); - IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If(); - _igvn.hash_delete(lower_bound_iff); - lower_bound_iff->set_req(1, lower_bound_bol); - if (TraceLoopPredicate) tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx); - - // Test the upper bound - Node* upper_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, true); - IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If(); - _igvn.hash_delete(upper_bound_iff); - upper_bound_iff->set_req(1, upper_bound_bol); - if (TraceLoopPredicate) tty->print_cr("upper bound check if: %d", lower_bound_iff->_idx); - - // Fall through into rest of the clean up code which will move - // any dependent nodes onto the upper bound test. - new_predicate_proj = upper_bound_proj; - -#ifndef PRODUCT - if (TraceLoopOpts && !TraceLoopPredicate) { - tty->print("Predicate RC "); - loop->dump_head(); - } -#endif - } else { - // Loop variant check (for example, range check in non-counted loop) - // with uncommon trap. - continue; - } - assert(new_predicate_proj != NULL, "sanity"); - // Success - attach condition (new_predicate_bol) to predicate if - invar.map_ctrl(proj, new_predicate_proj); // so that invariance test can be appropriate - - // Eliminate the old If in the loop body - dominated_by( new_predicate_proj, iff, proj->_con != new_predicate_proj->_con ); - - hoisted = true; - C->set_major_progress(); - } // end while - -#ifndef PRODUCT - // report that the loop predication has been actually performed - // for this loop - if (TraceLoopPredicate && hoisted) { - tty->print("Loop Predication Performed:"); - loop->dump_head(); - } -#endif - - return hoisted; -} - -//------------------------------loop_predication-------------------------------- -// driver routine for loop predication optimization -bool IdealLoopTree::loop_predication( PhaseIdealLoop *phase) { - bool hoisted = false; - // Recursively promote predicates - if ( _child ) { - hoisted = _child->loop_predication( phase); - } - - // self - if (!_irreducible && !tail()->is_top()) { - hoisted |= phase->loop_predication_impl(this); - } - - if ( _next ) { //sibling - hoisted |= _next->loop_predication( phase); - } - - return hoisted; -} - - +//============================================================================= // Process all the loops in the loop tree and replace any fill // patterns with an intrisc version. 
bool PhaseIdealLoop::do_intrinsify_fill() { @@ -2625,9 +2475,12 @@ if (value != head->phi()) { msg = "unhandled shift in address"; } else { - found_index = true; - shift = n; - assert(type2aelembytes(store->as_Mem()->memory_type(), true) == 1 << shift->in(2)->get_int(), "scale should match"); + if (type2aelembytes(store->as_Mem()->memory_type(), true) != (1 << n->in(2)->get_int())) { + msg = "scale doesn't match"; + } else { + found_index = true; + shift = n; + } } } else if (n->Opcode() == Op_ConvI2L && conv == NULL) { if (n->in(1) == head->phi()) { @@ -2762,6 +2615,13 @@ return false; } +#ifndef PRODUCT + if (TraceLoopOpts) { + tty->print("ArrayFill "); + lpt->dump_head(); + } +#endif + // Now replace the whole loop body by a call to a fill routine that // covers the same region as the loop. Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base);
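
The hoisted range-check predicate built by rc_predicate() in the hunk above boils down to an unsigned comparison of the largest index value the loop can produce against the array length. A minimal standalone sketch of that arithmetic (plain C++ with hypothetical names, not HotSpot IR construction):

#include <cstdint>

// Largest value of scale*i + offset reached by the loop, mirroring the two
// cases documented above for the upper-bound test:
//   stride*scale > 0  -> evaluate at i = limit - stride
//   stride*scale < 0  -> evaluate at i = init
static int64_t max_index(int scale, int64_t offset,
                         int64_t init, int64_t limit, int stride) {
  int64_t i = ((int64_t)stride * scale > 0) ? (limit - stride) : init;
  return (int64_t)scale * i + offset;
}

// The test hoisted in front of the loop:  (unsigned)max_index < range.
// A negative index becomes a huge unsigned value, so it fails the test too.
static bool range_check_predicate(int scale, int64_t offset, int64_t init,
                                  int64_t limit, int stride, uint32_t range) {
  return (uint64_t)max_index(scale, offset, init, limit, stride) < range;
}
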
--- a/src/share/vm/opto/loopUnswitch.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/loopUnswitch.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,15 +32,17 @@ // // orig: transformed: // if (invariant-test) then +// predicate predicate // loop loop // stmt1 stmt1 // if (invariant-test) then stmt2 // stmt2 stmt4 // else endloop // stmt3 else -// endif loop [clone] -// stmt4 stmt1 [clone] -// endloop stmt3 +// endif predicate [clone] +// stmt4 loop [clone] +// endloop stmt1 [clone] +// stmt3 // stmt4 [clone] // endloop // endif @@ -124,8 +126,20 @@ ProjNode* proj_true = create_slow_version_of_loop(loop, old_new); - assert(proj_true->is_IfTrue() && proj_true->unique_ctrl_out() == head, "by construction"); - +#ifdef ASSERT + Node* uniqc = proj_true->unique_ctrl_out(); + Node* entry = head->in(LoopNode::EntryControl); + Node* predicate = find_predicate(entry); + if (predicate != NULL && LoopLimitCheck && UseLoopPredicate) { + // We may have two predicates, find first. + entry = find_predicate(entry->in(0)->in(0)); + if (entry != NULL) predicate = entry; + } + if (predicate != NULL) predicate = predicate->in(0); + assert(proj_true->is_IfTrue() && + (predicate == NULL && uniqc == head || + predicate != NULL && uniqc == predicate), "by construction"); +#endif // Increment unswitch count LoopNode* head_clone = old_new[head->_idx]->as_Loop(); int nct = head->unswitch_count() + 1; @@ -208,6 +222,7 @@ ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop, Node_List &old_new) { LoopNode* head = loop->_head->as_Loop(); + bool counted_loop = head->is_CountedLoop(); Node* entry = head->in(LoopNode::EntryControl); _igvn.hash_delete(entry); _igvn._worklist.push(entry); @@ -227,21 +242,24 @@ register_node(ifslow, outer_loop, iff, dom_depth(iff)); // Clone the loop body. The clone becomes the fast loop. The - // original pre-header will (illegally) have 2 control users (old & new loops). + // original pre-header will (illegally) have 3 control users + // (old & new loops & new if). clone_loop(loop, old_new, dom_depth(head), iff); assert(old_new[head->_idx]->is_Loop(), "" ); // Fast (true) control + Node* iffast_pred = clone_loop_predicates(entry, iffast, !counted_loop); _igvn.hash_delete(head); - head->set_req(LoopNode::EntryControl, iffast); - set_idom(head, iffast, dom_depth(head)); + head->set_req(LoopNode::EntryControl, iffast_pred); + set_idom(head, iffast_pred, dom_depth(head)); _igvn._worklist.push(head); // Slow (false) control + Node* ifslow_pred = move_loop_predicates(entry, ifslow, !counted_loop); LoopNode* slow_head = old_new[head->_idx]->as_Loop(); _igvn.hash_delete(slow_head); - slow_head->set_req(LoopNode::EntryControl, ifslow); - set_idom(slow_head, ifslow, dom_depth(slow_head)); + slow_head->set_req(LoopNode::EntryControl, ifslow_pred); + set_idom(slow_head, ifslow_pred, dom_depth(slow_head)); _igvn._worklist.push(slow_head); recompute_dom_depth();
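
Read as source code, the unswitching diagrammed in the comment above lifts an invariant test out of the loop and produces two specialized loop copies; with this change each copy now also keeps its own cloned or moved predicates in front of it. A hypothetical example (not taken from the changeset):

void stmt1(int); void stmt2(int); void stmt3(int); void stmt4(int);

void before_unswitch(bool flag, int n) {
  for (int i = 0; i < n; i++) {
    stmt1(i);
    if (flag) stmt2(i); else stmt3(i);   // invariant-test
    stmt4(i);
  }
}

void after_unswitch(bool flag, int n) {
  if (flag) {
    // predicate (cloned in front of the fast copy)
    for (int i = 0; i < n; i++) { stmt1(i); stmt2(i); stmt4(i); }
  } else {
    // predicate [clone] (moved in front of the slow copy)
    for (int i = 0; i < n; i++) { stmt1(i); stmt3(i); stmt4(i); }
  }
}
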
--- a/src/share/vm/opto/loopnode.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/loopnode.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -206,7 +206,7 @@ // Get backedge compare Node *cmp = test->in(1); int cmp_op = cmp->Opcode(); - if( cmp_op != Op_CmpI ) + if (cmp_op != Op_CmpI) return false; // Avoid pointer & float compares // Find the trip-counter increment & limit. Limit must be loop invariant. @@ -259,7 +259,8 @@ } // Stride must be constant int stride_con = stride->get_int(); - assert(stride_con != 0, "missed some peephole opt"); + if (stride_con == 0) + return false; // missed some peephole opt if (!xphi->is_Phi()) return false; // Too much math on the trip counter @@ -319,7 +320,7 @@ // Count down loop rolls through MAXINT (bt == BoolTest::le || bt == BoolTest::lt) && stride_con < 0 || // Count up loop rolls through MININT - (bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0 ) { + (bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0) { return false; // Bail out } @@ -342,6 +343,136 @@ assert(x->Opcode() == Op_Loop, "regular loops only"); C->print_method("Before CountedLoop", 3); + Node *hook = new (C, 6) Node(6); + + if (LoopLimitCheck) { + + // =================================================== + // Generate loop limit check to avoid integer overflow + // in cases like next (cyclic loops): + // + // for (i=0; i <= max_jint; i++) {} + // for (i=0; i < max_jint; i+=2) {} + // + // + // Limit check predicate depends on the loop test: + // + // for(;i != limit; i++) --> limit <= (max_jint) + // for(;i < limit; i+=stride) --> limit <= (max_jint - stride + 1) + // for(;i <= limit; i+=stride) --> limit <= (max_jint - stride ) + // + + // Check if limit is excluded to do more precise int overflow check. + bool incl_limit = (bt == BoolTest::le || bt == BoolTest::ge); + int stride_m = stride_con - (incl_limit ? 0 : (stride_con > 0 ? 1 : -1)); + + // If compare points directly to the phi we need to adjust + // the compare so that it points to the incr. Limit have + // to be adjusted to keep trip count the same and the + // adjusted limit should be checked for int overflow. + if (phi_incr != NULL) { + stride_m += stride_con; + } + + if (limit->is_Con()) { + int limit_con = limit->get_int(); + if ((stride_con > 0 && limit_con > (max_jint - stride_m)) || + (stride_con < 0 && limit_con < (min_jint - stride_m))) { + // Bailout: it could be integer overflow. + return false; + } + } else if ((stride_con > 0 && limit_t->_hi <= (max_jint - stride_m)) || + (stride_con < 0 && limit_t->_lo >= (min_jint - stride_m))) { + // Limit's type may satisfy the condition, for example, + // when it is an array length. + } else { + // Generate loop's limit check. + // Loop limit check predicate should be near the loop. + ProjNode *limit_check_proj = find_predicate_insertion_point(init_control, Deoptimization::Reason_loop_limit_check); + if (!limit_check_proj) { + // The limit check predicate is not generated if this method trapped here before. 
+#ifdef ASSERT + if (TraceLoopLimitCheck) { + tty->print("missing loop limit check:"); + loop->dump_head(); + x->dump(1); + } +#endif + return false; + } + + IfNode* check_iff = limit_check_proj->in(0)->as_If(); + Node* cmp_limit; + Node* bol; + + if (stride_con > 0) { + cmp_limit = new (C, 3) CmpINode(limit, _igvn.intcon(max_jint - stride_m)); + bol = new (C, 2) BoolNode(cmp_limit, BoolTest::le); + } else { + cmp_limit = new (C, 3) CmpINode(limit, _igvn.intcon(min_jint - stride_m)); + bol = new (C, 2) BoolNode(cmp_limit, BoolTest::ge); + } + cmp_limit = _igvn.register_new_node_with_optimizer(cmp_limit); + bol = _igvn.register_new_node_with_optimizer(bol); + set_subtree_ctrl(bol); + + // Replace condition in original predicate but preserve Opaque node + // so that previous predicates could be found. + assert(check_iff->in(1)->Opcode() == Op_Conv2B && + check_iff->in(1)->in(1)->Opcode() == Op_Opaque1, ""); + Node* opq = check_iff->in(1)->in(1); + _igvn.hash_delete(opq); + opq->set_req(1, bol); + // Update ctrl. + set_ctrl(opq, check_iff->in(0)); + set_ctrl(check_iff->in(1), check_iff->in(0)); + +#ifndef PRODUCT + // report that the loop predication has been actually performed + // for this loop + if (TraceLoopLimitCheck) { + tty->print_cr("Counted Loop Limit Check generated:"); + debug_only( bol->dump(2); ) + } +#endif + } + + if (phi_incr != NULL) { + // If compare points directly to the phi we need to adjust + // the compare so that it points to the incr. Limit have + // to be adjusted to keep trip count the same and we + // should avoid int overflow. + // + // i = init; do {} while(i++ < limit); + // is converted to + // i = init; do {} while(++i < limit+1); + // + limit = gvn->transform(new (C, 3) AddINode(limit, stride)); + } + + // Now we need to canonicalize loop condition. + if (bt == BoolTest::ne) { + assert(stride_con == 1 || stride_con == -1, "simple increment only"); + bt = (stride_con > 0) ? BoolTest::lt : BoolTest::gt; + } + + if (incl_limit) { + // The limit check guaranties that 'limit <= (max_jint - stride)' so + // we can convert 'i <= limit' to 'i < limit+1' since stride != 0. + // + Node* one = (stride_con > 0) ? gvn->intcon( 1) : gvn->intcon(-1); + limit = gvn->transform(new (C, 3) AddINode(limit, one)); + if (bt == BoolTest::le) + bt = BoolTest::lt; + else if (bt == BoolTest::ge) + bt = BoolTest::gt; + else + ShouldNotReachHere(); + } + set_subtree_ctrl( limit ); + + } else { // LoopLimitCheck + // If compare points to incr, we are ok. Otherwise the compare // can directly point to the phi; in this case adjust the compare so that // it points to the incr by adjusting the limit. 
@@ -354,7 +485,6 @@ Node *one_m = gvn->intcon(-1); Node *trip_count = NULL; - Node *hook = new (C, 6) Node(6); switch( bt ) { case BoolTest::eq: ShouldNotReachHere(); @@ -436,6 +566,8 @@ limit = gvn->transform(new (C, 3) AddINode(span,init_trip)); set_subtree_ctrl( limit ); + } // LoopLimitCheck + // Check for SafePoint on backedge and remove Node *sfpt = x->in(LoopNode::LoopBackControl); if (sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) { @@ -526,7 +658,7 @@ // Check for immediately preceding SafePoint and remove Node *sfpt2 = le->in(0); - if( sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2)) + if (sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2)) lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control)); // Free up intermediate goo @@ -536,12 +668,56 @@ assert(l->is_valid_counted_loop(), "counted loop shape is messed up"); assert(l == loop->_head && l->phi() == phi && l->loopexit() == lex, "" ); #endif +#ifndef PRODUCT + if (TraceLoopOpts) { + tty->print("Counted "); + loop->dump_head(); + } +#endif C->print_method("After CountedLoop", 3); return true; } +//----------------------exact_limit------------------------------------------- +Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) { + assert(loop->_head->is_CountedLoop(), ""); + CountedLoopNode *cl = loop->_head->as_CountedLoop(); + + if (!LoopLimitCheck || ABS(cl->stride_con()) == 1 || + cl->limit()->Opcode() == Op_LoopLimit) { + // Old code has exact limit (it could be incorrect in case of int overflow). + // Loop limit is exact with stride == 1. And loop may already have exact limit. + return cl->limit(); + } + Node *limit = NULL; +#ifdef ASSERT + BoolTest::mask bt = cl->loopexit()->test_trip(); + assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected"); +#endif + if (cl->has_exact_trip_count()) { + // Simple case: loop has constant boundaries. + // Use longs to avoid integer overflow. + int stride_con = cl->stride_con(); + long init_con = cl->init_trip()->get_int(); + long limit_con = cl->limit()->get_int(); + julong trip_cnt = cl->trip_count(); + long final_con = init_con + trip_cnt*stride_con; + final_con -= stride_con; + int final_int = (int)final_con; + // The final value should be in integer range since the loop + // is counted and the limit was checked for overflow. + assert(final_con == (long)final_int, "final value should be integer"); + limit = _igvn.intcon(final_int); + } else { + // Create new LoopLimit node to get exact limit (final iv value). + limit = new (C, 4) LoopLimitNode(C, cl->init_trip(), cl->limit(), cl->stride()); + register_new_node(limit, cl->in(LoopNode::EntryControl)); + } + assert(limit != NULL, "sanity"); + return limit; +} //------------------------------Ideal------------------------------------------ // Return a node which is more "ideal" than the current node. 
@@ -567,14 +743,12 @@ #ifndef PRODUCT void CountedLoopNode::dump_spec(outputStream *st) const { LoopNode::dump_spec(st); - if( stride_is_con() ) { + if (stride_is_con()) { st->print("stride: %d ",stride_con()); - } else { - st->print("stride: not constant "); } - if( is_pre_loop () ) st->print("pre of N%d" , _main_idx ); - if( is_main_loop() ) st->print("main of N%d", _idx ); - if( is_post_loop() ) st->print("post of N%d", _main_idx ); + if (is_pre_loop ()) st->print("pre of N%d" , _main_idx); + if (is_main_loop()) st->print("main of N%d", _idx); + if (is_post_loop()) st->print("post of N%d", _main_idx); } #endif @@ -583,7 +757,130 @@ return stride()->bottom_type()->is_int()->get_con(); } - +//============================================================================= +//------------------------------Value----------------------------------------- +const Type *LoopLimitNode::Value( PhaseTransform *phase ) const { + const Type* init_t = phase->type(in(Init)); + const Type* limit_t = phase->type(in(Limit)); + const Type* stride_t = phase->type(in(Stride)); + // Either input is TOP ==> the result is TOP + if (init_t == Type::TOP) return Type::TOP; + if (limit_t == Type::TOP) return Type::TOP; + if (stride_t == Type::TOP) return Type::TOP; + + int stride_con = stride_t->is_int()->get_con(); + if (stride_con == 1) + return NULL; // Identity + + if (init_t->is_int()->is_con() && limit_t->is_int()->is_con()) { + // Use longs to avoid integer overflow. + long init_con = init_t->is_int()->get_con(); + long limit_con = limit_t->is_int()->get_con(); + int stride_m = stride_con - (stride_con > 0 ? 1 : -1); + long trip_count = (limit_con - init_con + stride_m)/stride_con; + long final_con = init_con + stride_con*trip_count; + int final_int = (int)final_con; + // The final value should be in integer range since the loop + // is counted and the limit was checked for overflow. + assert(final_con == (long)final_int, "final value should be integer"); + return TypeInt::make(final_int); + } + + return bottom_type(); // TypeInt::INT +} + +//------------------------------Ideal------------------------------------------ +// Return a node which is more "ideal" than the current node. +Node *LoopLimitNode::Ideal(PhaseGVN *phase, bool can_reshape) { + if (phase->type(in(Init)) == Type::TOP || + phase->type(in(Limit)) == Type::TOP || + phase->type(in(Stride)) == Type::TOP) + return NULL; // Dead + + int stride_con = phase->type(in(Stride))->is_int()->get_con(); + if (stride_con == 1) + return NULL; // Identity + + if (in(Init)->is_Con() && in(Limit)->is_Con()) + return NULL; // Value + + // Delay following optimizations until all loop optimizations + // done to keep Ideal graph simple. + if (!can_reshape || phase->C->major_progress()) + return NULL; + + const TypeInt* init_t = phase->type(in(Init) )->is_int(); + const TypeInt* limit_t = phase->type(in(Limit))->is_int(); + int stride_p; + long lim, ini; + julong max; + if (stride_con > 0) { + stride_p = stride_con; + lim = limit_t->_hi; + ini = init_t->_lo; + max = (julong)max_jint; + } else { + stride_p = -stride_con; + lim = init_t->_hi; + ini = limit_t->_lo; + max = (julong)min_jint; + } + julong range = lim - ini + stride_p; + if (range <= max) { + // Convert to integer expression if it is not overflow. + Node* stride_m = phase->intcon(stride_con - (stride_con > 0 ? 
1 : -1)); + Node *range = phase->transform(new (phase->C, 3) SubINode(in(Limit), in(Init))); + Node *bias = phase->transform(new (phase->C, 3) AddINode(range, stride_m)); + Node *trip = phase->transform(new (phase->C, 3) DivINode(0, bias, in(Stride))); + Node *span = phase->transform(new (phase->C, 3) MulINode(trip, in(Stride))); + return new (phase->C, 3) AddINode(span, in(Init)); // exact limit + } + + if (is_power_of_2(stride_p) || // divisor is 2^n + !Matcher::has_match_rule(Op_LoopLimit)) { // or no specialized Mach node? + // Convert to long expression to avoid integer overflow + // and let igvn optimizer convert this division. + // + Node* init = phase->transform( new (phase->C, 2) ConvI2LNode(in(Init))); + Node* limit = phase->transform( new (phase->C, 2) ConvI2LNode(in(Limit))); + Node* stride = phase->longcon(stride_con); + Node* stride_m = phase->longcon(stride_con - (stride_con > 0 ? 1 : -1)); + + Node *range = phase->transform(new (phase->C, 3) SubLNode(limit, init)); + Node *bias = phase->transform(new (phase->C, 3) AddLNode(range, stride_m)); + Node *span; + if (stride_con > 0 && is_power_of_2(stride_p)) { + // bias >= 0 if stride >0, so if stride is 2^n we can use &(-stride) + // and avoid generating rounding for division. Zero trip guard should + // guarantee that init < limit but sometimes the guard is missing and + // we can get situation when init > limit. Note, for the empty loop + // optimization zero trip guard is generated explicitly which leaves + // only RCE predicate where exact limit is used and the predicate + // will simply fail forcing recompilation. + Node* neg_stride = phase->longcon(-stride_con); + span = phase->transform(new (phase->C, 3) AndLNode(bias, neg_stride)); + } else { + Node *trip = phase->transform(new (phase->C, 3) DivLNode(0, bias, stride)); + span = phase->transform(new (phase->C, 3) MulLNode(trip, stride)); + } + // Convert back to int + Node *span_int = phase->transform(new (phase->C, 2) ConvL2INode(span)); + return new (phase->C, 3) AddINode(span_int, in(Init)); // exact limit + } + + return NULL; // No progress +} + +//------------------------------Identity--------------------------------------- +// If stride == 1 return limit node. +Node *LoopLimitNode::Identity( PhaseTransform *phase ) { + int stride_con = phase->type(in(Stride))->is_int()->get_con(); + if (stride_con == 1 || stride_con == -1) + return in(Limit); + return this; +} + +//============================================================================= //----------------------match_incr_with_optional_truncation-------------------- // Match increment with optional truncation: // CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<8)>>8, or SHORT: ((i+1)<<16)>>16 @@ -864,8 +1161,10 @@ Node *outer = new (phase->C, 3) LoopNode( ctl, _head->in(outer_idx) ); outer = igvn.register_new_node_with_optimizer(outer, _head); phase->set_created_loop_node(); + + Node* pred = phase->clone_loop_predicates(ctl, outer, true); // Outermost loop falls into '_head' loop - _head->set_req(LoopNode::EntryControl, outer); + _head->set_req(LoopNode::EntryControl, pred); _head->del_req(outer_idx); // Split all the Phis up between '_head' loop and 'outer' loop. for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) { @@ -1103,12 +1402,13 @@ // backedges into a private merge point and use the merge point as // the one true backedge. if( _head->req() > 3 ) { - // Merge the many backedges into a single backedge. 
+ // Merge the many backedges into a single backedge but leave + // the hottest backedge as separate edge for the following peel. merge_many_backedges( phase ); result = true; } - // If I am a shared header (multiple backedges), peel off myself loop. + // If I have one hot backedge, peel off myself loop. // I better be the outermost loop. if( _head->req() > 3 ) { split_outer_loop( phase ); @@ -1432,16 +1732,38 @@ tty->print(" "); tty->print("Loop: N%d/N%d ",_head->_idx,_tail->_idx); if (_irreducible) tty->print(" IRREDUCIBLE"); + Node* entry = _head->in(LoopNode::EntryControl); + if (LoopLimitCheck) { + Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); + if (predicate != NULL ) { + tty->print(" limit_check"); + entry = entry->in(0)->in(0); + } + } if (UseLoopPredicate) { - Node* entry = _head->in(LoopNode::EntryControl); - if (entry != NULL && entry->is_Proj() && - PhaseIdealLoop::is_uncommon_trap_if_pattern(entry->as_Proj(), Deoptimization::Reason_predicate)) { + entry = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); + if (entry != NULL) { tty->print(" predicated"); } } if (_head->is_CountedLoop()) { CountedLoopNode *cl = _head->as_CountedLoop(); tty->print(" counted"); + + Node* init_n = cl->init_trip(); + if (init_n != NULL && init_n->is_Con()) + tty->print(" [%d,", cl->init_trip()->get_int()); + else + tty->print(" [int,"); + Node* limit_n = cl->limit(); + if (limit_n != NULL && limit_n->is_Con()) + tty->print("%d),", cl->limit()->get_int()); + else + tty->print("int),"); + int stride_con = cl->stride_con(); + if (stride_con > 0) tty->print("+"); + tty->print("%d", stride_con); + if (cl->is_pre_loop ()) tty->print(" pre" ); if (cl->is_main_loop()) tty->print(" main"); if (cl->is_post_loop()) tty->print(" post"); @@ -1505,10 +1827,15 @@ !loop->tail()->is_top()) { LoopNode* lpn = loop->_head->as_Loop(); Node* entry = lpn->in(LoopNode::EntryControl); - Node* predicate_proj = find_predicate(entry); + Node* predicate_proj = find_predicate(entry); // loop_limit_check first if (predicate_proj != NULL ) { // right pattern that can be used by loop predication assert(entry->in(0)->in(1)->in(1)->Opcode() == Op_Opaque1, "must be"); useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one + entry = entry->in(0)->in(0); + } + predicate_proj = find_predicate(entry); // Predicate + if (predicate_proj != NULL ) { + useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one } } @@ -1519,6 +1846,8 @@ //------------------------eliminate_useless_predicates----------------------------- // Eliminate all inserted predicates if they could not be used by loop predication. +// Note: it will also eliminates loop limits check predicate since it also uses +// Opaque1 node (see Parse::add_predicate()). void PhaseIdealLoop::eliminate_useless_predicates() { if (C->predicate_count() == 0) return; // no predicate left @@ -1541,7 +1870,7 @@ //----------------------------build_and_optimize------------------------------- // Create a PhaseLoop. Build the ideal Loop tree. Map each Ideal Node to // its corresponding LoopNode. If 'optimize' is true, do some loop cleanups. -void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool do_loop_pred) { +void PhaseIdealLoop::build_and_optimize(bool do_split_ifs) { ResourceMark rm; int old_progress = C->major_progress(); @@ -1573,6 +1902,13 @@ // Do not need a safepoint at the top level _ltree_root->_has_sfpt = 1; + // Initialize Dominators. 
+ // Checked in clone_loop_predicate() during beautify_loops(). + _idom_size = 0; + _idom = NULL; + _dom_depth = NULL; + _dom_stk = NULL; + // Empty pre-order array allocate_preorders(); @@ -1698,9 +2034,10 @@ return; } - // some parser-inserted loop predicates could never be used by loop - // predication. Eliminate them before loop optimization - if (UseLoopPredicate) { + // Some parser-inserted loop predicates could never be used by loop + // predication or they were moved away from loop during some optimizations. + // For example, peeling. Eliminate them before next loop optimizations. + if (UseLoopPredicate || LoopLimitCheck) { eliminate_useless_predicates(); } @@ -1750,7 +2087,7 @@ } // Perform loop predication before iteration splitting - if (do_loop_pred && C->has_loops() && !C->major_progress()) { + if (C->has_loops() && !C->major_progress() && (C->predicate_count() > 0)) { _ltree_root->_child->loop_predication(this); } @@ -1793,8 +2130,20 @@ C->set_major_progress(); } - // Convert scalar to superword operations - + // Keep loop predicates and perform optimizations with them + // until no more loop optimizations could be done. + // After that switch predicates off and do more loop optimizations. + if (!C->major_progress() && (C->predicate_count() > 0)) { + C->cleanup_loop_predicates(_igvn); +#ifndef PRODUCT + if (TraceLoopOpts) { + tty->print_cr("PredicatesOff"); + } +#endif + C->set_major_progress(); + } + + // Convert scalar to superword operations at the end of all loop opts. if (UseSuperWord && C->has_loops() && !C->major_progress()) { // SuperWord transform SuperWord sw(this);
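
The overflow hazard that the new LoopLimitCheck predicate guards against can be made concrete with ordinary 32-bit arithmetic. The bounds are the ones derived in the comment earlier in this file: for "i < limit; i += stride" the limit must satisfy limit <= max_jint - stride + 1, and for "i <= limit" it must satisfy limit <= max_jint - stride (stride > 0). A standalone sketch, not HotSpot code:

#include <climits>

// True when the trip counter can never wrap past INT_MAX before the exit
// test fires; 'inclusive' distinguishes  i <= limit  from  i < limit.
bool limit_is_overflow_safe(int limit, int stride, bool inclusive) {
  long long max_safe = (long long)INT_MAX - stride + (inclusive ? 0 : 1);
  return (long long)limit <= max_safe;
}

// Examples matching the cyclic loops cited in the comment:
//   for (i = 0; i <= max_jint; i++)       -> limit_is_overflow_safe(INT_MAX, 1, true)      == false
//   for (i = 0; i <  max_jint; i += 2)    -> limit_is_overflow_safe(INT_MAX, 2, false)     == false
//   for (i = 0; i <  max_jint - 1; i += 2) -> limit_is_overflow_safe(INT_MAX - 1, 2, false) == true
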
--- a/src/share/vm/opto/loopnode.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/loopnode.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -57,7 +57,12 @@ protected: short _loop_flags; // Names for flag bitfields - enum { pre_post_main=0, inner_loop=8, partial_peel_loop=16, partial_peel_failed=32 }; + enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3, + MainHasNoPreLoop=4, + HasExactTripCount=8, + InnerLoop=16, + PartialPeelLoop=32, + PartialPeelFailed=64 }; char _unswitch_count; enum { _unswitch_max=3 }; @@ -65,13 +70,13 @@ // Names for edge indices enum { Self=0, EntryControl, LoopBackControl }; - int is_inner_loop() const { return _loop_flags & inner_loop; } - void set_inner_loop() { _loop_flags |= inner_loop; } + int is_inner_loop() const { return _loop_flags & InnerLoop; } + void set_inner_loop() { _loop_flags |= InnerLoop; } - int is_partial_peel_loop() const { return _loop_flags & partial_peel_loop; } - void set_partial_peel_loop() { _loop_flags |= partial_peel_loop; } - int partial_peel_has_failed() const { return _loop_flags & partial_peel_failed; } - void mark_partial_peel_failed() { _loop_flags |= partial_peel_failed; } + int is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; } + void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; } + int partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; } + void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; } int unswitch_max() { return _unswitch_max; } int unswitch_count() { return _unswitch_count; } @@ -137,8 +142,8 @@ // the Main CountedLoop. Used to assert that we understand the graph shape. node_idx_t _main_idx; - // Known trip count calculated by policy_maximally_unroll - int _trip_count; + // Known trip count calculated by compute_exact_trip_count() + uint _trip_count; // Expected trip count from profile data float _profile_trip_cnt; @@ -152,7 +157,7 @@ public: CountedLoopNode( Node *entry, Node *backedge ) - : LoopNode(entry, backedge), _trip_count(max_jint), + : LoopNode(entry, backedge), _main_idx(0), _trip_count(max_juint), _profile_trip_cnt(COUNT_UNKNOWN), _unrolled_count_log2(0), _node_count_before_unroll(0) { init_class_id(Class_CountedLoop); @@ -194,13 +199,12 @@ // A 'main' loop that is ONLY unrolled or peeled, never RCE'd or // Aligned, may be missing it's pre-loop. 
- enum { Normal=0, Pre=1, Main=2, Post=3, PrePostFlagsMask=3, Main_Has_No_Pre_Loop=4 }; - int is_normal_loop() const { return (_loop_flags&PrePostFlagsMask) == Normal; } - int is_pre_loop () const { return (_loop_flags&PrePostFlagsMask) == Pre; } - int is_main_loop () const { return (_loop_flags&PrePostFlagsMask) == Main; } - int is_post_loop () const { return (_loop_flags&PrePostFlagsMask) == Post; } - int is_main_no_pre_loop() const { return _loop_flags & Main_Has_No_Pre_Loop; } - void set_main_no_pre_loop() { _loop_flags |= Main_Has_No_Pre_Loop; } + int is_normal_loop() const { return (_loop_flags&PreMainPostFlagsMask) == Normal; } + int is_pre_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Pre; } + int is_main_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Main; } + int is_post_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Post; } + int is_main_no_pre_loop() const { return _loop_flags & MainHasNoPreLoop; } + void set_main_no_pre_loop() { _loop_flags |= MainHasNoPreLoop; } int main_idx() const { return _main_idx; } @@ -208,10 +212,19 @@ void set_pre_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Pre ; _main_idx = main->_idx; } void set_main_loop ( ) { assert(is_normal_loop(),""); _loop_flags |= Main; } void set_post_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Post; _main_idx = main->_idx; } - void set_normal_loop( ) { _loop_flags &= ~PrePostFlagsMask; } + void set_normal_loop( ) { _loop_flags &= ~PreMainPostFlagsMask; } + + void set_trip_count(uint tc) { _trip_count = tc; } + uint trip_count() { return _trip_count; } - void set_trip_count(int tc) { _trip_count = tc; } - int trip_count() { return _trip_count; } + bool has_exact_trip_count() const { return (_loop_flags & HasExactTripCount) != 0; } + void set_exact_trip_count(uint tc) { + _trip_count = tc; + _loop_flags |= HasExactTripCount; + } + void set_nonexact_trip_count() { + _loop_flags &= ~HasExactTripCount; + } void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; } float profile_trip_cnt() { return _profile_trip_cnt; } @@ -276,6 +289,28 @@ inline Node *CountedLoopNode::incr() const { return loopexit() ? loopexit()->incr() : NULL; } inline Node *CountedLoopNode::phi() const { return loopexit() ? loopexit()->phi() : NULL; } +//------------------------------LoopLimitNode----------------------------- +// Counted Loop limit node which represents exact final iterator value: +// trip_count = (limit - init_trip + stride - 1)/stride +// final_value= trip_count * stride + init_trip. +// Use HW instructions to calculate it when it can overflow in integer. +// Note, final_value should fit into integer since counted loop has +// limit check: limit <= max_int-stride. +class LoopLimitNode : public Node { + enum { Init=1, Limit=2, Stride=3 }; + public: + LoopLimitNode( Compile* C, Node *init, Node *limit, Node *stride ) : Node(0,init,limit,stride) { + // Put it on the Macro nodes list to optimize during macro nodes expansion. 
+ init_flags(Flag_is_macro); + C->add_macro_node(this); + } + virtual int Opcode() const; + virtual const Type *bottom_type() const { return TypeInt::INT; } + virtual uint ideal_reg() const { return Op_RegI; } + virtual const Type *Value( PhaseTransform *phase ) const; + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); + virtual Node *Identity( PhaseTransform *phase ); +}; // -----------------------------IdealLoopTree---------------------------------- class IdealLoopTree : public ResourceObj { @@ -384,6 +419,9 @@ // Micro-benchmark spamming. Remove empty loops. bool policy_do_remove_empty_loop( PhaseIdealLoop *phase ); + // Convert one iteration loop into normal code. + bool policy_do_one_iteration_loop( PhaseIdealLoop *phase ); + // Return TRUE or FALSE if the loop should be peeled or not. Peel if we can // make some loop-invariant test (usually a null-check) happen before the // loop. @@ -412,6 +450,9 @@ // Return TRUE if "iff" is a range check. bool is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const; + // Compute loop exact trip count if possible + void compute_exact_trip_count( PhaseIdealLoop *phase ); + // Compute loop trip count from profile data void compute_profile_trip_cnt( PhaseIdealLoop *phase ); @@ -706,11 +747,11 @@ _dom_lca_tags(arena()), // Thread::resource_area _verify_me(NULL), _verify_only(true) { - build_and_optimize(false, false); + build_and_optimize(false); } // build the loop tree and perform any requested optimizations - void build_and_optimize(bool do_split_if, bool do_loop_pred); + void build_and_optimize(bool do_split_if); public: // Dominators for the sea of nodes @@ -721,13 +762,13 @@ Node *dom_lca_internal( Node *n1, Node *n2 ) const; // Compute the Ideal Node to Loop mapping - PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs, bool do_loop_pred) : + PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs) : PhaseTransform(Ideal_Loop), _igvn(igvn), _dom_lca_tags(arena()), // Thread::resource_area _verify_me(NULL), _verify_only(false) { - build_and_optimize(do_split_ifs, do_loop_pred); + build_and_optimize(do_split_ifs); } // Verify that verify_me made the same decisions as a fresh run. @@ -737,7 +778,7 @@ _dom_lca_tags(arena()), // Thread::resource_area _verify_me(verify_me), _verify_only(false) { - build_and_optimize(false, false); + build_and_optimize(false); } // Build and verify the loop tree without modifying the graph. This @@ -756,6 +797,8 @@ bool is_counted_loop( Node *x, IdealLoopTree *loop ); + Node* exact_limit( IdealLoopTree *loop ); + // Return a post-walked LoopNode IdealLoopTree *get_loop( Node *n ) const { // Dead nodes have no loop, so return the top level loop instead @@ -818,7 +861,6 @@ bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0); // Return true if proj is for "proj->[region->..]call_uct" - // Return true if proj is for "proj->[region->..]call_uct" static bool is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason); // Return true for "if(test)-> proj -> ... 
// | @@ -830,12 +872,32 @@ Deoptimization::DeoptReason reason); void register_control(Node* n, IdealLoopTree *loop, Node* pred); - // Find a good location to insert a predicate + // Clone loop predicates to cloned loops (peeled, unswitched) + static ProjNode* clone_predicate(ProjNode* predicate_proj, Node* new_entry, + Deoptimization::DeoptReason reason, + PhaseIdealLoop* loop_phase, + PhaseIterGVN* igvn); + static ProjNode* move_predicate(ProjNode* predicate_proj, Node* new_entry, + Deoptimization::DeoptReason reason, + PhaseIdealLoop* loop_phase, + PhaseIterGVN* igvn); + static Node* clone_loop_predicates(Node* old_entry, Node* new_entry, + bool move_predicates, + bool clone_limit_check, + PhaseIdealLoop* loop_phase, + PhaseIterGVN* igvn); + Node* clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check); + Node* move_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check); + + void eliminate_loop_predicates(Node* entry); + static Node* skip_loop_predicates(Node* entry); + + // Find a good location to insert a predicate static ProjNode* find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason); // Find a predicate static Node* find_predicate(Node* entry); // Construct a range check for a predicate if - BoolNode* rc_predicate(Node* ctrl, + BoolNode* rc_predicate(IdealLoopTree *loop, Node* ctrl, int scale, Node* offset, Node* init, Node* limit, Node* stride, Node* range, bool upper); @@ -865,11 +927,13 @@ // Range Check Elimination uses this function! // Constrain the main loop iterations so the affine function: - // scale_con * I + offset < limit + // low_limit <= scale_con * I + offset < upper_limit // always holds true. That is, either increase the number of iterations in // the pre-loop or the post-loop until the condition holds true in the main // loop. Scale_con, offset and limit are all loop invariant. - void add_constraint( int stride_con, int scale_con, Node *offset, Node *limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ); + void add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ); + // Helper function for add_constraint(). + Node* adjust_limit( int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl ); // Partially peel loop up through last_peel node. bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
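
The trip-count formula carried by the new LoopLimitNode (trip_count = (limit - init_trip + stride - 1)/stride, final_value = trip_count * stride + init_trip) can be spelled out in a few lines of 64-bit arithmetic, the same way exact_limit() and LoopLimitNode::Value() avoid intermediate overflow. A sketch with assumed names, not part of the patch:

#include <cstdint>

// First induction-variable value that fails the loop's exit test.
// Assumes the zero-trip guard holds (init is on the entry side of limit).
int64_t exact_final_value(int32_t init, int32_t limit, int32_t stride) {
  int64_t stride_m = stride - (stride > 0 ? 1 : -1);   // round toward the limit
  int64_t trips    = ((int64_t)limit - init + stride_m) / stride;
  return (int64_t)init + trips * stride;
}

// Example: for (i = 0; i < 10; i += 3) executes for i = 0, 3, 6, 9 and exits
// with i == 12, so exact_final_value(0, 10, 3) == 12. The counted loop's
// limit check guarantees this final value still fits in a 32-bit int.
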
--- a/src/share/vm/opto/loopopts.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/loopopts.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2139,9 +2139,12 @@ // // orig // -// stmt1 -// | -// v +// stmt1 +// | +// v +// loop predicate +// | +// v // loop<----+ // | | // stmt2 | @@ -2172,6 +2175,9 @@ // after clone loop // // stmt1 +// | +// v +// loop predicate // / \ // clone / \ orig // / \ @@ -2210,12 +2216,15 @@ // after partial peel // // stmt1 +// | +// v +// loop predicate // / // clone / orig // / TOP // / \ // v v -// TOP->region region----+ +// TOP->loop loop----+ // | | | // stmt2 stmt2 | // | | | @@ -2253,14 +2262,18 @@ // stmt1 // | // v +// loop predicate +// | +// v +// stmt2 clone +// | +// v // ........> ifA clone // : / | // dom / | // : v v // : false true // : | | -// : | stmt2 clone -// : | | // : | v // : | newloop<-----+ // : | | | @@ -2289,6 +2302,7 @@ // bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) { + assert(!loop->_head->is_CountedLoop(), "Non-counted loop only"); if (!loop->_head->is_Loop()) { return false; }
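
Read at the source level, the partial-peel diagrams above rotate a loop whose exit test sits in the middle of the body: the statements before the cut point run once outside the loop, and the loop is re-entered at the test, with the loop predicate staying in front of both versions. A hypothetical example (not from the changeset):

bool condA(); void stmt2(); void stmt3();

void before_partial_peel() {
  while (true) {
    stmt2();               // top half of the body
    if (!condA()) break;   // ifA: the loop exit, in the middle of the body
    stmt3();               // bottom half of the body
  }
}

void after_partial_peel() {
  stmt2();                 // peeled copy of the top half (stmt2 clone)
  while (condA()) {        // rotated loop: exit test now at the top
    stmt3();
    stmt2();
  }
}
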
--- a/src/share/vm/opto/macro.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/macro.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -221,9 +221,16 @@ Node *shift = p2x->unique_out(); Node *addp = shift->unique_out(); for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) { - Node *st = addp->last_out(j); - assert(st->is_Store(), "store required"); - _igvn.replace_node(st, st->in(MemNode::Memory)); + Node *mem = addp->last_out(j); + if (UseCondCardMark && mem->is_Load()) { + assert(mem->Opcode() == Op_LoadB, "unexpected code shape"); + // The load is checking if the card has been written so + // replace it with zero to fold the test. + _igvn.replace_node(mem, intcon(0)); + continue; + } + assert(mem->is_Store(), "store required"); + _igvn.replace_node(mem, mem->in(MemNode::Memory)); } } else { // G1 pre/post barriers @@ -2147,6 +2154,11 @@ debug_only(int old_macro_count = C->macro_count();); if (n->is_AbstractLock()) { success = eliminate_locking_node(n->as_AbstractLock()); + } else if (n->Opcode() == Op_LoopLimit) { + // Remove it from macro list and put on IGVN worklist to optimize. + C->remove_macro_node(n); + _igvn._worklist.push(n); + success = true; } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) { _igvn.replace_node(n, n->in(1)); success = true;
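
For context on the UseCondCardMark branch added above: a conditional card mark guards the card-table store behind a load of the current card value, roughly as in the sketch below (shape only, assumed names). When the allocation is scalar-replaced, that guard load is rewritten to the constant zero as the comment describes, the test folds away, and the card-mark store disappears with it.

#include <cstdint>

// Conditional card mark for a reference store obj.f = val (sketch only).
void cond_card_mark(volatile int8_t* card_table, uintptr_t obj,
                    unsigned card_shift, int8_t dirty) {
  volatile int8_t* card = card_table + (obj >> card_shift);
  if (*card != dirty) {   // the LoadB the macro-expansion code above rewrites
    *card = dirty;        // the guarded card-table store (StoreCM)
  }
}
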
--- a/src/share/vm/opto/matcher.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/matcher.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2086,6 +2086,13 @@ n->del_req(3); break; } + case Op_LoopLimit: { + Node *pair1 = new (C, 3) BinaryNode(n->in(1),n->in(2)); + n->set_req(1,pair1); + n->set_req(2,n->in(3)); + n->del_req(3); + break; + } case Op_StrEquals: { Node *pair1 = new (C, 3) BinaryNode(n->in(2),n->in(3)); n->set_req(2,pair1);
--- a/src/share/vm/opto/matcher.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/matcher.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/memnode.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/memnode.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1259,15 +1259,18 @@ return NULL; // Wait stable graph } uint cnt = mem->req(); - for( uint i = 1; i < cnt; i++ ) { + for (uint i = 1; i < cnt; i++) { + Node* rc = region->in(i); + if (rc == NULL || phase->type(rc) == Type::TOP) + return NULL; // Wait stable graph Node *in = mem->in(i); - if( in == NULL ) { + if (in == NULL) { return NULL; // Wait stable graph } } // Check for loop invariant. if (cnt == 3) { - for( uint i = 1; i < cnt; i++ ) { + for (uint i = 1; i < cnt; i++) { Node *in = mem->in(i); Node* m = MemNode::optimize_memory_chain(in, addr_t, phase); if (m == mem) { @@ -1281,38 +1284,37 @@ // Do nothing here if Identity will find a value // (to avoid infinite chain of value phis generation). - if ( !phase->eqv(this, this->Identity(phase)) ) + if (!phase->eqv(this, this->Identity(phase))) return NULL; // Skip the split if the region dominates some control edge of the address. - if (cnt == 3 && !MemNode::all_controls_dominate(address, region)) + if (!MemNode::all_controls_dominate(address, region)) return NULL; const Type* this_type = this->bottom_type(); int this_index = phase->C->get_alias_index(addr_t); int this_offset = addr_t->offset(); int this_iid = addr_t->is_oopptr()->instance_id(); - int wins = 0; PhaseIterGVN *igvn = phase->is_IterGVN(); Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset); - for( uint i = 1; i < region->req(); i++ ) { + for (uint i = 1; i < region->req(); i++) { Node *x; Node* the_clone = NULL; - if( region->in(i) == phase->C->top() ) { + if (region->in(i) == phase->C->top()) { x = phase->C->top(); // Dead path? Use a dead data op } else { x = this->clone(); // Else clone up the data op the_clone = x; // Remember for possible deletion. // Alter data node to use pre-phi inputs - if( this->in(0) == region ) { - x->set_req( 0, region->in(i) ); + if (this->in(0) == region) { + x->set_req(0, region->in(i)); } else { - x->set_req( 0, NULL ); + x->set_req(0, NULL); } - for( uint j = 1; j < this->req(); j++ ) { + for (uint j = 1; j < this->req(); j++) { Node *in = this->in(j); - if( in->is_Phi() && in->in(0) == region ) - x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone + if (in->is_Phi() && in->in(0) == region) + x->set_req(j, in->in(i)); // Use pre-Phi input for the clone } } // Check for a 'win' on some paths @@ -1321,12 +1323,11 @@ bool singleton = t->singleton(); // See comments in PhaseIdealLoop::split_thru_phi(). - if( singleton && t == Type::TOP ) { + if (singleton && t == Type::TOP) { singleton &= region->is_Loop() && (i != LoopNode::EntryControl); } - if( singleton ) { - wins++; + if (singleton) { x = igvn->makecon(t); } else { // We now call Identity to try to simplify the cloned node. @@ -1340,13 +1341,11 @@ // igvn->type(x) is set to x->Value() already. 
x->raise_bottom_type(t); Node *y = x->Identity(igvn); - if( y != x ) { - wins++; + if (y != x) { x = y; } else { y = igvn->hash_find(x); - if( y ) { - wins++; + if (y) { x = y; } else { // Else x is a new node we are keeping @@ -1360,13 +1359,9 @@ igvn->remove_dead_node(the_clone); phi->set_req(i, x); } - if( wins > 0 ) { - // Record Phi - igvn->register_new_node_with_optimizer(phi); - return phi; - } - igvn->remove_dead_node(phi); - return NULL; + // Record Phi + igvn->register_new_node_with_optimizer(phi); + return phi; } //------------------------------Ideal------------------------------------------ @@ -1677,14 +1672,15 @@ // If we are loading from a freshly-allocated object, produce a zero, // if the load is provably beyond the header of the object. // (Also allow a variable load from a fresh array to produce zero.) - if (ReduceFieldZeroing) { + const TypeOopPtr *tinst = tp->isa_oopptr(); + bool is_instance = (tinst != NULL) && tinst->is_known_instance_field(); + if (ReduceFieldZeroing || is_instance) { Node* value = can_see_stored_value(mem,phase); if (value != NULL && value->is_Con()) return value->bottom_type(); } - const TypeOopPtr *tinst = tp->isa_oopptr(); - if (tinst != NULL && tinst->is_known_instance_field()) { + if (is_instance) { // If we have an instance type and our memory input is the // programs's initial memory state, there is no matching store, // so just return a zero of the appropriate type @@ -2159,9 +2155,12 @@ Node* mem = in(MemNode::Memory); Node* address = in(MemNode::Address); - // Back-to-back stores to same address? Fold em up. - // Generally unsafe if I have intervening uses... - if (mem->is_Store() && phase->eqv_uncast(mem->in(MemNode::Address), address)) { + // Back-to-back stores to same address? Fold em up. Generally + // unsafe if I have intervening uses... Also disallowed for StoreCM + // since they must follow each StoreP operation. Redundant StoreCMs + // are eliminated just before matching in final_graph_reshape. + if (mem->is_Store() && phase->eqv_uncast(mem->in(MemNode::Address), address) && + mem->Opcode() != Op_StoreCM) { // Looking at a dead closed cycle of memory? assert(mem != mem->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
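
The StoreCM exclusion added at the end of the hunk above protects the usual back-to-back store folding: two stores to the same address with no intervening use let the earlier one be dropped, which would be wrong for a card-mark store that must stay paired with its oop store. A trivial illustration (hypothetical code):

void redundant_stores(int* p, int i, int a, int b) {
  p[i] = a;   // dead: immediately overwritten, StoreNode::Ideal folds it away
  p[i] = b;   // only this store is observable
}
// A StoreCM in the same position is kept here; redundant card marks are only
// removed later, just before matching, in final_graph_reshape.
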
--- a/src/share/vm/opto/memnode.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/memnode.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/node.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/node.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/output.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/output.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1354,15 +1354,20 @@ // Check that oop-store precedes the card-mark else if( mach->ideal_Opcode() == Op_StoreCM ) { uint storeCM_idx = j; - Node *oop_store = mach->in(mach->_cnt); // First precedence edge - assert( oop_store != NULL, "storeCM expects a precedence edge"); - uint i4; - for( i4 = 0; i4 < last_inst; ++i4 ) { - if( b->_nodes[i4] == oop_store ) break; + int count = 0; + for (uint prec = mach->req(); prec < mach->len(); prec++) { + Node *oop_store = mach->in(prec); // Precedence edge + if (oop_store == NULL) continue; + count++; + uint i4; + for( i4 = 0; i4 < last_inst; ++i4 ) { + if( b->_nodes[i4] == oop_store ) break; + } + // Note: This test can provide a false failure if other precedence + // edges have been added to the storeCMNode. + assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store"); } - // Note: This test can provide a false failure if other precedence - // edges have been added to the storeCMNode. - assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store"); + assert(count > 0, "storeCM expects at least one precedence edge"); } #endif
--- a/src/share/vm/opto/output.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/output.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/parse.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/parse.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -68,9 +68,9 @@ JVMState* caller_jvms, int caller_bci); const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result); - const char* shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const; - const char* shouldNotInline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const; - void print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const PRODUCT_RETURN; + const char* should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const; + const char* should_not_inline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const; + void print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const; InlineTree *caller_tree() const { return _caller_tree; } InlineTree* callee_at(int bci, ciMethod* m) const;
--- a/src/share/vm/opto/parse1.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/parse1.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -638,7 +638,7 @@ ensure_phis_everywhere(); if (block->is_SEL_head() && - UseLoopPredicate) { + (UseLoopPredicate || LoopLimitCheck)) { // Add predicate to single entry (not irreducible) loop head. assert(!block->has_merged_backedge(), "only entry paths should be merged for now"); // Need correct bci for predicate.
--- a/src/share/vm/opto/parse2.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/parse2.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -795,8 +795,9 @@ taken = method()->scale_count(taken); not_taken = method()->scale_count(not_taken); - // Give up if too few counts to be meaningful - if (taken + not_taken < 40) { + // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful. + // We also check that individual counters are positive first, overwise the sum can become positive. + if (taken < 0 || not_taken < 0 || taken + not_taken < 40) { if (C->log() != NULL) { C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken); } @@ -804,13 +805,13 @@ } // Compute frequency that we arrive here - int sum = taken + not_taken; + float sum = taken + not_taken; // Adjust, if this block is a cloned private block but the // Jump counts are shared. Taken the private counts for // just this path instead of the shared counts. if( block()->count() > 0 ) sum = block()->count(); - cnt = (float)sum / (float)FreqCountInvocations; + cnt = sum / FreqCountInvocations; // Pin probability to sane limits float prob;
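
The guard added above protects against 32-bit wrap-around when the two branch counters are summed: two large positive counts can wrap to a negative (or misleadingly small) int and slip past the "taken + not_taken < 40" test, which is also why the frequency is now accumulated in a float. A standalone demonstration, not HotSpot code:

#include <cstdio>

int main() {
  int taken = 1500000000, not_taken = 1500000000;            // both fit in an int
  long long true_sum = (long long)taken + not_taken;          // 3000000000
  int wrapped = (int)((unsigned)taken + (unsigned)not_taken); // wraps negative
  std::printf("true sum = %lld, 32-bit sum = %d\n", true_sum, wrapped);
  return 0;
}
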
--- a/src/share/vm/opto/phaseX.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/phaseX.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -471,6 +471,13 @@ _delay_transform = delay; } + // Clone loop predicates. Defined in loopTransform.cpp. + Node* clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check); + Node* move_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check); + // Create a new if below new_entry for the predicate to be cloned + ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, + Deoptimization::DeoptReason reason); + #ifndef PRODUCT protected: // Sub-quadratic implementation of VerifyIterativeGVN.
--- a/src/share/vm/opto/regmask.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/regmask.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/regmask.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/regmask.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/runtime.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/runtime.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/split_if.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/split_if.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -399,6 +399,9 @@ #ifndef PRODUCT if( PrintOpto && VerifyLoopOptimizations ) tty->print_cr("Split-if"); + if (TraceLoopOpts) { + tty->print_cr("SplitIf"); + } #endif C->set_major_progress(); Node *region = iff->in(0);
--- a/src/share/vm/opto/stringopts.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/stringopts.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1172,16 +1172,16 @@ Node* PhaseStringOpts::copy_string(GraphKit& kit, Node* str, Node* char_array, Node* start) { Node* string = str; - Node* offset = kit.make_load(NULL, + Node* offset = kit.make_load(kit.control(), kit.basic_plus_adr(string, string, java_lang_String::offset_offset_in_bytes()), TypeInt::INT, T_INT, offset_field_idx); - Node* count = kit.make_load(NULL, + Node* count = kit.make_load(kit.control(), kit.basic_plus_adr(string, string, java_lang_String::count_offset_in_bytes()), TypeInt::INT, T_INT, count_field_idx); const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull, TypeAry::make(TypeInt::CHAR,TypeInt::POS), ciTypeArrayKlass::make(T_CHAR), true, 0); - Node* value = kit.make_load(NULL, + Node* value = kit.make_load(kit.control(), kit.basic_plus_adr(string, string, java_lang_String::value_offset_in_bytes()), value_type, T_OBJECT, value_field_idx); @@ -1342,7 +1342,7 @@ } // Node* offset = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, offset_offset), // TypeInt::INT, T_INT, offset_field_idx); - Node* count = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, java_lang_String::count_offset_in_bytes()), + Node* count = kit.make_load(kit.control(), kit.basic_plus_adr(arg, arg, java_lang_String::count_offset_in_bytes()), TypeInt::INT, T_INT, count_field_idx); length = __ AddI(length, count); string_sizes->init_req(argi, NULL);
--- a/src/share/vm/opto/subnode.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/subnode.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1223,21 +1223,6 @@ } //============================================================================= -//------------------------------NegNode---------------------------------------- -Node *NegFNode::Ideal(PhaseGVN *phase, bool can_reshape) { - if( in(1)->Opcode() == Op_SubF ) - return new (phase->C, 3) SubFNode( in(1)->in(2), in(1)->in(1) ); - return NULL; -} - -Node *NegDNode::Ideal(PhaseGVN *phase, bool can_reshape) { - if( in(1)->Opcode() == Op_SubD ) - return new (phase->C, 3) SubDNode( in(1)->in(2), in(1)->in(1) ); - return NULL; -} - - -//============================================================================= //------------------------------Value------------------------------------------ // Compute sqrt const Type *SqrtDNode::Value( PhaseTransform *phase ) const {
--- a/src/share/vm/opto/subnode.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/subnode.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -377,7 +377,6 @@ public: NegFNode( Node *in1 ) : NegNode(in1) {} virtual int Opcode() const; - virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); const Type *bottom_type() const { return Type::FLOAT; } virtual uint ideal_reg() const { return Op_RegF; } }; @@ -391,7 +390,6 @@ public: NegDNode( Node *in1 ) : NegNode(in1) {} virtual int Opcode() const; - virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); const Type *bottom_type() const { return Type::DOUBLE; } virtual uint ideal_reg() const { return Op_RegD; } };
--- a/src/share/vm/opto/superword.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/superword.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1132,6 +1132,13 @@ void SuperWord::output() { if (_packset.length() == 0) return; +#ifndef PRODUCT + if (TraceLoopOpts) { + tty->print("SuperWord "); + lpt()->dump_head(); + } +#endif + // MUST ENSURE main loop's initial value is properly aligned: // (iv_initial_value + min_iv_offset) % vector_width_in_bytes() == 0
--- a/src/share/vm/opto/type.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/type.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/vectornode.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/opto/vectornode.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -32,6 +32,7 @@ //------------------------------VectorNode-------------------------------------- // Vector Operation class VectorNode : public Node { + virtual uint size_of() const { return sizeof(*this); } protected: uint _length; // vector length virtual BasicType elt_basic_type() const = 0; // Vector element basic type
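The one-line addition above gives VectorNode its own size_of(). In C2's IR, cloning a node is a raw copy of size_of() bytes, so a subclass that adds fields beyond Node (here the vector length) must report its own size or clones silently lose that extra state. Below is a minimal, self-contained sketch of the idiom; Node and VectorLikeNode are invented stand-ins, not the HotSpot classes, and the byte-copy clone merely mirrors the HotSpot style.

#include <cstddef>
#include <cstdio>
#include <cstring>
#include <new>

struct Node {
  int _opcode;
  Node() : _opcode(0) {}
  virtual ~Node() {}
  // Number of bytes to copy when cloning; a subclass that adds fields must
  // override this, or clone() truncates the copy to the base-class size.
  virtual std::size_t size_of() const { return sizeof(*this); }
  Node* clone() const {
    void* mem = ::operator new(size_of());
    std::memcpy(mem, this, size_of());   // raw byte copy, as in the sea-of-nodes IR
    return static_cast<Node*>(mem);
  }
};

struct VectorLikeNode : Node {
  unsigned _length;                      // extra payload beyond Node
  explicit VectorLikeNode(unsigned len) : _length(len) {}
  virtual std::size_t size_of() const { return sizeof(*this); }  // the fix above
};

int main() {
  VectorLikeNode v(8);
  Node* copy = v.clone();
  std::printf("cloned vector length = %u\n",
              static_cast<VectorLikeNode*>(copy)->_length);
  ::operator delete(copy);
  return 0;
}

Without the override, clone() would copy only sizeof(Node) bytes and the cloned node's _length would be garbage; that is exactly the hazard the added virtual avoids.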
--- a/src/share/vm/precompiled.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/precompiled.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/forte.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/forte.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jni.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jni.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -29,6 +29,9 @@ #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" #include "interpreter/linkResolver.hpp" +#ifndef SERIALGC +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" +#endif // SERIALGC #include "memory/allocation.inline.hpp" #include "memory/gcLocker.inline.hpp" #include "memory/oopFactory.hpp" @@ -1724,6 +1727,26 @@ o = JvmtiExport::jni_GetField_probe(thread, obj, o, k, fieldID, false); } jobject ret = JNIHandles::make_local(env, o->obj_field(offset)); +#ifndef SERIALGC + // If G1 is enabled and we are accessing the value of the referent + // field in a reference object then we need to register a non-null + // referent with the SATB barrier. + if (UseG1GC) { + bool needs_barrier = false; + + if (ret != NULL && + offset == java_lang_ref_Reference::referent_offset && + instanceKlass::cast(k)->reference_type() != REF_NONE) { + assert(instanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity"); + needs_barrier = true; + } + + if (needs_barrier) { + oop referent = JNIHandles::resolve(ret); + G1SATBCardTableModRefBS::enqueue(referent); + } + } +#endif // SERIALGC DTRACE_PROBE1(hotspot_jni, GetObjectField__return, ret); return ret; JNI_END
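The new block in GetObjectField registers a referent read with G1's SATB (snapshot-at-the-beginning) barrier: when a JNI caller pulls the referent out of a java.lang.ref.Reference while concurrent marking is running, that object must be enqueued or the marker can miss it even though the application now holds a strong reference. The following is a toy model of that rule only, not G1 code; the queue, Obj type, and flags are stand-ins for the real SATB buffer and checks.

#include <cstdio>
#include <deque>

struct Obj { bool marked; Obj() : marked(false) {} };

static std::deque<Obj*> satb_queue;        // stand-in for the per-thread SATB buffer

// Stand-in for G1SATBCardTableModRefBS::enqueue(referent).
void satb_enqueue(Obj* o) { if (o != NULL) satb_queue.push_back(o); }

// A JNI-style getter for Reference.referent under the SATB invariant.
Obj* get_referent(Obj* referent, bool use_g1, bool is_reference_field) {
  Obj* result = referent;
  if (use_g1 && is_reference_field && result != NULL) {
    satb_enqueue(result);                  // keep the marking snapshot complete
  }
  return result;
}

int main() {
  Obj referent;
  get_referent(&referent, /*use_g1=*/true, /*is_reference_field=*/true);
  // Concurrent marker later drains the queue and marks everything in it.
  for (std::deque<Obj*>::iterator it = satb_queue.begin(); it != satb_queue.end(); ++it)
    (*it)->marked = true;
  std::printf("referent marked: %s\n", referent.marked ? "yes" : "no");
  return 0;
}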
--- a/src/share/vm/prims/jni_md.h Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jni_md.h Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvm.h Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jvm.h Thu Jun 02 18:59:50 2011 +0100 @@ -1062,7 +1062,7 @@ JVM_CONSTANT_NameAndType, JVM_CONSTANT_MethodHandle = 15, // JSR 292 JVM_CONSTANT_MethodType = 16, // JSR 292 - JVM_CONSTANT_InvokeDynamicTrans = 17, // JSR 292, only occurs in old class files + //JVM_CONSTANT_(unused) = 17, // JSR 292 early drafts only JVM_CONSTANT_InvokeDynamic = 18, // JSR 292 JVM_CONSTANT_ExternalMax = 18 // Last tag found in classfiles };
--- a/src/share/vm/prims/jvm_misc.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jvm_misc.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmti.xml Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jvmti.xml Thu Jun 02 18:59:50 2011 +0100 @@ -280,10 +280,8 @@ <!ELEMENT externallink (#PCDATA|jvmti|code|i|b|tm)*> <!ATTLIST externallink id CDATA #REQUIRED> - <!ELEMENT vmspeclink EMPTY> - <!ATTLIST vmspeclink id CDATA #IMPLIED> - <!ATTLIST vmspeclink name CDATA #IMPLIED> - <!ATTLIST vmspeclink preposition CDATA #IMPLIED> + <!ELEMENT vmspec EMPTY> + <!ATTLIST vmspec chapter CDATA #IMPLIED> <!ELEMENT internallink (#PCDATA|jvmti|code|i|b)*> <!ATTLIST internallink id CDATA #REQUIRED> @@ -2285,9 +2283,8 @@ Stack frames are referenced by depth. The frame at depth zero is the current frame. <p/> - Stack frames are as described in the - <vmspeclink id="Overview.doc.html#17257" - name="Frames section"/>. + Stack frames are as described in + <vmspec chapter="3.6"/>, That is, they correspond to method invocations (including native methods) but do not correspond to platform native or VM internal frames. @@ -2627,7 +2624,7 @@ <param id="use_java_stack"> <jboolean/> <description> - Return the stack showing the <vmspeclink/> + Return the stack showing <vmspec/> model of the stack; otherwise, show the internal representation of the stack with inlined and optimized methods missing. If the virtual machine @@ -2707,7 +2704,7 @@ When the thread is resumed, the execution state of the thread is reset to the state immediately before the called method was invoked. - That is (using the <vmspeclink/> terminology): + That is (using <vmspec/> terminology): <ul> <li>the current frame is discarded as the previous frame becomes the current one</li> <li>the operand stack is restored--the argument values are added back @@ -2868,9 +2865,8 @@ to return at any point during its execution. The method which will return early is referred to as the <i>called method</i>. The called method is the current method - (as defined by the - <vmspeclink id="Overview.doc.html#17257" - name="Frames section"/>) + (as defined by + <vmspec chapter="3.6"/>) for the specified thread at the time the function is called. <p/> @@ -3576,10 +3572,8 @@ <field id="index"> <jint/> <description> - The index into the constant pool of the class. See the - <vmspeclink id="ClassFile.doc.html#20080" - name="Constant Pool section"/> - description. + The index into the constant pool of the class. See the description in + <vmspec chapter="4.4"/>. </description> </field> </typedef> @@ -5006,9 +5000,8 @@ For references of this kind the <code>referrer_index</code> parameter to the <internallink id="jvmtiObjectReferenceCallback"> jvmtiObjectReferenceCallback</internallink> is the index into - constant pool table of the class, starting at 1. See the - <vmspeclink id="ClassFile.doc.html#20080" - name="Constant Pool section"/> + constant pool table of the class, starting at 1. See + <vmspec chapter="4.4"/>. </constant> </constants> @@ -6441,9 +6434,7 @@ been recorded as an initiating loader. Each class in the returned array was created by this class loader, either by defining it directly or by delegation to another class loader. - See the - <vmspeclink id="ConstantPool.doc.html#72007" - name="Creation and Loading section"/>. + See <vmspec chapter="5.3"/>. <p/> For JDK version 1.1 implementations that don't recognize the distinction between initiating and defining class loaders, @@ -6626,9 +6617,7 @@ For the class indicated by <code>klass</code>, return the access flags via <code>modifiers_ptr</code>. 
- Access flags are defined in the - <vmspeclink id="ClassFile.doc.html" - name="Class File Format chapter"/>. + Access flags are defined in <vmspec chapter="4"/>. <p/> If the class is an array class, then its public, private, and protected modifiers are the same as those of its component type. For arrays of @@ -6794,9 +6783,8 @@ <description> For the class indicated by <code>klass</code>, return the minor and major version numbers, - as defined in the - <vmspeclink id="ClassFile.doc.html" - name="Class File Format chapter"/>. + as defined in + <vmspec chapter="4"/>. </description> <origin>new</origin> <capabilities> @@ -6839,10 +6827,8 @@ <description> For the class indicated by <code>klass</code>, return the raw bytes of the constant pool in the format of the - <code>constant_pool</code> item of the - <vmspeclink id="ClassFile.doc.html" - name="Class File Format" - preposition="in"/>. + <code>constant_pool</code> item of + <vmspec chapter="4"/>. The format of the constant pool may differ between versions of the Class File Format, so, the <functionlink id="GetClassVersionNumbers">minor and major @@ -7286,9 +7272,7 @@ <field id="class_bytes"> <inbuf incount="class_byte_count"><uchar/></inbuf> <description> - Bytes defining class (in the - <vmspeclink id="ClassFile.doc.html" - name="Class File Format"/>) + Bytes defining class (in <vmspec chapter="4"/>) </description> </field> </typedef> @@ -7611,10 +7595,8 @@ <paramlink id="signature_ptr"/>. <p/> Field signatures are defined in the JNI Specification and - are referred to as - <vmspeclink id="ClassFile.doc.html#14152" - name="field descriptors" - preposition="in"/>. + are referred to as <code>field descriptors</code> in + <vmspec chapter="4.3.2"/>. </description> <origin>jvmdiClone</origin> <capabilities> @@ -7709,9 +7691,7 @@ <description> For the field indicated by <code>klass</code> and <code>field</code> return the access flags via <code>modifiers_ptr</code>. - Access flags are defined in the - <vmspeclink id="ClassFile.doc.html" - name="Class File Format chapter"/>. + Access flags are defined in <vmspec chapter="4"/>. </description> <origin>jvmdi</origin> <capabilities> @@ -7810,10 +7790,9 @@ return the method name via <code>name_ptr</code> and method signature via <code>signature_ptr</code>. <p/> - Method signatures are defined in the JNI Specification and are referred to as - <vmspeclink id="ClassFile.doc.html#7035" - name="method descriptors" - preposition="in"/>. + Method signatures are defined in the JNI Specification and are + referred to as <code>method descriptors</code> in + <vmspec chapter="4.3.3"/>. Note this is different than method signatures as defined in the <i>Java Language Specification</i>. </description> @@ -7902,9 +7881,7 @@ <description> For the method indicated by <code>method</code>, return the access flags via <code>modifiers_ptr</code>. - Access flags are defined in the - <vmspeclink id="ClassFile.doc.html" - name="Class File Format chapter"/>. + Access flags are defined in <vmspec chapter="4"/>. </description> <origin>jvmdi</origin> <capabilities> @@ -7941,9 +7918,7 @@ including the local variables used to pass parameters to the method on its invocation. <p/> - See <code>max_locals</code> in the - <vmspeclink id="ClassFile.doc.html#1546" - name="Code Attribute section"/>. + See <code>max_locals</code> in <vmspec chapter="4.7.3"/>. 
</description> <origin>jvmdi</origin> <capabilities> @@ -8150,8 +8125,7 @@ The local variable's type signature, encoded as a <internallink id="mUTF">modified UTF-8</internallink> string. The signature format is the same as that defined in - <vmspeclink id="ClassFile.doc.html#14152" - name="Field Descriptors section"/> + <vmspec chapter="4.3.2"/>. </description> </field> <field id="generic_signature"> @@ -10460,10 +10434,7 @@ <synopsis>Add To Bootstrap Class Loader Search</synopsis> <description> This function can be used to cause instrumentation classes to be defined by the - bootstrap class loader. See - <vmspeclink id="ConstantPool.doc.html#79383" - name="Loading Using the Bootstrap Class Loader" - preposition="in"/>. + bootstrap class loader. See <vmspec chapter="5.3.1"/>. After the bootstrap class loader unsuccessfully searches for a class, the specified platform-dependent search path <paramlink id="segment"/> will be searched as well. Only one segment may be specified in @@ -10480,7 +10451,7 @@ contain any classes or resources other than those to be defined by the bootstrap class loader for the purposes of instrumentation. <p/> - The <vmspeclink/> specifies that a subsequent attempt to resolve a symbolic + <vmspec/> specifies that a subsequent attempt to resolve a symbolic reference that the Java virtual machine has previously unsuccessfully attempted to resolve always fails with the same error that was thrown as a result of the initial resolution attempt. Consequently, if the JAR file contains an entry @@ -10512,10 +10483,7 @@ <synopsis>Add To System Class Loader Search</synopsis> <description> This function can be used to cause instrumentation classes to be - defined by the system class loader. See - <vmspeclink id="ConstantPool.doc.html#79441" - name="Loading Using a User-defined Class Loader" - preposition="in"/>. + defined by the system class loader. See <vmspec chapter="5.3.2"/>. After the class loader unsuccessfully searches for a class, the specified platform-dependent search path <paramlink id="segment"/> will be searched as well. Only one segment may be specified in the <paramlink id="segment"/>. This function may be called multiple times to add multiple segments, the @@ -10536,7 +10504,7 @@ which takes a single parameter of type <code>java.lang.String</code>. The method is not required to have <code>public</code> access. <p/> - The <vmspeclink/> specifies that a subsequent attempt to resolve a symbolic + <vmspec/> specifies that a subsequent attempt to resolve a symbolic reference that the Java virtual machine has previously unsuccessfully attempted to resolve always fails with the same error that was thrown as a result of the initial resolution attempt. Consequently, if the JAR file contains an entry @@ -11438,7 +11406,7 @@ at the finest granularity allowed by the VM. A single step event is generated whenever a thread reaches a new location. Typically, single step events represent the completion of one VM - instruction as defined in the <vmspeclink/>. However, some implementations + instruction as defined in <vmspec/>. However, some implementations may define locations differently. In any case the <code>method</code> and <code>location</code> parameters uniquely identify the current location and allow @@ -13841,7 +13809,7 @@ and can_get_source_debug_extension. PopFrame cannot have a native calling method. Removed incorrect statement in GetClassloaderClasses - (see http://java.sun.com/docs/books/vmspec/2nd-edition/html/ConstantPool.doc.html#79383). 
+ (see <vmspec chapter="4.4"/>). </change> <change date="24 July 2003" version="v79"> XML and text fixes.
--- a/src/share/vm/prims/jvmti.xsl Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jvmti.xsl Thu Jun 02 18:59:50 2011 +0100 @@ -1039,34 +1039,14 @@ </a> </xsl:template> -<xsl:template match="vmspeclink"> - <xsl:if test="count(@id)=1"> - <a> - <xsl:attribute name="href"> - <xsl:text>http://java.sun.com/docs/books/vmspec/2nd-edition/html/</xsl:text> - <xsl:value-of select="@id"/> - </xsl:attribute> - <xsl:value-of select="@name"/> - </a> - <xsl:text> </xsl:text> - <xsl:choose> - <xsl:when test="count(@preposition)=1"> - <xsl:value-of select="@preposition"/> - </xsl:when> - <xsl:otherwise> - <xsl:text>of</xsl:text> - </xsl:otherwise> - </xsl:choose> - <xsl:text> the </xsl:text> - </xsl:if> - <a> - <xsl:attribute name="href"> - <xsl:text>http://java.sun.com/docs/books/vmspec/</xsl:text> - </xsl:attribute> - <i> - <xsl:text>Java Virtual Machine Specification</xsl:text> - </i> - </a> +<xsl:template match="vmspec"> + <cite> + <xsl:text>The Java™ Virtual Machine Specification</xsl:text> + <xsl:if test="count(@chapter)=1"> + <xsl:text>, Chapter </xsl:text> + <xsl:value-of select="@chapter"/> + </xsl:if> + </cite> </xsl:template> <xsl:template match="internallink">
--- a/src/share/vm/prims/jvmtiClassFileReconstituter.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jvmtiClassFileReconstituter.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmtiClassFileReconstituter.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jvmtiClassFileReconstituter.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmtiEnv.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jvmtiEnv.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -525,7 +525,7 @@ ObjectLocker ol(loader, THREAD); // need the path as java.lang.String - Handle path = java_lang_String::create_from_str(segment, THREAD); + Handle path = java_lang_String::create_from_platform_dependent_str(segment, THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return JVMTI_ERROR_INTERNAL;
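The jvmtiEnv.cpp fix builds the path String with the platform-dependent converter rather than create_from_str: the search-path segment handed to the JVM TI Add*ClassLoaderSearch functions arrives in the platform encoding, not ASCII or modified UTF-8. Below is a rough, self-contained illustration of that distinction using the C locale machinery; the segment value and the program itself are made up for illustration.

#include <clocale>
#include <cstdio>
#include <cstdlib>
#include <vector>

int main() {
  std::setlocale(LC_ALL, "");                    // adopt the platform's encoding
  // Hypothetical search-path segment passed to AddToSystemClassLoaderSearch.
  const char* segment = "/tmp/agent-classes.jar";
  // Measure and convert in the platform encoding instead of assuming ASCII.
  std::size_t n = std::mbstowcs(NULL, segment, 0);
  if (n == (std::size_t)-1) {
    std::fprintf(stderr, "segment is not valid in the platform encoding\n");
    return 1;
  }
  std::vector<wchar_t> wide(n + 1);
  std::mbstowcs(&wide[0], segment, wide.size());
  std::printf("converted %zu characters\n", n);
  return 0;
}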
--- a/src/share/vm/prims/jvmtiEventController.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jvmtiEventController.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmtiExport.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jvmtiExport.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1804,6 +1804,8 @@ } void JvmtiExport::post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) { + assert(name != NULL && name[0] != '\0', "sanity check"); + JavaThread* thread = JavaThread::current(); // In theory everyone coming thru here is in_vm but we need to be certain // because a callee will do a vm->native transition
--- a/src/share/vm/prims/jvmtiImpl.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jvmtiImpl.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -38,6 +38,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/os.hpp" #include "runtime/serviceThread.hpp" #include "runtime/signature.hpp" #include "runtime/vframe.hpp" @@ -939,10 +940,15 @@ nmethodLocker::lock_nmethod(nm, true /* zombie_ok */); return event; } + JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event( const char* name, const void* code_begin, const void* code_end) { JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED); - event._event_data.dynamic_code_generated.name = name; + // Need to make a copy of the name since we don't know how long + // the event poster will keep it around after we enqueue the + // deferred event and return. strdup() failure is handled in + // the post() routine below. + event._event_data.dynamic_code_generated.name = os::strdup(name); event._event_data.dynamic_code_generated.code_begin = code_begin; event._event_data.dynamic_code_generated.code_end = code_end; return event; @@ -968,12 +974,19 @@ nmethodLocker::unlock_nmethod(nm); break; } - case TYPE_DYNAMIC_CODE_GENERATED: + case TYPE_DYNAMIC_CODE_GENERATED: { JvmtiExport::post_dynamic_code_generated_internal( - _event_data.dynamic_code_generated.name, + // if strdup failed give the event a default name + (_event_data.dynamic_code_generated.name == NULL) + ? "unknown_code" : _event_data.dynamic_code_generated.name, _event_data.dynamic_code_generated.code_begin, _event_data.dynamic_code_generated.code_end); + if (_event_data.dynamic_code_generated.name != NULL) { + // release our copy + os::free((void *)_event_data.dynamic_code_generated.name); + } break; + } default: ShouldNotReachHere(); }
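The jvmtiImpl.cpp change makes the deferred event own a copy of the generated-code name: the poster's buffer may be reused or freed before the service thread posts the event, so the name is duplicated on enqueue, a default is substituted if the duplication failed, and the copy is released after posting. A small stand-alone sketch of that ownership pattern follows; dup_cstr stands in for os::strdup() and the event struct is invented.

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Stand-in for os::strdup(): returns NULL if allocation fails.
static const char* dup_cstr(const char* s) {
  char* copy = static_cast<char*>(std::malloc(std::strlen(s) + 1));
  if (copy != NULL) std::strcpy(copy, s);
  return copy;
}

struct DeferredEvent {
  const char* name;   // privately owned copy; may be NULL if the copy failed
};

DeferredEvent make_dynamic_code_event(const char* name) {
  DeferredEvent e;
  e.name = dup_cstr(name);          // copy now; the caller's buffer may not survive
  return e;
}

void post(DeferredEvent& e) {
  // If the copy failed, fall back to a default name rather than crashing.
  std::printf("DynamicCodeGenerated(%s)\n", e.name != NULL ? e.name : "unknown_code");
  std::free((void*)e.name);         // release our copy once the event is posted
  e.name = NULL;
}

int main() {
  char transient[64];
  std::snprintf(transient, sizeof(transient), "I2C/C2I adapters");
  DeferredEvent e = make_dynamic_code_event(transient);
  transient[0] = '\0';              // caller reuses its buffer before the event is posted
  post(e);
  return 0;
}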
--- a/src/share/vm/prims/jvmtiManageCapabilities.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jvmtiManageCapabilities.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -319,8 +319,11 @@ bool enter_all_methods = interp_events || avail.can_generate_breakpoint_events; - UseFastEmptyMethods = !enter_all_methods; - UseFastAccessorMethods = !enter_all_methods; + if (enter_all_methods) { + // Disable these when tracking the bytecodes + UseFastEmptyMethods = false; + UseFastAccessorMethods = false; + } if (avail.can_generate_breakpoint_events) { RewriteFrequentPairs = false;
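The rewritten capability logic only forces UseFastEmptyMethods and UseFastAccessorMethods off when an agent needs every method entry tracked; it no longer assigns the flags unconditionally, so a value chosen elsewhere (for example on the command line) is never silently re-enabled. A trivial sketch of the difference, with plain bools standing in for the VM flags:

#include <cstdio>

bool UseFastEmptyMethods    = true;   // stand-ins for the VM flags
bool UseFastAccessorMethods = true;

void update_interpreter_flags(bool enter_all_methods) {
  if (enter_all_methods) {
    // Disable these when tracking the bytecodes.
    UseFastEmptyMethods    = false;
    UseFastAccessorMethods = false;
  }
  // Deliberately no 'else' branch: never flip the flags back on here.
}

int main() {
  update_interpreter_flags(false);
  std::printf("fast accessors still %s\n", UseFastAccessorMethods ? "on" : "off");
  return 0;
}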
--- a/src/share/vm/prims/jvmtiRedefineClasses.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jvmtiRedefineClasses.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmtiTagMap.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jvmtiTagMap.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -3158,6 +3158,9 @@ if (fr->is_entry_frame()) { last_entry_frame = fr; } + if (fr->is_ricochet_frame()) { + fr->oops_ricochet_do(blk, vf->register_map()); + } } vf = vf->sender();
--- a/src/share/vm/prims/jvmtiTagMap.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/jvmtiTagMap.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/methodHandleWalk.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/methodHandleWalk.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -31,6 +31,11 @@ * JSR 292 reference implementation: method handle structure analysis */ +#ifdef PRODUCT +#define print_method_handle(mh) {} +#else //PRODUCT +extern "C" void print_method_handle(oop mh); +#endif //PRODUCT // ----------------------------------------------------------------------------- // MethodHandleChain @@ -82,10 +87,8 @@ void MethodHandleChain::set_last_method(oop target, TRAPS) { _is_last = true; - klassOop receiver_limit_oop = NULL; - int flags = 0; - methodOop m = MethodHandles::decode_method(target, receiver_limit_oop, flags); - _last_method = methodHandle(THREAD, m); + KlassHandle receiver_limit; int flags = 0; + _last_method = MethodHandles::decode_method(target, receiver_limit, flags); if ((flags & MethodHandles::_dmf_has_receiver) == 0) _last_invoke = Bytecodes::_invokestatic; else if ((flags & MethodHandles::_dmf_does_dispatch) == 0) @@ -208,8 +211,10 @@ lose("bad argument index", CHECK_(empty)); } + bool retain_original_args = false; // used by fold/collect logic + // perform the adapter action - switch (chain().adapter_conversion_op()) { + switch (conv_op) { case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY: // No changes to arguments; pass the bits through. break; @@ -218,51 +223,36 @@ // To keep the verifier happy, emit bitwise ("raw") conversions as needed. // See MethodHandles::same_basic_type_for_arguments for allowed conversions. Handle incoming_mtype(THREAD, chain().method_type_oop()); - oop outgoing_mh_oop = chain().vmtarget_oop(); - if (!java_lang_invoke_MethodHandle::is_instance(outgoing_mh_oop)) - lose("outgoing target not a MethodHandle", CHECK_(empty)); - Handle outgoing_mtype(THREAD, java_lang_invoke_MethodHandle::type(outgoing_mh_oop)); - outgoing_mh_oop = NULL; // GC safety + Handle outgoing_mtype; + { + oop outgoing_mh_oop = chain().vmtarget_oop(); + if (!java_lang_invoke_MethodHandle::is_instance(outgoing_mh_oop)) + lose("outgoing target not a MethodHandle", CHECK_(empty)); + outgoing_mtype = Handle(THREAD, java_lang_invoke_MethodHandle::type(outgoing_mh_oop)); + } int nptypes = java_lang_invoke_MethodType::ptype_count(outgoing_mtype()); if (nptypes != java_lang_invoke_MethodType::ptype_count(incoming_mtype())) lose("incoming and outgoing parameter count do not agree", CHECK_(empty)); + // Argument types. 
for (int i = 0, slot = _outgoing.length() - 1; slot >= 0; slot--) { SlotState* arg_state = slot_state(slot); if (arg_state->_type == T_VOID) continue; - ArgToken arg = _outgoing.at(slot)._arg; - klassOop in_klass = NULL; - klassOop out_klass = NULL; - BasicType inpbt = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(incoming_mtype(), i), &in_klass); - BasicType outpbt = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(outgoing_mtype(), i), &out_klass); - assert(inpbt == arg.basic_type(), "sanity"); - - if (inpbt != outpbt) { - vmIntrinsics::ID iid = vmIntrinsics::for_raw_conversion(inpbt, outpbt); - if (iid == vmIntrinsics::_none) { - lose("no raw conversion method", CHECK_(empty)); - } - ArgToken arglist[2]; - arglist[0] = arg; // outgoing 'this' - arglist[1] = ArgToken(); // sentinel - arg = make_invoke(NULL, iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(empty)); - change_argument(inpbt, slot, outpbt, arg); - } - + klassOop src_klass = NULL; + klassOop dst_klass = NULL; + BasicType src = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(incoming_mtype(), i), &src_klass); + BasicType dst = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(outgoing_mtype(), i), &dst_klass); + retype_raw_argument_type(src, dst, slot, CHECK_(empty)); i++; // We need to skip void slots at the top of the loop. } - BasicType inrbt = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(incoming_mtype())); - BasicType outrbt = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(outgoing_mtype())); - if (inrbt != outrbt) { - if (inrbt == T_INT && outrbt == T_VOID) { - // See comments in MethodHandles::same_basic_type_for_arguments. - } else { - assert(false, "IMPLEMENT ME"); - lose("no raw conversion method", CHECK_(empty)); - } + // Return type. 
+ { + BasicType src = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(incoming_mtype())); + BasicType dst = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(outgoing_mtype())); + retype_raw_return_type(src, dst, CHECK_(empty)); } break; } @@ -275,7 +265,7 @@ assert(dest == arg_state->_type, ""); ArgToken arg = arg_state->_arg; ArgToken new_arg = make_conversion(T_OBJECT, dest_klass, Bytecodes::_checkcast, arg, CHECK_(empty)); - assert(arg.index() == new_arg.index(), "should be the same index"); + assert(!arg.has_index() || arg.index() == new_arg.index(), "should be the same index"); debug_only(dest_klass = (klassOop)badOop); break; } @@ -334,7 +324,7 @@ ArgToken arglist[2]; arglist[0] = arg; // outgoing value arglist[1] = ArgToken(); // sentinel - arg = make_invoke(NULL, boxer, Bytecodes::_invokevirtual, false, 1, &arglist[0], CHECK_(empty)); + arg = make_invoke(NULL, boxer, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(empty)); change_argument(src, arg_slot, T_OBJECT, arg); break; } @@ -406,8 +396,57 @@ break; } - case java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS: { //NYI, may GC - lose("unimplemented", CHECK_(empty)); + case java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS: + retain_original_args = true; // and fall through: + case java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS: { + // call argument MH recursively + //{static int x; if (!x++) print_method_handle(chain().method_handle_oop()); --x;} + Handle recursive_mh(THREAD, chain().adapter_arg_oop()); + if (!java_lang_invoke_MethodHandle::is_instance(recursive_mh())) { + lose("recursive target not a MethodHandle", CHECK_(empty)); + } + Handle recursive_mtype(THREAD, java_lang_invoke_MethodHandle::type(recursive_mh())); + int argc = java_lang_invoke_MethodType::ptype_count(recursive_mtype()); + int coll_slots = java_lang_invoke_MethodHandle::vmslots(recursive_mh()); + BasicType rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(recursive_mtype())); + ArgToken* arglist = NEW_RESOURCE_ARRAY(ArgToken, 1 + argc + 1); // 1+: mh, +1: sentinel + arglist[0] = make_oop_constant(recursive_mh(), CHECK_(empty)); + if (arg_slot < 0 || coll_slots < 0 || arg_slot + coll_slots > _outgoing.length()) { + lose("bad fold/collect arg slot", CHECK_(empty)); + } + for (int i = 0, slot = arg_slot + coll_slots - 1; slot >= arg_slot; slot--) { + SlotState* arg_state = slot_state(slot); + BasicType arg_type = arg_state->_type; + if (arg_type == T_VOID) continue; + ArgToken arg = _outgoing.at(slot)._arg; + if (i >= argc) { lose("bad fold/collect arg", CHECK_(empty)); } + arglist[1+i] = arg; + if (!retain_original_args) + change_argument(arg_type, slot, T_VOID, ArgToken(tt_void)); + i++; + } + arglist[1+argc] = ArgToken(); // sentinel + oop invoker = java_lang_invoke_MethodTypeForm::vmlayout( + java_lang_invoke_MethodType::form(recursive_mtype()) ); + if (invoker == NULL || !invoker->is_method()) { + lose("bad vmlayout slot", CHECK_(empty)); + } + // FIXME: consider inlining the invokee at the bytecode level + ArgToken ret = make_invoke(methodOop(invoker), vmIntrinsics::_none, + Bytecodes::_invokevirtual, false, 1+argc, &arglist[0], CHECK_(empty)); + DEBUG_ONLY(invoker = NULL); + if (rtype == T_OBJECT) { + klassOop rklass = java_lang_Class::as_klassOop( java_lang_invoke_MethodType::rtype(recursive_mtype()) ); + if (rklass != SystemDictionary::Object_klass() && + !Klass::cast(rklass)->is_interface()) { + // preserve type safety + ret = make_conversion(T_OBJECT, rklass, 
Bytecodes::_checkcast, ret, CHECK_(empty)); + } + } + if (rtype != T_VOID) { + int ret_slot = arg_slot + (retain_original_args ? coll_slots : 0); + change_argument(T_VOID, ret_slot, rtype, ret); + } break; } @@ -451,12 +490,21 @@ arglist[1] = length_arg; // length to check arglist[2] = ArgToken(); // sentinel make_invoke(NULL, vmIntrinsics::_checkSpreadArgument, - Bytecodes::_invokestatic, false, 3, &arglist[0], CHECK_(empty)); + Bytecodes::_invokestatic, false, 2, &arglist[0], CHECK_(empty)); // Spread out the array elements. - Bytecodes::Code aload_op = Bytecodes::_aaload; - if (element_type != T_OBJECT) { - lose("primitive array NYI", CHECK_(empty)); + Bytecodes::Code aload_op = Bytecodes::_nop; + switch (element_type) { + case T_INT: aload_op = Bytecodes::_iaload; break; + case T_LONG: aload_op = Bytecodes::_laload; break; + case T_FLOAT: aload_op = Bytecodes::_faload; break; + case T_DOUBLE: aload_op = Bytecodes::_daload; break; + case T_OBJECT: aload_op = Bytecodes::_aaload; break; + case T_BOOLEAN: // fall through: + case T_BYTE: aload_op = Bytecodes::_baload; break; + case T_CHAR: aload_op = Bytecodes::_caload; break; + case T_SHORT: aload_op = Bytecodes::_saload; break; + default: lose("primitive array NYI", CHECK_(empty)); } int ap = arg_slot; for (int i = 0; i < spread_length; i++) { @@ -469,11 +517,6 @@ break; } - case java_lang_invoke_AdapterMethodHandle::OP_FLYBY: //NYI, runs Java code - case java_lang_invoke_AdapterMethodHandle::OP_RICOCHET: //NYI, runs Java code - lose("unimplemented", CHECK_(empty)); - break; - default: lose("bad adapter conversion", CHECK_(empty)); break; @@ -497,7 +540,7 @@ lose("bad bound value", CHECK_(empty)); } } - debug_only(arg_oop = badOop); + DEBUG_ONLY(arg_oop = badOop); change_argument(T_VOID, arg_slot, arg_type, arg); } @@ -540,11 +583,10 @@ } for (int i = 0; i < nptypes; i++) { klassOop arg_type_klass = NULL; - BasicType arg_type = java_lang_Class::as_BasicType( - java_lang_invoke_MethodType::ptype(mtype(), i), &arg_type_klass); + BasicType arg_type = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(mtype(), i), &arg_type_klass); int index = new_local_index(arg_type); ArgToken arg = make_parameter(arg_type, arg_type_klass, index, CHECK); - debug_only(arg_type_klass = (klassOop) NULL); + DEBUG_ONLY(arg_type_klass = (klassOop) NULL); _outgoing.at_put(argp, make_state(arg_type, arg)); if (type2size[arg_type] == 2) { // add the extra slot, so we can model the JVM stack @@ -554,8 +596,7 @@ } // call make_parameter at the end of the list for the return type klassOop ret_type_klass = NULL; - BasicType ret_type = java_lang_Class::as_BasicType( - java_lang_invoke_MethodType::rtype(mtype()), &ret_type_klass); + BasicType ret_type = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(mtype()), &ret_type_klass); ArgToken ret = make_parameter(ret_type, ret_type_klass, -1, CHECK); // ignore ret; client can catch it if needed } @@ -607,11 +648,53 @@ // ----------------------------------------------------------------------------- +// MethodHandleWalker::retype_raw_conversion +// +// Do the raw retype conversions for OP_RETYPE_RAW. 
+void MethodHandleWalker::retype_raw_conversion(BasicType src, BasicType dst, bool for_return, int slot, TRAPS) { + if (src != dst) { + if (MethodHandles::same_basic_type_for_returns(src, dst, /*raw*/ true)) { + if (MethodHandles::is_float_fixed_reinterpretation_cast(src, dst)) { + if (for_return) Untested("MHW return raw conversion"); // still untested + vmIntrinsics::ID iid = vmIntrinsics::for_raw_conversion(src, dst); + if (iid == vmIntrinsics::_none) { + lose("no raw conversion method", CHECK); + } + ArgToken arglist[2]; + if (!for_return) { + // argument type conversion + ArgToken arg = _outgoing.at(slot)._arg; + assert(arg.token_type() >= tt_symbolic || src == arg.basic_type(), "sanity"); + arglist[0] = arg; // outgoing 'this' + arglist[1] = ArgToken(); // sentinel + arg = make_invoke(NULL, iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK); + change_argument(src, slot, dst, arg); + } else { + // return type conversion + klassOop arg_klass = NULL; + arglist[0] = make_parameter(src, arg_klass, -1, CHECK); // return value + arglist[1] = ArgToken(); // sentinel + (void) make_invoke(NULL, iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK); + } + } else { + // Nothing to do. + } + } else if (src == T_OBJECT && is_java_primitive(dst)) { + // ref-to-prim: discard ref, push zero + lose("requested ref-to-prim conversion not expected", CHECK); + } else { + lose("requested raw conversion not allowed", CHECK); + } + } +} + + +// ----------------------------------------------------------------------------- // MethodHandleCompiler -MethodHandleCompiler::MethodHandleCompiler(Handle root, methodHandle callee, bool is_invokedynamic, TRAPS) +MethodHandleCompiler::MethodHandleCompiler(Handle root, Symbol* name, Symbol* signature, int invoke_count, bool is_invokedynamic, TRAPS) : MethodHandleWalker(root, is_invokedynamic, THREAD), - _callee(callee), + _invoke_count(invoke_count), _thread(THREAD), _bytecode(THREAD, 50), _constants(THREAD, 10), @@ -624,8 +707,8 @@ (void) _constants.append(NULL); // Set name and signature index. - _name_index = cpool_symbol_put(_callee->name()); - _signature_index = cpool_symbol_put(_callee->signature()); + _name_index = cpool_symbol_put(name); + _signature_index = cpool_symbol_put(signature); // Get return type klass. Handle first_mtype(THREAD, chain().method_type_oop()); @@ -633,7 +716,8 @@ _rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(first_mtype()), &_rklass); if (_rtype == T_ARRAY) _rtype = T_OBJECT; - int params = _callee->size_of_parameters(); // Incoming arguments plus receiver. + ArgumentSizeComputer args(signature); + int params = args.size() + 1; // Incoming arguments plus receiver. _num_params = for_invokedynamic() ? params - 1 : params; // XXX Check if callee is static? } @@ -651,7 +735,7 @@ } -void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index) { +void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index, int args_size) { Bytecodes::check(op); // Are we legal? 
switch (op) { @@ -711,6 +795,7 @@ case Bytecodes::_astore_1: case Bytecodes::_astore_2: case Bytecodes::_astore_3: + case Bytecodes::_iand: case Bytecodes::_i2l: case Bytecodes::_i2f: case Bytecodes::_i2d: @@ -726,6 +811,14 @@ case Bytecodes::_d2i: case Bytecodes::_d2l: case Bytecodes::_d2f: + case Bytecodes::_iaload: + case Bytecodes::_laload: + case Bytecodes::_faload: + case Bytecodes::_daload: + case Bytecodes::_aaload: + case Bytecodes::_baload: + case Bytecodes::_caload: + case Bytecodes::_saload: case Bytecodes::_ireturn: case Bytecodes::_lreturn: case Bytecodes::_freturn: @@ -739,9 +832,14 @@ // bi case Bytecodes::_ldc: assert(Bytecodes::format_bits(op, false) == (Bytecodes::_fmt_b|Bytecodes::_fmt_has_k), "wrong bytecode format"); - assert((char) index == index, "index does not fit in 8-bit"); - _bytecode.push(op); - _bytecode.push(index); + if (index == (index & 0xff)) { + _bytecode.push(op); + _bytecode.push(index); + } else { + _bytecode.push(Bytecodes::_ldc_w); + _bytecode.push(index >> 8); + _bytecode.push(index); + } break; case Bytecodes::_iload: @@ -755,9 +853,16 @@ case Bytecodes::_dstore: case Bytecodes::_astore: assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bi, "wrong bytecode format"); - assert((char) index == index, "index does not fit in 8-bit"); - _bytecode.push(op); - _bytecode.push(index); + if (index == (index & 0xff)) { + _bytecode.push(op); + _bytecode.push(index); + } else { + // doesn't fit in a u2 + _bytecode.push(Bytecodes::_wide); + _bytecode.push(op); + _bytecode.push(index >> 8); + _bytecode.push(index); + } break; // bkk @@ -765,7 +870,7 @@ case Bytecodes::_ldc2_w: case Bytecodes::_checkcast: assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bkk, "wrong bytecode format"); - assert((short) index == index, "index does not fit in 16-bit"); + assert((unsigned short) index == index, "index does not fit in 16-bit"); _bytecode.push(op); _bytecode.push(index >> 8); _bytecode.push(index); @@ -776,12 +881,23 @@ case Bytecodes::_invokespecial: case Bytecodes::_invokevirtual: assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format"); - assert((short) index == index, "index does not fit in 16-bit"); + assert((unsigned short) index == index, "index does not fit in 16-bit"); _bytecode.push(op); _bytecode.push(index >> 8); _bytecode.push(index); break; + case Bytecodes::_invokeinterface: + assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format"); + assert((unsigned short) index == index, "index does not fit in 16-bit"); + assert(args_size > 0, "valid args_size"); + _bytecode.push(op); + _bytecode.push(index >> 8); + _bytecode.push(index); + _bytecode.push(args_size); + _bytecode.push(0); + break; + default: ShouldNotReachHere(); } @@ -900,7 +1016,8 @@ const ArgToken& src, TRAPS) { BasicType srctype = src.basic_type(); - int index = src.index(); + TokenType tt = src.token_type(); + int index = -1; switch (op) { case Bytecodes::_i2l: @@ -921,23 +1038,40 @@ case Bytecodes::_d2i: case Bytecodes::_d2l: case Bytecodes::_d2f: - emit_load(srctype, index); + if (tt == tt_constant) { + emit_load_constant(src); + } else { + emit_load(srctype, src.index()); + } stack_pop(srctype); // pop the src type emit_bc(op); stack_push(type); // push the dest value - if (srctype != type) + if (tt != tt_constant) + index = src.index(); + if (srctype != type || index == -1) index = new_local_index(type); emit_store(type, index); break; case Bytecodes::_checkcast: - emit_load(srctype, index); + if (tt == 
tt_constant) { + emit_load_constant(src); + } else { + emit_load(srctype, src.index()); + index = src.index(); + } emit_bc(op, cpool_klass_put(tk)); + if (index == -1) + index = new_local_index(type); emit_store(srctype, index); break; default: - ShouldNotReachHere(); + if (op == Bytecodes::_illegal) + lose("no such primitive conversion", THREAD); + else + lose("bad primitive conversion op", THREAD); + return make_prim_constant(type, &zero_jvalue, THREAD); } return make_parameter(type, tk, index, THREAD); @@ -948,7 +1082,9 @@ // MethodHandleCompiler // -static jvalue zero_jvalue; +// Values used by the compiler. +jvalue MethodHandleCompiler::zero_jvalue = { 0 }; +jvalue MethodHandleCompiler::one_jvalue = { 1 }; // Emit bytecodes for the given invoke instruction. MethodHandleWalker::ArgToken @@ -956,24 +1092,23 @@ Bytecodes::Code op, bool tailcall, int argc, MethodHandleWalker::ArgToken* argv, TRAPS) { + ArgToken zero; if (m == NULL) { // Get the intrinsic methodOop. m = vmIntrinsics::method_for(iid); - if (m == NULL && iid == vmIntrinsics::_checkSpreadArgument && AllowTransitionalJSR292) { - m = vmIntrinsics::method_for(vmIntrinsics::_checkSpreadArgument_TRANS); - if (m == NULL) - // sun.dyn.MethodHandleImpl not found, look for java.dyn.MethodHandleNatives: - m = vmIntrinsics::method_for(vmIntrinsics::_checkSpreadArgument_TRANS2); - } if (m == NULL) { - ArgToken zero; lose(vmIntrinsics::name_at(iid), CHECK_(zero)); } } - klassOop klass = m->method_holder(); - Symbol* name = m->name(); - Symbol* signature = m->signature(); + klassOop klass = m->method_holder(); + Symbol* name = m->name(); + Symbol* signature = m->signature(); + + // Count the number of arguments, not the size + ArgumentCount asc(signature); + assert(argc == asc.size() + ((op == Bytecodes::_invokestatic || op == Bytecodes::_invokedynamic) ? 0 : 1), + "argc mismatch"); if (tailcall) { // Actually, in order to make these methods more recognizable, @@ -1023,9 +1158,13 @@ case Bytecodes::_invokevirtual: emit_bc(op, methodref_index); break; - case Bytecodes::_invokeinterface: - Unimplemented(); + + case Bytecodes::_invokeinterface: { + ArgumentSizeComputer asc(signature); + emit_bc(op, methodref_index, asc.size() + 1); break; + } + default: ShouldNotReachHere(); } @@ -1034,12 +1173,12 @@ // Otherwise, make a recursive call to some helper routine. BasicType rbt = m->result_type(); if (rbt == T_ARRAY) rbt = T_OBJECT; + stack_push(rbt); // The return value is already pushed onto the stack. ArgToken ret; if (tailcall) { if (rbt != _rtype) { if (rbt == T_VOID) { // push a zero of the right sort - ArgToken zero; if (_rtype == T_OBJECT) { zero = make_oop_constant(NULL, CHECK_(zero)); } else { @@ -1049,9 +1188,27 @@ } else if (_rtype == T_VOID) { // We'll emit a _return with something on the stack. // It's OK to ignore what's on the stack. + } else if (rbt == T_INT && is_subword_type(_rtype)) { + // Convert value to match return type. + switch (_rtype) { + case T_BOOLEAN: { + // boolean is treated as a one-bit unsigned integer. + // Cf. 
API documentation: java/lang/invoke/MethodHandles.html#explicitCastArguments + ArgToken one = make_prim_constant(T_INT, &one_jvalue, CHECK_(zero)); + emit_load_constant(one); + emit_bc(Bytecodes::_iand); + break; + } + case T_BYTE: emit_bc(Bytecodes::_i2b); break; + case T_CHAR: emit_bc(Bytecodes::_i2c); break; + case T_SHORT: emit_bc(Bytecodes::_i2s); break; + default: ShouldNotReachHere(); + } + } else if (is_subword_type(rbt) && (is_subword_type(_rtype) || (_rtype == T_INT))) { + // The subword type was returned as an int and will be passed + // on as an int. } else { - tty->print_cr("*** rbt=%d != rtype=%d", rbt, _rtype); - assert(false, "IMPLEMENT ME"); + lose("unknown conversion", CHECK_(zero)); } } switch (_rtype) { @@ -1071,7 +1228,6 @@ ret = ArgToken(); // Dummy return value. } else { - stack_push(rbt); // The return value is already pushed onto the stack. int index = new_local_index(rbt); switch (rbt) { case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT: @@ -1096,8 +1252,32 @@ const MethodHandleWalker::ArgToken& base, const MethodHandleWalker::ArgToken& offset, TRAPS) { - Unimplemented(); - return ArgToken(); + switch (base.token_type()) { + case tt_parameter: + case tt_temporary: + emit_load(base.basic_type(), base.index()); + break; + case tt_constant: + emit_load_constant(base); + break; + default: + ShouldNotReachHere(); + } + switch (offset.token_type()) { + case tt_parameter: + case tt_temporary: + emit_load(offset.basic_type(), offset.index()); + break; + case tt_constant: + emit_load_constant(offset); + break; + default: + ShouldNotReachHere(); + } + emit_bc(op); + int index = new_local_index(type); + emit_store(type, index); + return ArgToken(tt_temporary, type, index); } @@ -1181,7 +1361,7 @@ methodHandle MethodHandleCompiler::get_method_oop(TRAPS) const { - methodHandle nullHandle; + methodHandle empty; // Create a method that holds the generated bytecode. invokedynamic // has no receiver, normal MH calls do. int flags_bits; @@ -1190,13 +1370,16 @@ else flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL | JVM_ACC_SYNTHETIC); - methodOop m_oop = oopFactory::new_method(bytecode_length(), - accessFlags_from(flags_bits), - 0, 0, 0, oopDesc::IsSafeConc, CHECK_(nullHandle)); - methodHandle m(THREAD, m_oop); - m_oop = NULL; // oop not GC safe + // Create a new method + methodHandle m; + { + methodOop m_oop = oopFactory::new_method(bytecode_length(), + accessFlags_from(flags_bits), + 0, 0, 0, oopDesc::IsSafeConc, CHECK_(empty)); + m = methodHandle(THREAD, m_oop); + } - constantPoolHandle cpool = get_constant_pool(CHECK_(nullHandle)); + constantPoolHandle cpool = get_constant_pool(CHECK_(empty)); m->set_constants(cpool()); m->set_name_index(_name_index); @@ -1211,16 +1394,34 @@ typeArrayHandle exception_handlers(THREAD, Universe::the_empty_int_array()); m->set_exception_table(exception_handlers()); - // Set the carry bit of the invocation counter to force inlining of - // the adapter. - InvocationCounter* ic = m->invocation_counter(); - ic->set_carry_flag(); - // Rewrite the method and set up the constant pool cache. - objArrayOop m_array = oopFactory::new_system_objArray(1, CHECK_(nullHandle)); + objArrayOop m_array = oopFactory::new_system_objArray(1, CHECK_(empty)); objArrayHandle methods(THREAD, m_array); methods->obj_at_put(0, m()); - Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(nullHandle)); // Use fake class. + Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(empty)); // Use fake class. 
+ + // Set the invocation counter's count to the invoke count of the + // original call site. + InvocationCounter* ic = m->invocation_counter(); + ic->set(InvocationCounter::wait_for_compile, _invoke_count); + + // Create a new MDO + { + methodDataOop mdo = oopFactory::new_methodData(m, CHECK_(empty)); + assert(m->method_data() == NULL, "there should not be an MDO yet"); + m->set_method_data(mdo); + + // Iterate over all profile data and set the count of the counter + // data entries to the original call site counter. + for (ProfileData* profile_data = mdo->first_data(); + mdo->is_valid(profile_data); + profile_data = mdo->next_data(profile_data)) { + if (profile_data->is_CounterData()) { + CounterData* counter_data = profile_data->as_CounterData(); + counter_data->set_count(_invoke_count); + } + } + } #ifndef PRODUCT if (TraceMethodHandles) { @@ -1236,7 +1437,6 @@ #ifndef PRODUCT -#if 0 // MH printer for debugging. class MethodHandlePrinter : public MethodHandleWalker { @@ -1244,6 +1444,7 @@ outputStream* _out; bool _verbose; int _temp_num; + int _param_state; stringStream _strbuf; const char* strbuf() { const char* s = _strbuf.as_string(); @@ -1251,14 +1452,19 @@ return s; } ArgToken token(const char* str) { - return (ArgToken) str; + return ArgToken(str); + } + const char* string(ArgToken token) { + return token.str(); } void start_params() { + _param_state <<= 1; _out->print("("); } void end_params() { if (_verbose) _out->print("\n"); _out->print(") => {"); + _param_state >>= 1; } void put_type_name(BasicType type, klassOop tk, outputStream* s) { const char* kname = NULL; @@ -1278,9 +1484,10 @@ public: MethodHandlePrinter(Handle root, bool verbose, outputStream* out, TRAPS) - : MethodHandleWalker(root, THREAD), + : MethodHandleWalker(root, false, THREAD), _out(out), _verbose(verbose), + _param_state(0), _temp_num(0) { start_params(); @@ -1288,9 +1495,10 @@ virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) { if (argnum < 0) { end_params(); - return NULL; + return token("return"); } - if (argnum == 0) { + if ((_param_state & 1) == 0) { + _param_state |= 1; _out->print(_verbose ? "\n " : ""); } else { _out->print(_verbose ? 
",\n " : ", "); @@ -1320,8 +1528,15 @@ java_lang_boxing_object::print(type, con, &_strbuf); return maybe_make_temp("constant", type, "k"); } - virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, ArgToken src, TRAPS) { - _strbuf.print("%s(%s", Bytecodes::name(op), (const char*)src); + void print_bytecode_name(Bytecodes::Code op) { + if (Bytecodes::is_defined(op)) + _strbuf.print("%s", Bytecodes::name(op)); + else + _strbuf.print("bytecode_%d", (int) op); + } + virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS) { + print_bytecode_name(op); + _strbuf.print("(%s", string(src)); if (tk != NULL) { _strbuf.print(", "); put_type_name(type, tk, &_strbuf); @@ -1329,8 +1544,8 @@ _strbuf.print(")"); return maybe_make_temp("convert", type, "v"); } - virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, ArgToken base, ArgToken offset, TRAPS) { - _strbuf.print("%s(%s, %s", Bytecodes::name(op), (const char*)base, (const char*)offset); + virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS) { + _strbuf.print("%s(%s, %s", Bytecodes::name(op), string(base), string(offset)); if (tk != NULL) { _strbuf.print(", "); put_type_name(type, tk, &_strbuf); @@ -1341,7 +1556,8 @@ virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS) { - Symbol* name, sig; + Symbol* name; + Symbol* sig; if (m != NULL) { name = m->name(); sig = m->signature(); @@ -1351,7 +1567,7 @@ } _strbuf.print("%s %s%s(", Bytecodes::name(op), name->as_C_string(), sig->as_C_string()); for (int i = 0; i < argc; i++) { - _strbuf.print("%s%s", (i > 0 ? ", " : ""), (const char*)argv[i]); + _strbuf.print("%s%s", (i > 0 ? ", " : ""), string(argv[i])); } _strbuf.print(")"); if (!tailcall) { @@ -1389,24 +1605,20 @@ if (HAS_PENDING_EXCEPTION) { oop ex = PENDING_EXCEPTION; CLEAR_PENDING_EXCEPTION; - out->print("\n*** "); - if (ex != Universe::virtual_machine_error_instance()) - ex->print_on(out); - else - out->print("lose: %s", printer.lose_message()); - out->print("\n}\n"); + out->print(" *** "); + if (printer.lose_message() != NULL) out->print("%s ", printer.lose_message()); + out->print("}"); } out->print("\n"); } }; -#endif // 0 extern "C" void print_method_handle(oop mh) { if (!mh->is_oop()) { - tty->print_cr("*** not a method handle: "INTPTR_FORMAT, (intptr_t)mh); + tty->print_cr("*** not a method handle: "PTR_FORMAT, (intptr_t)mh); } else if (java_lang_invoke_MethodHandle::is_instance(mh)) { - //MethodHandlePrinter::print(mh); + MethodHandlePrinter::print(mh); } else { tty->print("*** not a method handle: "); mh->print();
--- a/src/share/vm/prims/methodHandleWalk.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/methodHandleWalk.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -113,6 +113,7 @@ tt_parameter, tt_temporary, tt_constant, + tt_symbolic, tt_illegal }; @@ -125,26 +126,34 @@ Handle _handle; public: - ArgToken(TokenType tt = tt_illegal) : _tt(tt) {} - ArgToken(TokenType tt, BasicType bt, jvalue value) : _tt(tt), _bt(bt), _value(value) {} + ArgToken(TokenType tt = tt_illegal) : _tt(tt) { + assert(tt == tt_illegal || tt == tt_void, "invalid token type"); + } ArgToken(TokenType tt, BasicType bt, int index) : _tt(tt), _bt(bt) { + assert(_tt == tt_parameter || _tt == tt_temporary, "must have index"); _value.i = index; } - ArgToken(TokenType tt, BasicType bt, Handle value) : _tt(tt), _bt(bt) { - _handle = value; + ArgToken(BasicType bt, jvalue value) : _tt(tt_constant), _bt(bt), _value(value) {} + ArgToken(BasicType bt, Handle value) : _tt(tt_constant), _bt(bt), _handle(value) {} + + + ArgToken(const char* str) : _tt(tt_symbolic), _bt(T_LONG) { + _value.j = (intptr_t)str; } TokenType token_type() const { return _tt; } BasicType basic_type() const { return _bt; } - int index() const { return _value.i; } - Handle object() const { return _handle; } + bool has_index() const { return _tt == tt_parameter || _tt == tt_temporary; } + int index() const { assert(has_index(), "must have index");; return _value.i; } + Handle object() const { assert(_tt == tt_constant, "value type"); return _handle; } + const char* str() const { assert(_tt == tt_symbolic, "string type"); return (const char*)_value.j; } - jint get_jint() const { return _value.i; } - jlong get_jlong() const { return _value.j; } - jfloat get_jfloat() const { return _value.f; } - jdouble get_jdouble() const { return _value.d; } + jint get_jint() const { assert(_tt == tt_constant, "value types"); return _value.i; } + jlong get_jlong() const { assert(_tt == tt_constant, "value types"); return _value.j; } + jfloat get_jfloat() const { assert(_tt == tt_constant, "value types"); return _value.f; } + jdouble get_jdouble() const { assert(_tt == tt_constant, "value types"); return _value.d; } }; // Abstract interpretation state: @@ -164,6 +173,10 @@ bool _for_invokedynamic; int _local_index; + // This array is kept in an unusual order, indexed by low-level "slot number". + // TOS is always _outgoing.at(0), so simple pushes and pops shift the whole _outgoing array. + // If there is a receiver in the current argument list, it is at _outgoing.at(_outgoing.length()-1). + // If a value at _outgoing.at(n) is T_LONG or T_DOUBLE, the value at _outgoing.at(n+1) is T_VOID. GrowableArray<SlotState> _outgoing; // current outgoing parameter slots int _outgoing_argc; // # non-empty outgoing slots @@ -173,6 +186,11 @@ // Insert or delete a second empty slot as needed. void change_argument(BasicType old_type, int slot, BasicType new_type, const ArgToken& new_arg); + // Raw retype conversions for OP_RAW_RETYPE. 
+ void retype_raw_conversion(BasicType src, BasicType dst, bool for_return, int slot, TRAPS); + void retype_raw_argument_type(BasicType src, BasicType dst, int slot, TRAPS) { retype_raw_conversion(src, dst, false, slot, CHECK); } + void retype_raw_return_type( BasicType src, BasicType dst, TRAPS) { retype_raw_conversion(src, dst, true, -1, CHECK); } + SlotState* slot_state(int slot) { if (slot < 0 || slot >= _outgoing.length()) return NULL; @@ -221,12 +239,12 @@ int max_locals() const { return _local_index; } // plug-in abstract interpretation steps: - virtual ArgToken make_parameter( BasicType type, klassOop tk, int argnum, TRAPS ) = 0; - virtual ArgToken make_prim_constant( BasicType type, jvalue* con, TRAPS ) = 0; - virtual ArgToken make_oop_constant( oop con, TRAPS ) = 0; - virtual ArgToken make_conversion( BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS ) = 0; - virtual ArgToken make_fetch( BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS ) = 0; - virtual ArgToken make_invoke( methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS ) = 0; + virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) = 0; + virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) = 0; + virtual ArgToken make_oop_constant(oop con, TRAPS) = 0; + virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS) = 0; + virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS) = 0; + virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS) = 0; // For make_invoke, the methodOop can be NULL if the intrinsic ID // is something other than vmIntrinsics::_none. @@ -246,12 +264,16 @@ // The IR happens to be JVM bytecodes. class MethodHandleCompiler : public MethodHandleWalker { private: - methodHandle _callee; + int _invoke_count; // count the original call site has been executed KlassHandle _rklass; // Return type for casting. BasicType _rtype; KlassHandle _target_klass; Thread* _thread; + // Values used by the compiler. + static jvalue zero_jvalue; + static jvalue one_jvalue; + // Fake constant pool entry. 
class ConstantValue { private: @@ -343,6 +365,7 @@ int cpool_symbol_put(int tag, Symbol* con) { if (con == NULL) return 0; ConstantValue* cv = new ConstantValue(tag, con); + con->increment_refcount(); return _constants.append(cv); } @@ -388,7 +411,7 @@ return cpool_oop_reference_put(JVM_CONSTANT_NameAndType, name_index, signature_index); } - void emit_bc(Bytecodes::Code op, int index = 0); + void emit_bc(Bytecodes::Code op, int index = 0, int args_size = -1); void emit_load(BasicType bt, int index); void emit_store(BasicType bt, int index); void emit_load_constant(ArgToken arg); @@ -398,10 +421,10 @@ } virtual ArgToken make_oop_constant(oop con, TRAPS) { Handle h(THREAD, con); - return ArgToken(tt_constant, T_OBJECT, h); + return ArgToken(T_OBJECT, h); } virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) { - return ArgToken(tt_constant, type, *con); + return ArgToken(type, *con); } virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS); @@ -415,7 +438,7 @@ methodHandle get_method_oop(TRAPS) const; public: - MethodHandleCompiler(Handle root, methodHandle call_method, bool for_invokedynamic, TRAPS); + MethodHandleCompiler(Handle root, Symbol* name, Symbol* signature, int invoke_count, bool for_invokedynamic, TRAPS); // Compile the given MH chain into bytecode. methodHandle compile(TRAPS);
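[Editorial note, not part of the changeset] The comment added to MethodHandleWalker in the methodHandleWalk.hpp hunks above documents an unusual ordering for the _outgoing slot array: top of stack is always _outgoing.at(0), any receiver sits at the highest index, and a T_LONG or T_DOUBLE value at slot n is followed by a T_VOID filler at slot n+1. The standalone C++ sketch below models only that layout convention so the invariant is concrete; the names SlotModel and push_value are illustrative inventions, not HotSpot APIs, and the real walker keeps this state in a GrowableArray<SlotState>.

    // Minimal model of the _outgoing slot ordering described above.
    // Hypothetical illustration only; not HotSpot code.
    #include <cassert>
    #include <cstdio>
    #include <vector>

    enum BasicType { T_INT, T_LONG, T_DOUBLE, T_OBJECT, T_VOID };

    struct SlotModel {
      // Index 0 is top-of-stack; a two-slot value (T_LONG or T_DOUBLE) at n
      // is followed by a T_VOID filler at n+1.
      std::vector<BasicType> slots;

      void push_value(BasicType bt) {
        if (bt == T_LONG || bt == T_DOUBLE)
          slots.insert(slots.begin(), T_VOID);  // filler takes the second slot
        slots.insert(slots.begin(), bt);        // the value becomes the new TOS
      }
    };

    int main() {
      SlotModel s;
      s.push_value(T_OBJECT);  // receiver-like reference, ends up deepest
      s.push_value(T_LONG);    // occupies two slots: T_LONG then T_VOID
      s.push_value(T_INT);     // new top of stack
      assert(s.slots[0] == T_INT);
      assert(s.slots[1] == T_LONG && s.slots[2] == T_VOID);
      assert(s.slots.back() == T_OBJECT);  // receiver is at the highest index
      std::printf("modeled %zu outgoing slots\n", s.slots.size());
      return 0;
    }

Running this model also makes clear why change_argument() in the real code must insert or delete a second empty slot whenever an argument changes size between one and two slots, as its comment in the hunk above states.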
--- a/src/share/vm/prims/methodHandles.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/methodHandles.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -25,9 +25,11 @@ #include "precompiled.hpp" #include "classfile/symbolTable.hpp" #include "interpreter/interpreter.hpp" +#include "interpreter/oopMapCache.hpp" #include "memory/allocation.inline.hpp" #include "memory/oopFactory.hpp" #include "prims/methodHandles.hpp" +#include "prims/methodHandleWalk.hpp" #include "runtime/javaCalls.hpp" #include "runtime/reflection.hpp" #include "runtime/signature.hpp" @@ -66,8 +68,8 @@ "adapter_drop_args", "adapter_collect_args", "adapter_spread_args", - "adapter_flyby", - "adapter_ricochet", + "adapter_fold_args", + "adapter_unused_13", // optimized adapter types: "adapter_swap_args/1", @@ -83,9 +85,76 @@ "adapter_prim_to_prim/f2d", "adapter_ref_to_prim/unboxi", "adapter_ref_to_prim/unboxl", - "adapter_spread_args/0", - "adapter_spread_args/1", - "adapter_spread_args/more", + + // return value handlers for collect/filter/fold adapters: + "return/ref", + "return/int", + "return/long", + "return/float", + "return/double", + "return/void", + "return/S0/ref", + "return/S1/ref", + "return/S2/ref", + "return/S3/ref", + "return/S4/ref", + "return/S5/ref", + "return/any", + + // spreading (array length cases 0, 1, ...) + "adapter_spread/0", + "adapter_spread/1/ref", + "adapter_spread/2/ref", + "adapter_spread/3/ref", + "adapter_spread/4/ref", + "adapter_spread/5/ref", + "adapter_spread/ref", + "adapter_spread/byte", + "adapter_spread/char", + "adapter_spread/short", + "adapter_spread/int", + "adapter_spread/long", + "adapter_spread/float", + "adapter_spread/double", + + // blocking filter/collect conversions: + "adapter_collect/ref", + "adapter_collect/int", + "adapter_collect/long", + "adapter_collect/float", + "adapter_collect/double", + "adapter_collect/void", + "adapter_collect/0/ref", + "adapter_collect/1/ref", + "adapter_collect/2/ref", + "adapter_collect/3/ref", + "adapter_collect/4/ref", + "adapter_collect/5/ref", + "adapter_filter/S0/ref", + "adapter_filter/S1/ref", + "adapter_filter/S2/ref", + "adapter_filter/S3/ref", + "adapter_filter/S4/ref", + "adapter_filter/S5/ref", + "adapter_collect/2/S0/ref", + "adapter_collect/2/S1/ref", + "adapter_collect/2/S2/ref", + "adapter_collect/2/S3/ref", + "adapter_collect/2/S4/ref", + "adapter_collect/2/S5/ref", + + // blocking fold conversions: + "adapter_fold/ref", + "adapter_fold/int", + "adapter_fold/long", + "adapter_fold/float", + "adapter_fold/double", + "adapter_fold/void", + "adapter_fold/1/ref", + "adapter_fold/2/ref", + "adapter_fold/3/ref", + "adapter_fold/4/ref", + "adapter_fold/5/ref", NULL }; @@ -96,13 +165,23 @@ jobject MethodHandles::_raise_exception_method; +address MethodHandles::_adapter_return_handlers[CONV_TYPE_MASK+1]; + #ifdef ASSERT bool MethodHandles::spot_check_entry_names() { assert(!strcmp(entry_name(_invokestatic_mh), "invokestatic"), ""); assert(!strcmp(entry_name(_bound_ref_mh), "bound_ref"), ""); assert(!strcmp(entry_name(_adapter_retype_only), "adapter_retype_only"), ""); - assert(!strcmp(entry_name(_adapter_ricochet), "adapter_ricochet"), ""); + assert(!strcmp(entry_name(_adapter_fold_args), "adapter_fold_args"), ""); assert(!strcmp(entry_name(_adapter_opt_unboxi), "adapter_ref_to_prim/unboxi"), ""); + assert(!strcmp(entry_name(_adapter_opt_spread_char), "adapter_spread/char"), ""); + assert(!strcmp(entry_name(_adapter_opt_spread_double), "adapter_spread/double"), ""); + assert(!strcmp(entry_name(_adapter_opt_collect_int), 
"adapter_collect/int"), ""); + assert(!strcmp(entry_name(_adapter_opt_collect_0_ref), "adapter_collect/0/ref"), ""); + assert(!strcmp(entry_name(_adapter_opt_collect_2_S3_ref), "adapter_collect/2/S3/ref"), ""); + assert(!strcmp(entry_name(_adapter_opt_filter_S5_ref), "adapter_filter/S5/ref"), ""); + assert(!strcmp(entry_name(_adapter_opt_fold_3_ref), "adapter_fold/3/ref"), ""); + assert(!strcmp(entry_name(_adapter_opt_fold_void), "adapter_fold/void"), ""); return true; } #endif @@ -112,6 +191,9 @@ // MethodHandles::generate_adapters // void MethodHandles::generate_adapters() { +#ifdef TARGET_ARCH_NYI_6939861 + if (FLAG_IS_DEFAULT(UseRicochetFrames)) UseRicochetFrames = false; +#endif if (!EnableInvokeDynamic || SystemDictionary::MethodHandle_klass() == NULL) return; assert(_adapter_code == NULL, "generate only once"); @@ -126,7 +208,6 @@ g.generate(); } - //------------------------------------------------------------------------------ // MethodHandlesAdapterGenerator::generate // @@ -135,12 +216,62 @@ for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST; ek < MethodHandles::_EK_LIMIT; ek = MethodHandles::EntryKind(1 + (int)ek)) { - StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek)); - MethodHandles::generate_method_handle_stub(_masm, ek); + if (MethodHandles::ek_supported(ek)) { + StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek)); + MethodHandles::generate_method_handle_stub(_masm, ek); + } } } +#ifdef TARGET_ARCH_NYI_6939861 +// these defs belong in methodHandles_<arch>.cpp +frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) { + ShouldNotCallThis(); + return fr; +} +void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* f, const RegisterMap* reg_map) { + ShouldNotCallThis(); +} +#endif //TARGET_ARCH_NYI_6939861 + + +//------------------------------------------------------------------------------ +// MethodHandles::ek_supported +// +bool MethodHandles::ek_supported(MethodHandles::EntryKind ek) { + MethodHandles::EntryKind ek_orig = MethodHandles::ek_original_kind(ek); + switch (ek_orig) { + case _adapter_unused_13: + return false; // not defined yet + case _adapter_prim_to_ref: + return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF); + case _adapter_collect_args: + return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS); + case _adapter_fold_args: + return UseRicochetFrames && conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS); + case _adapter_opt_return_any: + return UseRicochetFrames; +#ifdef TARGET_ARCH_NYI_6939861 + // ports before 6939861 supported only three kinds of spread ops + case _adapter_spread_args: + // restrict spreads to three kinds: + switch (ek) { + case _adapter_opt_spread_0: + case _adapter_opt_spread_1: + case _adapter_opt_spread_more: + break; + default: + return false; + break; + } + break; +#endif //TARGET_ARCH_NYI_6939861 + } + return true; +} + + void MethodHandles::set_enabled(bool z) { if (_enabled != z) { guarantee(z && EnableInvokeDynamic, "can only enable once, and only if -XX:+EnableInvokeDynamic"); @@ -153,9 +284,9 @@ // and local, like parse a data structure. For speed, such methods work on plain // oops, not handles. Trapping methods uniformly operate on handles. 
-methodOop MethodHandles::decode_vmtarget(oop vmtarget, int vmindex, oop mtype, - klassOop& receiver_limit_result, int& decode_flags_result) { - if (vmtarget == NULL) return NULL; +methodHandle MethodHandles::decode_vmtarget(oop vmtarget, int vmindex, oop mtype, + KlassHandle& receiver_limit_result, int& decode_flags_result) { + if (vmtarget == NULL) return methodHandle(); assert(methodOopDesc::nonvirtual_vtable_index < 0, "encoding"); if (vmindex < 0) { // this DMH performs no dispatch; it is directly bound to a methodOop @@ -198,20 +329,20 @@ // MemberName and DirectMethodHandle have the same linkage to the JVM internals. // (MemberName is the non-operational name used for queries and setup.) -methodOop MethodHandles::decode_DirectMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) { +methodHandle MethodHandles::decode_DirectMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) { oop vmtarget = java_lang_invoke_DirectMethodHandle::vmtarget(mh); int vmindex = java_lang_invoke_DirectMethodHandle::vmindex(mh); oop mtype = java_lang_invoke_DirectMethodHandle::type(mh); return decode_vmtarget(vmtarget, vmindex, mtype, receiver_limit_result, decode_flags_result); } -methodOop MethodHandles::decode_BoundMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) { +methodHandle MethodHandles::decode_BoundMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) { assert(java_lang_invoke_BoundMethodHandle::is_instance(mh), ""); assert(mh->klass() != SystemDictionary::AdapterMethodHandle_klass(), ""); for (oop bmh = mh;;) { // Bound MHs can be stacked to bind several arguments. oop target = java_lang_invoke_MethodHandle::vmtarget(bmh); - if (target == NULL) return NULL; + if (target == NULL) return methodHandle(); decode_flags_result |= MethodHandles::_dmf_binds_argument; klassOop tk = target->klass(); if (tk == SystemDictionary::BoundMethodHandle_klass()) { @@ -236,14 +367,14 @@ } } -methodOop MethodHandles::decode_AdapterMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) { +methodHandle MethodHandles::decode_AdapterMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) { assert(mh->klass() == SystemDictionary::AdapterMethodHandle_klass(), ""); for (oop amh = mh;;) { // Adapter MHs can be stacked to convert several arguments. int conv_op = adapter_conversion_op(java_lang_invoke_AdapterMethodHandle::conversion(amh)); decode_flags_result |= (_dmf_adapter_lsb << conv_op) & _DMF_ADAPTER_MASK; oop target = java_lang_invoke_MethodHandle::vmtarget(amh); - if (target == NULL) return NULL; + if (target == NULL) return methodHandle(); klassOop tk = target->klass(); if (tk == SystemDictionary::AdapterMethodHandle_klass()) { amh = target; @@ -255,8 +386,8 @@ } } -methodOop MethodHandles::decode_MethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) { - if (mh == NULL) return NULL; +methodHandle MethodHandles::decode_MethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) { + if (mh == NULL) return methodHandle(); klassOop mhk = mh->klass(); assert(java_lang_invoke_MethodHandle::is_subclass(mhk), "must be a MethodHandle"); if (mhk == SystemDictionary::DirectMethodHandle_klass()) { @@ -270,7 +401,7 @@ return decode_BoundMethodHandle(mh, receiver_limit_result, decode_flags_result); } else { assert(false, "cannot parse this MH"); - return NULL; // random MH? + return methodHandle(); // random MH? 
} } @@ -299,9 +430,9 @@ // A trusted party is handing us a cookie to determine a method. // Let's boil it down to the method oop they really want. -methodOop MethodHandles::decode_method(oop x, klassOop& receiver_limit_result, int& decode_flags_result) { +methodHandle MethodHandles::decode_method(oop x, KlassHandle& receiver_limit_result, int& decode_flags_result) { decode_flags_result = 0; - receiver_limit_result = NULL; + receiver_limit_result = KlassHandle(); klassOop xk = x->klass(); if (xk == Universe::methodKlassObj()) { return decode_methodOop((methodOop) x, decode_flags_result); @@ -329,7 +460,7 @@ assert(!x->is_method(), "already checked"); assert(!java_lang_invoke_MemberName::is_instance(x), "already checked"); } - return NULL; + return methodHandle(); } @@ -389,11 +520,10 @@ int offset = instanceKlass::cast(k)->offset_from_fields(slot); init_MemberName(mname_oop, k, accessFlags_from(mods), offset); } else { - int decode_flags = 0; klassOop receiver_limit = NULL; - methodOop m = MethodHandles::decode_method(target_oop, - receiver_limit, decode_flags); + KlassHandle receiver_limit; int decode_flags = 0; + methodHandle m = MethodHandles::decode_method(target_oop, receiver_limit, decode_flags); bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0); - init_MemberName(mname_oop, m, do_dispatch); + init_MemberName(mname_oop, m(), do_dispatch); } } @@ -423,13 +553,14 @@ } -methodOop MethodHandles::decode_MemberName(oop mname, klassOop& receiver_limit_result, int& decode_flags_result) { +methodHandle MethodHandles::decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result) { + methodHandle empty; int flags = java_lang_invoke_MemberName::flags(mname); - if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0) return NULL; // not invocable + if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0) return empty; // not invocable oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname); int vmindex = java_lang_invoke_MemberName::vmindex(mname); - if (vmindex == VM_INDEX_UNINITIALIZED) return NULL; // not resolved - methodOop m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result); + if (vmindex == VM_INDEX_UNINITIALIZED) return empty; // not resolved + methodHandle m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result); oop clazz = java_lang_invoke_MemberName::clazz(mname); if (clazz != NULL && java_lang_Class::is_instance(clazz)) { klassOop klass = java_lang_Class::as_klassOop(clazz); @@ -439,9 +570,7 @@ } // convert the external string or reflective type to an internal signature -Symbol* MethodHandles::convert_to_signature(oop type_str, - bool polymorphic, - TRAPS) { +Symbol* MethodHandles::convert_to_signature(oop type_str, bool polymorphic, TRAPS) { if (java_lang_invoke_MethodType::is_instance(type_str)) { return java_lang_invoke_MethodType::as_signature(type_str, polymorphic, CHECK_NULL); } else if (java_lang_Class::is_instance(type_str)) { @@ -474,48 +603,48 @@ #endif if (java_lang_invoke_MemberName::vmindex(mname()) != VM_INDEX_UNINITIALIZED) return; // already resolved - oop defc_oop = java_lang_invoke_MemberName::clazz(mname()); - oop name_str = java_lang_invoke_MemberName::name(mname()); - oop type_str = java_lang_invoke_MemberName::type(mname()); - int flags = java_lang_invoke_MemberName::flags(mname()); + Handle defc_oop(THREAD, java_lang_invoke_MemberName::clazz(mname())); + Handle name_str(THREAD, java_lang_invoke_MemberName::name( mname())); + Handle type_str(THREAD, 
java_lang_invoke_MemberName::type( mname())); + int flags = java_lang_invoke_MemberName::flags(mname()); - if (defc_oop == NULL || name_str == NULL || type_str == NULL) { + if (defc_oop.is_null() || name_str.is_null() || type_str.is_null()) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "nothing to resolve"); } - klassOop defc_klassOop = java_lang_Class::as_klassOop(defc_oop); - defc_oop = NULL; // safety - if (defc_klassOop == NULL) return; // a primitive; no resolution possible - if (!Klass::cast(defc_klassOop)->oop_is_instance()) { - if (!Klass::cast(defc_klassOop)->oop_is_array()) return; - defc_klassOop = SystemDictionary::Object_klass(); + + instanceKlassHandle defc; + { + klassOop defc_klassOop = java_lang_Class::as_klassOop(defc_oop()); + if (defc_klassOop == NULL) return; // a primitive; no resolution possible + if (!Klass::cast(defc_klassOop)->oop_is_instance()) { + if (!Klass::cast(defc_klassOop)->oop_is_array()) return; + defc_klassOop = SystemDictionary::Object_klass(); + } + defc = instanceKlassHandle(THREAD, defc_klassOop); } - instanceKlassHandle defc(THREAD, defc_klassOop); - defc_klassOop = NULL; // safety if (defc.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "primitive class"); } - defc->link_class(CHECK); + defc->link_class(CHECK); // possible safepoint // convert the external string name to an internal symbol - TempNewSymbol name = java_lang_String::as_symbol_or_null(name_str); + TempNewSymbol name = java_lang_String::as_symbol_or_null(name_str()); if (name == NULL) return; // no such name - name_str = NULL; // safety Handle polymorphic_method_type; bool polymorphic_signature = false; if ((flags & ALL_KINDS) == IS_METHOD && (defc() == SystemDictionary::MethodHandle_klass() && - methodOopDesc::is_method_handle_invoke_name(name))) + methodOopDesc::is_method_handle_invoke_name(name))) { polymorphic_signature = true; + } // convert the external string or reflective type to an internal signature - TempNewSymbol type = convert_to_signature(type_str, polymorphic_signature, CHECK); - if (java_lang_invoke_MethodType::is_instance(type_str) && polymorphic_signature) { - polymorphic_method_type = Handle(THREAD, type_str); //preserve exactly + TempNewSymbol type = convert_to_signature(type_str(), polymorphic_signature, CHECK); + if (java_lang_invoke_MethodType::is_instance(type_str()) && polymorphic_signature) { + polymorphic_method_type = type_str; // preserve exactly } - if (type == NULL) return; // no such signature exists in the VM - type_str = NULL; // safety // Time to do the lookup. 
switch (flags & ALL_KINDS) { @@ -560,8 +689,8 @@ java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget); java_lang_invoke_MemberName::set_vmindex(mname(), vmindex); java_lang_invoke_MemberName::set_modifiers(mname(), mods); - DEBUG_ONLY(int junk; klassOop junk2); - assert(decode_MemberName(mname(), junk2, junk) == result.resolved_method()(), + DEBUG_ONLY(KlassHandle junk1; int junk2); + assert(decode_MemberName(mname(), junk1, junk2) == result.resolved_method(), "properly stored for later decoding"); return; } @@ -589,8 +718,8 @@ java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget); java_lang_invoke_MemberName::set_vmindex(mname(), vmindex); java_lang_invoke_MemberName::set_modifiers(mname(), mods); - DEBUG_ONLY(int junk; klassOop junk2); - assert(decode_MemberName(mname(), junk2, junk) == result.resolved_method()(), + DEBUG_ONLY(KlassHandle junk1; int junk2); + assert(decode_MemberName(mname(), junk1, junk2) == result.resolved_method(), "properly stored for later decoding"); return; } @@ -677,16 +806,14 @@ case IS_METHOD: case IS_CONSTRUCTOR: { - klassOop receiver_limit = NULL; - int decode_flags = 0; - methodHandle m(THREAD, decode_vmtarget(vmtarget, vmindex, NULL, - receiver_limit, decode_flags)); + KlassHandle receiver_limit; int decode_flags = 0; + methodHandle m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit, decode_flags); if (m.is_null()) break; if (!have_defc) { klassOop defc = m->method_holder(); - if (receiver_limit != NULL && receiver_limit != defc - && Klass::cast(receiver_limit)->is_subtype_of(defc)) - defc = receiver_limit; + if (receiver_limit.not_null() && receiver_limit() != defc + && Klass::cast(receiver_limit())->is_subtype_of(defc)) + defc = receiver_limit(); java_lang_invoke_MemberName::set_clazz(mname(), Klass::cast(defc)->java_mirror()); } if (!have_name) { @@ -884,10 +1011,9 @@ // - AMH can have methodOop for static invoke with bound receiver // - DMH can have methodOop for static invoke (on variable receiver) // - DMH can have klassOop for dispatched (non-static) invoke - klassOop receiver_limit = NULL; - int decode_flags = 0; - methodOop m = decode_MethodHandle(mh(), receiver_limit, decode_flags); - if (m == NULL) return NULL; + KlassHandle receiver_limit; int decode_flags = 0; + methodHandle m = decode_MethodHandle(mh(), receiver_limit, decode_flags); + if (m.is_null()) return NULL; switch (format) { case ETF_REFLECT_METHOD: // same as jni_ToReflectedMethod: @@ -903,10 +1029,10 @@ if (SystemDictionary::MemberName_klass() == NULL) break; instanceKlassHandle mname_klass(THREAD, SystemDictionary::MemberName_klass()); mname_klass->initialize(CHECK_NULL); - Handle mname = mname_klass->allocate_instance_handle(CHECK_NULL); + Handle mname = mname_klass->allocate_instance_handle(CHECK_NULL); // possible safepoint java_lang_invoke_MemberName::set_vmindex(mname(), VM_INDEX_UNINITIALIZED); bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0); - init_MemberName(mname(), m, do_dispatch); + init_MemberName(mname(), m(), do_dispatch); expand_MemberName(mname, 0, CHECK_NULL); return mname(); } @@ -928,6 +1054,7 @@ }; static bool is_always_null_type(klassOop klass) { + if (klass == NULL) return false; // safety if (!Klass::cast(klass)->oop_is_instance()) return false; instanceKlass* ik = instanceKlass::cast(klass); // Must be on the boot class path: @@ -944,6 +1071,8 @@ } bool MethodHandles::class_cast_needed(klassOop src, klassOop dst) { + if (dst == NULL) return true; + if (src == NULL) return (dst != 
SystemDictionary::Object_klass()); if (src == dst || dst == SystemDictionary::Object_klass()) return false; // quickest checks Klass* srck = Klass::cast(src); @@ -972,6 +1101,14 @@ return Klass::cast(SystemDictionary::Object_klass())->java_mirror(); } +bool MethodHandles::is_float_fixed_reinterpretation_cast(BasicType src, BasicType dst) { + if (src == T_FLOAT) return dst == T_INT; + if (src == T_INT) return dst == T_FLOAT; + if (src == T_DOUBLE) return dst == T_LONG; + if (src == T_LONG) return dst == T_DOUBLE; + return false; +} + bool MethodHandles::same_basic_type_for_arguments(BasicType src, BasicType dst, bool raw, @@ -998,10 +1135,8 @@ return true; // remaining case: byte fits in short } // allow float/fixed reinterpretation casts - if (src == T_FLOAT) return dst == T_INT; - if (src == T_INT) return dst == T_FLOAT; - if (src == T_DOUBLE) return dst == T_LONG; - if (src == T_LONG) return dst == T_DOUBLE; + if (is_float_fixed_reinterpretation_cast(src, dst)) + return true; return false; } @@ -1026,10 +1161,15 @@ int first_ptype_pos, KlassHandle insert_ptype, TRAPS) { + Handle mhi_type; + if (m->is_method_handle_invoke()) { + // use this more exact typing instead of the symbolic signature: + mhi_type = Handle(THREAD, m->method_handle_type()); + } objArrayHandle ptypes(THREAD, java_lang_invoke_MethodType::ptypes(mtype())); int pnum = first_ptype_pos; int pmax = ptypes->length(); - int mnum = 0; // method argument + int anum = 0; // method argument const char* err = NULL; ResourceMark rm(THREAD); for (SignatureStream ss(m->signature()); !ss.is_done(); ss.next()) { @@ -1048,47 +1188,70 @@ else ptype_oop = insert_ptype->java_mirror(); pnum += 1; - mnum += 1; + anum += 1; } - klassOop pklass = NULL; - BasicType ptype = T_OBJECT; - if (ptype_oop != NULL) - ptype = java_lang_Class::as_BasicType(ptype_oop, &pklass); - else - // null does not match any non-reference; use Object to report the error - pklass = SystemDictionary::Object_klass(); - klassOop mklass = NULL; - BasicType mtype = ss.type(); - if (mtype == T_ARRAY) mtype = T_OBJECT; // fold all refs to T_OBJECT - if (mtype == T_OBJECT) { - if (ptype_oop == NULL) { + KlassHandle pklass; + BasicType ptype = T_OBJECT; + bool have_ptype = false; + // missing ptype_oop does not match any non-reference; use Object to report the error + pklass = SystemDictionaryHandles::Object_klass(); + if (ptype_oop != NULL) { + have_ptype = true; + klassOop pklass_oop = NULL; + ptype = java_lang_Class::as_BasicType(ptype_oop, &pklass_oop); + pklass = KlassHandle(THREAD, pklass_oop); + } + ptype_oop = NULL; //done with this + KlassHandle aklass; + BasicType atype = ss.type(); + if (atype == T_ARRAY) atype = T_OBJECT; // fold all refs to T_OBJECT + if (atype == T_OBJECT) { + if (!have_ptype) { // null matches any reference continue; } - KlassHandle pklass_handle(THREAD, pklass); pklass = NULL; - // If we fail to resolve types at this point, we will throw an error. - Symbol* name = ss.as_symbol(CHECK); - instanceKlass* mk = instanceKlass::cast(m->method_holder()); - Handle loader(THREAD, mk->class_loader()); - Handle domain(THREAD, mk->protection_domain()); - mklass = SystemDictionary::resolve_or_null(name, loader, domain, CHECK); - pklass = pklass_handle(); - if (mklass == NULL && pklass != NULL && - Klass::cast(pklass)->name() == name && - m->is_method_handle_invoke()) { - // Assume a match. We can't really decode the signature of MH.invoke*. - continue; + if (mhi_type.is_null()) { + // If we fail to resolve types at this point, we will usually throw an error. 
+ TempNewSymbol name = ss.as_symbol_or_null(); + if (name != NULL) { + instanceKlass* mk = instanceKlass::cast(m->method_holder()); + Handle loader(THREAD, mk->class_loader()); + Handle domain(THREAD, mk->protection_domain()); + klassOop aklass_oop = SystemDictionary::resolve_or_null(name, loader, domain, CHECK); + if (aklass_oop != NULL) + aklass = KlassHandle(THREAD, aklass_oop); + } + } else { + // for method handle invokers we don't look at the name in the signature + oop atype_oop; + if (ss.at_return_type()) + atype_oop = java_lang_invoke_MethodType::rtype(mhi_type()); + else + atype_oop = java_lang_invoke_MethodType::ptype(mhi_type(), anum-1); + klassOop aklass_oop = NULL; + atype = java_lang_Class::as_BasicType(atype_oop, &aklass_oop); + aklass = KlassHandle(THREAD, aklass_oop); } } if (!ss.at_return_type()) { - err = check_argument_type_change(ptype, pklass, mtype, mklass, mnum); + err = check_argument_type_change(ptype, pklass(), atype, aklass(), anum); } else { - err = check_return_type_change(mtype, mklass, ptype, pklass); // note reversal! + err = check_return_type_change(atype, aklass(), ptype, pklass()); // note reversal! } if (err != NULL) break; } if (err != NULL) { +#ifndef PRODUCT + if (PrintMiscellaneous && (Verbose || WizardMode)) { + tty->print("*** verify_method_signature failed: "); + java_lang_invoke_MethodType::print_signature(mtype(), tty); + tty->cr(); + tty->print_cr(" first_ptype_pos = %d, insert_ptype = "UINTX_FORMAT, first_ptype_pos, insert_ptype()); + tty->print(" Failing method: "); + m->print(); + } +#endif //PRODUCT THROW_MSG(vmSymbols::java_lang_InternalError(), err); } } @@ -1244,7 +1407,7 @@ int argnum, bool raw) { const char* err = NULL; - bool for_return = (argnum < 0); + const bool for_return = (argnum < 0); // just in case: if (src_type == T_ARRAY) src_type = T_OBJECT; @@ -1253,17 +1416,17 @@ // Produce some nice messages if VerifyMethodHandles is turned on: if (!same_basic_type_for_arguments(src_type, dst_type, raw, for_return)) { if (src_type == T_OBJECT) { - if (raw && dst_type == T_INT && is_always_null_type(src_klass)) - return NULL; // OK to convert a null pointer to a garbage int - err = ((argnum >= 0) + if (raw && is_java_primitive(dst_type)) + return NULL; // ref-to-prim discards ref and returns zero + err = (!for_return ? "type mismatch: passing a %s for method argument #%d, which expects primitive %s" : "type mismatch: returning a %s, but caller expects primitive %s"); } else if (dst_type == T_OBJECT) { - err = ((argnum >= 0) + err = (!for_return ? "type mismatch: passing a primitive %s for method argument #%d, which expects %s" : "type mismatch: returning a primitive %s, but caller expects %s"); } else { - err = ((argnum >= 0) + err = (!for_return ? "type mismatch: passing a %s for method argument #%d, which expects %s" : "type mismatch: returning a %s, but caller expects %s"); } @@ -1272,11 +1435,11 @@ if (!class_cast_needed(dst_klass, src_klass)) { if (raw) return NULL; // reverse cast is OK; the MH target is trusted to enforce it - err = ((argnum >= 0) + err = (!for_return ? "cast required: passing a %s for method argument #%d, which expects %s" : "cast required: returning a %s, but caller expects %s"); } else { - err = ((argnum >= 0) + err = (!for_return ? 
"reference mismatch: passing a %s for method argument #%d, which expects %s" : "reference mismatch: returning a %s, but caller expects %s"); } @@ -1288,14 +1451,16 @@ // format, format, format const char* src_name = type2name(src_type); const char* dst_name = type2name(dst_type); - if (src_type == T_OBJECT) src_name = Klass::cast(src_klass)->external_name(); - if (dst_type == T_OBJECT) dst_name = Klass::cast(dst_klass)->external_name(); if (src_name == NULL) src_name = "unknown type"; if (dst_name == NULL) dst_name = "unknown type"; + if (src_type == T_OBJECT) + src_name = (src_klass != NULL) ? Klass::cast(src_klass)->external_name() : "an unresolved class"; + if (dst_type == T_OBJECT) + dst_name = (dst_klass != NULL) ? Klass::cast(dst_klass)->external_name() : "an unresolved class"; size_t msglen = strlen(err) + strlen(src_name) + strlen(dst_name) + (argnum < 10 ? 1 : 11); char* msg = NEW_RESOURCE_ARRAY(char, msglen + 1); - if (argnum >= 0) { + if (!for_return) { assert(strstr(err, "%d") != NULL, ""); jio_snprintf(msg, msglen, err, src_name, argnum, dst_name); } else { @@ -1426,8 +1591,8 @@ // that links the interpreter calls to the method. We need the same // bits, and will use the same calling sequence code. - int vmindex = methodOopDesc::garbage_vtable_index; - oop vmtarget = NULL; + int vmindex = methodOopDesc::garbage_vtable_index; + Handle vmtarget; instanceKlass::cast(m->method_holder())->link_class(CHECK); @@ -1445,7 +1610,7 @@ } else if (!do_dispatch || m->can_be_statically_bound()) { // We are simulating an invokestatic or invokespecial instruction. // Set up the method pointer, just like ConstantPoolCacheEntry::set_method(). - vmtarget = m(); + vmtarget = m; // this does not help dispatch, but it will make it possible to parse this MH: vmindex = methodOopDesc::nonvirtual_vtable_index; assert(vmindex < 0, "(>=0) == do_dispatch"); @@ -1457,7 +1622,7 @@ // For a DMH, it is done now, when the handle is created. Klass* k = Klass::cast(m->method_holder()); if (k->should_be_initialized()) { - k->initialize(CHECK); + k->initialize(CHECK); // possible safepoint } } } else { @@ -1471,10 +1636,10 @@ if (me == NULL) { THROW(vmSymbols::java_lang_InternalError()); } - java_lang_invoke_DirectMethodHandle::set_vmtarget(mh(), vmtarget); - java_lang_invoke_DirectMethodHandle::set_vmindex(mh(), vmindex); - DEBUG_ONLY(int flags; klassOop rlimit); - assert(MethodHandles::decode_method(mh(), rlimit, flags) == m(), + java_lang_invoke_DirectMethodHandle::set_vmtarget(mh(), vmtarget()); + java_lang_invoke_DirectMethodHandle::set_vmindex( mh(), vmindex); + DEBUG_ONLY(KlassHandle rlimit; int flags); + assert(MethodHandles::decode_method(mh(), rlimit, flags) == m, "properly stored for later decoding"); DEBUG_ONLY(bool actual_do_dispatch = ((flags & _dmf_does_dispatch) != 0)); assert(!(actual_do_dispatch && !do_dispatch), @@ -1490,10 +1655,13 @@ methodHandle m, TRAPS) { // Verify type. 
- oop receiver = java_lang_invoke_BoundMethodHandle::argument(mh()); + KlassHandle bound_recv_type; + { + oop receiver = java_lang_invoke_BoundMethodHandle::argument(mh()); + if (receiver != NULL) + bound_recv_type = KlassHandle(THREAD, receiver->klass()); + } Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(mh())); - KlassHandle bound_recv_type; - if (receiver != NULL) bound_recv_type = KlassHandle(THREAD, receiver->klass()); verify_method_type(m, mtype, true, bound_recv_type, CHECK); int receiver_pos = m->size_of_parameters() - 1; @@ -1533,6 +1701,8 @@ if (m->is_abstract()) { THROW(vmSymbols::java_lang_AbstractMethodError()); } java_lang_invoke_MethodHandle::init_vmslots(mh()); + int vmargslot = m->size_of_parameters() - 1; + assert(java_lang_invoke_BoundMethodHandle::vmargslot(mh()) == vmargslot, ""); if (VerifyMethodHandles) { verify_BoundMethodHandle_with_receiver(mh, m, CHECK); @@ -1540,8 +1710,8 @@ java_lang_invoke_BoundMethodHandle::set_vmtarget(mh(), m()); - DEBUG_ONLY(int junk; klassOop junk2); - assert(MethodHandles::decode_method(mh(), junk2, junk) == m(), "properly stored for later decoding"); + DEBUG_ONLY(KlassHandle junk1; int junk2); + assert(MethodHandles::decode_method(mh(), junk1, junk2) == m, "properly stored for later decoding"); assert(decode_MethodHandle_stack_pushes(mh()) == 1, "BMH pushes one stack slot"); // Done! @@ -1611,14 +1781,9 @@ DEBUG_ONLY(int this_pushes = decode_MethodHandle_stack_pushes(mh())); if (direct_to_method) { assert(this_pushes == slots_pushed, "BMH pushes one or two stack slots"); - assert(slots_pushed <= MethodHandlePushLimit, ""); } else { int target_pushes = decode_MethodHandle_stack_pushes(target()); assert(this_pushes == slots_pushed + target_pushes, "BMH stack motion must be correct"); - // do not blow the stack; use a Java-based adapter if this limit is exceeded - // FIXME - // if (slots_pushed + target_pushes > MethodHandlePushLimit) - // err = "too many bound parameters"; } } @@ -1641,16 +1806,20 @@ } java_lang_invoke_MethodHandle::init_vmslots(mh()); + int argslot = java_lang_invoke_BoundMethodHandle::vmargslot(mh()); if (VerifyMethodHandles) { int insert_after = argnum - 1; - verify_vmargslot(mh, insert_after, java_lang_invoke_BoundMethodHandle::vmargslot(mh()), CHECK); + verify_vmargslot(mh, insert_after, argslot, CHECK); verify_vmslots(mh, CHECK); } // Get bound type and required slots. - oop ptype_oop = java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum); - BasicType ptype = java_lang_Class::as_BasicType(ptype_oop); + BasicType ptype; + { + oop ptype_oop = java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum); + ptype = java_lang_Class::as_BasicType(ptype_oop); + } int slots_pushed = type2size[ptype]; // If (a) the target is a direct non-dispatched method handle, @@ -1661,13 +1830,12 @@ if (OptimizeMethodHandles && target->klass() == SystemDictionary::DirectMethodHandle_klass() && (argnum == 0 || java_lang_invoke_DirectMethodHandle::vmindex(target()) < 0)) { - int decode_flags = 0; klassOop receiver_limit_oop = NULL; - methodHandle m(THREAD, decode_method(target(), receiver_limit_oop, decode_flags)); + KlassHandle receiver_limit; int decode_flags = 0; + methodHandle m = decode_method(target(), receiver_limit, decode_flags); if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "DMH failed to decode"); } DEBUG_ONLY(int m_vmslots = m->size_of_parameters() - slots_pushed); // pos. of 1st arg. 
assert(java_lang_invoke_BoundMethodHandle::vmslots(mh()) == m_vmslots, "type w/ m sig"); if (argnum == 0 && (decode_flags & _dmf_has_receiver) != 0) { - KlassHandle receiver_limit(THREAD, receiver_limit_oop); init_BoundMethodHandle_with_receiver(mh, m, receiver_limit, decode_flags, CHECK); @@ -1736,6 +1904,7 @@ Handle target(THREAD, java_lang_invoke_AdapterMethodHandle::vmtarget(mh())); Handle src_mtype(THREAD, java_lang_invoke_MethodHandle::type(mh())); Handle dst_mtype(THREAD, java_lang_invoke_MethodHandle::type(target())); + Handle arg_mtype; const char* err = NULL; @@ -1744,25 +1913,29 @@ switch (ek) { case _adapter_check_cast: // target type of cast case _adapter_ref_to_prim: // wrapper type from which to unbox - case _adapter_prim_to_ref: // wrapper type to box into - case _adapter_collect_args: // array type to collect into case _adapter_spread_args: // array type to spread from if (!java_lang_Class::is_instance(argument()) || java_lang_Class::is_primitive(argument())) { err = "adapter requires argument of type java.lang.Class"; break; } - if (ek == _adapter_collect_args || - ek == _adapter_spread_args) { + if (ek == _adapter_spread_args) { // Make sure it is a suitable collection type. (Array, for now.) Klass* ak = Klass::cast(java_lang_Class::as_klassOop(argument())); - if (!ak->oop_is_objArray()) { - { err = "adapter requires argument of type java.lang.Class<Object[]>"; break; } - } + if (!ak->oop_is_array()) + { err = "spread adapter requires argument representing an array class"; break; } + BasicType et = arrayKlass::cast(ak->as_klassOop())->element_type(); + if (et != dest && stack_move <= 0) + { err = "spread adapter requires array class argument of correct type"; break; } } break; - case _adapter_flyby: - case _adapter_ricochet: + case _adapter_prim_to_ref: // boxer MH to use + case _adapter_collect_args: // method handle which collects the args + case _adapter_fold_args: // method handle which collects the args + if (!UseRicochetFrames) { + { err = "box/collect/fold operators are not supported"; break; } + } if (!java_lang_invoke_MethodHandle::is_instance(argument())) { err = "MethodHandle adapter argument required"; break; } + arg_mtype = Handle(THREAD, java_lang_invoke_MethodHandle::type(argument())); break; default: if (argument.not_null()) @@ -1773,6 +1946,7 @@ if (err == NULL) { // Check that the src/dest types are supplied if needed. + // Also check relevant parameter or return types. 
switch (ek) { case _adapter_check_cast: if (src != T_OBJECT || dest != T_OBJECT) { @@ -1795,8 +1969,7 @@ } break; case _adapter_prim_to_ref: - if (!is_java_primitive(src) || dest != T_OBJECT - || argument() != Klass::cast(SystemDictionary::box_klass(src))->java_mirror()) { + if (!is_java_primitive(src) || dest != T_OBJECT) { err = "adapter requires primitive src conversion subfield"; break; } break; @@ -1807,14 +1980,12 @@ err = "adapter requires src/dest conversion subfields for swap"; break; } int swap_size = type2size[src]; - oop src_mtype = java_lang_invoke_AdapterMethodHandle::type(mh()); - oop dest_mtype = java_lang_invoke_AdapterMethodHandle::type(target()); - int slot_limit = java_lang_invoke_AdapterMethodHandle::vmslots(target()); + int slot_limit = java_lang_invoke_MethodHandle::vmslots(target()); int src_slot = argslot; int dest_slot = vminfo; bool rotate_up = (src_slot > dest_slot); // upward rotation int src_arg = argnum; - int dest_arg = argument_slot_to_argnum(dest_mtype, dest_slot); + int dest_arg = argument_slot_to_argnum(dst_mtype(), dest_slot); verify_vmargslot(mh, dest_arg, dest_slot, CHECK); if (!(dest_slot >= src_slot + swap_size) && !(src_slot >= dest_slot + swap_size)) { @@ -1822,8 +1993,8 @@ } else if (ek == _adapter_swap_args && !(src_slot > dest_slot)) { err = "source of swap must be deeper in stack"; } else if (ek == _adapter_swap_args) { - err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, dest_arg), - java_lang_invoke_MethodType::ptype(dest_mtype, src_arg), + err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), dest_arg), + java_lang_invoke_MethodType::ptype(dst_mtype(), src_arg), dest_arg); } else if (ek == _adapter_rot_args) { if (rotate_up) { @@ -1831,8 +2002,8 @@ // rotate up: [dest_slot..src_slot-ss] --> [dest_slot+ss..src_slot] // that is: [src_arg+1..dest_arg] --> [src_arg..dest_arg-1] for (int i = src_arg+1; i <= dest_arg && err == NULL; i++) { - err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, i), - java_lang_invoke_MethodType::ptype(dest_mtype, i-1), + err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), i), + java_lang_invoke_MethodType::ptype(dst_mtype(), i-1), i); } } else { // rotate down @@ -1840,28 +2011,54 @@ // rotate down: [src_slot+ss..dest_slot] --> [src_slot..dest_slot-ss] // that is: [dest_arg..src_arg-1] --> [dst_arg+1..src_arg] for (int i = dest_arg; i <= src_arg-1 && err == NULL; i++) { - err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, i), - java_lang_invoke_MethodType::ptype(dest_mtype, i+1), + err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), i), + java_lang_invoke_MethodType::ptype(dst_mtype(), i+1), i); } } } if (err == NULL) - err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, src_arg), - java_lang_invoke_MethodType::ptype(dest_mtype, dest_arg), + err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), src_arg), + java_lang_invoke_MethodType::ptype(dst_mtype(), dest_arg), src_arg); } break; + case _adapter_spread_args: case _adapter_collect_args: - case _adapter_spread_args: + case _adapter_fold_args: { - BasicType coll_type = (ek == _adapter_collect_args) ? dest : src; - BasicType elem_type = (ek == _adapter_collect_args) ? 
src : dest; - if (coll_type != T_OBJECT || elem_type != T_OBJECT) { - err = "adapter requires src/dest subfields"; break; - // later: - // - consider making coll be a primitive array - // - consider making coll be a heterogeneous collection + bool is_spread = (ek == _adapter_spread_args); + bool is_fold = (ek == _adapter_fold_args); + BasicType coll_type = is_spread ? src : dest; + BasicType elem_type = is_spread ? dest : src; + // coll_type is type of args in collected form (or T_VOID if none) + // elem_type is common type of args in spread form (or T_VOID if missing or heterogeneous) + if (coll_type == 0 || elem_type == 0) { + err = "adapter requires src/dest subfields for spread or collect"; break; + } + if (is_spread && coll_type != T_OBJECT) { + err = "spread adapter requires object type for argument bundle"; break; + } + Handle spread_mtype = (is_spread ? dst_mtype : src_mtype); + int spread_slot = argslot; + int spread_arg = argnum; + int slots_pushed = stack_move / stack_move_unit(); + int coll_slot_count = type2size[coll_type]; + int spread_slot_count = (is_spread ? slots_pushed : -slots_pushed) + coll_slot_count; + if (is_fold) spread_slot_count = argument_slot_count(arg_mtype()); + if (!is_spread) { + int init_slots = argument_slot_count(src_mtype()); + int coll_slots = argument_slot_count(arg_mtype()); + if (spread_slot_count > init_slots || + spread_slot_count != coll_slots) { + err = "collect adapter has inconsistent arg counts"; break; + } + int next_slots = argument_slot_count(dst_mtype()); + int unchanged_slots_in = (init_slots - spread_slot_count); + int unchanged_slots_out = (next_slots - coll_slot_count - (is_fold ? spread_slot_count : 0)); + if (unchanged_slots_in != unchanged_slots_out) { + err = "collect adapter continuation has inconsistent arg counts"; break; + } } } break; @@ -1896,8 +2093,9 @@ } break; case _adapter_collect_args: - if (slots_pushed > 1) { - err = "adapter requires conversion subfield slots_pushed <= 1"; + case _adapter_fold_args: + if (slots_pushed > 2) { + err = "adapter requires conversion subfield slots_pushed <= 2"; } break; case _adapter_spread_args: @@ -1917,32 +2115,36 @@ } if (err == NULL) { - // Make sure this adapter does not push too deeply. + // Make sure this adapter's stack pushing is accurately recorded. 
int slots_pushed = stack_move / stack_move_unit(); int this_vmslots = java_lang_invoke_MethodHandle::vmslots(mh()); int target_vmslots = java_lang_invoke_MethodHandle::vmslots(target()); + int target_pushes = decode_MethodHandle_stack_pushes(target()); if (slots_pushed != (target_vmslots - this_vmslots)) { err = "stack_move inconsistent with previous and current MethodType vmslots"; - } else if (slots_pushed > 0) { - // verify stack_move against MethodHandlePushLimit - int target_pushes = decode_MethodHandle_stack_pushes(target()); - // do not blow the stack; use a Java-based adapter if this limit is exceeded - if (slots_pushed + target_pushes > MethodHandlePushLimit) { - err = "adapter pushes too many parameters"; + } else { + int this_pushes = decode_MethodHandle_stack_pushes(mh()); + if (slots_pushed + target_pushes != this_pushes) { + if (this_pushes == 0) + err = "adapter push count not initialized"; + else + err = "adapter push count is wrong"; } } // While we're at it, check that the stack motion decoder works: - DEBUG_ONLY(int target_pushes = decode_MethodHandle_stack_pushes(target())); DEBUG_ONLY(int this_pushes = decode_MethodHandle_stack_pushes(mh())); assert(this_pushes == slots_pushed + target_pushes, "AMH stack motion must be correct"); } if (err == NULL && vminfo != 0) { switch (ek) { - case _adapter_swap_args: - case _adapter_rot_args: - break; // OK + case _adapter_swap_args: + case _adapter_rot_args: + case _adapter_prim_to_ref: + case _adapter_collect_args: + case _adapter_fold_args: + break; // OK default: err = "vminfo subfield is reserved to the JVM"; } @@ -1986,14 +2188,15 @@ } void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS) { - oop argument = java_lang_invoke_AdapterMethodHandle::argument(mh()); - int argslot = java_lang_invoke_AdapterMethodHandle::vmargslot(mh()); - jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh()); - jint conv_op = adapter_conversion_op(conversion); + Handle argument = java_lang_invoke_AdapterMethodHandle::argument(mh()); + int argslot = java_lang_invoke_AdapterMethodHandle::vmargslot(mh()); + jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh()); + jint conv_op = adapter_conversion_op(conversion); // adjust the adapter code to the internal EntryKind enumeration: EntryKind ek_orig = adapter_entry_kind(conv_op); EntryKind ek_opt = ek_orig; // may be optimized + EntryKind ek_try; // temp // Finalize the vmtarget field (Java initialized it to null). if (!java_lang_invoke_MethodHandle::is_instance(target())) { @@ -2002,17 +2205,23 @@ } java_lang_invoke_AdapterMethodHandle::set_vmtarget(mh(), target()); - if (VerifyMethodHandles) { - verify_AdapterMethodHandle(mh, argnum, CHECK); - } - int stack_move = adapter_conversion_stack_move(conversion); BasicType src = adapter_conversion_src_type(conversion); BasicType dest = adapter_conversion_dest_type(conversion); int vminfo = adapter_conversion_vminfo(conversion); // should be zero + int slots_pushed = stack_move / stack_move_unit(); + + if (VerifyMethodHandles) { + verify_AdapterMethodHandle(mh, argnum, CHECK); + } + const char* err = NULL; + if (!conv_op_supported(conv_op)) { + err = "adapter not yet implemented in the JVM"; + } + // Now it's time to finish the case analysis and pick a MethodHandleEntry. 
switch (ek_orig) { case _adapter_retype_only: @@ -2041,20 +2250,20 @@ } else if (src == T_DOUBLE && dest == T_FLOAT) { ek_opt = _adapter_opt_d2f; } else { - assert(false, ""); + goto throw_not_impl; // runs user code, hence could block } break; case 1 *4+ 2: - if (src == T_INT && dest == T_LONG) { + if ((src == T_INT || is_subword_type(src)) && dest == T_LONG) { ek_opt = _adapter_opt_i2l; } else if (src == T_FLOAT && dest == T_DOUBLE) { ek_opt = _adapter_opt_f2d; } else { - assert(false, ""); + goto throw_not_impl; // runs user code, hence could block } break; default: - assert(false, ""); + goto throw_not_impl; // runs user code, hence could block break; } } @@ -2071,14 +2280,54 @@ ek_opt = _adapter_opt_unboxl; break; default: - assert(false, ""); + goto throw_not_impl; break; } } break; case _adapter_prim_to_ref: - goto throw_not_impl; // allocates, hence could block + { + assert(UseRicochetFrames, "else don't come here"); + // vminfo will be the location to insert the return value + vminfo = argslot; + ek_opt = _adapter_opt_collect_ref; + ensure_vmlayout_field(target, CHECK); + // for MethodHandleWalk: + if (java_lang_invoke_AdapterMethodHandle::is_instance(argument())) + ensure_vmlayout_field(argument, CHECK); + if (!OptimizeMethodHandles) break; + switch (type2size[src]) { + case 1: + ek_try = EntryKind(_adapter_opt_filter_S0_ref + argslot); + if (ek_try < _adapter_opt_collect_LAST && + ek_adapter_opt_collect_slot(ek_try) == argslot) { + assert(ek_adapter_opt_collect_count(ek_try) == 1 && + ek_adapter_opt_collect_type(ek_try) == T_OBJECT, ""); + ek_opt = ek_try; + break; + } + // else downgrade to variable slot: + ek_opt = _adapter_opt_collect_1_ref; + break; + case 2: + ek_try = EntryKind(_adapter_opt_collect_2_S0_ref + argslot); + if (ek_try < _adapter_opt_collect_LAST && + ek_adapter_opt_collect_slot(ek_try) == argslot) { + assert(ek_adapter_opt_collect_count(ek_try) == 2 && + ek_adapter_opt_collect_type(ek_try) == T_OBJECT, ""); + ek_opt = ek_try; + break; + } + // else downgrade to variable slot: + ek_opt = _adapter_opt_collect_2_ref; + break; + default: + goto throw_not_impl; + break; + } + } + break; case _adapter_swap_args: case _adapter_rot_args: @@ -2098,35 +2347,184 @@ rotate > 0 ? _adapter_opt_rot_2_up : _adapter_opt_rot_2_down); break; default: - assert(false, ""); + goto throw_not_impl; break; } } break; - case _adapter_collect_args: - goto throw_not_impl; // allocates, hence could block - case _adapter_spread_args: { +#ifdef TARGET_ARCH_NYI_6939861 + // ports before 6939861 supported only three kinds of spread ops + if (!UseRicochetFrames) { + int array_size = slots_pushed + 1; + assert(array_size >= 0, ""); + vminfo = array_size; + switch (array_size) { + case 0: ek_opt = _adapter_opt_spread_0; break; + case 1: ek_opt = _adapter_opt_spread_1; break; + default: ek_opt = _adapter_opt_spread_more; break; + } + break; + } +#endif //TARGET_ARCH_NYI_6939861 // vminfo will be the required length of the array - int slots_pushed = stack_move / stack_move_unit(); - int array_size = slots_pushed + 1; - assert(array_size >= 0, ""); + int array_size = (slots_pushed + 1) / (type2size[dest] == 2 ? 
2 : 1); vminfo = array_size; - switch (array_size) { - case 0: ek_opt = _adapter_opt_spread_0; break; - case 1: ek_opt = _adapter_opt_spread_1; break; - default: ek_opt = _adapter_opt_spread_more; break; + // general case + switch (dest) { + case T_BOOLEAN : // fall through to T_BYTE: + case T_BYTE : ek_opt = _adapter_opt_spread_byte; break; + case T_CHAR : ek_opt = _adapter_opt_spread_char; break; + case T_SHORT : ek_opt = _adapter_opt_spread_short; break; + case T_INT : ek_opt = _adapter_opt_spread_int; break; + case T_LONG : ek_opt = _adapter_opt_spread_long; break; + case T_FLOAT : ek_opt = _adapter_opt_spread_float; break; + case T_DOUBLE : ek_opt = _adapter_opt_spread_double; break; + case T_OBJECT : ek_opt = _adapter_opt_spread_ref; break; + case T_VOID : if (array_size != 0) goto throw_not_impl; + ek_opt = _adapter_opt_spread_ref; break; + default : goto throw_not_impl; } - if ((vminfo & CONV_VMINFO_MASK) != vminfo) - goto throw_not_impl; // overflow + assert(array_size == 0 || // it doesn't matter what the spreader is + (ek_adapter_opt_spread_count(ek_opt) == -1 && + (ek_adapter_opt_spread_type(ek_opt) == dest || + (ek_adapter_opt_spread_type(ek_opt) == T_BYTE && dest == T_BOOLEAN))), + err_msg("dest=%d ek_opt=%d", dest, ek_opt)); + + if (array_size <= 0) { + // since the general case does not handle length 0, this case is required: + ek_opt = _adapter_opt_spread_0; + break; + } + if (dest == T_OBJECT) { + ek_try = EntryKind(_adapter_opt_spread_1_ref - 1 + array_size); + if (ek_try < _adapter_opt_spread_LAST && + ek_adapter_opt_spread_count(ek_try) == array_size) { + assert(ek_adapter_opt_spread_type(ek_try) == dest, ""); + ek_opt = ek_try; + break; + } + } + break; } break; - case _adapter_flyby: - case _adapter_ricochet: - goto throw_not_impl; // runs Java code, hence could block + case _adapter_collect_args: + { + assert(UseRicochetFrames, "else don't come here"); + int elem_slots = argument_slot_count(java_lang_invoke_MethodHandle::type(argument())); + // vminfo will be the location to insert the return value + vminfo = argslot; + ensure_vmlayout_field(target, CHECK); + ensure_vmlayout_field(argument, CHECK); + + // general case: + switch (dest) { + default : if (!is_subword_type(dest)) goto throw_not_impl; + // else fall through: + case T_INT : ek_opt = _adapter_opt_collect_int; break; + case T_LONG : ek_opt = _adapter_opt_collect_long; break; + case T_FLOAT : ek_opt = _adapter_opt_collect_float; break; + case T_DOUBLE : ek_opt = _adapter_opt_collect_double; break; + case T_OBJECT : ek_opt = _adapter_opt_collect_ref; break; + case T_VOID : ek_opt = _adapter_opt_collect_void; break; + } + assert(ek_adapter_opt_collect_slot(ek_opt) == -1 && + ek_adapter_opt_collect_count(ek_opt) == -1 && + (ek_adapter_opt_collect_type(ek_opt) == dest || + ek_adapter_opt_collect_type(ek_opt) == T_INT && is_subword_type(dest)), + ""); + + if (dest == T_OBJECT && elem_slots == 1 && OptimizeMethodHandles) { + // filter operation on a ref + ek_try = EntryKind(_adapter_opt_filter_S0_ref + argslot); + if (ek_try < _adapter_opt_collect_LAST && + ek_adapter_opt_collect_slot(ek_try) == argslot) { + assert(ek_adapter_opt_collect_count(ek_try) == elem_slots && + ek_adapter_opt_collect_type(ek_try) == dest, ""); + ek_opt = ek_try; + break; + } + ek_opt = _adapter_opt_collect_1_ref; + break; + } + + if (dest == T_OBJECT && elem_slots == 2 && OptimizeMethodHandles) { + // filter of two arguments + ek_try = EntryKind(_adapter_opt_collect_2_S0_ref + argslot); + if (ek_try < _adapter_opt_collect_LAST && + 
ek_adapter_opt_collect_slot(ek_try) == argslot) { + assert(ek_adapter_opt_collect_count(ek_try) == elem_slots && + ek_adapter_opt_collect_type(ek_try) == dest, ""); + ek_opt = ek_try; + break; + } + ek_opt = _adapter_opt_collect_2_ref; + break; + } + + if (dest == T_OBJECT && OptimizeMethodHandles) { + // try to use a fixed length adapter + ek_try = EntryKind(_adapter_opt_collect_0_ref + elem_slots); + if (ek_try < _adapter_opt_collect_LAST && + ek_adapter_opt_collect_count(ek_try) == elem_slots) { + assert(ek_adapter_opt_collect_slot(ek_try) == -1 && + ek_adapter_opt_collect_type(ek_try) == dest, ""); + ek_opt = ek_try; + break; + } + } + + break; + } + + case _adapter_fold_args: + { + assert(UseRicochetFrames, "else don't come here"); + int elem_slots = argument_slot_count(java_lang_invoke_MethodHandle::type(argument())); + // vminfo will be the location to insert the return value + vminfo = argslot + elem_slots; + ensure_vmlayout_field(target, CHECK); + ensure_vmlayout_field(argument, CHECK); + + switch (dest) { + default : if (!is_subword_type(dest)) goto throw_not_impl; + // else fall through: + case T_INT : ek_opt = _adapter_opt_fold_int; break; + case T_LONG : ek_opt = _adapter_opt_fold_long; break; + case T_FLOAT : ek_opt = _adapter_opt_fold_float; break; + case T_DOUBLE : ek_opt = _adapter_opt_fold_double; break; + case T_OBJECT : ek_opt = _adapter_opt_fold_ref; break; + case T_VOID : ek_opt = _adapter_opt_fold_void; break; + } + assert(ek_adapter_opt_collect_slot(ek_opt) == -1 && + ek_adapter_opt_collect_count(ek_opt) == -1 && + (ek_adapter_opt_collect_type(ek_opt) == dest || + ek_adapter_opt_collect_type(ek_opt) == T_INT && is_subword_type(dest)), + ""); + + if (dest == T_OBJECT && elem_slots == 0 && OptimizeMethodHandles) { + // if there are no args, just pretend it's a collect + ek_opt = _adapter_opt_collect_0_ref; + break; + } + + if (dest == T_OBJECT && OptimizeMethodHandles) { + // try to use a fixed length adapter + ek_try = EntryKind(_adapter_opt_fold_1_ref - 1 + elem_slots); + if (ek_try < _adapter_opt_fold_LAST && + ek_adapter_opt_collect_count(ek_try) == elem_slots) { + assert(ek_adapter_opt_collect_slot(ek_try) == -1 && + ek_adapter_opt_collect_type(ek_try) == dest, ""); + ek_opt = ek_try; + break; + } + } + + break; + } default: // should have failed much earlier; must be a missing case here @@ -2134,11 +2532,36 @@ // and fall through: throw_not_impl: - // FIXME: these adapters are NYI - err = "adapter not yet implemented in the JVM"; + if (err == NULL) + err = "unknown adapter type"; break; } + if (err == NULL && (vminfo & CONV_VMINFO_MASK) != vminfo) { + // should not happen, since vminfo is used to encode arg/slot indexes < 255 + err = "vminfo overflow"; + } + + if (err == NULL && !have_entry(ek_opt)) { + err = "adapter stub for this kind of method handle is missing"; + } + + if (err == NULL && ek_opt == ek_orig) { + switch (ek_opt) { + case _adapter_prim_to_prim: + case _adapter_ref_to_prim: + case _adapter_prim_to_ref: + case _adapter_swap_args: + case _adapter_rot_args: + case _adapter_collect_args: + case _adapter_fold_args: + case _adapter_spread_args: + // should be handled completely by optimized cases; see above + err = "init_AdapterMethodHandle should not issue this"; + break; + } + } + if (err != NULL) { throw_InternalError_for_bad_conversion(conversion, err, THREAD); return; @@ -2158,6 +2581,70 @@ // Java code can publish it in global data structures. 
} +void MethodHandles::ensure_vmlayout_field(Handle target, TRAPS) { + Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(target())); + Handle mtform(THREAD, java_lang_invoke_MethodType::form(mtype())); + if (mtform.is_null()) { THROW(vmSymbols::java_lang_InternalError()); } + if (java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() > 0) { + if (java_lang_invoke_MethodTypeForm::vmlayout(mtform()) == NULL) { + // fill it in + Handle erased_mtype(THREAD, java_lang_invoke_MethodTypeForm::erasedType(mtform())); + TempNewSymbol erased_signature + = java_lang_invoke_MethodType::as_signature(erased_mtype(), /*intern:*/true, CHECK); + methodOop cookie + = SystemDictionary::find_method_handle_invoke(vmSymbols::invokeExact_name(), + erased_signature, + SystemDictionaryHandles::Object_klass(), + THREAD); + java_lang_invoke_MethodTypeForm::init_vmlayout(mtform(), cookie); + } + } +} + +#ifdef ASSERT + +extern "C" +void print_method_handle(oop mh); + +static void stress_method_handle_walk_impl(Handle mh, TRAPS) { + if (StressMethodHandleWalk) { + // Exercise the MethodHandleWalk code in various ways and validate + // the resulting method oop. Some of these produce output so they + // are guarded under Verbose. + ResourceMark rm; + HandleMark hm; + if (Verbose) { + print_method_handle(mh()); + } + TempNewSymbol name = SymbolTable::new_symbol("invoke", CHECK); + Handle mt = java_lang_invoke_MethodHandle::type(mh()); + TempNewSymbol signature = java_lang_invoke_MethodType::as_signature(mt(), true, CHECK); + MethodHandleCompiler mhc(mh, name, signature, 10000, false, CHECK); + methodHandle m = mhc.compile(CHECK); + if (Verbose) { + m->print_codes(); + } + InterpreterOopMap mask; + OopMapCache::compute_one_oop_map(m, m->code_size() - 1, &mask); + } +} + +static void stress_method_handle_walk(Handle mh, TRAPS) { + stress_method_handle_walk_impl(mh, THREAD); + if (HAS_PENDING_EXCEPTION) { + oop ex = PENDING_EXCEPTION; + CLEAR_PENDING_EXCEPTION; + tty->print("StressMethodHandleWalk: "); + java_lang_Throwable::print(ex, tty); + tty->cr(); + } +} +#else + +static void stress_method_handle_walk(Handle mh, TRAPS) {} + +#endif + // // Here are the native methods on sun.invoke.MethodHandleImpl. // They are the private interface between this JVM and the HotSpot-specific @@ -2182,18 +2669,14 @@ // which method are we really talking about? 
if (target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); } - oop target_oop = JNIHandles::resolve_non_null(target_jh); - if (java_lang_invoke_MemberName::is_instance(target_oop) && - java_lang_invoke_MemberName::vmindex(target_oop) == VM_INDEX_UNINITIALIZED) { - Handle mname(THREAD, target_oop); - MethodHandles::resolve_MemberName(mname, CHECK); - target_oop = mname(); // in case of GC + Handle target(THREAD, JNIHandles::resolve_non_null(target_jh)); + if (java_lang_invoke_MemberName::is_instance(target()) && + java_lang_invoke_MemberName::vmindex(target()) == VM_INDEX_UNINITIALIZED) { + MethodHandles::resolve_MemberName(target, CHECK); } - int decode_flags = 0; klassOop receiver_limit = NULL; - methodHandle m(THREAD, - MethodHandles::decode_method(target_oop, - receiver_limit, decode_flags)); + KlassHandle receiver_limit; int decode_flags = 0; + methodHandle m = MethodHandles::decode_method(target(), receiver_limit, decode_flags); if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "no such method"); } // The trusted Java code that calls this method should already have performed @@ -2229,6 +2712,7 @@ } MethodHandles::init_DirectMethodHandle(mh, m, (do_dispatch != JNI_FALSE), CHECK); + stress_method_handle_walk(mh, CHECK); } JVM_END @@ -2251,21 +2735,17 @@ // Target object is a reflective method. (%%% Do we need this alternate path?) Untested("init_BMH of non-MH"); if (argnum != 0) { THROW(vmSymbols::java_lang_InternalError()); } - int decode_flags = 0; klassOop receiver_limit_oop = NULL; - methodHandle m(THREAD, - MethodHandles::decode_method(target(), - receiver_limit_oop, - decode_flags)); - KlassHandle receiver_limit(THREAD, receiver_limit_oop); + KlassHandle receiver_limit; int decode_flags = 0; + methodHandle m = MethodHandles::decode_method(target(), receiver_limit, decode_flags); MethodHandles::init_BoundMethodHandle_with_receiver(mh, m, receiver_limit, decode_flags, CHECK); - return; + } else { + // Build a BMH on top of a DMH or another BMH: + MethodHandles::init_BoundMethodHandle(mh, target, argnum, CHECK); } - - // Build a BMH on top of a DMH or another BMH: - MethodHandles::init_BoundMethodHandle(mh, target, argnum, CHECK); + stress_method_handle_walk(mh, CHECK); } JVM_END @@ -2283,6 +2763,7 @@ assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); MethodHandles::init_AdapterMethodHandle(mh, target, argnum, CHECK); + stress_method_handle_walk(mh, CHECK); } JVM_END @@ -2336,8 +2817,10 @@ #ifndef PRODUCT #define EACH_NAMED_CON(template) \ - template(MethodHandles,GC_JVM_PUSH_LIMIT) \ - template(MethodHandles,GC_JVM_STACK_MOVE_UNIT) \ + /* hold back this one until JDK stabilizes */ \ + /* template(MethodHandles,GC_JVM_PUSH_LIMIT) */ \ + /* hold back this one until JDK stabilizes */ \ + /* template(MethodHandles,GC_JVM_STACK_MOVE_UNIT) */ \ template(MethodHandles,ETF_HANDLE_OR_METHOD_NAME) \ template(MethodHandles,ETF_DIRECT_HANDLE) \ template(MethodHandles,ETF_METHOD_NAME) \ @@ -2361,9 +2844,8 @@ template(java_lang_invoke_AdapterMethodHandle,OP_DROP_ARGS) \ template(java_lang_invoke_AdapterMethodHandle,OP_COLLECT_ARGS) \ template(java_lang_invoke_AdapterMethodHandle,OP_SPREAD_ARGS) \ - template(java_lang_invoke_AdapterMethodHandle,OP_FLYBY) \ - template(java_lang_invoke_AdapterMethodHandle,OP_RICOCHET) \ - template(java_lang_invoke_AdapterMethodHandle,CONV_OP_LIMIT) \ + /* hold back this one until JDK stabilizes */ \ + /*template(java_lang_invoke_AdapterMethodHandle,CONV_OP_LIMIT)*/ \ 
template(java_lang_invoke_AdapterMethodHandle,CONV_OP_MASK) \ template(java_lang_invoke_AdapterMethodHandle,CONV_VMINFO_MASK) \ template(java_lang_invoke_AdapterMethodHandle,CONV_VMINFO_SHIFT) \ @@ -2391,12 +2873,12 @@ #ifndef PRODUCT if (which >= 0 && which < con_value_count) { int con = con_values[which]; - objArrayOop box = (objArrayOop) JNIHandles::resolve(box_jh); - if (box != NULL && box->klass() == Universe::objectArrayKlassObj() && box->length() > 0) { + objArrayHandle box(THREAD, (objArrayOop) JNIHandles::resolve(box_jh)); + if (box.not_null() && box->klass() == Universe::objectArrayKlassObj() && box->length() > 0) { const char* str = &con_names[0]; for (int i = 0; i < which; i++) str += strlen(str) + 1; // skip name and null - oop name = java_lang_String::create_oop_from_str(str, CHECK_0); + oop name = java_lang_String::create_oop_from_str(str, CHECK_0); // possible safepoint box->obj_at_put(0, name); } return con; @@ -2453,10 +2935,10 @@ jclass clazz_jh, jstring name_jh, jstring sig_jh, int mflags, jclass caller_jh, jint skip, jobjectArray results_jh)) { if (clazz_jh == NULL || results_jh == NULL) return -1; - klassOop k_oop = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz_jh)); + KlassHandle k(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz_jh))); - objArrayOop results = (objArrayOop) JNIHandles::resolve(results_jh); - if (results == NULL || !results->is_objArray()) return -1; + objArrayHandle results(THREAD, (objArrayOop) JNIHandles::resolve(results_jh)); + if (results.is_null() || !results->is_objArray()) return -1; TempNewSymbol name = NULL; TempNewSymbol sig = NULL; @@ -2469,71 +2951,36 @@ if (sig == NULL) return 0; // a match is not possible } - klassOop caller = NULL; + KlassHandle caller; if (caller_jh != NULL) { oop caller_oop = JNIHandles::resolve_non_null(caller_jh); if (!java_lang_Class::is_instance(caller_oop)) return -1; - caller = java_lang_Class::as_klassOop(caller_oop); + caller = KlassHandle(THREAD, java_lang_Class::as_klassOop(caller_oop)); } - if (name != NULL && sig != NULL && results != NULL) { + if (name != NULL && sig != NULL && results.not_null()) { // try a direct resolve // %%% TO DO } - int res = MethodHandles::find_MemberNames(k_oop, name, sig, mflags, - caller, skip, results); + int res = MethodHandles::find_MemberNames(k(), name, sig, mflags, + caller(), skip, results()); // TO DO: expand at least some of the MemberNames, to avoid massive callbacks return res; } JVM_END -JVM_ENTRY(void, MHN_registerBootstrap(JNIEnv *env, jobject igcls, jclass caller_jh, jobject bsm_jh)) { - instanceKlassHandle ik = MethodHandles::resolve_instance_klass(caller_jh, THREAD); - if (!AllowTransitionalJSR292) { - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), - "registerBootstrapMethod is only supported in JSR 292 EDR"); - } - ik->link_class(CHECK); - if (!java_lang_invoke_MethodHandle::is_instance(JNIHandles::resolve(bsm_jh))) { - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "method handle"); - } - const char* err = NULL; - if (ik->is_initialized() || ik->is_in_error_state()) { - err = "too late: class is already initialized"; - } else { - ObjectLocker ol(ik, THREAD); // note: this should be a recursive lock - if (ik->is_not_initialized() || - (ik->is_being_initialized() && ik->is_reentrant_initialization(THREAD))) { - if (ik->bootstrap_method() != NULL) { - err = "class is already equipped with a bootstrap method"; - } else { - ik->set_bootstrap_method(JNIHandles::resolve_non_null(bsm_jh)); - err = 
NULL; - } - } else { - err = "class is already initialized"; - if (ik->is_being_initialized()) - err = "class is already being initialized in a different thread"; - } - } - if (err != NULL) { - THROW_MSG(vmSymbols::java_lang_IllegalStateException(), err); - } +JVM_ENTRY(jobject, MH_invoke_UOE(JNIEnv *env, jobject igmh, jobjectArray igargs)) { + TempNewSymbol UOE_name = SymbolTable::new_symbol("java/lang/UnsupportedOperationException", CHECK_NULL); + THROW_MSG_NULL(UOE_name, "MethodHandle.invoke cannot be invoked reflectively"); + return NULL; } JVM_END -JVM_ENTRY(jobject, MHN_getBootstrap(JNIEnv *env, jobject igcls, jclass caller_jh)) { - if (!AllowTransitionalJSR292) - THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(), "getBootstrap: transitional only"); - instanceKlassHandle ik = MethodHandles::resolve_instance_klass(caller_jh, THREAD); - return JNIHandles::make_local(THREAD, ik->bootstrap_method()); -} -JVM_END - -JVM_ENTRY(void, MHN_setCallSiteTarget(JNIEnv *env, jobject igcls, jobject site_jh, jobject target_jh)) { - if (!AllowTransitionalJSR292) - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "setCallSite: transitional only"); +JVM_ENTRY(jobject, MH_invokeExact_UOE(JNIEnv *env, jobject igmh, jobjectArray igargs)) { + TempNewSymbol UOE_name = SymbolTable::new_symbol("java/lang/UnsupportedOperationException", CHECK_NULL); + THROW_MSG_NULL(UOE_name, "MethodHandle.invokeExact cannot be invoked reflectively"); + return NULL; } JVM_END @@ -2541,21 +2988,17 @@ /// JVM_RegisterMethodHandleMethods #define LANG "Ljava/lang/" -#define JLINV "Ljava/lang/invoke/" /* standard package */ -#define JDYN "Ljava/dyn/" /* alternative package to JLINV if AllowTransitionalJSR292 */ -#define IDYN "Lsun/dyn/" /* alternative package to JDYN if AllowTransitionalJSR292 */ -// FIXME: After AllowTransitionalJSR292 is removed, replace JDYN and IDYN by JLINV. +#define JLINV "Ljava/lang/invoke/" #define OBJ LANG"Object;" #define CLS LANG"Class;" #define STRG LANG"String;" -#define CST JDYN"CallSite;" -#define MT JDYN"MethodType;" -#define MH JDYN"MethodHandle;" -#define MEM IDYN"MemberName;" -#define AMH IDYN"AdapterMethodHandle;" -#define BMH IDYN"BoundMethodHandle;" -#define DMH IDYN"DirectMethodHandle;" +#define MT JLINV"MethodType;" +#define MH JLINV"MethodHandle;" +#define MEM JLINV"MemberName;" +#define AMH JLINV"AdapterMethodHandle;" +#define BMH JLINV"BoundMethodHandle;" +#define DMH JLINV"DirectMethodHandle;" #define CC (char*) /*cast a literal from (const char*)*/ #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f) @@ -2579,39 +3022,12 @@ {CC"getMembers", CC"("CLS""STRG""STRG"I"CLS"I["MEM")I", FN_PTR(MHN_getMembers)} }; -// FIXME: Remove methods2 after AllowTransitionalJSR292 is removed. 
-static JNINativeMethod methods2[] = { - {CC"registerBootstrap", CC"("CLS MH")V", FN_PTR(MHN_registerBootstrap)}, - {CC"getBootstrap", CC"("CLS")"MH, FN_PTR(MHN_getBootstrap)}, - {CC"setCallSiteTarget", CC"("CST MH")V", FN_PTR(MHN_setCallSiteTarget)} +static JNINativeMethod invoke_methods[] = { + // void init(MemberName self, AccessibleObject ref) + {CC"invoke", CC"(["OBJ")"OBJ, FN_PTR(MH_invoke_UOE)}, + {CC"invokeExact", CC"(["OBJ")"OBJ, FN_PTR(MH_invokeExact_UOE)} }; -static void hack_signatures(JNINativeMethod* methods, jint num_methods, const char* from_sig, const char* to_sig) { - for (int i = 0; i < num_methods; i++) { - const char* sig = methods[i].signature; - if (!strstr(sig, from_sig)) continue; - size_t buflen = strlen(sig) + 100; - char* buf = NEW_C_HEAP_ARRAY(char, buflen); - char* bufp = buf; - const char* sigp = sig; - size_t from_len = strlen(from_sig), to_len = strlen(to_sig); - while (*sigp != '\0') { - assert(bufp < buf + buflen - to_len - 1, "oob"); - if (strncmp(sigp, from_sig, from_len) != 0) { - *bufp++ = *sigp++; - } else { - strcpy(bufp, to_sig); - bufp += to_len; - sigp += from_len; - } - } - *bufp = '\0'; - methods[i].signature = buf; // replace with new signature - if (TraceMethodHandles) - tty->print_cr("MethodHandleNatives: %s: change signature %s => %s", methods[i].name, sig, buf); - } -} - // This one function is exported, used by NativeLookup. JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class)) { @@ -2622,92 +3038,47 @@ return; // bind nothing } - if (SystemDictionary::MethodHandleNatives_klass() != NULL && - SystemDictionary::MethodHandleNatives_klass() != java_lang_Class::as_klassOop(JNIHandles::resolve(MHN_class))) { - warning("multiple versions of MethodHandleNatives in boot classpath; consider using -XX:+PreferTransitionalJSR292"); - THROW_MSG(vmSymbols::java_lang_InternalError(), "multiple versions of MethodHandleNatives in boot classpath; consider using -XX:+PreferTransitionalJSR292"); - } - bool enable_MH = true; - // Loop control. FIXME: Replace by dead reckoning after AllowTransitionalJSR292 is removed. - bool registered_natives = false; - bool try_plain = true, try_JDYN = true, try_IDYN = true; - for (;;) { + { ThreadToNativeFromVM ttnfv(thread); - if (try_plain) { try_plain = false; } - else if (try_JDYN) { try_JDYN = false; hack_signatures(methods, sizeof(methods)/sizeof(JNINativeMethod), IDYN, JDYN); } - else if (try_IDYN) { try_IDYN = false; hack_signatures(methods, sizeof(methods)/sizeof(JNINativeMethod), JDYN, JLINV); } - else { break; } int status = env->RegisterNatives(MHN_class, methods, sizeof(methods)/sizeof(JNINativeMethod)); + if (!env->ExceptionOccurred()) { + const char* L_MH_name = (JLINV "MethodHandle"); + const char* MH_name = L_MH_name+1; + jclass MH_class = env->FindClass(MH_name); + status = env->RegisterNatives(MH_class, invoke_methods, sizeof(invoke_methods)/sizeof(JNINativeMethod)); + } if (env->ExceptionOccurred()) { + MethodHandles::set_enabled(false); + warning("JSR 292 method handle code is mismatched to this JVM. Disabling support."); + enable_MH = false; env->ExceptionClear(); - // and try again... - } else { - registered_natives = true; - break; } } - if (!registered_natives) { - MethodHandles::set_enabled(false); - warning("JSR 292 method handle code is mismatched to this JVM. 
Disabling support."); - enable_MH = false; - } if (enable_MH) { - bool found_raise_exception = false; KlassHandle MHN_klass = SystemDictionaryHandles::MethodHandleNatives_klass(); - KlassHandle MHI_klass = SystemDictionaryHandles::MethodHandleImpl_klass(); - // Loop control. FIXME: Replace by dead reckoning after AllowTransitionalJSR292 is removed. - bool try_MHN = true, try_MHI = AllowTransitionalJSR292; - for (;;) { - KlassHandle try_klass; - if (try_MHN) { try_MHN = false; try_klass = MHN_klass; } - else if (try_MHI) { try_MHI = false; try_klass = MHI_klass; } - else { break; } - if (try_klass.is_null()) continue; + if (MHN_klass.not_null()) { TempNewSymbol raiseException_name = SymbolTable::new_symbol("raiseException", CHECK); TempNewSymbol raiseException_sig = SymbolTable::new_symbol("(ILjava/lang/Object;Ljava/lang/Object;)V", CHECK); - methodOop raiseException_method = instanceKlass::cast(try_klass->as_klassOop()) + methodOop raiseException_method = instanceKlass::cast(MHN_klass->as_klassOop()) ->find_method(raiseException_name, raiseException_sig); if (raiseException_method != NULL && raiseException_method->is_static()) { MethodHandles::set_raise_exception_method(raiseException_method); - found_raise_exception = true; - break; + } else { + warning("JSR 292 method handle code is mismatched to this JVM. Disabling support."); + enable_MH = false; } - } - if (!found_raise_exception) { - warning("JSR 292 method handle code is mismatched to this JVM. Disabling support."); + } else { enable_MH = false; } } if (enable_MH) { - if (AllowTransitionalJSR292) { - // We need to link the MethodHandleImpl klass before we generate - // the method handle adapters as the _raise_exception adapter uses - // one of its methods (and its c2i-adapter). - klassOop k = SystemDictionary::MethodHandleImpl_klass(); - if (k != NULL) { - instanceKlass* ik = instanceKlass::cast(k); - ik->link_class(CHECK); - } - } - MethodHandles::generate_adapters(); MethodHandles::set_enabled(true); } - - if (AllowTransitionalJSR292) { - ThreadToNativeFromVM ttnfv(thread); - - int status = env->RegisterNatives(MHN_class, methods2, sizeof(methods2)/sizeof(JNINativeMethod)); - if (env->ExceptionOccurred()) { - // Don't do this, since it's too late: - // MethodHandles::set_enabled(false) - env->ExceptionClear(); - } - } } JVM_END
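The invoke_methods table registered above binds MethodHandle.invoke and MethodHandle.invokeExact to natives that only throw, so the signature-polymorphic entry points cannot be reached through Core Reflection. Below is a minimal sketch of what a caller observes, assuming a stock JDK 7 java.lang.invoke setup; the class name and the exact wrapping of the exception are illustrative, not part of this changeset.

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveInvokeDemo {
    public static void main(String[] args) throws Exception {
        MethodHandle mh = MethodHandles.lookup().findVirtual(
                String.class, "length", MethodType.methodType(int.class));
        // invokeExact is declared as a native varargs method, so reflection
        // sees a single Object[] parameter.
        Method invokeExact = MethodHandle.class.getMethod("invokeExact", Object[].class);
        try {
            invokeExact.invoke(mh, (Object) new Object[] { "hello" });
        } catch (InvocationTargetException e) {
            // Expected here: the MH_invokeExact_UOE fallback registered above
            // throws "MethodHandle.invokeExact cannot be invoked reflectively".
            System.out.println(e.getCause());
        } catch (UnsupportedOperationException e) {
            System.out.println(e);  // some JDK builds surface it unwrapped
        }
    }
}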
--- a/src/share/vm/prims/methodHandles.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/methodHandles.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -66,8 +66,8 @@ _adapter_drop_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS, _adapter_collect_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS, _adapter_spread_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS, - _adapter_flyby = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_FLYBY, - _adapter_ricochet = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_RICOCHET, + _adapter_fold_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS, + _adapter_unused_13 = _adapter_mh_first + 13, //hole in the CONV_OP enumeration _adapter_mh_last = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::CONV_OP_LIMIT - 1, // Optimized adapter types @@ -93,10 +93,99 @@ _adapter_opt_unboxi, _adapter_opt_unboxl, - // spreading (array length cases 0, 1, >=2) - _adapter_opt_spread_0, - _adapter_opt_spread_1, - _adapter_opt_spread_more, + // %% Maybe tame the following with a VM_SYMBOLS_DO type macro? + + // how a blocking adapter returns (platform-dependent) + _adapter_opt_return_ref, + _adapter_opt_return_int, + _adapter_opt_return_long, + _adapter_opt_return_float, + _adapter_opt_return_double, + _adapter_opt_return_void, + _adapter_opt_return_S0_ref, // return ref to S=0 (last slot) + _adapter_opt_return_S1_ref, // return ref to S=1 (2nd-to-last slot) + _adapter_opt_return_S2_ref, + _adapter_opt_return_S3_ref, + _adapter_opt_return_S4_ref, + _adapter_opt_return_S5_ref, + _adapter_opt_return_any, // dynamically select r/i/l/f/d + _adapter_opt_return_FIRST = _adapter_opt_return_ref, + _adapter_opt_return_LAST = _adapter_opt_return_any, + + // spreading (array length cases 0, 1, ...) + _adapter_opt_spread_0, // spread empty array to N=0 arguments + _adapter_opt_spread_1_ref, // spread Object[] to N=1 argument + _adapter_opt_spread_2_ref, // spread Object[] to N=2 arguments + _adapter_opt_spread_3_ref, // spread Object[] to N=3 arguments + _adapter_opt_spread_4_ref, // spread Object[] to N=4 arguments + _adapter_opt_spread_5_ref, // spread Object[] to N=5 arguments + _adapter_opt_spread_ref, // spread Object[] to N arguments + _adapter_opt_spread_byte, // spread byte[] or boolean[] to N arguments + _adapter_opt_spread_char, // spread char[], etc., to N arguments + _adapter_opt_spread_short, // spread short[], etc., to N arguments + _adapter_opt_spread_int, // spread int[], short[], etc., to N arguments + _adapter_opt_spread_long, // spread long[] to N arguments + _adapter_opt_spread_float, // spread float[] to N arguments + _adapter_opt_spread_double, // spread double[] to N arguments + _adapter_opt_spread_FIRST = _adapter_opt_spread_0, + _adapter_opt_spread_LAST = _adapter_opt_spread_double, + + // blocking filter/collect conversions + // These collect N arguments and replace them (at slot S) by a return value + // which is passed to the final target, along with the unaffected arguments. 
+ // collect_{N}_{T} collects N arguments at any position into a T value + // collect_{N}_S{S}_{T} collects N arguments at slot S into a T value + // collect_{T} collects any number of arguments at any position + // filter_S{S}_{T} is the same as collect_1_S{S}_{T} (a unary collection) + // (collect_2 is also usable as a filter, with long or double arguments) + _adapter_opt_collect_ref, // combine N arguments, replace with a reference + _adapter_opt_collect_int, // combine N arguments, replace with an int, short, etc. + _adapter_opt_collect_long, // combine N arguments, replace with a long + _adapter_opt_collect_float, // combine N arguments, replace with a float + _adapter_opt_collect_double, // combine N arguments, replace with a double + _adapter_opt_collect_void, // combine N arguments, replace with nothing + // if there is a small fixed number to push, do so without a loop: + _adapter_opt_collect_0_ref, // collect N=0 arguments, insert a reference + _adapter_opt_collect_1_ref, // collect N=1 argument, replace with a reference + _adapter_opt_collect_2_ref, // combine N=2 arguments, replace with a reference + _adapter_opt_collect_3_ref, // combine N=3 arguments, replace with a reference + _adapter_opt_collect_4_ref, // combine N=4 arguments, replace with a reference + _adapter_opt_collect_5_ref, // combine N=5 arguments, replace with a reference + // filters are an important special case because they never move arguments: + _adapter_opt_filter_S0_ref, // filter N=1 argument at S=0, replace with a reference + _adapter_opt_filter_S1_ref, // filter N=1 argument at S=1, replace with a reference + _adapter_opt_filter_S2_ref, // filter N=1 argument at S=2, replace with a reference + _adapter_opt_filter_S3_ref, // filter N=1 argument at S=3, replace with a reference + _adapter_opt_filter_S4_ref, // filter N=1 argument at S=4, replace with a reference + _adapter_opt_filter_S5_ref, // filter N=1 argument at S=5, replace with a reference + // these move arguments, but they are important for boxing + _adapter_opt_collect_2_S0_ref, // combine last N=2 arguments, replace with a reference + _adapter_opt_collect_2_S1_ref, // combine N=2 arguments at S=1, replace with a reference + _adapter_opt_collect_2_S2_ref, // combine N=2 arguments at S=2, replace with a reference + _adapter_opt_collect_2_S3_ref, // combine N=2 arguments at S=3, replace with a reference + _adapter_opt_collect_2_S4_ref, // combine N=2 arguments at S=4, replace with a reference + _adapter_opt_collect_2_S5_ref, // combine N=2 arguments at S=5, replace with a reference + _adapter_opt_collect_FIRST = _adapter_opt_collect_ref, + _adapter_opt_collect_LAST = _adapter_opt_collect_2_S5_ref, + + // blocking folding conversions + // these are like collects, but retain all the N arguments for the final target + //_adapter_opt_fold_0_ref, // same as _adapter_opt_collect_0_ref + // fold_{N}_{T} processes N arguments at any position into a T value, which it inserts + // fold_{T} processes any number of arguments at any position + _adapter_opt_fold_ref, // process N arguments, prepend a reference + _adapter_opt_fold_int, // process N arguments, prepend an int, short, etc. 
+ _adapter_opt_fold_long, // process N arguments, prepend a long + _adapter_opt_fold_float, // process N arguments, prepend a float + _adapter_opt_fold_double, // process N arguments, prepend a double + _adapter_opt_fold_void, // process N arguments, but leave the list unchanged + _adapter_opt_fold_1_ref, // process N=1 argument, prepend a reference + _adapter_opt_fold_2_ref, // process N=2 arguments, prepend a reference + _adapter_opt_fold_3_ref, // process N=3 arguments, prepend a reference + _adapter_opt_fold_4_ref, // process N=4 arguments, prepend a reference + _adapter_opt_fold_5_ref, // process N=5 arguments, prepend a reference + _adapter_opt_fold_FIRST = _adapter_opt_fold_ref, + _adapter_opt_fold_LAST = _adapter_opt_fold_5_ref, _EK_LIMIT, _EK_FIRST = 0 @@ -110,6 +199,7 @@ enum { // import java_lang_invoke_AdapterMethodHandle::CONV_OP_* CONV_OP_LIMIT = java_lang_invoke_AdapterMethodHandle::CONV_OP_LIMIT, CONV_OP_MASK = java_lang_invoke_AdapterMethodHandle::CONV_OP_MASK, + CONV_TYPE_MASK = java_lang_invoke_AdapterMethodHandle::CONV_TYPE_MASK, CONV_VMINFO_MASK = java_lang_invoke_AdapterMethodHandle::CONV_VMINFO_MASK, CONV_VMINFO_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_VMINFO_SHIFT, CONV_OP_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_OP_SHIFT, @@ -123,6 +213,7 @@ static MethodHandleEntry* _entries[_EK_LIMIT]; static const char* _entry_names[_EK_LIMIT+1]; static jobject _raise_exception_method; + static address _adapter_return_handlers[CONV_TYPE_MASK+1]; // Adapters. static MethodHandlesAdapterBlob* _adapter_code; @@ -147,39 +238,195 @@ } // Some adapter helper functions. - static void get_ek_bound_mh_info(EntryKind ek, BasicType& arg_type, int& arg_mask, int& arg_slots) { + static EntryKind ek_original_kind(EntryKind ek) { + if (ek <= _adapter_mh_last) return ek; switch (ek) { - case _bound_int_mh : // fall-thru - case _bound_int_direct_mh : arg_type = T_INT; arg_mask = _INSERT_INT_MASK; break; - case _bound_long_mh : // fall-thru - case _bound_long_direct_mh: arg_type = T_LONG; arg_mask = _INSERT_LONG_MASK; break; - case _bound_ref_mh : // fall-thru - case _bound_ref_direct_mh : arg_type = T_OBJECT; arg_mask = _INSERT_REF_MASK; break; - default: ShouldNotReachHere(); + case _adapter_opt_swap_1: + case _adapter_opt_swap_2: + return _adapter_swap_args; + case _adapter_opt_rot_1_up: + case _adapter_opt_rot_1_down: + case _adapter_opt_rot_2_up: + case _adapter_opt_rot_2_down: + return _adapter_rot_args; + case _adapter_opt_i2i: + case _adapter_opt_l2i: + case _adapter_opt_d2f: + case _adapter_opt_i2l: + case _adapter_opt_f2d: + return _adapter_prim_to_prim; + case _adapter_opt_unboxi: + case _adapter_opt_unboxl: + return _adapter_ref_to_prim; } - arg_slots = type2size[arg_type]; + if (ek >= _adapter_opt_spread_FIRST && ek <= _adapter_opt_spread_LAST) + return _adapter_spread_args; + if (ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST) + return _adapter_collect_args; + if (ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST) + return _adapter_fold_args; + if (ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST) + return _adapter_opt_return_any; + assert(false, "oob"); + return _EK_LIMIT; + } + + static bool ek_supported(MethodHandles::EntryKind ek); + + static BasicType ek_bound_mh_arg_type(EntryKind ek) { + switch (ek) { + case _bound_int_mh : // fall-thru + case _bound_int_direct_mh : return T_INT; + case _bound_long_mh : // fall-thru + case _bound_long_direct_mh : return T_LONG; + default : return T_OBJECT; + } + } + + 
static int ek_adapter_opt_swap_slots(EntryKind ek) { + switch (ek) { + case _adapter_opt_swap_1 : return 1; + case _adapter_opt_swap_2 : return 2; + case _adapter_opt_rot_1_up : return 1; + case _adapter_opt_rot_1_down : return 1; + case _adapter_opt_rot_2_up : return 2; + case _adapter_opt_rot_2_down : return 2; + default : ShouldNotReachHere(); return -1; + } + } + + static int ek_adapter_opt_swap_mode(EntryKind ek) { + switch (ek) { + case _adapter_opt_swap_1 : return 0; + case _adapter_opt_swap_2 : return 0; + case _adapter_opt_rot_1_up : return 1; + case _adapter_opt_rot_1_down : return -1; + case _adapter_opt_rot_2_up : return 1; + case _adapter_opt_rot_2_down : return -1; + default : ShouldNotReachHere(); return 0; + } } - static void get_ek_adapter_opt_swap_rot_info(EntryKind ek, int& swap_bytes, int& rotate) { - int swap_slots = 0; + static int ek_adapter_opt_collect_count(EntryKind ek) { + assert(ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST || + ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST, ""); switch (ek) { - case _adapter_opt_swap_1: swap_slots = 1; rotate = 0; break; - case _adapter_opt_swap_2: swap_slots = 2; rotate = 0; break; - case _adapter_opt_rot_1_up: swap_slots = 1; rotate = 1; break; - case _adapter_opt_rot_1_down: swap_slots = 1; rotate = -1; break; - case _adapter_opt_rot_2_up: swap_slots = 2; rotate = 1; break; - case _adapter_opt_rot_2_down: swap_slots = 2; rotate = -1; break; - default: ShouldNotReachHere(); + case _adapter_opt_collect_0_ref : return 0; + case _adapter_opt_filter_S0_ref : + case _adapter_opt_filter_S1_ref : + case _adapter_opt_filter_S2_ref : + case _adapter_opt_filter_S3_ref : + case _adapter_opt_filter_S4_ref : + case _adapter_opt_filter_S5_ref : + case _adapter_opt_fold_1_ref : + case _adapter_opt_collect_1_ref : return 1; + case _adapter_opt_collect_2_S0_ref : + case _adapter_opt_collect_2_S1_ref : + case _adapter_opt_collect_2_S2_ref : + case _adapter_opt_collect_2_S3_ref : + case _adapter_opt_collect_2_S4_ref : + case _adapter_opt_collect_2_S5_ref : + case _adapter_opt_fold_2_ref : + case _adapter_opt_collect_2_ref : return 2; + case _adapter_opt_fold_3_ref : + case _adapter_opt_collect_3_ref : return 3; + case _adapter_opt_fold_4_ref : + case _adapter_opt_collect_4_ref : return 4; + case _adapter_opt_fold_5_ref : + case _adapter_opt_collect_5_ref : return 5; + default : return -1; // sentinel value for "variable" } - // Return the size of the stack slots to move in bytes. 
- swap_bytes = swap_slots * Interpreter::stackElementSize; + } + + static int ek_adapter_opt_collect_slot(EntryKind ek) { + assert(ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST || + ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST, ""); + switch (ek) { + case _adapter_opt_collect_2_S0_ref : + case _adapter_opt_filter_S0_ref : return 0; + case _adapter_opt_collect_2_S1_ref : + case _adapter_opt_filter_S1_ref : return 1; + case _adapter_opt_collect_2_S2_ref : + case _adapter_opt_filter_S2_ref : return 2; + case _adapter_opt_collect_2_S3_ref : + case _adapter_opt_filter_S3_ref : return 3; + case _adapter_opt_collect_2_S4_ref : + case _adapter_opt_filter_S4_ref : return 4; + case _adapter_opt_collect_2_S5_ref : + case _adapter_opt_filter_S5_ref : return 5; + default : return -1; // sentinel value for "variable" + } } - static int get_ek_adapter_opt_spread_info(EntryKind ek) { + static BasicType ek_adapter_opt_collect_type(EntryKind ek) { + assert(ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST || + ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST, ""); + switch (ek) { + case _adapter_opt_fold_int : + case _adapter_opt_collect_int : return T_INT; + case _adapter_opt_fold_long : + case _adapter_opt_collect_long : return T_LONG; + case _adapter_opt_fold_float : + case _adapter_opt_collect_float : return T_FLOAT; + case _adapter_opt_fold_double : + case _adapter_opt_collect_double : return T_DOUBLE; + case _adapter_opt_fold_void : + case _adapter_opt_collect_void : return T_VOID; + default : return T_OBJECT; + } + } + + static int ek_adapter_opt_return_slot(EntryKind ek) { + assert(ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST, ""); + switch (ek) { + case _adapter_opt_return_S0_ref : return 0; + case _adapter_opt_return_S1_ref : return 1; + case _adapter_opt_return_S2_ref : return 2; + case _adapter_opt_return_S3_ref : return 3; + case _adapter_opt_return_S4_ref : return 4; + case _adapter_opt_return_S5_ref : return 5; + default : return -1; // sentinel value for "variable" + } + } + + static BasicType ek_adapter_opt_return_type(EntryKind ek) { + assert(ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST, ""); switch (ek) { - case _adapter_opt_spread_0: return 0; - case _adapter_opt_spread_1: return 1; - default : return -1; + case _adapter_opt_return_int : return T_INT; + case _adapter_opt_return_long : return T_LONG; + case _adapter_opt_return_float : return T_FLOAT; + case _adapter_opt_return_double : return T_DOUBLE; + case _adapter_opt_return_void : return T_VOID; + case _adapter_opt_return_any : return T_CONFLICT; // sentinel value for "variable" + default : return T_OBJECT; + } + } + + static int ek_adapter_opt_spread_count(EntryKind ek) { + assert(ek >= _adapter_opt_spread_FIRST && ek <= _adapter_opt_spread_LAST, ""); + switch (ek) { + case _adapter_opt_spread_0 : return 0; + case _adapter_opt_spread_1_ref : return 1; + case _adapter_opt_spread_2_ref : return 2; + case _adapter_opt_spread_3_ref : return 3; + case _adapter_opt_spread_4_ref : return 4; + case _adapter_opt_spread_5_ref : return 5; + default : return -1; // sentinel value for "variable" + } + } + + static BasicType ek_adapter_opt_spread_type(EntryKind ek) { + assert(ek >= _adapter_opt_spread_FIRST && ek <= _adapter_opt_spread_LAST, ""); + switch (ek) { + // (there is no _adapter_opt_spread_boolean; we use byte) + case _adapter_opt_spread_byte : return T_BYTE; + case _adapter_opt_spread_char : return T_CHAR; + case 
_adapter_opt_spread_short : return T_SHORT; + case _adapter_opt_spread_int : return T_INT; + case _adapter_opt_spread_long : return T_LONG; + case _adapter_opt_spread_float : return T_FLOAT; + case _adapter_opt_spread_double : return T_DOUBLE; + default : return T_OBJECT; } } @@ -228,12 +475,21 @@ // Bit mask of conversion_op values. May vary by platform. static int adapter_conversion_ops_supported_mask(); + static bool conv_op_supported(int conv_op) { + assert(conv_op_valid(conv_op), ""); + return ((adapter_conversion_ops_supported_mask() & nth_bit(conv_op)) != 0); + } + // Offset in words that the interpreter stack pointer moves when an argument is pushed. // The stack_move value must always be a multiple of this. static int stack_move_unit() { return frame::interpreter_frame_expression_stack_direction() * Interpreter::stackElementWords; } + // Adapter frame traversal. (Implementation-specific.) + static frame ricochet_frame_sender(const frame& fr, RegisterMap* reg_map); + static void ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map); + enum { CONV_VMINFO_SIGN_FLAG = 0x80 }; // Shift values for prim-to-prim conversions. static int adapter_prim_to_prim_subword_vminfo(BasicType dest) { @@ -265,13 +521,13 @@ static inline address from_interpreted_entry(EntryKind ek); // helpers for decode_method. - static methodOop decode_methodOop(methodOop m, int& decode_flags_result); - static methodOop decode_vmtarget(oop vmtarget, int vmindex, oop mtype, klassOop& receiver_limit_result, int& decode_flags_result); - static methodOop decode_MemberName(oop mname, klassOop& receiver_limit_result, int& decode_flags_result); - static methodOop decode_MethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result); - static methodOop decode_DirectMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result); - static methodOop decode_BoundMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result); - static methodOop decode_AdapterMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result); + static methodOop decode_methodOop(methodOop m, int& decode_flags_result); + static methodHandle decode_vmtarget(oop vmtarget, int vmindex, oop mtype, KlassHandle& receiver_limit_result, int& decode_flags_result); + static methodHandle decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result); + static methodHandle decode_MethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result); + static methodHandle decode_DirectMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result); + static methodHandle decode_BoundMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result); + static methodHandle decode_AdapterMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result); // Find out how many stack slots an mh pushes or pops. // The result is *not* reported as a multiple of stack_move_unit(); @@ -317,7 +573,7 @@ _dmf_adapter_lsb = 0x20, _DMF_ADAPTER_MASK = (_dmf_adapter_lsb << CONV_OP_LIMIT) - _dmf_adapter_lsb }; - static methodOop decode_method(oop x, klassOop& receiver_limit_result, int& decode_flags_result); + static methodHandle decode_method(oop x, KlassHandle& receiver_limit_result, int& decode_flags_result); enum { // format of query to getConstant: GC_JVM_PUSH_LIMIT = 0, @@ -429,6 +685,7 @@ // Fill in the fields of an AdapterMethodHandle mh. (MH.type must be pre-filled.) 
static void init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS); + static void ensure_vmlayout_field(Handle target, TRAPS); #ifdef ASSERT static bool spot_check_entry_names(); @@ -441,6 +698,8 @@ KlassHandle receiver_klass, TRAPS); +public: + static bool is_float_fixed_reinterpretation_cast(BasicType src, BasicType dst); static bool same_basic_type_for_arguments(BasicType src, BasicType dst, bool raw = false, bool for_return = false); @@ -448,12 +707,52 @@ return same_basic_type_for_arguments(src, dst, raw, true); } - enum { // arg_mask values + static Symbol* convert_to_signature(oop type_str, bool polymorphic, TRAPS); + +#ifdef TARGET_ARCH_x86 +# include "methodHandles_x86.hpp" +#endif +#ifdef TARGET_ARCH_sparc +#define TARGET_ARCH_NYI_6939861 1 //FIXME +//# include "methodHandles_sparc.hpp" +#endif +#ifdef TARGET_ARCH_zero +#define TARGET_ARCH_NYI_6939861 1 //FIXME +//# include "methodHandles_zero.hpp" +#endif +#ifdef TARGET_ARCH_arm +# include "methodHandles_arm.hpp" +#endif +#ifdef TARGET_ARCH_ppc +# include "methodHandles_ppc.hpp" +#endif + +#ifdef TARGET_ARCH_NYI_6939861 + // Here are some backward compatible declarations until the 6939861 ports are updated. + #define _adapter_flyby (_EK_LIMIT + 10) + #define _adapter_ricochet (_EK_LIMIT + 11) + #define _adapter_opt_spread_1 _adapter_opt_spread_1_ref + #define _adapter_opt_spread_more _adapter_opt_spread_ref + enum { _INSERT_NO_MASK = -1, _INSERT_REF_MASK = 0, _INSERT_INT_MASK = 1, _INSERT_LONG_MASK = 3 }; + static void get_ek_bound_mh_info(EntryKind ek, BasicType& arg_type, int& arg_mask, int& arg_slots) { + arg_type = ek_bound_mh_arg_type(ek); + arg_mask = 0; + arg_slots = type2size[arg_type];; + } + static void get_ek_adapter_opt_swap_rot_info(EntryKind ek, int& swap_bytes, int& rotate) { + int swap_slots = ek_adapter_opt_swap_slots(ek); + rotate = ek_adapter_opt_swap_mode(ek); + swap_bytes = swap_slots * Interpreter::stackElementSize; + } + static int get_ek_adapter_opt_spread_info(EntryKind ek) { + return ek_adapter_opt_spread_count(ek); + } + static void insert_arg_slots(MacroAssembler* _masm, RegisterOrConstant arg_slots, int arg_mask, @@ -466,8 +765,7 @@ Register temp_reg, Register temp2_reg, Register temp3_reg = noreg); static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN; - - static Symbol* convert_to_signature(oop type_str, bool polymorphic, TRAPS); +#endif //TARGET_ARCH_NYI_6939861 };
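The collect, fold, filter, and spread entry kinds enumerated above are the VM-side counterparts of adapter combinators visible through the public java.lang.invoke API. The sketch below illustrates the semantics the comments describe, using only JDK 7 API calls: a filter is a unary collect that replaces one argument, a fold keeps the original arguments and prepends the combiner's result, and a spread expands a trailing array into positional arguments. Class and method names are illustrative; this shows the Java-level behavior, not the adapter stubs themselves.

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class AdapterShapesDemo {
    static int sum(int a, int b) { return a + b; }
    static int negate(int x) { return -x; }
    static String describe(int s, int a, int b) { return s + " = " + a + " + " + b; }

    public static void main(String[] args) throws Throwable {
        MethodHandles.Lookup l = MethodHandles.lookup();
        MethodHandle sum = l.findStatic(AdapterShapesDemo.class, "sum",
                MethodType.methodType(int.class, int.class, int.class));
        MethodHandle negate = l.findStatic(AdapterShapesDemo.class, "negate",
                MethodType.methodType(int.class, int.class));
        MethodHandle describe = l.findStatic(AdapterShapesDemo.class, "describe",
                MethodType.methodType(String.class, int.class, int.class, int.class));

        // filter (a unary collect): argument 0 is replaced by negate(argument 0)
        MethodHandle filtered = MethodHandles.filterArguments(sum, 0, negate);
        System.out.println((int) filtered.invokeExact(2, 3));        // 1

        // fold: sum sees (2, 3); its result 5 is prepended and (2, 3) are retained
        MethodHandle folded = MethodHandles.foldArguments(describe, sum);
        System.out.println((String) folded.invokeExact(2, 3));       // 5 = 2 + 3

        // spread: the trailing int[] is expanded into the last two int arguments
        MethodHandle spreader = sum.asSpreader(int[].class, 2);
        System.out.println((int) spreader.invokeExact(new int[] {40, 2}));  // 42
    }
}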
--- a/src/share/vm/prims/nativeLookup.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/nativeLookup.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -117,8 +117,6 @@ { CC"Java_sun_misc_Unsafe_registerNatives", NULL, FN_PTR(JVM_RegisterUnsafeMethods) }, { CC"Java_java_lang_invoke_MethodHandleNatives_registerNatives", NULL, FN_PTR(JVM_RegisterMethodHandleMethods) }, - { CC"Java_sun_dyn_MethodHandleNatives_registerNatives", NULL, FN_PTR(JVM_RegisterMethodHandleMethods) }, // AllowTransitionalJSR292 - { CC"Java_java_dyn_MethodHandleNatives_registerNatives", NULL, FN_PTR(JVM_RegisterMethodHandleMethods) }, // AllowTransitionalJSR292 { CC"Java_sun_misc_Perf_registerNatives", NULL, FN_PTR(JVM_RegisterPerfMethods) } };
--- a/src/share/vm/prims/unsafe.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/prims/unsafe.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -24,6 +24,9 @@ #include "precompiled.hpp" #include "classfile/vmSymbols.hpp" +#ifndef SERIALGC +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" +#endif // SERIALGC #include "memory/allocation.inline.hpp" #include "prims/jni.h" #include "prims/jvm.h" @@ -193,7 +196,32 @@ UnsafeWrapper("Unsafe_GetObject"); if (obj == NULL) THROW_0(vmSymbols::java_lang_NullPointerException()); GET_OOP_FIELD(obj, offset, v) - return JNIHandles::make_local(env, v); + jobject ret = JNIHandles::make_local(env, v); +#ifndef SERIALGC + // We could be accessing the referent field in a reference + // object. If G1 is enabled then we need to register a non-null + // referent with the SATB barrier. + if (UseG1GC) { + bool needs_barrier = false; + + if (ret != NULL) { + if (offset == java_lang_ref_Reference::referent_offset) { + oop o = JNIHandles::resolve_non_null(obj); + klassOop k = o->klass(); + if (instanceKlass::cast(k)->reference_type() != REF_NONE) { + assert(instanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity"); + needs_barrier = true; + } + } + } + + if (needs_barrier) { + oop referent = JNIHandles::resolve(ret); + G1SATBCardTableModRefBS::enqueue(referent); + } + } +#endif // SERIALGC + return ret; UNSAFE_END UNSAFE_ENTRY(void, Unsafe_SetObject140(JNIEnv *env, jobject unsafe, jobject obj, jint offset, jobject x_h)) @@ -226,7 +254,32 @@ UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) UnsafeWrapper("Unsafe_GetObject"); GET_OOP_FIELD(obj, offset, v) - return JNIHandles::make_local(env, v); + jobject ret = JNIHandles::make_local(env, v); +#ifndef SERIALGC + // We could be accessing the referent field in a reference + // object. If G1 is enabled then we need to register non-null + // referent with the SATB barrier. + if (UseG1GC) { + bool needs_barrier = false; + + if (ret != NULL) { + if (offset == java_lang_ref_Reference::referent_offset && obj != NULL) { + oop o = JNIHandles::resolve(obj); + klassOop k = o->klass(); + if (instanceKlass::cast(k)->reference_type() != REF_NONE) { + assert(instanceKlass::cast(k)->is_subclass_of(SystemDictionary::Reference_klass()), "sanity"); + needs_barrier = true; + } + } + } + + if (needs_barrier) { + oop referent = JNIHandles::resolve(ret); + G1SATBCardTableModRefBS::enqueue(referent); + } + } +#endif // SERIALGC + return ret; UNSAFE_END UNSAFE_ENTRY(void, Unsafe_SetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h))
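The Unsafe_GetObject change above matters when Java code reads Reference.referent through Unsafe rather than Reference.get(): the raw read bypasses the usual accessor, so the intrinsic itself must hand the just-exposed referent to G1's SATB queue. A sketch of the kind of access that takes this path follows, assuming the JDK-internal sun.misc.Unsafe instance is reachable via its theUnsafe field (a HotSpot implementation detail, not public API); the class name is illustrative.

import java.lang.ref.Reference;
import java.lang.ref.WeakReference;
import java.lang.reflect.Field;
import sun.misc.Unsafe;

public class ReferentUnsafeReadDemo {
    public static void main(String[] args) throws Exception {
        Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
        theUnsafe.setAccessible(true);
        Unsafe unsafe = (Unsafe) theUnsafe.get(null);

        Object payload = new Object();
        WeakReference<Object> ref = new WeakReference<Object>(payload);

        long referentOffset =
                unsafe.objectFieldOffset(Reference.class.getDeclaredField("referent"));

        // Raw field read; with -XX:+UseG1GC this is the access that
        // Unsafe_GetObject now reports to the SATB barrier.
        Object seen = unsafe.getObject(ref, referentOffset);
        System.out.println(seen == payload);   // true
    }
}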
--- a/src/share/vm/runtime/advancedThresholdPolicy.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,7 +1,26 @@ /* -* Copyright (c) 2010, 2011 Oracle and/or its affiliates. All rights reserved. -* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. -*/ + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ #include "precompiled.hpp" #include "runtime/advancedThresholdPolicy.hpp"
--- a/src/share/vm/runtime/advancedThresholdPolicy.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/advancedThresholdPolicy.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,7 +1,26 @@ /* -* Copyright (c) 2010, 2011 Oracle and/or its affiliates. All rights reserved. -* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. -*/ + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ #ifndef SHARE_VM_RUNTIME_ADVANCEDTHRESHOLDPOLICY_HPP #define SHARE_VM_RUNTIME_ADVANCEDTHRESHOLDPOLICY_HPP
--- a/src/share/vm/runtime/arguments.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/arguments.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -243,6 +243,13 @@ { "MaxLiveObjectEvacuationRatio", JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) }, { "ForceSharedSpaces", JDK_Version::jdk_update(6,25), JDK_Version::jdk(8) }, + { "UseParallelOldGCCompacting", + JDK_Version::jdk_update(6,27), JDK_Version::jdk(8) }, + { "UseParallelDensePrefixUpdate", + JDK_Version::jdk_update(6,27), JDK_Version::jdk(8) }, + { "UseParallelOldGCDensePrefix", + JDK_Version::jdk_update(6,27), JDK_Version::jdk(8) }, + { "AllowTransitionalJSR292", JDK_Version::jdk(7), JDK_Version::jdk(8) }, { NULL, JDK_Version(0), JDK_Version(0) } }; @@ -799,26 +806,22 @@ JDK_Version since = JDK_Version(); - if (parse_argument(arg, origin)) { - // do nothing - } else if (is_newly_obsolete(arg, &since)) { - enum { bufsize = 256 }; - char buffer[bufsize]; - since.to_string(buffer, bufsize); - jio_fprintf(defaultStream::error_stream(), - "Warning: The flag %s has been EOL'd as of %s and will" - " be ignored\n", arg, buffer); - } else { - if (!ignore_unrecognized) { - jio_fprintf(defaultStream::error_stream(), - "Unrecognized VM option '%s'\n", arg); - // allow for commandline "commenting out" options like -XX:#+Verbose - if (strlen(arg) == 0 || arg[0] != '#') { - return false; - } - } + if (parse_argument(arg, origin) || ignore_unrecognized) { + return true; } - return true; + + const char * const argname = *arg == '+' || *arg == '-' ? arg + 1 : arg; + if (is_newly_obsolete(arg, &since)) { + char version[256]; + since.to_string(version, sizeof(version)); + warning("ignoring option %s; support was removed in %s", argname, version); + return true; + } + + jio_fprintf(defaultStream::error_stream(), + "Unrecognized VM option '%s'\n", argname); + // allow for commandline "commenting out" options like -XX:#+Verbose + return arg[0] == '#'; } bool Arguments::process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized) { @@ -956,12 +959,22 @@ // Ensure Agent_OnLoad has the correct initial values. // This may not be the final mode; mode may change later in onload phase. PropertyList_unique_add(&_system_properties, "java.vm.info", - (char*)Abstract_VM_Version::vm_info_string(), false); + (char*)VM_Version::vm_info_string(), false); UseInterpreter = true; UseCompiler = true; UseLoopCounter = true; +#ifndef ZERO + // Turn these off for mixed and comp. Leave them on for Zero. 
+ if (FLAG_IS_DEFAULT(UseFastAccessorMethods)) { + UseFastAccessorMethods = (mode == _int); + } + if (FLAG_IS_DEFAULT(UseFastEmptyMethods)) { + UseFastEmptyMethods = (mode == _int); + } +#endif + // Default values may be platform/compiler dependent - // use the saved values ClipInlining = Arguments::_ClipInlining; @@ -1409,6 +1422,11 @@ } } } + if (UseNUMA) { + if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) { + FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M); + } + } } void Arguments::set_g1_gc_flags() { @@ -1973,6 +1991,9 @@ Arguments::_ClipInlining = ClipInlining; Arguments::_BackgroundCompilation = BackgroundCompilation; + // Setup flags for mixed which is the default + set_mode_flags(_mixed); + // Parse JAVA_TOOL_OPTIONS environment variable (if present) jint result = parse_java_tool_options_environment_variable(&scp, &scp_assembly_required); if (result != JNI_OK) { @@ -2362,7 +2383,6 @@ _gc_log_filename = strdup(tail); FLAG_SET_CMDLINE(bool, PrintGC, true); FLAG_SET_CMDLINE(bool, PrintGCTimeStamps, true); - FLAG_SET_CMDLINE(bool, TraceClassUnloading, true); // JNI hooks } else if (match_option(option, "-Xcheck", &tail)) {
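Two of the arguments.cpp changes above are observable from plain Java code: the execution mode now defaults explicitly to mixed before option parsing, and the java.vm.info system property is filled in from VM_Version::vm_info_string(). A trivial check follows; the printed strings are whatever the running VM reports (for example "mixed mode" on a stock HotSpot), and the class name is illustrative.

public class VmInfoDemo {
    public static void main(String[] args) {
        // Set by Arguments from VM_Version::vm_info_string()
        System.out.println(System.getProperty("java.vm.info"));
        // For comparison: the VM name string
        System.out.println(System.getProperty("java.vm.name"));
    }
}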
--- a/src/share/vm/runtime/deoptimization.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/deoptimization.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -90,12 +90,14 @@ Deoptimization::UnrollBlock::UnrollBlock(int size_of_deoptimized_frame, int caller_adjustment, + int caller_actual_parameters, int number_of_frames, intptr_t* frame_sizes, address* frame_pcs, BasicType return_type) { _size_of_deoptimized_frame = size_of_deoptimized_frame; _caller_adjustment = caller_adjustment; + _caller_actual_parameters = caller_actual_parameters; _number_of_frames = number_of_frames; _frame_sizes = frame_sizes; _frame_pcs = frame_pcs; @@ -189,6 +191,10 @@ assert(thread->deopt_nmethod() == NULL, "Pending deopt!"); thread->set_deopt_nmethod(deoptee.cb()->as_nmethod_or_null()); + if (VerifyStack) { + thread->validate_frame_layout(); + } + // Create a growable array of VFrames where each VFrame represents an inlined // Java frame. This storage is allocated with the usual system arena. assert(deoptee.is_compiled_frame(), "Wrong frame type"); @@ -369,6 +375,28 @@ popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words()); } + // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized + // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather + // than simply use array->sender.pc(). This requires us to walk the current set of frames + // + frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame + deopt_sender = deopt_sender.sender(&dummy_map); // Now deoptee caller + + // It's possible that the number of paramters at the call site is + // different than number of arguments in the callee when method + // handles are used. If the caller is interpreted get the real + // value so that the proper amount of space can be added to it's + // frame. + int caller_actual_parameters = callee_parameters; + if (deopt_sender.is_interpreted_frame()) { + methodHandle method = deopt_sender.interpreter_frame_method(); + Bytecode_invoke cur = Bytecode_invoke_check(method, + deopt_sender.interpreter_frame_bci()); + Symbol* signature = method->constants()->signature_ref_at(cur.index()); + ArgumentSizeComputer asc(signature); + caller_actual_parameters = asc.size() + (cur.has_receiver() ? 1 : 0); + } + // // frame_sizes/frame_pcs[0] oldest frame (int or c2i) // frame_sizes/frame_pcs[1] next oldest frame (int) @@ -387,7 +415,13 @@ // frame[number_of_frames - 1 ] = on_stack_size(youngest) // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest)) // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest))) - frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters, + int caller_parms = callee_parameters; + if (index == array->frames() - 1) { + // Use the value from the interpreted caller + caller_parms = caller_actual_parameters; + } + frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(caller_parms, + callee_parameters, callee_locals, index == 0, popframe_extra_args); @@ -414,13 +448,6 @@ // Compute information for handling adapters and adjusting the frame size of the caller. int caller_adjustment = 0; - // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized - // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather - // than simply use array->sender.pc(). 
This requires us to walk the current set of frames - // - frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame - deopt_sender = deopt_sender.sender(&dummy_map); // Now deoptee caller - // Compute the amount the oldest interpreter frame will have to adjust // its caller's stack by. If the caller is a compiled frame then // we pretend that the callee has no parameters so that the @@ -435,14 +462,13 @@ if (deopt_sender.is_compiled_frame()) { caller_adjustment = last_frame_adjust(0, callee_locals); - } else if (callee_locals > callee_parameters) { + } else if (callee_locals > caller_actual_parameters) { // The caller frame may need extending to accommodate // non-parameter locals of the first unpacked interpreted frame. // Compute that adjustment. - caller_adjustment = last_frame_adjust(callee_parameters, callee_locals); + caller_adjustment = last_frame_adjust(caller_actual_parameters, callee_locals); } - // If the sender is deoptimized the we must retrieve the address of the handler // since the frame will "magically" show the original pc before the deopt // and we'd undo the deopt. @@ -455,6 +481,7 @@ UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord, caller_adjustment * BytesPerWord, + caller_actual_parameters, number_of_frames, frame_sizes, frame_pcs, @@ -552,7 +579,7 @@ UnrollBlock* info = array->unroll_block(); // Unpack the interpreter frames and any adapter frame (c2 only) we might create. - array->unpack_to_stack(stub_frame, exec_mode); + array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters()); BasicType bt = info->return_type(); @@ -569,6 +596,8 @@ if (VerifyStack) { ResourceMark res_mark; + thread->validate_frame_layout(); + // Verify that the just-unpacked frames match the interpreter's // notions of expression stack and locals vframeArray* cur_array = thread->vframe_array_last(); @@ -1753,7 +1782,8 @@ "constraint", "div0_check", "age", - "predicate" + "predicate", + "loop_limit_check" }; const char* Deoptimization::_trap_action_name[Action_LIMIT] = { // Note: Keep this in sync. with enum DeoptAction.
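The caller_actual_parameters plumbing above exists because method handle adapters can make the argument count pushed at an interpreted call site differ from the ultimate callee's declared parameter count, and the frames rebuilt during deoptimization must be sized from what the caller actually pushed. The sketch below shows one Java-level way such a mismatch arises (a bound-argument adapter supplying a missing argument); it illustrates the scenario only, not the deoptimization code, and the class name is illustrative.

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class ArityMismatchDemo {
    static int sum(int a, int b) { return a + b; }

    public static void main(String[] args) throws Throwable {
        MethodHandle sum = MethodHandles.lookup().findStatic(ArityMismatchDemo.class,
                "sum", MethodType.methodType(int.class, int.class, int.class));
        // The call site pushes a single int, but the eventual callee takes two;
        // the bound-argument adapter inserts the other one in between.
        MethodHandle plus40 = MethodHandles.insertArguments(sum, 0, 40);
        System.out.println((int) plus40.invokeExact(2));   // 42
    }
}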
--- a/src/share/vm/runtime/deoptimization.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/deoptimization.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,6 +56,7 @@ Reason_div0_check, // a null_check due to division by zero Reason_age, // nmethod too old; tier threshold reached Reason_predicate, // compiler generated predicate failed + Reason_loop_limit_check, // compiler generated loop limits check failed Reason_LIMIT, // Note: Keep this enum in sync. with _trap_reason_name. Reason_RECORDED_LIMIT = Reason_bimorphic // some are not recorded per bc @@ -78,7 +79,7 @@ enum { _action_bits = 3, - _reason_bits = 4, + _reason_bits = 5, _action_shift = 0, _reason_shift = _action_shift+_action_bits, BC_CASE_LIMIT = PRODUCT_ONLY(1) NOT_PRODUCT(4) // for _deoptimization_hist @@ -137,6 +138,9 @@ intptr_t* _register_block; // Block for storing callee-saved registers. BasicType _return_type; // Tells if we have to restore double or long return value intptr_t _initial_fp; // FP of the sender frame + int _caller_actual_parameters; // The number of actual arguments at the + // interpreted caller of the deoptimized frame + // The following fields are used as temps during the unpacking phase // (which is tight on registers, especially on x86). They really ought // to be PD variables but that involves moving this class into its own @@ -148,6 +152,7 @@ // Constructor UnrollBlock(int size_of_deoptimized_frame, int caller_adjustment, + int caller_actual_parameters, int number_of_frames, intptr_t* frame_sizes, address* frames_pcs, @@ -167,6 +172,8 @@ void set_initial_fp(intptr_t fp) { _initial_fp = fp; } + int caller_actual_parameters() const { return _caller_actual_parameters; } + // Accessors used by the code generator for the unpack stub. static int size_of_deoptimized_frame_offset_in_bytes() { return offset_of(UnrollBlock, _size_of_deoptimized_frame); } static int caller_adjustment_offset_in_bytes() { return offset_of(UnrollBlock, _caller_adjustment); }
--- a/src/share/vm/runtime/dtraceJSDT.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/dtraceJSDT.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/fieldDescriptor.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/fieldDescriptor.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/fieldType.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/fieldType.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/fieldType.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/fieldType.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/fprofiler.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/fprofiler.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/fprofiler.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/fprofiler.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/frame.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/frame.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -33,6 +33,7 @@ #include "oops/methodOop.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.inline2.hpp" +#include "prims/methodHandles.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" @@ -169,6 +170,11 @@ } // type testers +bool frame::is_ricochet_frame() const { + RicochetBlob* rcb = SharedRuntime::ricochet_blob(); + return (_cb == rcb && rcb != NULL && rcb->returns_to_bounce_addr(_pc)); +} + bool frame::is_deoptimized_frame() const { assert(_deopt_state != unknown, "not answerable"); return _deopt_state == is_deoptimized; @@ -341,12 +347,18 @@ frame frame::real_sender(RegisterMap* map) const { frame result = sender(map); - while (result.is_runtime_frame()) { + while (result.is_runtime_frame() || + result.is_ricochet_frame()) { result = result.sender(map); } return result; } +frame frame::sender_for_ricochet_frame(RegisterMap* map) const { + assert(is_ricochet_frame(), ""); + return MethodHandles::ricochet_frame_sender(*this, map); +} + // Note: called by profiler - NOT for current thread frame frame::profile_find_Java_sender_frame(JavaThread *thread) { // If we don't recognize this frame, walk back up the stack until we do @@ -529,6 +541,7 @@ const char* frame::print_name() const { if (is_native_frame()) return "Native"; if (is_interpreted_frame()) return "Interpreted"; + if (is_ricochet_frame()) return "Ricochet"; if (is_compiled_frame()) { if (is_deoptimized_frame()) return "Deoptimized"; return "Compiled"; @@ -715,6 +728,8 @@ st->print("v ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name()); } else if (_cb->is_deoptimization_stub()) { st->print("v ~DeoptimizationBlob"); + } else if (_cb->is_ricochet_stub()) { + st->print("v ~RichochetBlob"); } else if (_cb->is_exception_stub()) { st->print("v ~ExceptionBlob"); } else if (_cb->is_safepoint_stub()) { @@ -978,6 +993,9 @@ void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) { assert(_cb != NULL, "sanity check"); + if (_cb == SharedRuntime::ricochet_blob()) { + oops_ricochet_do(f, reg_map); + } if (_cb->oop_maps() != NULL) { OopMapSet::oops_do(this, reg_map, f); @@ -996,6 +1014,11 @@ cf->do_code_blob(_cb); } +void frame::oops_ricochet_do(OopClosure* f, const RegisterMap* map) { + assert(is_ricochet_frame(), ""); + MethodHandles::ricochet_frame_oops_do(*this, f, map); +} + class CompiledArgumentOopFinder: public SignatureInfo { protected: OopClosure* _f; @@ -1308,6 +1331,72 @@ guarantee((current - low_mark) % monitor_size == 0 , "Misaligned bottom of BasicObjectLock*"); guarantee( current >= low_mark , "Current BasicObjectLock* below than low_mark"); } + + +void frame::describe(FrameValues& values, int frame_no) { + if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) { + // Label values common to most frames + values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no)); + values.describe(-1, sp(), err_msg("sp for #%d", frame_no)); + values.describe(-1, fp(), err_msg("fp for #%d", frame_no)); + } + if (is_interpreted_frame()) { + methodOop m = interpreter_frame_method(); + int bci = interpreter_frame_bci(); + + // Label the method and current bci + values.describe(-1, MAX2(sp(), fp()), + FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 2); + values.describe(-1, MAX2(sp(), fp()), + err_msg("- %d locals %d 
max stack", m->max_locals(), m->max_stack()), 1); + if (m->max_locals() > 0) { + intptr_t* l0 = interpreter_frame_local_at(0); + intptr_t* ln = interpreter_frame_local_at(m->max_locals() - 1); + values.describe(-1, MAX2(l0, ln), err_msg("locals for #%d", frame_no), 1); + // Report each local and mark as owned by this frame + for (int l = 0; l < m->max_locals(); l++) { + intptr_t* l0 = interpreter_frame_local_at(l); + values.describe(frame_no, l0, err_msg("local %d", l)); + } + } + + // Compute the actual expression stack size + InterpreterOopMap mask; + OopMapCache::compute_one_oop_map(m, bci, &mask); + intptr_t* tos = NULL; + // Report each stack element and mark as owned by this frame + for (int e = 0; e < mask.expression_stack_size(); e++) { + tos = MAX2(tos, interpreter_frame_expression_stack_at(e)); + values.describe(frame_no, interpreter_frame_expression_stack_at(e), + err_msg("stack %d", e)); + } + if (tos != NULL) { + values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 1); + } + if (interpreter_frame_monitor_begin() != interpreter_frame_monitor_end()) { + values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_begin(), "monitors begin"); + values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_end(), "monitors end"); + } + } else if (is_entry_frame()) { + // For now just label the frame + values.describe(-1, MAX2(sp(), fp()), err_msg("#%d entry frame", frame_no), 2); + } else if (is_compiled_frame()) { + // For now just label the frame + nmethod* nm = cb()->as_nmethod_or_null(); + values.describe(-1, MAX2(sp(), fp()), + FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s", frame_no, + nm, nm->method()->name_and_sig_as_C_string(), + is_deoptimized_frame() ? " (deoptimized" : ""), 2); + } else if (is_native_frame()) { + // For now just label the frame + nmethod* nm = cb()->as_nmethod_or_null(); + values.describe(-1, MAX2(sp(), fp()), + FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no, + nm, nm->method()->name_and_sig_as_C_string()), 2); + } + describe_pd(values, frame_no); +} + #endif @@ -1319,3 +1408,84 @@ _fr = thread->last_frame(); _is_done = false; } + + +#ifdef ASSERT + +void FrameValues::describe(int owner, intptr_t* location, const char* description, int priority) { + FrameValue fv; + fv.location = location; + fv.owner = owner; + fv.priority = priority; + fv.description = NEW_RESOURCE_ARRAY(char, strlen(description) + 1); + strcpy(fv.description, description); + _values.append(fv); +} + + +void FrameValues::validate() { + _values.sort(compare); + bool error = false; + FrameValue prev; + prev.owner = -1; + for (int i = _values.length() - 1; i >= 0; i--) { + FrameValue fv = _values.at(i); + if (fv.owner == -1) continue; + if (prev.owner == -1) { + prev = fv; + continue; + } + if (prev.location == fv.location) { + if (fv.owner != prev.owner) { + tty->print_cr("overlapping storage"); + tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", prev.location, *prev.location, prev.description); + tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description); + error = true; + } + } else { + prev = fv; + } + } + assert(!error, "invalid layout"); +} + + +void FrameValues::print() { + _values.sort(compare); + JavaThread* thread = JavaThread::current(); + + // Sometimes values like the fp can be invalid values if the + // register map wasn't updated during the walk. Trim out values + // that aren't actually in the stack of the thread. 
+ int min_index = 0; + int max_index = _values.length() - 1; + intptr_t* v0 = _values.at(min_index).location; + while (!thread->is_in_stack((address)v0)) { + v0 = _values.at(++min_index).location; + } + intptr_t* v1 = _values.at(max_index).location; + while (!thread->is_in_stack((address)v1)) { + v1 = _values.at(--max_index).location; + } + intptr_t* min = MIN2(v0, v1); + intptr_t* max = MAX2(v0, v1); + intptr_t* cur = max; + intptr_t* last = NULL; + for (int i = max_index; i >= min_index; i--) { + FrameValue fv = _values.at(i); + while (cur > fv.location) { + tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT, cur, *cur); + cur--; + } + if (last == fv.location) { + const char* spacer = " " LP64_ONLY(" "); + tty->print_cr(" %s %s %s", spacer, spacer, fv.description); + } else { + tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description); + last = fv.location; + cur--; + } + } +} + +#endif
--- a/src/share/vm/runtime/frame.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/frame.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,6 +60,7 @@ typedef class BytecodeInterpreter* interpreterState; class CodeBlob; +class FrameValues; class vframeArray; @@ -134,6 +135,7 @@ bool is_interpreted_frame() const; bool is_java_frame() const; bool is_entry_frame() const; // Java frame called from C? + bool is_ricochet_frame() const; bool is_native_frame() const; bool is_runtime_frame() const; bool is_compiled_frame() const; @@ -174,6 +176,7 @@ // Helper methods for better factored code in frame::sender frame sender_for_compiled_frame(RegisterMap* map) const; frame sender_for_entry_frame(RegisterMap* map) const; + frame sender_for_ricochet_frame(RegisterMap* map) const; frame sender_for_interpreter_frame(RegisterMap* map) const; frame sender_for_native_frame(RegisterMap* map) const; @@ -381,6 +384,8 @@ private: const char* print_name() const; + void describe_pd(FrameValues& values, int frame_no); + public: void print_value() const { print_value_on(tty,NULL); } void print_value_on(outputStream* st, JavaThread *thread) const; @@ -388,12 +393,16 @@ void interpreter_frame_print_on(outputStream* st) const; void print_on_error(outputStream* st, char* buf, int buflen, bool verbose = false) const; + // Add annotated descriptions of memory locations belonging to this frame to values + void describe(FrameValues& values, int frame_no); + // Conversion from an VMReg to physical stack location oop* oopmapreg_to_location(VMReg reg, const RegisterMap* regmap) const; // Oops-do's void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f); void oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache = true); + void oops_ricochet_do(OopClosure* f, const RegisterMap* map); private: void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f); @@ -472,6 +481,41 @@ }; +#ifdef ASSERT +// A simple class to describe a location on the stack +class FrameValue VALUE_OBJ_CLASS_SPEC { + public: + intptr_t* location; + char* description; + int owner; + int priority; +}; + + +// A collection of described stack values that can print a symbolic +// description of the stack memory. Interpreter frame values can be +// in the caller frames so all the values are collected first and then +// sorted before being printed. +class FrameValues { + private: + GrowableArray<FrameValue> _values; + + static int compare(FrameValue* a, FrameValue* b) { + if (a->location == b->location) { + return a->priority - b->priority; + } + return a->location - b->location; + } + + public: + // Used by frame functions to describe locations. + void describe(int owner, intptr_t* location, const char* description, int priority = 0); + + void validate(); + void print(); +}; + +#endif // // StackFrameStream iterates through the frames of a thread starting from
--- a/src/share/vm/runtime/frame.inline.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/frame.inline.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/globals.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/globals.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -63,6 +63,12 @@ bool Flag::is_unlocked() const { if (strcmp(kind, "{diagnostic}") == 0) { + if (strcmp(name, "EnableInvokeDynamic") == 0 && UnlockExperimentalVMOptions && !UnlockDiagnosticVMOptions) { + // transitional logic to allow tests to run until they are changed + static int warned; + if (++warned == 1) warning("Use -XX:+UnlockDiagnosticVMOptions before EnableInvokeDynamic flag"); + return true; + } return UnlockDiagnosticVMOptions; } else if (strcmp(kind, "{experimental}") == 0 || strcmp(kind, "{C2 experimental}") == 0) {
--- a/src/share/vm/runtime/globals.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/globals.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -620,6 +620,9 @@ product(bool, UseSSE42Intrinsics, false, \ "SSE4.2 versions of intrinsics") \ \ + product(bool, UseCondCardMark, false, \ + "Check for already marked card before updating card table") \ + \ develop(bool, TraceCallFixup, false, \ "traces all call fixups") \ \ @@ -1355,13 +1358,6 @@ product(bool, UseParallelOldGC, false, \ "Use the Parallel Old garbage collector") \ \ - product(bool, UseParallelOldGCCompacting, true, \ - "In the Parallel Old garbage collector use parallel compaction") \ - \ - product(bool, UseParallelDensePrefixUpdate, true, \ - "In the Parallel Old garbage collector use parallel dense" \ - " prefix update") \ - \ product(uintx, HeapMaximumCompactionInterval, 20, \ "How often should we maximally compact the heap (not allowing " \ "any dead space)") \ @@ -1381,9 +1377,6 @@ "The standard deviation used by the par compact dead wood" \ "limiter (a number between 0-100).") \ \ - product(bool, UseParallelOldGCDensePrefix, true, \ - "Use a dense prefix with the Parallel Old garbage collector") \ - \ product(uintx, ParallelGCThreads, 0, \ "Number of parallel threads parallel gc will use") \ \ @@ -1467,8 +1460,10 @@ product(intx, ParallelGCBufferWastePct, 10, \ "wasted fraction of parallel allocation buffer.") \ \ - product(bool, ParallelGCRetainPLAB, true, \ - "Retain parallel allocation buffers across scavenges.") \ + diagnostic(bool, ParallelGCRetainPLAB, false, \ + "Retain parallel allocation buffers across scavenges; " \ + " -- disabled because this currently conflicts with " \ + " parallel card scanning under certain conditions ") \ \ product(intx, TargetPLABWastePct, 10, \ "target wasted space in last buffer as pct of overall allocation")\ @@ -1502,7 +1497,15 @@ product(uintx, ParGCDesiredObjsFromOverflowList, 20, \ "The desired number of objects to claim from the overflow list") \ \ - product(uintx, CMSParPromoteBlocksToClaim, 16, \ + diagnostic(intx, ParGCStridesPerThread, 2, \ + "The number of strides per worker thread that we divide up the " \ + "card table scanning work into") \ + \ + diagnostic(intx, ParGCCardsPerStrideChunk, 256, \ + "The number of cards in each chunk of the parallel chunks used " \ + "during card table scanning") \ + \ + product(uintx, CMSParPromoteBlocksToClaim, 16, \ "Number of blocks to attempt to claim when refilling CMS LAB for "\ "parallel GC.") \ \ @@ -1834,7 +1837,7 @@ develop(bool, VerifyBlockOffsetArray, false, \ "Do (expensive!) block offset array verification") \ \ - product(bool, BlockOffsetArrayUseUnallocatedBlock, false, \ + diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \ "Maintain _unallocated_block in BlockOffsetArray" \ " (currently applicable only to CMS collector)") \ \ @@ -1924,7 +1927,7 @@ experimental(intx, WorkStealingSleepMillis, 1, \ "Sleep time when sleep is used for yields") \ \ - experimental(uintx, WorkStealingYieldsBeforeSleep, 1000, \ + experimental(uintx, WorkStealingYieldsBeforeSleep, 5000, \ "Number of yields before a sleep is done during workstealing") \ \ experimental(uintx, WorkStealingHardSpins, 4096, \ @@ -2889,7 +2892,7 @@ "Max. no. 
of lines in the stack trace for Java exceptions " \ "(0 means all)") \ \ - NOT_EMBEDDED(develop(intx, GuaranteedSafepointInterval, 1000, \ + NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000, \ "Guarantee a safepoint (at least) every so many milliseconds " \ "(0 means none)")) \ \ @@ -2906,6 +2909,12 @@ product(intx, NmethodSweepCheckInterval, 5, \ "Compilers wake up every n seconds to possibly sweep nmethods") \ \ + notproduct(bool, LogSweeper, false, \ + "Keep a ring buffer of sweeper activity") \ + \ + notproduct(intx, SweeperLogEntries, 1024, \ + "Number of records in the ring buffer of sweeper activity") \ + \ notproduct(intx, MemProfilingInterval, 500, \ "Time between each invocation of the MemProfiler") \ \ @@ -3715,16 +3724,17 @@ diagnostic(bool, OptimizeMethodHandles, true, \ "when constructing method handles, try to improve them") \ \ + develop(bool, StressMethodHandleWalk, false, \ + "Process all method handles with MethodHandleWalk") \ + \ + diagnostic(bool, UseRicochetFrames, true, \ + "use ricochet stack frames for method handle combination, " \ + "if the platform supports them") \ + \ experimental(bool, TrustFinalNonStaticFields, false, \ "trust final non-static declarations for constant folding") \ \ - experimental(bool, AllowTransitionalJSR292, true, \ - "recognize pre-PFD formats of invokedynamic") \ - \ - experimental(bool, PreferTransitionalJSR292, false, \ - "prefer pre-PFD APIs on boot class path, if they exist") \ - \ - experimental(bool, AllowInvokeForInvokeGeneric, false, \ + experimental(bool, AllowInvokeGeneric, true, \ "accept MethodHandle.invoke and MethodHandle.invokeGeneric " \ "as equivalent methods") \ \
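A minimal sketch of the write-barrier shape suggested by the new UseCondCardMark flag in the globals.hpp hunk above: test the card before dirtying it, so already-marked cards are not rewritten. Names and the dirty value are illustrative, not HotSpot's actual barrier code.

// Illustrative barrier shape; 0 stands in for the card table's dirty value.
static void post_write_barrier_example(volatile jbyte* card, bool use_cond_card_mark) {
  const jbyte dirty = 0;
  if (use_cond_card_mark) {
    if (*card != dirty) *card = dirty;   // read first, write only if still clean
  } else {
    *card = dirty;                       // unconditional store
  }
}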
--- a/src/share/vm/runtime/handles.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/handles.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/icache.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/icache.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/interfaceSupport.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/interfaceSupport.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/interfaceSupport.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/interfaceSupport.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/javaCalls.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/javaCalls.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -389,7 +389,7 @@ // to Java if (!os::stack_shadow_pages_available(THREAD, method)) { // Throw stack overflow exception with preinitialized exception. - Exceptions::throw_stack_overflow_exception(THREAD, __FILE__, __LINE__); + Exceptions::throw_stack_overflow_exception(THREAD, __FILE__, __LINE__, method); return; } else { // Touch pages checked if the OS needs them to be touched to be mapped.
--- a/src/share/vm/runtime/javaCalls.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/javaCalls.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/javaFrameAnchor.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/javaFrameAnchor.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/jniHandles.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/jniHandles.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/objectMonitor.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/objectMonitor.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/os.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/os.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1291,3 +1291,41 @@ } return result; } + +// Read file line by line, if line is longer than bsize, +// skip rest of line. +int os::get_line_chars(int fd, char* buf, const size_t bsize){ + size_t sz, i = 0; + + // read until EOF, EOL or buf is full + while ((sz = (int) read(fd, &buf[i], 1)) == 1 && i < (bsize-1) && buf[i] != '\n') { + ++i; + } + + if (buf[i] == '\n') { + // EOL reached so ignore EOL character and return + + buf[i] = 0; + return (int) i; + } + + buf[i+1] = 0; + + if (sz != 1) { + // EOF reached. if we read chars before EOF return them and + // return EOF on next call otherwise return EOF + + return (i == 0) ? -1 : (int) i; + } + + // line is longer than size of buf, skip to EOL + int ch; + while (read(fd, &ch, 1) == 1 && ch != '\n') { + // Do nothing + } + + // return initial part of line that fits in buf. + // If we reached EOF, it will be returned on next call. + + return (int) i; +}
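A hypothetical caller of the new os::get_line_chars helper above, looping until it reports EOF (-1). The path and function name are purely illustrative; this is not code from the changeset.

// Hypothetical caller; the path is chosen only for illustration.
#include <fcntl.h>

static void dump_lines_example() {
  char line[256];
  int fd = ::open("/proc/self/status", O_RDONLY);
  if (fd == -1) return;
  int len;
  while ((len = os::get_line_chars(fd, line, sizeof(line))) != -1) {
    // 'line' holds one input line (truncated to the buffer if necessary),
    // NUL-terminated; 'len' is the number of characters kept
    tty->print_cr("%d: %s", len, line);
  }
  ::close(fd);
}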
--- a/src/share/vm/runtime/os.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/os.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -274,7 +274,7 @@ static char* reserve_memory_special(size_t size, char* addr = NULL, bool executable = false); static bool release_memory_special(char* addr, size_t bytes); - static bool large_page_init(); + static void large_page_init(); static size_t large_page_size(); static bool can_commit_large_page_memory(); static bool can_execute_large_page_memory(); @@ -658,6 +658,10 @@ // Hook for os specific jvm options that we don't want to abort on seeing static bool obsolete_option(const JavaVMOption *option); + // Read file line by line. If line is longer than bsize, + // rest of line is skipped. Returns number of bytes read or -1 on EOF + static int get_line_chars(int fd, char *buf, const size_t bsize); + // Platform dependent stuff #ifdef TARGET_OS_FAMILY_linux # include "os_linux.hpp"
--- a/src/share/vm/runtime/reflection.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/reflection.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/reflectionUtils.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/reflectionUtils.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/registerMap.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/registerMap.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/rframe.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/rframe.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/safepoint.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/safepoint.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/serviceThread.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/serviceThread.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -28,6 +28,7 @@ #include "runtime/serviceThread.hpp" #include "runtime/mutexLocker.hpp" #include "prims/jvmtiImpl.hpp" +#include "services/gcNotifier.hpp" ServiceThread* ServiceThread::_instance = NULL; @@ -81,6 +82,7 @@ while (true) { bool sensors_changed = false; bool has_jvmti_events = false; + bool has_gc_notification_event = false; JvmtiDeferredEvent jvmti_event; { // Need state transition ThreadBlockInVM so that this thread @@ -95,9 +97,10 @@ MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); while (!(sensors_changed = LowMemoryDetector::has_pending_requests()) && - !(has_jvmti_events = JvmtiDeferredEventQueue::has_events())) { + !(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) && + !(has_gc_notification_event = GCNotifier::has_event())) { // wait until one of the sensors has pending requests, or there is a - // pending JVMTI event to post + // pending JVMTI event or JMX GC notification to post Service_lock->wait(Mutex::_no_safepoint_check_flag); } @@ -113,6 +116,10 @@ if (sensors_changed) { LowMemoryDetector::process_sensor_changes(jt); } + + if(has_gc_notification_event) { + GCNotifier::sendNotification(CHECK); + } } }
--- a/src/share/vm/runtime/sharedRuntime.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/sharedRuntime.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -88,6 +88,8 @@ HS_DTRACE_PROBE_DECL7(hotspot, method__return, int, char*, int, char*, int, char*, int); +RicochetBlob* SharedRuntime::_ricochet_blob = NULL; + // Implementation of SharedRuntime #ifndef PRODUCT @@ -460,6 +462,10 @@ if (Interpreter::contains(return_address)) { return Interpreter::rethrow_exception_entry(); } + // Ricochet frame unwind code + if (SharedRuntime::ricochet_blob() != NULL && SharedRuntime::ricochet_blob()->returns_to_bounce_addr(return_address)) { + return SharedRuntime::ricochet_blob()->exception_addr(); + } guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub"); guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!"); @@ -1174,6 +1180,7 @@ assert(stub_frame.is_runtime_frame(), "sanity check"); frame caller_frame = stub_frame.sender(®_map); assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame"); + assert(!caller_frame.is_ricochet_frame(), "unexpected frame"); #endif /* ASSERT */ methodHandle callee_method; @@ -1222,6 +1229,7 @@ if (caller_frame.is_interpreted_frame() || caller_frame.is_entry_frame() || + caller_frame.is_ricochet_frame() || is_mh_invoke_via_adapter) { methodOop callee = thread->callee_target(); guarantee(callee != NULL && callee->is_method(), "bad handshake"); @@ -1700,9 +1708,11 @@ message = generate_class_cast_message(objName, targetKlass->external_name()); } else { // %%% need to get the MethodType string, without messing around too much + const char* desc = NULL; // Get a signature from the invoke instruction const char* mhName = "method handle"; const char* targetType = "the required signature"; + int targetArity = -1, mhArity = -1; vframeStream vfst(thread, true); if (!vfst.at_end()) { Bytecode_invoke call(vfst.method(), vfst.bci()); @@ -1716,20 +1726,35 @@ && target->is_method_handle_invoke() && required == target->method_handle_type()) { targetType = target->signature()->as_C_string(); + targetArity = ArgumentCount(target->signature()).size(); } } - klassOop kignore; int fignore; - methodOop actual_method = MethodHandles::decode_method(actual, - kignore, fignore); - if (actual_method != NULL) { - if (methodOopDesc::is_method_handle_invoke_name(actual_method->name())) - mhName = "$"; + KlassHandle kignore; int dmf_flags = 0; + methodHandle actual_method = MethodHandles::decode_method(actual, kignore, dmf_flags); + if ((dmf_flags & ~(MethodHandles::_dmf_has_receiver | + MethodHandles::_dmf_does_dispatch | + MethodHandles::_dmf_from_interface)) != 0) + actual_method = methodHandle(); // MH does extra binds, drops, etc. 
+ bool has_receiver = ((dmf_flags & MethodHandles::_dmf_has_receiver) != 0); + if (actual_method.not_null()) { + mhName = actual_method->signature()->as_C_string(); + mhArity = ArgumentCount(actual_method->signature()).size(); + if (!actual_method->is_static()) mhArity += 1; + } else if (java_lang_invoke_MethodHandle::is_instance(actual)) { + oopDesc* mhType = java_lang_invoke_MethodHandle::type(actual); + mhArity = java_lang_invoke_MethodType::ptype_count(mhType); + stringStream st; + java_lang_invoke_MethodType::print_signature(mhType, &st); + mhName = st.as_string(); + } + if (targetArity != -1 && targetArity != mhArity) { + if (has_receiver && targetArity == mhArity-1) + desc = " cannot be called without a receiver argument as "; else - mhName = actual_method->signature()->as_C_string(); - if (mhName[0] == '$') - mhName = actual_method->signature()->as_C_string(); + desc = " cannot be called with a different arity as "; } message = generate_class_cast_message(mhName, targetType, + desc != NULL ? desc : " cannot be called as "); } if (TraceMethodHandles) {
--- a/src/share/vm/runtime/sharedRuntime.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/sharedRuntime.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -58,6 +58,8 @@ static RuntimeStub* _resolve_virtual_call_blob; static RuntimeStub* _resolve_static_call_blob; + static RicochetBlob* _ricochet_blob; + static SafepointBlob* _polling_page_safepoint_handler_blob; static SafepointBlob* _polling_page_return_handler_blob; #ifdef COMPILER2 @@ -213,6 +215,16 @@ return _resolve_static_call_blob->entry_point(); } + static RicochetBlob* ricochet_blob() { +#ifdef X86 + // Currently only implemented on x86 + assert(!EnableInvokeDynamic || _ricochet_blob != NULL, "oops"); +#endif + return _ricochet_blob; + } + + static void generate_ricochet_blob(); + static SafepointBlob* polling_page_return_handler_blob() { return _polling_page_return_handler_blob; } static SafepointBlob* polling_page_safepoint_handler_blob() { return _polling_page_safepoint_handler_blob; }
--- a/src/share/vm/runtime/signature.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/signature.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/signature.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/signature.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/simpleThresholdPolicy.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/simpleThresholdPolicy.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/simpleThresholdPolicy.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/simpleThresholdPolicy.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/stackValueCollection.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/stackValueCollection.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/statSampler.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/statSampler.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/stubCodeGenerator.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/stubCodeGenerator.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/stubRoutines.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/stubRoutines.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -433,3 +433,77 @@ #undef RETURN_STUB } + +// constants for computing the copy function +enum { + COPYFUNC_UNALIGNED = 0, + COPYFUNC_ALIGNED = 1, // src, dest aligned to HeapWordSize + COPYFUNC_CONJOINT = 0, + COPYFUNC_DISJOINT = 2 // src != dest, or transfer can descend +}; + +// Note: The condition "disjoint" applies also for overlapping copies +// where an descending copy is permitted (i.e., dest_offset <= src_offset). +address +StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) { + int selector = + (aligned ? COPYFUNC_ALIGNED : COPYFUNC_UNALIGNED) + + (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT); + +#define RETURN_STUB(xxx_arraycopy) { \ + name = #xxx_arraycopy; \ + return StubRoutines::xxx_arraycopy(); } + +#define RETURN_STUB_PARM(xxx_arraycopy, parm) { \ + name = #xxx_arraycopy; \ + return StubRoutines::xxx_arraycopy(parm); } + + switch (t) { + case T_BYTE: + case T_BOOLEAN: + switch (selector) { + case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jbyte_arraycopy); + case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jbyte_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jbyte_disjoint_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jbyte_disjoint_arraycopy); + } + case T_CHAR: + case T_SHORT: + switch (selector) { + case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jshort_arraycopy); + case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jshort_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jshort_disjoint_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jshort_disjoint_arraycopy); + } + case T_INT: + case T_FLOAT: + switch (selector) { + case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jint_arraycopy); + case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jint_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jint_disjoint_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jint_disjoint_arraycopy); + } + case T_DOUBLE: + case T_LONG: + switch (selector) { + case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jlong_arraycopy); + case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jlong_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jlong_disjoint_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jlong_disjoint_arraycopy); + } + case T_ARRAY: + case T_OBJECT: + switch (selector) { + case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized); + case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB_PARM(arrayof_oop_arraycopy, dest_uninitialized); + case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB_PARM(oop_disjoint_arraycopy, dest_uninitialized); + case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB_PARM(arrayof_oop_disjoint_arraycopy, dest_uninitialized); + } + default: + ShouldNotReachHere(); + return NULL; + } + +#undef RETURN_STUB +#undef RETURN_STUB_PARM +}
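A small usage sketch for the new StubRoutines::select_arraycopy_function above: asking for an aligned, disjoint int copy should hand back the arrayof_jint_disjoint_arraycopy stub and report its name. The wrapper function is illustrative only.

// Illustrative only; mirrors the selector table in the hunk above.
static address pick_int_copy_stub_example() {
  const char* name = NULL;
  address entry = StubRoutines::select_arraycopy_function(T_INT,
                                                          true  /* aligned  */,
                                                          true  /* disjoint */,
                                                          name,
                                                          false /* dest_uninitialized */);
  // entry == StubRoutines::arrayof_jint_disjoint_arraycopy(),
  // name  == "arrayof_jint_disjoint_arraycopy"
  return entry;
}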
--- a/src/share/vm/runtime/stubRoutines.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/stubRoutines.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -282,6 +282,8 @@ static address addr_fpu_subnormal_bias2() { return (address)&_fpu_subnormal_bias2; } + static address select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized); + static address jbyte_arraycopy() { return _jbyte_arraycopy; } static address jshort_arraycopy() { return _jshort_arraycopy; } static address jint_arraycopy() { return _jint_arraycopy; }
--- a/src/share/vm/runtime/sweeper.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/sweeper.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,94 @@ #include "utilities/events.hpp" #include "utilities/xmlstream.hpp" +#ifdef ASSERT + +#define SWEEP(nm) record_sweep(nm, __LINE__) +// Sweeper logging code +class SweeperRecord { + public: + int traversal; + int invocation; + int compile_id; + long traversal_mark; + int state; + const char* kind; + address vep; + address uep; + int line; + + void print() { + tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = " + PTR_FORMAT " state = %d traversal_mark %d line = %d", + traversal, + invocation, + compile_id, + kind == NULL ? "" : kind, + uep, + vep, + state, + traversal_mark, + line); + } +}; + +static int _sweep_index = 0; +static SweeperRecord* _records = NULL; + +void NMethodSweeper::report_events(int id, address entry) { + if (_records != NULL) { + for (int i = _sweep_index; i < SweeperLogEntries; i++) { + if (_records[i].uep == entry || + _records[i].vep == entry || + _records[i].compile_id == id) { + _records[i].print(); + } + } + for (int i = 0; i < _sweep_index; i++) { + if (_records[i].uep == entry || + _records[i].vep == entry || + _records[i].compile_id == id) { + _records[i].print(); + } + } + } +} + +void NMethodSweeper::report_events() { + if (_records != NULL) { + for (int i = _sweep_index; i < SweeperLogEntries; i++) { + // skip empty records + if (_records[i].vep == NULL) continue; + _records[i].print(); + } + for (int i = 0; i < _sweep_index; i++) { + // skip empty records + if (_records[i].vep == NULL) continue; + _records[i].print(); + } + } +} + +void NMethodSweeper::record_sweep(nmethod* nm, int line) { + if (_records != NULL) { + _records[_sweep_index].traversal = _traversals; + _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark; + _records[_sweep_index].invocation = _invocations; + _records[_sweep_index].compile_id = nm->compile_id(); + _records[_sweep_index].kind = nm->compile_kind(); + _records[_sweep_index].state = nm->_state; + _records[_sweep_index].vep = nm->verified_entry_point(); + _records[_sweep_index].uep = nm->entry_point(); + _records[_sweep_index].line = line; + + _sweep_index = (_sweep_index + 1) % SweeperLogEntries; + } +} +#else +#define SWEEP(nm) +#endif + + long NMethodSweeper::_traversals = 0; // No. of stack traversals performed nmethod* NMethodSweeper::_current = NULL; // Current nmethod int NMethodSweeper::_seen = 0 ; // No. 
of nmethods we have currently processed in current pass of CodeCache @@ -137,6 +225,13 @@ if (old != 0) { return; } +#ifdef ASSERT + if (LogSweeper && _records == NULL) { + // Create the ring buffer for the logging code + _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries); + memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries); + } +#endif if (_invocations > 0) { sweep_code_cache(); _invocations--; @@ -213,10 +308,29 @@ } } +class NMethodMarker: public StackObj { + private: + CompilerThread* _thread; + public: + NMethodMarker(nmethod* nm) { + _thread = CompilerThread::current(); + _thread->set_scanned_nmethod(nm); + } + ~NMethodMarker() { + _thread->set_scanned_nmethod(NULL); + } +}; + void NMethodSweeper::process_nmethod(nmethod *nm) { assert(!CodeCache_lock->owned_by_self(), "just checking"); + // Make sure this nmethod doesn't get unloaded during the scan, + // since the locks acquired below might safepoint. + NMethodMarker nmm(nm); + + SWEEP(nm); + // Skip methods that are currently referenced by the VM if (nm->is_locked_by_vm()) { // But still remember to clean-up inline caches for alive nmethods @@ -224,8 +338,10 @@ // Clean-up all inline caches that points to zombie/non-reentrant methods MutexLocker cl(CompiledIC_lock); nm->cleanup_inline_caches(); + SWEEP(nm); } else { _locked_seen++; + SWEEP(nm); } return; } @@ -247,6 +363,7 @@ } nm->mark_for_reclamation(); _rescan = true; + SWEEP(nm); } } else if (nm->is_not_entrant()) { // If there is no current activations of this method on the @@ -257,6 +374,7 @@ } nm->make_zombie(); _rescan = true; + SWEEP(nm); } else { // Still alive, clean up its inline caches MutexLocker cl(CompiledIC_lock); @@ -265,6 +383,7 @@ // request a rescan. If this method stays on the stack for a // long time we don't want to keep rescanning the code cache. _not_entrant_seen_on_stack++; + SWEEP(nm); } } else if (nm->is_unloaded()) { // Unloaded code, just make it a zombie @@ -273,10 +392,12 @@ if (nm->is_osr_method()) { // No inline caches will ever point to osr methods, so we can just remove it MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + SWEEP(nm); nm->flush(); } else { nm->make_zombie(); _rescan = true; + SWEEP(nm); } } else { assert(nm->is_alive(), "should be alive"); @@ -293,6 +414,7 @@ // Clean-up all inline caches that points to zombie/non-reentrant methods MutexLocker cl(CompiledIC_lock); nm->cleanup_inline_caches(); + SWEEP(nm); } } @@ -418,6 +540,11 @@ // state of the code cache if it's requested. void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) { if (PrintMethodFlushing) { + stringStream s; + // Dump code cache state into a buffer before locking the tty, + // because log_state() will use locks causing lock conflicts. + CodeCache::log_state(&s); + ttyLocker ttyl; tty->print("### sweeper: %s ", msg); if (format != NULL) { @@ -426,10 +553,15 @@ tty->vprint(format, ap); va_end(ap); } - CodeCache::log_state(tty); tty->cr(); + tty->print_cr(s.as_string()); } if (LogCompilation && (xtty != NULL)) { + stringStream s; + // Dump code cache state into a buffer before locking the tty, + // because log_state() will use locks causing lock conflicts. + CodeCache::log_state(&s); + ttyLocker ttyl; xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count()); if (format != NULL) { @@ -438,7 +570,7 @@ xtty->vprint(format, ap); va_end(ap); } - CodeCache::log_state(xtty); + xtty->print(s.as_string()); xtty->stamp(); xtty->end_elem(); }
--- a/src/share/vm/runtime/sweeper.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/sweeper.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -57,6 +57,13 @@ public: static long traversal_count() { return _traversals; } +#ifdef ASSERT + // Keep track of sweeper activity in the ring buffer + static void record_sweep(nmethod* nm, int line); + static void report_events(int id, address entry); + static void report_events(); +#endif + static void scan_stacks(); // Invoked at the end of each safepoint static void sweep_code_cache(); // Concurrent part of sweep job static void possibly_sweep(); // Compiler threads call this to sweep
--- a/src/share/vm/runtime/synchronizer.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/synchronizer.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/thread.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/thread.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -31,6 +31,7 @@ #include "compiler/compileBroker.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/linkResolver.hpp" +#include "interpreter/oopMapCache.hpp" #include "jvmtifiles/jvmtiEnv.hpp" #include "memory/oopFactory.hpp" #include "memory/universe.inline.hpp" @@ -2860,6 +2861,26 @@ } +#ifdef ASSERT +// Print or validate the layout of stack frames +void JavaThread::print_frame_layout(int depth, bool validate_only) { + ResourceMark rm; + PRESERVE_EXCEPTION_MARK; + FrameValues values; + int frame_no = 0; + for(StackFrameStream fst(this, false); !fst.is_done(); fst.next()) { + fst.current()->describe(values, ++frame_no); + if (depth == frame_no) break; + } + if (validate_only) { + values.validate(); + } else { + tty->print_cr("[Describe stack layout]"); + values.print(); + } +} +#endif + void JavaThread::trace_stack_from(vframe* start_vf) { ResourceMark rm; int vframe_no = 1; @@ -2922,12 +2943,22 @@ _queue = queue; _counters = counters; _buffer_blob = NULL; + _scanned_nmethod = NULL; #ifndef PRODUCT _ideal_graph_printer = NULL; #endif } +void CompilerThread::oops_do(OopClosure* f, CodeBlobClosure* cf) { + JavaThread::oops_do(f, cf); + if (_scanned_nmethod != NULL && cf != NULL) { + // Safepoints can occur when the sweeper is scanning an nmethod so + // process it here to make sure it isn't unloaded in the middle of + // a scan. + cf->do_code_blob(_scanned_nmethod); + } +} // ======= Threads ========
--- a/src/share/vm/runtime/thread.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/thread.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -439,7 +439,7 @@ // GC support // Apply "f->do_oop" to all root oops in "this". // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames - void oops_do(OopClosure* f, CodeBlobClosure* cf); + virtual void oops_do(OopClosure* f, CodeBlobClosure* cf); // Handles the parallel case for the method below. private: @@ -1380,6 +1380,12 @@ void trace_stack_from(vframe* start_vf) PRODUCT_RETURN; void trace_frames() PRODUCT_RETURN; + // Print an annotated view of the stack frames + void print_frame_layout(int depth = 0, bool validate_only = false) NOT_DEBUG_RETURN; + void validate_frame_layout() { + print_frame_layout(0, true); + } + // Returns the number of stack frames on the stack int depth() const; @@ -1692,6 +1698,8 @@ CompileQueue* _queue; BufferBlob* _buffer_blob; + nmethod* _scanned_nmethod; // nmethod being scanned by the sweeper + public: static CompilerThread* current(); @@ -1720,6 +1728,11 @@ _log = log; } + // GC support + // Apply "f->do_oop" to all root oops in "this". + // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames + void oops_do(OopClosure* f, CodeBlobClosure* cf); + #ifndef PRODUCT private: IdealGraphPrinter *_ideal_graph_printer; @@ -1731,6 +1744,12 @@ // Get/set the thread's current task CompileTask* task() { return _task; } void set_task(CompileTask* task) { _task = task; } + + // Track the nmethod currently being scanned by the sweeper + void set_scanned_nmethod(nmethod* nm) { + assert(_scanned_nmethod == NULL || nm == NULL, "should reset to NULL before writing a new value"); + _scanned_nmethod = nm; + } }; inline CompilerThread* CompilerThread::current() {
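A brief sketch of how the annotated stack dump added above might be invoked from debug-only code; the wrapper is illustrative and not part of the changeset.

// Illustrative only; these calls compile to no-ops outside debug (ASSERT) builds.
static void dump_stack_layout_example() {
  JavaThread* jt = JavaThread::current();
  jt->print_frame_layout();      // describe and print every frame of the stack
  jt->print_frame_layout(2);     // stop after the two most recent frames
  jt->validate_frame_layout();   // asserts if two frames claim the same stack slot
}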
--- a/src/share/vm/runtime/threadLocalStorage.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/threadLocalStorage.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/vframe.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/vframe.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/vframeArray.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/vframeArray.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -154,7 +154,8 @@ int unpack_counter = 0; -void vframeArrayElement::unpack_on_stack(int callee_parameters, +void vframeArrayElement::unpack_on_stack(int caller_actual_parameters, + int callee_parameters, int callee_locals, frame* caller, bool is_top_frame, @@ -270,6 +271,7 @@ temps + callee_parameters, popframe_preserved_args_size_in_words, locks, + caller_actual_parameters, callee_parameters, callee_locals, caller, @@ -415,7 +417,8 @@ } -int vframeArrayElement::on_stack_size(int callee_parameters, +int vframeArrayElement::on_stack_size(int caller_actual_parameters, + int callee_parameters, int callee_locals, bool is_top_frame, int popframe_extra_stack_expression_els) const { @@ -426,6 +429,7 @@ temps + callee_parameters, popframe_extra_stack_expression_els, locks, + caller_actual_parameters, callee_parameters, callee_locals, is_top_frame); @@ -496,7 +500,7 @@ } } -void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode) { +void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode, int caller_actual_parameters) { // stack picture // unpack_frame // [new interpreter frames ] (frames are skeletal but walkable) @@ -525,7 +529,8 @@ for (index = frames() - 1; index >= 0 ; index--) { int callee_parameters = index == 0 ? 0 : element(index-1)->method()->size_of_parameters(); int callee_locals = index == 0 ? 0 : element(index-1)->method()->max_locals(); - element(index)->unpack_on_stack(callee_parameters, + element(index)->unpack_on_stack(caller_actual_parameters, + callee_parameters, callee_locals, &caller_frame, index == 0, @@ -534,6 +539,7 @@ Deoptimization::unwind_callee_save_values(element(index)->iframe(), this); } caller_frame = *element(index)->iframe(); + caller_actual_parameters = callee_parameters; }
--- a/src/share/vm/runtime/vframeArray.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/vframeArray.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -83,13 +83,15 @@ // Returns the on stack word size for this frame // callee_parameters is the number of callee locals residing inside this frame - int on_stack_size(int callee_parameters, + int on_stack_size(int caller_actual_parameters, + int callee_parameters, int callee_locals, bool is_top_frame, int popframe_extra_stack_expression_els) const; // Unpacks the element to skeletal interpreter frame - void unpack_on_stack(int callee_parameters, + void unpack_on_stack(int caller_actual_parameters, + int callee_parameters, int callee_locals, frame* caller, bool is_top_frame, @@ -190,7 +192,7 @@ int frame_size() const { return _frame_size; } // Unpack the array on the stack passed in stack interval - void unpack_to_stack(frame &unpack_frame, int exec_mode); + void unpack_to_stack(frame &unpack_frame, int exec_mode, int caller_actual_parameters); // Deallocates monitor chunks allocated during deoptimization. // This should be called when the array is not used anymore.
--- a/src/share/vm/runtime/vmStructs.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/vmStructs.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -783,6 +783,7 @@ nonstatic_field(nmethod, _osr_link, nmethod*) \ nonstatic_field(nmethod, _scavenge_root_link, nmethod*) \ nonstatic_field(nmethod, _scavenge_root_state, jbyte) \ + nonstatic_field(nmethod, _state, unsigned char) \ nonstatic_field(nmethod, _exception_offset, int) \ nonstatic_field(nmethod, _deoptimize_offset, int) \ nonstatic_field(nmethod, _orig_pc_offset, int) \ @@ -800,6 +801,8 @@ nonstatic_field(nmethod, _osr_entry_point, address) \ nonstatic_field(nmethod, _lock_count, jint) \ nonstatic_field(nmethod, _stack_traversal_mark, long) \ + nonstatic_field(nmethod, _compile_id, int) \ + nonstatic_field(nmethod, _marked_for_deoptimization, bool) \ \ /********************************/ \ /* JavaCalls (NOTE: incomplete) */ \ @@ -1310,11 +1313,13 @@ \ declare_toplevel_type(CodeBlob) \ declare_type(BufferBlob, CodeBlob) \ - declare_type(nmethod, CodeBlob) \ + declare_type(AdapterBlob, BufferBlob) \ + declare_type(nmethod, CodeBlob) \ declare_type(RuntimeStub, CodeBlob) \ declare_type(SingletonBlob, CodeBlob) \ declare_type(SafepointBlob, SingletonBlob) \ declare_type(DeoptimizationBlob, SingletonBlob) \ + declare_type(RicochetBlob, SingletonBlob) \ declare_c2_type(ExceptionBlob, SingletonBlob) \ declare_c2_type(UncommonTrapBlob, CodeBlob) \ \
--- a/src/share/vm/runtime/vmStructs.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/vmStructs.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/vmThread.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/vmThread.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -291,7 +291,9 @@ // Among other things, this ensures that Eden top is correct. Universe::heap()->prepare_for_verify(); os::check_heap(); - Universe::verify(true, true); // Silent verification to not polute normal output + // Silent verification so as not to pollute normal output, + // unless we really asked for it. + Universe::verify(true, !(PrintGCDetails || Verbose)); } CompileBroker::set_should_block();
--- a/src/share/vm/runtime/vm_operations.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/vm_operations.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/vm_operations.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/vm_operations.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/vm_version.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/vm_version.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/vm_version.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/runtime/vm_version.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/attachListener.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/services/attachListener.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/attachListener.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/services/attachListener.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/classLoadingService.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/services/classLoadingService.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/g1MemoryPool.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/services/g1MemoryPool.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,10 +34,10 @@ size_t init_size, bool support_usage_threshold) : _g1h(g1h), CollectedMemoryPool(name, - MemoryPool::Heap, - init_size, - undefined_max(), - support_usage_threshold) { + MemoryPool::Heap, + init_size, + undefined_max(), + support_usage_threshold) { assert(UseG1GC, "sanity"); } @@ -48,44 +48,27 @@ // See the comment at the top of g1MemoryPool.hpp size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) { - size_t young_list_length = g1h->young_list()->length(); - size_t eden_used = young_list_length * HeapRegion::GrainBytes; - size_t survivor_used = survivor_space_used(g1h); - eden_used = subtract_up_to_zero(eden_used, survivor_used); - return eden_used; + return g1h->g1mm()->eden_space_used(); } // See the comment at the top of g1MemoryPool.hpp size_t G1MemoryPoolSuper::survivor_space_committed(G1CollectedHeap* g1h) { - return MAX2(survivor_space_used(g1h), (size_t) HeapRegion::GrainBytes); + return g1h->g1mm()->survivor_space_committed(); } // See the comment at the top of g1MemoryPool.hpp size_t G1MemoryPoolSuper::survivor_space_used(G1CollectedHeap* g1h) { - size_t survivor_num = g1h->g1_policy()->recorded_survivor_regions(); - size_t survivor_used = survivor_num * HeapRegion::GrainBytes; - return survivor_used; + return g1h->g1mm()->survivor_space_used(); } // See the comment at the top of g1MemoryPool.hpp size_t G1MemoryPoolSuper::old_space_committed(G1CollectedHeap* g1h) { - size_t committed = overall_committed(g1h); - size_t eden_committed = eden_space_committed(g1h); - size_t survivor_committed = survivor_space_committed(g1h); - committed = subtract_up_to_zero(committed, eden_committed); - committed = subtract_up_to_zero(committed, survivor_committed); - committed = MAX2(committed, (size_t) HeapRegion::GrainBytes); - return committed; + return g1h->g1mm()->old_space_committed(); } // See the comment at the top of g1MemoryPool.hpp size_t G1MemoryPoolSuper::old_space_used(G1CollectedHeap* g1h) { - size_t used = overall_used(g1h); - size_t eden_used = eden_space_used(g1h); - size_t survivor_used = survivor_space_used(g1h); - used = subtract_up_to_zero(used, eden_used); - used = subtract_up_to_zero(used, survivor_used); - return used; + return g1h->g1mm()->old_space_used(); } G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
--- a/src/share/vm/services/g1MemoryPool.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/services/g1MemoryPool.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,68 +46,9 @@ // get, as this does affect the performance and behavior of G1. Which // is why we introduce the three memory pools implemented here. // -// The above approach inroduces a couple of challenging issues in the -// implementation of the three memory pools: -// -// 1) The used space calculation for a pool is not necessarily -// independent of the others. We can easily get from G1 the overall -// used space in the entire heap, the number of regions in the young -// generation (includes both eden and survivors), and the number of -// survivor regions. So, from that we calculate: -// -// survivor_used = survivor_num * region_size -// eden_used = young_region_num * region_size - survivor_used -// old_gen_used = overall_used - eden_used - survivor_used -// -// Note that survivor_used and eden_used are upper bounds. To get the -// actual value we would have to iterate over the regions and add up -// ->used(). But that'd be expensive. So, we'll accept some lack of -// accuracy for those two. But, we have to be careful when calculating -// old_gen_used, in case we subtract from overall_used more then the -// actual number and our result goes negative. -// -// 2) Calculating the used space is straightforward, as described -// above. However, how do we calculate the committed space, given that -// we allocate space for the eden, survivor, and old gen out of the -// same pool of regions? One way to do this is to use the used value -// as also the committed value for the eden and survivor spaces and -// then calculate the old gen committed space as follows: -// -// old_gen_committed = overall_committed - eden_committed - survivor_committed +// See comments in g1MonitoringSupport.hpp for additional details +// on this model. // -// Maybe a better way to do that would be to calculate used for eden -// and survivor as a sum of ->used() over their regions and then -// calculate committed as region_num * region_size (i.e., what we use -// to calculate the used space now). This is something to consider -// in the future. -// -// 3) Another decision that is again not straightforward is what is -// the max size that each memory pool can grow to. One way to do this -// would be to use the committed size for the max for the eden and -// survivors and calculate the old gen max as follows (basically, it's -// a similar pattern to what we use for the committed space, as -// described above): -// -// old_gen_max = overall_max - eden_max - survivor_max -// -// Unfortunately, the above makes the max of each pool fluctuate over -// time and, even though this is allowed according to the spec, it -// broke several assumptions in the M&M framework (there were cases -// where used would reach a value greater than max). So, for max we -// use -1, which means "undefined" according to the spec. -// -// 4) Now, there is a very subtle issue with all the above. The -// framework will call get_memory_usage() on the three pools -// asynchronously. 
As a result, each call might get a different value -// for, say, survivor_num which will yield inconsistent values for -// eden_used, survivor_used, and old_gen_used (as survivor_num is used -// in the calculation of all three). This would normally be -// ok. However, it's possible that this might cause the sum of -// eden_used, survivor_used, and old_gen_used to go over the max heap -// size and this seems to sometimes cause JConsole (and maybe other -// clients) to get confused. There's not a really an easy / clean -// solution to this problem, due to the asynchrounous nature of the -// framework. // This class is shared by the three G1 memory pool classes @@ -116,22 +57,6 @@ // (see comment above), we put the calculations in this class so that // we can easily share them among the subclasses. class G1MemoryPoolSuper : public CollectedMemoryPool { -private: - // It returns x - y if x > y, 0 otherwise. - // As described in the comment above, some of the inputs to the - // calculations we have to do are obtained concurrently and hence - // may be inconsistent with each other. So, this provides a - // defensive way of performing the subtraction and avoids the value - // going negative (which would mean a very large result, given that - // the parameter are size_t). - static size_t subtract_up_to_zero(size_t x, size_t y) { - if (x > y) { - return x - y; - } else { - return 0; - } - } - protected: G1CollectedHeap* _g1h; @@ -148,13 +73,6 @@ return (size_t) -1; } - static size_t overall_committed(G1CollectedHeap* g1h) { - return g1h->capacity(); - } - static size_t overall_used(G1CollectedHeap* g1h) { - return g1h->used_unlocked(); - } - static size_t eden_space_committed(G1CollectedHeap* g1h); static size_t eden_space_used(G1CollectedHeap* g1h);
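The comment block removed above described how the old G1 pools derived their figures from a few coarse heap counters, clamping each subtraction so that concurrently-read, mutually inconsistent inputs could not push the old-gen numbers negative. A minimal sketch of that arithmetic (plain Java; regionSize, youngRegions, survivorRegions and overallUsed are hypothetical stand-ins for the values the old code pulled from G1CollectedHeap):

public final class G1PoolMathSketch {
    // "x - y if x > y, 0 otherwise": the defensive subtraction the removed
    // subtract_up_to_zero() helper performed, because its inputs are sampled
    // concurrently and may be inconsistent with each other.
    static long subtractUpToZero(long x, long y) {
        return x > y ? x - y : 0;
    }

    public static void main(String[] args) {
        long regionSize      = 1L << 20;   // assumed 1 MB regions, for illustration only
        long youngRegions    = 30;         // eden + survivor regions
        long survivorRegions = 5;
        long overallUsed     = 200L << 20; // overall heap usage

        long survivorUsed = survivorRegions * regionSize;
        long edenUsed     = subtractUpToZero(youngRegions * regionSize, survivorUsed);
        long oldGenUsed   = subtractUpToZero(
                                subtractUpToZero(overallUsed, edenUsed), survivorUsed);

        System.out.printf("eden=%d survivor=%d old=%d%n",
                          edenUsed, survivorUsed, oldGenUsed);
    }
}

The new code simply forwards each query to g1h->g1mm(), so these derivations now live in one place (g1MonitoringSupport) instead of being repeated per pool.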
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/services/gcNotifier.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "classfile/systemDictionary.hpp" +#include "classfile/vmSymbols.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/interfaceSupport.hpp" +#include "runtime/java.hpp" +#include "runtime/javaCalls.hpp" +#include "runtime/mutex.hpp" +#include "runtime/mutexLocker.hpp" +#include "services/gcNotifier.hpp" +#include "services/management.hpp" +#include "services/memoryService.hpp" +#include "memoryManager.hpp" +#include "memory/oopFactory.hpp" + +GCNotificationRequest *GCNotifier::first_request = NULL; +GCNotificationRequest *GCNotifier::last_request = NULL; + +void GCNotifier::pushNotification(GCMemoryManager *mgr, const char *action, const char *cause) { + // Make a copy of the last GC statistics + // GC may occur between now and the creation of the notification + int num_pools = MemoryService::num_memory_pools(); + GCStatInfo* stat = new GCStatInfo(num_pools); + mgr->get_last_gc_stat(stat); + GCNotificationRequest *request = new GCNotificationRequest(os::javaTimeMillis(),mgr,action,cause,stat); + addRequest(request); + } + +void GCNotifier::addRequest(GCNotificationRequest *request) { + MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); + if(first_request == NULL) { + first_request = request; + } else { + last_request->next = request; + } + last_request = request; + Service_lock->notify_all(); +} + +GCNotificationRequest *GCNotifier::getRequest() { + MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); + GCNotificationRequest *request = first_request; + if(first_request != NULL) { + first_request = first_request->next; + } + return request; +} + +bool GCNotifier::has_event() { + return first_request != NULL; +} + +static Handle getGcInfoBuilder(GCMemoryManager *gcManager,TRAPS) { + + klassOop k = Management::sun_management_GarbageCollectorImpl_klass(CHECK_NH); + instanceKlassHandle gcMBeanKlass (THREAD, k); + + instanceOop i = gcManager->get_memory_manager_instance(THREAD); + instanceHandle ih(THREAD, i); + + JavaValue result(T_OBJECT); + JavaCallArguments args(ih); + + JavaCalls::call_virtual(&result, + gcMBeanKlass, + vmSymbols::getGcInfoBuilder_name(), + vmSymbols::getGcInfoBuilder_signature(), + &args, + CHECK_NH); + return Handle(THREAD,(oop)result.get_jobject()); + +} + +static Handle createGcInfo(GCMemoryManager *gcManager, 
GCStatInfo *gcStatInfo,TRAPS) { + + // Fill the arrays of MemoryUsage objects with before and after GC + // per pool memory usage + + klassOop muKlass = Management::java_lang_management_MemoryUsage_klass(CHECK_NH); objArrayOop bu = oopFactory::new_objArray( muKlass,MemoryService::num_memory_pools(), CHECK_NH); + objArrayHandle usage_before_gc_ah(THREAD, bu); + objArrayOop au = oopFactory::new_objArray(muKlass,MemoryService::num_memory_pools(), CHECK_NH); + objArrayHandle usage_after_gc_ah(THREAD, au); + + for (int i = 0; i < MemoryService::num_memory_pools(); i++) { + Handle before_usage = MemoryService::create_MemoryUsage_obj(gcStatInfo->before_gc_usage_for_pool(i), CHECK_NH); + Handle after_usage; + + MemoryUsage u = gcStatInfo->after_gc_usage_for_pool(i); + if (u.max_size() == 0 && u.used() > 0) { + // If max size == 0, this pool is a survivor space. + // Set max size = -1 since the pools will be swapped after GC. + MemoryUsage usage(u.init_size(), u.used(), u.committed(), (size_t)-1); + after_usage = MemoryService::create_MemoryUsage_obj(usage, CHECK_NH); + } else { + after_usage = MemoryService::create_MemoryUsage_obj(u, CHECK_NH); + } + usage_before_gc_ah->obj_at_put(i, before_usage()); + usage_after_gc_ah->obj_at_put(i, after_usage()); + } + + // Current implementation only has 1 attribute (number of GC threads) + // The type is 'I' + objArrayOop extra_args_array = oopFactory::new_objArray(SystemDictionary::Integer_klass(), 1, CHECK_NH); + objArrayHandle extra_array (THREAD, extra_args_array); + klassOop itKlass= SystemDictionary::Integer_klass(); + instanceKlassHandle intK(THREAD, itKlass); + + instanceHandle extra_arg_val = intK->allocate_instance_handle(CHECK_NH); + + { + JavaValue res(T_VOID); + JavaCallArguments argsInt; + argsInt.push_oop(extra_arg_val); + argsInt.push_int(gcManager->num_gc_threads()); + + JavaCalls::call_special(&res, + intK, + vmSymbols::object_initializer_name(), + vmSymbols::int_void_signature(), + &argsInt, + CHECK_NH); + } + extra_array->obj_at_put(0,extra_arg_val()); + + klassOop gcInfoklass = Management::com_sun_management_GcInfo_klass(CHECK_NH); + instanceKlassHandle ik (THREAD,gcInfoklass); + + Handle gcInfo_instance = ik->allocate_instance_handle(CHECK_NH); + + JavaValue constructor_result(T_VOID); + JavaCallArguments constructor_args(16); + constructor_args.push_oop(gcInfo_instance); + constructor_args.push_oop(getGcInfoBuilder(gcManager,THREAD)); + constructor_args.push_long(gcStatInfo->gc_index()); + constructor_args.push_long(gcStatInfo->start_time()); + constructor_args.push_long(gcStatInfo->end_time()); + constructor_args.push_oop(usage_before_gc_ah); + constructor_args.push_oop(usage_after_gc_ah); + constructor_args.push_oop(extra_array); + + JavaCalls::call_special(&constructor_result, + ik, + vmSymbols::object_initializer_name(), + vmSymbols::com_sun_management_GcInfo_constructor_signature(), + &constructor_args, + CHECK_NH); + + return Handle(gcInfo_instance()); +} + +void GCNotifier::sendNotification(TRAPS) { + ResourceMark rm(THREAD); + GCNotificationRequest *request = getRequest(); + if(request != NULL) { + Handle objGcInfo = createGcInfo(request->gcManager,request->gcStatInfo,THREAD); + + Handle objName = java_lang_String::create_from_platform_dependent_str(request->gcManager->name(), CHECK); + Handle objAction = java_lang_String::create_from_platform_dependent_str(request->gcAction, CHECK); + Handle objCause = java_lang_String::create_from_platform_dependent_str(request->gcCause, CHECK); + + klassOop k = 
Management::sun_management_GarbageCollectorImpl_klass(CHECK); + instanceKlassHandle gc_mbean_klass (THREAD, k); + + instanceOop gc_mbean = request->gcManager->get_memory_manager_instance(THREAD); + instanceHandle gc_mbean_h(THREAD, gc_mbean); + if (!gc_mbean_h->is_a(k)) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), + "This GCMemoryManager doesn't have a GarbageCollectorMXBean"); + } + + JavaValue result(T_VOID); + JavaCallArguments args(gc_mbean_h); + args.push_long(request->timestamp); + args.push_oop(objName); + args.push_oop(objAction); + args.push_oop(objCause); + args.push_oop(objGcInfo); + + JavaCalls::call_virtual(&result, + gc_mbean_klass, + vmSymbols::createGCNotification_name(), + vmSymbols::createGCNotification_signature(), + &args, + CHECK); + if (HAS_PENDING_EXCEPTION) { + CLEAR_PENDING_EXCEPTION; + } + + delete request; + } +} +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/services/gcNotifier.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_SERVICES_GCNOTIFIER_HPP +#define SHARE_VM_SERVICES_GCNOTIFIER_HPP + +#include "memory/allocation.hpp" +#include "services/memoryPool.hpp" +#include "services/memoryService.hpp" +#include "services/memoryManager.hpp" + +class GCNotificationRequest : public CHeapObj { + friend class GCNotifier; + GCNotificationRequest *next; + jlong timestamp; + GCMemoryManager *gcManager; + const char *gcAction; + const char *gcCause; + GCStatInfo *gcStatInfo; +public: + GCNotificationRequest(jlong ts, GCMemoryManager *manager, const char*action, const char *cause,GCStatInfo *info) { + next = NULL; + timestamp = ts; + gcManager = manager; + gcAction = action; + gcCause = cause; + gcStatInfo = info; + } + + ~GCNotificationRequest() { + delete gcStatInfo; + } +}; + +class GCNotifier : public AllStatic { + friend class ServiceThread; +private: + static GCNotificationRequest *first_request; + static GCNotificationRequest *last_request; + static void addRequest(GCNotificationRequest *request); + static GCNotificationRequest *getRequest(); +public: + static void pushNotification(GCMemoryManager *manager, const char *action, const char *cause); + static bool has_event(); + static void sendNotification(TRAPS); +}; + +#endif // SHARE_VM_SERVICES_GCNOTIFIER_HPP
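gcNotifier.cpp and gcNotifier.hpp are the VM-side producer of the new GC notifications: GCMemoryManager::gc_end() (further down) queues a GCNotificationRequest, the service thread drains the queue through GCNotifier::sendNotification(), and the jmm_SetGCNotificationEnabled entry added to the JMM interface below switches the producer on. On the Java side these surface as JMX notifications on the garbage collector MXBeans; a small consumer sketch using the public com.sun.management API (it assumes the paired JDK-side GarbageCollectorImpl enables notifications when the first listener is registered):

import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import javax.management.Notification;
import javax.management.NotificationEmitter;
import javax.management.NotificationListener;
import javax.management.openmbean.CompositeData;
import com.sun.management.GarbageCollectionNotificationInfo;

public class GcNotificationDemo {
    public static void main(String[] args) throws Exception {
        NotificationListener listener = new NotificationListener() {
            public void handleNotification(Notification n, Object handback) {
                if (!GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION
                        .equals(n.getType())) {
                    return;
                }
                GarbageCollectionNotificationInfo info =
                    GarbageCollectionNotificationInfo.from((CompositeData) n.getUserData());
                // gcAction ("end of minor GC" / "end of major GC") and gcCause are the
                // strings pushed by GCNotifier::pushNotification above.
                System.out.println(info.getGcName() + ": " + info.getGcAction()
                    + " (" + info.getGcCause() + "), "
                    + info.getGcInfo().getDuration() + " ms");
            }
        };
        // Each HotSpot GC manager is exposed as a GarbageCollectorMXBean that is
        // also a NotificationEmitter.
        for (GarbageCollectorMXBean gc : ManagementFactory.getGarbageCollectorMXBeans()) {
            ((NotificationEmitter) gc).addNotificationListener(listener, null, null);
        }
        // Provoke a few collections so the notifications have something to report.
        for (int i = 0; i < 5; i++) {
            System.gc();
            Thread.sleep(200);
        }
    }
}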
--- a/src/share/vm/services/heapDumper.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/services/heapDumper.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1649,6 +1649,9 @@ if (fr->is_entry_frame()) { last_entry_frame = fr; } + if (fr->is_ricochet_frame()) { + fr->oops_ricochet_do(&blk, vf->register_map()); + } } vf = vf->sender(); }
--- a/src/share/vm/services/jmm.h Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/services/jmm.h Thu Jun 02 18:59:50 2011 +0100 @@ -48,7 +48,7 @@ JMM_VERSION_1_0 = 0x20010000, JMM_VERSION_1_1 = 0x20010100, // JDK 6 JMM_VERSION_1_2 = 0x20010200, // JDK 7 - JMM_VERSION = 0x20010200 + JMM_VERSION = 0x20010201 }; typedef struct { @@ -293,6 +293,9 @@ jlongArray ids, jboolean lockedMonitors, jboolean lockedSynchronizers); + void (JNICALL *SetGCNotificationEnabled) (JNIEnv *env, + jobject mgr, + jboolean enabled); } JmmInterface; #ifdef __cplusplus
--- a/src/share/vm/services/management.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/services/management.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -42,6 +42,7 @@ #include "services/classLoadingService.hpp" #include "services/heapDumper.hpp" #include "services/lowMemoryDetector.hpp" +#include "services/gcNotifier.hpp" #include "services/management.hpp" #include "services/memoryManager.hpp" #include "services/memoryPool.hpp" @@ -60,6 +61,8 @@ klassOop Management::_memoryManagerMXBean_klass = NULL; klassOop Management::_garbageCollectorMXBean_klass = NULL; klassOop Management::_managementFactory_klass = NULL; +klassOop Management::_garbageCollectorImpl_klass = NULL; +klassOop Management::_gcInfo_klass = NULL; jmmOptionalSupport Management::_optional_support = {0}; TimeStamp Management::_stamp; @@ -179,6 +182,8 @@ f->do_oop((oop*) &_memoryManagerMXBean_klass); f->do_oop((oop*) &_garbageCollectorMXBean_klass); f->do_oop((oop*) &_managementFactory_klass); + f->do_oop((oop*) &_garbageCollectorImpl_klass); + f->do_oop((oop*) &_gcInfo_klass); } klassOop Management::java_lang_management_ThreadInfo_klass(TRAPS) { @@ -230,6 +235,20 @@ return _managementFactory_klass; } +klassOop Management::sun_management_GarbageCollectorImpl_klass(TRAPS) { + if (_garbageCollectorImpl_klass == NULL) { + _garbageCollectorImpl_klass = load_and_initialize_klass(vmSymbols::sun_management_GarbageCollectorImpl(), CHECK_NULL); + } + return _garbageCollectorImpl_klass; +} + +klassOop Management::com_sun_management_GcInfo_klass(TRAPS) { + if (_gcInfo_klass == NULL) { + _gcInfo_klass = load_and_initialize_klass(vmSymbols::com_sun_management_GcInfo(), CHECK_NULL); + } + return _gcInfo_klass; +} + static void initialize_ThreadInfo_constructor_arguments(JavaCallArguments* args, ThreadSnapshot* snapshot, TRAPS) { Handle snapshot_thread(THREAD, snapshot->threadObj()); @@ -2056,6 +2075,13 @@ } JVM_END +JVM_ENTRY(void, jmm_SetGCNotificationEnabled(JNIEnv *env, jobject obj, jboolean enabled)) + ResourceMark rm(THREAD); + // Get the GCMemoryManager + GCMemoryManager* mgr = get_gc_memory_manager_from_jobject(obj, CHECK); + mgr->set_notification_enabled(enabled?true:false); +JVM_END + // Dump heap - Returns 0 if succeeds. JVM_ENTRY(jint, jmm_DumpHeap0(JNIEnv *env, jstring outputfile, jboolean live)) #ifndef SERVICES_KERNEL @@ -2122,7 +2148,8 @@ jmm_FindDeadlockedThreads, jmm_SetVMGlobal, NULL, - jmm_DumpThreads + jmm_DumpThreads, + jmm_SetGCNotificationEnabled }; void* Management::get_jmm_interface(int version) {
--- a/src/share/vm/services/management.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/services/management.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -49,6 +49,8 @@ static klassOop _memoryManagerMXBean_klass; static klassOop _garbageCollectorMXBean_klass; static klassOop _managementFactory_klass; + static klassOop _garbageCollectorImpl_klass; + static klassOop _gcInfo_klass; static klassOop load_and_initialize_klass(Symbol* sh, TRAPS); @@ -86,6 +88,8 @@ static klassOop java_lang_management_GarbageCollectorMXBean_klass(TRAPS); static klassOop sun_management_Sensor_klass(TRAPS); static klassOop sun_management_ManagementFactory_klass(TRAPS); + static klassOop sun_management_GarbageCollectorImpl_klass(TRAPS); + static klassOop com_sun_management_GcInfo_klass(TRAPS); static instanceOop create_thread_info_instance(ThreadSnapshot* snapshot, TRAPS); static instanceOop create_thread_info_instance(ThreadSnapshot* snapshot, objArrayHandle monitors_array, typeArrayHandle depths_array, objArrayHandle synchronizers_array, TRAPS);
--- a/src/share/vm/services/memoryManager.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/services/memoryManager.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ #include "services/memoryManager.hpp" #include "services/memoryPool.hpp" #include "services/memoryService.hpp" +#include "services/gcNotifier.hpp" #include "utilities/dtrace.hpp" HS_DTRACE_PROBE_DECL8(hotspot, mem__pool__gc__begin, char*, int, char*, int, @@ -202,6 +203,7 @@ _last_gc_lock = new Mutex(Mutex::leaf, "_last_gc_lock", true); _current_gc_stat = NULL; _num_gc_threads = 1; + _notification_enabled = false; } GCMemoryManager::~GCMemoryManager() { @@ -250,7 +252,8 @@ // to ensure the current gc stat is placed in _last_gc_stat. void GCMemoryManager::gc_end(bool recordPostGCUsage, bool recordAccumulatedGCTime, - bool recordGCEndTime, bool countCollection) { + bool recordGCEndTime, bool countCollection, + GCCause::Cause cause) { if (recordAccumulatedGCTime) { _accumulated_timer.stop(); } @@ -283,6 +286,11 @@ pool->set_last_collection_usage(usage); LowMemoryDetector::detect_after_gc_memory(pool); } + if(is_notification_enabled()) { + bool isMajorGC = this == MemoryService::get_major_gc_manager(); + GCNotifier::pushNotification(this, isMajorGC ? "end of major GC" : "end of minor GC", + GCCause::to_string(cause)); + } } if (countCollection) { _num_collections++;
--- a/src/share/vm/services/memoryManager.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/services/memoryManager.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -166,6 +166,7 @@ Mutex* _last_gc_lock; GCStatInfo* _current_gc_stat; int _num_gc_threads; + volatile bool _notification_enabled; public: GCMemoryManager(); ~GCMemoryManager(); @@ -181,7 +182,7 @@ void gc_begin(bool recordGCBeginTime, bool recordPreGCUsage, bool recordAccumulatedGCTime); void gc_end(bool recordPostGCUsage, bool recordAccumulatedGCTime, - bool recordGCEndTime, bool countCollection); + bool recordGCEndTime, bool countCollection, GCCause::Cause cause); void reset_gc_stat() { _num_collections = 0; _accumulated_timer.reset(); } @@ -189,6 +190,8 @@ // the collection count. Zero signifies no gc has taken place. size_t get_last_gc_stat(GCStatInfo* dest); + void set_notification_enabled(bool enabled) { _notification_enabled = enabled; } + bool is_notification_enabled() { return _notification_enabled; } virtual MemoryManager::Name kind() = 0; };
--- a/src/share/vm/services/memoryPool.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/services/memoryPool.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/memoryService.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/services/memoryService.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -565,7 +565,8 @@ void MemoryService::gc_end(bool fullGC, bool recordPostGCUsage, bool recordAccumulatedGCTime, - bool recordGCEndTime, bool countCollection) { + bool recordGCEndTime, bool countCollection, + GCCause::Cause cause) { GCMemoryManager* mgr; if (fullGC) { @@ -577,7 +578,7 @@ // register the GC end statistics and memory usage mgr->gc_end(recordPostGCUsage, recordAccumulatedGCTime, recordGCEndTime, - countCollection); + countCollection, cause); } void MemoryService::oops_do(OopClosure* f) { @@ -633,7 +634,7 @@ // gc manager (so _fullGC is set to false ) and for other generation kinds // doing mark-sweep-compact uses major gc manager (so _fullGC is set // to true). -TraceMemoryManagerStats::TraceMemoryManagerStats(Generation::Name kind) { +TraceMemoryManagerStats::TraceMemoryManagerStats(Generation::Name kind, GCCause::Cause cause) { switch (kind) { case Generation::DefNew: #ifndef SERIALGC @@ -654,9 +655,10 @@ } // this has to be called in a stop the world pause and represent // an entire gc pause, start to finish: - initialize(_fullGC, true, true, true, true, true, true, true); + initialize(_fullGC, cause,true, true, true, true, true, true, true); } TraceMemoryManagerStats::TraceMemoryManagerStats(bool fullGC, + GCCause::Cause cause, bool recordGCBeginTime, bool recordPreGCUsage, bool recordPeakUsage, @@ -664,7 +666,7 @@ bool recordAccumulatedGCTime, bool recordGCEndTime, bool countCollection) { - initialize(fullGC, recordGCBeginTime, recordPreGCUsage, recordPeakUsage, + initialize(fullGC, cause, recordGCBeginTime, recordPreGCUsage, recordPeakUsage, recordPostGCUsage, recordAccumulatedGCTime, recordGCEndTime, countCollection); } @@ -672,6 +674,7 @@ // for a subclass to create then initialize an instance before invoking // the MemoryService void TraceMemoryManagerStats::initialize(bool fullGC, + GCCause::Cause cause, bool recordGCBeginTime, bool recordPreGCUsage, bool recordPeakUsage, @@ -687,6 +690,7 @@ _recordAccumulatedGCTime = recordAccumulatedGCTime; _recordGCEndTime = recordGCEndTime; _countCollection = countCollection; + _cause = cause; MemoryService::gc_begin(_fullGC, _recordGCBeginTime, _recordAccumulatedGCTime, _recordPreGCUsage, _recordPeakUsage); @@ -694,6 +698,6 @@ TraceMemoryManagerStats::~TraceMemoryManagerStats() { MemoryService::gc_end(_fullGC, _recordPostGCUsage, _recordAccumulatedGCTime, - _recordGCEndTime, _countCollection); + _recordGCEndTime, _countCollection, _cause); }
--- a/src/share/vm/services/memoryService.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/services/memoryService.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -29,6 +29,7 @@ #include "memory/generation.hpp" #include "runtime/handles.hpp" #include "services/memoryUsage.hpp" +#include "gc_interface/gcCause.hpp" // Forward declaration class MemoryPool; @@ -162,7 +163,8 @@ bool recordPreGCUsage, bool recordPeakUsage); static void gc_end(bool fullGC, bool recordPostGCUsage, bool recordAccumulatedGCTime, - bool recordGCEndTime, bool countCollection); + bool recordGCEndTime, bool countCollection, + GCCause::Cause cause); static void oops_do(OopClosure* f); @@ -172,6 +174,14 @@ // Create an instance of java/lang/management/MemoryUsage static Handle create_MemoryUsage_obj(MemoryUsage usage, TRAPS); + + static const GCMemoryManager* get_minor_gc_manager() { + return _minor_gc_manager; + } + + static const GCMemoryManager* get_major_gc_manager() { + return _major_gc_manager; + } }; class TraceMemoryManagerStats : public StackObj { @@ -184,10 +194,11 @@ bool _recordAccumulatedGCTime; bool _recordGCEndTime; bool _countCollection; - + GCCause::Cause _cause; public: TraceMemoryManagerStats() {} TraceMemoryManagerStats(bool fullGC, + GCCause::Cause cause, bool recordGCBeginTime = true, bool recordPreGCUsage = true, bool recordPeakUsage = true, @@ -197,6 +208,7 @@ bool countCollection = true); void initialize(bool fullGC, + GCCause::Cause cause, bool recordGCBeginTime, bool recordPreGCUsage, bool recordPeakUsage, @@ -205,7 +217,7 @@ bool recordGCEndTime, bool countCollection); - TraceMemoryManagerStats(Generation::Name kind); + TraceMemoryManagerStats(Generation::Name kind, GCCause::Cause cause); ~TraceMemoryManagerStats(); };
--- a/src/share/vm/shark/sharkNativeWrapper.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/shark/sharkNativeWrapper.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright 2009, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. *
--- a/src/share/vm/utilities/constantTag.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/constantTag.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -93,8 +93,6 @@ return "MethodType"; case JVM_CONSTANT_InvokeDynamic : return "InvokeDynamic"; - case JVM_CONSTANT_InvokeDynamicTrans : - return "InvokeDynamic/transitional"; case JVM_CONSTANT_Object : return "Object"; case JVM_CONSTANT_Utf8 :
--- a/src/share/vm/utilities/constantTag.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/constantTag.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -86,8 +86,7 @@ bool is_method_type() const { return _tag == JVM_CONSTANT_MethodType; } bool is_method_handle() const { return _tag == JVM_CONSTANT_MethodHandle; } - bool is_invoke_dynamic() const { return (_tag == JVM_CONSTANT_InvokeDynamic || - _tag == JVM_CONSTANT_InvokeDynamicTrans); } + bool is_invoke_dynamic() const { return _tag == JVM_CONSTANT_InvokeDynamic; } bool is_loadable_constant() const { return ((_tag >= JVM_CONSTANT_Integer && _tag <= JVM_CONSTANT_String) ||
--- a/src/share/vm/utilities/copy.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/copy.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/debug.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/debug.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -469,6 +469,7 @@ extern "C" void pp(void* p) { Command c("pp"); FlagSetting fl(PrintVMMessages, true); + FlagSetting f2(DisplayVMOutput, true); if (Universe::heap()->is_in(p)) { oop obj = oop(p); obj->print(); @@ -507,6 +508,17 @@ } +extern "C" void pfl() { + // print frame layout + Command c("pfl"); + JavaThread* p = JavaThread::active(); + tty->print(" for thread: "); + p->print(); + tty->cr(); + if (p->has_last_Java_frame()) { + p->print_frame_layout(); + } +} extern "C" void psf() { // print stack frames {
--- a/src/share/vm/utilities/debug.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/debug.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/elfFile.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/elfFile.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -29,6 +29,7 @@ #include <string.h> #include <stdio.h> #include <limits.h> +#include <new> #include "memory/allocation.inline.hpp" #include "utilities/decoder.hpp" @@ -46,7 +47,7 @@ m_status = Decoder::no_error; int len = strlen(filepath) + 1; - m_filepath = NEW_C_HEAP_ARRAY(char, len); + m_filepath = (const char*)os::malloc(len * sizeof(char)); if (m_filepath != NULL) { strcpy((char*)m_filepath, filepath); m_file = fopen(filepath, "r"); @@ -74,7 +75,7 @@ } if (m_filepath != NULL) { - FREE_C_HEAP_ARRAY(char, m_filepath); + os::free((void*)m_filepath); } if (m_next != NULL) { @@ -120,14 +121,14 @@ } // string table if (shdr.sh_type == SHT_STRTAB) { - ElfStringTable* table = new ElfStringTable(m_file, shdr, index); + ElfStringTable* table = new (std::nothrow) ElfStringTable(m_file, shdr, index); if (table == NULL) { m_status = Decoder::out_of_memory; return false; } add_string_table(table); } else if (shdr.sh_type == SHT_SYMTAB || shdr.sh_type == SHT_DYNSYM) { - ElfSymbolTable* table = new ElfSymbolTable(m_file, shdr); + ElfSymbolTable* table = new (std::nothrow) ElfSymbolTable(m_file, shdr); if (table == NULL) { m_status = Decoder::out_of_memory; return false;
--- a/src/share/vm/utilities/elfStringTable.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/elfStringTable.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -27,6 +27,7 @@ #ifndef _WINDOWS #include "memory/allocation.inline.hpp" +#include "runtime/os.hpp" #include "utilities/elfStringTable.hpp" // We will try to load whole string table into memory if we can. @@ -41,14 +42,14 @@ // try to load the string table long cur_offset = ftell(file); - m_table = (char*)NEW_C_HEAP_ARRAY(char, shdr.sh_size); + m_table = (char*)os::malloc(sizeof(char) * shdr.sh_size); if (m_table != NULL) { // if there is an error, mark the error if (fseek(file, shdr.sh_offset, SEEK_SET) || fread((void*)m_table, shdr.sh_size, 1, file) != 1 || fseek(file, cur_offset, SEEK_SET)) { m_status = Decoder::file_invalid; - FREE_C_HEAP_ARRAY(char, m_table); + os::free((void*)m_table); m_table = NULL; } } else { @@ -58,7 +59,7 @@ ElfStringTable::~ElfStringTable() { if (m_table != NULL) { - FREE_C_HEAP_ARRAY(char, m_table); + os::free((void*)m_table); } if (m_next != NULL) {
--- a/src/share/vm/utilities/elfSymbolTable.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/elfSymbolTable.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/exceptions.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/exceptions.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -207,7 +207,7 @@ } -void Exceptions::throw_stack_overflow_exception(Thread* THREAD, const char* file, int line) { +void Exceptions::throw_stack_overflow_exception(Thread* THREAD, const char* file, int line, methodHandle method) { Handle exception; if (!THREAD->has_pending_exception()) { klassOop k = SystemDictionary::StackOverflowError_klass(); @@ -215,13 +215,13 @@ exception = Handle(THREAD, e); // fill_in_stack trace does gc assert(instanceKlass::cast(k)->is_initialized(), "need to increase min_stack_allowed calculation"); if (StackTraceInThrowable) { - java_lang_Throwable::fill_in_stack_trace(exception); + java_lang_Throwable::fill_in_stack_trace(exception, method()); } } else { // if prior exception, throw that one instead exception = Handle(THREAD, THREAD->pending_exception()); } - _throw_oop(THREAD, file, line, exception()); + _throw(THREAD, file, line, exception); } void Exceptions::fthrow(Thread* thread, const char* file, int line, Symbol* h_name, const char* format, ...) {
--- a/src/share/vm/utilities/exceptions.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/exceptions.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -144,7 +144,7 @@ const char* message, ExceptionMsgToUtf8Mode to_utf8_safe = safe_to_utf8); - static void throw_stack_overflow_exception(Thread* thread, const char* file, int line); + static void throw_stack_overflow_exception(Thread* thread, const char* file, int line, methodHandle method); // for AbortVMOnException flag NOT_PRODUCT(static void debug_check_abort(Handle exception, const char* message = NULL);)
--- a/src/share/vm/utilities/globalDefinitions_gcc.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/globalDefinitions_gcc.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/globalDefinitions_visCPP.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/globalDefinitions_visCPP.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/hashtable.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/hashtable.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/hashtable.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/hashtable.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/hashtable.inline.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/hashtable.inline.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/ostream.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/ostream.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/taskqueue.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/taskqueue.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/utf8.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/utf8.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/utf8.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/utf8.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/xmlstream.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/xmlstream.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/xmlstream.hpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/xmlstream.hpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/yieldingWorkgroup.cpp Wed Jun 01 17:09:56 2011 +0100 +++ b/src/share/vm/utilities/yieldingWorkgroup.cpp Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test5091921.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,309 @@ +/* + * Copyright (c) 2011 Hewlett-Packard Company. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 5091921 + * @summary Sign flip issues in loop optimizer + * + * @run main/othervm -Xcomp -XX:CompileOnly=Test5091921 -XX:MaxInlineSize=1 Test5091921 + */ + +public class Test5091921 { + private static int result = 0; + + + /* Test for the bug of transforming indx >= MININT to indx > MININT-1 */ + public static int test_ge1(int limit) { + int indx; + int sum = 0; + for (indx = 500; indx >= limit; indx -= 2) { + sum += 2000 / indx; + result = sum; + } + return sum; + } + + /* Test for the bug of transforming indx <= MAXINT to indx < MAXINT+1 */ + public static int test_le1(int limit) { + int indx; + int sum = 0; + for (indx = -500; indx <= limit; indx += 2) + { + sum += 3000 / indx; + result = sum; + } + return sum; + } + + /* Run with -Xcomp -XX:CompileOnly=wrap1.test1 -XX:MaxInlineSize=1 */ + /* limit reset to ((limit-init+stride-1)/stride)*stride+init */ + /* Calculation may overflow */ + public static volatile int c = 1; + public static int test_wrap1(int limit) + { + int indx; + int sum = 0; + for (indx = 0xffffffff; indx < limit; indx += 0x20000000) + { + sum += c; + } + return sum; + } + + /* Test for range check elimination with bit flip issue for + scale*i+offset<limit where offset is not 0 */ + static int[] box5 = {1,2,3,4,5,6,7,8,9}; + public static int test_rce5(int[] b, int limit) + { + int indx; + int sum = b[1]; + result = sum; + for (indx = 0x80000000; indx < limit; ++indx) + { + if (indx > 0x80000000) + { + // this test is not issued in pre-loop but issued in main loop + // trick rce into thinking expression is false when indx >= 0 + // in fact it is false when indx==0x80000001 + if (indx - 9 < -9) + { + sum += indx; + result = sum; + sum ^= b[indx & 7]; + result = sum; + } + else + break; + } + else + { + sum += b[indx & 3]; + result = sum; + } + } + return sum; + } + + /* Test for range check elimination with bit flip issue for + scale*i<limit where scale > 1 */ + static int[] box6 = {1,2,3,4,5,6,7,8,9}; + public static int test_rce6(int[] b, int limit) + { + int indx; + int sum = b[1]; + result = sum; + for (indx = 0x80000000; indx < limit; ++indx) + { + if (indx > 0x80000000) + { + // harmless rce target + if (indx < 0) + { + sum += result; + result = sum; + } + else + break; + // this test is not issued in pre-loop but issued in main loop + // trick rce into 
thinking expression is false when indx >= 0 + // in fact it is false when indx==0x80000001 + // In compilers that transform mulI to shiftI may mask this issue. + if (indx * 28 + 1 < 0) + { + sum += indx; + result = sum; + sum ^= b[indx & 7]; + result = sum; + } + else + break; + } + else + { + sum += b[indx & 3]; + result = sum; + } + } + return sum; + } + + /* Test for range check elimination with i <= limit */ + static int[] box7 = {1,2,3,4,5,6,7,8,9,0x7fffffff}; + public static int test_rce7(int[] b) + { + int indx; + int max = b[9]; + int sum = b[7]; + result = sum; + for (indx = 0; indx < b.length; ++indx) + { + if (indx <= max) + { + sum += (indx ^ 15) + ((result != 0) ? 0 : sum); + result = sum; + } + else + throw new RuntimeException(); + } + for (indx = -7; indx < b.length; ++indx) + { + if (indx <= 9) + { + sum += (sum ^ 15) + ((result != 0) ? 0 : sum); + result = sum; + } + else + throw new RuntimeException(); + } + return sum; + } + + /* Test for range check elimination with i >= limit */ + static int[] box8 = {-1,0,1,2,3,4,5,6,7,8,0x80000000}; + public static int test_rce8(int[] b) + { + int indx; + int sum = b[5]; + int min = b[10]; + result = sum; + for (indx = b.length-1; indx >= 0; --indx) + { + if (indx >= min) + { + sum += (sum ^ 9) + ((result != 0) ? 0 :sum); + result = sum; + } + else + throw new RuntimeException(); + } + return sum; + } + + public static void main(String[] args) + { + result=1; + int r = 0; + try { + r = test_ge1(0x80000000); + System.out.println(result); + System.out.println("test_ge1 FAILED"); + System.exit(1); + } + catch (ArithmeticException e1) { + System.out.println("test_ge1: Expected exception caught"); + if (result != 5986) { + System.out.println(result); + System.out.println("test_ge1 FAILED"); + System.exit(97); + } + } + System.out.println("test_ge1 WORKED"); + + result=0; + try + { + r = test_le1(0x7fffffff); + System.out.println(result); + System.out.println("test_le1 FAILED"); + System.exit(1); + } + catch (ArithmeticException e1) + { + System.out.println("test_le1: Expected exception caught"); + if (result != -9039) + { + System.out.println(result); + System.out.println("test_le1 FAILED"); + System.exit(97); + } + } + System.out.println("test_le1 WORKED"); + + result=0; + r = test_wrap1(0x7fffffff); + if (r != 4) + { + System.out.println(result); + System.out.println("test_wrap1 FAILED"); + System.exit(97); + } + else + { + System.out.println("test_wrap1 WORKED"); + } + + result=0; + r = test_rce5(box5,0x80000100); + if (result != 3) + { + System.out.println(result); + System.out.println("test_rce5 FAILED"); + System.exit(97); + } + else + { + System.out.println("test_rce5 WORKED"); + } + + result=0; + r = test_rce6(box6,0x80000100); + if (result != 6) + { + System.out.println(result); + System.out.println("test_rce6 FAILED"); + System.exit(97); + } + else + { + System.out.println("test_rce6 WORKED"); + } + + result=0; + r = test_rce7(box7); + if (result != 14680079) + { + System.out.println(result); + System.out.println("test_rce7 FAILED"); + System.exit(97); + } + else + { + System.out.println("test_rce7 WORKED"); + } + + result=0; + r = test_rce8(box8); + if (result != 16393) + { + System.out.println(result); + System.out.println("test_rce8 FAILED"); + System.exit(97); + } + else + { + System.out.println("test_rce8 WORKED"); + } + } +}
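Test5091921 above targets loop-optimizer rewrites that can flip the sign of a limit, for example turning indx >= limit into indx > limit - 1; that rewrite is wrong when limit is Integer.MIN_VALUE because the subtraction wraps to Integer.MAX_VALUE (and symmetrically for indx <= limit versus indx < limit + 1). A stand-alone illustration of the wraparound, independent of the compiler:

public class SignFlipSketch {
    // The rewrite the optimizer must guard against applying blindly:
    //   (i >= limit)   -->   (i > limit - 1)
    static boolean original(int i, int limit)  { return i >= limit; }
    static boolean rewritten(int i, int limit) { return i > limit - 1; }

    public static void main(String[] args) {
        int limit = Integer.MIN_VALUE;           // limit - 1 wraps to Integer.MAX_VALUE
        int i = 500;
        System.out.println(original(i, limit));  // true: every int is >= MIN_VALUE
        System.out.println(rewritten(i, limit)); // false: 500 > MAX_VALUE never holds
    }
}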
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test6186134.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6186134 + * @summary Server virtual machine produces/exeutes incorrect code. + * + * @run main Test6186134 100000 + */ +import java.util.ArrayList; + +public class Test6186134 { + + int num = 0; + + public Test6186134(int n) { + num = n; + } + + public boolean more() { + return num-- > 0; + } + + public ArrayList test1() { + ArrayList res = new ArrayList(); + int maxResults = Integer.MAX_VALUE; + int n = 0; + boolean more = more(); + while ((n++ < maxResults) && more) { + res.add(new Object()); + more = more(); + } + return res; + } + + public static void main(String[] pars) { + int n = Integer.parseInt(pars[0]); + for (int i=0; i<n; i++) { + Test6186134 t = new Test6186134(10); + int size = t.test1().size(); + if (size != 10) { + System.out.println("wrong size: " + size +", should be 10"); + System.exit(97); + } + } + System.out.println("Passed"); + } +} +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test6196102.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6196102 + * @summary Integer seems to be greater than Integer.MAX_VALUE + * + * @run main Test6196102 + */ + +public class Test6196102 { + static public void main(String[] args) { + int i1 = 0; + int i2 = Integer.MAX_VALUE; + + while (i1 >= 0) { + i1++; + if (i1 > i2) { + System.out.println("E R R O R: " + i1); + System.exit(97); + } + } + } +} +
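A minimal sketch (hypothetical class name, not from this changeset) of the int wraparound the loop in Test6196102 relies on: incrementing past Integer.MAX_VALUE yields Integer.MIN_VALUE, which is what eventually makes the i1 >= 0 condition false when the comparison is compiled correctly.
    // Hypothetical illustration: Java int arithmetic wraps silently on overflow.
    public class IntWrapSketch {
        public static void main(String[] args) {
            int i = Integer.MAX_VALUE;
            i++;                                         // wraps around, no exception
            System.out.println(i == Integer.MIN_VALUE);  // prints true
        }
    }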
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test6357214.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6357214 + * @summary Hotspot server compiler gets integer comparison wrong + * + * @run main/othervm/timeout=60 -DshowAll=ffo -DeventID=444 Test6357214 + */ + +// The test hangs after few iterations before the fix. So it fails if timeout. +class MyResult { + public boolean next() { + return true; + } + + public String getString(String in) { + if (in.equals("id")) + return "idFoo"; + if (in.equals("contentKey")) + return "ckFoo"; + return "Foo"; + } + + public int getInt(String in) { + if (in.equals("processingComplete")) + return 0; + return 1; + } + + public byte[] getBytes(String in) { + byte[] arr = null; + if (in.equals("content")) { + arr = new byte[65536]; + byte j = 32; + for (int i=0; i<65536; i++) { + arr[i] = j; + if (++j == 127) + j=32; + } + } + return arr; + } +} + +public class Test6357214 { + public static volatile boolean bollocks = true; + public String create(String context) throws Exception { + + // + // Extract HTTP parameters + // + + boolean showAll = System.getProperty("showAll") != null; + String eventID = System.getProperty("eventID"); + String eventContentKey = System.getProperty("cKey"); + // + // Build ContentStaging query based on eventID or eventContentKey + // + + String sql = "select id, processingComplete, contentKey, content " + + "from ContentStaging cs, ContentStagingKey csk " + + "where cs.eventContentKey = csk.eventContentKey "; + + if (eventID != null) { + sql += "and id = " + eventID; + } + else if (eventContentKey != null) { + sql += "and cs.eventContentKey = '" + + eventContentKey + + "' having id = max(id)"; + } + else { + throw new Exception("Need eventID or eventContentKey"); + } + + // + // This factory builds a static panel, there is no JSP + // + + StringBuffer html = new StringBuffer(); + + try { + + MyResult result = new MyResult(); + if (result.next()) { + + eventID = result.getString("id"); + int processingComplete = result.getInt("processingComplete"); + String contentKey = result.getString("contentKey"); + byte[] bytes = result.getBytes("content"); + + // + // Print content status and associated controls + // + + html.append("<br/><font class=\"small\">"); + html.append("Status: "); + switch (processingComplete) { + case 0 : + case 1 : html.append("PENDING"); break; + case 2 : html.append(contentKey); break; + case 3 : 
html.append(eventID); break; + default : html.append("UNKNOWN"); + } + html.append("</font><br/>"); + + // + // Print at most 20Kb of content unless "showAll" is set + // + + int limit = showAll ? Integer.MAX_VALUE : 1024 * 20; + System.out.println(limit); + html.append("<pre>"); + for (int i = 0; bytes != null && i < bytes.length; i++) { + char c = (char) bytes[i]; + switch (c) { + case '<' : html.append("&lt;"); break; + case '>' : html.append("&gt;"); break; + case '&' : html.append("&amp;"); break; + default : html.append(c); + } + + if (i > limit) { + while (bollocks); + // System.out.println("i is " + i); + // System.out.println("limit is " + limit); + html.append("...\n</pre>"); + html.append(eventID); + html.append("<pre>"); + break; + } + } + html.append("</pre>"); + } + } + catch (Exception exception) { + throw exception; + } + finally { + html.append("Oof!!"); + } + String ret = html.toString(); + System.out.println("Returning string length = "+ ret.length()); + return ret; + } + + public static void main(String[] args) throws Exception { + int length=0; + + for (int i = 0; i < 100; i++) { + length = new Test6357214().create("boo").length(); + System.out.println(length); + } + } +} +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test6559156.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6559156 + * @summary Server compiler generates bad code for "<= Integer.MAX_VALUE" expression + * + * @run main Test6559156 + */ + +public class Test6559156 { + + static final int N_TESTS = 1000000; + + public static void main(String[] args) throws Exception { + + /* + * If MAX_VALUE is changed to MAX_VALUE - 1 below, the test passes + * because (apparently) bad code is only generated when comparing + * <= MAX_VALUE in the doTest method. + */ + Test6559156 test = new Test6559156(); + for (int i = 0; i < N_TESTS; i += 1) { + test.doTest1(10, Integer.MAX_VALUE, i); + test.doTest2(10, Integer.MAX_VALUE, i); + } + System.out.println("No failure"); + } + + void doTest1(int expected, int max, int i) { + int counted; + for (counted = 0; + (counted <= max) && (counted < expected); + counted += 1) { + } + if (counted != expected) { + throw new RuntimeException("Failed test1 iteration=" + i + + " max=" + max + + " counted=" + counted + + " expected=" + expected); + } + } + + void doTest2(int expected, int max, int i) { + int counted; + for (counted = 0; + // change test sequence. + (counted < expected) && (counted <= max); + counted += 1) { + } + if (counted != expected) { + throw new RuntimeException("Failed test1 iteration=" + i + + " max=" + max + + " counted=" + counted + + " expected=" + expected); + } + } +} +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test6753639.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6753639 + * @summary Strange optimisation in for loop with cyclic integer condition + * + * @run main/othervm -Xbatch Test6753639 + */ + +public class Test6753639 { + public static void main(String[] args) throws InterruptedException { + int END = Integer.MAX_VALUE; + int count = 0; + for(int i = Integer.MAX_VALUE - 5; i <= END; i++) { + count++; + if (count > 100000) { + System.out.println("Passed"); + System.exit(95); + } + } + System.out.println("broken " + count); + System.out.println("FAILED"); + System.exit(97); + } +} + +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test6850611.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6850611 + * @summary int / long arithmetic seems to be broken in 1.6.0_14 HotSpot Server VM (Win XP) + * + * @run main Test6850611 + */ + +public class Test6850611 { + + public static void main(String[] args) { + test(); + } + + private static void test() { + for (int j = 0; j < 5; ++j) { + long x = 0; + for (int i = Integer.MIN_VALUE; i < Integer.MAX_VALUE; ++i) { + x += i; + } + System.out.println("sum: " + x); + if (x != -4294967295l) { + System.out.println("FAILED"); + System.exit(97); + } + } + } +} +
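A quick check (hypothetical class name, not from this changeset) of the expected constant in Test6850611: the loop sums every int from Integer.MIN_VALUE to Integer.MAX_VALUE - 1, each positive i cancels against -i, and the two leftovers -(2^31) and -(2^31 - 1) add up to -(2^32 - 1) = -4294967295.
    // Hypothetical check of the constant used in Test6850611.
    public class SumCheckSketch {
        public static void main(String[] args) {
            long twoTo31 = 1L << 31;
            long expected = -(2 * twoTo31 - 1);  // leftovers -(2^31) and -(2^31 - 1)
            System.out.println(expected);        // -4294967295
        }
    }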
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test6890943.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6890943 + * @summary JVM mysteriously gives wrong result on 64-bit 1.6 VMs in hotspot mode. + * + * @run shell Test6890943.sh + */ +import java.util.*; +import java.io.*; +import java.util.regex.*; + +public class Test6890943 { + public static final boolean AIR = true, ROCK = false; + public static void main(String[] args) { + new Test6890943().go(); + } + + int r, c, f, t; + boolean[][] grid; + + public void go() { + Scanner s = new Scanner(System.in); + s.useDelimiter("\\s+"); + int T = s.nextInt(); + for (t = 0 ; t < T ; t++) { + r = s.nextInt(); c = s.nextInt(); f = s.nextInt(); + grid = new boolean[r][c]; + for (int x = 0 ; x < r ; x++) { + String line = s.next(); + for (int y = 0 ; y < c ; y++) grid[x][y] = line.charAt(y) == '.'; + } + int digs = solve(); + String res = digs == -1 ? 
"No" : "Yes " + digs; + System.out.printf("Case #%d: %s\n", t+1, res); + } + } + + Map<Integer, Integer> M = new HashMap<Integer, Integer>(); + + private int solve() { + M = new HashMap<Integer, Integer>(); + M.put(calcWalkingRange(0, 0), 0); + for (int digDown = 0 ; digDown < r ; digDown++) { + Map<Integer, Integer> tries = new HashMap<Integer, Integer>(); + for (Map.Entry<Integer, Integer> m : M.entrySet()) { + int q = m.getKey(); + if (depth(q) != (digDown)) continue; + if (stuck(q)) continue; + tries.put(q, m.getValue()); + } + + for (Map.Entry<Integer, Integer> m : tries.entrySet()) { + int q = m.getKey(); + int fallLeftDelta = 0, fallRightDelta = 0; + //fall left + int fallLeft = fall(digDown, start(q)); + if (fallLeft > 0) { + fallLeftDelta = 1; + if (fallLeft <= f) addToM(calcWalkingRange(digDown+fallLeft, start(q)), m.getValue()); + } + + //fall right + int fallRight = fall(digDown, end(q)); + if (fallRight > 0) { + fallRightDelta = 1; + + if (fallRight <= f) addToM(calcWalkingRange(digDown+fallRight, end(q)), m.getValue()); + } + + for (int p = start(q) + fallLeftDelta ; p <= end(q) - fallRightDelta ; p++) { + //goLeft + for (int digSpot = p ; digSpot > start(q) +fallLeftDelta ; digSpot--) { + int fallDown = 1+fall(digDown+1, digSpot); + if (fallDown <= f) { + if (fallDown == 1) { + addToM(calcWalkingRange(digDown + 1, digSpot, digSpot, p), m.getValue() + Math.abs(digSpot-p)+1); + } else { + addToM(calcWalkingRange(digDown + fallDown, digSpot), m.getValue() + Math.abs(digSpot-p)+1); + } + } + } + + //goRight + for (int digSpot = p ; digSpot < end(q)-fallRightDelta ;digSpot++) { + int fallDown = 1+fall(digDown+1, digSpot); + if (fallDown <= f) { + if (fallDown == 1) { + addToM(calcWalkingRange(digDown + 1, digSpot, p, digSpot), m.getValue() + Math.abs(digSpot-p)+1); + } else { + addToM(calcWalkingRange(digDown + fallDown, digSpot), m.getValue() + Math.abs(digSpot-p)+1); + } + } + } + } + } + } + + int result = Integer.MAX_VALUE; + for (Map.Entry<Integer, Integer> m : M.entrySet()) { + if (depth(m.getKey()) == r-1) result = Math.min(m.getValue(), result); + } + + if (result == Integer.MAX_VALUE) return -1; + return result; + } + + private void addToM(int q, int i) { + Integer original = M.get(q); + if ( original == null ) M.put(q, i); + else M.put(q, Math.min(original, i)); + } + + private int fall(int row, int column) { + int res = 0; + for ( int p = row+1 ; p < r ; p++) { + if (grid[p][column] == AIR) res++; + else break; + } + return res; + } + + private boolean stuck(int q) { + return start(q) == end(q); + } + + private int depth(int q) { + return q % 50; + } + + private int start(int q) { + return q / (50*50); + } + + private int end(int q) { + return (q / 50) % 50; + } + + private int calcWalkingRange(int depth, int pos) { + return calcWalkingRange(depth, pos, Integer.MAX_VALUE, Integer.MIN_VALUE); + } + + private int calcWalkingRange(int depth, int pos, int airOverrideStart, int airOverrideEnd) { + int left = pos, right = pos; + if (depth >= r) return (c-1)*50 + depth; + + while (left > 0) { + if (grid[depth][left-1] == ROCK && (left-1 < airOverrideStart || left-1 > airOverrideEnd)) break; + if (depth < r-1 && grid[depth+1][left-1] == AIR) { + left--; + break; + } + left--; + } + while (right < c-1) { + if (grid[depth][right+1] == ROCK && (right+1 < airOverrideStart || right+1 > airOverrideEnd)) break; + if (depth < r-1 && grid[depth+1][right+1] == AIR) { + right++; + break; + } + right++; + } + + return left *50*50 + right*50 + depth; + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test6890943.sh Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,67 @@ +#!/bin/sh +# +# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# + +if [ "${TESTSRC}" = "" ] +then + echo "TESTSRC not set. Test cannot execute. Failed." + exit 1 +fi +echo "TESTSRC=${TESTSRC}" +if [ "${TESTJAVA}" = "" ] +then + echo "TESTJAVA not set. Test cannot execute. Failed." + exit 1 +fi +echo "TESTJAVA=${TESTJAVA}" +if [ "${TESTCLASSES}" = "" ] +then + echo "TESTCLASSES not set. Test cannot execute. Failed." + exit 1 +fi +echo "TESTCLASSES=${TESTCLASSES}" +echo "CLASSPATH=${CLASSPATH}" + +set -x + +cp ${TESTSRC}/Test6890943.java . +cp ${TESTSRC}/input6890943.txt . +cp ${TESTSRC}/output6890943.txt . +cp ${TESTSRC}/Test6890943.sh . + +${TESTJAVA}/bin/javac -d . Test6890943.java + +${TESTJAVA}/bin/java -XX:-PrintVMOptions ${TESTVMOPTS} Test6890943 < input6890943.txt > test.out 2>&1 + +diff output6890943.txt test.out + +result=$? +if [ $result -eq 0 ] +then + echo "Passed" + exit 0 +else + echo "Failed" + exit 1 +fi
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test6897150.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6897150 + * @summary Hotspot optimises away a valid loop + * + * @run main Test6897150 + */ + +// Should be compiled with javac from JDK1.3 to get bytecode which shows the problem. +public class Test6897150 { + public static void main(String[] args) { + // This works + loopAndPrint(Integer.MAX_VALUE -1); + // This doesn't + loopAndPrint(Integer.MAX_VALUE); + } + + static void verify(int max, int a) { + if ( a != (max - 1)) { + System.out.println("Expected: " + (max - 1)); + System.out.println("Actual : " + a); + System.exit(97); + } + } + static void loopAndPrint(int max) { + int a = -1; + int i = 1; + for (; i < max; i++) { + a = i; + } + verify(max, a); + } +} +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test6905845.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6905845 + * @summary Server VM improperly optimizing away loop. + * + * @run main Test6905845 + */ + +public class Test6905845 { + + public static void main(String[] args){ + for (int asdf = 0; asdf < 5; asdf++){ + //test block + { + StringBuilder strBuf1 = new StringBuilder(65); + long start = System.currentTimeMillis(); + int count = 0; + + for (int i = Integer.MIN_VALUE; i < (Integer.MAX_VALUE - 80); i += 79){ + strBuf1.append(i); + count++; + strBuf1.delete(0, 65); + } + + System.out.println(count); + if (count != 54366674) { + System.out.println("wrong count: " + count +", should be 54366674"); + System.exit(97); + } + } + //test block + { + StringBuilder strBuf1 = new StringBuilder(65); + long start = System.currentTimeMillis(); + int count = 0; + + for (int i = Integer.MIN_VALUE; i < (Integer.MAX_VALUE - 80); i += 79){ + strBuf1.append(i); + count++; + strBuf1.delete(0, 65); + } + + System.out.println(count); + if (count != 54366674) { + System.out.println("wrong count: " + count +", should be 54366674"); + System.exit(97); + } + } + } + } +} +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test6931567.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6931567 + * @summary JIT Error (on class file compiled with eclipse) on JVM x64 (but not on x32!). + * + * @run main Test6931567 + */ + +// Should be compiled with javac from JDK1.3 to get bytecode which shows the problem. +public class Test6931567 { + + public static void main(final String[] args) { + booleanInvert(Integer.MAX_VALUE); + booleanInvert(Integer.MAX_VALUE - 1); + } + + private static void booleanInvert(final int max) { + boolean test1 = false; + boolean test2 = false; + + for (int i = 0; i < max; i++) { + test1 = !test1; + } + + for (int i = 0; i < max; i++) { + test2 ^= true; + } + + if (test1 != test2) { + System.out.println("ERROR: Boolean invert\n\ttest1=" + test1 + + "\n\ttest2=" + test2); + System.exit(97); + } else { + System.out.println("Passed!"); + } + } +} +
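Both loops in Test6931567 compute the parity of max: toggling a boolean max times and XOR-ing true into a boolean max times each yield (max % 2 != 0), so the two results must agree; the reported bug was the x64 JIT making them differ. A small sketch of that equivalence (hypothetical class name, small iteration count, not from this changeset):
    // Hypothetical illustration: both update styles reduce to the parity of max.
    public class ParitySketch {
        public static void main(String[] args) {
            int max = 12345;
            boolean toggled = false;
            boolean xored = false;
            for (int i = 0; i < max; i++) toggled = !toggled;
            for (int i = 0; i < max; i++) xored ^= true;
            System.out.println(toggled + " " + xored + " " + (max % 2 != 0));  // all three agree
        }
    }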
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test6935022.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6935022 + * @summary Server VM incorrectly breaks out of while loop + * + * @run main Test6935022 + */ + +public class Test6935022 { + public static final void main(String[] args) throws Exception { + Test6935022 test = new Test6935022(); + + int cnt = 0; + + while (cnt < 10000) { + try { + ++cnt; + if ((cnt&1023) == 0) + System.out.println("Thread="+Thread.currentThread().getName() + " iteration: " + cnt); + test.loop(2147483647, (cnt&1023)); + } + + catch (Exception e) { + System.out.println("Caught on iteration " + cnt); + e.printStackTrace(); + System.exit(97); + } + } + } + + private void loop(int endingRow, int mask) throws Exception { + int rows = 1; + boolean next = true; + + while(rows <= endingRow && next) { + rows++; + if (rows == mask) + System.out.println("Rows="+rows+", end="+endingRow+", next="+next); + next = next(rows); + } + + if (next) + throw new Exception("Ended on rows(no rs): " + rows); + } + + private boolean next(int rows) { + return rows < 12; + } +} +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test6959129.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6959129 + * @summary COMPARISON WITH INTEGER.MAX_INT DOES NOT WORK CORRECTLY IN THE CLIENT VM. + * + * @run main/othervm -ea Test6959129 + */ + +public class Test6959129 { + + public static void main(String[] args) { + long start = System.currentTimeMillis(); + int min = Integer.MAX_VALUE-30000; + int max = Integer.MAX_VALUE; + long maxmoves = 0; + try { + maxmoves = maxMoves(min, max); + } catch (AssertionError e) { + System.out.println("Passed"); + System.exit(95); + } + System.out.println("maxMove:" + maxmoves); + System.out.println("FAILED"); + System.exit(97); + } + /** + * Imperative implementation that returns the length hailstone moves + * for a given number. + */ + public static long hailstoneLengthImp(long n) { + long moves = 0; + while (n != 1) { + assert n > 1; + if (isEven(n)) { + n = n / 2; + } else { + n = 3 * n + 1; + } + ++moves; + } + return moves; + } + + private static boolean isEven(long n) { + return n % 2 == 0; + } + + /** + * Returns the maximum length of the hailstone sequence for numbers + * between min to max. + * + * For rec1 - Assume that min is bigger than max. + */ + public static long maxMoves(int min, int max) { + long maxmoves = 0; + for (int n = min; n <= max; n++) { + if ((n & 1023) == 0) System.out.println(n); + long moves = hailstoneLengthImp(n); + if (moves > maxmoves) { + maxmoves = moves; + } + } + return maxmoves; + } +} +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test6985295.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6985295 + * @summary JVM fails to evaluate condition randomly + * + * @run main/othervm -Xbatch Test6985295 + */ + +public class Test6985295 { + + public static void main(String[] args) { + int min = Integer.MAX_VALUE-50000; + int max = Integer.MAX_VALUE; + System.out.println("max = " + max); + long counter = 0; + int i; + for(i = min; i <= max; i++) { + counter++; + if (counter > 1000000) { + System.out.println("Passed"); + System.exit(95); + } + } + System.out.println("iteration went " + counter + " times (" + i + ")"); + System.out.println("FAILED"); + System.exit(97); + } +} +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test6992759.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6992759 + * @summary Bad code generated for integer <= comparison, fails for Integer.MAX_VALUE + * + * @run main Test6992759 + */ + +public class Test6992759 { + + static final int N_TESTS = 1000000000; + + public static void main(String[] args) throws Exception { + + /* + * If MAX_VALUE is changed to MAX_VALUE - 1 below, the test passes + * because (apparently) bad code is only generated when comparing + * <= MAX_VALUE in the doTest method. + */ + Test6992759 test = new Test6992759(); + for (int i = 0; i < N_TESTS; i += 1) { + test.doTest(10, Integer.MAX_VALUE, i); + //test.doTest(10, Integer.MAX_VALUE - 1, i); + } + System.out.println("No failure"); + } + + void doTest(int expected, int max, int i) { + int counted; + for (counted = 0; + (counted <= max) && (counted < expected); + counted += 1) { + } + if (counted != expected) { + throw new RuntimeException("Failed test iteration=" + i + + " max=" + max + + " counted=" + counted + + " expected=" + expected); + } + } +} +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test7005594.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 7005594 + * @summary Array overflow not handled correctly with loop optimizations + * + * @run shell Test7005594.sh + */ + +public class Test7005594 { + + static int test(byte a[]){ + int result=0; + for( int i=0; i<a.length; i+=((0x7fffffff>>1)+1) ){ + result += a[i]; + } + return result; + } + + public static void main(String [] args){ + byte a[]=new byte[(0x7fffffff>>1)+2]; + int result = 0; + try { + result = test(a); + } catch (ArrayIndexOutOfBoundsException e) { + e.printStackTrace(System.out); + System.out.println("Passed"); + System.exit(95); + } + System.out.println(result); + System.out.println("FAILED"); + System.exit(97); + } + +} +
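Why an ArrayIndexOutOfBoundsException is the correct outcome in Test7005594: the stride (0x7fffffff>>1)+1 is 2^30, so the index visits 0 and 2^30, and the next step overflows to Integer.MIN_VALUE, which still satisfies i < a.length and must fault on the array access. A sketch of the index arithmetic (hypothetical class name, not from this changeset):
    // Hypothetical illustration of the index sequence in Test7005594.test().
    public class StrideSketch {
        public static void main(String[] args) {
            int stride = (0x7fffffff >> 1) + 1;  // 2^30 = 1073741824
            int i = 0;
            i += stride;                         // 1073741824, still a valid index
            i += stride;                         // overflows to Integer.MIN_VALUE
            System.out.println(i);               // -2147483648, negative yet < a.length
        }
    }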
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test7005594.sh Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,80 @@ +#!/bin/sh +# +# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# + +if [ "${TESTSRC}" = "" ] +then + echo "TESTSRC not set. Test cannot execute. Failed." + exit 1 +fi +echo "TESTSRC=${TESTSRC}" +if [ "${TESTJAVA}" = "" ] +then + echo "TESTJAVA not set. Test cannot execute. Failed." + exit 1 +fi +echo "TESTJAVA=${TESTJAVA}" +if [ "${TESTCLASSES}" = "" ] +then + echo "TESTCLASSES not set. Test cannot execute. Failed." + exit 1 +fi +echo "TESTCLASSES=${TESTCLASSES}" +echo "CLASSPATH=${CLASSPATH}" + +set -x + +cp ${TESTSRC}/Test7005594.java . +cp ${TESTSRC}/Test7005594.sh . + +${TESTJAVA}/bin/javac -d . Test7005594.java + +${TESTJAVA}/bin/java ${TESTVMOPTS} -Xms1600m -Xcomp -XX:CompileOnly=Test7005594.test Test7005594 > test.out 2>&1 + +result=$? + +cat test.out + +if [ $result -eq 95 ] +then + echo "Passed" + exit 0 +fi + +if [ $result -eq 97 ] +then + echo "Failed" + exit 1 +fi + +# The test should also pass when there is not enough space for the object heap +grep "Could not reserve enough space for object heap" test.out +if [ $? = 0 ] +then + echo "Passed" + exit 0 +else + echo "Failed" + exit 1 +fi
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/Test7020614.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 7020614 + * @summary "-server" mode optimizer makes code hang + * + * @run main/othervm/timeout=30 -Xbatch Test7020614 + */ + +public class Test7020614 { + + private static final int ITERATIONS = 1000; + private static int doNotOptimizeOut = 0; + + public static long bitCountShort() { + long t0 = System.currentTimeMillis(); + int sum = 0; + for (int it = 0; it < ITERATIONS; ++it) { + short value = 0; + do { + sum += Integer.bitCount(value); + } while (++value != 0); + } + doNotOptimizeOut += sum; + return System.currentTimeMillis() - t0; + } + + public static void main(String[] args) { + for (int i = 0; i < 4; ++i) { + System.out.println((i + 1) + ": " + bitCountShort()); + } + System.out.println("doNotOptimizeOut value: " + doNotOptimizeOut); + } +} +
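The do/while in bitCountShort above terminates only because ++value on a short wraps from Short.MAX_VALUE to Short.MIN_VALUE and returns to 0 after visiting all 65536 short values; the 30-second timeout in the @run line catches the hang reported in the bug. A small sketch of that wraparound (hypothetical class name, not from this changeset):
    // Hypothetical illustration: a short increment wraps and cycles through all 65536 values.
    public class ShortWrapSketch {
        public static void main(String[] args) {
            short value = Short.MAX_VALUE;
            value++;                                       // wraps to Short.MIN_VALUE
            System.out.println(value == Short.MIN_VALUE);  // prints true
            short v = 0;
            int steps = 0;
            do { steps++; } while (++v != 0);
            System.out.println(steps);                     // 65536
        }
    }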
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/input6890943.txt Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,1543 @@ +50 +4 5 1 +..... +##### +..### +.#... +4 5 1 +..... +##### +###.. +...#. +5 4 2 +.... +#### +..## +.### +.#.# +6 10 5 +.......... +####.##### +####.##### +####.##### +####.##### +####.##### +6 10 4 +.......... +#....##### +#....##### +#....##### +#....##### +#....##### +6 10 1 +.......... +####.##### +####.##### +####.##### +####.##### +####.##### +6 10 2 +.......... +####.##### +####.##### +####.##### +####.##### +####.##### +6 11 2 +.....###### +########### +###.......# +###.#.#...# +###.#.##..# +###.#.###.# +6 11 1 +.....###### +########### +###.......# +###.#.#...# +###.#.##..# +###.#.###.# +6 11 2 +.......#### +########### +###.......# +###.#.#...# +###.#.##..# +###.#.###.# +7 11 1 +..#........ +##.#....... +###.#...... +####.#..... +#####.#.... +######.#... +#########.. +13 16 2 +................ +#.#.#.#.#.#.#.#. +................ +.#.#.#.#.#.#.#.# +................ +#.#.#.#.#.#.#.#. +................ +.#.#.#.#.#.#.#.# +................ +#.#.#.#.#.#.#.#. +................ +.#.#.#.#.#.#.#.# +................ +4 16 3 +................ +#.#.#.#.#.#.#.#. +.#.#.#.#.#.#.#.# +................ +50 50 1 +.................................................. +################################################.# +.#............#....#.......................#....## +..#.....#......#....#.....................#....#.. +...#.......#....#....#...................#....#... +#...#....#.......#....#.................#....#.... +##...#.......#....#....#...............#....#..... +###...#....#.......#....#.............#....#...... +####...#.......#....#....#...........#....#....... +.####...#....#.......#....#.........#....#........ +..####...#.......#....#....#.......#....#......... +...####...#....#.......#....#.....#....#.......... +....####...#.......#....#....#...#....#...###..... +.....####...#....#.......#....#.#....#..##..###... +......####...#.......#....#....#.............##... +.......####...#....#.......#...............##..... +........####...#.......#....#............##....... +.........####...#....#.......#.........#######.... +..###.....####...#.......#....#................... +.#..###....####...#....#.......#.........####..... +##...###....####...#.......#....#.......##..##.... +##...........####...#....#.......#......##..##.... +##..####......####...#.......#....##....##..##.... +##....##.......####...#....#......##....##..##.... +.###.##.........####.............##.....##..##.... +..###............#######........##.......####..... +.........###......######.......##................. +.......##..##..........#......##.........####..... +......##....##........#......##.........##..##.... +......##.............#......##..........##..##.... +......##............#......##...........##..##.... +......##....#......#......##............##..##.... +.......##..##.....#....########.........##..##.... +........####.....#.....###.#...#.........####..... +.#####..........#.....#..##.#...#................. +...##..........#.....#....##.#...#.........####... +...##.........#.....#..#...##.#...#.......##..##.. +...##........#.....#..##....##.#...#......##..##.. +...##.......#.....#....##....##.#...#.....##..##.. +##.##......#.....#...###......##.#...#.....#####.. +.###......#.....#..##..........##.#...#.......##.. +.........#.....#...##..####.....##.#...#.....##... +........#.....#...###..#.##......##.#...#...##.... +.......#.....#....##.....###......##.#...#........ 
+......#.....#..#.##...###..........##.#...#....... +.....#.....#...##..#..#..##.##.#....##.#...#...... +....#.....#........###.....##........##.#...#..... +...#.....#.......##...####..###.......##.#...#.... +..#.....#......##.#.#..#.#..##.........##.#...#... +.#..............#.#.#.#.#.#.#...........##.#...#.. +50 50 13 +.................................................. +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +################################################## +20 49 5 +................................................. 
+################################################# +################################################# +##################################.############## +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +############################.#################### +################################################# +######.########################################## +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +49 49 48 +................................................. +################################################# +################################################. +################################################# +####################################.############ +################################################# +##########.###################################### +#######.######################################### +################################################# +################################################# +################################################# +#######################################.######### +################################################# +################################################# +################################################# +################################################# +################################################# +###########################################.##### +################################################# +################################################# +###.############################################# +###############.################################# +################################################# +##.############################################## +################################################# +################################################# +################################################# +################################################# +################################################# +#####################################.########### +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +#####.########################################### +#####################.########################### +################################################# +###.############################################# +################################################# +################################################# +################################################# +#############.###########.####################### +###.##############.####.######################### +#########################################.####### +################################################# +########################################.######## +40 49 10 +................................................. 
+################################################# +..#..............#............................... +.......................#.......................#. +..#....#...................#.................#... +..........#...............................#...... +............#..................#.......#......... +........#............#............#.............. +...................#.....#....................... +....#..........#........................#........ +....#.................................#......#... +...........#..................................... +....................#............................ +.##........#................#.................... +..............#.................................. +........#......................................#. +.................................#....#........#. +................................................. +.............................#..##............... +...........................................#.#... +........#..........#........#.............#.....# +..........#.#.................................... +.............................#............#...... +.......#.......................#.........#....... +..............................#.................. +......#.......................................... +.............#................#.................. +......#.............................#............ +#.................#..................#......#.... +..............#.................................. +...........#...........................#.....#... +............#............#...#................... +.......................#.......................#. +............................#.................... +..........................#...................... +...........................#.............#...#... +.#............................................... +..................#.............................. +...#..............................#.............. +.....#..................#........................ +49 49 20 +.........................................##..#..# +########################################.##...#.# +#.##..#....#####..####...#..#.##.###.#..#..#..... +..#...#.##...##.#....#...##..#.#..###...###...... +..#..##.#.###......#.....##.....#.......##.#.#.#. +..##......###.#.##.....##.#..#.##.###..##.#.##### +#.#.##....#..#..#.....#.#.##.#.#.##.#.....###.#.# +..###.#..#..###...###..#.#.....#..######.#.#....# +..#...#...##.#..##.#.#.#..###.#..#..#...#....##.. +.#..#.#..##.#...##........##.##..#..........#.#.. +#.#.###.####...#.#...#..###..#.##....#........... +#..#.#######.#....#.#.##...#.#.............##.... +###..###....#..#....#.#.###...#..##.#.#..##.##### +....####...#.#...........##.#.#.#..#.#.#.##.#.#.. +##.#...##..#..#...#..##..####.##.#...#.....#...#. +...###.##.#..##.....#.#.##..#..###.#.###.#.#..#.# +###....##...#.#.##.##..#.#...#...##....#.###..... +##..#....###..#.....#..#....#.#.#.##.......##.### +.#.#....#.....####..##....##...##...#.##..##.#... +##....#....#.#.###.##...#..##.##...##....#.###### +..#....#..##.....##.##.#.........##..##...#.#.... +#..#..#.#....##.#.#...#.###..#...#..#.##.#.#....# +.....#..#...###.....##...###....###.##.....#...#. +#..#.#...###..#....#..####....#.#......#..##....# +.....#.#...###...###.#..#.#.#.........#.#.#..#.## +.#..##..##..#..#.#....##.........#..#.##.#..##... +##.#.#.....##.##..###...#.#.#..#.#.####.#.###.#.. +..#.#.......#.#.#...#####..#.##.#....#...#.#..... +..########.####.....#..#.........#..#####.##.#... 
+.#......##.####..###..#.####........#....#....#.# +#.....#....#...#...#..###......#.##..#..#...#.### +...#..###.....#....#..#..#......#.....#.#.#.#..## +....#.##..####.#..###...#...#...#######..#..#.... +.#..#...##...#...#......##...#####.##...#..##.... +..#.#.#######.#....#.#.###....#.##...#..#.##..#.. +#..#.##.#.#.##..###....#.##.#..#..#...##....##..# +.###.#.#..##.###...#..##.#.#...#.#.####....#..### +#.#......#...##.##...#.#.....##..#..##....#.##... +#.#...#.#.##...##.###.#..##..##..####..##.#.#...# +....#....#..####.##.....#.#....#..##..##....#..#. +....#...###.....##..#..###....#........###..##..# +.###....##...........#....#........####.#.####..# +.#........##...#.###..###.#...##.##..###..###..## +#.#######.#....###...#..##..#...#....##....#....# +#....#..#......#..#...##.....##.#.#.#..#......##. +#.##.##.#.#.##..#..##..######.##.###.#.#..#....#. +.###.##.....#.##.#..###....###..##....#.#..#..... +#####..#.#....#.#......##..##.#...........####.#. +..#..#.#..#...##....###.##.#.#...#..#..#....#..## +49 49 5 +.........................................#....### +#########################################..#..... +.....#.###...#...#...........##...#...#....#...#. +....#.....#..#....#....##...#..###.#...#..##.#.## +....#...###.##...........#.#.##.......#.#.#.#..#. +...........#..###..#..##.....#.........#.....#... +.....#......#.##.#..##.###.....#...###.#....#...# +.#......##.#.......##...##.....###..#......#.##.. +#.....#.#..#.#...#####...#.###.##.....#..#....... +........##....#....#....#.#.#....####....#.##.### +....#.#......#.##.....#..........##.............# +#..##....#.#....#..#.....#.#.#....#.#.#....##...# +...#..##..#...#....#.#....#..#....#..#..#.......# +......#...##.#.####.....####.###.#..#........#### +##..........#...........#..#.##......##......#.## +....###....##...#.####.....#............#..#..... +...#....#....##.....##.....#.#.....#..#.#..#..#.. +...........#...#.##..##..##.#......#..#.#..##.... +.#........##......#.........##..#..........#.#... +#....##.#.##..#.....#.....##.....##....#......... +.#.......###.......#..#.....#.....#..##.##...#..# +###......##.....##...##..#.#....#...####.##...... +....#..#....##...#.##.##.#.#...#...#.#..#.##.#..# +..........#.#..#####..####....##..#.#.#.........# +........##.#.###..###....#...#........##.#......# +...........##.##..#.....#.#.#..#....#.###..#.###. +#.#....#.#...##............#.#.....#....#...#.#.. +..###.#.##...#....#..##.##...#........#.......... +...........#...##...#..#..##.....#..#.......#..#. +#.#.#......##.#.....#...#.#.#.#...#..#######.#.## +...##..#.......###..#..#.##....#......####.#..... +....##......#.#......#....##..#...##.......#..#.. +#..###.##....##..##.#..#.###.##.....#...#........ +#......#....####....#.........#.........#.##.##.. +.#.#..###......#...##.#.##..#.#.....#...#.#...... +......#.#...##.#....#..#.#.......#.#.....#.#.###. +###..#.....#....#.#...##..#.#.....#.#.....#.#.#.. +..........#........#....#...#..#...#...#.#.#.#... +#..##..#...###....#.#..#....##....#...#..##....#. +.#.#.#....#..##.#...#.....#.##..#....##........#. +..#.##....###...#...#..#..#.....#..#..###..#...#. +##....#........#..#..#..#......#...#.##....#.#.## +....#.............#..#...#...#.#.#...##....#..#.# +...#.........#....###.....#.#..#..#...#..#...#... +.#.#.###..###..##.#.##...#...#..#................ +.#......#..#..#.#.#...#.....................#..#. +..#.....#.......#..##.......#...#...##.#.....#.#. +#..####.#....#......#.........#.#...###...#....#. +#.#.#.#....#......#..#.#........###..#....##..... 
+31 47 7 +.........................#.#..##.#..#...#.#...# +#########################.#.#...#...#.#........ +...#...........#...#...#...##...#..#.#.#...#.## +.#........#.......#.#.....#......#........#.... +#.#....###...#...#....#....#......##...#....... +#.#......##..##...#.....#.##.#.........#....... +.......#.....##........#..#.......#.##...#..... +..#.......##.#...#.#..#.#..#....#......#......# +#.##.........#####..###.......#........#......# +.#..#.#.#...#....####...#...........#....#..... +##..##..#..###..###....##......#....#..##...#.# +#......#........##...#.#.#..#..#....#..#...##.# +.#.#.##..........#.#..#...##.##................ +##.##.#...#....#.............#.#.....###...#.#. +..#..#.#..#..#.#....#....#............#.##..#.. +......###......#..##..#.#...##.........#.#.###. +..#.##.#..#......##....#.#........#....##..#..# +.........#.#.#.....##...#.#...#.##.....##.##... +.#........#...#.###.........#.#.#.............. +....##........#.....#....###....###.#..#....##. +..#..#....#....#.#.......#.#.#..........###.... +.#..###.#...#.###...##...#....#...#............ +##.....#..##.#.##.##.......#.##.....###...##.#. +.###.......#...#.....#.....###.........#...#... +#.....#..####.....##...#........####..........# +#..#...........##.#.#.#..............#....##... +..#.#..........#..##.#.##...##..##.#.#..##..#.. +#..........................###......#....##.... +.....#..........#..#......#...#.#..#....#...#.. +#......#....##.........#..#.......#..#.......#. +.#......#...###...##....##.#..........#...#..#. +44 35 13 +...........................##..#### +############################.....## +..###.#.##.#...#.##.####.###.###.## +#.##.#.#.#.#..###.#..#...###.##..## +.#.##.#.##..#.###.###.##.#.#...#.#. +##....##.#..#####.#.####..##.##...# +####.##..##.###.##.#.#...#.##.###.# +#.#######.#.#.#..#.#..#..#...###.#. +##.###.#..#.####.##.#.#########.##. +#.##.#######..#.#.####.#..#..###### +.#.##..########...##.###..#..##.##. +#.#######.###...##.#...#.####.#..#. +.###..###....#.#.##.###..##.##.##.# +...#.###.#.##.#.####.##.####.#.#... +#..#......##....#.##.#.#.###.#..#.. +...##.###.###.#.####..#.#.#..###.#. +.#####.#..#..##.#.#...##.#.#.##...# +..##..#.#....##.#.#.###.##.##...#.. +###.#..###....#######..#.#.###.##.# +##.##.#.#.##..#.#.#.#.#..##.####... +##.###...#.###.#.#.#..#####.###.#.. +...#.#...##......#.##..##.##.#.#.## +##..#..####..###...###.#........### +##..#....#..#.#..##.#####..###.#... +##...#.#####.###.##....###...####.. +#.####.#..#.##.#.#...#.###.#...##.. +####.#####.###.#.##...##...#...#.## +#..##.##....###..#..####.##..#.#.## +..#.###.##....####.##.#..###.#....# +#.#.###.#..#.##.##...###.##..###### +##.#.##.###.#..#...###.####..##.### +.########.#....#..#........#..##..# +####..#.##.#.##.####..#.###...##### +#..##..#..##.###....####.#.#...#.#. +.#...##.##.###.###...##..##..###... +###.##...#.##...####.#.#.##..#.#### +##.###..##.#....#.###..##.#...###.# +##..##.###..#..#.####.#.....##.#### +.#....##...#####.....####...#.##.#. +##.#.#.....##...#..#...#....#....## +#..#.#..#####.##..###.#.########### +.#.###.#..###.##.###.#.###.##.#.### +.###..####.#..##......#..##.######. +...##..###.#.....##.#.#..##......#. +6 10 2 +.......#.. +########## +##.#...#.. +.#..###... +#.##.##### +.####..#.# +7 9 4 +......##. +######### +.#.###.## +..#...#.. +.....##.# +####...## +.#..#.#.# +10 9 2 +......... +######### +..###.### +#..###.## +.....#.#. +..##..... +###.#.... +.##..#... +##...#..# +#..##..## +10 7 1 +....#.. +####..# +####..# +..#.#.. +...##.# +#....## +..#..## +#.#.... +.##.#.. 
+###...# +6 10 2 +.......#.. +#######.#. +..##.#.##. +..#..#.... +.#.#....## +..#....#.. +40 40 5 +........................................ +###############################..####### +#..##############.....################## +........................................ +...........................##########... +#####...################..############## +##################.##################### +.............................#####...... +#.............................########## +...............................####..... +.........#################.............. +..........................###........... +........................................ +....................#####............... +##########....................########## +.......##################............... +########........###############..####### +........................................ +..........#########################..... +#####................................... +.........................####........... +......................#####...######.... +######..................################ +........#########....................... +############.........................### +........####################............ +......................################## +....................######......#....... +............################............ +........................................ +......................#########......... +#######............##################### +........###############................. +........................................ +.........###################............ +.............................#.......... +######................................## +........................#############... +......##................................ +........................................ +30 20 11 +.................... +#################### +......##.##......... +#.....####....###### +...##............... +........#######.###. +.......##........... +....#####..##....... +........##.#........ +.....###...........# +..##########.....##. +..#........#.#...... +......##...##....... +....##...###..###... +.####...#####...#### +###................. +.......##........... +..........###..####. +..####..#####.#####. +..###....#.......##. +......##....##...... +....##.###.......... +.##.....#####..#.... +.................... +......###........... +####..###.#########. +.......#######...... +....###............. +.........###.#####.. +................#### +50 20 17 +.................... +###################. +#..##............... +.................... +............####.... +........#####....... +..............#..#.. +.........####...#### +.............####... +.....#....###....... +####...............# +.................... +.....######......... +......#............. +.####......#####.... +.............##..... +#####....#####...... +..#####............. +##..####....###..### +.................... +.................... +........######.###.. +....####....#####... +....########........ +...#####............ +.###................ +...............###.. +.......#########.... +..................## +.......####......... +..#####............. +...####............. +.##..........#####.. +.................... +...#.##.......###... +######.....##......# +......####.......... +......#............. +.................... +.................... +.................... +.....#.............. +.....####....###.... +......#.........#### +.......######...###. +.................... +.................... +##...........###...# +.###................ 
+......#######....... +45 25 10 +......................... +######################### +#...................#..## +....................#.... +......................... +......................... +...................#####. +##.....................## +.#.....................#. +##.....................## +......................... +.....####................ +.....##.#................ +.....##.#................ +.....##.#............#### +.....##.#.......###..#### +.....##.#.......#.#...... +.....##.######..#.#...... +.....####....#..#.#..###. +........#....#..#.#..###. +...##...#....#..#.#...... +...##...#....#..#.#...... +...##...#....#..#.#...... +...##...#....#..#.#...... +...########..#..###...... +##.##...#....#........### +.#.##...######..#####.#.. +.#.##...........#...#.#.. +.#.##...........#...#.#.. +.#..............#...#.#.. +.#..............#...#.#.. +.#.####.........#...#.#.. +.#.#..#.........#...#.#.. +.#.#..#.........#####.#.. +##.#.##########.......### +...#.####.....#.......... +..#####.#.....#..####.... +..#####.#.....#..#..#.... +..#..#..#.....#..#..#.... +..#..#..#.....#..#..#.... +#.#..#..#.....#..#..#..## +#.#..#..#######..#..#..#. +#.####.......#####..#..#. +#............#..##..#..#. +#............########..#. +25 45 5 +............................................. +############################################# +..........#...###################.#...#...... +#########.#....############################## +#########.#.............##################... +####################....##################### +#########..........#........################# +####################........################# +..................####################.....#. +####..............#...........############### +######################################.###### +#########################.....#...########### +###.###################.......#.....######### +#####.#.........#.....#.....################# +############################################# +###.#############...####............######### +......############################...#....#.. +....#########....................#...######.. +##########.##..............################## +#####################......################## +############################################# +############################################# +#######################################...### +############################################# +##########....############################### +40 40 5 +...............................######### +######################################## +######################################## +######################################## +#########....########################### +######################################## +######################################## +######################################## +######################################## +######################################## +######################################## +..#######.######################........ +######################################## +######################################## +######################################## +...........#####################........ +####.................################### +######################################## +######################################## +######################################## +######################################## +#####................################### +###################...........########## +######################################## +######################################## +.......##############################... 
+######################################## +######################################## +######################################## +##############..............############ +######################################## +######################################## +######################################## +######################################## +######################################## +######################################## +######################################## +######################################## +######################################## +######################################## +30 20 2 +.................... +#################### +#############.....## +##...############### +#################### +#################### +#################### +....##.....#...#.... +#################### +##########.######### +###....############# +#########....####### +###....############# +#################### +....#######.....###. +#################### +########.########### +#################### +#############.....## +....#####..######### +#################### +##..########.##.#### +########.########### +#########..######### +#........########### +#########..######.## +###.################ +#################### +#################### +##############...### +50 20 7 +...................# +#################### +#####...############ +#################### +#################### +#################### +#################### +#################### +#################### +.....##############. +#################### +#################### +.........########... +#################### +#################### +#################### +#################### +#################### +#################### +#################### +#################### +#################### +#################### +#################### +#################### +#################### +#########.########## +#################### +#################### +#################### +....###########..... +#################### +#################### +#################### +#################### +#################### +#################### +#################### +#################### +##........########## +#################### +#################### +#################### +#################### +#################### +#################### +#################### +#################### +....##########...... +#################### +49 49 3 +................................................. +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +................................................. +................................................. 
+################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +################################################# +........##########............................... +################################################# +###########..................#################### +################################################# +################################################# +################################################# +################################################# +................................................. +################################################# +################################################# +################################################# +################################################# +################################################# +.......#####..................................... +................................................. +################################################# +................................################. +..................................###########.... +################################################# +################################################# +################################################# +################################################# +45 25 4 +......................... +######################### +########............##### +######################### +......................... +###############.#######.# +###############.#######.# +############....#######.# +......................... +############.#.########## +############...########## +######################### +.................######.. +################.######.# +......................... +....................##... +......................... +######################### +######################### +######################### +######################### +######################### +######################### +..........####........... +##....................... +##....................... +#########.####.########## +..........####........... +######################### +######################### +######################### +######################### +######################### +######################### +......................... +##################.###### +##################.###### +##################.###### +......................... +######################### +######################### +######################### +......................... +......................... +......................... +25 45 5 +............................................. 
+############################################# +####################################.######## +###########.################################# +###########.################################# +###########.################################# +############################################# +############################################# +############################################# +############################################# +############################################# +############################################# +############################################# +############################################# +############################################# +#############################...############# +#############################.#.############# +#############################...############# +############################################# +############################################# +############################################# +###..######################################## +###..######################################## +#########################################.... +####################################.####.##. +50 50 18 +.................................................. +################################################## +..##..##..##..##..##..##..##..##..##..##..##..##.. +.###.###.###.###.###.###.###.###.###.###.###.###.# +....####....####....####....####....####....####.. +.#.#####.#.#####.#.#####.#.#####.#.#####.#.#####.# +..######..######..######..######..######..######.. +.#######.#######.#######.#######.#######.#######.# +........########........########........########.. +.#.#.#.#########.#.#.#.#########.#.#.#.#########.# +..##..##########..##..##########..##..##########.. +.###.###########.###.###########.###.###########.# +....############....############....############.. +.#.#############.#.#############.#.#############.# +..##############..##############..##############.. 
+.###############.###############.###############.# +................################................## +.#.#.#.#.#.#.#.#################.#.#.#.#.#.#.#.### +..##..##..##..##################..##..##..##..#### +.###.###.###.###################.###.###.###.##### +....####....####################....####....###### +.#.#####.#.#####################.#.#####.#.####### +..######..######################..######..######## +.#######.#######################.#######.######### +........########################........########## +.#.#.#.#########################.#.#.#.########### +..##..##########################..##..############ +.###.###########################.###.############# +....############################....############## +.#.#############################.#.############### +..##############################..################ +.###############################.################# +................................################## +.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.################### +..##..##..##..##..##..##..##..#################### +.###.###.###.###.###.###.###.##################### +....####....####....####....###################### +.#.#####.#.#####.#.#####.#.####################### +..######..######..######..######################## +.#######.#######.#######.######################### +........########........########################## +.#.#.#.#########.#.#.#.########################### +..##..##########..##..############################ +.###.###########.###.############################# +....############....############################## +.#.#############.#.############################### +..##############..################################ +.###############.################################# +................################################## +.#.#.#.#.#.#.#.################################### +50 50 19 +.................................................. +################################################## +..##..##..##..##..##..##..##..##..##..##..##..##.. +.###.###.###.###.###.###.###.###.###.###.###.###.# +....####....####....####....####....####....####.. +.#.#####.#.#####.#.#####.#.#####.#.#####.#.#####.# +..######..######..######..######..######..######.. +.#######.#######.#######.#######.#######.#######.# +........########........########........########.. +.#.#.#.#########.#.#.#.#########.#.#.#.#########.# +..##..##########..##..##########..##..##########.. +.###.###########.###.###########.###.###########.# +....############....############....############.. +.#.#############.#.#############.#.#############.# +..##############..##############..##############.. 
+.###############.###############.###############.# +................################................## +.#.#.#.#.#.#.#.#################.#.#.#.#.#.#.#.### +..##..##..##..##################..##..##..##..#### +.###.###.###.###################.###.###.###.##### +....####....####################....####....###### +.#.#####.#.#####################.#.#####.#.####### +..######..######################..######..######## +.#######.#######################.#######.######### +........########################........########## +.#.#.#.#########################.#.#.#.########### +..##..##########################..##..############ +.###.###########################.###.############# +....############################....############## +.#.#############################.#.############### +..##############################..################ +.###############################.################# +................................################## +.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.################### +..##..##..##..##..##..##..##..#################### +.###.###.###.###.###.###.###.##################### +....####....####....####....###################### +.#.#####.#.#####.#.#####.#.####################### +..######..######..######..######################## +.#######.#######.#######.######################### +........########........########################## +.#.#.#.#########.#.#.#.########################### +..##..##########..##..############################ +.###.###########.###.############################# +....############....############################## +.#.#############.#.############################### +..##############..################################ +.###############.################################# +................################################## +.#.#.#.#.#.#.#.################################### +50 50 20 +.................................................. +################################################## +..##..##..##..##..##..##..##..##..##..##..##..##.. +.###.###.###.###.###.###.###.###.###.###.###.###.# +....####....####....####....####....####....####.. +.#.#####.#.#####.#.#####.#.#####.#.#####.#.#####.# +..######..######..######..######..######..######.. +.#######.#######.#######.#######.#######.#######.# +........########........########........########.. +.#.#.#.#########.#.#.#.#########.#.#.#.#########.# +..##..##########..##..##########..##..##########.. +.###.###########.###.###########.###.###########.# +....############....############....############.. +.#.#############.#.#############.#.#############.# +..##############..##############..##############.. 
+.###############.###############.###############.# +................################................## +.#.#.#.#.#.#.#.#################.#.#.#.#.#.#.#.### +..##..##..##..##################..##..##..##..#### +.###.###.###.###################.###.###.###.##### +....####....####################....####....###### +.#.#####.#.#####################.#.#####.#.####### +..######..######################..######..######## +.#######.#######################.#######.######### +........########################........########## +.#.#.#.#########################.#.#.#.########### +..##..##########################..##..############ +.###.###########################.###.############# +....############################....############## +.#.#############################.#.############### +..##############################..################ +.###############################.################# +................................################## +.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.################### +..##..##..##..##..##..##..##..#################### +.###.###.###.###.###.###.###.##################### +....####....####....####....###################### +.#.#####.#.#####.#.#####.#.####################### +..######..######..######..######################## +.#######.#######.#######.######################### +........########........########################## +.#.#.#.#########.#.#.#.########################### +..##..##########..##..############################ +.###.###########.###.############################# +....############....############################## +.#.#############.#.############################### +..##############..################################ +.###############.################################# +................################################## +.#.#.#.#.#.#.#.################################### +49 48 5 +................................................ +################################################ +################################.############### +###############################..##############. +##############################.#.#############.# +#############################....############... +############################.###.###########.### +###########################..##..##########..##. +##########################.#.#.#.#########.#.#.# +#########################........########....... +########################.#######.#######.####### +#######################..######..######..######. +######################.#.#####.#.#####.#.#####.# +#####################....####....####....####... +####################.###.###.###.###.###.###.### +###################..##..##..##..##..##..##..##. +##################.#.#.#.#.#.#.#.#.#.#.#.#.#.#.# +#################............................... +################.############################### +###############..##############################. +##############.#.#############################.# +#############....############################... +############.###.###########################.### +###########..##..##########################..##. +##########.#.#.#.#########################.#.#.# +#########........########################....... +########.#######.#######################.####### +#######..######..######################..######. +######.#.#####.#.#####################.#.#####.# +#####....####....####################....####... +####.###.###.###.###################.###.###.### +###..##..##..##..##################..##..##..##. +##.#.#.#.#.#.#.#.#################.#.#.#.#.#.#.# +#................################............... 
+.###############.###############.############### +.##############..##############..##############. +.#############.#.#############.#.#############.# +.############....############....############... +.###########.###.###########.###.###########.### +.##########..##..##########..##..##########..##. +.#########.#.#.#.#########.#.#.#.#########.#.#.# +.########........########........########....... +.#######.#######.#######.#######.#######.####### +.######..######..######..######..######..######. +.#####.#.#####.#.#####.#.#####.#.#####.#.#####.# +.####....####....####....####....####....####... +.###.###.###.###.###.###.###.###.###.###.###.### +.##..##..##..##..##..##..##..##..##..##..##..##. +.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.# +49 48 11 +................................................ +################################################ +################################.############### +###############################..##############. +##############################.#.#############.# +#############################....############... +############################.###.###########.### +###########################..##..##########..##. +##########################.#.#.#.#########.#.#.# +#########################........########....... +########################.#######.#######.####### +#######################..######..######..######. +######################.#.#####.#.#####.#.#####.# +#####################....####....####....####... +####################.###.###.###.###.###.###.### +###################..##..##..##..##..##..##..##. +##################.#.#.#.#.#.#.#.#.#.#.#.#.#.#.# +#################............................... +################.############################### +###############..##############################. +##############.#.#############################.# +#############....############################... +############.###.###########################.### +###########..##..##########################..##. +##########.#.#.#.#########################.#.#.# +#########........########################....... +########.#######.#######################.####### +#######..######..######################..######. +######.#.#####.#.#####################.#.#####.# +#####....####....####################....####... +####.###.###.###.###################.###.###.### +###..##..##..##..##################..##..##..##. +##.#.#.#.#.#.#.#.#################.#.#.#.#.#.#.# +#................################............... +.###############.###############.############### +.##############..##############..##############. +.#############.#.#############.#.#############.# +.############....############....############... +.###########.###.###########.###.###########.### +.##########..##..##########..##..##########..##. +.#########.#.#.#.#########.#.#.#.#########.#.#.# +.########........########........########....... +.#######.#######.#######.#######.#######.####### +.######..######..######..######..######..######. +.#####.#.#####.#.#####.#.#####.#.#####.#.#####.# +.####....####....####....####....####....####... +.###.###.###.###.###.###.###.###.###.###.###.### +.##..##..##..##..##..##..##..##..##..##..##..##. +.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.# +49 48 30 +................................................ +################################################ +################################.############### +###############################..##############. +##############################.#.#############.# +#############################....############... 
+############################.###.###########.### +###########################..##..##########..##. +##########################.#.#.#.#########.#.#.# +#########################........########....... +########################.#######.#######.####### +#######################..######..######..######. +######################.#.#####.#.#####.#.#####.# +#####################....####....####....####... +####################.###.###.###.###.###.###.### +###################..##..##..##..##..##..##..##. +##################.#.#.#.#.#.#.#.#.#.#.#.#.#.#.# +#################............................... +################.############################### +###############..##############################. +##############.#.#############################.# +#############....############################... +############.###.###########################.### +###########..##..##########################..##. +##########.#.#.#.#########################.#.#.# +#########........########################....... +########.#######.#######################.####### +#######..######..######################..######. +######.#.#####.#.#####################.#.#####.# +#####....####....####################....####... +####.###.###.###.###################.###.###.### +###..##..##..##..##################..##..##..##. +##.#.#.#.#.#.#.#.#################.#.#.#.#.#.#.# +#................################............... +.###############.###############.############### +.##############..##############..##############. +.#############.#.#############.#.#############.# +.############....############....############... +.###########.###.###########.###.###########.### +.##########..##..##########..##..##########..##. +.#########.#.#.#.#########.#.#.#.#########.#.#.# +.########........########........########....... +.#######.#######.#######.#######.#######.####### +.######..######..######..######..######..######. +.#####.#.#####.#.#####.#.#####.#.#####.#.#####.# +.####....####....####....####....####....####... +.###.###.###.###.###.###.###.###.###.###.###.### +.##..##..##..##..##..##..##..##..##..##..##..##. +.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.# +50 10 4 +.......... +#####..... +#####..... +.####..... +#.###..... +##.##..... +.......... +#####..... +#####..... +####...... +###.#..... +##.##..... +.......... +#####..... +#####..... +.####..... +#.###..... +##.##..... +.......... +#####..... +#......... +.#........ +..#....... +...#...... +.......... +....#..... +...#...... +..#....... +.#........ +.......... +#####..... +#####..... +.####..... +#.###..... +##.##..... +.......... +#####..... +#####..... +####...... +###.#..... +##.##..... +.......... +#####..... +#####..... +#####..... +#####..... +#####..... +.......... +#####..... +#####..... +50 10 5 +.......... +#####..... +...##..... +.......... +.......... +.......... +#####..... +##........ +.......... +.......... +.......... +#####..... +##........ +.......... +.......... +.......... +#####..... +#......... +.#........ +..#....... +...#...... +.......... +....#..... +...#...... +..#....... +.#........ +.......... +#####..... +#####..... +####...... +###.#..... +##.##..... +.......... +#####..... +#####..... +.####..... +#.###..... +##.##..... +.......... +#####..... +#......... +.#........ +..#....... +...#...... +.......... +....#..... +...#...... +..#....... +.#........ +.......... +50 10 4 +.......... +#####..... +#......... +.#........ +..#....... +...#...... +.......... +....#..... +...#...... +..#....... +.#........ +.......... +#####..... +#####..... 
+####...... +###.#..... +##.##..... +.......... +#####..... +#####..... +#####..... +#####..... +#####..... +.......... +#####..... +##........ +.......... +.......... +.......... +#####..... +#####..... +#####..... +#####..... +#####..... +.......... +#####..... +##........ +.......... +.......... +.......... +#####..... +##........ +.......... +.......... +.......... +#####..... +#####..... +####...... +###.#..... +##.##..... +50 10 5 +.......... +#####..... +##........ +.......... +.......... +.......... +#####..... +#####..... +#####..... +#####..... +#####..... +.......... +#####..... +##........ +.......... +.......... +.......... +#####..... +#####..... +.####..... +#.###..... +##.##..... +.......... +#####..... +#####..... +####...... +###.#..... +##.##..... +.......... +#####..... +...##..... +.......... +.......... +.......... +#####..... +#####..... +#####..... +#####..... +#####..... +.......... +#####..... +#####..... +####...... +###.#..... +##.##..... +.......... +#####..... +#......... +.#........ +..#....... +35 41 11 +........................................# +######################################### +##.......####.#..######.##.###...#####.## +.##.#.#...#.###...##...#..#.#..##..###### +.#...##....#.###...##.#.##.#.###...####.. +.###...##.##..####..##.#.#####.#...#.#... +..#...#.##..#...##..###..##...###...#.#.. +.#.####.##.##.###.....#..#..##.###..#.##. +##..##..#...##.###.#...####...#..##....#. +#....#..##.#.#.#......####.#.....#...#.#. +#.##.#####......####......###.###..###.#. +##..######...######.##.#.##.......#...#.. +.#.....###......#####...#..#.#.###...##.# +...##.##.##..##...####.#.###...#..#.##..# +....###.#.#..#...###..###.###..#####...## +....##.##..#.#.#.#.#####...##..######.... +#.#.##.##.#...#####....##.#.#...#.##.#..# +#.##.##.##.#...#.#.####...#..#.......##.# +.##.#..###..####.#..###...#...###.##.##.. +.####.#.#######.#......##....#######..##. +.#..#...#.#.####..#.######.#.#..##.#.#### +...#.###..#.##.#.###.#.#....#.###.#.#...# +..#.#.####....###...#..##..#####.#.###### +#......####.#..##.....#####.##...###..... +##..##..####......#.#.##..##...###.#..... +#.#.####.####.......##......####.###..### +##...###.#...#.####.##.#........##..#.### +....#..#####.#....#.##...###..#####.#.### +####.#.###.........####..###.#..######.## +.#..########..###..#####.######.###.#...# +.##..#.##..#....####...#.###.....##.#...# +##.#..##.##..##.##...##.##.##.#.##.###### +..#..#..####..##...###.#...#.....###..#.. +####.#..####.###...##..#.#.###.#..#..#### +#...#..#..#.#...#...#.#.##.##.#...###.#.# +31 41 12 +......................................... +#################################.....#.. +.#..........#.......#....##....#...#..... +.........#......#.#...#...#..#........#.. +#......#.#......#...#.........#.........# +.................#....#...#...##......... +.......#.#..#.....#..#.....##........#..# +.....#..#..#......................#..#... +..............#....##....#...#..#..#....# +...#.#.#........##.#..#..........#......# +...#......#..#......#....#....#....#..... +......##.#...#.##..........#............. +.......##.#.#..#...#.....#.#..#.......... +.........#..........#.................#.. +.#....#..#......#.......#.#..#..####.##.. +#...#................##...#..........#... +..........#...#.#..#..###..#...#......... +........##.......#.....##.#......#...#.#. +.#.....#.#..#.....#.#..##.#.#...........# +.......####...#.#.........#...#.#........ +.##.................#.#.#................ +.....###.#...#..#.#..............#....... +.....#...#.....#........#....##.......#.. 
+.........#...........##.#..#.....##...... +...#....#.........#...#...#.#............ +.....#..............#..............#....# +#.##...#.............#....#.#..#......#.. +........#...#...##.............#.#....... +.......#.......#..............#.......... +.....#.........#.........#..#...#..#....# +####..#...#.#.....##...........#.#.#.#.#.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/5091921/output6890943.txt Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,50 @@ +Case #1: Yes 2 +Case #2: Yes 2 +Case #3: Yes 1 +Case #4: Yes 0 +Case #5: No +Case #6: No +Case #7: Yes 6 +Case #8: Yes 6 +Case #9: No +Case #10: Yes 1 +Case #11: Yes 6 +Case #12: Yes 0 +Case #13: No +Case #14: Yes 22 +Case #15: Yes 1225 +Case #16: Yes 178 +Case #17: No +Case #18: Yes 1 +Case #19: Yes 7 +Case #20: Yes 2 +Case #21: Yes 1 +Case #22: No +Case #23: Yes 3 +Case #24: Yes 1 +Case #25: Yes 7 +Case #26: No +Case #27: Yes 2 +Case #28: Yes 4 +Case #29: Yes 2 +Case #30: Yes 1 +Case #31: Yes 2 +Case #32: Yes 20 +Case #33: Yes 161 +Case #34: Yes 48 +Case #35: No +Case #36: Yes 218 +Case #37: Yes 51 +Case #38: Yes 247 +Case #39: Yes 32 +Case #40: Yes 31 +Case #41: Yes 31 +Case #42: Yes 25 +Case #43: Yes 17 +Case #44: Yes 2 +Case #45: Yes 61 +Case #46: Yes 25 +Case #47: No +Case #48: No +Case #49: Yes 8 +Case #50: Yes 0
--- a/test/compiler/6795161/Test.java Wed Jun 01 17:09:56 2011 +0100 +++ b/test/compiler/6795161/Test.java Thu Jun 02 18:59:50 2011 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ * @test * @bug 6795161 * @summary Escape analysis leads to data corruption - * @run main/othervm -server -Xcomp -XX:CompileOnly=Test -XX:+DoEscapeAnalysis Test + * @run main/othervm -server -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:CompileOnly=Test -XX:+DoEscapeAnalysis Test */ class Test_Class_1 {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/6796786/Test6796786.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6796786 + * @summary invalid FP identity transform - (a - b) -> b - a + * + * @run main/othervm -Xbatch Test6796786 + */ + +public class Test6796786 { + static volatile float d1; + static volatile float d2; + + public static void main(String[] args) { + int total = 0; + for (int i = 0; i < 100000; i++) { + if (Float.floatToRawIntBits(- (d1 - d2)) == Float.floatToRawIntBits(-0.0f)) { + total++; + } + } + if (total != 100000) { + throw new InternalError(); + } + } +}
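For context on the 6796786 change above: the transform flagged as invalid, -(a - b) -> b - a, is unsound under IEEE 754 because of signed zero. When a == b, -(a - b) evaluates to -0.0f while b - a evaluates to +0.0f, and Float.floatToRawIntBits distinguishes the two encodings, which is exactly what the regression test keys on. A minimal stand-alone sketch, not part of this changeset (the class name is invented for illustration):

public class SignedZeroSketch {
    public static void main(String[] args) {
        float a = 0.0f, b = 0.0f;
        // -(a - b) negates +0.0f, producing -0.0f (raw bits 0x80000000)
        int negated = Float.floatToRawIntBits(-(a - b));
        // the rewritten form b - a produces +0.0f (raw bits 0x00000000)
        int rewritten = Float.floatToRawIntBits(b - a);
        System.out.println(Integer.toHexString(negated));   // 80000000
        System.out.println(Integer.toHexString(rewritten)); // 0
        System.out.println(negated == rewritten);           // false
    }
}

Running the sketch on any conforming JVM prints different bit patterns for the two forms, so an optimizer that applies the rewrite changes observable behaviour.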
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/7041100/Test7041100.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 7041100 + * @summary The load in String.equals intrinsic executed before null check + * + * @run main/othervm -Xbatch Test7041100 abc def + */ + +public class Test7041100 { + + static String n = null; + public static void main(String[] args) throws Exception { + for (int i = 0; i < 10000; i++) { + stringEQ(args[0], args[1]); + stringEQ(args[0], args[0]); + stringEQ(args[0], n); + stringEQ(n, args[0]); + } + } + + public static boolean stringEQ(String a, String b) { + if (a == b) + return true; + if (a == null || b == null) + return false; + else + return a.equals(b); + } +}
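The 7041100 test above targets the String.equals intrinsic, whose load was executed before the null check on the argument; at the Java level, String.equals(null) is specified to return false without dereferencing the argument, so compiled code could misbehave where the interpreter would not. A tiny stand-alone sketch of the Java-level contract the intrinsic has to preserve (hypothetical class name, nothing from the changeset):

public class EqualsNullSketch {
    public static void main(String[] args) {
        String s = "abc";
        String t = null;
        // equals(null) must return false and must not touch the argument
        System.out.println(s.equals(t));                            // false
        // a null receiver still needs an explicit guard, as stringEQ does above
        System.out.println(t == s || (t != null && t.equals(s)));   // false
    }
}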
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/7042153/Test7042153.java Thu Jun 02 18:59:50 2011 +0100 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 7042153 + * @summary Bad folding of IfOps with unloaded constant arguments in C1 + * + * @run main/othervm -Xcomp Test7042153 + */ + +import java.lang.reflect.*; + +public class Test7042153 { + static public class Bar { } + static public class Foo { } + + static volatile boolean z; + public static void main(String [] args) { + Class cx = Bar.class; + Class cy = Foo.class; + z = (cx == cy); + } +}
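Test7042153 above exercises C1 folding of IfOps whose inputs are constant class references that may still be unloaded when the method is compiled (-Xcomp forces compilation up front); whatever constant the folding produces has to match the run-time result of the comparison. A minimal stand-alone illustration of the expected semantics (illustrative class name only, not from the changeset):

public class ClassConstantSketch {
    static class Bar { }
    static class Foo { }

    public static void main(String[] args) {
        // Distinct class literals are distinct objects; an optimizer folding
        // this comparison to a constant must fold it to false, never true.
        Class<?> cx = Bar.class;
        Class<?> cy = Foo.class;
        System.out.println(cx == cy);   // false
    }
}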