# HG changeset patch # User Andrew John Hughes # Date 1395893957 0 # Node ID ed2108ad126a147050d5c19397680331f7e97e65 # Parent 531847dfec6f16ea13f2fdbda9bec1b108a8d538 Allow JSR292 patches to be turned on or off, independently of Zero. 2014-03-26 Andrew John Hughes * patches/zero/7023639-8000780-jsr292_fast_path.patch, * patches/zero/7192406-exact_return_type_info.patch, * patches/zero/7196242-loopsandthreads_crashed.patch, * patches/zero/7200949-jruby_fail.patch, * patches/zero/8029507-jvm_method_processing.patch, * patches/zero/arm-7023639.patch: Move patches to... * INSTALL: Document --enable-jsr292-update. * Makefile.am: (ICEDTEA_PATCHES): Fix path to JSR292 patches and enable if JSR292 update option is on, rather than tying the patches to Zero. (EXTRA_DIST): Update path to JSR292 patches. * acinclude.m4: (IT_ENABLE_ARM32JIT): Fix text regarding default. (IT_ENABLE_JSR292_UPDATE): Turn on or off the JSR292 patches. They are on by default if building Zero without the ARM32 JIT. * configure.ac: Replace invocation of IT_ENABLE_ARM32JIT with IT_ENABLE_JSR292_UPDATE (which depends on the former). * patches/jsr292/7023639-8000780-jsr292_fast_path.patch, * patches/jsr292/7192406-exact_return_type_info.patch, * patches/jsr292/7196242-loopsandthreads_crashed.patch, * patches/jsr292/7200949-jruby_fail.patch, * patches/jsr292/8029507-jvm_method_processing.patch, * patches/jsr292/arm-7023639.patch: ... here. diff -r 531847dfec6f -r ed2108ad126a ChangeLog --- a/ChangeLog Thu Mar 27 03:50:20 2014 +0000 +++ b/ChangeLog Thu Mar 27 04:19:17 2014 +0000 @@ -1,3 +1,30 @@ +2014-03-26 Andrew John Hughes + + * patches/zero/7023639-8000780-jsr292_fast_path.patch, + * patches/zero/7192406-exact_return_type_info.patch, + * patches/zero/7196242-loopsandthreads_crashed.patch, + * patches/zero/7200949-jruby_fail.patch, + * patches/zero/8029507-jvm_method_processing.patch, + * patches/zero/arm-7023639.patch: Move patches to... + * INSTALL: Document --enable-jsr292-update. 
+ * Makefile.am: + (ICEDTEA_PATCHES): Fix path to JSR292 patches and + enable if JSR292 update option is on, rather than tying + the patches to Zero. + (EXTRA_DIST): Update path to JSR292 patches. + * acinclude.m4: + (IT_ENABLE_ARM32JIT): Fix text regarding default. + (IT_ENABLE_JSR292_UPDATE): Turn on or off the JSR292 patches. + They are on by default if building Zero without the ARM32 JIT. + * configure.ac: Replace invocation of IT_ENABLE_ARM32JIT with + IT_ENABLE_JSR292_UPDATE (which depends on the former). + * patches/jsr292/7023639-8000780-jsr292_fast_path.patch, + * patches/jsr292/7192406-exact_return_type_info.patch, + * patches/jsr292/7196242-loopsandthreads_crashed.patch, + * patches/jsr292/7200949-jruby_fail.patch, + * patches/jsr292/8029507-jvm_method_processing.patch, + * patches/jsr292/arm-7023639.patch: ... here. + 2014-03-26 Andrew John Hughes Perform configure checks using ecj.jar diff -r 531847dfec6f -r ed2108ad126a INSTALL --- a/INSTALL Thu Mar 27 03:50:20 2014 +0000 +++ b/INSTALL Thu Mar 27 04:19:17 2014 +0000 @@ -176,6 +176,7 @@ * --enable-Werror: Turn gcc & javac warnings into errors. * --disable-jar-compression: Don't compress the OpenJDK JAR files. * --disable-downloading: Don't download tarballs if not available; fail instead. +* --enable-jsr292-update: Patch OpenJDK sources with a backport of S7023639 and associated patches. Other options may be supplied which enable or disable new features. These are documented fully in the relevant section below. 
diff -r 531847dfec6f -r ed2108ad126a Makefile.am --- a/Makefile.am Thu Mar 27 03:50:20 2014 +0000 +++ b/Makefile.am Thu Mar 27 04:19:17 2014 +0000 @@ -310,14 +310,14 @@ ICEDTEA_PATCHES += patches/nss-not-enabled-config.patch endif -if ZERO_BUILD +if ENABLE_JSR292 ICEDTEA_PATCHES += \ - patches/zero/7023639-8000780-jsr292_fast_path.patch \ - patches/zero/7196242-loopsandthreads_crashed.patch \ - patches/zero/7192406-exact_return_type_info.patch \ - patches/zero/7200949-jruby_fail.patch \ - patches/zero/8029507-jvm_method_processing.patch \ - patches/zero/arm-7023639.patch + patches/jsr292/7023639-8000780-jsr292_fast_path.patch \ + patches/jsr292/7196242-loopsandthreads_crashed.patch \ + patches/jsr292/7192406-exact_return_type_info.patch \ + patches/jsr292/7200949-jruby_fail.patch \ + patches/jsr292/8029507-jvm_method_processing.patch \ + patches/jsr292/arm-7023639.patch endif ICEDTEA_PATCHES += $(DISTRIBUTION_PATCHES) @@ -779,7 +779,7 @@ $(top_srcdir)/patches/boot/*.patch \ $(top_srcdir)/patches/cacao/*.patch \ $(top_srcdir)/patches/jamvm/*.patch \ - $(top_srcdir)/patches/zero/*.patch \ + $(top_srcdir)/patches/jsr292/*.patch \ tools-copy contrib overlays \ javaws.png javaws.desktop \ jconsole.desktop policytool.desktop \ diff -r 531847dfec6f -r ed2108ad126a acinclude.m4 --- a/acinclude.m4 Thu Mar 27 03:50:20 2014 +0000 +++ b/acinclude.m4 Thu Mar 27 04:19:17 2014 +0000 @@ -2508,7 +2508,7 @@ [ AC_MSG_CHECKING([whether to enable the ARM32 JIT]) AC_ARG_ENABLE([arm32-jit], - [AS_HELP_STRING(--enable-arm32-jit,build with the ARM32 JIT [[default=no]])], + [AS_HELP_STRING(--enable-arm32-jit,build with the ARM32 JIT [[default=yes]])], [ case "${enableval}" in yes) @@ -2525,3 +2525,36 @@ AC_MSG_RESULT([$enable_arm32jit]) AM_CONDITIONAL([ENABLE_ARM32JIT], test x"${enable_arm32jit}" = "xyes") ]) + +AC_DEFUN([IT_ENABLE_JSR292_UPDATE], +[ + AC_REQUIRE([IT_SET_ARCH_SETTINGS]) + AC_REQUIRE([IT_ENABLE_ZERO_BUILD]) + AC_REQUIRE([IT_ENABLE_ARM32JIT]) + AC_MSG_CHECKING([whether to 
enable the JSR292 update in 7023639]) + AC_ARG_ENABLE([jsr292-update], + [AS_HELP_STRING(--enable-jsr292-update,build with the JSR292 update [[default=yes for zero]])], + [ + case "${enableval}" in + yes) + enable_jsr292=yes + ;; + *) + enable_jsr292=no + ;; + esac + ], + [ + if test "x${use_zero}" = xyes; then + if test "x${JRE_ARCH_DIR}" = xarm -a "x${enable_arm32jit}" = "xyes"; then + enable_jsr292=no; + else + enable_jsr292=yes; + fi + else + enable_jsr292=no; + fi + ]) + AC_MSG_RESULT([$enable_jsr292]) + AM_CONDITIONAL([ENABLE_JSR292], test x"${enable_jsr292}" = "xyes") +]) diff -r 531847dfec6f -r ed2108ad126a configure.ac --- a/configure.ac Thu Mar 27 03:50:20 2014 +0000 +++ b/configure.ac Thu Mar 27 04:19:17 2014 +0000 @@ -180,7 +180,7 @@ IT_ENABLE_JAR_COMPRESSION IT_SET_SHARK_BUILD IT_CHECK_ADDITIONAL_VMS -IT_ENABLE_ARM32JIT +IT_ENABLE_JSR292_UPDATE IT_WITH_VERSION_SUFFIX IT_WITH_PROJECT diff -r 531847dfec6f -r ed2108ad126a patches/jsr292/7023639-8000780-jsr292_fast_path.patch --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/patches/jsr292/7023639-8000780-jsr292_fast_path.patch Thu Mar 27 04:19:17 2014 +0000 @@ -0,0 +1,28468 @@ +# HG changeset patch +# User andrew +# Date 1391696693 0 +# Thu Feb 06 14:24:53 2014 +0000 +# Node ID 19ac51ce4be77e6895816f9823bce63a72392e89 +# Parent 3442eb7ef2d216d6bf655d537929a2d31a76a321 +7023639: JSR 292 method handle invocation needs a fast path for compiled code +6984705: JSR 292 method handle creation should not go through JNI +Summary: remove assembly code for JDK 7 chained method handles +Reviewed-by: jrose, twisti, kvn, mhaupt +Contributed-by: John Rose , Christian Thalinger , Michael Haupt + +8000780: [Backport from jdk8] Fix zero fail to build in icedtea7-head. +Summary: Update Zero in icedtea7 to use the hsx24 b25+ java level MLVM + hooks (MLVM Lazy) now in jdk8 and jdk7u-dev head. 
+Reviewed-by: rkennke ( Roman Kenbke ) + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java Thu Feb 06 14:24:53 2014 +0000 +@@ -93,7 +93,6 @@ + public boolean isUncommonTrapStub() { return false; } + public boolean isExceptionStub() { return false; } + public boolean isSafepointStub() { return false; } +- public boolean isRicochetBlob() { return false; } + public boolean isAdapterBlob() { return false; } + + // Fine grain nmethod support: isNmethod() == isJavaMethod() || isNativeMethod() || isOSRMethod() +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java Thu Feb 06 14:24:53 2014 +0000 +@@ -57,7 +57,6 @@ + virtualConstructor.addMapping("BufferBlob", BufferBlob.class); + virtualConstructor.addMapping("nmethod", NMethod.class); + virtualConstructor.addMapping("RuntimeStub", RuntimeStub.class); +- virtualConstructor.addMapping("RicochetBlob", RicochetBlob.class); + virtualConstructor.addMapping("AdapterBlob", AdapterBlob.class); + virtualConstructor.addMapping("MethodHandlesAdapterBlob", MethodHandlesAdapterBlob.class); + virtualConstructor.addMapping("SafepointBlob", SafepointBlob.class); +@@ -127,10 +126,6 @@ + Assert.that(result.blobContains(start) || result.blobContains(start.addOffsetTo(8)), + "found wrong CodeBlob"); + } +- if (result.isRicochetBlob()) { +- // This should probably be done for other SingletonBlobs +- return VM.getVM().ricochetBlob(); +- } + return result; + } + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/code/RicochetBlob.java +--- 
openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/RicochetBlob.java Tue Jan 14 20:24:44 2014 -0500 ++++ /dev/null Thu Jan 01 00:00:00 1970 +0000 +@@ -1,70 +0,0 @@ +-/* +- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-package sun.jvm.hotspot.code; +- +-import java.util.*; +-import sun.jvm.hotspot.debugger.*; +-import sun.jvm.hotspot.runtime.*; +-import sun.jvm.hotspot.types.*; +- +-/** RicochetBlob (currently only used by Compiler 2) */ +- +-public class RicochetBlob extends SingletonBlob { +- static { +- VM.registerVMInitializedObserver(new Observer() { +- public void update(Observable o, Object data) { +- initialize(VM.getVM().getTypeDataBase()); +- } +- }); +- } +- +- private static void initialize(TypeDataBase db) { +- Type type = db.lookupType("RicochetBlob"); +- +- bounceOffsetField = type.getCIntegerField("_bounce_offset"); +- exceptionOffsetField = type.getCIntegerField("_exception_offset"); +- } +- +- private static CIntegerField bounceOffsetField; +- private static CIntegerField exceptionOffsetField; +- +- public RicochetBlob(Address addr) { +- super(addr); +- } +- +- public boolean isRicochetBlob() { +- return true; +- } +- +- public Address bounceAddr() { +- return codeBegin().addOffsetTo(bounceOffsetField.getValue(addr)); +- } +- +- public boolean returnsToBounceAddr(Address pc) { +- Address bouncePc = bounceAddr(); +- return (pc.equals(bouncePc) || pc.addOffsetTo(Frame.pcReturnOffset()).equals(bouncePc)); +- } +- +-} +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java Thu Feb 06 14:24:53 2014 +0000 +@@ -147,12 +147,6 @@ + } + } + +- public boolean isRicochetFrame() { +- CodeBlob cb = VM.getVM().getCodeCache().findBlob(getPC()); +- RicochetBlob rcb = VM.getVM().ricochetBlob(); +- return (cb == rcb && rcb != null && rcb.returnsToBounceAddr(getPC())); +- } +- + public boolean isCompiledFrame() { + if (Assert.ASSERTS_ENABLED) { + Assert.that(!VM.getVM().isCore(), "noncore builds only"); +@@ -216,8 +210,7 @@ + public Frame 
realSender(RegisterMap map) { + if (!VM.getVM().isCore()) { + Frame result = sender(map); +- while (result.isRuntimeFrame() || +- result.isRicochetFrame()) { ++ while (result.isRuntimeFrame()) { + result = result.sender(map); + } + return result; +@@ -631,9 +624,6 @@ + if (Assert.ASSERTS_ENABLED) { + Assert.that(cb != null, "sanity check"); + } +- if (cb == VM.getVM().ricochetBlob()) { +- oopsRicochetDo(oopVisitor, regMap); +- } + if (cb.getOopMaps() != null) { + OopMapSet.oopsDo(this, cb, regMap, oopVisitor, VM.getVM().isDebugging()); + +@@ -650,10 +640,6 @@ + // } + } + +- private void oopsRicochetDo (AddressVisitor oopVisitor, RegisterMap regMap) { +- // XXX Empty for now +- } +- + // FIXME: implement the above routines, plus add + // oops_interpreted_arguments_do and oops_compiled_arguments_do + } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Thu Feb 06 14:24:53 2014 +0000 +@@ -87,8 +87,6 @@ + private StubRoutines stubRoutines; + private Bytes bytes; + +- private RicochetBlob ricochetBlob; +- + /** Flags indicating whether we are attached to a core, C1, or C2 build */ + private boolean usingClientCompiler; + private boolean usingServerCompiler; +@@ -628,18 +626,6 @@ + return stubRoutines; + } + +- public RicochetBlob ricochetBlob() { +- if (ricochetBlob == null) { +- Type ricochetType = db.lookupType("SharedRuntime"); +- AddressField ricochetBlobAddress = ricochetType.getAddressField("_ricochet_blob"); +- Address addr = ricochetBlobAddress.getValue(); +- if (addr != null) { +- ricochetBlob = new RicochetBlob(addr); +- } +- } +- return ricochetBlob; +- } +- + public VMRegImpl getVMRegImplInfo() { + if (vmregImpl == null) { + vmregImpl = new VMRegImpl(); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 
agent/src/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCFrame.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCFrame.java Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCFrame.java Thu Feb 06 14:24:53 2014 +0000 +@@ -571,8 +571,6 @@ + // registers callee-saved, then we will have to copy over + // the RegisterMap update logic from the Intel code. + +- if (isRicochetFrame()) return senderForRicochetFrame(map); +- + // The constructor of the sender must know whether this frame is interpreted so it can set the + // sender's _interpreter_sp_adjustment field. + if (VM.getVM().getInterpreter().contains(pc)) { +@@ -945,20 +943,6 @@ + } + + +- private Frame senderForRicochetFrame(SPARCRegisterMap map) { +- if (DEBUG) { +- System.out.println("senderForRicochetFrame"); +- } +- //RicochetFrame* f = RicochetFrame::from_frame(fr); +- // Cf. is_interpreted_frame path of frame::sender +- Address youngerSP = getSP(); +- Address sp = getSenderSP(); +- map.makeIntegerRegsUnsaved(); +- map.shiftWindow(sp, youngerSP); +- boolean thisFrameAdjustedStack = true; // I5_savedSP is live in this RF +- return new SPARCFrame(biasSP(sp), biasSP(youngerSP), thisFrameAdjustedStack); +- } +- + private Frame senderForEntryFrame(RegisterMap regMap) { + SPARCRegisterMap map = (SPARCRegisterMap) regMap; + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCRicochetFrame.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCRicochetFrame.java Tue Jan 14 20:24:44 2014 -0500 ++++ /dev/null Thu Jan 01 00:00:00 1970 +0000 +@@ -1,77 +0,0 @@ +-/* +- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-package sun.jvm.hotspot.runtime.sparc; +- +-import java.util.*; +-import sun.jvm.hotspot.asm.sparc.SPARCRegister; +-import sun.jvm.hotspot.asm.sparc.SPARCRegisters; +-import sun.jvm.hotspot.debugger.*; +-import sun.jvm.hotspot.runtime.*; +-import sun.jvm.hotspot.types.*; +- +-public class SPARCRicochetFrame { +- static { +- VM.registerVMInitializedObserver(new Observer() { +- public void update(Observable o, Object data) { +- initialize(VM.getVM().getTypeDataBase()); +- } +- }); +- } +- +- private SPARCFrame frame; +- +- private static void initialize(TypeDataBase db) { +- // Type type = db.lookupType("MethodHandles::RicochetFrame"); +- +- } +- +- static SPARCRicochetFrame fromFrame(SPARCFrame f) { +- return new SPARCRicochetFrame(f); +- } +- +- private SPARCRicochetFrame(SPARCFrame f) { +- frame = f; +- } +- +- private Address registerValue(SPARCRegister reg) { +- return frame.getSP().addOffsetTo(reg.spOffsetInSavedWindow()).getAddressAt(0); +- } +- +- public Address savedArgsBase() { +- return 
registerValue(SPARCRegisters.L4); +- } +- public Address exactSenderSP() { +- return registerValue(SPARCRegisters.I5); +- } +- public Address senderLink() { +- return frame.getSenderSP(); +- } +- public Address senderPC() { +- return frame.getSenderPC(); +- } +- public Address extendedSenderSP() { +- return savedArgsBase(); +- } +-} +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java Thu Feb 06 14:24:53 2014 +0000 +@@ -269,7 +269,6 @@ + + if (isEntryFrame()) return senderForEntryFrame(map); + if (isInterpretedFrame()) return senderForInterpreterFrame(map); +- if (isRicochetFrame()) return senderForRicochetFrame(map); + + if(cb == null) { + cb = VM.getVM().getCodeCache().findBlob(getPC()); +@@ -288,16 +287,6 @@ + return new X86Frame(getSenderSP(), getLink(), getSenderPC()); + } + +- private Frame senderForRicochetFrame(X86RegisterMap map) { +- if (DEBUG) { +- System.out.println("senderForRicochetFrame"); +- } +- X86RicochetFrame f = X86RicochetFrame.fromFrame(this); +- if (map.getUpdateMap()) +- updateMapWithSavedLink(map, f.senderLinkAddress()); +- return new X86Frame(f.extendedSenderSP(), f.exactSenderSP(), f.senderLink(), f.senderPC()); +- } +- + private Frame senderForEntryFrame(X86RegisterMap map) { + if (DEBUG) { + System.out.println("senderForEntryFrame"); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86RicochetFrame.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86RicochetFrame.java Tue Jan 14 20:24:44 2014 -0500 ++++ /dev/null Thu Jan 01 00:00:00 1970 +0000 +@@ -1,81 +0,0 @@ +-/* +- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-package sun.jvm.hotspot.runtime.x86; +- +-import java.util.*; +-import sun.jvm.hotspot.debugger.*; +-import sun.jvm.hotspot.runtime.*; +-import sun.jvm.hotspot.types.*; +- +-public class X86RicochetFrame extends VMObject { +- static { +- VM.registerVMInitializedObserver(new Observer() { +- public void update(Observable o, Object data) { +- initialize(VM.getVM().getTypeDataBase()); +- } +- }); +- } +- +- private static void initialize(TypeDataBase db) { +- Type type = db.lookupType("MethodHandles::RicochetFrame"); +- +- senderLinkField = type.getAddressField("_sender_link"); +- savedArgsBaseField = type.getAddressField("_saved_args_base"); +- exactSenderSPField = type.getAddressField("_exact_sender_sp"); +- senderPCField = type.getAddressField("_sender_pc"); +- } +- +- private static AddressField senderLinkField; +- private static AddressField savedArgsBaseField; +- private static AddressField exactSenderSPField; +- private static AddressField senderPCField; +- +- static X86RicochetFrame fromFrame(X86Frame f) { +- return new 
X86RicochetFrame(f.getFP().addOffsetTo(- senderLinkField.getOffset())); +- } +- +- private X86RicochetFrame(Address addr) { +- super(addr); +- } +- +- public Address senderLink() { +- return senderLinkField.getValue(addr); +- } +- public Address senderLinkAddress() { +- return addr.addOffsetTo(senderLinkField.getOffset()); +- } +- public Address savedArgsBase() { +- return savedArgsBaseField.getValue(addr); +- } +- public Address extendedSenderSP() { +- return savedArgsBase(); +- } +- public Address exactSenderSP() { +- return exactSenderSPField.getValue(addr); +- } +- public Address senderPC() { +- return senderPCField.getValue(addr); +- } +-} +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 make/solaris/makefiles/fastdebug.make +--- openjdk/hotspot/make/solaris/makefiles/fastdebug.make Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/make/solaris/makefiles/fastdebug.make Thu Feb 06 14:24:53 2014 +0000 +@@ -36,6 +36,14 @@ + ifeq ("${Platform_compiler}", "sparcWorks") + OPT_CFLAGS/SLOWER = -xO2 + ++ifeq ($(COMPILER_REV_NUMERIC), 510) ++# CC 5.10 has bug XXXXX with -xO4 ++OPT_CFLAGS/jvmtiClassFileReconstituter.o = $(OPT_CFLAGS/SLOWER) ++# jvm98 crashes on solaris-i586-fastdebug and solaris-sparc-fastdebug with stack overflow ++OPT_CFLAGS/escape.o = $(OPT_CFLAGS) -xspace ++OPT_CFLAGS/matcher.o = $(OPT_CFLAGS) -xspace ++endif # COMPILER_REV_NUMERIC == 510 ++ + ifeq ($(COMPILER_REV_NUMERIC), 509) + # To avoid jvm98 crash + OPT_CFLAGS/instanceKlass.o = $(OPT_CFLAGS/SLOWER) +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 make/solaris/makefiles/optimized.make +--- openjdk/hotspot/make/solaris/makefiles/optimized.make Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/make/solaris/makefiles/optimized.make Thu Feb 06 14:24:53 2014 +0000 +@@ -32,6 +32,11 @@ + # (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files) + ifeq ("${Platform_compiler}", "sparcWorks") + ++ifeq ($(COMPILER_REV_NUMERIC), 510) ++# CC 5.10 has bug XXXXX with -xO4 
++OPT_CFLAGS/jvmtiClassFileReconstituter.o = $(OPT_CFLAGS/O2) ++endif # COMPILER_REV_NUMERIC == 510 ++ + ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1) + # dtrace cannot handle tail call optimization (6672627, 6693876) + OPT_CFLAGS/jni.o = $(OPT_CFLAGS/DEFAULT) $(OPT_CCFLAGS/NO_TAIL_CALL_OPT) +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 make/solaris/makefiles/product.make +--- openjdk/hotspot/make/solaris/makefiles/product.make Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/make/solaris/makefiles/product.make Thu Feb 06 14:24:53 2014 +0000 +@@ -40,6 +40,11 @@ + # (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files) + ifeq ("${Platform_compiler}", "sparcWorks") + ++ifeq ($(COMPILER_REV_NUMERIC), 510) ++# CC 5.10 has bug XXXXX with -xO4 ++OPT_CFLAGS/jvmtiClassFileReconstituter.o = $(OPT_CFLAGS/O2) ++endif # COMPILER_REV_NUMERIC == 510 ++ + ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1) + # dtrace cannot handle tail call optimization (6672627, 6693876) + OPT_CFLAGS/jni.o = $(OPT_CFLAGS/DEFAULT) $(OPT_CCFLAGS/NO_TAIL_CALL_OPT) +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/assembler_sparc.cpp +--- openjdk/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -44,8 +44,10 @@ + + #ifdef PRODUCT + #define BLOCK_COMMENT(str) /* nothing */ ++#define STOP(error) stop(error) + #else + #define BLOCK_COMMENT(str) block_comment(str) ++#define STOP(error) block_comment(error); stop(error) + #endif + + // Convert the raw encoding form into the form expected by the +@@ -992,7 +994,7 @@ + save_frame(0); // to avoid clobbering O0 + ld_ptr(pc_addr, L0); + br_null_short(L0, Assembler::pt, PcOk); +- stop("last_Java_pc not zeroed before leaving Java"); ++ STOP("last_Java_pc not zeroed before leaving Java"); + bind(PcOk); + + // Verify that flags was zeroed on return to Java +@@ -1001,7 +1003,7 @@ + tst(L0); + br(Assembler::zero, 
false, Assembler::pt, FlagsOk); + delayed() -> restore(); +- stop("flags not zeroed before leaving Java"); ++ STOP("flags not zeroed before leaving Java"); + bind(FlagsOk); + #endif /* ASSERT */ + // +@@ -1021,7 +1023,7 @@ + andcc(last_java_sp, 0x01, G0); + br(Assembler::notZero, false, Assembler::pt, StackOk); + delayed()->nop(); +- stop("Stack Not Biased in set_last_Java_frame"); ++ STOP("Stack Not Biased in set_last_Java_frame"); + bind(StackOk); + #endif // ASSERT + assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame"); +@@ -1650,23 +1652,28 @@ + + + void RegistersForDebugging::print(outputStream* s) { ++ FlagSetting fs(Debugging, true); + int j; +- for ( j = 0; j < 8; ++j ) +- if ( j != 6 ) s->print_cr("i%d = 0x%.16lx", j, i[j]); +- else s->print_cr( "fp = 0x%.16lx", i[j]); ++ for (j = 0; j < 8; ++j) { ++ if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); } ++ else { s->print( "fp = " ); os::print_location(s, i[j]); } ++ } + s->cr(); + +- for ( j = 0; j < 8; ++j ) +- s->print_cr("l%d = 0x%.16lx", j, l[j]); ++ for (j = 0; j < 8; ++j) { ++ s->print("l%d = ", j); os::print_location(s, l[j]); ++ } + s->cr(); + +- for ( j = 0; j < 8; ++j ) +- if ( j != 6 ) s->print_cr("o%d = 0x%.16lx", j, o[j]); +- else s->print_cr( "sp = 0x%.16lx", o[j]); ++ for (j = 0; j < 8; ++j) { ++ if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); } ++ else { s->print( "sp = " ); os::print_location(s, o[j]); } ++ } + s->cr(); + +- for ( j = 0; j < 8; ++j ) +- s->print_cr("g%d = 0x%.16lx", j, g[j]); ++ for (j = 0; j < 8; ++j) { ++ s->print("g%d = ", j); os::print_location(s, g[j]); ++ } + s->cr(); + + // print out floats with compression +@@ -2020,8 +2027,8 @@ + char* b = new char[1024]; + sprintf(b, "untested: %s", what); + +- if ( ShowMessageBoxOnError ) stop(b); +- else warn(b); ++ if (ShowMessageBoxOnError) { STOP(b); } ++ else { warn(b); } + } + + +@@ -2998,26 +3005,60 @@ + } + + ++// virtual method calling ++void 
MacroAssembler::lookup_virtual_method(Register recv_klass, ++ RegisterOrConstant vtable_index, ++ Register method_result) { ++ assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); ++ Register sethi_temp = method_result; ++ const int base = (instanceKlass::vtable_start_offset() * wordSize + ++ // method pointer offset within the vtable entry: ++ vtableEntry::method_offset_in_bytes()); ++ RegisterOrConstant vtable_offset = vtable_index; ++ // Each of the following three lines potentially generates an instruction. ++ // But the total number of address formation instructions will always be ++ // at most two, and will often be zero. In any case, it will be optimal. ++ // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x). ++ // If vtable_index is a constant, we will have at most (set B+X<is_global()) sub_2 = L0; + if (!sup_2->is_global()) sup_2 = L1; +- +- save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); ++ bool did_save = false; ++ if (temp_reg == noreg || temp2_reg == noreg) { ++ temp_reg = L2; ++ temp2_reg = L3; ++ save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); ++ sub_klass = sub_2; ++ super_klass = sup_2; ++ did_save = true; ++ } ++ Label L_failure, L_pop_to_failure, L_pop_to_success; ++ check_klass_subtype_fast_path(sub_klass, super_klass, ++ temp_reg, temp2_reg, ++ (did_save ? &L_pop_to_success : &L_success), ++ (did_save ? 
&L_pop_to_failure : &L_failure), NULL); ++ ++ if (!did_save) ++ save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); + check_klass_subtype_slow_path(sub_2, sup_2, + L2, L3, L4, L5, + NULL, &L_pop_to_failure); + + // on success: ++ bind(L_pop_to_success); + restore(); + ba_short(L_success); + +@@ -3234,54 +3275,6 @@ + } + + +-void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg, +- Register temp_reg, +- Label& wrong_method_type) { +- assert_different_registers(mtype_reg, mh_reg, temp_reg); +- // compare method type against that of the receiver +- RegisterOrConstant mhtype_offset = delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg); +- load_heap_oop(mh_reg, mhtype_offset, temp_reg); +- cmp_and_brx_short(temp_reg, mtype_reg, Assembler::notEqual, Assembler::pn, wrong_method_type); +-} +- +- +-// A method handle has a "vmslots" field which gives the size of its +-// argument list in JVM stack slots. This field is either located directly +-// in every method handle, or else is indirectly accessed through the +-// method handle's MethodType. This macro hides the distinction. 
+-void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg, +- Register temp_reg) { +- assert_different_registers(vmslots_reg, mh_reg, temp_reg); +- // load mh.type.form.vmslots +- Register temp2_reg = vmslots_reg; +- load_heap_oop(Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg); +- load_heap_oop(Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg); +- ld( Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg); +-} +- +- +-void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) { +- assert(mh_reg == G3_method_handle, "caller must put MH object in G3"); +- assert_different_registers(mh_reg, temp_reg); +- +- // pick out the interpreted side of the handler +- // NOTE: vmentry is not an oop! +- ld_ptr(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg); +- +- // off we go... +- ld_ptr(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes(), temp_reg); +- jmp(temp_reg, 0); +- +- // for the various stubs which take control at this point, +- // see MethodHandles::generate_method_handle_stub +- +- // Some callers can fill the delay slot. 
+- if (emit_delayed_nop) { +- delayed()->nop(); +- } +-} +- +- + RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, + Register temp_reg, + int extra_slot_offset) { +@@ -3914,7 +3907,7 @@ + ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); + or3(t1, t2, t3); + cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); +- stop("assert(top >= start)"); ++ STOP("assert(top >= start)"); + should_not_reach_here(); + + bind(next); +@@ -3922,13 +3915,13 @@ + ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); + or3(t3, t2, t3); + cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); +- stop("assert(top <= end)"); ++ STOP("assert(top <= end)"); + should_not_reach_here(); + + bind(next2); + and3(t3, MinObjAlignmentInBytesMask, t3); + cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); +- stop("assert(aligned)"); ++ STOP("assert(aligned)"); + should_not_reach_here(); + + bind(ok); +@@ -3976,7 +3969,7 @@ + btst(MinObjAlignmentInBytesMask, obj); + br(Assembler::zero, false, Assembler::pt, L); + delayed()->nop(); +- stop("eden top is not properly aligned"); ++ STOP("eden top is not properly aligned"); + bind(L); + } + #endif // ASSERT +@@ -4013,7 +4006,7 @@ + btst(MinObjAlignmentInBytesMask, top_addr); + br(Assembler::zero, false, Assembler::pt, L); + delayed()->nop(); +- stop("eden top is not properly aligned"); ++ STOP("eden top is not properly aligned"); + bind(L); + } + #endif // ASSERT +@@ -4066,7 +4059,7 @@ + btst(MinObjAlignmentInBytesMask, free); + br(Assembler::zero, false, Assembler::pt, L); + delayed()->nop(); +- stop("updated TLAB free is not properly aligned"); ++ STOP("updated TLAB free is not properly aligned"); + bind(L); + } + #endif // ASSERT +@@ -4164,7 +4157,7 @@ + ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2); + sll_ptr(t2, LogHeapWordSize, t2); + cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok); +- stop("assert(t1 == 
tlab_size)"); ++ STOP("assert(t1 == tlab_size)"); + should_not_reach_here(); + + bind(ok); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/assembler_sparc.hpp +--- openjdk/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -2538,6 +2538,11 @@ + Register temp_reg, Register temp2_reg, + Label& no_such_interface); + ++ // virtual method calling ++ void lookup_virtual_method(Register recv_klass, ++ RegisterOrConstant vtable_index, ++ Register method_result); ++ + // Test sub_klass against super_klass, with fast and slow paths. + + // The fast path produces a tri-state answer: yes / no / maybe-slow. +@@ -2577,12 +2582,6 @@ + Label& L_success); + + // method handles (JSR 292) +- void check_method_handle_type(Register mtype_reg, Register mh_reg, +- Register temp_reg, +- Label& wrong_method_type); +- void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg, +- Register temp_reg); +- void jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop = true); + // offset relative to Gargs of argument at tos[arg_slot]. + // (arg_slot == 0 means the last argument, not the first). + RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, +@@ -2590,7 +2589,7 @@ + int extra_slot_offset = 0); + // Address of Gargs and argument_offset. 
+ Address argument_address(RegisterOrConstant arg_slot, +- Register temp_reg, ++ Register temp_reg = noreg, + int extra_slot_offset = 0); + + // Stack overflow checking +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp +--- openjdk/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -2956,6 +2956,7 @@ + void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { + ciMethod* method = op->profiled_method(); + int bci = op->profiled_bci(); ++ ciMethod* callee = op->profiled_callee(); + + // Update counter for all call types + ciMethodData* md = method->method_data_or_null(); +@@ -2984,9 +2985,11 @@ + + Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); + Bytecodes::Code bc = method->java_code_at_bci(bci); ++ const bool callee_is_static = callee->is_loaded() && callee->is_static(); + // Perform additional virtual call profiling for invokevirtual and + // invokeinterface bytecodes + if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && ++ !callee_is_static && // required for optimized MH invokes + C1ProfileVirtualCalls) { + assert(op->recv()->is_single_cpu(), "recv must be allocated"); + Register recv = op->recv()->as_register(); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/cppInterpreter_sparc.cpp +--- openjdk/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -514,9 +514,9 @@ + // Need to differentiate between igetfield, agetfield, bgetfield etc. + // because they are different sizes. 
+ // Get the type from the constant pool cache +- __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch); +- // Make sure we don't need to mask G1_scratch for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); ++ __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch); ++ // Make sure we don't need to mask G1_scratch after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); + __ cmp(G1_scratch, atos ); + __ br(Assembler::equal, true, Assembler::pt, xreturn_path); + __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/frame_sparc.cpp +--- openjdk/hotspot/src/cpu/sparc/vm/frame_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/frame_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -514,7 +514,6 @@ + // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be + // explicitly recognized. + +- if (is_ricochet_frame()) return sender_for_ricochet_frame(map); + + bool frame_is_interpreted = is_interpreted_frame(); + if (frame_is_interpreted) { +@@ -821,9 +820,7 @@ + values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1); + } + +- if (is_ricochet_frame()) { +- MethodHandles::RicochetFrame::describe(this, values, frame_no); +- } else if (is_interpreted_frame()) { ++ if (is_interpreted_frame()) { + DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp); + DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp); + DESCRIBE_FP_OFFSET(interpreter_frame_padding); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/interp_masm_sparc.cpp +--- openjdk/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -505,7 +505,7 @@ + void InterpreterMacroAssembler::load_receiver(Register param_count, + Register recv) { + sll(param_count, Interpreter::logStackElementSize, 
param_count); +- ld_ptr(Lesp, param_count, recv); // gets receiver Oop ++ ld_ptr(Lesp, param_count, recv); // gets receiver oop + } + + void InterpreterMacroAssembler::empty_expression_stack() { +@@ -767,8 +767,12 @@ + get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size); + ld_ptr(cache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode); + const int shift_count = (1 + byte_no) * BitsPerByte; +- srl( bytecode, shift_count, bytecode); +- and3(bytecode, 0xFF, bytecode); ++ assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) || ++ (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift), ++ "correct shift count"); ++ srl(bytecode, shift_count, bytecode); ++ assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask"); ++ and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode); + } + + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/interpreterGenerator_sparc.hpp +--- openjdk/hotspot/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -32,7 +32,6 @@ + address generate_normal_entry(bool synchronized); + address generate_native_entry(bool synchronized); + address generate_abstract_entry(void); +- address generate_method_handle_entry(void); + address generate_math_entry(AbstractInterpreter::MethodKind kind); + address generate_empty_entry(void); + address generate_accessor_entry(void); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/interpreter_sparc.cpp +--- openjdk/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -255,17 +255,6 @@ + } + + +-// Method handle invoker +-// Dispatch a method of the form 
java.lang.invoke.MethodHandles::invoke(...) +-address InterpreterGenerator::generate_method_handle_entry(void) { +- if (!EnableInvokeDynamic) { +- return generate_abstract_entry(); +- } +- +- return MethodHandles::generate_method_handle_interpreter_entry(_masm); +-} +- +- + //---------------------------------------------------------------------------------------------------- + // Entry points & stack frame layout + // +@@ -395,7 +384,7 @@ + case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break; + case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break; + case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break; +- case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break; ++ + case Interpreter::java_lang_math_sin : break; + case Interpreter::java_lang_math_cos : break; + case Interpreter::java_lang_math_tan : break; +@@ -405,7 +394,9 @@ + case Interpreter::java_lang_math_log10 : break; + case Interpreter::java_lang_ref_reference_get + : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; +- default : ShouldNotReachHere(); break; ++ default: ++ fatal(err_msg("unexpected method kind: %d", kind)); ++ break; + } + + if (entry_point) return entry_point; +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/methodHandles_sparc.cpp +--- openjdk/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -31,452 +31,37 @@ + + #ifdef PRODUCT + #define BLOCK_COMMENT(str) /* nothing */ ++#define STOP(error) stop(error) + #else + #define BLOCK_COMMENT(str) __ block_comment(str) ++#define STOP(error) block_comment(error); __ stop(error) + #endif + + #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") + +-address 
MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm, +- address interpreted_entry) { +- // Just before the actual machine code entry point, allocate space +- // for a MethodHandleEntry::Data record, so that we can manage everything +- // from one base pointer. +- __ align(wordSize); +- address target = __ pc() + sizeof(Data); +- while (__ pc() < target) { +- __ nop(); +- __ align(wordSize); +- } +- +- MethodHandleEntry* me = (MethodHandleEntry*) __ pc(); +- me->set_end_address(__ pc()); // set a temporary end_address +- me->set_from_interpreted_entry(interpreted_entry); +- me->set_type_checking_entry(NULL); +- +- return (address) me; ++// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant. ++static RegisterOrConstant constant(int value) { ++ return RegisterOrConstant(value); + } + +-MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm, +- address start_addr) { +- MethodHandleEntry* me = (MethodHandleEntry*) start_addr; +- assert(me->end_address() == start_addr, "valid ME"); +- +- // Fill in the real end_address: +- __ align(wordSize); +- me->set_end_address(__ pc()); +- +- return me; +-} +- +-// stack walking support +- +-frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) { +- //RicochetFrame* f = RicochetFrame::from_frame(fr); +- // Cf. 
is_interpreted_frame path of frame::sender +- intptr_t* younger_sp = fr.sp(); +- intptr_t* sp = fr.sender_sp(); +- map->make_integer_regs_unsaved(); +- map->shift_window(sp, younger_sp); +- bool this_frame_adjusted_stack = true; // I5_savedSP is live in this RF +- return frame(sp, younger_sp, this_frame_adjusted_stack); +-} +- +-void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) { +- ResourceMark rm; +- RicochetFrame* f = RicochetFrame::from_frame(fr); +- +- // pick up the argument type descriptor: +- Thread* thread = Thread::current(); +- Handle cookie(thread, f->compute_saved_args_layout(true, true)); +- +- // process fixed part +- blk->do_oop((oop*)f->saved_target_addr()); +- blk->do_oop((oop*)f->saved_args_layout_addr()); +- +- // process variable arguments: +- if (cookie.is_null()) return; // no arguments to describe +- +- // the cookie is actually the invokeExact method for my target +- // his argument signature is what I'm interested in +- assert(cookie->is_method(), ""); +- methodHandle invoker(thread, methodOop(cookie())); +- assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method"); +- assert(!invoker->is_static(), "must have MH argument"); +- int slot_count = invoker->size_of_parameters(); +- assert(slot_count >= 1, "must include 'this'"); +- intptr_t* base = f->saved_args_base(); +- intptr_t* retval = NULL; +- if (f->has_return_value_slot()) +- retval = f->return_value_slot_addr(); +- int slot_num = slot_count - 1; +- intptr_t* loc = &base[slot_num]; +- //blk->do_oop((oop*) loc); // original target, which is irrelevant +- int arg_num = 0; +- for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) { +- if (ss.at_return_type()) continue; +- BasicType ptype = ss.type(); +- if (ptype == T_ARRAY) ptype = T_OBJECT; // fold all refs to T_OBJECT +- assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void"); +- slot_num -= type2size[ptype]; +- loc = 
&base[slot_num]; +- bool is_oop = (ptype == T_OBJECT && loc != retval); +- if (is_oop) blk->do_oop((oop*)loc); +- arg_num += 1; +- } +- assert(slot_num == 0, "must have processed all the arguments"); +-} +- +-// Ricochet Frames +-const Register MethodHandles::RicochetFrame::L1_continuation = L1; +-const Register MethodHandles::RicochetFrame::L2_saved_target = L2; +-const Register MethodHandles::RicochetFrame::L3_saved_args_layout = L3; +-const Register MethodHandles::RicochetFrame::L4_saved_args_base = L4; // cf. Gargs = G4 +-const Register MethodHandles::RicochetFrame::L5_conversion = L5; +-#ifdef ASSERT +-const Register MethodHandles::RicochetFrame::L0_magic_number_1 = L0; +-#endif //ASSERT +- +-oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) { +- if (read_cache) { +- oop cookie = saved_args_layout(); +- if (cookie != NULL) return cookie; +- } +- oop target = saved_target(); +- oop mtype = java_lang_invoke_MethodHandle::type(target); +- oop mtform = java_lang_invoke_MethodType::form(mtype); +- oop cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform); +- if (write_cache) { +- (*saved_args_layout_addr()) = cookie; +- } +- return cookie; +-} +- +-void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm, +- // output params: +- int* bounce_offset, +- int* exception_offset, +- int* frame_size_in_words) { +- (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize; +- +- address start = __ pc(); +- +-#ifdef ASSERT +- __ illtrap(0); __ illtrap(0); __ illtrap(0); +- // here's a hint of something special: +- __ set(MAGIC_NUMBER_1, G0); +- __ set(MAGIC_NUMBER_2, G0); +-#endif //ASSERT +- __ illtrap(0); // not reached +- +- // Return values are in registers. +- // L1_continuation contains a cleanup continuation we must return +- // to. 
+- +- (*bounce_offset) = __ pc() - start; +- BLOCK_COMMENT("ricochet_blob.bounce"); +- +- if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); +- trace_method_handle(_masm, "return/ricochet_blob.bounce"); +- +- __ JMP(L1_continuation, 0); +- __ delayed()->nop(); +- __ illtrap(0); +- +- DEBUG_ONLY(__ set(MAGIC_NUMBER_2, G0)); +- +- (*exception_offset) = __ pc() - start; +- BLOCK_COMMENT("ricochet_blob.exception"); +- +- // compare this to Interpreter::rethrow_exception_entry, which is parallel code +- // for example, see TemplateInterpreterGenerator::generate_throw_exception +- // Live registers in: +- // Oexception (O0): exception +- // Oissuing_pc (O1): return address/pc that threw exception (ignored, always equal to bounce addr) +- __ verify_oop(Oexception); +- +- // Take down the frame. +- +- // Cf. InterpreterMacroAssembler::remove_activation. +- leave_ricochet_frame(_masm, /*recv_reg=*/ noreg, I5_savedSP, I7); +- +- // We are done with this activation frame; find out where to go next. +- // The continuation point will be an exception handler, which expects +- // the following registers set up: +- // +- // Oexception: exception +- // Oissuing_pc: the local call that threw exception +- // Other On: garbage +- // In/Ln: the contents of the caller's register window +- // +- // We do the required restore at the last possible moment, because we +- // need to preserve some state across a runtime call. +- // (Remember that the caller activation is unknown--it might not be +- // interpreted, so things like Lscratch are useless in the caller.) 
+- __ mov(Oexception, Oexception ->after_save()); // get exception in I0 so it will be on O0 after restore +- __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller +- __ call_VM_leaf(L7_thread_cache, +- CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), +- G2_thread, Oissuing_pc->after_save()); +- +- // The caller's SP was adjusted upon method entry to accomodate +- // the callee's non-argument locals. Undo that adjustment. +- __ JMP(O0, 0); // return exception handler in caller +- __ delayed()->restore(I5_savedSP, G0, SP); +- +- // (same old exception object is already in Oexception; see above) +- // Note that an "issuing PC" is actually the next PC after the call +-} +- +-void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm, +- Register recv_reg, +- Register argv_reg, +- address return_handler) { +- // does not include the __ save() +- assert(argv_reg == Gargs, ""); +- Address G3_mh_vmtarget( recv_reg, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes()); +- Address G3_amh_conversion(recv_reg, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes()); +- +- // Create the RicochetFrame. +- // Unlike on x86 we can store all required information in local +- // registers. 
+- BLOCK_COMMENT("push RicochetFrame {"); +- __ set(ExternalAddress(return_handler), L1_continuation); +- __ load_heap_oop(G3_mh_vmtarget, L2_saved_target); +- __ mov(G0, L3_saved_args_layout); +- __ mov(Gargs, L4_saved_args_base); +- __ lduw(G3_amh_conversion, L5_conversion); // 32-bit field +- // I5, I6, I7 are already set up +- DEBUG_ONLY(__ set((int32_t) MAGIC_NUMBER_1, L0_magic_number_1)); +- BLOCK_COMMENT("} RicochetFrame"); +-} +- +-void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm, +- Register recv_reg, +- Register new_sp_reg, +- Register sender_pc_reg) { +- assert(new_sp_reg == I5_savedSP, "exact_sender_sp already in place"); +- assert(sender_pc_reg == I7, "in a fixed place"); +- // does not include the __ ret() & __ restore() +- assert_different_registers(recv_reg, new_sp_reg, sender_pc_reg); +- // Take down the frame. +- // Cf. InterpreterMacroAssembler::remove_activation. +- BLOCK_COMMENT("end_ricochet_frame {"); +- if (recv_reg->is_valid()) +- __ mov(L2_saved_target, recv_reg); +- BLOCK_COMMENT("} end_ricochet_frame"); +-} +- +-// Emit code to verify that FP is pointing at a valid ricochet frame. +-#ifndef PRODUCT +-enum { +- ARG_LIMIT = 255, SLOP = 45, +- // use this parameter for checking for garbage stack movements: +- UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP) +- // the slop defends against false alarms due to fencepost errors +-}; +-#endif +- +-#ifdef ASSERT +-void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) { +- // The stack should look like this: +- // ... keep1 | dest=42 | keep2 | magic | handler | magic | recursive args | [RF] +- // Check various invariants. 
+- +- Register O7_temp = O7, O5_temp = O5; +- +- Label L_ok_1, L_ok_2, L_ok_3, L_ok_4; +- BLOCK_COMMENT("verify_clean {"); +- // Magic numbers must check out: +- __ set((int32_t) MAGIC_NUMBER_1, O7_temp); +- __ cmp_and_br_short(O7_temp, L0_magic_number_1, Assembler::equal, Assembler::pt, L_ok_1); +- __ stop("damaged ricochet frame: MAGIC_NUMBER_1 not found"); +- +- __ BIND(L_ok_1); +- +- // Arguments pointer must look reasonable: +-#ifdef _LP64 +- Register FP_temp = O5_temp; +- __ add(FP, STACK_BIAS, FP_temp); +-#else +- Register FP_temp = FP; +-#endif +- __ cmp_and_brx_short(L4_saved_args_base, FP_temp, Assembler::greaterEqualUnsigned, Assembler::pt, L_ok_2); +- __ stop("damaged ricochet frame: L4 < FP"); +- +- __ BIND(L_ok_2); +- // Disable until we decide on it's fate +- // __ sub(L4_saved_args_base, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize, O7_temp); +- // __ cmp(O7_temp, FP_temp); +- // __ br(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok_3); +- // __ delayed()->nop(); +- // __ stop("damaged ricochet frame: (L4 - UNREASONABLE_STACK_MOVE) > FP"); +- +- __ BIND(L_ok_3); +- extract_conversion_dest_type(_masm, L5_conversion, O7_temp); +- __ cmp_and_br_short(O7_temp, T_VOID, Assembler::equal, Assembler::pt, L_ok_4); +- extract_conversion_vminfo(_masm, L5_conversion, O5_temp); +- __ ld_ptr(L4_saved_args_base, __ argument_offset(O5_temp, O5_temp), O7_temp); +- assert(Assembler::is_simm13(RETURN_VALUE_PLACEHOLDER), "must be simm13"); +- __ cmp_and_brx_short(O7_temp, (int32_t) RETURN_VALUE_PLACEHOLDER, Assembler::equal, Assembler::pt, L_ok_4); +- __ stop("damaged ricochet frame: RETURN_VALUE_PLACEHOLDER not found"); +- __ BIND(L_ok_4); +- BLOCK_COMMENT("} verify_clean"); +-} +-#endif //ASSERT +- + void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg) { + if (VerifyMethodHandles) + verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(), temp_reg, temp2_reg, +- 
"AMH argument is a Class"); ++ "MH argument is a Class"); + __ load_heap_oop(Address(klass_reg, java_lang_Class::klass_offset_in_bytes()), klass_reg); + } + +-void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Address conversion_field_addr, Register reg) { +- assert(CONV_VMINFO_SHIFT == 0, "preshifted"); +- assert(CONV_VMINFO_MASK == right_n_bits(BitsPerByte), "else change type of following load"); +- __ ldub(conversion_field_addr.plus_disp(BytesPerInt - 1), reg); ++#ifdef ASSERT ++static int check_nonzero(const char* xname, int x) { ++ assert(x != 0, err_msg("%s should be nonzero", xname)); ++ return x; + } +- +-void MethodHandles::extract_conversion_vminfo(MacroAssembler* _masm, Register conversion_field_reg, Register reg) { +- assert(CONV_VMINFO_SHIFT == 0, "preshifted"); +- __ and3(conversion_field_reg, CONV_VMINFO_MASK, reg); +-} +- +-void MethodHandles::extract_conversion_dest_type(MacroAssembler* _masm, Register conversion_field_reg, Register reg) { +- __ srl(conversion_field_reg, CONV_DEST_TYPE_SHIFT, reg); +- __ and3(reg, 0x0F, reg); +-} +- +-void MethodHandles::load_stack_move(MacroAssembler* _masm, +- Address G3_amh_conversion, +- Register stack_move_reg) { +- BLOCK_COMMENT("load_stack_move {"); +- __ ldsw(G3_amh_conversion, stack_move_reg); +- __ sra(stack_move_reg, CONV_STACK_MOVE_SHIFT, stack_move_reg); +-#ifdef ASSERT +- if (VerifyMethodHandles) { +- Label L_ok, L_bad; +- int32_t stack_move_limit = 0x0800; // extra-large +- __ cmp_and_br_short(stack_move_reg, stack_move_limit, Assembler::greaterEqual, Assembler::pn, L_bad); +- __ cmp(stack_move_reg, -stack_move_limit); +- __ br(Assembler::greater, false, Assembler::pt, L_ok); +- __ delayed()->nop(); +- __ BIND(L_bad); +- __ stop("load_stack_move of garbage value"); +- __ BIND(L_ok); +- } +-#endif +- BLOCK_COMMENT("} load_stack_move"); +-} ++#define NONZERO(x) check_nonzero(#x, x) ++#else //ASSERT ++#define NONZERO(x) (x) ++#endif //ASSERT + + #ifdef ASSERT +-void 
MethodHandles::RicochetFrame::verify() const { +- assert(magic_number_1() == MAGIC_NUMBER_1, ""); +- if (!Universe::heap()->is_gc_active()) { +- if (saved_args_layout() != NULL) { +- assert(saved_args_layout()->is_method(), "must be valid oop"); +- } +- if (saved_target() != NULL) { +- assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value"); +- } +- } +- int conv_op = adapter_conversion_op(conversion()); +- assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS || +- conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS || +- conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF, +- "must be a sane conversion"); +- if (has_return_value_slot()) { +- assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, ""); +- } +-} +- +-void MethodHandles::verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) { +- // Verify that argslot lies within (Gargs, FP]. +- Label L_ok, L_bad; +- BLOCK_COMMENT("verify_argslot {"); +- __ cmp_and_brx_short(Gargs, argslot_reg, Assembler::greaterUnsigned, Assembler::pn, L_bad); +- __ add(FP, STACK_BIAS, temp_reg); // STACK_BIAS is zero on !_LP64 +- __ cmp_and_brx_short(argslot_reg, temp_reg, Assembler::lessEqualUnsigned, Assembler::pt, L_ok); +- __ BIND(L_bad); +- __ stop(error_message); +- __ BIND(L_ok); +- BLOCK_COMMENT("} verify_argslot"); +-} +- +-void MethodHandles::verify_argslots(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, +- Register arg_slot_base_reg, +- Register temp_reg, +- Register temp2_reg, +- bool negate_argslots, +- const char* error_message) { +- // Verify that [argslot..argslot+size) lies within (Gargs, FP). 
+- Label L_ok, L_bad; +- BLOCK_COMMENT("verify_argslots {"); +- if (negate_argslots) { +- if (arg_slots.is_constant()) { +- arg_slots = -1 * arg_slots.as_constant(); +- } else { +- __ neg(arg_slots.as_register(), temp_reg); +- arg_slots = temp_reg; +- } +- } +- __ add(arg_slot_base_reg, __ argument_offset(arg_slots, temp_reg), temp_reg); +- __ add(FP, STACK_BIAS, temp2_reg); // STACK_BIAS is zero on !_LP64 +- __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::greaterUnsigned, Assembler::pn, L_bad); +- // Gargs points to the first word so adjust by BytesPerWord +- __ add(arg_slot_base_reg, BytesPerWord, temp_reg); +- __ cmp_and_brx_short(Gargs, temp_reg, Assembler::lessEqualUnsigned, Assembler::pt, L_ok); +- __ BIND(L_bad); +- __ stop(error_message); +- __ BIND(L_ok); +- BLOCK_COMMENT("} verify_argslots"); +-} +- +-// Make sure that arg_slots has the same sign as the given direction. +-// If (and only if) arg_slots is a assembly-time constant, also allow it to be zero. +-void MethodHandles::verify_stack_move(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, int direction) { +- enum { UNREASONABLE_STACK_MOVE = 256 * 4 }; // limit of 255 arguments +- bool allow_zero = arg_slots.is_constant(); +- if (direction == 0) { direction = +1; allow_zero = true; } +- assert(stack_move_unit() == -1, "else add extra checks here"); +- if (arg_slots.is_register()) { +- Label L_ok, L_bad; +- BLOCK_COMMENT("verify_stack_move {"); +- // __ btst(-stack_move_unit() - 1, arg_slots.as_register()); // no need +- // __ br(Assembler::notZero, false, Assembler::pn, L_bad); +- // __ delayed()->nop(); +- __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD); +- if (direction > 0) { +- __ br(allow_zero ? Assembler::less : Assembler::lessEqual, false, Assembler::pn, L_bad); +- __ delayed()->nop(); +- __ cmp(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE); +- __ br(Assembler::less, false, Assembler::pn, L_ok); +- __ delayed()->nop(); +- } else { +- __ br(allow_zero ? 
Assembler::greater : Assembler::greaterEqual, false, Assembler::pn, L_bad); +- __ delayed()->nop(); +- __ cmp(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE); +- __ br(Assembler::greater, false, Assembler::pn, L_ok); +- __ delayed()->nop(); +- } +- __ BIND(L_bad); +- if (direction > 0) +- __ stop("assert arg_slots > 0"); +- else +- __ stop("assert arg_slots < 0"); +- __ BIND(L_ok); +- BLOCK_COMMENT("} verify_stack_move"); +- } else { +- intptr_t size = arg_slots.as_constant(); +- if (direction < 0) size = -size; +- assert(size >= 0, "correct direction of constant move"); +- assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move"); +- } +-} +- + void MethodHandles::verify_klass(MacroAssembler* _masm, + Register obj_reg, KlassHandle klass, + Register temp_reg, Register temp2_reg, +@@ -485,6 +70,14 @@ + assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() && + klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(), + "must be one of the SystemDictionaryHandles"); ++ bool did_save = false; ++ if (temp_reg == noreg || temp2_reg == noreg) { ++ temp_reg = L1; ++ temp2_reg = L2; ++ __ save_frame_and_mov(0, obj_reg, L0); ++ obj_reg = L0; ++ did_save = true; ++ } + Label L_ok, L_bad; + BLOCK_COMMENT("verify_klass {"); + __ verify_oop(obj_reg); +@@ -499,548 +92,412 @@ + __ ld_ptr(Address(temp2_reg, 0), temp2_reg); + __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::equal, Assembler::pt, L_ok); + __ BIND(L_bad); +- __ stop(error_message); ++ if (did_save) __ restore(); ++ __ STOP(error_message); + __ BIND(L_ok); ++ if (did_save) __ restore(); + BLOCK_COMMENT("} verify_klass"); + } ++ ++void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) { ++ Label L; ++ BLOCK_COMMENT("verify_ref_kind {"); ++ __ lduw(Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())), temp); ++ __ srl( temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT, 
temp); ++ __ and3(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK, temp); ++ __ cmp_and_br_short(temp, ref_kind, Assembler::equal, Assembler::pt, L); ++ { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal); ++ jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind); ++ if (ref_kind == JVM_REF_invokeVirtual || ++ ref_kind == JVM_REF_invokeSpecial) ++ // could do this for all ref_kinds, but would explode assembly code size ++ trace_method_handle(_masm, buf); ++ __ STOP(buf); ++ } ++ BLOCK_COMMENT("} verify_ref_kind"); ++ __ bind(L); ++} ++ + #endif // ASSERT + +- +-void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp) { ++void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp, ++ bool for_compiler_entry) { + assert(method == G5_method, "interpreter calling convention"); + __ verify_oop(method); +- __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target); +- if (JvmtiExport::can_post_interpreter_events()) { ++ ++ if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) { ++ Label run_compiled_code; + // JVMTI events, such as single-stepping, are implemented partly by avoiding running + // compiled code in threads for which the event is enabled. Check here for + // interp_only_mode if these events CAN be enabled. 
+ __ verify_thread(); +- Label skip_compiled_code; +- + const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); + __ ld(interp_only, temp); +- __ tst(temp); +- __ br(Assembler::notZero, true, Assembler::pn, skip_compiled_code); +- __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target); +- __ bind(skip_compiled_code); ++ __ cmp_and_br_short(temp, 0, Assembler::zero, Assembler::pt, run_compiled_code); ++ __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target); ++ __ jmp(target, 0); ++ __ delayed()->nop(); ++ __ BIND(run_compiled_code); ++ // Note: we could fill some delay slots here, but ++ // it doesn't matter, since this is interpreter code. + } + __ jmp(target, 0); + __ delayed()->nop(); + } + ++void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, ++ Register recv, Register method_temp, ++ Register temp2, Register temp3, ++ bool for_compiler_entry) { ++ BLOCK_COMMENT("jump_to_lambda_form {"); ++ // This is the initial entry point of a lazy method handle. ++ // After type checking, it picks up the invoker from the LambdaForm. 
++ assert_different_registers(recv, method_temp, temp2, temp3); ++ assert(method_temp == G5_method, "required register for loading method"); ++ ++ //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); }); ++ ++ // Load the invoker, as MH -> MH.form -> LF.vmentry ++ __ verify_oop(recv); ++ __ load_heap_oop(Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())), method_temp); ++ __ verify_oop(method_temp); ++ __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())), method_temp); ++ __ verify_oop(method_temp); ++ // the following assumes that a methodOop is normally compressed in the vmtarget field: ++ __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())), method_temp); ++ __ verify_oop(method_temp); ++ ++ if (VerifyMethodHandles && !for_compiler_entry) { ++ // make sure recv is already on stack ++ __ load_sized_value(Address(method_temp, methodOopDesc::size_of_parameters_offset()), ++ temp2, ++ sizeof(u2), /*is_signed*/ false); ++ // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), ""); ++ Label L; ++ __ ld_ptr(__ argument_address(temp2, temp2, -1), temp2); ++ __ cmp_and_br_short(temp2, recv, Assembler::equal, Assembler::pt, L); ++ __ STOP("receiver not on stack"); ++ __ BIND(L); ++ } ++ ++ jump_from_method_handle(_masm, method_temp, temp2, temp3, for_compiler_entry); ++ BLOCK_COMMENT("} jump_to_lambda_form"); ++} ++ + + // Code generation +-address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) { +- // I5_savedSP/O5_savedSP: sender SP (must preserve) ++address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm, ++ vmIntrinsics::ID iid) { ++ const bool not_for_compiler_entry = false; // this is the interpreter entry ++ assert(is_signature_polymorphic(iid), "expected invoke iid"); ++ if (iid == vmIntrinsics::_invokeGeneric || ++ iid == 
vmIntrinsics::_compiledLambdaForm) { ++ // Perhaps surprisingly, the symbolic references visible to Java are not directly used. ++ // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod. ++ // They all allow an appendix argument. ++ __ should_not_reach_here(); // empty stubs make SG sick ++ return NULL; ++ } ++ ++ // I5_savedSP/O5_savedSP: sender SP (must preserve; see prepare_to_jump_from_interpreted) ++ // G5_method: methodOop + // G4 (Gargs): incoming argument list (must preserve) +- // G5_method: invoke methodOop +- // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots]) +- // O0, O1, O2, O3, O4: garbage temps, blown away +- Register O0_mtype = O0; +- Register O1_scratch = O1; +- Register O2_scratch = O2; +- Register O3_scratch = O3; +- Register O4_argslot = O4; +- Register O4_argbase = O4; ++ // O0: used as temp to hold mh or receiver ++ // O1, O4: garbage temps, blown away ++ Register O1_scratch = O1; ++ Register O4_param_size = O4; // size of parameters + +- // emit WrongMethodType path first, to enable back-branch from main path +- Label wrong_method_type; +- __ bind(wrong_method_type); +- Label invoke_generic_slow_path; +- assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");; +- __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch); +- __ cmp(O1_scratch, (int) vmIntrinsics::_invokeExact); +- __ brx(Assembler::notEqual, false, Assembler::pt, invoke_generic_slow_path); +- __ delayed()->nop(); +- __ mov(O0_mtype, G5_method_type); // required by throw_WrongMethodType +- __ mov(G3_method_handle, G3_method_handle); // already in this register +- // O0 will be filled in with JavaThread in stub +- __ jump_to(AddressLiteral(StubRoutines::throw_WrongMethodTypeException_entry()), O3_scratch); +- __ delayed()->nop(); ++ address code_start = __ pc(); + + // here's where control starts out: + __ align(CodeEntryAlignment); + address entry_point = __ pc(); + +- // 
fetch the MethodType from the method handle +- // FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list. +- // This would simplify several touchy bits of code. +- // See 6984712: JSR 292 method handle calls need a clean argument base pointer +- { +- Register tem = G5_method; +- for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) { +- __ ld_ptr(Address(tem, *pchase), O0_mtype); +- tem = O0_mtype; // in case there is another indirection ++ if (VerifyMethodHandles) { ++ Label L; ++ BLOCK_COMMENT("verify_intrinsic_id {"); ++ __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch); ++ __ cmp_and_br_short(O1_scratch, (int) iid, Assembler::equal, Assembler::pt, L); ++ if (iid == vmIntrinsics::_linkToVirtual || ++ iid == vmIntrinsics::_linkToSpecial) { ++ // could do this for all kinds, but would explode assembly code size ++ trace_method_handle(_masm, "bad methodOop::intrinsic_id"); + } ++ __ STOP("bad methodOop::intrinsic_id"); ++ __ bind(L); ++ BLOCK_COMMENT("} verify_intrinsic_id"); + } + +- // given the MethodType, find out where the MH argument is buried +- __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O4_argslot); +- __ ldsw( Address(O4_argslot, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot); +- __ add(__ argument_address(O4_argslot, O4_argslot, 1), O4_argbase); +- // Note: argument_address uses its input as a scratch register! +- Address mh_receiver_slot_addr(O4_argbase, -Interpreter::stackElementSize); +- __ ld_ptr(mh_receiver_slot_addr, G3_method_handle); ++ // First task: Find out how big the argument list is. 
++ Address O4_first_arg_addr; ++ int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid); ++ assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic"); ++ if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) { ++ __ load_sized_value(Address(G5_method, methodOopDesc::size_of_parameters_offset()), ++ O4_param_size, ++ sizeof(u2), /*is_signed*/ false); ++ // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), ""); ++ O4_first_arg_addr = __ argument_address(O4_param_size, O4_param_size, -1); ++ } else { ++ DEBUG_ONLY(O4_param_size = noreg); ++ } + +- trace_method_handle(_masm, "invokeExact"); ++ Register O0_mh = noreg; ++ if (!is_signature_polymorphic_static(iid)) { ++ __ ld_ptr(O4_first_arg_addr, O0_mh = O0); ++ DEBUG_ONLY(O4_param_size = noreg); ++ } + +- __ check_method_handle_type(O0_mtype, G3_method_handle, O1_scratch, wrong_method_type); ++ // O4_first_arg_addr is live! + +- // Nobody uses the MH receiver slot after this. Make sure. 
+- DEBUG_ONLY(__ set((int32_t) 0x999999, O1_scratch); __ st_ptr(O1_scratch, mh_receiver_slot_addr)); ++ if (TraceMethodHandles) { ++ const char* name = vmIntrinsics::name_at(iid); ++ if (*name == '_') name += 1; ++ const size_t len = strlen(name) + 50; ++ char* qname = NEW_C_HEAP_ARRAY(char, len, mtInternal); ++ const char* suffix = ""; ++ if (vmIntrinsics::method_for(iid) == NULL || ++ !vmIntrinsics::method_for(iid)->access_flags().is_public()) { ++ if (is_signature_polymorphic_static(iid)) ++ suffix = "/static"; ++ else ++ suffix = "/private"; ++ } ++ jio_snprintf(qname, len, "MethodHandle::interpreter_entry::%s%s", name, suffix); ++ if (O0_mh != noreg) ++ __ mov(O0_mh, G3_method_handle); // make stub happy ++ trace_method_handle(_masm, qname); ++ } + +- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); ++ if (iid == vmIntrinsics::_invokeBasic) { ++ generate_method_handle_dispatch(_masm, iid, O0_mh, noreg, not_for_compiler_entry); + +- // for invokeGeneric (only), apply argument and result conversions on the fly +- __ bind(invoke_generic_slow_path); +-#ifdef ASSERT +- if (VerifyMethodHandles) { +- Label L; +- __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch); +- __ cmp(O1_scratch, (int) vmIntrinsics::_invokeGeneric); +- __ brx(Assembler::equal, false, Assembler::pt, L); +- __ delayed()->nop(); +- __ stop("bad methodOop::intrinsic_id"); +- __ bind(L); ++ } else { ++ // Adjust argument list by popping the trailing MemberName argument. ++ Register O0_recv = noreg; ++ if (MethodHandles::ref_kind_has_receiver(ref_kind)) { ++ // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack. 
++ __ ld_ptr(O4_first_arg_addr, O0_recv = O0); ++ DEBUG_ONLY(O4_param_size = noreg); ++ } ++ Register G5_member = G5_method; // MemberName ptr; incoming method ptr is dead now ++ __ ld_ptr(__ argument_address(constant(0)), G5_member); ++ __ add(Gargs, Interpreter::stackElementSize, Gargs); ++ generate_method_handle_dispatch(_masm, iid, O0_recv, G5_member, not_for_compiler_entry); + } +-#endif //ASSERT + +- // make room on the stack for another pointer: +- insert_arg_slots(_masm, 2 * stack_move_unit(), O4_argbase, O1_scratch, O2_scratch, O3_scratch); +- // load up an adapter from the calling type (Java weaves this) +- Register O2_form = O2_scratch; +- Register O3_adapter = O3_scratch; +- __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O2_form); +- __ load_heap_oop(Address(O2_form, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter); +- __ verify_oop(O3_adapter); +- __ st_ptr(O3_adapter, Address(O4_argbase, 1 * Interpreter::stackElementSize)); +- // As a trusted first argument, pass the type being called, so the adapter knows +- // the actual types of the arguments and return values. +- // (Generic invokers are shared among form-families of method-type.) +- __ st_ptr(O0_mtype, Address(O4_argbase, 0 * Interpreter::stackElementSize)); +- // FIXME: assert that O3_adapter is of the right method-type. +- __ mov(O3_adapter, G3_method_handle); +- trace_method_handle(_masm, "invokeGeneric"); +- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); ++ if (PrintMethodHandleStubs) { ++ address code_end = __ pc(); ++ tty->print_cr("--------"); ++ tty->print_cr("method handle interpreter entry for %s", vmIntrinsics::name_at(iid)); ++ Disassembler::decode(code_start, code_end); ++ tty->cr(); ++ } + + return entry_point; + } + +-// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant. 
+-static RegisterOrConstant constant(int value) { +- return RegisterOrConstant(value); +-} ++void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, ++ vmIntrinsics::ID iid, ++ Register receiver_reg, ++ Register member_reg, ++ bool for_compiler_entry) { ++ assert(is_signature_polymorphic(iid), "expected invoke iid"); ++ // temps used in this code are not used in *either* compiled or interpreted calling sequences ++ Register temp1 = (for_compiler_entry ? G1_scratch : O1); ++ Register temp2 = (for_compiler_entry ? G4_scratch : O4); ++ Register temp3 = G3_scratch; ++ Register temp4 = (for_compiler_entry ? noreg : O2); ++ if (for_compiler_entry) { ++ assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : O0), "only valid assignment"); ++ assert_different_registers(temp1, O0, O1, O2, O3, O4, O5); ++ assert_different_registers(temp2, O0, O1, O2, O3, O4, O5); ++ assert_different_registers(temp3, O0, O1, O2, O3, O4, O5); ++ assert_different_registers(temp4, O0, O1, O2, O3, O4, O5); ++ } ++ if (receiver_reg != noreg) assert_different_registers(temp1, temp2, temp3, temp4, receiver_reg); ++ if (member_reg != noreg) assert_different_registers(temp1, temp2, temp3, temp4, member_reg); ++ if (!for_compiler_entry) assert_different_registers(temp1, temp2, temp3, temp4, O5_savedSP); // don't trash lastSP + +-static void load_vmargslot(MacroAssembler* _masm, Address vmargslot_addr, Register result) { +- __ ldsw(vmargslot_addr, result); +-} ++ if (iid == vmIntrinsics::_invokeBasic) { ++ // indirect through MH.form.vmentry.vmtarget ++ jump_to_lambda_form(_masm, receiver_reg, G5_method, temp2, temp3, for_compiler_entry); + +-static RegisterOrConstant adjust_SP_and_Gargs_down_by_slots(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, +- Register temp_reg, Register temp2_reg) { +- // Keep the stack pointer 2*wordSize aligned. 
+- const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1); +- if (arg_slots.is_constant()) { +- const int offset = arg_slots.as_constant() << LogBytesPerWord; +- const int masked_offset = round_to(offset, 2 * BytesPerWord); +- const int masked_offset2 = (offset + 1*BytesPerWord) & ~TwoWordAlignmentMask; +- assert(masked_offset == masked_offset2, "must agree"); +- __ sub(Gargs, offset, Gargs); +- __ sub(SP, masked_offset, SP ); +- return offset; + } else { ++ // The method is a member invoker used by direct method handles. ++ if (VerifyMethodHandles) { ++ // make sure the trailing argument really is a MemberName (caller responsibility) ++ verify_klass(_masm, member_reg, SystemDictionaryHandles::MemberName_klass(), ++ temp1, temp2, ++ "MemberName required for invokeVirtual etc."); ++ } ++ ++ Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes())); ++ Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes())); ++ Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())); ++ ++ Register temp1_recv_klass = temp1; ++ if (iid != vmIntrinsics::_linkToStatic) { ++ __ verify_oop(receiver_reg); ++ if (iid == vmIntrinsics::_linkToSpecial) { ++ // Don't actually load the klass; just null-check the receiver. ++ __ null_check(receiver_reg); ++ } else { ++ // load receiver klass itself ++ __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes()); ++ __ load_klass(receiver_reg, temp1_recv_klass); ++ __ verify_oop(temp1_recv_klass); ++ } ++ BLOCK_COMMENT("check_receiver {"); ++ // The receiver for the MemberName must be in receiver_reg. ++ // Check the receiver against the MemberName.clazz ++ if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) { ++ // Did not load it above... 
++ __ load_klass(receiver_reg, temp1_recv_klass); ++ __ verify_oop(temp1_recv_klass); ++ } ++ if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) { ++ Label L_ok; ++ Register temp2_defc = temp2; ++ __ load_heap_oop(member_clazz, temp2_defc); ++ load_klass_from_Class(_masm, temp2_defc, temp3, temp4); ++ __ verify_oop(temp2_defc); ++ __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok); ++ // If we get here, the type check failed! ++ __ STOP("receiver class disagrees with MemberName.clazz"); ++ __ bind(L_ok); ++ } ++ BLOCK_COMMENT("} check_receiver"); ++ } ++ if (iid == vmIntrinsics::_linkToSpecial || ++ iid == vmIntrinsics::_linkToStatic) { ++ DEBUG_ONLY(temp1_recv_klass = noreg); // these guys didn't load the recv_klass ++ } ++ ++ // Live registers at this point: ++ // member_reg - MemberName that was the trailing argument ++ // temp1_recv_klass - klass of stacked receiver, if needed ++ // O5_savedSP - interpreter linkage (if interpreted) ++ // O0..O7,G1,G4 - compiler arguments (if compiled) ++ ++ bool method_is_live = false; ++ switch (iid) { ++ case vmIntrinsics::_linkToSpecial: ++ if (VerifyMethodHandles) { ++ verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3); ++ } ++ __ load_heap_oop(member_vmtarget, G5_method); ++ method_is_live = true; ++ break; ++ ++ case vmIntrinsics::_linkToStatic: ++ if (VerifyMethodHandles) { ++ verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3); ++ } ++ __ load_heap_oop(member_vmtarget, G5_method); ++ method_is_live = true; ++ break; ++ ++ case vmIntrinsics::_linkToVirtual: + #ifdef ASSERT + { +- Label L_ok; +- __ cmp_and_br_short(arg_slots.as_register(), 0, Assembler::greaterEqual, Assembler::pt, L_ok); +- __ stop("negative arg_slots"); +- __ bind(L_ok); ++ // same as TemplateTable::invokevirtual, ++ // minus the CP setup and profiling: ++ ++ if (VerifyMethodHandles) { ++ verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3); ++ } ++ ++ // pick out the vtable 
index from the MemberName, and then we can discard it: ++ Register temp2_index = temp2; ++ __ ld_ptr(member_vmindex, temp2_index); ++ ++ if (VerifyMethodHandles) { ++ Label L_index_ok; ++ __ cmp_and_br_short(temp2_index, (int) 0, Assembler::greaterEqual, Assembler::pn, L_index_ok); ++ __ STOP("no virtual index"); ++ __ BIND(L_index_ok); ++ } ++ ++ // Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget ++ // at this point. And VerifyMethodHandles has already checked clazz, if needed. ++ ++ // get target methodOop & entry point ++ __ lookup_virtual_method(temp1_recv_klass, temp2_index, G5_method); ++ method_is_live = true; ++ break; + } +-#endif +- __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg); +- __ add( temp_reg, 1*BytesPerWord, temp2_reg); +- __ andn(temp2_reg, TwoWordAlignmentMask, temp2_reg); +- __ sub(Gargs, temp_reg, Gargs); +- __ sub(SP, temp2_reg, SP ); +- return temp_reg; ++ ++ case vmIntrinsics::_linkToInterface: ++ { ++ // same as TemplateTable::invokeinterface ++ // (minus the CP setup and profiling, with different argument motion) ++ if (VerifyMethodHandles) { ++ verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3); ++ } ++ ++ Register temp3_intf = temp3; ++ __ load_heap_oop(member_clazz, temp3_intf); ++ load_klass_from_Class(_masm, temp3_intf, temp2, temp4); ++ __ verify_oop(temp3_intf); ++ ++ Register G5_index = G5_method; ++ __ ld_ptr(member_vmindex, G5_index); ++ if (VerifyMethodHandles) { ++ Label L; ++ __ cmp_and_br_short(G5_index, 0, Assembler::greaterEqual, Assembler::pt, L); ++ __ STOP("invalid vtable index for MH.invokeInterface"); ++ __ bind(L); ++ } ++ ++ // given intf, index, and recv klass, dispatch to the implementation method ++ Label L_no_such_interface; ++ Register no_sethi_temp = noreg; ++ __ lookup_interface_method(temp1_recv_klass, temp3_intf, ++ // note: next two args must be the same: ++ G5_index, G5_method, ++ temp2, no_sethi_temp, ++ L_no_such_interface); ++ ++ __ 
verify_oop(G5_method); ++ jump_from_method_handle(_masm, G5_method, temp2, temp3, for_compiler_entry); ++ ++ __ bind(L_no_such_interface); ++ AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry()); ++ __ jump_to(icce, temp3); ++ __ delayed()->nop(); ++ break; ++ } ++ ++ default: ++ fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid))); ++ break; ++ } ++ ++ if (method_is_live) { ++ // live at this point: G5_method, O5_savedSP (if interpreted) ++ ++ // After figuring out which concrete method to call, jump into it. ++ // Note that this works in the interpreter with no data motion. ++ // But the compiled version will require that rcx_recv be shifted out. ++ __ verify_oop(G5_method); ++ jump_from_method_handle(_masm, G5_method, temp1, temp3, for_compiler_entry); ++ } + } + } + +-static RegisterOrConstant adjust_SP_and_Gargs_up_by_slots(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, +- Register temp_reg, Register temp2_reg) { +- // Keep the stack pointer 2*wordSize aligned. +- const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1); +- if (arg_slots.is_constant()) { +- const int offset = arg_slots.as_constant() << LogBytesPerWord; +- const int masked_offset = offset & ~TwoWordAlignmentMask; +- __ add(Gargs, offset, Gargs); +- __ add(SP, masked_offset, SP ); +- return offset; +- } else { +- __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg); +- __ andn(temp_reg, TwoWordAlignmentMask, temp2_reg); +- __ add(Gargs, temp_reg, Gargs); +- __ add(SP, temp2_reg, SP ); +- return temp_reg; +- } +-} +- +-// Helper to insert argument slots into the stack. +-// arg_slots must be a multiple of stack_move_unit() and < 0 +-// argslot_reg is decremented to point to the new (shifted) location of the argslot +-// But, temp_reg ends up holding the original value of argslot_reg. 
+-void MethodHandles::insert_arg_slots(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, +- Register argslot_reg, +- Register temp_reg, Register temp2_reg, Register temp3_reg) { +- // allow constant zero +- if (arg_slots.is_constant() && arg_slots.as_constant() == 0) +- return; +- +- // We have to insert at least one word, so bang the stack. +- if (UseStackBanging) { +- // Save G3_method_handle since bang_stack_with_offset uses it as a temp register +- __ mov(G3_method_handle, temp_reg); +- int frame_size = (arg_slots.is_constant() ? -1 * arg_slots.as_constant() * wordSize : 0); +- if (frame_size <= 0) +- frame_size = 256 * Interpreter::stackElementSize; // conservative +- __ generate_stack_overflow_check(frame_size); +- __ mov(temp_reg, G3_method_handle); +- } +- +- assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg, +- (!arg_slots.is_register() ? Gargs : arg_slots.as_register())); +- +- BLOCK_COMMENT("insert_arg_slots {"); +- if (VerifyMethodHandles) +- verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame"); +- if (VerifyMethodHandles) +- verify_stack_move(_masm, arg_slots, -1); +- +- // Make space on the stack for the inserted argument(s). +- // Then pull down everything shallower than argslot_reg. +- // The stacked return address gets pulled down with everything else. +- // That is, copy [sp, argslot) downward by -size words. In pseudo-code: +- // sp -= size; +- // for (temp = sp + size; temp < argslot; temp++) +- // temp[-size] = temp[0] +- // argslot -= size; +- +- // offset is temp3_reg in case of arg_slots being a register. 
+- RegisterOrConstant offset = adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg); +- __ sub(Gargs, offset, temp_reg); // source pointer for copy +- +- { +- Label loop; +- __ BIND(loop); +- // pull one word down each time through the loop +- __ ld_ptr( Address(temp_reg, 0 ), temp2_reg); +- __ st_ptr(temp2_reg, Address(temp_reg, offset) ); +- __ add(temp_reg, wordSize, temp_reg); +- __ cmp_and_brx_short(temp_reg, argslot_reg, Assembler::lessUnsigned, Assembler::pt, loop); +- } +- +- // Now move the argslot down, to point to the opened-up space. +- __ add(argslot_reg, offset, argslot_reg); +- BLOCK_COMMENT("} insert_arg_slots"); +-} +- +- +-// Helper to remove argument slots from the stack. +-// arg_slots must be a multiple of stack_move_unit() and > 0 +-void MethodHandles::remove_arg_slots(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, +- Register argslot_reg, +- Register temp_reg, Register temp2_reg, Register temp3_reg) { +- // allow constant zero +- if (arg_slots.is_constant() && arg_slots.as_constant() == 0) +- return; +- assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg, +- (!arg_slots.is_register() ? Gargs : arg_slots.as_register())); +- +- BLOCK_COMMENT("remove_arg_slots {"); +- if (VerifyMethodHandles) +- verify_argslots(_masm, arg_slots, argslot_reg, temp_reg, temp2_reg, false, +- "deleted argument(s) must fall within current frame"); +- if (VerifyMethodHandles) +- verify_stack_move(_masm, arg_slots, +1); +- +- // Pull up everything shallower than argslot. +- // Then remove the excess space on the stack. +- // The stacked return address gets pulled up with everything else. +- // That is, copy [sp, argslot) upward by size words. 
In pseudo-code: +- // for (temp = argslot-1; temp >= sp; --temp) +- // temp[size] = temp[0] +- // argslot += size; +- // sp += size; +- +- RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg); +- __ sub(argslot_reg, wordSize, temp_reg); // source pointer for copy +- +- { +- Label L_loop; +- __ BIND(L_loop); +- // pull one word up each time through the loop +- __ ld_ptr( Address(temp_reg, 0 ), temp2_reg); +- __ st_ptr(temp2_reg, Address(temp_reg, offset) ); +- __ sub(temp_reg, wordSize, temp_reg); +- __ cmp_and_brx_short(temp_reg, Gargs, Assembler::greaterEqualUnsigned, Assembler::pt, L_loop); +- } +- +- // And adjust the argslot address to point at the deletion point. +- __ add(argslot_reg, offset, argslot_reg); +- +- // We don't need the offset at this point anymore, just adjust SP and Gargs. +- (void) adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg); +- +- BLOCK_COMMENT("} remove_arg_slots"); +-} +- +-// Helper to copy argument slots to the top of the stack. +-// The sequence starts with argslot_reg and is counted by slot_count +-// slot_count must be a multiple of stack_move_unit() and >= 0 +-// This function blows the temps but does not change argslot_reg. +-void MethodHandles::push_arg_slots(MacroAssembler* _masm, +- Register argslot_reg, +- RegisterOrConstant slot_count, +- Register temp_reg, Register temp2_reg) { +- // allow constant zero +- if (slot_count.is_constant() && slot_count.as_constant() == 0) +- return; +- assert_different_registers(argslot_reg, temp_reg, temp2_reg, +- (!slot_count.is_register() ? 
Gargs : slot_count.as_register()), +- SP); +- assert(Interpreter::stackElementSize == wordSize, "else change this code"); +- +- BLOCK_COMMENT("push_arg_slots {"); +- if (VerifyMethodHandles) +- verify_stack_move(_masm, slot_count, 0); +- +- RegisterOrConstant offset = adjust_SP_and_Gargs_down_by_slots(_masm, slot_count, temp2_reg, temp_reg); +- +- if (slot_count.is_constant()) { +- for (int i = slot_count.as_constant() - 1; i >= 0; i--) { +- __ ld_ptr( Address(argslot_reg, i * wordSize), temp_reg); +- __ st_ptr(temp_reg, Address(Gargs, i * wordSize)); +- } +- } else { +- Label L_plural, L_loop, L_break; +- // Emit code to dynamically check for the common cases, zero and one slot. +- __ cmp(slot_count.as_register(), (int32_t) 1); +- __ br(Assembler::greater, false, Assembler::pn, L_plural); +- __ delayed()->nop(); +- __ br(Assembler::less, false, Assembler::pn, L_break); +- __ delayed()->nop(); +- __ ld_ptr( Address(argslot_reg, 0), temp_reg); +- __ st_ptr(temp_reg, Address(Gargs, 0)); +- __ ba_short(L_break); +- __ BIND(L_plural); +- +- // Loop for 2 or more: +- // top = &argslot[slot_count] +- // while (top > argslot) *(--Gargs) = *(--top) +- Register top_reg = temp_reg; +- __ add(argslot_reg, offset, top_reg); +- __ add(Gargs, offset, Gargs ); // move back up again so we can go down +- __ BIND(L_loop); +- __ sub(top_reg, wordSize, top_reg); +- __ sub(Gargs, wordSize, Gargs ); +- __ ld_ptr( Address(top_reg, 0), temp2_reg); +- __ st_ptr(temp2_reg, Address(Gargs, 0)); +- __ cmp_and_brx_short(top_reg, argslot_reg, Assembler::greaterUnsigned, Assembler::pt, L_loop); +- __ BIND(L_break); +- } +- BLOCK_COMMENT("} push_arg_slots"); +-} +- +-// in-place movement; no change to Gargs +-// blows temp_reg, temp2_reg +-void MethodHandles::move_arg_slots_up(MacroAssembler* _masm, +- Register bottom_reg, // invariant +- Address top_addr, // can use temp_reg +- RegisterOrConstant positive_distance_in_slots, // destroyed if register +- Register temp_reg, Register temp2_reg) { +- 
assert_different_registers(bottom_reg, +- temp_reg, temp2_reg, +- positive_distance_in_slots.register_or_noreg()); +- BLOCK_COMMENT("move_arg_slots_up {"); +- Label L_loop, L_break; +- Register top_reg = temp_reg; +- if (!top_addr.is_same_address(Address(top_reg, 0))) { +- __ add(top_addr, top_reg); +- } +- // Detect empty (or broken) loop: +-#ifdef ASSERT +- if (VerifyMethodHandles) { +- // Verify that &bottom < &top (non-empty interval) +- Label L_ok, L_bad; +- if (positive_distance_in_slots.is_register()) { +- __ cmp(positive_distance_in_slots.as_register(), (int32_t) 0); +- __ br(Assembler::lessEqual, false, Assembler::pn, L_bad); +- __ delayed()->nop(); +- } +- __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_ok); +- __ BIND(L_bad); +- __ stop("valid bounds (copy up)"); +- __ BIND(L_ok); +- } +-#endif +- __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::greaterEqualUnsigned, Assembler::pn, L_break); +- // work top down to bottom, copying contiguous data upwards +- // In pseudo-code: +- // while (--top >= bottom) *(top + distance) = *(top + 0); +- RegisterOrConstant offset = __ argument_offset(positive_distance_in_slots, positive_distance_in_slots.register_or_noreg()); +- __ BIND(L_loop); +- __ sub(top_reg, wordSize, top_reg); +- __ ld_ptr( Address(top_reg, 0 ), temp2_reg); +- __ st_ptr(temp2_reg, Address(top_reg, offset) ); +- __ cmp_and_brx_short(top_reg, bottom_reg, Assembler::greaterUnsigned, Assembler::pt, L_loop); +- assert(Interpreter::stackElementSize == wordSize, "else change loop"); +- __ BIND(L_break); +- BLOCK_COMMENT("} move_arg_slots_up"); +-} +- +-// in-place movement; no change to rsp +-// blows temp_reg, temp2_reg +-void MethodHandles::move_arg_slots_down(MacroAssembler* _masm, +- Address bottom_addr, // can use temp_reg +- Register top_reg, // invariant +- RegisterOrConstant negative_distance_in_slots, // destroyed if register +- Register temp_reg, Register temp2_reg) { +- 
assert_different_registers(top_reg, +- negative_distance_in_slots.register_or_noreg(), +- temp_reg, temp2_reg); +- BLOCK_COMMENT("move_arg_slots_down {"); +- Label L_loop, L_break; +- Register bottom_reg = temp_reg; +- if (!bottom_addr.is_same_address(Address(bottom_reg, 0))) { +- __ add(bottom_addr, bottom_reg); +- } +- // Detect empty (or broken) loop: +-#ifdef ASSERT +- assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, ""); +- if (VerifyMethodHandles) { +- // Verify that &bottom < &top (non-empty interval) +- Label L_ok, L_bad; +- if (negative_distance_in_slots.is_register()) { +- __ cmp(negative_distance_in_slots.as_register(), (int32_t) 0); +- __ br(Assembler::greaterEqual, false, Assembler::pn, L_bad); +- __ delayed()->nop(); +- } +- __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_ok); +- __ BIND(L_bad); +- __ stop("valid bounds (copy down)"); +- __ BIND(L_ok); +- } +-#endif +- __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::greaterEqualUnsigned, Assembler::pn, L_break); +- // work bottom up to top, copying contiguous data downwards +- // In pseudo-code: +- // while (bottom < top) *(bottom - distance) = *(bottom + 0), bottom++; +- RegisterOrConstant offset = __ argument_offset(negative_distance_in_slots, negative_distance_in_slots.register_or_noreg()); +- __ BIND(L_loop); +- __ ld_ptr( Address(bottom_reg, 0 ), temp2_reg); +- __ st_ptr(temp2_reg, Address(bottom_reg, offset) ); +- __ add(bottom_reg, wordSize, bottom_reg); +- __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_loop); +- assert(Interpreter::stackElementSize == wordSize, "else change loop"); +- __ BIND(L_break); +- BLOCK_COMMENT("} move_arg_slots_down"); +-} +- +-// Copy from a field or array element to a stacked argument slot. +-// is_element (ignored) says whether caller is loading an array element instead of an instance field. 
+-void MethodHandles::move_typed_arg(MacroAssembler* _masm, +- BasicType type, bool is_element, +- Address value_src, Address slot_dest, +- Register temp_reg) { +- assert(!slot_dest.uses(temp_reg), "must be different register"); +- BLOCK_COMMENT(!is_element ? "move_typed_arg {" : "move_typed_arg { (array element)"); +- if (type == T_OBJECT || type == T_ARRAY) { +- __ load_heap_oop(value_src, temp_reg); +- __ verify_oop(temp_reg); +- __ st_ptr(temp_reg, slot_dest); +- } else if (type != T_VOID) { +- int arg_size = type2aelembytes(type); +- bool arg_is_signed = is_signed_subword_type(type); +- int slot_size = is_subword_type(type) ? type2aelembytes(T_INT) : arg_size; // store int sub-words as int +- __ load_sized_value( value_src, temp_reg, arg_size, arg_is_signed); +- __ store_sized_value(temp_reg, slot_dest, slot_size ); +- } +- BLOCK_COMMENT("} move_typed_arg"); +-} +- +-// Cf. TemplateInterpreterGenerator::generate_return_entry_for and +-// InterpreterMacroAssembler::save_return_value +-void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type, +- Address return_slot) { +- BLOCK_COMMENT("move_return_value {"); +- // Look at the type and pull the value out of the corresponding register. +- if (type == T_VOID) { +- // nothing to do +- } else if (type == T_OBJECT) { +- __ verify_oop(O0); +- __ st_ptr(O0, return_slot); +- } else if (type == T_INT || is_subword_type(type)) { +- int type_size = type2aelembytes(T_INT); +- __ store_sized_value(O0, return_slot, type_size); +- } else if (type == T_LONG) { +- // store the value by parts +- // Note: We assume longs are continguous (if misaligned) on the interpreter stack. 
+-#if !defined(_LP64) && defined(COMPILER2) +- __ stx(G1, return_slot); +-#else +- #ifdef _LP64 +- __ stx(O0, return_slot); +- #else +- if (return_slot.has_disp()) { +- // The displacement is a constant +- __ st(O0, return_slot); +- __ st(O1, return_slot.plus_disp(Interpreter::stackElementSize)); +- } else { +- __ std(O0, return_slot); +- } +- #endif +-#endif +- } else if (type == T_FLOAT) { +- __ stf(FloatRegisterImpl::S, Ftos_f, return_slot); +- } else if (type == T_DOUBLE) { +- __ stf(FloatRegisterImpl::D, Ftos_f, return_slot); +- } else { +- ShouldNotReachHere(); +- } +- BLOCK_COMMENT("} move_return_value"); +-} +- + #ifndef PRODUCT +-void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no) { +- RicochetFrame* rf = new RicochetFrame(*fr); +- +- // ricochet slots (kept in registers for sparc) +- values.describe(frame_no, rf->register_addr(I5_savedSP), err_msg("exact_sender_sp reg for #%d", frame_no)); +- values.describe(frame_no, rf->register_addr(L5_conversion), err_msg("conversion reg for #%d", frame_no)); +- values.describe(frame_no, rf->register_addr(L4_saved_args_base), err_msg("saved_args_base reg for #%d", frame_no)); +- values.describe(frame_no, rf->register_addr(L3_saved_args_layout), err_msg("saved_args_layout reg for #%d", frame_no)); +- values.describe(frame_no, rf->register_addr(L2_saved_target), err_msg("saved_target reg for #%d", frame_no)); +- values.describe(frame_no, rf->register_addr(L1_continuation), err_msg("continuation reg for #%d", frame_no)); +- +- // relevant ricochet targets (in caller frame) +- values.describe(-1, rf->saved_args_base(), err_msg("*saved_args_base for #%d", frame_no)); +- values.describe(-1, (intptr_t *)(STACK_BIAS+(uintptr_t)rf->exact_sender_sp()), err_msg("*exact_sender_sp+STACK_BIAS for #%d", frame_no)); +-} +-#endif // ASSERT +- +-#ifndef PRODUCT +-extern "C" void print_method_handle(oop mh); + void trace_method_handle_stub(const char* adaptername, + oopDesc* mh, + intptr_t* 
saved_sp, + intptr_t* args, + intptr_t* tracing_fp) { +- bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have mh +- +- tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp, args); ++ bool has_mh = (strstr(adaptername, "/static") == NULL && ++ strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH ++ const char* mh_reg_name = has_mh ? "G3_mh" : "G3"; ++ tty->print_cr("MH %s %s="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT, ++ adaptername, mh_reg_name, ++ (intptr_t) mh, saved_sp, args); + + if (Verbose) { + // dumping last frame with frame::describe +@@ -1101,6 +558,7 @@ + + // mark saved_sp, if seems valid (may not be valid for some adapters) + intptr_t *unbiased_sp = (intptr_t *)(STACK_BIAS+(uintptr_t)saved_sp); ++ const int ARG_LIMIT = 255, SLOP = 45, UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP); + if ((unbiased_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (unbiased_sp < dump_fp)) { + values.describe(-1, unbiased_sp, "*saved_sp+STACK_BIAS"); + } +@@ -1108,10 +566,13 @@ + // Note: the unextended_sp may not be correct + tty->print_cr(" stack layout:"); + values.print(p); +- } +- +- if (has_mh) { +- print_method_handle(mh); ++ if (has_mh && mh->is_oop()) { ++ mh->print(); ++ if (java_lang_invoke_MethodHandle::is_instance(mh)) { ++ if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0) ++ java_lang_invoke_MethodHandle::form(mh)->print(); ++ } ++ } + } + } + +@@ -1154,1280 +615,3 @@ + BLOCK_COMMENT("} trace_method_handle"); + } + #endif // PRODUCT +- +-// which conversion op types are implemented here? 
+-int MethodHandles::adapter_conversion_ops_supported_mask() { +- return ((1<from_compiled_entry(), "method must be linked"); +- +- __ set(AddressLiteral((address) &_raise_exception_method), G5_method); +- __ ld_ptr(Address(G5_method, 0), G5_method); +- +- const int jobject_oop_offset = 0; +- __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method); +- +- adjust_SP_and_Gargs_down_by_slots(_masm, 3, noreg, noreg); +- +- __ st (O0_code, __ argument_address(constant(2), noreg, 0)); +- __ st_ptr(O1_actual, __ argument_address(constant(1), noreg, 0)); +- __ st_ptr(O2_required, __ argument_address(constant(0), noreg, 0)); +- jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch); +- } +- break; +- +- case _invokestatic_mh: +- case _invokespecial_mh: +- { +- __ load_heap_oop(G3_mh_vmtarget, G5_method); // target is a methodOop +- // Same as TemplateTable::invokestatic or invokespecial, +- // minus the CP setup and profiling: +- if (ek == _invokespecial_mh) { +- // Must load & check the first argument before entering the target method. +- __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch); +- __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle); +- __ null_check(G3_method_handle); +- __ verify_oop(G3_method_handle); +- } +- jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch); +- } +- break; +- +- case _invokevirtual_mh: +- { +- // Same as TemplateTable::invokevirtual, +- // minus the CP setup and profiling: +- +- // Pick out the vtable index and receiver offset from the MH, +- // and then we can discard it: +- Register O2_index = O2_scratch; +- __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch); +- __ ldsw(G3_dmh_vmindex, O2_index); +- // Note: The verifier allows us to ignore G3_mh_vmtarget. 
+- __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle); +- __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes()); +- +- // Get receiver klass: +- Register O0_klass = O0_argslot; +- __ load_klass(G3_method_handle, O0_klass); +- __ verify_oop(O0_klass); +- +- // Get target methodOop & entry point: +- const int base = instanceKlass::vtable_start_offset() * wordSize; +- assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); +- +- __ sll_ptr(O2_index, LogBytesPerWord, O2_index); +- __ add(O0_klass, O2_index, O0_klass); +- Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes()); +- __ ld_ptr(vtable_entry_addr, G5_method); +- +- jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch); +- } +- break; +- +- case _invokeinterface_mh: +- { +- // Same as TemplateTable::invokeinterface, +- // minus the CP setup and profiling: +- __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch); +- Register O1_intf = O1_scratch; +- Register G5_index = G5_scratch; +- __ load_heap_oop(G3_mh_vmtarget, O1_intf); +- __ ldsw(G3_dmh_vmindex, G5_index); +- __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle); +- __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes()); +- +- // Get receiver klass: +- Register O0_klass = O0_argslot; +- __ load_klass(G3_method_handle, O0_klass); +- __ verify_oop(O0_klass); +- +- // Get interface: +- Label no_such_interface; +- __ verify_oop(O1_intf); +- __ lookup_interface_method(O0_klass, O1_intf, +- // Note: next two args must be the same: +- G5_index, G5_method, +- O2_scratch, +- O3_scratch, +- no_such_interface); +- +- jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch); +- +- __ bind(no_such_interface); +- // Throw an exception. +- // For historical reasons, it will be IncompatibleClassChangeError. 
+- __ unimplemented("not tested yet"); +- __ ld_ptr(Address(O1_intf, java_mirror_offset), O2_required); // required interface +- __ mov( O0_klass, O1_actual); // bad receiver +- __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch); +- __ delayed()->mov(Bytecodes::_invokeinterface, O0_code); // who is complaining? +- } +- break; +- +- case _bound_ref_mh: +- case _bound_int_mh: +- case _bound_long_mh: +- case _bound_ref_direct_mh: +- case _bound_int_direct_mh: +- case _bound_long_direct_mh: +- { +- const bool direct_to_method = (ek >= _bound_ref_direct_mh); +- BasicType arg_type = ek_bound_mh_arg_type(ek); +- int arg_slots = type2size[arg_type]; +- +- // Make room for the new argument: +- load_vmargslot(_masm, G3_bmh_vmargslot, O0_argslot); +- __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); +- +- insert_arg_slots(_masm, arg_slots * stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); +- +- // Store bound argument into the new stack slot: +- __ load_heap_oop(G3_bmh_argument, O1_scratch); +- if (arg_type == T_OBJECT) { +- __ st_ptr(O1_scratch, Address(O0_argslot, 0)); +- } else { +- Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type)); +- move_typed_arg(_masm, arg_type, false, +- prim_value_addr, +- Address(O0_argslot, 0), +- O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3) +- } +- +- if (direct_to_method) { +- __ load_heap_oop(G3_mh_vmtarget, G5_method); // target is a methodOop +- jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch); +- } else { +- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); // target is a methodOop +- __ verify_oop(G3_method_handle); +- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); +- } +- } +- break; +- +- case _adapter_opt_profiling: +- if (java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes() != 0) { +- Address G3_mh_vmcount(G3_method_handle, 
java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes()); +- __ ld(G3_mh_vmcount, O1_scratch); +- __ add(O1_scratch, 1, O1_scratch); +- __ st(O1_scratch, G3_mh_vmcount); +- } +- // fall through +- +- case _adapter_retype_only: +- case _adapter_retype_raw: +- // Immediately jump to the next MH layer: +- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); +- __ verify_oop(G3_method_handle); +- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); +- // This is OK when all parameter types widen. +- // It is also OK when a return type narrows. +- break; +- +- case _adapter_check_cast: +- { +- // Check a reference argument before jumping to the next layer of MH: +- load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); +- Address vmarg = __ argument_address(O0_argslot, O0_argslot); +- +- // What class are we casting to? +- Register O1_klass = O1_scratch; // Interesting AMH data. +- __ load_heap_oop(G3_amh_argument, O1_klass); // This is a Class object! +- load_klass_from_Class(_masm, O1_klass, O2_scratch, O3_scratch); +- +- Label L_done; +- __ ld_ptr(vmarg, O2_scratch); +- __ br_null_short(O2_scratch, Assembler::pn, L_done); // No cast if null. +- __ load_klass(O2_scratch, O2_scratch); +- +- // Live at this point: +- // - O0_argslot : argslot index in vmarg; may be required in the failing path +- // - O1_klass : klass required by the target method +- // - O2_scratch : argument klass to test +- // - G3_method_handle: adapter method handle +- __ check_klass_subtype(O2_scratch, O1_klass, O3_scratch, O4_scratch, L_done); +- +- // If we get here, the type check failed! +- __ load_heap_oop(G3_amh_argument, O2_required); // required class +- __ ld_ptr( vmarg, O1_actual); // bad object +- __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch); +- __ delayed()->mov(Bytecodes::_checkcast, O0_code); // who is complaining? 
+- +- __ BIND(L_done); +- // Get the new MH: +- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); +- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); +- } +- break; +- +- case _adapter_prim_to_prim: +- case _adapter_ref_to_prim: +- // Handled completely by optimized cases. +- __ stop("init_AdapterMethodHandle should not issue this"); +- break; +- +- case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim +-//case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim +- case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim +- case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim +- { +- // Perform an in-place conversion to int or an int subword. +- load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); +- Address value; +- Address vmarg; +- bool value_left_justified = false; +- +- switch (ek) { +- case _adapter_opt_i2i: +- value = vmarg = __ argument_address(O0_argslot, O0_argslot); +- break; +- case _adapter_opt_l2i: +- { +- // just delete the extra slot +-#ifdef _LP64 +- // In V9, longs are given 2 64-bit slots in the interpreter, but the +- // data is passed in only 1 slot. +- // Keep the second slot. +- __ add(__ argument_address(O0_argslot, O0_argslot, -1), O0_argslot); +- remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); +- value = Address(O0_argslot, 4); // Get least-significant 32-bit of 64-bit value. +- vmarg = Address(O0_argslot, Interpreter::stackElementSize); +-#else +- // Keep the first slot. +- __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); +- remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); +- value = Address(O0_argslot, 0); +- vmarg = value; +-#endif +- } +- break; +- case _adapter_opt_unboxi: +- { +- vmarg = __ argument_address(O0_argslot, O0_argslot); +- // Load the value up from the heap. 
+- __ ld_ptr(vmarg, O1_scratch); +- int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT); +-#ifdef ASSERT +- for (int bt = T_BOOLEAN; bt < T_INT; bt++) { +- if (is_subword_type(BasicType(bt))) +- assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), ""); +- } +-#endif +- __ null_check(O1_scratch, value_offset); +- value = Address(O1_scratch, value_offset); +-#ifdef _BIG_ENDIAN +- // Values stored in objects are packed. +- value_left_justified = true; +-#endif +- } +- break; +- default: +- ShouldNotReachHere(); +- } +- +- // This check is required on _BIG_ENDIAN +- Register G5_vminfo = G5_scratch; +- __ ldsw(G3_amh_conversion, G5_vminfo); +- assert(CONV_VMINFO_SHIFT == 0, "preshifted"); +- +- // Original 32-bit vmdata word must be of this form: +- // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 | +- __ lduw(value, O1_scratch); +- if (!value_left_justified) +- __ sll(O1_scratch, G5_vminfo, O1_scratch); +- Label zero_extend, done; +- __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo); +- __ br(Assembler::zero, false, Assembler::pn, zero_extend); +- __ delayed()->nop(); +- +- // this path is taken for int->byte, int->short +- __ sra(O1_scratch, G5_vminfo, O1_scratch); +- __ ba_short(done); +- +- __ bind(zero_extend); +- // this is taken for int->char +- __ srl(O1_scratch, G5_vminfo, O1_scratch); +- +- __ bind(done); +- __ st(O1_scratch, vmarg); +- +- // Get the new MH: +- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); +- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); +- } +- break; +- +- case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim +- case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim +- { +- // Perform an in-place int-to-long or ref-to-long conversion. +- load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); +- +- // On big-endian machine we duplicate the slot and store the MSW +- // in the first slot. 
+- __ add(__ argument_address(O0_argslot, O0_argslot, 1), O0_argslot); +- +- insert_arg_slots(_masm, stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); +- +- Address arg_lsw(O0_argslot, 0); +- Address arg_msw(O0_argslot, -Interpreter::stackElementSize); +- +- switch (ek) { +- case _adapter_opt_i2l: +- { +-#ifdef _LP64 +- __ ldsw(arg_lsw, O2_scratch); // Load LSW sign-extended +-#else +- __ ldsw(arg_lsw, O3_scratch); // Load LSW sign-extended +- __ srlx(O3_scratch, BitsPerInt, O2_scratch); // Move MSW value to lower 32-bits for std +-#endif +- __ st_long(O2_scratch, arg_msw); // Uses O2/O3 on !_LP64 +- } +- break; +- case _adapter_opt_unboxl: +- { +- // Load the value up from the heap. +- __ ld_ptr(arg_lsw, O1_scratch); +- int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG); +- assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), ""); +- __ null_check(O1_scratch, value_offset); +- __ ld_long(Address(O1_scratch, value_offset), O2_scratch); // Uses O2/O3 on !_LP64 +- __ st_long(O2_scratch, arg_msw); +- } +- break; +- default: +- ShouldNotReachHere(); +- } +- +- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); +- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); +- } +- break; +- +- case _adapter_opt_f2d: // optimized subcase of adapt_prim_to_prim +- case _adapter_opt_d2f: // optimized subcase of adapt_prim_to_prim +- { +- // perform an in-place floating primitive conversion +- __ unimplemented(entry_name(ek)); +- } +- break; +- +- case _adapter_prim_to_ref: +- __ unimplemented(entry_name(ek)); // %%% FIXME: NYI +- break; +- +- case _adapter_swap_args: +- case _adapter_rot_args: +- // handled completely by optimized cases +- __ stop("init_AdapterMethodHandle should not issue this"); +- break; +- +- case _adapter_opt_swap_1: +- case _adapter_opt_swap_2: +- case _adapter_opt_rot_1_up: +- case _adapter_opt_rot_1_down: +- case _adapter_opt_rot_2_up: +- case _adapter_opt_rot_2_down: +- { +- 
int swap_slots = ek_adapter_opt_swap_slots(ek); +- int rotate = ek_adapter_opt_swap_mode(ek); +- +- // 'argslot' is the position of the first argument to swap. +- load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); +- __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); +- if (VerifyMethodHandles) +- verify_argslot(_masm, O0_argslot, O2_scratch, "swap point must fall within current frame"); +- +- // 'vminfo' is the second. +- Register O1_destslot = O1_scratch; +- load_conversion_vminfo(_masm, G3_amh_conversion, O1_destslot); +- __ add(__ argument_address(O1_destslot, O1_destslot), O1_destslot); +- if (VerifyMethodHandles) +- verify_argslot(_masm, O1_destslot, O2_scratch, "swap point must fall within current frame"); +- +- assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here"); +- if (!rotate) { +- // simple swap +- for (int i = 0; i < swap_slots; i++) { +- __ ld_ptr( Address(O0_argslot, i * wordSize), O2_scratch); +- __ ld_ptr( Address(O1_destslot, i * wordSize), O3_scratch); +- __ st_ptr(O3_scratch, Address(O0_argslot, i * wordSize)); +- __ st_ptr(O2_scratch, Address(O1_destslot, i * wordSize)); +- } +- } else { +- // A rotate is actually pair of moves, with an "odd slot" (or pair) +- // changing place with a series of other slots. +- // First, push the "odd slot", which is going to get overwritten +- switch (swap_slots) { +- case 2 : __ ld_ptr(Address(O0_argslot, 1 * wordSize), O4_scratch); // fall-thru +- case 1 : __ ld_ptr(Address(O0_argslot, 0 * wordSize), O3_scratch); break; +- default: ShouldNotReachHere(); +- } +- if (rotate > 0) { +- // Here is rotate > 0: +- // (low mem) (high mem) +- // | dest: more_slots... | arg: odd_slot :arg+1 | +- // => +- // | dest: odd_slot | dest+1: more_slots... 
:arg+1 | +- // work argslot down to destslot, copying contiguous data upwards +- // pseudo-code: +- // argslot = src_addr - swap_bytes +- // destslot = dest_addr +- // while (argslot >= destslot) *(argslot + swap_bytes) = *(argslot + 0), argslot--; +- move_arg_slots_up(_masm, +- O1_destslot, +- Address(O0_argslot, 0), +- swap_slots, +- O0_argslot, O2_scratch); +- } else { +- // Here is the other direction, rotate < 0: +- // (low mem) (high mem) +- // | arg: odd_slot | arg+1: more_slots... :dest+1 | +- // => +- // | arg: more_slots... | dest: odd_slot :dest+1 | +- // work argslot up to destslot, copying contiguous data downwards +- // pseudo-code: +- // argslot = src_addr + swap_bytes +- // destslot = dest_addr +- // while (argslot <= destslot) *(argslot - swap_bytes) = *(argslot + 0), argslot++; +- // dest_slot denotes an exclusive upper limit +- int limit_bias = OP_ROT_ARGS_DOWN_LIMIT_BIAS; +- if (limit_bias != 0) +- __ add(O1_destslot, - limit_bias * wordSize, O1_destslot); +- move_arg_slots_down(_masm, +- Address(O0_argslot, swap_slots * wordSize), +- O1_destslot, +- -swap_slots, +- O0_argslot, O2_scratch); +- +- __ sub(O1_destslot, swap_slots * wordSize, O1_destslot); +- } +- // pop the original first chunk into the destination slot, now free +- switch (swap_slots) { +- case 2 : __ st_ptr(O4_scratch, Address(O1_destslot, 1 * wordSize)); // fall-thru +- case 1 : __ st_ptr(O3_scratch, Address(O1_destslot, 0 * wordSize)); break; +- default: ShouldNotReachHere(); +- } +- } +- +- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); +- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); +- } +- break; +- +- case _adapter_dup_args: +- { +- // 'argslot' is the position of the first argument to duplicate. +- load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); +- __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); +- +- // 'stack_move' is negative number of words to duplicate. 
+- Register O1_stack_move = O1_scratch; +- load_stack_move(_masm, G3_amh_conversion, O1_stack_move); +- +- if (VerifyMethodHandles) { +- verify_argslots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, true, +- "copied argument(s) must fall within current frame"); +- } +- +- if (UseStackBanging) { +- // Save G3_method_handle since bang_stack_with_offset uses it as a temp register +- __ mov(G3_method_handle, O3_scratch); +- // Bang the stack before pushing args. +- int frame_size = 256 * Interpreter::stackElementSize; // conservative +- __ generate_stack_overflow_check(frame_size + sizeof(RicochetFrame)); +- __ mov(O3_scratch, G3_method_handle); +- } +- // insert location is always the bottom of the argument list: +- __ neg(O1_stack_move); +- push_arg_slots(_masm, O0_argslot, O1_stack_move, O2_scratch, O3_scratch); +- +- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); +- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); +- } +- break; +- +- case _adapter_drop_args: +- { +- // 'argslot' is the position of the first argument to nuke. +- load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); +- __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); +- +- // 'stack_move' is number of words to drop. +- Register O1_stack_move = O1_scratch; +- load_stack_move(_masm, G3_amh_conversion, O1_stack_move); +- +- remove_arg_slots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, O4_scratch); +- +- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); +- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); +- } +- break; +- +- case _adapter_collect_args: +- case _adapter_fold_args: +- case _adapter_spread_args: +- // Handled completely by optimized cases. 
+- __ stop("init_AdapterMethodHandle should not issue this"); +- break; +- +- case _adapter_opt_collect_ref: +- case _adapter_opt_collect_int: +- case _adapter_opt_collect_long: +- case _adapter_opt_collect_float: +- case _adapter_opt_collect_double: +- case _adapter_opt_collect_void: +- case _adapter_opt_collect_0_ref: +- case _adapter_opt_collect_1_ref: +- case _adapter_opt_collect_2_ref: +- case _adapter_opt_collect_3_ref: +- case _adapter_opt_collect_4_ref: +- case _adapter_opt_collect_5_ref: +- case _adapter_opt_filter_S0_ref: +- case _adapter_opt_filter_S1_ref: +- case _adapter_opt_filter_S2_ref: +- case _adapter_opt_filter_S3_ref: +- case _adapter_opt_filter_S4_ref: +- case _adapter_opt_filter_S5_ref: +- case _adapter_opt_collect_2_S0_ref: +- case _adapter_opt_collect_2_S1_ref: +- case _adapter_opt_collect_2_S2_ref: +- case _adapter_opt_collect_2_S3_ref: +- case _adapter_opt_collect_2_S4_ref: +- case _adapter_opt_collect_2_S5_ref: +- case _adapter_opt_fold_ref: +- case _adapter_opt_fold_int: +- case _adapter_opt_fold_long: +- case _adapter_opt_fold_float: +- case _adapter_opt_fold_double: +- case _adapter_opt_fold_void: +- case _adapter_opt_fold_1_ref: +- case _adapter_opt_fold_2_ref: +- case _adapter_opt_fold_3_ref: +- case _adapter_opt_fold_4_ref: +- case _adapter_opt_fold_5_ref: +- { +- // Given a fresh incoming stack frame, build a new ricochet frame. +- // On entry, TOS points at a return PC, and FP is the callers frame ptr. +- // RSI/R13 has the caller's exact stack pointer, which we must also preserve. +- // RCX contains an AdapterMethodHandle of the indicated kind. +- +- // Relevant AMH fields: +- // amh.vmargslot: +- // points to the trailing edge of the arguments +- // to filter, collect, or fold. For a boxing operation, +- // it points just after the single primitive value. +- // amh.argument: +- // recursively called MH, on |collect| arguments +- // amh.vmtarget: +- // final destination MH, on return value, etc. 
+- // amh.conversion.dest: +- // tells what is the type of the return value +- // (not needed here, since dest is also derived from ek) +- // amh.conversion.vminfo: +- // points to the trailing edge of the return value +- // when the vmtarget is to be called; this is +- // equal to vmargslot + (retained ? |collect| : 0) +- +- // Pass 0 or more argument slots to the recursive target. +- int collect_count_constant = ek_adapter_opt_collect_count(ek); +- +- // The collected arguments are copied from the saved argument list: +- int collect_slot_constant = ek_adapter_opt_collect_slot(ek); +- +- assert(ek_orig == _adapter_collect_args || +- ek_orig == _adapter_fold_args, ""); +- bool retain_original_args = (ek_orig == _adapter_fold_args); +- +- // The return value is replaced (or inserted) at the 'vminfo' argslot. +- // Sometimes we can compute this statically. +- int dest_slot_constant = -1; +- if (!retain_original_args) +- dest_slot_constant = collect_slot_constant; +- else if (collect_slot_constant >= 0 && collect_count_constant >= 0) +- // We are preserving all the arguments, and the return value is prepended, +- // so the return slot is to the left (above) the |collect| sequence. +- dest_slot_constant = collect_slot_constant + collect_count_constant; +- +- // Replace all those slots by the result of the recursive call. +- // The result type can be one of ref, int, long, float, double, void. +- // In the case of void, nothing is pushed on the stack after return. +- BasicType dest = ek_adapter_opt_collect_type(ek); +- assert(dest == type2wfield[dest], "dest is a stack slot type"); +- int dest_count = type2size[dest]; +- assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size"); +- +- // Choose a return continuation. 
+- EntryKind ek_ret = _adapter_opt_return_any; +- if (dest != T_CONFLICT && OptimizeMethodHandles) { +- switch (dest) { +- case T_INT : ek_ret = _adapter_opt_return_int; break; +- case T_LONG : ek_ret = _adapter_opt_return_long; break; +- case T_FLOAT : ek_ret = _adapter_opt_return_float; break; +- case T_DOUBLE : ek_ret = _adapter_opt_return_double; break; +- case T_OBJECT : ek_ret = _adapter_opt_return_ref; break; +- case T_VOID : ek_ret = _adapter_opt_return_void; break; +- default : ShouldNotReachHere(); +- } +- if (dest == T_OBJECT && dest_slot_constant >= 0) { +- EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant); +- if (ek_try <= _adapter_opt_return_LAST && +- ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) { +- ek_ret = ek_try; +- } +- } +- assert(ek_adapter_opt_return_type(ek_ret) == dest, ""); +- } +- +- // Already pushed: ... keep1 | collect | keep2 | +- +- // Push a few extra argument words, if we need them to store the return value. +- { +- int extra_slots = 0; +- if (retain_original_args) { +- extra_slots = dest_count; +- } else if (collect_count_constant == -1) { +- extra_slots = dest_count; // collect_count might be zero; be generous +- } else if (dest_count > collect_count_constant) { +- extra_slots = (dest_count - collect_count_constant); +- } else { +- // else we know we have enough dead space in |collect| to repurpose for return values +- } +- if (extra_slots != 0) { +- __ sub(SP, round_to(extra_slots, 2) * Interpreter::stackElementSize, SP); +- } +- } +- +- // Set up Ricochet Frame. +- __ mov(SP, O5_savedSP); // record SP for the callee +- +- // One extra (empty) slot for outgoing target MH (see Gargs computation below). +- __ save_frame(2); // Note: we need to add 2 slots since frame::memory_parameter_word_sp_offset is 23. +- +- // Note: Gargs is live throughout the following, until we make our recursive call. +- // And the RF saves a copy in L4_saved_args_base. 
+- +- RicochetFrame::enter_ricochet_frame(_masm, G3_method_handle, Gargs, +- entry(ek_ret)->from_interpreted_entry()); +- +- // Compute argument base: +- // Set up Gargs for current frame, extra (empty) slot is for outgoing target MH (space reserved by save_frame above). +- __ add(FP, STACK_BIAS - (1 * Interpreter::stackElementSize), Gargs); +- +- // Now pushed: ... keep1 | collect | keep2 | extra | [RF] +- +-#ifdef ASSERT +- if (VerifyMethodHandles && dest != T_CONFLICT) { +- BLOCK_COMMENT("verify AMH.conv.dest {"); +- extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O1_scratch); +- Label L_dest_ok; +- __ cmp(O1_scratch, (int) dest); +- __ br(Assembler::equal, false, Assembler::pt, L_dest_ok); +- __ delayed()->nop(); +- if (dest == T_INT) { +- for (int bt = T_BOOLEAN; bt < T_INT; bt++) { +- if (is_subword_type(BasicType(bt))) { +- __ cmp(O1_scratch, (int) bt); +- __ br(Assembler::equal, false, Assembler::pt, L_dest_ok); +- __ delayed()->nop(); +- } +- } +- } +- __ stop("bad dest in AMH.conv"); +- __ BIND(L_dest_ok); +- BLOCK_COMMENT("} verify AMH.conv.dest"); +- } +-#endif //ASSERT +- +- // Find out where the original copy of the recursive argument sequence begins. +- Register O0_coll = O0_scratch; +- { +- RegisterOrConstant collect_slot = collect_slot_constant; +- if (collect_slot_constant == -1) { +- load_vmargslot(_masm, G3_amh_vmargslot, O1_scratch); +- collect_slot = O1_scratch; +- } +- // collect_slot might be 0, but we need the move anyway. +- __ add(RicochetFrame::L4_saved_args_base, __ argument_offset(collect_slot, collect_slot.register_or_noreg()), O0_coll); +- // O0_coll now points at the trailing edge of |collect| and leading edge of |keep2| +- } +- +- // Replace the old AMH with the recursive MH. (No going back now.) +- // In the case of a boxing call, the recursive call is to a 'boxer' method, +- // such as Integer.valueOf or Long.valueOf. 
In the case of a filter +- // or collect call, it will take one or more arguments, transform them, +- // and return some result, to store back into argument_base[vminfo]. +- __ load_heap_oop(G3_amh_argument, G3_method_handle); +- if (VerifyMethodHandles) verify_method_handle(_masm, G3_method_handle, O1_scratch, O2_scratch); +- +- // Calculate |collect|, the number of arguments we are collecting. +- Register O1_collect_count = O1_scratch; +- RegisterOrConstant collect_count; +- if (collect_count_constant < 0) { +- __ load_method_handle_vmslots(O1_collect_count, G3_method_handle, O2_scratch); +- collect_count = O1_collect_count; +- } else { +- collect_count = collect_count_constant; +-#ifdef ASSERT +- if (VerifyMethodHandles) { +- BLOCK_COMMENT("verify collect_count_constant {"); +- __ load_method_handle_vmslots(O3_scratch, G3_method_handle, O2_scratch); +- Label L_count_ok; +- __ cmp_and_br_short(O3_scratch, collect_count_constant, Assembler::equal, Assembler::pt, L_count_ok); +- __ stop("bad vminfo in AMH.conv"); +- __ BIND(L_count_ok); +- BLOCK_COMMENT("} verify collect_count_constant"); +- } +-#endif //ASSERT +- } +- +- // copy |collect| slots directly to TOS: +- push_arg_slots(_masm, O0_coll, collect_count, O2_scratch, O3_scratch); +- // Now pushed: ... keep1 | collect | keep2 | RF... | collect | +- // O0_coll still points at the trailing edge of |collect| and leading edge of |keep2| +- +- // If necessary, adjust the saved arguments to make room for the eventual return value. +- // Normal adjustment: ... keep1 | +dest+ | -collect- | keep2 | RF... | collect | +- // If retaining args: ... keep1 | +dest+ | collect | keep2 | RF... | collect | +- // In the non-retaining case, this might move keep2 either up or down. +- // We don't have to copy the whole | RF... collect | complex, +- // but we must adjust RF.saved_args_base. +- // Also, from now on, we will forget about the original copy of |collect|. +- // If we are retaining it, we will treat it as part of |keep2|. 
+- // For clarity we will define |keep3| = |collect|keep2| or |keep2|. +- +- BLOCK_COMMENT("adjust trailing arguments {"); +- // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements. +- int open_count = dest_count; +- RegisterOrConstant close_count = collect_count_constant; +- Register O1_close_count = O1_collect_count; +- if (retain_original_args) { +- close_count = constant(0); +- } else if (collect_count_constant == -1) { +- close_count = O1_collect_count; +- } +- +- // How many slots need moving? This is simply dest_slot (0 => no |keep3|). +- RegisterOrConstant keep3_count; +- Register O2_keep3_count = O2_scratch; +- if (dest_slot_constant < 0) { +- extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O2_keep3_count); +- keep3_count = O2_keep3_count; +- } else { +- keep3_count = dest_slot_constant; +-#ifdef ASSERT +- if (VerifyMethodHandles && dest_slot_constant < 0) { +- BLOCK_COMMENT("verify dest_slot_constant {"); +- extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O3_scratch); +- Label L_vminfo_ok; +- __ cmp_and_br_short(O3_scratch, dest_slot_constant, Assembler::equal, Assembler::pt, L_vminfo_ok); +- __ stop("bad vminfo in AMH.conv"); +- __ BIND(L_vminfo_ok); +- BLOCK_COMMENT("} verify dest_slot_constant"); +- } +-#endif //ASSERT +- } +- +- // tasks remaining: +- bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0); +- bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0)); +- bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant()); +- +- // Old and new argument locations (based at slot 0). +- // Net shift (&new_argv - &old_argv) is (close_count - open_count). 
+- bool zero_open_count = (open_count == 0); // remember this bit of info +- if (move_keep3 && fix_arg_base) { +- // It will be easier to have everything in one register: +- if (close_count.is_register()) { +- // Deduct open_count from close_count register to get a clean +/- value. +- __ sub(close_count.as_register(), open_count, close_count.as_register()); +- } else { +- close_count = close_count.as_constant() - open_count; +- } +- open_count = 0; +- } +- Register L4_old_argv = RicochetFrame::L4_saved_args_base; +- Register O3_new_argv = O3_scratch; +- if (fix_arg_base) { +- __ add(L4_old_argv, __ argument_offset(close_count, O4_scratch), O3_new_argv, +- -(open_count * Interpreter::stackElementSize)); +- } +- +- // First decide if any actual data are to be moved. +- // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change. +- // (As it happens, all movements involve an argument list size change.) +- +- // If there are variable parameters, use dynamic checks to skip around the whole mess. 
+- Label L_done; +- if (keep3_count.is_register()) { +- __ cmp_and_br_short(keep3_count.as_register(), 0, Assembler::equal, Assembler::pn, L_done); +- } +- if (close_count.is_register()) { +- __ cmp_and_br_short(close_count.as_register(), open_count, Assembler::equal, Assembler::pn, L_done); +- } +- +- if (move_keep3 && fix_arg_base) { +- bool emit_move_down = false, emit_move_up = false, emit_guard = false; +- if (!close_count.is_constant()) { +- emit_move_down = emit_guard = !zero_open_count; +- emit_move_up = true; +- } else if (open_count != close_count.as_constant()) { +- emit_move_down = (open_count > close_count.as_constant()); +- emit_move_up = !emit_move_down; +- } +- Label L_move_up; +- if (emit_guard) { +- __ cmp(close_count.as_register(), open_count); +- __ br(Assembler::greater, false, Assembler::pn, L_move_up); +- __ delayed()->nop(); +- } +- +- if (emit_move_down) { +- // Move arguments down if |+dest+| > |-collect-| +- // (This is rare, except when arguments are retained.) +- // This opens space for the return value. +- if (keep3_count.is_constant()) { +- for (int i = 0; i < keep3_count.as_constant(); i++) { +- __ ld_ptr( Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch); +- __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize) ); +- } +- } else { +- // Live: O1_close_count, O2_keep3_count, O3_new_argv +- Register argv_top = O0_scratch; +- __ add(L4_old_argv, __ argument_offset(keep3_count, O4_scratch), argv_top); +- move_arg_slots_down(_masm, +- Address(L4_old_argv, 0), // beginning of old argv +- argv_top, // end of old argv +- close_count, // distance to move down (must be negative) +- O4_scratch, G5_scratch); +- } +- } +- +- if (emit_guard) { +- __ ba_short(L_done); // assumes emit_move_up is true also +- __ BIND(L_move_up); +- } +- +- if (emit_move_up) { +- // Move arguments up if |+dest+| < |-collect-| +- // (This is usual, except when |keep3| is empty.) 
+- // This closes up the space occupied by the now-deleted collect values. +- if (keep3_count.is_constant()) { +- for (int i = keep3_count.as_constant() - 1; i >= 0; i--) { +- __ ld_ptr( Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch); +- __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize) ); +- } +- } else { +- Address argv_top(L4_old_argv, __ argument_offset(keep3_count, O4_scratch)); +- // Live: O1_close_count, O2_keep3_count, O3_new_argv +- move_arg_slots_up(_masm, +- L4_old_argv, // beginning of old argv +- argv_top, // end of old argv +- close_count, // distance to move up (must be positive) +- O4_scratch, G5_scratch); +- } +- } +- } +- __ BIND(L_done); +- +- if (fix_arg_base) { +- // adjust RF.saved_args_base +- __ mov(O3_new_argv, RicochetFrame::L4_saved_args_base); +- } +- +- if (stomp_dest) { +- // Stomp the return slot, so it doesn't hold garbage. +- // This isn't strictly necessary, but it may help detect bugs. +- __ set(RicochetFrame::RETURN_VALUE_PLACEHOLDER, O4_scratch); +- __ st_ptr(O4_scratch, Address(RicochetFrame::L4_saved_args_base, +- __ argument_offset(keep3_count, keep3_count.register_or_noreg()))); // uses O2_keep3_count +- } +- BLOCK_COMMENT("} adjust trailing arguments"); +- +- BLOCK_COMMENT("do_recursive_call"); +- __ mov(SP, O5_savedSP); // record SP for the callee +- __ set(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr() - frame::pc_return_offset), O7); +- // The globally unique bounce address has two purposes: +- // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame). +- // 2. When returned to, it cuts back the stack and redirects control flow +- // to the return handler. +- // The return handler will further cut back the stack when it takes +- // down the RF. Perhaps there is a way to streamline this further. 
+- +- if (UseStackBanging) { +- // Save G3_method_handle since bang_stack_with_offset uses it as a temp register +- __ mov(G3_method_handle, O4_scratch); +- // Bang the stack before recursive call. +- // Even if slots == 0, we are inside a RicochetFrame. +- int frame_size = collect_count.is_constant() ? collect_count.as_constant() * wordSize : -1; +- if (frame_size < 0) { +- frame_size = 256 * Interpreter::stackElementSize; // conservative +- } +- __ generate_stack_overflow_check(frame_size + sizeof(RicochetFrame)); +- __ mov(O4_scratch, G3_method_handle); +- } +- // State during recursive call: +- // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc | +- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); +- } +- break; +- +- case _adapter_opt_return_ref: +- case _adapter_opt_return_int: +- case _adapter_opt_return_long: +- case _adapter_opt_return_float: +- case _adapter_opt_return_double: +- case _adapter_opt_return_void: +- case _adapter_opt_return_S0_ref: +- case _adapter_opt_return_S1_ref: +- case _adapter_opt_return_S2_ref: +- case _adapter_opt_return_S3_ref: +- case _adapter_opt_return_S4_ref: +- case _adapter_opt_return_S5_ref: +- { +- BasicType dest_type_constant = ek_adapter_opt_return_type(ek); +- int dest_slot_constant = ek_adapter_opt_return_slot(ek); +- +- if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); +- +- if (dest_slot_constant == -1) { +- // The current stub is a general handler for this dest_type. +- // It can be called from _adapter_opt_return_any below. +- // Stash the address in a little table. 
+- assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob"); +- address return_handler = __ pc(); +- _adapter_return_handlers[dest_type_constant] = return_handler; +- if (dest_type_constant == T_INT) { +- // do the subword types too +- for (int bt = T_BOOLEAN; bt < T_INT; bt++) { +- if (is_subword_type(BasicType(bt)) && +- _adapter_return_handlers[bt] == NULL) { +- _adapter_return_handlers[bt] = return_handler; +- } +- } +- } +- } +- +- // On entry to this continuation handler, make Gargs live again. +- __ mov(RicochetFrame::L4_saved_args_base, Gargs); +- +- Register O7_temp = O7; +- Register O5_vminfo = O5; +- +- RegisterOrConstant dest_slot = dest_slot_constant; +- if (dest_slot_constant == -1) { +- extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O5_vminfo); +- dest_slot = O5_vminfo; +- } +- // Store the result back into the argslot. +- // This code uses the interpreter calling sequence, in which the return value +- // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop. +- // There are certain irregularities with floating point values, which can be seen +- // in TemplateInterpreterGenerator::generate_return_entry_for. +- move_return_value(_masm, dest_type_constant, __ argument_address(dest_slot, O7_temp)); +- +- RicochetFrame::leave_ricochet_frame(_masm, G3_method_handle, I5_savedSP, I7); +- +- // Load the final target and go. 
+- if (VerifyMethodHandles) verify_method_handle(_masm, G3_method_handle, O0_scratch, O1_scratch); +- __ restore(I5_savedSP, G0, SP); +- __ jump_to_method_handle_entry(G3_method_handle, O0_scratch); +- __ illtrap(0); +- } +- break; +- +- case _adapter_opt_return_any: +- { +- Register O7_temp = O7; +- Register O5_dest_type = O5; +- +- if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); +- extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O5_dest_type); +- __ set(ExternalAddress((address) &_adapter_return_handlers[0]), O7_temp); +- __ sll_ptr(O5_dest_type, LogBytesPerWord, O5_dest_type); +- __ ld_ptr(O7_temp, O5_dest_type, O7_temp); +- +-#ifdef ASSERT +- { Label L_ok; +- __ br_notnull_short(O7_temp, Assembler::pt, L_ok); +- __ stop("bad method handle return"); +- __ BIND(L_ok); +- } +-#endif //ASSERT +- __ JMP(O7_temp, 0); +- __ delayed()->nop(); +- } +- break; +- +- case _adapter_opt_spread_0: +- case _adapter_opt_spread_1_ref: +- case _adapter_opt_spread_2_ref: +- case _adapter_opt_spread_3_ref: +- case _adapter_opt_spread_4_ref: +- case _adapter_opt_spread_5_ref: +- case _adapter_opt_spread_ref: +- case _adapter_opt_spread_byte: +- case _adapter_opt_spread_char: +- case _adapter_opt_spread_short: +- case _adapter_opt_spread_int: +- case _adapter_opt_spread_long: +- case _adapter_opt_spread_float: +- case _adapter_opt_spread_double: +- { +- // spread an array out into a group of arguments +- int length_constant = ek_adapter_opt_spread_count(ek); +- bool length_can_be_zero = (length_constant == 0); +- if (length_constant < 0) { +- // some adapters with variable length must handle the zero case +- if (!OptimizeMethodHandles || +- ek_adapter_opt_spread_type(ek) != T_OBJECT) +- length_can_be_zero = true; +- } +- +- // find the address of the array argument +- load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); +- __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); +- +- // O0_argslot points both to the array and to the first 
output arg +- Address vmarg = Address(O0_argslot, 0); +- +- // Get the array value. +- Register O1_array = O1_scratch; +- Register O2_array_klass = O2_scratch; +- BasicType elem_type = ek_adapter_opt_spread_type(ek); +- int elem_slots = type2size[elem_type]; // 1 or 2 +- int array_slots = 1; // array is always a T_OBJECT +- int length_offset = arrayOopDesc::length_offset_in_bytes(); +- int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type); +- __ ld_ptr(vmarg, O1_array); +- +- Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done; +- if (length_can_be_zero) { +- // handle the null pointer case, if zero is allowed +- Label L_skip; +- if (length_constant < 0) { +- load_conversion_vminfo(_masm, G3_amh_conversion, O3_scratch); +- __ cmp_zero_and_br(Assembler::notZero, O3_scratch, L_skip); +- __ delayed()->nop(); // to avoid back-to-back cbcond instructions +- } +- __ br_null_short(O1_array, Assembler::pn, L_array_is_empty); +- __ BIND(L_skip); +- } +- __ null_check(O1_array, oopDesc::klass_offset_in_bytes()); +- __ load_klass(O1_array, O2_array_klass); +- +- // Check the array type. +- Register O3_klass = O3_scratch; +- __ load_heap_oop(G3_amh_argument, O3_klass); // this is a Class object! +- load_klass_from_Class(_masm, O3_klass, O4_scratch, G5_scratch); +- +- Label L_ok_array_klass, L_bad_array_klass, L_bad_array_length; +- __ check_klass_subtype(O2_array_klass, O3_klass, O4_scratch, G5_scratch, L_ok_array_klass); +- // If we get here, the type check failed! +- __ ba_short(L_bad_array_klass); +- __ BIND(L_ok_array_klass); +- +- // Check length. 
+- if (length_constant >= 0) { +- __ ldsw(Address(O1_array, length_offset), O4_scratch); +- __ cmp(O4_scratch, length_constant); +- } else { +- Register O3_vminfo = O3_scratch; +- load_conversion_vminfo(_masm, G3_amh_conversion, O3_vminfo); +- __ ldsw(Address(O1_array, length_offset), O4_scratch); +- __ cmp(O3_vminfo, O4_scratch); +- } +- __ br(Assembler::notEqual, false, Assembler::pn, L_bad_array_length); +- __ delayed()->nop(); +- +- Register O2_argslot_limit = O2_scratch; +- +- // Array length checks out. Now insert any required stack slots. +- if (length_constant == -1) { +- // Form a pointer to the end of the affected region. +- __ add(O0_argslot, Interpreter::stackElementSize, O2_argslot_limit); +- // 'stack_move' is negative number of words to insert +- // This number already accounts for elem_slots. +- Register O3_stack_move = O3_scratch; +- load_stack_move(_masm, G3_amh_conversion, O3_stack_move); +- __ cmp(O3_stack_move, 0); +- assert(stack_move_unit() < 0, "else change this comparison"); +- __ br(Assembler::less, false, Assembler::pn, L_insert_arg_space); +- __ delayed()->nop(); +- __ br(Assembler::equal, false, Assembler::pn, L_copy_args); +- __ delayed()->nop(); +- // single argument case, with no array movement +- __ BIND(L_array_is_empty); +- remove_arg_slots(_masm, -stack_move_unit() * array_slots, +- O0_argslot, O1_scratch, O2_scratch, O3_scratch); +- __ ba_short(L_args_done); // no spreading to do +- __ BIND(L_insert_arg_space); +- // come here in the usual case, stack_move < 0 (2 or more spread arguments) +- // Live: O1_array, O2_argslot_limit, O3_stack_move +- insert_arg_slots(_masm, O3_stack_move, +- O0_argslot, O4_scratch, G5_scratch, O1_scratch); +- // reload from rdx_argslot_limit since rax_argslot is now decremented +- __ ld_ptr(Address(O2_argslot_limit, -Interpreter::stackElementSize), O1_array); +- } else if (length_constant >= 1) { +- int new_slots = (length_constant * elem_slots) - array_slots; +- insert_arg_slots(_masm, new_slots * 
stack_move_unit(), +- O0_argslot, O2_scratch, O3_scratch, O4_scratch); +- } else if (length_constant == 0) { +- __ BIND(L_array_is_empty); +- remove_arg_slots(_masm, -stack_move_unit() * array_slots, +- O0_argslot, O1_scratch, O2_scratch, O3_scratch); +- } else { +- ShouldNotReachHere(); +- } +- +- // Copy from the array to the new slots. +- // Note: Stack change code preserves integrity of O0_argslot pointer. +- // So even after slot insertions, O0_argslot still points to first argument. +- // Beware: Arguments that are shallow on the stack are deep in the array, +- // and vice versa. So a downward-growing stack (the usual) has to be copied +- // elementwise in reverse order from the source array. +- __ BIND(L_copy_args); +- if (length_constant == -1) { +- // [O0_argslot, O2_argslot_limit) is the area we are inserting into. +- // Array element [0] goes at O0_argslot_limit[-wordSize]. +- Register O1_source = O1_array; +- __ add(Address(O1_array, elem0_offset), O1_source); +- Register O4_fill_ptr = O4_scratch; +- __ mov(O2_argslot_limit, O4_fill_ptr); +- Label L_loop; +- __ BIND(L_loop); +- __ add(O4_fill_ptr, -Interpreter::stackElementSize * elem_slots, O4_fill_ptr); +- move_typed_arg(_masm, elem_type, true, +- Address(O1_source, 0), Address(O4_fill_ptr, 0), +- O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3) +- __ add(O1_source, type2aelembytes(elem_type), O1_source); +- __ cmp_and_brx_short(O4_fill_ptr, O0_argslot, Assembler::greaterUnsigned, Assembler::pt, L_loop); +- } else if (length_constant == 0) { +- // nothing to copy +- } else { +- int elem_offset = elem0_offset; +- int slot_offset = length_constant * Interpreter::stackElementSize; +- for (int index = 0; index < length_constant; index++) { +- slot_offset -= Interpreter::stackElementSize * elem_slots; // fill backward +- move_typed_arg(_masm, elem_type, true, +- Address(O1_array, elem_offset), Address(O0_argslot, slot_offset), +- O2_scratch); // must be an even register for 
!_LP64 long moves (uses O2/O3) +- elem_offset += type2aelembytes(elem_type); +- } +- } +- __ BIND(L_args_done); +- +- // Arguments are spread. Move to next method handle. +- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); +- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); +- +- __ BIND(L_bad_array_klass); +- assert(!vmarg.uses(O2_required), "must be different registers"); +- __ load_heap_oop(Address(O2_array_klass, java_mirror_offset), O2_required); // required class +- __ ld_ptr( vmarg, O1_actual); // bad object +- __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch); +- __ delayed()->mov(Bytecodes::_aaload, O0_code); // who is complaining? +- +- __ bind(L_bad_array_length); +- assert(!vmarg.uses(O2_required), "must be different registers"); +- __ mov( G3_method_handle, O2_required); // required class +- __ ld_ptr(vmarg, O1_actual); // bad object +- __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch); +- __ delayed()->mov(Bytecodes::_arraylength, O0_code); // who is complaining? 
+- } +- break; +- +- default: +- DEBUG_ONLY(tty->print_cr("bad ek=%d (%s)", (int)ek, entry_name(ek))); +- ShouldNotReachHere(); +- } +- BLOCK_COMMENT(err_msg("} Entry %s", entry_name(ek))); +- +- address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry); +- __ unimplemented(entry_name(ek)); // %%% FIXME: NYI +- +- init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie)); +-} +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/methodHandles_sparc.hpp +--- openjdk/hotspot/src/cpu/sparc/vm/methodHandles_sparc.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/methodHandles_sparc.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -30,186 +30,9 @@ + adapter_code_size = NOT_LP64(23000 DEBUG_ONLY(+ 40000)) LP64_ONLY(35000 DEBUG_ONLY(+ 50000)) + }; + +-public: +- +-class RicochetFrame : public ResourceObj { +- friend class MethodHandles; +- +- private: +- /* +- RF field x86 SPARC +- sender_pc *(rsp+0) I7-0x8 +- sender_link rbp I6+BIAS +- exact_sender_sp rsi/r13 I5_savedSP +- conversion *(rcx+&amh_conv) L5_conv +- saved_args_base rax L4_sab (cf. Gargs = G4) +- saved_args_layout #NULL L3_sal +- saved_target *(rcx+&mh_vmtgt) L2_stgt +- continuation #STUB_CON L1_cont +- */ +- static const Register L1_continuation ; // what to do when control gets back here +- static const Register L2_saved_target ; // target method handle to invoke on saved_args +- static const Register L3_saved_args_layout; // caching point for MethodTypeForm.vmlayout cookie +- static const Register L4_saved_args_base ; // base of pushed arguments (slot 0, arg N) (-3) +- static const Register L5_conversion ; // misc. 
information from original AdapterMethodHandle (-2) +- +- frame _fr; +- +- RicochetFrame(const frame& fr) : _fr(fr) { } +- +- intptr_t* register_addr(Register reg) const { +- assert((_fr.sp() + reg->sp_offset_in_saved_window()) == _fr.register_addr(reg), "must agree"); +- return _fr.register_addr(reg); +- } +- intptr_t register_value(Register reg) const { return *register_addr(reg); } +- +- public: +- intptr_t* continuation() const { return (intptr_t*) register_value(L1_continuation); } +- oop saved_target() const { return (oop) register_value(L2_saved_target); } +- oop saved_args_layout() const { return (oop) register_value(L3_saved_args_layout); } +- intptr_t* saved_args_base() const { return (intptr_t*) register_value(L4_saved_args_base); } +- intptr_t conversion() const { return register_value(L5_conversion); } +- intptr_t* exact_sender_sp() const { return (intptr_t*) register_value(I5_savedSP); } +- intptr_t* sender_link() const { return _fr.sender_sp(); } // XXX +- address sender_pc() const { return _fr.sender_pc(); } +- +- // This value is not used for much, but it apparently must be nonzero. 
+- static int frame_size_in_bytes() { return wordSize * 4; } +- +- intptr_t* extended_sender_sp() const { return saved_args_base(); } +- +- intptr_t return_value_slot_number() const { +- return adapter_conversion_vminfo(conversion()); +- } +- BasicType return_value_type() const { +- return adapter_conversion_dest_type(conversion()); +- } +- bool has_return_value_slot() const { +- return return_value_type() != T_VOID; +- } +- intptr_t* return_value_slot_addr() const { +- assert(has_return_value_slot(), ""); +- return saved_arg_slot_addr(return_value_slot_number()); +- } +- intptr_t* saved_target_slot_addr() const { +- return saved_arg_slot_addr(saved_args_length()); +- } +- intptr_t* saved_arg_slot_addr(int slot) const { +- assert(slot >= 0, ""); +- return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) ); +- } +- +- jint saved_args_length() const; +- jint saved_arg_offset(int arg) const; +- +- // GC interface +- oop* saved_target_addr() { return (oop*)register_addr(L2_saved_target); } +- oop* saved_args_layout_addr() { return (oop*)register_addr(L3_saved_args_layout); } +- +- oop compute_saved_args_layout(bool read_cache, bool write_cache); +- +-#ifdef ASSERT +- // The magic number is supposed to help find ricochet frames within the bytes of stack dumps. +- enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E }; +- static const Register L0_magic_number_1 ; // cookie for debugging, at start of RSA +- static Address magic_number_2_addr() { return Address(L4_saved_args_base, -wordSize); } +- intptr_t magic_number_1() const { return register_value(L0_magic_number_1); } +- intptr_t magic_number_2() const { return saved_args_base()[-1]; } +-#endif //ASSERT +- +- public: +- enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) }; +- +- void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc. 
+- +- static void generate_ricochet_blob(MacroAssembler* _masm, +- // output params: +- int* bounce_offset, +- int* exception_offset, +- int* frame_size_in_words); +- +- static void enter_ricochet_frame(MacroAssembler* _masm, +- Register recv_reg, +- Register argv_reg, +- address return_handler); +- +- static void leave_ricochet_frame(MacroAssembler* _masm, +- Register recv_reg, +- Register new_sp_reg, +- Register sender_pc_reg); +- +- static RicochetFrame* from_frame(const frame& fr) { +- RicochetFrame* rf = new RicochetFrame(fr); +- rf->verify(); +- return rf; +- } +- +- static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN; +- +- static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN; +-}; +- + // Additional helper methods for MethodHandles code generation: + public: + static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg); +- static void load_conversion_vminfo(MacroAssembler* _masm, Address conversion_field_addr, Register reg); +- static void extract_conversion_vminfo(MacroAssembler* _masm, Register conversion_field_reg, Register reg); +- static void extract_conversion_dest_type(MacroAssembler* _masm, Register conversion_field_reg, Register reg); +- +- static void load_stack_move(MacroAssembler* _masm, +- Address G3_amh_conversion, +- Register G5_stack_move); +- +- static void insert_arg_slots(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, +- Register argslot_reg, +- Register temp_reg, Register temp2_reg, Register temp3_reg); +- +- static void remove_arg_slots(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, +- Register argslot_reg, +- Register temp_reg, Register temp2_reg, Register temp3_reg); +- +- static void push_arg_slots(MacroAssembler* _masm, +- Register argslot_reg, +- RegisterOrConstant slot_count, +- Register temp_reg, Register temp2_reg); +- +- static void move_arg_slots_up(MacroAssembler* _masm, +- Register bottom_reg, // 
invariant +- Address top_addr, // can use temp_reg +- RegisterOrConstant positive_distance_in_slots, +- Register temp_reg, Register temp2_reg); +- +- static void move_arg_slots_down(MacroAssembler* _masm, +- Address bottom_addr, // can use temp_reg +- Register top_reg, // invariant +- RegisterOrConstant negative_distance_in_slots, +- Register temp_reg, Register temp2_reg); +- +- static void move_typed_arg(MacroAssembler* _masm, +- BasicType type, bool is_element, +- Address value_src, Address slot_dest, +- Register temp_reg); +- +- static void move_return_value(MacroAssembler* _masm, BasicType type, +- Address return_slot); +- +- static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, +- Register temp_reg, +- const char* error_message) NOT_DEBUG_RETURN; +- +- static void verify_argslots(MacroAssembler* _masm, +- RegisterOrConstant argslot_count, +- Register argslot_reg, +- Register temp_reg, +- Register temp2_reg, +- bool negate_argslot, +- const char* error_message) NOT_DEBUG_RETURN; +- +- static void verify_stack_move(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, +- int direction) NOT_DEBUG_RETURN; + + static void verify_klass(MacroAssembler* _masm, + Register obj_reg, KlassHandle klass, +@@ -223,8 +46,17 @@ + "reference is a MH"); + } + ++ static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN; ++ + // Similar to InterpreterMacroAssembler::jump_from_interpreted. + // Takes care of special dispatch from single stepping too. 
+- static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, Register temp2); ++ static void jump_from_method_handle(MacroAssembler* _masm, Register method, ++ Register temp, Register temp2, ++ bool for_compiler_entry); ++ ++ static void jump_to_lambda_form(MacroAssembler* _masm, ++ Register recv, Register method_temp, ++ Register temp2, Register temp3, ++ bool for_compiler_entry); + + static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN; +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/sharedRuntime_sparc.cpp +--- openjdk/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -400,13 +400,13 @@ + case T_LONG: // LP64, longs compete with int args + assert(sig_bt[i+1] == T_VOID, ""); + #ifdef _LP64 +- if (int_reg_cnt < int_reg_max) int_reg_cnt++; ++ if (int_reg_cnt < int_reg_max) int_reg_cnt++; + #endif + break; + case T_OBJECT: + case T_ARRAY: + case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address +- if (int_reg_cnt < int_reg_max) int_reg_cnt++; ++ if (int_reg_cnt < int_reg_max) int_reg_cnt++; + #ifndef _LP64 + else stk_reg_pairs++; + #endif +@@ -416,11 +416,11 @@ + case T_CHAR: + case T_BYTE: + case T_BOOLEAN: +- if (int_reg_cnt < int_reg_max) int_reg_cnt++; ++ if (int_reg_cnt < int_reg_max) int_reg_cnt++; + else stk_reg_pairs++; + break; + case T_FLOAT: +- if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++; ++ if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++; + else stk_reg_pairs++; + break; + case T_DOUBLE: +@@ -436,7 +436,6 @@ + // This is where the longs/doubles start on the stack. 
+ stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round + +- int int_reg_pairs = (int_reg_cnt+1) & ~1; // 32-bit 2-reg longs only + int flt_reg_pairs = (flt_reg_cnt+1) & ~1; + + // int stk_reg = frame::register_save_words*(wordSize>>2); +@@ -517,24 +516,15 @@ + stk_reg_pairs += 2; + } + #else // COMPILER2 +- if (int_reg_pairs + 1 < int_reg_max) { +- if (is_outgoing) { +- regs[i].set_pair(as_oRegister(int_reg_pairs + 1)->as_VMReg(), as_oRegister(int_reg_pairs)->as_VMReg()); +- } else { +- regs[i].set_pair(as_iRegister(int_reg_pairs + 1)->as_VMReg(), as_iRegister(int_reg_pairs)->as_VMReg()); +- } +- int_reg_pairs += 2; +- } else { + regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs)); + stk_reg_pairs += 2; +- } + #endif // COMPILER2 + #endif // _LP64 + break; + + case T_FLOAT: + if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg()); +- else regs[i].set1( VMRegImpl::stack2reg(stk_reg++)); ++ else regs[i].set1(VMRegImpl::stack2reg(stk_reg++)); + break; + case T_DOUBLE: + assert(sig_bt[i+1] == T_VOID, "expecting half"); +@@ -886,6 +876,20 @@ + __ delayed()->add(SP, G1, Gargs); + } + ++static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg, ++ address code_start, address code_end, ++ Label& L_ok) { ++ Label L_fail; ++ __ set(ExternalAddress(code_start), temp_reg); ++ __ set(pointer_delta(code_end, code_start, 1), temp2_reg); ++ __ cmp(pc_reg, temp_reg); ++ __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail); ++ __ delayed()->add(temp_reg, temp2_reg, temp_reg); ++ __ cmp(pc_reg, temp_reg); ++ __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok); ++ __ bind(L_fail); ++} ++ + void AdapterGenerator::gen_i2c_adapter( + int total_args_passed, + // VMReg max_arg, +@@ -907,6 +911,51 @@ + // This removes all sorts of headaches on the x86 side and also eliminates + // the possibility of having c2i -> i2c -> c2i -> ... endless transitions. 
+ ++ // More detail: ++ // Adapters can be frameless because they do not require the caller ++ // to perform additional cleanup work, such as correcting the stack pointer. ++ // An i2c adapter is frameless because the *caller* frame, which is interpreted, ++ // routinely repairs its own stack pointer (from interpreter_frame_last_sp), ++ // even if a callee has modified the stack pointer. ++ // A c2i adapter is frameless because the *callee* frame, which is interpreted, ++ // routinely repairs its caller's stack pointer (from sender_sp, which is set ++ // up via the senderSP register). ++ // In other words, if *either* the caller or callee is interpreted, we can ++ // get the stack pointer repaired after a call. ++ // This is why c2i and i2c adapters cannot be indefinitely composed. ++ // In particular, if a c2i adapter were to somehow call an i2c adapter, ++ // both caller and callee would be compiled methods, and neither would ++ // clean up the stack pointer changes performed by the two adapters. ++ // If this happens, control eventually transfers back to the compiled ++ // caller, but with an uncorrected stack, causing delayed havoc. ++ ++ if (VerifyAdapterCalls && ++ (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) { ++ // So, let's test for cascading c2i/i2c adapters right now. 
++ // assert(Interpreter::contains($return_addr) || ++ // StubRoutines::contains($return_addr), ++ // "i2c adapter must return to an interpreter frame"); ++ __ block_comment("verify_i2c { "); ++ Label L_ok; ++ if (Interpreter::code() != NULL) ++ range_check(masm, O7, O0, O1, ++ Interpreter::code()->code_start(), Interpreter::code()->code_end(), ++ L_ok); ++ if (StubRoutines::code1() != NULL) ++ range_check(masm, O7, O0, O1, ++ StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(), ++ L_ok); ++ if (StubRoutines::code2() != NULL) ++ range_check(masm, O7, O0, O1, ++ StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(), ++ L_ok); ++ const char* msg = "i2c adapter must return to an interpreter frame"; ++ __ block_comment(msg); ++ __ stop(msg); ++ __ bind(L_ok); ++ __ block_comment("} verify_i2ce "); ++ } ++ + // As you can see from the list of inputs & outputs there are not a lot + // of temp registers to work with: mostly G1, G3 & G4. + +@@ -1937,20 +1986,156 @@ + __ bind(done); + } + ++static void verify_oop_args(MacroAssembler* masm, ++ int total_args_passed, ++ const BasicType* sig_bt, ++ const VMRegPair* regs) { ++ Register temp_reg = G5_method; // not part of any compiled calling seq ++ if (VerifyOops) { ++ for (int i = 0; i < total_args_passed; i++) { ++ if (sig_bt[i] == T_OBJECT || ++ sig_bt[i] == T_ARRAY) { ++ VMReg r = regs[i].first(); ++ assert(r->is_valid(), "bad oop arg"); ++ if (r->is_stack()) { ++ RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS; ++ ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg); ++ __ ld_ptr(SP, ld_off, temp_reg); ++ __ verify_oop(temp_reg); ++ } else { ++ __ verify_oop(r->as_Register()); ++ } ++ } ++ } ++ } ++} ++ ++static void gen_special_dispatch(MacroAssembler* masm, ++ int total_args_passed, ++ int comp_args_on_stack, ++ vmIntrinsics::ID special_dispatch, ++ const BasicType* sig_bt, ++ const VMRegPair* regs) { ++ verify_oop_args(masm, total_args_passed, sig_bt, regs); ++ ++ // Now write 
the args into the outgoing interpreter space ++ bool has_receiver = false; ++ Register receiver_reg = noreg; ++ int member_arg_pos = -1; ++ Register member_reg = noreg; ++ int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch); ++ if (ref_kind != 0) { ++ member_arg_pos = total_args_passed - 1; // trailing MemberName argument ++ member_reg = G5_method; // known to be free at this point ++ has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); ++ } else if (special_dispatch == vmIntrinsics::_invokeBasic) { ++ has_receiver = true; ++ } else { ++ fatal(err_msg("special_dispatch=%d", special_dispatch)); ++ } ++ ++ if (member_reg != noreg) { ++ // Load the member_arg into register, if necessary. ++ assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob"); ++ assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object"); ++ VMReg r = regs[member_arg_pos].first(); ++ assert(r->is_valid(), "bad member arg"); ++ if (r->is_stack()) { ++ RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS; ++ ld_off = __ ensure_simm13_or_reg(ld_off, member_reg); ++ __ ld_ptr(SP, ld_off, member_reg); ++ } else { ++ // no data motion is needed ++ member_reg = r->as_Register(); ++ } ++ } ++ ++ if (has_receiver) { ++ // Make sure the receiver is loaded into a register. ++ assert(total_args_passed > 0, "oob"); ++ assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object"); ++ VMReg r = regs[0].first(); ++ assert(r->is_valid(), "bad receiver arg"); ++ if (r->is_stack()) { ++ // Porting note: This assumes that compiled calling conventions always ++ // pass the receiver oop in a register. If this is not true on some ++ // platform, pick a temp and load the receiver from stack. 
++ assert(false, "receiver always in a register"); ++ receiver_reg = G3_scratch; // known to be free at this point ++ RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS; ++ ld_off = __ ensure_simm13_or_reg(ld_off, member_reg); ++ __ ld_ptr(SP, ld_off, receiver_reg); ++ } else { ++ // no data motion is needed ++ receiver_reg = r->as_Register(); ++ } ++ } ++ ++ // Figure out which address we are really jumping to: ++ MethodHandles::generate_method_handle_dispatch(masm, special_dispatch, ++ receiver_reg, member_reg, /*for_compiler_entry:*/ true); ++} ++ + // --------------------------------------------------------------------------- + // Generate a native wrapper for a given method. The method takes arguments + // in the Java compiled code convention, marshals them to the native + // convention (handlizes oops, etc), transitions to native, makes the call, + // returns to java state (possibly blocking), unhandlizes any result and + // returns. ++// ++// Critical native functions are a shorthand for the use of ++// GetPrimtiveArrayCritical and disallow the use of any other JNI ++// functions. The wrapper is expected to unpack the arguments before ++// passing them to the callee and perform checks before and after the ++// native call to ensure that they GC_locker ++// lock_critical/unlock_critical semantics are followed. Some other ++// parts of JNI setup are skipped like the tear down of the JNI handle ++// block and the check for pending exceptions it's impossible for them ++// to be thrown. ++// ++// They are roughly structured like this: ++// if (GC_locker::needs_gc()) ++// SharedRuntime::block_for_jni_critical(); ++// tranistion to thread_in_native ++// unpack arrray arguments and call native entry point ++// check for safepoint in progress ++// check if any thread suspend flags are set ++// call into JVM and possible unlock the JNI critical ++// if a GC was suppressed while in the critical native. 
++// transition back to thread_in_Java ++// return to caller ++// + nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm, + methodHandle method, + int compile_id, + int total_in_args, + int comp_args_on_stack, // in VMRegStackSlots +- BasicType *in_sig_bt, +- VMRegPair *in_regs, ++ BasicType* in_sig_bt, ++ VMRegPair* in_regs, + BasicType ret_type) { ++ if (method->is_method_handle_intrinsic()) { ++ vmIntrinsics::ID iid = method->intrinsic_id(); ++ intptr_t start = (intptr_t)__ pc(); ++ int vep_offset = ((intptr_t)__ pc()) - start; ++ gen_special_dispatch(masm, ++ total_in_args, ++ comp_args_on_stack, ++ method->intrinsic_id(), ++ in_sig_bt, ++ in_regs); ++ int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period ++ __ flush(); ++ int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually ++ return nmethod::new_native_nmethod(method, ++ compile_id, ++ masm->code(), ++ vep_offset, ++ frame_complete, ++ stack_slots / VMRegImpl::slots_per_word, ++ in_ByteSize(-1), ++ in_ByteSize(-1), ++ (OopMapSet*)NULL); ++ } + bool is_critical_native = true; + address native_func = method->critical_native_function(); + if (native_func == NULL) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/stubGenerator_sparc.cpp +--- openjdk/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -3404,14 +3404,6 @@ + StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry; + #endif // COMPILER2 !=> _LP64 + +- // Build this early so it's available for the interpreter. The +- // stub expects the required and actual type to already be in O1 +- // and O2 respectively. 
+- StubRoutines::_throw_WrongMethodTypeException_entry = +- generate_throw_exception("WrongMethodTypeException throw_exception", +- CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException), +- G5_method_type, G3_method_handle); +- + // Build this early so it's available for the interpreter. + StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); + } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/templateInterpreter_sparc.cpp +--- openjdk/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -692,9 +692,9 @@ + // Need to differentiate between igetfield, agetfield, bgetfield etc. + // because they are different sizes. + // Get the type from the constant pool cache +- __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch); +- // Make sure we don't need to mask G1_scratch for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); ++ __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch); ++ // Make sure we don't need to mask G1_scratch after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); + __ cmp(G1_scratch, atos ); + __ br(Assembler::equal, true, Assembler::pt, xreturn_path); + __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i); +@@ -1659,7 +1659,7 @@ + int computed_sp_adjustment = (delta > 0) ? 
round_to(delta, WordsPerLong) : 0; + *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS; + } else { +- assert(caller->is_compiled_frame() || caller->is_entry_frame() || caller->is_ricochet_frame(), "only possible cases"); ++ assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases"); + // Don't have Lesp available; lay out locals block in the caller + // adjacent to the register window save area. + // +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/templateTable_sparc.cpp +--- openjdk/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -378,7 +378,7 @@ + Register Rcache = G3_scratch; + Register Rscratch = G4_scratch; + +- resolve_cache_and_index(f1_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1)); ++ resolve_cache_and_index(f12_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1)); + + __ verify_oop(Otos_i); + +@@ -2093,10 +2093,12 @@ + // Depends on cpCacheOop layout! + Label resolved; + +- if (byte_no == f1_oop) { +- // We are resolved if the f1 field contains a non-null object (CallSite, etc.) +- // This kind of CP cache entry does not need to match the flags byte, because ++ if (byte_no == f12_oop) { ++ // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.) ++ // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because + // there is a 1-1 relation between bytecode type and CP entry type. ++ // The caller will also load a methodOop from f2. 
++ assert(result != noreg, ""); + assert_different_registers(result, Rcache); + __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); + __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + +@@ -2123,10 +2125,13 @@ + case Bytecodes::_invokespecial : // fall through + case Bytecodes::_invokestatic : // fall through + case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; ++ case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break; + case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; + case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; + case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; +- default : ShouldNotReachHere(); break; ++ default: ++ fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode()))); ++ break; + } + // first time invocation - must resolve first + __ call_VM(noreg, entry, O1); +@@ -2139,48 +2144,54 @@ + } + + void TemplateTable::load_invoke_cp_cache_entry(int byte_no, +- Register Rmethod, +- Register Ritable_index, +- Register Rflags, ++ Register method, ++ Register itable_index, ++ Register flags, + bool is_invokevirtual, + bool is_invokevfinal, + bool is_invokedynamic) { + // Uses both G3_scratch and G4_scratch +- Register Rcache = G3_scratch; +- Register Rscratch = G4_scratch; +- assert_different_registers(Rcache, Rmethod, Ritable_index); +- +- ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); ++ Register cache = G3_scratch; ++ Register index = G4_scratch; ++ assert_different_registers(cache, method, itable_index); + + // determine constant pool cache field offsets ++ assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); + const int method_offset = in_bytes( +- cp_base_offset + +- 
(is_invokevirtual ++ constantPoolCacheOopDesc::base_offset() + ++ ((byte_no == f2_byte) + ? ConstantPoolCacheEntry::f2_offset() + : ConstantPoolCacheEntry::f1_offset() + ) + ); +- const int flags_offset = in_bytes(cp_base_offset + ++ const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + + ConstantPoolCacheEntry::flags_offset()); + // access constant pool cache fields +- const int index_offset = in_bytes(cp_base_offset + ++ const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + + ConstantPoolCacheEntry::f2_offset()); + + if (is_invokevfinal) { +- __ get_cache_and_index_at_bcp(Rcache, Rscratch, 1); +- __ ld_ptr(Rcache, method_offset, Rmethod); +- } else if (byte_no == f1_oop) { +- // Resolved f1_oop goes directly into 'method' register. +- resolve_cache_and_index(byte_no, Rmethod, Rcache, Rscratch, sizeof(u4)); ++ __ get_cache_and_index_at_bcp(cache, index, 1); ++ __ ld_ptr(Address(cache, method_offset), method); ++ } else if (byte_no == f12_oop) { ++ // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'. ++ // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset). ++ // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle. ++ size_t index_size = (is_invokedynamic ? 
sizeof(u4) : sizeof(u2)); ++ resolve_cache_and_index(byte_no, itable_index, cache, index, index_size); ++ __ ld_ptr(Address(cache, index_offset), method); ++ itable_index = noreg; // hack to disable load below + } else { +- resolve_cache_and_index(byte_no, noreg, Rcache, Rscratch, sizeof(u2)); +- __ ld_ptr(Rcache, method_offset, Rmethod); ++ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2)); ++ __ ld_ptr(Address(cache, method_offset), method); + } + +- if (Ritable_index != noreg) { +- __ ld_ptr(Rcache, index_offset, Ritable_index); ++ if (itable_index != noreg) { ++ // pick up itable index from f2 also: ++ assert(byte_no == f1_byte, "already picked up f1"); ++ __ ld_ptr(Address(cache, index_offset), itable_index); + } +- __ ld_ptr(Rcache, flags_offset, Rflags); ++ __ ld_ptr(Address(cache, flags_offset), flags); + } + + // The Rcache register must be set before call +@@ -2272,7 +2283,7 @@ + + if (__ membar_has_effect(membar_bits)) { + // Get volatile flag +- __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); ++ __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); + __ and3(Rflags, Lscratch, Lscratch); + } + +@@ -2280,9 +2291,9 @@ + + // compute field type + Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj; +- __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags); +- // Make sure we don't need to mask Rflags for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); ++ __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); ++ // Make sure we don't need to mask Rflags after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); + + // Check atos before itos for getstatic, more likely (in Queens at least) + __ cmp(Rflags, atos); +@@ -2445,7 +2456,7 @@ + if (__ membar_has_effect(membar_bits)) { + // Get volatile flag + __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags); +- __ set((1 << ConstantPoolCacheEntry::volatileField), 
Lscratch); ++ __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); + } + + switch (bytecode()) { +@@ -2569,9 +2580,9 @@ + Label two_word, valsizeknown; + __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); + __ mov(Lesp, G4_scratch); +- __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags); +- // Make sure we don't need to mask Rflags for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); ++ __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); ++ // Make sure we don't need to mask Rflags after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); + __ cmp(Rflags, ltos); + __ br(Assembler::equal, false, Assembler::pt, two_word); + __ delayed()->cmp(Rflags, dtos); +@@ -2625,7 +2636,7 @@ + + Label notVolatile, checkVolatile, exit; + if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { +- __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); ++ __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); + __ and3(Rflags, Lscratch, Lscratch); + + if (__ membar_has_effect(read_bits)) { +@@ -2635,9 +2646,9 @@ + } + } + +- __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags); +- // Make sure we don't need to mask Rflags for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); ++ __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); ++ // Make sure we don't need to mask Rflags after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); + + // compute field type + Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat; +@@ -2833,7 +2844,7 @@ + Label notVolatile, checkVolatile, exit; + if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { + __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); +- __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); ++ __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), 
Lscratch); + __ and3(Rflags, Lscratch, Lscratch); + if (__ membar_has_effect(read_bits)) { + __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile); +@@ -2916,7 +2927,7 @@ + + // Test volatile + Label notVolatile; +- __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); ++ __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); + __ btst(Rflags, Lscratch); + __ br(Assembler::zero, false, Assembler::pt, notVolatile); + __ delayed()->nop(); +@@ -2936,27 +2947,82 @@ + ShouldNotReachHere(); + } + ++ ++void TemplateTable::prepare_invoke(int byte_no, ++ Register method, // linked method (or i-klass) ++ Register ra, // return address ++ Register index, // itable index, MethodType, etc. ++ Register recv, // if caller wants to see it ++ Register flags // if caller wants to test it ++ ) { ++ // determine flags ++ const Bytecodes::Code code = bytecode(); ++ const bool is_invokeinterface = code == Bytecodes::_invokeinterface; ++ const bool is_invokedynamic = code == Bytecodes::_invokedynamic; ++ const bool is_invokehandle = code == Bytecodes::_invokehandle; ++ const bool is_invokevirtual = code == Bytecodes::_invokevirtual; ++ const bool is_invokespecial = code == Bytecodes::_invokespecial; ++ const bool load_receiver = (recv != noreg); ++ assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); ++ assert(recv == noreg || recv == O0, ""); ++ assert(flags == noreg || flags == O1, ""); ++ ++ // setup registers & access constant pool cache ++ if (recv == noreg) recv = O0; ++ if (flags == noreg) flags = O1; ++ const Register temp = O2; ++ assert_different_registers(method, ra, index, recv, flags, temp); ++ ++ load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); ++ ++ __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore ++ ++ // maybe push appendix to arguments ++ if (is_invokedynamic || is_invokehandle) { ++ Label 
L_no_push; ++ __ verify_oop(index); ++ __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp); ++ __ btst(flags, temp); ++ __ br(Assembler::zero, false, Assembler::pt, L_no_push); ++ __ delayed()->nop(); ++ // Push the appendix as a trailing parameter. ++ // This must be done before we get the receiver, ++ // since the parameter_size includes it. ++ __ push_ptr(index); // push appendix (MethodType, CallSite, etc.) ++ __ bind(L_no_push); ++ } ++ ++ // load receiver if needed (after appendix is pushed so parameter size is correct) ++ if (load_receiver) { ++ __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp); // get parameter size ++ __ load_receiver(temp, recv); // __ argument_address uses Gargs but we need Lesp ++ __ verify_oop(recv); ++ } ++ ++ // compute return type ++ __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra); ++ // Make sure we don't need to mask flags after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); ++ // load return address ++ { ++ const address table_addr = (is_invokeinterface || is_invokedynamic) ? 
++ (address)Interpreter::return_5_addrs_by_index_table() : ++ (address)Interpreter::return_3_addrs_by_index_table(); ++ AddressLiteral table(table_addr); ++ __ set(table, temp); ++ __ sll(ra, LogBytesPerWord, ra); ++ __ ld_ptr(Address(temp, ra), ra); ++ } ++} ++ ++ + void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) { + Register Rtemp = G4_scratch; + Register Rcall = Rindex; + assert_different_registers(Rcall, G5_method, Gargs, Rret); + + // get target methodOop & entry point +- const int base = instanceKlass::vtable_start_offset() * wordSize; +- if (vtableEntry::size() % 3 == 0) { +- // scale the vtable index by 12: +- int one_third = vtableEntry::size() / 3; +- __ sll(Rindex, exact_log2(one_third * 1 * wordSize), Rtemp); +- __ sll(Rindex, exact_log2(one_third * 2 * wordSize), Rindex); +- __ add(Rindex, Rtemp, Rindex); +- } else { +- // scale the vtable index by 8: +- __ sll(Rindex, exact_log2(vtableEntry::size() * wordSize), Rindex); +- } +- +- __ add(Rrecv, Rindex, Rrecv); +- __ ld_ptr(Rrecv, base + vtableEntry::method_offset_in_bytes(), G5_method); +- ++ __ lookup_virtual_method(Rrecv, Rindex, G5_method); + __ call_from_interpreter(Rcall, Gargs, Rret); + } + +@@ -2965,16 +3031,16 @@ + assert(byte_no == f2_byte, "use this argument"); + + Register Rscratch = G3_scratch; +- Register Rtemp = G4_scratch; +- Register Rret = Lscratch; +- Register Rrecv = G5_method; ++ Register Rtemp = G4_scratch; ++ Register Rret = Lscratch; ++ Register O0_recv = O0; + Label notFinal; + + load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false); + __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore + + // Check for vfinal +- __ set((1 << ConstantPoolCacheEntry::vfinalMethod), G4_scratch); ++ __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch); + __ btst(Rret, G4_scratch); + __ br(Assembler::zero, false, Assembler::pt, notFinal); + __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets 
number of parameters +@@ -2986,27 +3052,27 @@ + __ bind(notFinal); + + __ mov(G5_method, Rscratch); // better scratch register +- __ load_receiver(G4_scratch, O0); // gets receiverOop +- // receiver is in O0 +- __ verify_oop(O0); ++ __ load_receiver(G4_scratch, O0_recv); // gets receiverOop ++ // receiver is in O0_recv ++ __ verify_oop(O0_recv); + + // get return address + AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); + __ set(table, Rtemp); +- __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type +- // Make sure we don't need to mask Rret for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); ++ __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type ++ // Make sure we don't need to mask Rret after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); + __ sll(Rret, LogBytesPerWord, Rret); + __ ld_ptr(Rtemp, Rret, Rret); // get return address + + // get receiver klass +- __ null_check(O0, oopDesc::klass_offset_in_bytes()); +- __ load_klass(O0, Rrecv); +- __ verify_oop(Rrecv); +- +- __ profile_virtual_call(Rrecv, O4); +- +- generate_vtable_call(Rrecv, Rscratch, Rret); ++ __ null_check(O0_recv, oopDesc::klass_offset_in_bytes()); ++ __ load_klass(O0_recv, O0_recv); ++ __ verify_oop(O0_recv); ++ ++ __ profile_virtual_call(O0_recv, O4); ++ ++ generate_vtable_call(O0_recv, Rscratch, Rret); + } + + void TemplateTable::fast_invokevfinal(int byte_no) { +@@ -3036,9 +3102,9 @@ + // get return address + AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); + __ set(table, Rtemp); +- __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type +- // Make sure we don't need to mask Rret for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); ++ __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type ++ // Make sure we don't need to mask Rret after the above shift ++ 
ConstantPoolCacheEntry::verify_tos_state_shift(); + __ sll(Rret, LogBytesPerWord, Rret); + __ ld_ptr(Rtemp, Rret, Rret); // get return address + +@@ -3047,65 +3113,37 @@ + __ call_from_interpreter(Rscratch, Gargs, Rret); + } + ++ + void TemplateTable::invokespecial(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f1_byte, "use this argument"); + +- Register Rscratch = G3_scratch; +- Register Rtemp = G4_scratch; +- Register Rret = Lscratch; +- +- load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false); +- __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore +- ++ const Register Rret = Lscratch; ++ const Register O0_recv = O0; ++ const Register Rscratch = G3_scratch; ++ ++ prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv); // get receiver also for null check ++ __ null_check(O0_recv); ++ ++ // do the call + __ verify_oop(G5_method); +- +- __ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch); +- __ load_receiver(G4_scratch, O0); +- +- // receiver NULL check +- __ null_check(O0); +- + __ profile_call(O4); +- +- // get return address +- AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); +- __ set(table, Rtemp); +- __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type +- // Make sure we don't need to mask Rret for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); +- __ sll(Rret, LogBytesPerWord, Rret); +- __ ld_ptr(Rtemp, Rret, Rret); // get return address +- +- // do the call + __ call_from_interpreter(Rscratch, Gargs, Rret); + } + ++ + void TemplateTable::invokestatic(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f1_byte, "use this argument"); + +- Register Rscratch = G3_scratch; +- Register Rtemp = G4_scratch; +- Register Rret = Lscratch; +- +- load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false); +- __ mov(SP, O5_savedSP); // record SP that we 
wanted the callee to restore +- ++ const Register Rret = Lscratch; ++ const Register Rscratch = G3_scratch; ++ ++ prepare_invoke(byte_no, G5_method, Rret); // get f1 methodOop ++ ++ // do the call + __ verify_oop(G5_method); +- + __ profile_call(O4); +- +- // get return address +- AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); +- __ set(table, Rtemp); +- __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type +- // Make sure we don't need to mask Rret for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); +- __ sll(Rret, LogBytesPerWord, Rret); +- __ ld_ptr(Rtemp, Rret, Rret); // get return address +- +- // do the call + __ call_from_interpreter(Rscratch, Gargs, Rret); + } + +@@ -3122,7 +3160,7 @@ + Label notFinal; + + // Check for vfinal +- __ set((1 << ConstantPoolCacheEntry::vfinalMethod), Rscratch); ++ __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch); + __ btst(Rflags, Rscratch); + __ br(Assembler::zero, false, Assembler::pt, notFinal); + __ delayed()->nop(); +@@ -3144,53 +3182,37 @@ + transition(vtos, vtos); + assert(byte_no == f1_byte, "use this argument"); + +- Register Rscratch = G4_scratch; +- Register Rret = G3_scratch; +- Register Rindex = Lscratch; +- Register Rinterface = G1_scratch; +- Register RklassOop = G5_method; +- Register Rflags = O1; ++ const Register Rinterface = G1_scratch; ++ const Register Rret = G3_scratch; ++ const Register Rindex = Lscratch; ++ const Register O0_recv = O0; ++ const Register O1_flags = O1; ++ const Register O2_klassOop = O2; ++ const Register Rscratch = G4_scratch; + assert_different_registers(Rscratch, G5_method); + +- load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, /*virtual*/ false, false, false); +- __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore +- +- // get receiver +- __ and3(Rflags, 0xFF, Rscratch); // gets number of parameters +- __ load_receiver(Rscratch, O0); +- __ verify_oop(O0); +- +- __ 
mov(Rflags, Rret); +- +- // get return address +- AddressLiteral table(Interpreter::return_5_addrs_by_index_table()); +- __ set(table, Rscratch); +- __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type +- // Make sure we don't need to mask Rret for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); +- __ sll(Rret, LogBytesPerWord, Rret); +- __ ld_ptr(Rscratch, Rret, Rret); // get return address ++ prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags); + + // get receiver klass +- __ null_check(O0, oopDesc::klass_offset_in_bytes()); +- __ load_klass(O0, RklassOop); +- __ verify_oop(RklassOop); ++ __ null_check(O0_recv, oopDesc::klass_offset_in_bytes()); ++ __ load_klass(O0_recv, O2_klassOop); ++ __ verify_oop(O2_klassOop); + + // Special case of invokeinterface called for virtual method of + // java.lang.Object. See cpCacheOop.cpp for details. + // This code isn't produced by javac, but could be produced by + // another compliant java compiler. 
+ Label notMethod; +- __ set((1 << ConstantPoolCacheEntry::methodInterface), Rscratch); +- __ btst(Rflags, Rscratch); ++ __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch); ++ __ btst(O1_flags, Rscratch); + __ br(Assembler::zero, false, Assembler::pt, notMethod); + __ delayed()->nop(); + +- invokeinterface_object_method(RklassOop, Rinterface, Rret, Rflags); ++ invokeinterface_object_method(O2_klassOop, Rinterface, Rret, O1_flags); + + __ bind(notMethod); + +- __ profile_virtual_call(RklassOop, O4); ++ __ profile_virtual_call(O2_klassOop, O4); + + // + // find entry point to call +@@ -3199,9 +3221,9 @@ + // compute start of first itableOffsetEntry (which is at end of vtable) + const int base = instanceKlass::vtable_start_offset() * wordSize; + Label search; +- Register Rtemp = Rflags; +- +- __ ld(RklassOop, instanceKlass::vtable_length_offset() * wordSize, Rtemp); ++ Register Rtemp = O1_flags; ++ ++ __ ld(O2_klassOop, instanceKlass::vtable_length_offset() * wordSize, Rtemp); + if (align_object_offset(1) > 1) { + __ round_to(Rtemp, align_object_offset(1)); + } +@@ -3212,7 +3234,7 @@ + __ set(base, Rscratch); + __ add(Rscratch, Rtemp, Rtemp); + } +- __ add(RklassOop, Rtemp, Rscratch); ++ __ add(O2_klassOop, Rtemp, Rscratch); + + __ bind(search); + +@@ -3244,7 +3266,7 @@ + assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below"); + __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex); // Rindex *= 8; + __ add(Rscratch, Rindex, Rscratch); +- __ ld_ptr(RklassOop, Rscratch, G5_method); ++ __ ld_ptr(O2_klassOop, Rscratch, G5_method); + + // Check for abstract method error. 
+ { +@@ -3260,13 +3282,42 @@ + + __ verify_oop(G5_method); + __ call_from_interpreter(Rcall, Gargs, Rret); +- ++} ++ ++ ++void TemplateTable::invokehandle(int byte_no) { ++ transition(vtos, vtos); ++ assert(byte_no == f12_oop, "use this argument"); ++ ++ if (!EnableInvokeDynamic) { ++ // rewriter does not generate this bytecode ++ __ should_not_reach_here(); ++ return; ++ } ++ ++ const Register Rret = Lscratch; ++ const Register G4_mtype = G4_scratch; // f1 ++ const Register O0_recv = O0; ++ const Register Rscratch = G3_scratch; ++ ++ prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv); ++ __ null_check(O0_recv); ++ ++ // G4: MethodType object (from f1) ++ // G5: MH.linkToCallSite method (from f2) ++ ++ // Note: G4_mtype is already pushed (if necessary) by prepare_invoke ++ ++ // do the call ++ __ verify_oop(G5_method); ++ __ profile_final_call(O4); // FIXME: profile the LambdaForm also ++ __ call_from_interpreter(Rscratch, Gargs, Rret); + } + + + void TemplateTable::invokedynamic(int byte_no) { + transition(vtos, vtos); +- assert(byte_no == f1_oop, "use this argument"); ++ assert(byte_no == f12_oop, "use this argument"); + + if (!EnableInvokeDynamic) { + // We should not encounter this bytecode if !EnableInvokeDynamic. 
+@@ -3279,42 +3330,24 @@ + return; + } + +- // G5: CallSite object (f1) +- // XX: unused (f2) +- // XX: flags (unused) +- +- Register G5_callsite = G5_method; +- Register Rscratch = G3_scratch; +- Register Rtemp = G1_scratch; +- Register Rret = Lscratch; +- +- load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret, +- /*virtual*/ false, /*vfinal*/ false, /*indy*/ true); +- __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore +- ++ const Register Rret = Lscratch; ++ const Register G4_callsite = G4_scratch; ++ const Register Rscratch = G3_scratch; ++ ++ prepare_invoke(byte_no, G5_method, Rret, G4_callsite); ++ ++ // G4: CallSite object (from f1) ++ // G5: MH.linkToCallSite method (from f2) ++ ++ // Note: G4_callsite is already pushed by prepare_invoke ++ ++ // %%% should make a type profile for any invokedynamic that takes a ref argument + // profile this call + __ profile_call(O4); + +- // get return address +- AddressLiteral table(Interpreter::return_5_addrs_by_index_table()); +- __ set(table, Rtemp); +- __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type +- // Make sure we don't need to mask Rret for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); +- __ sll(Rret, LogBytesPerWord, Rret); +- __ ld_ptr(Rtemp, Rret, Rret); // get return address +- +- __ verify_oop(G5_callsite); +- __ load_heap_oop(G5_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle); +- __ null_check(G3_method_handle); +- __ verify_oop(G3_method_handle); +- +- // Adjust Rret first so Llast_SP can be same as Rret +- __ add(Rret, -frame::pc_return_offset, O7); +- __ add(Lesp, BytesPerWord, Gargs); // setup parameter pointer +- __ jump_to_method_handle_entry(G3_method_handle, Rtemp, /* emit_delayed_nop */ false); +- // Record SP so we can remove any stack space allocated by adapter transition +- __ delayed()->mov(SP, Llast_SP); ++ // do the call ++ __ verify_oop(G5_method); 
++ __ call_from_interpreter(Rscratch, Gargs, Rret); + } + + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/templateTable_sparc.hpp +--- openjdk/hotspot/src/cpu/sparc/vm/templateTable_sparc.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/templateTable_sparc.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -25,6 +25,13 @@ + #ifndef CPU_SPARC_VM_TEMPLATETABLE_SPARC_HPP + #define CPU_SPARC_VM_TEMPLATETABLE_SPARC_HPP + ++ static void prepare_invoke(int byte_no, ++ Register method, // linked method (or i-klass) ++ Register ra, // return address ++ Register index = noreg, // itable index, MethodType, etc. ++ Register recv = noreg, // if caller wants to see it ++ Register flags = noreg // if caller wants to test it ++ ); + // helper function + static void invokevfinal_helper(Register Rcache, Register Rret); + static void invokeinterface_object_method(Register RklassOop, Register Rcall, +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/vtableStubs_sparc.cpp +--- openjdk/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -70,7 +70,6 @@ + __ load_klass(O0, G3_scratch); + + // set methodOop (in case of interpreted method), and destination address +- int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); + #ifndef PRODUCT + if (DebugVtables) { + Label L; +@@ -82,13 +81,8 @@ + __ bind(L); + } + #endif +- int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes(); +- if (Assembler::is_simm13(v_off)) { +- __ ld_ptr(G3, v_off, G5_method); +- } else { +- __ set(v_off,G5); +- __ ld_ptr(G3, G5, G5_method); +- } ++ ++ __ lookup_virtual_method(G3_scratch, vtable_index, G5_method); + + #ifndef PRODUCT + if (DebugVtables) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/assembler_x86.cpp +--- openjdk/hotspot/src/cpu/x86/vm/assembler_x86.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ 
openjdk/hotspot/src/cpu/x86/vm/assembler_x86.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -41,6 +41,15 @@ + #include "gc_implementation/g1/heapRegion.hpp" + #endif + ++#ifdef PRODUCT ++#define BLOCK_COMMENT(str) /* nothing */ ++#define STOP(error) stop(error) ++#else ++#define BLOCK_COMMENT(str) block_comment(str) ++#define STOP(error) block_comment(error); stop(error) ++#endif ++ ++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") + // Implementation of AddressLiteral + + AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) { +@@ -5393,23 +5402,7 @@ + // To see where a verify_oop failed, get $ebx+40/X for this frame. + // This is the value of eip which points to where verify_oop will return. + if (os::message_box(msg, "Execution stopped, print registers?")) { +- ttyLocker ttyl; +- tty->print_cr("eip = 0x%08x", eip); +-#ifndef PRODUCT +- if ((WizardMode || Verbose) && PrintMiscellaneous) { +- tty->cr(); +- findpc(eip); +- tty->cr(); +- } +-#endif +- tty->print_cr("rax = 0x%08x", rax); +- tty->print_cr("rbx = 0x%08x", rbx); +- tty->print_cr("rcx = 0x%08x", rcx); +- tty->print_cr("rdx = 0x%08x", rdx); +- tty->print_cr("rdi = 0x%08x", rdi); +- tty->print_cr("rsi = 0x%08x", rsi); +- tty->print_cr("rbp = 0x%08x", rbp); +- tty->print_cr("rsp = 0x%08x", rsp); ++ print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip); + BREAKPOINT; + assert(false, "start up GDB"); + } +@@ -5421,12 +5414,53 @@ + ThreadStateTransition::transition(thread, _thread_in_vm, saved_state); + } + ++void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) { ++ ttyLocker ttyl; ++ FlagSetting fs(Debugging, true); ++ tty->print_cr("eip = 0x%08x", eip); ++#ifndef PRODUCT ++ if ((WizardMode || Verbose) && PrintMiscellaneous) { ++ tty->cr(); ++ findpc(eip); ++ tty->cr(); ++ } ++#endif ++#define PRINT_REG(rax) \ ++ { tty->print("%s = ", #rax); os::print_location(tty, rax); } ++ PRINT_REG(rax); ++ PRINT_REG(rbx); ++ 
PRINT_REG(rcx); ++ PRINT_REG(rdx); ++ PRINT_REG(rdi); ++ PRINT_REG(rsi); ++ PRINT_REG(rbp); ++ PRINT_REG(rsp); ++#undef PRINT_REG ++ // Print some words near top of staack. ++ int* dump_sp = (int*) rsp; ++ for (int col1 = 0; col1 < 8; col1++) { ++ tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp); ++ os::print_location(tty, *dump_sp++); ++ } ++ for (int row = 0; row < 16; row++) { ++ tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp); ++ for (int col = 0; col < 8; col++) { ++ tty->print(" 0x%08x", *dump_sp++); ++ } ++ tty->cr(); ++ } ++ // Print some instructions around pc: ++ Disassembler::decode((address)eip-64, (address)eip); ++ tty->print_cr("--------"); ++ Disassembler::decode((address)eip, (address)eip+32); ++} ++ + void MacroAssembler::stop(const char* msg) { + ExternalAddress message((address)msg); + // push address of message + pushptr(message.addr()); + { Label L; call(L, relocInfo::none); bind(L); } // push eip +- pusha(); // push registers ++ pusha(); // push registers + call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32))); + hlt(); + } +@@ -5443,6 +5477,18 @@ + pop_CPU_state(); + } + ++void MacroAssembler::print_state() { ++ { Label L; call(L, relocInfo::none); bind(L); } // push eip ++ pusha(); // push registers ++ ++ push_CPU_state(); ++ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32))); ++ pop_CPU_state(); ++ ++ popa(); ++ addl(rsp, wordSize); ++} ++ + #else // _LP64 + + // 64 bit versions +@@ -5908,14 +5954,33 @@ + } + + void MacroAssembler::warn(const char* msg) { +- push(rsp); ++ push(rbp); ++ movq(rbp, rsp); + andq(rsp, -16); // align stack as required by push_CPU_state and call +- + push_CPU_state(); // keeps alignment at 16 bytes + lea(c_rarg0, ExternalAddress((address) msg)); + call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0); + pop_CPU_state(); +- pop(rsp); ++ mov(rsp, rbp); ++ pop(rbp); ++} 
++ ++void MacroAssembler::print_state() { ++ address rip = pc(); ++ pusha(); // get regs on stack ++ push(rbp); ++ movq(rbp, rsp); ++ andq(rsp, -16); // align stack as required by push_CPU_state and call ++ push_CPU_state(); // keeps alignment at 16 bytes ++ ++ lea(c_rarg0, InternalAddress(rip)); ++ lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array ++ call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1); ++ ++ pop_CPU_state(); ++ mov(rsp, rbp); ++ pop(rbp); ++ popa(); + } + + #ifndef PRODUCT +@@ -5924,7 +5989,7 @@ + + void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) { + // In order to get locks to work, we need to fake a in_VM state +- if (ShowMessageBoxOnError ) { ++ if (ShowMessageBoxOnError) { + JavaThread* thread = JavaThread::current(); + JavaThreadState saved_state = thread->thread_state(); + thread->set_thread_state(_thread_in_vm); +@@ -5938,30 +6003,9 @@ + // XXX correct this offset for amd64 + // This is the value of eip which points to where verify_oop will return. 
+ if (os::message_box(msg, "Execution stopped, print registers?")) { +- ttyLocker ttyl; +- tty->print_cr("rip = 0x%016lx", pc); +-#ifndef PRODUCT +- tty->cr(); +- findpc(pc); +- tty->cr(); +-#endif +- tty->print_cr("rax = 0x%016lx", regs[15]); +- tty->print_cr("rbx = 0x%016lx", regs[12]); +- tty->print_cr("rcx = 0x%016lx", regs[14]); +- tty->print_cr("rdx = 0x%016lx", regs[13]); +- tty->print_cr("rdi = 0x%016lx", regs[8]); +- tty->print_cr("rsi = 0x%016lx", regs[9]); +- tty->print_cr("rbp = 0x%016lx", regs[10]); +- tty->print_cr("rsp = 0x%016lx", regs[11]); +- tty->print_cr("r8 = 0x%016lx", regs[7]); +- tty->print_cr("r9 = 0x%016lx", regs[6]); +- tty->print_cr("r10 = 0x%016lx", regs[5]); +- tty->print_cr("r11 = 0x%016lx", regs[4]); +- tty->print_cr("r12 = 0x%016lx", regs[3]); +- tty->print_cr("r13 = 0x%016lx", regs[2]); +- tty->print_cr("r14 = 0x%016lx", regs[1]); +- tty->print_cr("r15 = 0x%016lx", regs[0]); ++ print_state64(pc, regs); + BREAKPOINT; ++ assert(false, "start up GDB"); + } + ThreadStateTransition::transition(thread, _thread_in_vm, saved_state); + } else { +@@ -5972,6 +6016,54 @@ + } + } + ++void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) { ++ ttyLocker ttyl; ++ FlagSetting fs(Debugging, true); ++ tty->print_cr("rip = 0x%016lx", pc); ++#ifndef PRODUCT ++ tty->cr(); ++ findpc(pc); ++ tty->cr(); ++#endif ++#define PRINT_REG(rax, value) \ ++ { tty->print("%s = ", #rax); os::print_location(tty, value); } ++ PRINT_REG(rax, regs[15]); ++ PRINT_REG(rbx, regs[12]); ++ PRINT_REG(rcx, regs[14]); ++ PRINT_REG(rdx, regs[13]); ++ PRINT_REG(rdi, regs[8]); ++ PRINT_REG(rsi, regs[9]); ++ PRINT_REG(rbp, regs[10]); ++ PRINT_REG(rsp, regs[11]); ++ PRINT_REG(r8 , regs[7]); ++ PRINT_REG(r9 , regs[6]); ++ PRINT_REG(r10, regs[5]); ++ PRINT_REG(r11, regs[4]); ++ PRINT_REG(r12, regs[3]); ++ PRINT_REG(r13, regs[2]); ++ PRINT_REG(r14, regs[1]); ++ PRINT_REG(r15, regs[0]); ++#undef PRINT_REG ++ // Print some words near top of staack. 
++ int64_t* rsp = (int64_t*) regs[11]; ++ int64_t* dump_sp = rsp; ++ for (int col1 = 0; col1 < 8; col1++) { ++ tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp); ++ os::print_location(tty, *dump_sp++); ++ } ++ for (int row = 0; row < 25; row++) { ++ tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp); ++ for (int col = 0; col < 4; col++) { ++ tty->print(" 0x%016lx", *dump_sp++); ++ } ++ tty->cr(); ++ } ++ // Print some instructions around pc: ++ Disassembler::decode((address)pc-64, (address)pc); ++ tty->print_cr("--------"); ++ Disassembler::decode((address)pc, (address)pc+32); ++} ++ + #endif // _LP64 + + // Now versions that are common to 32/64 bit +@@ -6341,7 +6433,7 @@ + get_thread(rax); + cmpptr(java_thread, rax); + jcc(Assembler::equal, L); +- stop("MacroAssembler::call_VM_base: rdi not callee saved?"); ++ STOP("MacroAssembler::call_VM_base: rdi not callee saved?"); + bind(L); + } + pop(rax); +@@ -7997,7 +8089,7 @@ + shlptr(tsize, LogHeapWordSize); + cmpptr(t1, tsize); + jcc(Assembler::equal, ok); +- stop("assert(t1 != tlab size)"); ++ STOP("assert(t1 != tlab size)"); + should_not_reach_here(); + + bind(ok); +@@ -8244,6 +8336,19 @@ + } + + ++// virtual method calling ++void MacroAssembler::lookup_virtual_method(Register recv_klass, ++ RegisterOrConstant vtable_index, ++ Register method_result) { ++ const int base = instanceKlass::vtable_start_offset() * wordSize; ++ assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below"); ++ Address vtable_entry_addr(recv_klass, ++ vtable_index, Address::times_ptr, ++ base + vtableEntry::method_offset_in_bytes()); ++ movptr(method_result, vtable_entry_addr); ++} ++ ++ + void MacroAssembler::check_klass_subtype(Register sub_klass, + Register super_klass, + Register temp_reg, +@@ -8493,6 +8598,7 @@ + // Pass register number to verify_oop_subroutine + char* b = new char[strlen(s) + 50]; + 
sprintf(b, "verify_oop: %s: %s", reg->name(), s); ++ BLOCK_COMMENT("verify_oop {"); + #ifdef _LP64 + push(rscratch1); // save r10, trashed by movptr() + #endif +@@ -8507,6 +8613,7 @@ + movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); + call(rax); + // Caller pops the arguments (oop, message) and restores rax, r10 ++ BLOCK_COMMENT("} verify_oop"); + } + + +@@ -8527,7 +8634,7 @@ + jcc(Assembler::notZero, L); + char* buf = new char[40]; + sprintf(buf, "DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]); +- stop(buf); ++ STOP(buf); + } else { + jccb(Assembler::notZero, L); + hlt(); +@@ -8543,60 +8650,6 @@ + } + + +-// registers on entry: +-// - rax ('check' register): required MethodType +-// - rcx: method handle +-// - rdx, rsi, or ?: killable temp +-void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg, +- Register temp_reg, +- Label& wrong_method_type) { +- Address type_addr(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)); +- // compare method type against that of the receiver +- if (UseCompressedOops) { +- load_heap_oop(temp_reg, type_addr); +- cmpptr(mtype_reg, temp_reg); +- } else { +- cmpptr(mtype_reg, type_addr); +- } +- jcc(Assembler::notEqual, wrong_method_type); +-} +- +- +-// A method handle has a "vmslots" field which gives the size of its +-// argument list in JVM stack slots. This field is either located directly +-// in every method handle, or else is indirectly accessed through the +-// method handle's MethodType. This macro hides the distinction. 
+-void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg, +- Register temp_reg) { +- assert_different_registers(vmslots_reg, mh_reg, temp_reg); +- // load mh.type.form.vmslots +- Register temp2_reg = vmslots_reg; +- load_heap_oop(temp2_reg, Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg))); +- load_heap_oop(temp2_reg, Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg))); +- movl(vmslots_reg, Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg))); +-} +- +- +-// registers on entry: +-// - rcx: method handle +-// - rdx: killable temp (interpreted only) +-// - rax: killable temp (compiled only) +-void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg) { +- assert(mh_reg == rcx, "caller must put MH object in rcx"); +- assert_different_registers(mh_reg, temp_reg); +- +- // pick out the interpreted side of the handler +- // NOTE: vmentry is not an oop! +- movptr(temp_reg, Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes, temp_reg))); +- +- // off we go... +- jmp(Address(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes())); +- +- // for the various stubs which take control at this point, +- // see MethodHandles::generate_method_handle_stub +-} +- +- + Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, + int extra_slot_offset) { + // cf. TemplateTable::prepare_invoke(), if (load_receiver). 
+@@ -8669,14 +8722,14 @@ + movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); + cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); + jcc(Assembler::aboveEqual, next); +- stop("assert(top >= start)"); ++ STOP("assert(top >= start)"); + should_not_reach_here(); + + bind(next); + movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); + cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); + jcc(Assembler::aboveEqual, ok); +- stop("assert(top <= end)"); ++ STOP("assert(top <= end)"); + should_not_reach_here(); + + bind(ok); +@@ -9109,6 +9162,25 @@ + movptr(dst, src); + } + ++void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) { ++ assert_different_registers(src1, tmp); ++#ifdef _LP64 ++ if (UseCompressedOops) { ++ bool did_push = false; ++ if (tmp == noreg) { ++ tmp = rax; ++ push(tmp); ++ did_push = true; ++ assert(!src2.uses(rsp), "can't push"); ++ } ++ load_heap_oop(tmp, src2); ++ cmpptr(src1, tmp); ++ if (did_push) pop(tmp); ++ } else ++#endif ++ cmpptr(src1, src2); ++} ++ + // Used for storing NULLs. 
+ void MacroAssembler::store_heap_oop_null(Address dst) { + #ifdef _LP64 +@@ -9139,7 +9211,7 @@ + push(rscratch1); // cmpptr trashes rscratch1 + cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr())); + jcc(Assembler::equal, ok); +- stop(msg); ++ STOP(msg); + bind(ok); + pop(rscratch1); + } +@@ -9172,7 +9244,7 @@ + Label ok; + testq(r, r); + jcc(Assembler::notEqual, ok); +- stop("null oop passed to encode_heap_oop_not_null"); ++ STOP("null oop passed to encode_heap_oop_not_null"); + bind(ok); + } + #endif +@@ -9193,7 +9265,7 @@ + Label ok; + testq(src, src); + jcc(Assembler::notEqual, ok); +- stop("null oop passed to encode_heap_oop_not_null2"); ++ STOP("null oop passed to encode_heap_oop_not_null2"); + bind(ok); + } + #endif +@@ -9384,7 +9456,7 @@ + cmpptr(rax, StackAlignmentInBytes-wordSize); + pop(rax); + jcc(Assembler::equal, L); +- stop("Stack is not properly aligned!"); ++ STOP("Stack is not properly aligned!"); + bind(L); + } + #endif +@@ -10058,13 +10130,6 @@ + bind(DONE); + } + +-#ifdef PRODUCT +-#define BLOCK_COMMENT(str) /* nothing */ +-#else +-#define BLOCK_COMMENT(str) block_comment(str) +-#endif +- +-#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") + void MacroAssembler::generate_fill(BasicType t, bool aligned, + Register to, Register value, Register count, + Register rtmp, XMMRegister xtmp) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/assembler_x86.hpp +--- openjdk/hotspot/src/cpu/x86/vm/assembler_x86.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/assembler_x86.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -1908,6 +1908,7 @@ + void load_heap_oop(Register dst, Address src); + void load_heap_oop_not_null(Register dst, Address src); + void store_heap_oop(Address dst, Register src); ++ void cmp_heap_oop(Register src1, Address src2, Register tmp = noreg); + + // Used for storing NULL. All other oop constants should be + // stored using routines that take a jobject. 
+@@ -2085,6 +2086,11 @@ + Register scan_temp, + Label& no_such_interface); + ++ // virtual method calling ++ void lookup_virtual_method(Register recv_klass, ++ RegisterOrConstant vtable_index, ++ Register method_result); ++ + // Test sub_klass against super_klass, with fast and slow paths. + + // The fast path produces a tri-state answer: yes / no / maybe-slow. +@@ -2120,15 +2126,8 @@ + Label& L_success); + + // method handles (JSR 292) +- void check_method_handle_type(Register mtype_reg, Register mh_reg, +- Register temp_reg, +- Label& wrong_method_type); +- void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg, +- Register temp_reg); +- void jump_to_method_handle_entry(Register mh_reg, Register temp_reg); + Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0); + +- + //---- + void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0 + +@@ -2147,8 +2146,13 @@ + // prints msg and continues + void warn(const char* msg); + ++ // dumps registers and other state ++ void print_state(); ++ + static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg); + static void debug64(char* msg, int64_t pc, int64_t regs[]); ++ static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip); ++ static void print_state64(int64_t pc, int64_t regs[]); + + void os_breakpoint(); + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/c1_LIRAssembler_x86.cpp +--- openjdk/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -3502,6 +3502,7 @@ + void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { + ciMethod* method = op->profiled_method(); + int bci = op->profiled_bci(); ++ ciMethod* callee = op->profiled_callee(); + + // Update counter for all call types + ciMethodData* md = 
method->method_data_or_null(); +@@ -3513,9 +3514,11 @@ + __ movoop(mdo, md->constant_encoding()); + Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); + Bytecodes::Code bc = method->java_code_at_bci(bci); ++ const bool callee_is_static = callee->is_loaded() && callee->is_static(); + // Perform additional virtual call profiling for invokevirtual and + // invokeinterface bytecodes + if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && ++ !callee_is_static && // required for optimized MH invokes + C1ProfileVirtualCalls) { + assert(op->recv()->is_single_cpu(), "recv must be allocated"); + Register recv = op->recv()->as_register(); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/cppInterpreter_x86.cpp +--- openjdk/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -868,9 +868,9 @@ + // Need to differentiate between igetfield, agetfield, bgetfield etc. + // because they are different sizes. 
+ // Use the type from the constant pool cache +- __ shrl(rdx, ConstantPoolCacheEntry::tosBits); +- // Make sure we don't need to mask rdx for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); ++ __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift); ++ // Make sure we don't need to mask rdx after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); + #ifdef _LP64 + Label notObj; + __ cmpl(rdx, atos); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/frame_x86.cpp +--- openjdk/hotspot/src/cpu/x86/vm/frame_x86.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/frame_x86.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -439,7 +439,6 @@ + // frame::sender_for_compiled_frame + frame frame::sender_for_compiled_frame(RegisterMap* map) const { + assert(map != NULL, "map must be set"); +- assert(!is_ricochet_frame(), "caller must handle this"); + + // frame owned by optimizing compiler + assert(_cb->frame_size() >= 0, "must have non-zero frame size"); +@@ -483,7 +482,6 @@ + if (is_entry_frame()) return sender_for_entry_frame(map); + if (is_interpreted_frame()) return sender_for_interpreter_frame(map); + assert(_cb == CodeCache::find_blob(pc()),"Must be the same"); +- if (is_ricochet_frame()) return sender_for_ricochet_frame(map); + + if (_cb != NULL) { + return sender_for_compiled_frame(map); +@@ -658,9 +656,7 @@ + values.describe(frame_no, fp() + frame::name##_offset, #name) + + void frame::describe_pd(FrameValues& values, int frame_no) { +- if (is_ricochet_frame()) { +- MethodHandles::RicochetFrame::describe(this, values, frame_no); +- } else if (is_interpreted_frame()) { ++ if (is_interpreted_frame()) { + DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp); + DESCRIBE_FP_OFFSET(interpreter_frame_last_sp); + DESCRIBE_FP_OFFSET(interpreter_frame_method); +@@ -682,12 +678,7 @@ + if (_cb != NULL) { + // use the frame size if valid + int size = _cb->frame_size(); +- if ((size > 0) && +- (! 
is_ricochet_frame())) { +- // Work-around: ricochet explicitly excluded because frame size is not +- // constant for the ricochet blob but its frame_size could not, for +- // some reasons, be declared as <= 0. This potentially confusing +- // size declaration should be fixed as another CR. ++ if (size > 0) { + return unextended_sp() + size; + } + } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/interp_masm_x86_32.cpp +--- openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -253,8 +253,12 @@ + get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size); + movptr(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); + const int shift_count = (1 + byte_no) * BitsPerByte; ++ assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) || ++ (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift), ++ "correct shift count"); + shrptr(bytecode, shift_count); +- andptr(bytecode, 0xFF); ++ assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask"); ++ andptr(bytecode, ConstantPoolCacheEntry::bytecode_1_mask); + } + + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/interp_masm_x86_64.cpp +--- openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -256,8 +256,12 @@ + // little-endian machines allow us that. 
+ movl(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); + const int shift_count = (1 + byte_no) * BitsPerByte; ++ assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) || ++ (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift), ++ "correct shift count"); + shrl(bytecode, shift_count); +- andl(bytecode, 0xFF); ++ assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask"); ++ andl(bytecode, ConstantPoolCacheEntry::bytecode_1_mask); + } + + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/interpreterGenerator_x86.hpp +--- openjdk/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -35,7 +35,6 @@ + address generate_normal_entry(bool synchronized); + address generate_native_entry(bool synchronized); + address generate_abstract_entry(void); +- address generate_method_handle_entry(void); + address generate_math_entry(AbstractInterpreter::MethodKind kind); + address generate_empty_entry(void); + address generate_accessor_entry(void); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/interpreter_x86_32.cpp +--- openjdk/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -230,18 +230,6 @@ + } + + +-// Method handle invoker +-// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...) 
+-address InterpreterGenerator::generate_method_handle_entry(void) { +- if (!EnableInvokeDynamic) { +- return generate_abstract_entry(); +- } +- +- address entry_point = MethodHandles::generate_method_handle_interpreter_entry(_masm); +- +- return entry_point; +-} +- + void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) { + + // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/interpreter_x86_64.cpp +--- openjdk/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -317,19 +317,6 @@ + } + + +-// Method handle invoker +-// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...) +-address InterpreterGenerator::generate_method_handle_entry(void) { +- if (!EnableInvokeDynamic) { +- return generate_abstract_entry(); +- } +- +- address entry_point = MethodHandles::generate_method_handle_interpreter_entry(_masm); +- +- return entry_point; +-} +- +- + // Empty method, generate a very fast return. 
+ + address InterpreterGenerator::generate_empty_entry(void) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/methodHandles_x86.cpp +--- openjdk/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -32,8 +32,10 @@ + + #ifdef PRODUCT + #define BLOCK_COMMENT(str) /* nothing */ ++#define STOP(error) stop(error) + #else + #define BLOCK_COMMENT(str) __ block_comment(str) ++#define STOP(error) block_comment(error); __ stop(error) + #endif + + #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") +@@ -43,483 +45,24 @@ + return RegisterOrConstant(value); + } + +-address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm, +- address interpreted_entry) { +- // Just before the actual machine code entry point, allocate space +- // for a MethodHandleEntry::Data record, so that we can manage everything +- // from one base pointer. +- __ align(wordSize); +- address target = __ pc() + sizeof(Data); +- while (__ pc() < target) { +- __ nop(); +- __ align(wordSize); +- } +- +- MethodHandleEntry* me = (MethodHandleEntry*) __ pc(); +- me->set_end_address(__ pc()); // set a temporary end_address +- me->set_from_interpreted_entry(interpreted_entry); +- me->set_type_checking_entry(NULL); +- +- return (address) me; +-} +- +-MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm, +- address start_addr) { +- MethodHandleEntry* me = (MethodHandleEntry*) start_addr; +- assert(me->end_address() == start_addr, "valid ME"); +- +- // Fill in the real end_address: +- __ align(wordSize); +- me->set_end_address(__ pc()); +- +- return me; +-} +- +-// stack walking support +- +-frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) { +- RicochetFrame* f = RicochetFrame::from_frame(fr); +- if (map->update_map()) +- frame::update_map_with_saved_link(map, &f->_sender_link); +- return frame(f->extended_sender_sp(), 
f->exact_sender_sp(), f->sender_link(), f->sender_pc()); +-} +- +-void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) { +- RicochetFrame* f = RicochetFrame::from_frame(fr); +- +- // pick up the argument type descriptor: +- Thread* thread = Thread::current(); +- Handle cookie(thread, f->compute_saved_args_layout(true, true)); +- +- // process fixed part +- blk->do_oop((oop*)f->saved_target_addr()); +- blk->do_oop((oop*)f->saved_args_layout_addr()); +- +- // process variable arguments: +- if (cookie.is_null()) return; // no arguments to describe +- +- // the cookie is actually the invokeExact method for my target +- // his argument signature is what I'm interested in +- assert(cookie->is_method(), ""); +- methodHandle invoker(thread, methodOop(cookie())); +- assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method"); +- assert(!invoker->is_static(), "must have MH argument"); +- int slot_count = invoker->size_of_parameters(); +- assert(slot_count >= 1, "must include 'this'"); +- intptr_t* base = f->saved_args_base(); +- intptr_t* retval = NULL; +- if (f->has_return_value_slot()) +- retval = f->return_value_slot_addr(); +- int slot_num = slot_count; +- intptr_t* loc = &base[slot_num -= 1]; +- //blk->do_oop((oop*) loc); // original target, which is irrelevant +- int arg_num = 0; +- for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) { +- if (ss.at_return_type()) continue; +- BasicType ptype = ss.type(); +- if (ptype == T_ARRAY) ptype = T_OBJECT; // fold all refs to T_OBJECT +- assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void"); +- loc = &base[slot_num -= type2size[ptype]]; +- bool is_oop = (ptype == T_OBJECT && loc != retval); +- if (is_oop) blk->do_oop((oop*)loc); +- arg_num += 1; +- } +- assert(slot_num == 0, "must have processed all the arguments"); +-} +- +-oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool 
write_cache) { +- oop cookie = NULL; +- if (read_cache) { +- cookie = saved_args_layout(); +- if (cookie != NULL) return cookie; +- } +- oop target = saved_target(); +- oop mtype = java_lang_invoke_MethodHandle::type(target); +- oop mtform = java_lang_invoke_MethodType::form(mtype); +- cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform); +- if (write_cache) { +- (*saved_args_layout_addr()) = cookie; +- } +- return cookie; +-} +- +-void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm, +- // output params: +- int* bounce_offset, +- int* exception_offset, +- int* frame_size_in_words) { +- (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize; +- +- address start = __ pc(); +- +-#ifdef ASSERT +- __ hlt(); __ hlt(); __ hlt(); +- // here's a hint of something special: +- __ push(MAGIC_NUMBER_1); +- __ push(MAGIC_NUMBER_2); +-#endif //ASSERT +- __ hlt(); // not reached +- +- // A return PC has just been popped from the stack. +- // Return values are in registers. +- // The ebp points into the RicochetFrame, which contains +- // a cleanup continuation we must return to. 
+- +- (*bounce_offset) = __ pc() - start; +- BLOCK_COMMENT("ricochet_blob.bounce"); +- +- if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); +- trace_method_handle(_masm, "return/ricochet_blob.bounce"); +- +- __ jmp(frame_address(continuation_offset_in_bytes())); +- __ hlt(); +- DEBUG_ONLY(__ push(MAGIC_NUMBER_2)); +- +- (*exception_offset) = __ pc() - start; +- BLOCK_COMMENT("ricochet_blob.exception"); +- +- // compare this to Interpreter::rethrow_exception_entry, which is parallel code +- // for example, see TemplateInterpreterGenerator::generate_throw_exception +- // Live registers in: +- // rax: exception +- // rdx: return address/pc that threw exception (ignored, always equal to bounce addr) +- __ verify_oop(rax); +- +- // no need to empty_FPU_stack or reinit_heapbase, since caller frame will do the same if needed +- +- // Take down the frame. +- +- // Cf. InterpreterMacroAssembler::remove_activation. +- leave_ricochet_frame(_masm, /*rcx_recv=*/ noreg, +- saved_last_sp_register(), +- /*sender_pc_reg=*/ rdx); +- +- // In between activations - previous activation type unknown yet +- // compute continuation point - the continuation point expects the +- // following registers set up: +- // +- // rax: exception +- // rdx: return address/pc that threw exception +- // rsp: expression stack of caller +- // rbp: ebp of caller +- __ push(rax); // save exception +- __ push(rdx); // save return address +- Register thread_reg = LP64_ONLY(r15_thread) NOT_LP64(rdi); +- NOT_LP64(__ get_thread(thread_reg)); +- __ call_VM_leaf(CAST_FROM_FN_PTR(address, +- SharedRuntime::exception_handler_for_return_address), +- thread_reg, rdx); +- __ mov(rbx, rax); // save exception handler +- __ pop(rdx); // restore return address +- __ pop(rax); // restore exception +- __ jmp(rbx); // jump to exception +- // handler of caller +-} +- +-void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm, +- Register rcx_recv, +- Register rax_argv, +- address return_handler, 
+- Register rbx_temp) { +- const Register saved_last_sp = saved_last_sp_register(); +- Address rcx_mh_vmtarget( rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() ); +- Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() ); +- +- // Push the RicochetFrame a word at a time. +- // This creates something similar to an interpreter frame. +- // Cf. TemplateInterpreterGenerator::generate_fixed_frame. +- BLOCK_COMMENT("push RicochetFrame {"); +- DEBUG_ONLY(int rfo = (int) sizeof(RicochetFrame)); +- assert((rfo -= wordSize) == RicochetFrame::sender_pc_offset_in_bytes(), ""); +-#define RF_FIELD(push_value, name) \ +- { push_value; \ +- assert((rfo -= wordSize) == RicochetFrame::name##_offset_in_bytes(), ""); } +- RF_FIELD(__ push(rbp), sender_link); +- RF_FIELD(__ push(saved_last_sp), exact_sender_sp); // rsi/r13 +- RF_FIELD(__ pushptr(rcx_amh_conversion), conversion); +- RF_FIELD(__ push(rax_argv), saved_args_base); // can be updated if args are shifted +- RF_FIELD(__ push((int32_t) NULL_WORD), saved_args_layout); // cache for GC layout cookie +- if (UseCompressedOops) { +- __ load_heap_oop(rbx_temp, rcx_mh_vmtarget); +- RF_FIELD(__ push(rbx_temp), saved_target); +- } else { +- RF_FIELD(__ pushptr(rcx_mh_vmtarget), saved_target); +- } +- __ lea(rbx_temp, ExternalAddress(return_handler)); +- RF_FIELD(__ push(rbx_temp), continuation); +-#undef RF_FIELD +- assert(rfo == 0, "fully initialized the RicochetFrame"); +- // compute new frame pointer: +- __ lea(rbp, Address(rsp, RicochetFrame::sender_link_offset_in_bytes())); +- // Push guard word #1 in debug mode. +- DEBUG_ONLY(__ push((int32_t) RicochetFrame::MAGIC_NUMBER_1)); +- // For debugging, leave behind an indication of which stub built this frame. 
+- DEBUG_ONLY({ Label L; __ call(L, relocInfo::none); __ bind(L); }); +- BLOCK_COMMENT("} RicochetFrame"); +-} +- +-void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm, +- Register rcx_recv, +- Register new_sp_reg, +- Register sender_pc_reg) { +- assert_different_registers(rcx_recv, new_sp_reg, sender_pc_reg); +- const Register saved_last_sp = saved_last_sp_register(); +- // Take down the frame. +- // Cf. InterpreterMacroAssembler::remove_activation. +- BLOCK_COMMENT("end_ricochet_frame {"); +- // TO DO: If (exact_sender_sp - extended_sender_sp) > THRESH, compact the frame down. +- // This will keep stack in bounds even with unlimited tailcalls, each with an adapter. +- if (rcx_recv->is_valid()) +- __ movptr(rcx_recv, RicochetFrame::frame_address(RicochetFrame::saved_target_offset_in_bytes())); +- __ movptr(sender_pc_reg, RicochetFrame::frame_address(RicochetFrame::sender_pc_offset_in_bytes())); +- __ movptr(saved_last_sp, RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes())); +- __ movptr(rbp, RicochetFrame::frame_address(RicochetFrame::sender_link_offset_in_bytes())); +- __ mov(rsp, new_sp_reg); +- BLOCK_COMMENT("} end_ricochet_frame"); +-} +- +-// Emit code to verify that RBP is pointing at a valid ricochet frame. +-#ifndef PRODUCT +-enum { +- ARG_LIMIT = 255, SLOP = 4, +- // use this parameter for checking for garbage stack movements: +- UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP) +- // the slop defends against false alarms due to fencepost errors +-}; +-#endif +- +-#ifdef ASSERT +-void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) { +- // The stack should look like this: +- // ... keep1 | dest=42 | keep2 | RF | magic | handler | magic | recursive args | +- // Check various invariants. 
+- verify_offsets(); +- +- Register rdi_temp = rdi; +- Register rcx_temp = rcx; +- { __ push(rdi_temp); __ push(rcx_temp); } +-#define UNPUSH_TEMPS \ +- { __ pop(rcx_temp); __ pop(rdi_temp); } +- +- Address magic_number_1_addr = RicochetFrame::frame_address(RicochetFrame::magic_number_1_offset_in_bytes()); +- Address magic_number_2_addr = RicochetFrame::frame_address(RicochetFrame::magic_number_2_offset_in_bytes()); +- Address continuation_addr = RicochetFrame::frame_address(RicochetFrame::continuation_offset_in_bytes()); +- Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); +- Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes()); +- +- Label L_bad, L_ok; +- BLOCK_COMMENT("verify_clean {"); +- // Magic numbers must check out: +- __ cmpptr(magic_number_1_addr, (int32_t) MAGIC_NUMBER_1); +- __ jcc(Assembler::notEqual, L_bad); +- __ cmpptr(magic_number_2_addr, (int32_t) MAGIC_NUMBER_2); +- __ jcc(Assembler::notEqual, L_bad); +- +- // Arguments pointer must look reasonable: +- __ movptr(rcx_temp, saved_args_base_addr); +- __ cmpptr(rcx_temp, rbp); +- __ jcc(Assembler::below, L_bad); +- __ subptr(rcx_temp, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize); +- __ cmpptr(rcx_temp, rbp); +- __ jcc(Assembler::above, L_bad); +- +- load_conversion_dest_type(_masm, rdi_temp, conversion_addr); +- __ cmpl(rdi_temp, T_VOID); +- __ jcc(Assembler::equal, L_ok); +- __ movptr(rcx_temp, saved_args_base_addr); +- load_conversion_vminfo(_masm, rdi_temp, conversion_addr); +- __ cmpptr(Address(rcx_temp, rdi_temp, Interpreter::stackElementScale()), +- (int32_t) RETURN_VALUE_PLACEHOLDER); +- __ jcc(Assembler::equal, L_ok); +- __ BIND(L_bad); +- UNPUSH_TEMPS; +- __ stop("damaged ricochet frame"); +- __ BIND(L_ok); +- UNPUSH_TEMPS; +- BLOCK_COMMENT("} verify_clean"); +- +-#undef UNPUSH_TEMPS +- +-} +-#endif //ASSERT +- + void MethodHandles::load_klass_from_Class(MacroAssembler* 
_masm, Register klass_reg) { + if (VerifyMethodHandles) + verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(), +- "AMH argument is a Class"); ++ "MH argument is a Class"); + __ load_heap_oop(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes())); + } + +-void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr) { +- int bits = BitsPerByte; +- int offset = (CONV_VMINFO_SHIFT / bits); +- int shift = (CONV_VMINFO_SHIFT % bits); +- __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset)); +- assert(CONV_VMINFO_MASK == right_n_bits(bits - shift), "else change type of previous load"); +- assert(shift == 0, "no shift needed"); ++#ifdef ASSERT ++static int check_nonzero(const char* xname, int x) { ++ assert(x != 0, err_msg("%s should be nonzero", xname)); ++ return x; + } +- +-void MethodHandles::load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr) { +- int bits = BitsPerByte; +- int offset = (CONV_DEST_TYPE_SHIFT / bits); +- int shift = (CONV_DEST_TYPE_SHIFT % bits); +- __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset)); +- assert(CONV_TYPE_MASK == right_n_bits(bits - shift), "else change type of previous load"); +- __ shrl(reg, shift); +- DEBUG_ONLY(int conv_type_bits = (int) exact_log2(CONV_TYPE_MASK+1)); +- assert((shift + conv_type_bits) == bits, "left justified in byte"); +-} +- +-void MethodHandles::load_stack_move(MacroAssembler* _masm, +- Register rdi_stack_move, +- Register rcx_amh, +- bool might_be_negative) { +- BLOCK_COMMENT("load_stack_move {"); +- Address rcx_amh_conversion(rcx_amh, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes()); +- __ movl(rdi_stack_move, rcx_amh_conversion); +- __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT); +-#ifdef _LP64 +- if (might_be_negative) { +- // clean high bits of stack motion register (was loaded as an int) +- __ movslq(rdi_stack_move, rdi_stack_move); 
+- } +-#endif //_LP64 +-#ifdef ASSERT +- if (VerifyMethodHandles) { +- Label L_ok, L_bad; +- int32_t stack_move_limit = 0x4000; // extra-large +- __ cmpptr(rdi_stack_move, stack_move_limit); +- __ jcc(Assembler::greaterEqual, L_bad); +- __ cmpptr(rdi_stack_move, -stack_move_limit); +- __ jcc(Assembler::greater, L_ok); +- __ bind(L_bad); +- __ stop("load_stack_move of garbage value"); +- __ BIND(L_ok); +- } +-#endif +- BLOCK_COMMENT("} load_stack_move"); +-} ++#define NONZERO(x) check_nonzero(#x, x) ++#else //ASSERT ++#define NONZERO(x) (x) ++#endif //ASSERT + + #ifdef ASSERT +-void MethodHandles::RicochetFrame::verify_offsets() { +- // Check compatibility of this struct with the more generally used offsets of class frame: +- int ebp_off = sender_link_offset_in_bytes(); // offset from struct base to local rbp value +- assert(ebp_off + wordSize*frame::interpreter_frame_method_offset == saved_args_base_offset_in_bytes(), ""); +- assert(ebp_off + wordSize*frame::interpreter_frame_last_sp_offset == conversion_offset_in_bytes(), ""); +- assert(ebp_off + wordSize*frame::interpreter_frame_sender_sp_offset == exact_sender_sp_offset_in_bytes(), ""); +- // These last two have to be exact: +- assert(ebp_off + wordSize*frame::link_offset == sender_link_offset_in_bytes(), ""); +- assert(ebp_off + wordSize*frame::return_addr_offset == sender_pc_offset_in_bytes(), ""); +-} +- +-void MethodHandles::RicochetFrame::verify() const { +- verify_offsets(); +- assert(magic_number_1() == MAGIC_NUMBER_1, err_msg(PTR_FORMAT " == " PTR_FORMAT, magic_number_1(), MAGIC_NUMBER_1)); +- assert(magic_number_2() == MAGIC_NUMBER_2, err_msg(PTR_FORMAT " == " PTR_FORMAT, magic_number_2(), MAGIC_NUMBER_2)); +- if (!Universe::heap()->is_gc_active()) { +- if (saved_args_layout() != NULL) { +- assert(saved_args_layout()->is_method(), "must be valid oop"); +- } +- if (saved_target() != NULL) { +- assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value"); +- } +- } +- int 
conv_op = adapter_conversion_op(conversion()); +- assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS || +- conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS || +- conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF, +- "must be a sane conversion"); +- if (has_return_value_slot()) { +- assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, ""); +- } +-} +-#endif //PRODUCT +- +-#ifdef ASSERT +-void MethodHandles::verify_argslot(MacroAssembler* _masm, +- Register argslot_reg, +- const char* error_message) { +- // Verify that argslot lies within (rsp, rbp]. +- Label L_ok, L_bad; +- BLOCK_COMMENT("verify_argslot {"); +- __ cmpptr(argslot_reg, rbp); +- __ jccb(Assembler::above, L_bad); +- __ cmpptr(rsp, argslot_reg); +- __ jccb(Assembler::below, L_ok); +- __ bind(L_bad); +- __ stop(error_message); +- __ BIND(L_ok); +- BLOCK_COMMENT("} verify_argslot"); +-} +- +-void MethodHandles::verify_argslots(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, +- Register arg_slot_base_reg, +- bool negate_argslots, +- const char* error_message) { +- // Verify that [argslot..argslot+size) lies within (rsp, rbp). +- Label L_ok, L_bad; +- Register rdi_temp = rdi; +- BLOCK_COMMENT("verify_argslots {"); +- __ push(rdi_temp); +- if (negate_argslots) { +- if (arg_slots.is_constant()) { +- arg_slots = -1 * arg_slots.as_constant(); +- } else { +- __ movptr(rdi_temp, arg_slots); +- __ negptr(rdi_temp); +- arg_slots = rdi_temp; +- } +- } +- __ lea(rdi_temp, Address(arg_slot_base_reg, arg_slots, Interpreter::stackElementScale())); +- __ cmpptr(rdi_temp, rbp); +- __ pop(rdi_temp); +- __ jcc(Assembler::above, L_bad); +- __ cmpptr(rsp, arg_slot_base_reg); +- __ jcc(Assembler::below, L_ok); +- __ bind(L_bad); +- __ stop(error_message); +- __ BIND(L_ok); +- BLOCK_COMMENT("} verify_argslots"); +-} +- +-// Make sure that arg_slots has the same sign as the given direction. 
+-// If (and only if) arg_slots is a assembly-time constant, also allow it to be zero. +-void MethodHandles::verify_stack_move(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, int direction) { +- bool allow_zero = arg_slots.is_constant(); +- if (direction == 0) { direction = +1; allow_zero = true; } +- assert(stack_move_unit() == -1, "else add extra checks here"); +- if (arg_slots.is_register()) { +- Label L_ok, L_bad; +- BLOCK_COMMENT("verify_stack_move {"); +- // testl(arg_slots.as_register(), -stack_move_unit() - 1); // no need +- // jcc(Assembler::notZero, L_bad); +- __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD); +- if (direction > 0) { +- __ jcc(allow_zero ? Assembler::less : Assembler::lessEqual, L_bad); +- __ cmpptr(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE); +- __ jcc(Assembler::less, L_ok); +- } else { +- __ jcc(allow_zero ? Assembler::greater : Assembler::greaterEqual, L_bad); +- __ cmpptr(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE); +- __ jcc(Assembler::greater, L_ok); +- } +- __ bind(L_bad); +- if (direction > 0) +- __ stop("assert arg_slots > 0"); +- else +- __ stop("assert arg_slots < 0"); +- __ BIND(L_ok); +- BLOCK_COMMENT("} verify_stack_move"); +- } else { +- intptr_t size = arg_slots.as_constant(); +- if (direction < 0) size = -size; +- assert(size >= 0, "correct direction of constant move"); +- assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move"); +- } +-} +- + void MethodHandles::verify_klass(MacroAssembler* _masm, + Register obj, KlassHandle klass, + const char* error_message) { +@@ -528,12 +71,15 @@ + klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(), + "must be one of the SystemDictionaryHandles"); + Register temp = rdi; ++ Register temp2 = noreg; ++ LP64_ONLY(temp2 = rscratch1); // used by MacroAssembler::cmpptr + Label L_ok, L_bad; + BLOCK_COMMENT("verify_klass {"); + __ verify_oop(obj); + __ testptr(obj, obj); + __ jcc(Assembler::zero, L_bad); +- __ 
push(temp); ++ __ push(temp); if (temp2 != noreg) __ push(temp2); ++#define UNPUSH { if (temp2 != noreg) __ pop(temp2); __ pop(temp); } + __ load_klass(temp, obj); + __ cmpptr(temp, ExternalAddress((address) klass_addr)); + __ jcc(Assembler::equal, L_ok); +@@ -541,17 +87,42 @@ + __ movptr(temp, Address(temp, super_check_offset)); + __ cmpptr(temp, ExternalAddress((address) klass_addr)); + __ jcc(Assembler::equal, L_ok); +- __ pop(temp); ++ UNPUSH; + __ bind(L_bad); +- __ stop(error_message); ++ __ STOP(error_message); + __ BIND(L_ok); +- __ pop(temp); ++ UNPUSH; + BLOCK_COMMENT("} verify_klass"); + } ++ ++void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) { ++ Label L; ++ BLOCK_COMMENT("verify_ref_kind {"); ++ __ movl(temp, Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes()))); ++ __ shrl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT); ++ __ andl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK); ++ __ cmpl(temp, ref_kind); ++ __ jcc(Assembler::equal, L); ++ { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal); ++ jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind); ++ if (ref_kind == JVM_REF_invokeVirtual || ++ ref_kind == JVM_REF_invokeSpecial) ++ // could do this for all ref_kinds, but would explode assembly code size ++ trace_method_handle(_masm, buf); ++ __ STOP(buf); ++ } ++ BLOCK_COMMENT("} verify_ref_kind"); ++ __ bind(L); ++} ++ + #endif //ASSERT + +-void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp) { +- if (JvmtiExport::can_post_interpreter_events()) { ++void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, ++ bool for_compiler_entry) { ++ assert(method == rbx, "interpreter calling convention"); ++ __ verify_oop(method); ++ ++ if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) { + Label run_compiled_code; + // JVMTI 
events, such as single-stepping, are implemented partly by avoiding running + // compiled code in threads for which the event is enabled. Check here for +@@ -567,470 +138,383 @@ + __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0); + __ jccb(Assembler::zero, run_compiled_code); + __ jmp(Address(method, methodOopDesc::interpreter_entry_offset())); +- __ bind(run_compiled_code); ++ __ BIND(run_compiled_code); + } +- __ jmp(Address(method, methodOopDesc::from_interpreted_offset())); ++ ++ const ByteSize entry_offset = for_compiler_entry ? methodOopDesc::from_compiled_offset() : ++ methodOopDesc::from_interpreted_offset(); ++ __ jmp(Address(method, entry_offset)); + } + ++void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, ++ Register recv, Register method_temp, ++ Register temp2, ++ bool for_compiler_entry) { ++ BLOCK_COMMENT("jump_to_lambda_form {"); ++ // This is the initial entry point of a lazy method handle. ++ // After type checking, it picks up the invoker from the LambdaForm. 
++ assert_different_registers(recv, method_temp, temp2); ++ assert(recv != noreg, "required register"); ++ assert(method_temp == rbx, "required register for loading method"); ++ ++ //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); }); ++ ++ // Load the invoker, as MH -> MH.form -> LF.vmentry ++ __ verify_oop(recv); ++ __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()))); ++ __ verify_oop(method_temp); ++ __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()))); ++ __ verify_oop(method_temp); ++ // the following assumes that a methodOop is normally compressed in the vmtarget field: ++ __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()))); ++ __ verify_oop(method_temp); ++ ++ if (VerifyMethodHandles && !for_compiler_entry) { ++ // make sure recv is already on stack ++ __ load_sized_value(temp2, ++ Address(method_temp, methodOopDesc::size_of_parameters_offset()), ++ sizeof(u2), /*is_signed*/ false); ++ // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), ""); ++ Label L; ++ __ cmpptr(recv, __ argument_address(temp2, -1)); ++ __ jcc(Assembler::equal, L); ++ __ movptr(rax, __ argument_address(temp2, -1)); ++ __ STOP("receiver not on stack"); ++ __ BIND(L); ++ } ++ ++ jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry); ++ BLOCK_COMMENT("} jump_to_lambda_form"); ++} ++ ++ + // Code generation +-address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) { ++address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm, ++ vmIntrinsics::ID iid) { ++ const bool not_for_compiler_entry = false; // this is the interpreter entry ++ assert(is_signature_polymorphic(iid), "expected invoke iid"); ++ if (iid == vmIntrinsics::_invokeGeneric || ++ iid == 
vmIntrinsics::_compiledLambdaForm) { ++ // Perhaps surprisingly, the symbolic references visible to Java are not directly used. ++ // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod. ++ // They all allow an appendix argument. ++ __ hlt(); // empty stubs make SG sick ++ return NULL; ++ } ++ ++ // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted) + // rbx: methodOop +- // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots]) +- // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted) +- // rdx, rdi: garbage temp, blown away ++ // rdx: argument locator (parameter slot count, added to rsp) ++ // rcx: used as temp to hold mh or receiver ++ // rax, rdi: garbage temps, blown away ++ Register rdx_argp = rdx; // argument list ptr, live on error paths ++ Register rax_temp = rax; ++ Register rcx_mh = rcx; // MH receiver; dies quickly and is recycled ++ Register rbx_method = rbx; // eventual target of this invocation + +- Register rbx_method = rbx; +- Register rcx_recv = rcx; +- Register rax_mtype = rax; +- Register rdx_temp = rdx; +- Register rdi_temp = rdi; +- +- // emit WrongMethodType path first, to enable jccb back-branch from main path +- Label wrong_method_type; +- __ bind(wrong_method_type); +- Label invoke_generic_slow_path, invoke_exact_error_path; +- assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");; +- __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeExact); +- __ jcc(Assembler::notEqual, invoke_generic_slow_path); +- __ jmp(invoke_exact_error_path); ++ address code_start = __ pc(); + + // here's where control starts out: + __ align(CodeEntryAlignment); + address entry_point = __ pc(); + +- // fetch the MethodType from the method handle into rax (the 'check' register) +- // FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list. 
+- // This would simplify several touchy bits of code. +- // See 6984712: JSR 292 method handle calls need a clean argument base pointer +- { +- Register tem = rbx_method; +- for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) { +- __ movptr(rax_mtype, Address(tem, *pchase)); +- tem = rax_mtype; // in case there is another indirection ++ if (VerifyMethodHandles) { ++ Label L; ++ BLOCK_COMMENT("verify_intrinsic_id {"); ++ __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) iid); ++ __ jcc(Assembler::equal, L); ++ if (iid == vmIntrinsics::_linkToVirtual || ++ iid == vmIntrinsics::_linkToSpecial) { ++ // could do this for all kinds, but would explode assembly code size ++ trace_method_handle(_masm, "bad methodOop::intrinsic_id"); ++ } ++ __ STOP("bad methodOop::intrinsic_id"); ++ __ bind(L); ++ BLOCK_COMMENT("} verify_intrinsic_id"); + } + } + +- // given the MethodType, find out where the MH argument is buried +- __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp))); +- Register rdx_vmslots = rdx_temp; +- __ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp))); +- Address mh_receiver_slot_addr = __ argument_address(rdx_vmslots); +- __ movptr(rcx_recv, mh_receiver_slot_addr); ++ // First task: Find out how big the argument list is. 
++ Address rdx_first_arg_addr; ++ int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid); ++ assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic"); ++ if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) { ++ __ load_sized_value(rdx_argp, ++ Address(rbx_method, methodOopDesc::size_of_parameters_offset()), ++ sizeof(u2), /*is_signed*/ false); ++ // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), ""); ++ rdx_first_arg_addr = __ argument_address(rdx_argp, -1); ++ } else { ++ DEBUG_ONLY(rdx_argp = noreg); ++ } + + trace_method_handle(_masm, "invokeExact"); ++ if (!is_signature_polymorphic_static(iid)) { ++ __ movptr(rcx_mh, rdx_first_arg_addr); ++ DEBUG_ONLY(rdx_argp = noreg); ++ } + +- __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type); ++ // rdx_first_arg_addr is live! + +- // Nobody uses the MH receiver slot after this. Make sure. +- DEBUG_ONLY(__ movptr(mh_receiver_slot_addr, (int32_t)0x999999)); ++ if (TraceMethodHandles) { ++ const char* name = vmIntrinsics::name_at(iid); ++ if (*name == '_') name += 1; ++ const size_t len = strlen(name) + 50; ++ char* qname = NEW_C_HEAP_ARRAY(char, len, mtInternal); ++ const char* suffix = ""; ++ if (vmIntrinsics::method_for(iid) == NULL || ++ !vmIntrinsics::method_for(iid)->access_flags().is_public()) { ++ if (is_signature_polymorphic_static(iid)) ++ suffix = "/static"; ++ else ++ suffix = "/private"; ++ } ++ jio_snprintf(qname, len, "MethodHandle::interpreter_entry::%s%s", name, suffix); ++ // note: stub look for mh in rcx ++ trace_method_handle(_masm, qname); ++ } + +- __ jump_to_method_handle_entry(rcx_recv, rdi_temp); ++ if (iid == vmIntrinsics::_invokeBasic) { ++ generate_method_handle_dispatch(_masm, iid, rcx_mh, noreg, not_for_compiler_entry); + +- // error path for invokeExact (only) +- __ bind(invoke_exact_error_path); +- // ensure that the top of stack is properly aligned. 
+- __ mov(rdi, rsp); +- __ andptr(rsp, -StackAlignmentInBytes); // Align the stack for the ABI +- __ pushptr(Address(rdi, 0)); // Pick up the return address ++ } else { ++ // Adjust argument list by popping the trailing MemberName argument. ++ Register rcx_recv = noreg; ++ if (MethodHandles::ref_kind_has_receiver(ref_kind)) { ++ // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack. ++ __ movptr(rcx_recv = rcx, rdx_first_arg_addr); ++ } ++ DEBUG_ONLY(rdx_argp = noreg); ++ Register rbx_member = rbx_method; // MemberName ptr; incoming method ptr is dead now ++ __ pop(rax_temp); // return address ++ __ pop(rbx_member); // extract last argument ++ __ push(rax_temp); // re-push return address ++ generate_method_handle_dispatch(_masm, iid, rcx_recv, rbx_member, not_for_compiler_entry); ++ } + +- // Stub wants expected type in rax and the actual type in rcx +- __ jump(ExternalAddress(StubRoutines::throw_WrongMethodTypeException_entry())); +- +- // for invokeGeneric (only), apply argument and result conversions on the fly +- __ bind(invoke_generic_slow_path); +-#ifdef ASSERT +- if (VerifyMethodHandles) { +- Label L; +- __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric); +- __ jcc(Assembler::equal, L); +- __ stop("bad methodOop::intrinsic_id"); +- __ bind(L); ++ if (PrintMethodHandleStubs) { ++ address code_end = __ pc(); ++ tty->print_cr("--------"); ++ tty->print_cr("method handle interpreter entry for %s", vmIntrinsics::name_at(iid)); ++ Disassembler::decode(code_start, code_end); ++ tty->cr(); + } +-#endif //ASSERT +- Register rbx_temp = rbx_method; // don't need it now +- +- // make room on the stack for another pointer: +- Register rcx_argslot = rcx_recv; +- __ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1)); +- insert_arg_slots(_masm, 2 * stack_move_unit(), +- rcx_argslot, rbx_temp, rdx_temp); +- +- // load up an adapter from the calling type (Java weaves 
this) +- Register rdx_adapter = rdx_temp; +- __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp))); +- __ load_heap_oop(rdx_adapter, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp))); +- __ verify_oop(rdx_adapter); +- __ movptr(Address(rcx_argslot, 1 * Interpreter::stackElementSize), rdx_adapter); +- // As a trusted first argument, pass the type being called, so the adapter knows +- // the actual types of the arguments and return values. +- // (Generic invokers are shared among form-families of method-type.) +- __ movptr(Address(rcx_argslot, 0 * Interpreter::stackElementSize), rax_mtype); +- // FIXME: assert that rdx_adapter is of the right method-type. +- __ mov(rcx, rdx_adapter); +- trace_method_handle(_masm, "invokeGeneric"); +- __ jump_to_method_handle_entry(rcx, rdi_temp); + + return entry_point; + } + +-// Helper to insert argument slots into the stack. +-// arg_slots must be a multiple of stack_move_unit() and < 0 +-// rax_argslot is decremented to point to the new (shifted) location of the argslot +-// But, rdx_temp ends up holding the original value of rax_argslot. +-void MethodHandles::insert_arg_slots(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, +- Register rax_argslot, +- Register rbx_temp, Register rdx_temp) { +- // allow constant zero +- if (arg_slots.is_constant() && arg_slots.as_constant() == 0) +- return; +- assert_different_registers(rax_argslot, rbx_temp, rdx_temp, +- (!arg_slots.is_register() ? rsp : arg_slots.as_register())); +- if (VerifyMethodHandles) +- verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame"); +- if (VerifyMethodHandles) +- verify_stack_move(_masm, arg_slots, -1); + +- // We have to insert at least one word, so bang the stack. +- if (UseStackBanging) { +- int frame_size = (arg_slots.is_constant() ? 
-1 * arg_slots.as_constant() * wordSize : 0); +- if (frame_size <= 0) +- frame_size = 256 * Interpreter::stackElementSize; // conservative +- __ generate_stack_overflow_check(frame_size); ++void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, ++ vmIntrinsics::ID iid, ++ Register receiver_reg, ++ Register member_reg, ++ bool for_compiler_entry) { ++ assert(is_signature_polymorphic(iid), "expected invoke iid"); ++ Register rbx_method = rbx; // eventual target of this invocation ++ // temps used in this code are not used in *either* compiled or interpreted calling sequences ++#ifdef _LP64 ++ Register temp1 = rscratch1; ++ Register temp2 = rscratch2; ++ Register temp3 = rax; ++ if (for_compiler_entry) { ++ assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment"); ++ assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5); ++ assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5); ++ assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5); + } +- +- // Make space on the stack for the inserted argument(s). +- // Then pull down everything shallower than rax_argslot. +- // The stacked return address gets pulled down with everything else. +- // That is, copy [rsp, argslot) downward by -size words. 
In pseudo-code: +- // rsp -= size; +- // for (rdx = rsp + size; rdx < argslot; rdx++) +- // rdx[-size] = rdx[0] +- // argslot -= size; +- BLOCK_COMMENT("insert_arg_slots {"); +- __ mov(rdx_temp, rsp); // source pointer for copy +- __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale())); +- { +- Label loop; +- __ BIND(loop); +- // pull one word down each time through the loop +- __ movptr(rbx_temp, Address(rdx_temp, 0)); +- __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp); +- __ addptr(rdx_temp, wordSize); +- __ cmpptr(rdx_temp, rax_argslot); +- __ jcc(Assembler::below, loop); +- } +- +- // Now move the argslot down, to point to the opened-up space. +- __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale())); +- BLOCK_COMMENT("} insert_arg_slots"); +-} +- +-// Helper to remove argument slots from the stack. +-// arg_slots must be a multiple of stack_move_unit() and > 0 +-void MethodHandles::remove_arg_slots(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, +- Register rax_argslot, +- Register rbx_temp, Register rdx_temp) { +- // allow constant zero +- if (arg_slots.is_constant() && arg_slots.as_constant() == 0) +- return; +- assert_different_registers(rax_argslot, rbx_temp, rdx_temp, +- (!arg_slots.is_register() ? rsp : arg_slots.as_register())); +- if (VerifyMethodHandles) +- verify_argslots(_masm, arg_slots, rax_argslot, false, +- "deleted argument(s) must fall within current frame"); +- if (VerifyMethodHandles) +- verify_stack_move(_masm, arg_slots, +1); +- +- BLOCK_COMMENT("remove_arg_slots {"); +- // Pull up everything shallower than rax_argslot. +- // Then remove the excess space on the stack. +- // The stacked return address gets pulled up with everything else. +- // That is, copy [rsp, argslot) upward by size words. 
In pseudo-code: +- // for (rdx = argslot-1; rdx >= rsp; --rdx) +- // rdx[size] = rdx[0] +- // argslot += size; +- // rsp += size; +- __ lea(rdx_temp, Address(rax_argslot, -wordSize)); // source pointer for copy +- { +- Label loop; +- __ BIND(loop); +- // pull one word up each time through the loop +- __ movptr(rbx_temp, Address(rdx_temp, 0)); +- __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp); +- __ addptr(rdx_temp, -wordSize); +- __ cmpptr(rdx_temp, rsp); +- __ jcc(Assembler::aboveEqual, loop); +- } +- +- // Now move the argslot up, to point to the just-copied block. +- __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale())); +- // And adjust the argslot address to point at the deletion point. +- __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale())); +- BLOCK_COMMENT("} remove_arg_slots"); +-} +- +-// Helper to copy argument slots to the top of the stack. +-// The sequence starts with rax_argslot and is counted by slot_count +-// slot_count must be a multiple of stack_move_unit() and >= 0 +-// This function blows the temps but does not change rax_argslot. +-void MethodHandles::push_arg_slots(MacroAssembler* _masm, +- Register rax_argslot, +- RegisterOrConstant slot_count, +- int skip_words_count, +- Register rbx_temp, Register rdx_temp) { +- assert_different_registers(rax_argslot, rbx_temp, rdx_temp, +- (!slot_count.is_register() ? rbp : slot_count.as_register()), +- rsp); +- assert(Interpreter::stackElementSize == wordSize, "else change this code"); +- +- if (VerifyMethodHandles) +- verify_stack_move(_masm, slot_count, 0); +- +- // allow constant zero +- if (slot_count.is_constant() && slot_count.as_constant() == 0) +- return; +- +- BLOCK_COMMENT("push_arg_slots {"); +- +- Register rbx_top = rbx_temp; +- +- // There is at most 1 word to carry down with the TOS. 
+- switch (skip_words_count) { +- case 1: __ pop(rdx_temp); break; +- case 0: break; +- default: ShouldNotReachHere(); +- } +- +- if (slot_count.is_constant()) { +- for (int i = slot_count.as_constant() - 1; i >= 0; i--) { +- __ pushptr(Address(rax_argslot, i * wordSize)); +- } +- } else { +- Label L_plural, L_loop, L_break; +- // Emit code to dynamically check for the common cases, zero and one slot. +- __ cmpl(slot_count.as_register(), (int32_t) 1); +- __ jccb(Assembler::greater, L_plural); +- __ jccb(Assembler::less, L_break); +- __ pushptr(Address(rax_argslot, 0)); +- __ jmpb(L_break); +- __ BIND(L_plural); +- +- // Loop for 2 or more: +- // rbx = &rax[slot_count] +- // while (rbx > rax) *(--rsp) = *(--rbx) +- __ lea(rbx_top, Address(rax_argslot, slot_count, Address::times_ptr)); +- __ BIND(L_loop); +- __ subptr(rbx_top, wordSize); +- __ pushptr(Address(rbx_top, 0)); +- __ cmpptr(rbx_top, rax_argslot); +- __ jcc(Assembler::above, L_loop); +- __ bind(L_break); +- } +- switch (skip_words_count) { +- case 1: __ push(rdx_temp); break; +- case 0: break; +- default: ShouldNotReachHere(); +- } +- BLOCK_COMMENT("} push_arg_slots"); +-} +- +-// in-place movement; no change to rsp +-// blows rax_temp, rdx_temp +-void MethodHandles::move_arg_slots_up(MacroAssembler* _masm, +- Register rbx_bottom, // invariant +- Address top_addr, // can use rax_temp +- RegisterOrConstant positive_distance_in_slots, +- Register rax_temp, Register rdx_temp) { +- BLOCK_COMMENT("move_arg_slots_up {"); +- assert_different_registers(rbx_bottom, +- rax_temp, rdx_temp, +- positive_distance_in_slots.register_or_noreg()); +- Label L_loop, L_break; +- Register rax_top = rax_temp; +- if (!top_addr.is_same_address(Address(rax_top, 0))) +- __ lea(rax_top, top_addr); +- // Detect empty (or broken) loop: +-#ifdef ASSERT +- if (VerifyMethodHandles) { +- // Verify that &bottom < &top (non-empty interval) +- Label L_ok, L_bad; +- if (positive_distance_in_slots.is_register()) { +- __ 
cmpptr(positive_distance_in_slots.as_register(), (int32_t) 0); +- __ jcc(Assembler::lessEqual, L_bad); +- } +- __ cmpptr(rbx_bottom, rax_top); +- __ jcc(Assembler::below, L_ok); +- __ bind(L_bad); +- __ stop("valid bounds (copy up)"); +- __ BIND(L_ok); ++#else ++ Register temp1 = (for_compiler_entry ? rsi : rdx); ++ Register temp2 = rdi; ++ Register temp3 = rax; ++ if (for_compiler_entry) { ++ assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : rcx), "only valid assignment"); ++ assert_different_registers(temp1, rcx, rdx); ++ assert_different_registers(temp2, rcx, rdx); ++ assert_different_registers(temp3, rcx, rdx); + } + #endif +- __ cmpptr(rbx_bottom, rax_top); +- __ jccb(Assembler::aboveEqual, L_break); +- // work rax down to rbx, copying contiguous data upwards +- // In pseudo-code: +- // [rbx, rax) = &[bottom, top) +- // while (--rax >= rbx) *(rax + distance) = *(rax + 0), rax--; +- __ BIND(L_loop); +- __ subptr(rax_top, wordSize); +- __ movptr(rdx_temp, Address(rax_top, 0)); +- __ movptr( Address(rax_top, positive_distance_in_slots, Address::times_ptr), rdx_temp); +- __ cmpptr(rax_top, rbx_bottom); +- __ jcc(Assembler::above, L_loop); +- assert(Interpreter::stackElementSize == wordSize, "else change loop"); +- __ bind(L_break); +- BLOCK_COMMENT("} move_arg_slots_up"); +-} ++ assert_different_registers(temp1, temp2, temp3, receiver_reg); ++ assert_different_registers(temp1, temp2, temp3, member_reg); ++ if (!for_compiler_entry) ++ assert_different_registers(temp1, temp2, temp3, saved_last_sp_register()); // don't trash lastSP + +-// in-place movement; no change to rsp +-// blows rax_temp, rdx_temp +-void MethodHandles::move_arg_slots_down(MacroAssembler* _masm, +- Address bottom_addr, // can use rax_temp +- Register rbx_top, // invariant +- RegisterOrConstant negative_distance_in_slots, +- Register rax_temp, Register rdx_temp) { +- BLOCK_COMMENT("move_arg_slots_down {"); +- assert_different_registers(rbx_top, +- 
negative_distance_in_slots.register_or_noreg(), +- rax_temp, rdx_temp); +- Label L_loop, L_break; +- Register rax_bottom = rax_temp; +- if (!bottom_addr.is_same_address(Address(rax_bottom, 0))) +- __ lea(rax_bottom, bottom_addr); +- // Detect empty (or broken) loop: +-#ifdef ASSERT +- assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, ""); +- if (VerifyMethodHandles) { +- // Verify that &bottom < &top (non-empty interval) +- Label L_ok, L_bad; +- if (negative_distance_in_slots.is_register()) { +- __ cmpptr(negative_distance_in_slots.as_register(), (int32_t) 0); +- __ jcc(Assembler::greaterEqual, L_bad); ++ if (iid == vmIntrinsics::_invokeBasic) { ++ // indirect through MH.form.vmentry.vmtarget ++ jump_to_lambda_form(_masm, receiver_reg, rbx_method, temp1, for_compiler_entry); ++ ++ } else { ++ // The method is a member invoker used by direct method handles. ++ if (VerifyMethodHandles) { ++ // make sure the trailing argument really is a MemberName (caller responsibility) ++ verify_klass(_masm, member_reg, SystemDictionaryHandles::MemberName_klass(), ++ "MemberName required for invokeVirtual etc."); + } +- __ cmpptr(rax_bottom, rbx_top); +- __ jcc(Assembler::below, L_ok); +- __ bind(L_bad); +- __ stop("valid bounds (copy down)"); +- __ BIND(L_ok); ++ ++ Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes())); ++ Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes())); ++ Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())); ++ ++ Register temp1_recv_klass = temp1; ++ if (iid != vmIntrinsics::_linkToStatic) { ++ __ verify_oop(receiver_reg); ++ if (iid == vmIntrinsics::_linkToSpecial) { ++ // Don't actually load the klass; just null-check the receiver. 
++ __ null_check(receiver_reg); ++ } else { ++ // load receiver klass itself ++ __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes()); ++ __ load_klass(temp1_recv_klass, receiver_reg); ++ __ verify_oop(temp1_recv_klass); ++ } ++ BLOCK_COMMENT("check_receiver {"); ++ // The receiver for the MemberName must be in receiver_reg. ++ // Check the receiver against the MemberName.clazz ++ if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) { ++ // Did not load it above... ++ __ load_klass(temp1_recv_klass, receiver_reg); ++ __ verify_oop(temp1_recv_klass); ++ } ++ if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) { ++ Label L_ok; ++ Register temp2_defc = temp2; ++ __ load_heap_oop(temp2_defc, member_clazz); ++ load_klass_from_Class(_masm, temp2_defc); ++ __ verify_oop(temp2_defc); ++ __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok); ++ // If we get here, the type check failed! ++ __ STOP("receiver class disagrees with MemberName.clazz"); ++ __ bind(L_ok); ++ } ++ BLOCK_COMMENT("} check_receiver"); ++ } ++ if (iid == vmIntrinsics::_linkToSpecial || ++ iid == vmIntrinsics::_linkToStatic) { ++ DEBUG_ONLY(temp1_recv_klass = noreg); // these guys didn't load the recv_klass ++ } ++ ++ // Live registers at this point: ++ // member_reg - MemberName that was the trailing argument ++ // temp1_recv_klass - klass of stacked receiver, if needed ++ // rsi/r13 - interpreter linkage (if interpreted) ++ // rcx, rdx, rsi, rdi, r8, r8 - compiler arguments (if compiled) ++ ++ bool method_is_live = false; ++ switch (iid) { ++ case vmIntrinsics::_linkToSpecial: ++ if (VerifyMethodHandles) { ++ verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3); ++ } ++ __ load_heap_oop(rbx_method, member_vmtarget); ++ method_is_live = true; ++ break; ++ ++ case vmIntrinsics::_linkToStatic: ++ if (VerifyMethodHandles) { ++ verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3); ++ } ++ __ load_heap_oop(rbx_method, member_vmtarget); ++ 
method_is_live = true; ++ break; ++ ++ case vmIntrinsics::_linkToVirtual: ++ { ++ // same as TemplateTable::invokevirtual, ++ // minus the CP setup and profiling: ++ ++ if (VerifyMethodHandles) { ++ verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3); ++ } ++ ++ // pick out the vtable index from the MemberName, and then we can discard it: ++ Register temp2_index = temp2; ++ __ movptr(temp2_index, member_vmindex); ++ ++ if (VerifyMethodHandles) { ++ Label L_index_ok; ++ __ cmpl(temp2_index, 0); ++ __ jcc(Assembler::greaterEqual, L_index_ok); ++ __ STOP("no virtual index"); ++ __ BIND(L_index_ok); ++ } ++ ++ // Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget ++ // at this point. And VerifyMethodHandles has already checked clazz, if needed. ++ ++ // get target methodOop & entry point ++ __ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method); ++ method_is_live = true; ++ break; ++ } ++ ++ case vmIntrinsics::_linkToInterface: ++ { ++ // same as TemplateTable::invokeinterface ++ // (minus the CP setup and profiling, with different argument motion) ++ if (VerifyMethodHandles) { ++ verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3); ++ } ++ ++ Register temp3_intf = temp3; ++ __ load_heap_oop(temp3_intf, member_clazz); ++ load_klass_from_Class(_masm, temp3_intf); ++ __ verify_oop(temp3_intf); ++ ++ Register rbx_index = rbx_method; ++ __ movptr(rbx_index, member_vmindex); ++ if (VerifyMethodHandles) { ++ Label L; ++ __ cmpl(rbx_index, 0); ++ __ jcc(Assembler::greaterEqual, L); ++ __ STOP("invalid vtable index for MH.invokeInterface"); ++ __ bind(L); ++ } ++ ++ // given intf, index, and recv klass, dispatch to the implementation method ++ Label L_no_such_interface; ++ __ lookup_interface_method(temp1_recv_klass, temp3_intf, ++ // note: next two args must be the same: ++ rbx_index, rbx_method, ++ temp2, ++ L_no_such_interface); ++ ++ __ verify_oop(rbx_method); ++ jump_from_method_handle(_masm, rbx_method, 
temp2, for_compiler_entry); ++ __ hlt(); ++ ++ __ bind(L_no_such_interface); ++ __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry())); ++ break; ++ } ++ ++ default: ++ fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid))); ++ break; ++ } ++ ++ if (method_is_live) { ++ // live at this point: rbx_method, rsi/r13 (if interpreted) ++ ++ // After figuring out which concrete method to call, jump into it. ++ // Note that this works in the interpreter with no data motion. ++ // But the compiled version will require that rcx_recv be shifted out. ++ __ verify_oop(rbx_method); ++ jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry); ++ } + } +-#endif +- __ cmpptr(rax_bottom, rbx_top); +- __ jccb(Assembler::aboveEqual, L_break); +- // work rax up to rbx, copying contiguous data downwards +- // In pseudo-code: +- // [rax, rbx) = &[bottom, top) +- // while (rax < rbx) *(rax - distance) = *(rax + 0), rax++; +- __ BIND(L_loop); +- __ movptr(rdx_temp, Address(rax_bottom, 0)); +- __ movptr( Address(rax_bottom, negative_distance_in_slots, Address::times_ptr), rdx_temp); +- __ addptr(rax_bottom, wordSize); +- __ cmpptr(rax_bottom, rbx_top); +- __ jcc(Assembler::below, L_loop); +- assert(Interpreter::stackElementSize == wordSize, "else change loop"); +- __ bind(L_break); +- BLOCK_COMMENT("} move_arg_slots_down"); +-} +- +-// Copy from a field or array element to a stacked argument slot. +-// is_element (ignored) says whether caller is loading an array element instead of an instance field. +-void MethodHandles::move_typed_arg(MacroAssembler* _masm, +- BasicType type, bool is_element, +- Address slot_dest, Address value_src, +- Register rbx_temp, Register rdx_temp) { +- BLOCK_COMMENT(!is_element ? 
"move_typed_arg {" : "move_typed_arg { (array element)"); +- if (type == T_OBJECT || type == T_ARRAY) { +- __ load_heap_oop(rbx_temp, value_src); +- __ movptr(slot_dest, rbx_temp); +- } else if (type != T_VOID) { +- int arg_size = type2aelembytes(type); +- bool arg_is_signed = is_signed_subword_type(type); +- int slot_size = (arg_size > wordSize) ? arg_size : wordSize; +- __ load_sized_value( rdx_temp, value_src, arg_size, arg_is_signed, rbx_temp); +- __ store_sized_value( slot_dest, rdx_temp, slot_size, rbx_temp); +- } +- BLOCK_COMMENT("} move_typed_arg"); +-} +- +-void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type, +- Address return_slot) { +- BLOCK_COMMENT("move_return_value {"); +- // Old versions of the JVM must clean the FPU stack after every return. +-#ifndef _LP64 +-#ifdef COMPILER2 +- // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases +- if ((type == T_FLOAT && UseSSE < 1) || (type == T_DOUBLE && UseSSE < 2)) { +- for (int i = 1; i < 8; i++) { +- __ ffree(i); +- } +- } else if (UseSSE < 2) { +- __ empty_FPU_stack(); +- } +-#endif //COMPILER2 +-#endif //!_LP64 +- +- // Look at the type and pull the value out of the corresponding register. +- if (type == T_VOID) { +- // nothing to do +- } else if (type == T_OBJECT) { +- __ movptr(return_slot, rax); +- } else if (type == T_INT || is_subword_type(type)) { +- // write the whole word, even if only 32 bits is significant +- __ movptr(return_slot, rax); +- } else if (type == T_LONG) { +- // store the value by parts +- // Note: We assume longs are continguous (if misaligned) on the interpreter stack. 
+- __ store_sized_value(return_slot, rax, BytesPerLong, rdx); +- } else if (NOT_LP64((type == T_FLOAT && UseSSE < 1) || +- (type == T_DOUBLE && UseSSE < 2) ||) +- false) { +- // Use old x86 FPU registers: +- if (type == T_FLOAT) +- __ fstp_s(return_slot); +- else +- __ fstp_d(return_slot); +- } else if (type == T_FLOAT) { +- __ movflt(return_slot, xmm0); +- } else if (type == T_DOUBLE) { +- __ movdbl(return_slot, xmm0); +- } else { +- ShouldNotReachHere(); +- } +- BLOCK_COMMENT("} move_return_value"); + } + + #ifndef PRODUCT +-#define DESCRIBE_RICOCHET_OFFSET(rf, name) \ +- values.describe(frame_no, (intptr_t *) (((uintptr_t)rf) + MethodHandles::RicochetFrame::name##_offset_in_bytes()), #name) +- +-void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no) { +- address bp = (address) fr->fp(); +- RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes()); +- +- // ricochet slots +- DESCRIBE_RICOCHET_OFFSET(rf, exact_sender_sp); +- DESCRIBE_RICOCHET_OFFSET(rf, conversion); +- DESCRIBE_RICOCHET_OFFSET(rf, saved_args_base); +- DESCRIBE_RICOCHET_OFFSET(rf, saved_args_layout); +- DESCRIBE_RICOCHET_OFFSET(rf, saved_target); +- DESCRIBE_RICOCHET_OFFSET(rf, continuation); +- +- // relevant ricochet targets (in caller frame) +- values.describe(-1, rf->saved_args_base(), err_msg("*saved_args_base for #%d", frame_no)); +-} +-#endif // ASSERT +- +-#ifndef PRODUCT +-extern "C" void print_method_handle(oop mh); + void trace_method_handle_stub(const char* adaptername, + oop mh, + intptr_t* saved_regs, + intptr_t* entry_sp) { + // called as a leaf from native code: do not block the JVM! +- bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have rcx_mh ++ bool has_mh = (strstr(adaptername, "/static") == NULL && ++ strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH + const char* mh_reg_name = has_mh ? 
"rcx_mh" : "rcx"; +- tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT, adaptername, mh_reg_name, mh, entry_sp); ++ tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT, ++ adaptername, mh_reg_name, ++ mh, entry_sp); + + if (Verbose) { + tty->print_cr("Registers:"); +@@ -1094,12 +578,18 @@ + values.describe(-1, dump_fp, "fp for #1 "); + values.describe(-1, dump_sp, "sp for #1"); + } ++ values.describe(-1, entry_sp, "raw top of stack"); + + tty->print_cr("Stack layout:"); + values.print(p); + } +- if (has_mh) +- print_method_handle(mh); ++ if (has_mh && mh->is_oop()) { ++ mh->print(); ++ if (java_lang_invoke_MethodHandle::is_instance(mh)) { ++ if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0) ++ java_lang_invoke_MethodHandle::form(mh)->print(); ++ } ++ } + } + } + +@@ -1167,1387 +657,3 @@ + } + #endif //PRODUCT + +-// which conversion op types are implemented here? +-int MethodHandles::adapter_conversion_ops_supported_mask() { +- return ((1<from_compiled_entry(), "method must be linked"); +- +- const Register rax_pc = rax; +- __ pop(rax_pc); // caller PC +- __ mov(rsp, saved_last_sp); // cut the stack back to where the caller started +- +- Register rbx_method = rbx_temp; +- __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method)); +- +- const int jobject_oop_offset = 0; +- __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject +- +- __ movptr(saved_last_sp, rsp); +- __ subptr(rsp, 3 * wordSize); +- __ push(rax_pc); // restore caller PC +- +- __ movl (__ argument_address(constant(2)), rarg0_code); +- __ movptr(__ argument_address(constant(1)), rarg1_actual); +- __ movptr(__ argument_address(constant(0)), rarg2_required); +- jump_from_method_handle(_masm, rbx_method, rax); +- } +- break; +- +- case _invokestatic_mh: +- case _invokespecial_mh: +- { +- Register rbx_method = rbx_temp; +- __ load_heap_oop(rbx_method, rcx_mh_vmtarget); // target is a methodOop +- __ verify_oop(rbx_method); +- // same as 
TemplateTable::invokestatic or invokespecial, +- // minus the CP setup and profiling: +- if (ek == _invokespecial_mh) { +- // Must load & check the first argument before entering the target method. +- __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp); +- __ movptr(rcx_recv, __ argument_address(rax_argslot, -1)); +- __ null_check(rcx_recv); +- __ verify_oop(rcx_recv); +- } +- jump_from_method_handle(_masm, rbx_method, rax); +- } +- break; +- +- case _invokevirtual_mh: +- { +- // same as TemplateTable::invokevirtual, +- // minus the CP setup and profiling: +- +- // pick out the vtable index and receiver offset from the MH, +- // and then we can discard it: +- __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp); +- Register rbx_index = rbx_temp; +- __ movl(rbx_index, rcx_dmh_vmindex); +- // Note: The verifier allows us to ignore rcx_mh_vmtarget. +- __ movptr(rcx_recv, __ argument_address(rax_argslot, -1)); +- __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes()); +- +- // get receiver klass +- Register rax_klass = rax_argslot; +- __ load_klass(rax_klass, rcx_recv); +- __ verify_oop(rax_klass); +- +- // get target methodOop & entry point +- const int base = instanceKlass::vtable_start_offset() * wordSize; +- assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); +- Address vtable_entry_addr(rax_klass, +- rbx_index, Address::times_ptr, +- base + vtableEntry::method_offset_in_bytes()); +- Register rbx_method = rbx_temp; +- __ movptr(rbx_method, vtable_entry_addr); +- +- __ verify_oop(rbx_method); +- jump_from_method_handle(_masm, rbx_method, rax); +- } +- break; +- +- case _invokeinterface_mh: +- { +- // same as TemplateTable::invokeinterface, +- // minus the CP setup and profiling: +- +- // pick out the interface and itable index from the MH. 
+- __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp); +- Register rdx_intf = rdx_temp; +- Register rbx_index = rbx_temp; +- __ load_heap_oop(rdx_intf, rcx_mh_vmtarget); +- __ movl(rbx_index, rcx_dmh_vmindex); +- __ movptr(rcx_recv, __ argument_address(rax_argslot, -1)); +- __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes()); +- +- // get receiver klass +- Register rax_klass = rax_argslot; +- __ load_klass(rax_klass, rcx_recv); +- __ verify_oop(rax_klass); +- +- Register rbx_method = rbx_index; +- +- // get interface klass +- Label no_such_interface; +- __ verify_oop(rdx_intf); +- __ lookup_interface_method(rax_klass, rdx_intf, +- // note: next two args must be the same: +- rbx_index, rbx_method, +- rdi_temp, +- no_such_interface); +- +- __ verify_oop(rbx_method); +- jump_from_method_handle(_masm, rbx_method, rax); +- __ hlt(); +- +- __ bind(no_such_interface); +- // Throw an exception. +- // For historical reasons, it will be IncompatibleClassChangeError. +- __ mov(rbx_temp, rcx_recv); // rarg2_required might be RCX +- assert_different_registers(rarg2_required, rbx_temp); +- __ movptr(rarg2_required, Address(rdx_intf, java_mirror_offset)); // required interface +- __ mov( rarg1_actual, rbx_temp); // bad receiver +- __ movl( rarg0_code, (int) Bytecodes::_invokeinterface); // who is complaining? 
+- __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); +- } +- break; +- +- case _bound_ref_mh: +- case _bound_int_mh: +- case _bound_long_mh: +- case _bound_ref_direct_mh: +- case _bound_int_direct_mh: +- case _bound_long_direct_mh: +- { +- const bool direct_to_method = (ek >= _bound_ref_direct_mh); +- BasicType arg_type = ek_bound_mh_arg_type(ek); +- int arg_slots = type2size[arg_type]; +- +- // make room for the new argument: +- __ movl(rax_argslot, rcx_bmh_vmargslot); +- __ lea(rax_argslot, __ argument_address(rax_argslot)); +- +- insert_arg_slots(_masm, arg_slots * stack_move_unit(), rax_argslot, rbx_temp, rdx_temp); +- +- // store bound argument into the new stack slot: +- __ load_heap_oop(rbx_temp, rcx_bmh_argument); +- if (arg_type == T_OBJECT) { +- __ movptr(Address(rax_argslot, 0), rbx_temp); +- } else { +- Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type)); +- move_typed_arg(_masm, arg_type, false, +- Address(rax_argslot, 0), +- prim_value_addr, +- rbx_temp, rdx_temp); +- } +- +- if (direct_to_method) { +- Register rbx_method = rbx_temp; +- __ load_heap_oop(rbx_method, rcx_mh_vmtarget); +- __ verify_oop(rbx_method); +- jump_from_method_handle(_masm, rbx_method, rax); +- } else { +- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); +- __ verify_oop(rcx_recv); +- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); +- } +- } +- break; +- +- case _adapter_opt_profiling: +- if (java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes() != 0) { +- Address rcx_mh_vmcount(rcx_recv, java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes()); +- __ incrementl(rcx_mh_vmcount); +- } +- // fall through +- +- case _adapter_retype_only: +- case _adapter_retype_raw: +- // immediately jump to the next MH layer: +- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); +- __ verify_oop(rcx_recv); +- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); +- // This is OK when all parameter types widen. 
+- // It is also OK when a return type narrows. +- break; +- +- case _adapter_check_cast: +- { +- // temps: +- Register rbx_klass = rbx_temp; // interesting AMH data +- +- // check a reference argument before jumping to the next layer of MH: +- __ movl(rax_argslot, rcx_amh_vmargslot); +- vmarg = __ argument_address(rax_argslot); +- +- // What class are we casting to? +- __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object! +- load_klass_from_Class(_masm, rbx_klass); +- +- Label done; +- __ movptr(rdx_temp, vmarg); +- __ testptr(rdx_temp, rdx_temp); +- __ jcc(Assembler::zero, done); // no cast if null +- __ load_klass(rdx_temp, rdx_temp); +- +- // live at this point: +- // - rbx_klass: klass required by the target method +- // - rdx_temp: argument klass to test +- // - rcx_recv: adapter method handle +- __ check_klass_subtype(rdx_temp, rbx_klass, rax_argslot, done); +- +- // If we get here, the type check failed! +- // Call the wrong_method_type stub, passing the failing argument type in rax. +- Register rax_mtype = rax_argslot; +- __ movl(rax_argslot, rcx_amh_vmargslot); // reload argslot field +- __ movptr(rdx_temp, vmarg); +- +- assert_different_registers(rarg2_required, rdx_temp); +- __ load_heap_oop(rarg2_required, rcx_amh_argument); // required class +- __ mov( rarg1_actual, rdx_temp); // bad object +- __ movl( rarg0_code, (int) Bytecodes::_checkcast); // who is complaining? 
+- __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); +- +- __ bind(done); +- // get the new MH: +- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); +- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); +- } +- break; +- +- case _adapter_prim_to_prim: +- case _adapter_ref_to_prim: +- case _adapter_prim_to_ref: +- // handled completely by optimized cases +- __ stop("init_AdapterMethodHandle should not issue this"); +- break; +- +- case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim +-//case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim +- case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim +- case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim +- { +- // perform an in-place conversion to int or an int subword +- __ movl(rax_argslot, rcx_amh_vmargslot); +- vmarg = __ argument_address(rax_argslot); +- +- switch (ek) { +- case _adapter_opt_i2i: +- __ movl(rdx_temp, vmarg); +- break; +- case _adapter_opt_l2i: +- { +- // just delete the extra slot; on a little-endian machine we keep the first +- __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); +- remove_arg_slots(_masm, -stack_move_unit(), +- rax_argslot, rbx_temp, rdx_temp); +- vmarg = Address(rax_argslot, -Interpreter::stackElementSize); +- __ movl(rdx_temp, vmarg); +- } +- break; +- case _adapter_opt_unboxi: +- { +- // Load the value up from the heap. +- __ movptr(rdx_temp, vmarg); +- int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT); +-#ifdef ASSERT +- for (int bt = T_BOOLEAN; bt < T_INT; bt++) { +- if (is_subword_type(BasicType(bt))) +- assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), ""); +- } +-#endif +- __ null_check(rdx_temp, value_offset); +- __ movl(rdx_temp, Address(rdx_temp, value_offset)); +- // We load this as a word. Because we are little-endian, +- // the low bits will be correct, but the high bits may need cleaning. 
+- // The vminfo will guide us to clean those bits. +- } +- break; +- default: +- ShouldNotReachHere(); +- } +- +- // Do the requested conversion and store the value. +- Register rbx_vminfo = rbx_temp; +- load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion); +- +- // get the new MH: +- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); +- // (now we are done with the old MH) +- +- // original 32-bit vmdata word must be of this form: +- // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 | +- __ xchgptr(rcx, rbx_vminfo); // free rcx for shifts +- __ shll(rdx_temp /*, rcx*/); +- Label zero_extend, done; +- __ testl(rcx, CONV_VMINFO_SIGN_FLAG); +- __ jccb(Assembler::zero, zero_extend); +- +- // this path is taken for int->byte, int->short +- __ sarl(rdx_temp /*, rcx*/); +- __ jmpb(done); +- +- __ bind(zero_extend); +- // this is taken for int->char +- __ shrl(rdx_temp /*, rcx*/); +- +- __ bind(done); +- __ movl(vmarg, rdx_temp); // Store the value. +- __ xchgptr(rcx, rbx_vminfo); // restore rcx_recv +- +- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); +- } +- break; +- +- case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim +- case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim +- { +- // perform an in-place int-to-long or ref-to-long conversion +- __ movl(rax_argslot, rcx_amh_vmargslot); +- +- // on a little-endian machine we keep the first slot and add another after +- __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); +- insert_arg_slots(_masm, stack_move_unit(), +- rax_argslot, rbx_temp, rdx_temp); +- Address vmarg1(rax_argslot, -Interpreter::stackElementSize); +- Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize); +- +- switch (ek) { +- case _adapter_opt_i2l: +- { +-#ifdef _LP64 +- __ movslq(rdx_temp, vmarg1); // Load sign-extended +- __ movq(vmarg1, rdx_temp); // Store into first slot +-#else +- __ movl(rdx_temp, vmarg1); +- __ sarl(rdx_temp, BitsPerInt - 1); // __ extend_sign() +- __ 
movl(vmarg2, rdx_temp); // store second word +-#endif +- } +- break; +- case _adapter_opt_unboxl: +- { +- // Load the value up from the heap. +- __ movptr(rdx_temp, vmarg1); +- int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG); +- assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), ""); +- __ null_check(rdx_temp, value_offset); +-#ifdef _LP64 +- __ movq(rbx_temp, Address(rdx_temp, value_offset)); +- __ movq(vmarg1, rbx_temp); +-#else +- __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt)); +- __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt)); +- __ movl(vmarg1, rbx_temp); +- __ movl(vmarg2, rdx_temp); +-#endif +- } +- break; +- default: +- ShouldNotReachHere(); +- } +- +- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); +- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); +- } +- break; +- +- case _adapter_opt_f2d: // optimized subcase of adapt_prim_to_prim +- case _adapter_opt_d2f: // optimized subcase of adapt_prim_to_prim +- { +- // perform an in-place floating primitive conversion +- __ movl(rax_argslot, rcx_amh_vmargslot); +- __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); +- if (ek == _adapter_opt_f2d) { +- insert_arg_slots(_masm, stack_move_unit(), +- rax_argslot, rbx_temp, rdx_temp); +- } +- Address vmarg(rax_argslot, -Interpreter::stackElementSize); +- +-#ifdef _LP64 +- if (ek == _adapter_opt_f2d) { +- __ movflt(xmm0, vmarg); +- __ cvtss2sd(xmm0, xmm0); +- __ movdbl(vmarg, xmm0); +- } else { +- __ movdbl(xmm0, vmarg); +- __ cvtsd2ss(xmm0, xmm0); +- __ movflt(vmarg, xmm0); +- } +-#else //_LP64 +- if (ek == _adapter_opt_f2d) { +- __ fld_s(vmarg); // load float to ST0 +- __ fstp_d(vmarg); // store double +- } else { +- __ fld_d(vmarg); // load double to ST0 +- __ fstp_s(vmarg); // store single +- } +-#endif //_LP64 +- +- if (ek == _adapter_opt_d2f) { +- remove_arg_slots(_masm, -stack_move_unit(), +- rax_argslot, rbx_temp, rdx_temp); +- } +- +- __ 
load_heap_oop(rcx_recv, rcx_mh_vmtarget); +- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); +- } +- break; +- +- case _adapter_swap_args: +- case _adapter_rot_args: +- // handled completely by optimized cases +- __ stop("init_AdapterMethodHandle should not issue this"); +- break; +- +- case _adapter_opt_swap_1: +- case _adapter_opt_swap_2: +- case _adapter_opt_rot_1_up: +- case _adapter_opt_rot_1_down: +- case _adapter_opt_rot_2_up: +- case _adapter_opt_rot_2_down: +- { +- int swap_slots = ek_adapter_opt_swap_slots(ek); +- int rotate = ek_adapter_opt_swap_mode(ek); +- +- // 'argslot' is the position of the first argument to swap +- __ movl(rax_argslot, rcx_amh_vmargslot); +- __ lea(rax_argslot, __ argument_address(rax_argslot)); +- +- // 'vminfo' is the second +- Register rbx_destslot = rbx_temp; +- load_conversion_vminfo(_masm, rbx_destslot, rcx_amh_conversion); +- __ lea(rbx_destslot, __ argument_address(rbx_destslot)); +- if (VerifyMethodHandles) +- verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame"); +- +- assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here"); +- if (!rotate) { +- // simple swap +- for (int i = 0; i < swap_slots; i++) { +- __ movptr(rdi_temp, Address(rax_argslot, i * wordSize)); +- __ movptr(rdx_temp, Address(rbx_destslot, i * wordSize)); +- __ movptr(Address(rax_argslot, i * wordSize), rdx_temp); +- __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp); +- } +- } else { +- // A rotate is actually pair of moves, with an "odd slot" (or pair) +- // changing place with a series of other slots. 
+- // First, push the "odd slot", which is going to get overwritten +- for (int i = swap_slots - 1; i >= 0; i--) { +- // handle one with rdi_temp instead of a push: +- if (i == 0) __ movptr(rdi_temp, Address(rax_argslot, i * wordSize)); +- else __ pushptr( Address(rax_argslot, i * wordSize)); +- } +- if (rotate > 0) { +- // Here is rotate > 0: +- // (low mem) (high mem) +- // | dest: more_slots... | arg: odd_slot :arg+1 | +- // => +- // | dest: odd_slot | dest+1: more_slots... :arg+1 | +- // work argslot down to destslot, copying contiguous data upwards +- // pseudo-code: +- // rax = src_addr - swap_bytes +- // rbx = dest_addr +- // while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--; +- move_arg_slots_up(_masm, +- rbx_destslot, +- Address(rax_argslot, 0), +- swap_slots, +- rax_argslot, rdx_temp); +- } else { +- // Here is the other direction, rotate < 0: +- // (low mem) (high mem) +- // | arg: odd_slot | arg+1: more_slots... :dest+1 | +- // => +- // | arg: more_slots... | dest: odd_slot :dest+1 | +- // work argslot up to destslot, copying contiguous data downwards +- // pseudo-code: +- // rax = src_addr + swap_bytes +- // rbx = dest_addr +- // while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++; +- // dest_slot denotes an exclusive upper limit +- int limit_bias = OP_ROT_ARGS_DOWN_LIMIT_BIAS; +- if (limit_bias != 0) +- __ addptr(rbx_destslot, - limit_bias * wordSize); +- move_arg_slots_down(_masm, +- Address(rax_argslot, swap_slots * wordSize), +- rbx_destslot, +- -swap_slots, +- rax_argslot, rdx_temp); +- __ subptr(rbx_destslot, swap_slots * wordSize); +- } +- // pop the original first chunk into the destination slot, now free +- for (int i = 0; i < swap_slots; i++) { +- if (i == 0) __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp); +- else __ popptr(Address(rbx_destslot, i * wordSize)); +- } +- } +- +- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); +- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); +- } +- break; +- +- case 
_adapter_dup_args: +- { +- // 'argslot' is the position of the first argument to duplicate +- __ movl(rax_argslot, rcx_amh_vmargslot); +- __ lea(rax_argslot, __ argument_address(rax_argslot)); +- +- // 'stack_move' is negative number of words to duplicate +- Register rdi_stack_move = rdi_temp; +- load_stack_move(_masm, rdi_stack_move, rcx_recv, true); +- +- if (VerifyMethodHandles) { +- verify_argslots(_masm, rdi_stack_move, rax_argslot, true, +- "copied argument(s) must fall within current frame"); +- } +- +- if (UseStackBanging) { +- // Bang the stack before pushing args. +- int frame_size = 256 * Interpreter::stackElementSize; // conservative +- __ generate_stack_overflow_check(frame_size + sizeof(RicochetFrame)); +- } +- // insert location is always the bottom of the argument list: +- Address insert_location = __ argument_address(constant(0)); +- int pre_arg_words = insert_location.disp() / wordSize; // return PC is pushed +- assert(insert_location.base() == rsp, ""); +- +- __ negl(rdi_stack_move); +- push_arg_slots(_masm, rax_argslot, rdi_stack_move, +- pre_arg_words, rbx_temp, rdx_temp); +- +- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); +- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); +- } +- break; +- +- case _adapter_drop_args: +- { +- // 'argslot' is the position of the first argument to nuke +- __ movl(rax_argslot, rcx_amh_vmargslot); +- __ lea(rax_argslot, __ argument_address(rax_argslot)); +- +- // (must do previous push after argslot address is taken) +- +- // 'stack_move' is number of words to drop +- Register rdi_stack_move = rdi_temp; +- load_stack_move(_masm, rdi_stack_move, rcx_recv, false); +- remove_arg_slots(_masm, rdi_stack_move, +- rax_argslot, rbx_temp, rdx_temp); +- +- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); +- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); +- } +- break; +- +- case _adapter_collect_args: +- case _adapter_fold_args: +- case _adapter_spread_args: +- // handled completely by optimized cases +- __ 
stop("init_AdapterMethodHandle should not issue this"); +- break; +- +- case _adapter_opt_collect_ref: +- case _adapter_opt_collect_int: +- case _adapter_opt_collect_long: +- case _adapter_opt_collect_float: +- case _adapter_opt_collect_double: +- case _adapter_opt_collect_void: +- case _adapter_opt_collect_0_ref: +- case _adapter_opt_collect_1_ref: +- case _adapter_opt_collect_2_ref: +- case _adapter_opt_collect_3_ref: +- case _adapter_opt_collect_4_ref: +- case _adapter_opt_collect_5_ref: +- case _adapter_opt_filter_S0_ref: +- case _adapter_opt_filter_S1_ref: +- case _adapter_opt_filter_S2_ref: +- case _adapter_opt_filter_S3_ref: +- case _adapter_opt_filter_S4_ref: +- case _adapter_opt_filter_S5_ref: +- case _adapter_opt_collect_2_S0_ref: +- case _adapter_opt_collect_2_S1_ref: +- case _adapter_opt_collect_2_S2_ref: +- case _adapter_opt_collect_2_S3_ref: +- case _adapter_opt_collect_2_S4_ref: +- case _adapter_opt_collect_2_S5_ref: +- case _adapter_opt_fold_ref: +- case _adapter_opt_fold_int: +- case _adapter_opt_fold_long: +- case _adapter_opt_fold_float: +- case _adapter_opt_fold_double: +- case _adapter_opt_fold_void: +- case _adapter_opt_fold_1_ref: +- case _adapter_opt_fold_2_ref: +- case _adapter_opt_fold_3_ref: +- case _adapter_opt_fold_4_ref: +- case _adapter_opt_fold_5_ref: +- { +- // Given a fresh incoming stack frame, build a new ricochet frame. +- // On entry, TOS points at a return PC, and RBP is the callers frame ptr. +- // RSI/R13 has the caller's exact stack pointer, which we must also preserve. +- // RCX contains an AdapterMethodHandle of the indicated kind. +- +- // Relevant AMH fields: +- // amh.vmargslot: +- // points to the trailing edge of the arguments +- // to filter, collect, or fold. For a boxing operation, +- // it points just after the single primitive value. +- // amh.argument: +- // recursively called MH, on |collect| arguments +- // amh.vmtarget: +- // final destination MH, on return value, etc. 
+- // amh.conversion.dest: +- // tells what is the type of the return value +- // (not needed here, since dest is also derived from ek) +- // amh.conversion.vminfo: +- // points to the trailing edge of the return value +- // when the vmtarget is to be called; this is +- // equal to vmargslot + (retained ? |collect| : 0) +- +- // Pass 0 or more argument slots to the recursive target. +- int collect_count_constant = ek_adapter_opt_collect_count(ek); +- +- // The collected arguments are copied from the saved argument list: +- int collect_slot_constant = ek_adapter_opt_collect_slot(ek); +- +- assert(ek_orig == _adapter_collect_args || +- ek_orig == _adapter_fold_args, ""); +- bool retain_original_args = (ek_orig == _adapter_fold_args); +- +- // The return value is replaced (or inserted) at the 'vminfo' argslot. +- // Sometimes we can compute this statically. +- int dest_slot_constant = -1; +- if (!retain_original_args) +- dest_slot_constant = collect_slot_constant; +- else if (collect_slot_constant >= 0 && collect_count_constant >= 0) +- // We are preserving all the arguments, and the return value is prepended, +- // so the return slot is to the left (above) the |collect| sequence. +- dest_slot_constant = collect_slot_constant + collect_count_constant; +- +- // Replace all those slots by the result of the recursive call. +- // The result type can be one of ref, int, long, float, double, void. +- // In the case of void, nothing is pushed on the stack after return. +- BasicType dest = ek_adapter_opt_collect_type(ek); +- assert(dest == type2wfield[dest], "dest is a stack slot type"); +- int dest_count = type2size[dest]; +- assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size"); +- +- // Choose a return continuation. 
+- EntryKind ek_ret = _adapter_opt_return_any; +- if (dest != T_CONFLICT && OptimizeMethodHandles) { +- switch (dest) { +- case T_INT : ek_ret = _adapter_opt_return_int; break; +- case T_LONG : ek_ret = _adapter_opt_return_long; break; +- case T_FLOAT : ek_ret = _adapter_opt_return_float; break; +- case T_DOUBLE : ek_ret = _adapter_opt_return_double; break; +- case T_OBJECT : ek_ret = _adapter_opt_return_ref; break; +- case T_VOID : ek_ret = _adapter_opt_return_void; break; +- default : ShouldNotReachHere(); +- } +- if (dest == T_OBJECT && dest_slot_constant >= 0) { +- EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant); +- if (ek_try <= _adapter_opt_return_LAST && +- ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) { +- ek_ret = ek_try; +- } +- } +- assert(ek_adapter_opt_return_type(ek_ret) == dest, ""); +- } +- +- // Already pushed: ... keep1 | collect | keep2 | sender_pc | +- // push(sender_pc); +- +- // Compute argument base: +- Register rax_argv = rax_argslot; +- __ lea(rax_argv, __ argument_address(constant(0))); +- +- // Push a few extra argument words, if we need them to store the return value. +- { +- int extra_slots = 0; +- if (retain_original_args) { +- extra_slots = dest_count; +- } else if (collect_count_constant == -1) { +- extra_slots = dest_count; // collect_count might be zero; be generous +- } else if (dest_count > collect_count_constant) { +- extra_slots = (dest_count - collect_count_constant); +- } else { +- // else we know we have enough dead space in |collect| to repurpose for return values +- } +- DEBUG_ONLY(extra_slots += 1); +- if (extra_slots > 0) { +- __ pop(rbx_temp); // return value +- __ subptr(rsp, (extra_slots * Interpreter::stackElementSize)); +- // Push guard word #2 in debug mode. 
+- DEBUG_ONLY(__ movptr(Address(rsp, 0), (int32_t) RicochetFrame::MAGIC_NUMBER_2)); +- __ push(rbx_temp); +- } +- } +- +- RicochetFrame::enter_ricochet_frame(_masm, rcx_recv, rax_argv, +- entry(ek_ret)->from_interpreted_entry(), rbx_temp); +- +- // Now pushed: ... keep1 | collect | keep2 | RF | +- // some handy frame slots: +- Address exact_sender_sp_addr = RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes()); +- Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); +- Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes()); +- +-#ifdef ASSERT +- if (VerifyMethodHandles && dest != T_CONFLICT) { +- BLOCK_COMMENT("verify AMH.conv.dest"); +- load_conversion_dest_type(_masm, rbx_temp, conversion_addr); +- Label L_dest_ok; +- __ cmpl(rbx_temp, (int) dest); +- __ jcc(Assembler::equal, L_dest_ok); +- if (dest == T_INT) { +- for (int bt = T_BOOLEAN; bt < T_INT; bt++) { +- if (is_subword_type(BasicType(bt))) { +- __ cmpl(rbx_temp, (int) bt); +- __ jcc(Assembler::equal, L_dest_ok); +- } +- } +- } +- __ stop("bad dest in AMH.conv"); +- __ BIND(L_dest_ok); +- } +-#endif //ASSERT +- +- // Find out where the original copy of the recursive argument sequence begins. +- Register rax_coll = rax_argv; +- { +- RegisterOrConstant collect_slot = collect_slot_constant; +- if (collect_slot_constant == -1) { +- __ movl(rdi_temp, rcx_amh_vmargslot); +- collect_slot = rdi_temp; +- } +- if (collect_slot_constant != 0) +- __ lea(rax_coll, Address(rax_argv, collect_slot, Interpreter::stackElementScale())); +- // rax_coll now points at the trailing edge of |collect| and leading edge of |keep2| +- } +- +- // Replace the old AMH with the recursive MH. (No going back now.) +- // In the case of a boxing call, the recursive call is to a 'boxer' method, +- // such as Integer.valueOf or Long.valueOf. 
In the case of a filter +- // or collect call, it will take one or more arguments, transform them, +- // and return some result, to store back into argument_base[vminfo]. +- __ load_heap_oop(rcx_recv, rcx_amh_argument); +- if (VerifyMethodHandles) verify_method_handle(_masm, rcx_recv); +- +- // Push a space for the recursively called MH first: +- __ push((int32_t)NULL_WORD); +- +- // Calculate |collect|, the number of arguments we are collecting. +- Register rdi_collect_count = rdi_temp; +- RegisterOrConstant collect_count; +- if (collect_count_constant >= 0) { +- collect_count = collect_count_constant; +- } else { +- __ load_method_handle_vmslots(rdi_collect_count, rcx_recv, rdx_temp); +- collect_count = rdi_collect_count; +- } +-#ifdef ASSERT +- if (VerifyMethodHandles && collect_count_constant >= 0) { +- __ load_method_handle_vmslots(rbx_temp, rcx_recv, rdx_temp); +- Label L_count_ok; +- __ cmpl(rbx_temp, collect_count_constant); +- __ jcc(Assembler::equal, L_count_ok); +- __ stop("bad vminfo in AMH.conv"); +- __ BIND(L_count_ok); +- } +-#endif //ASSERT +- +- // copy |collect| slots directly to TOS: +- push_arg_slots(_masm, rax_coll, collect_count, 0, rbx_temp, rdx_temp); +- // Now pushed: ... keep1 | collect | keep2 | RF... | collect | +- // rax_coll still points at the trailing edge of |collect| and leading edge of |keep2| +- +- // If necessary, adjust the saved arguments to make room for the eventual return value. +- // Normal adjustment: ... keep1 | +dest+ | -collect- | keep2 | RF... | collect | +- // If retaining args: ... keep1 | +dest+ | collect | keep2 | RF... | collect | +- // In the non-retaining case, this might move keep2 either up or down. +- // We don't have to copy the whole | RF... collect | complex, +- // but we must adjust RF.saved_args_base. +- // Also, from now on, we will forget about the original copy of |collect|. +- // If we are retaining it, we will treat it as part of |keep2|. 
+- // For clarity we will define |keep3| = |collect|keep2| or |keep2|. +- +- BLOCK_COMMENT("adjust trailing arguments {"); +- // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements. +- int open_count = dest_count; +- RegisterOrConstant close_count = collect_count_constant; +- Register rdi_close_count = rdi_collect_count; +- if (retain_original_args) { +- close_count = constant(0); +- } else if (collect_count_constant == -1) { +- close_count = rdi_collect_count; +- } +- +- // How many slots need moving? This is simply dest_slot (0 => no |keep3|). +- RegisterOrConstant keep3_count; +- Register rsi_keep3_count = rsi; // can repair from RF.exact_sender_sp +- if (dest_slot_constant >= 0) { +- keep3_count = dest_slot_constant; +- } else { +- load_conversion_vminfo(_masm, rsi_keep3_count, conversion_addr); +- keep3_count = rsi_keep3_count; +- } +-#ifdef ASSERT +- if (VerifyMethodHandles && dest_slot_constant >= 0) { +- load_conversion_vminfo(_masm, rbx_temp, conversion_addr); +- Label L_vminfo_ok; +- __ cmpl(rbx_temp, dest_slot_constant); +- __ jcc(Assembler::equal, L_vminfo_ok); +- __ stop("bad vminfo in AMH.conv"); +- __ BIND(L_vminfo_ok); +- } +-#endif //ASSERT +- +- // tasks remaining: +- bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0); +- bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0)); +- bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant()); +- +- if (stomp_dest | fix_arg_base) { +- // we will probably need an updated rax_argv value +- if (collect_slot_constant >= 0) { +- // rax_coll already holds the leading edge of |keep2|, so tweak it +- assert(rax_coll == rax_argv, "elided a move"); +- if (collect_slot_constant != 0) +- __ subptr(rax_argv, collect_slot_constant * Interpreter::stackElementSize); +- } else { +- // Just reload from RF.saved_args_base. 
+- __ movptr(rax_argv, saved_args_base_addr); +- } +- } +- +- // Old and new argument locations (based at slot 0). +- // Net shift (&new_argv - &old_argv) is (close_count - open_count). +- bool zero_open_count = (open_count == 0); // remember this bit of info +- if (move_keep3 && fix_arg_base) { +- // It will be easier to have everything in one register: +- if (close_count.is_register()) { +- // Deduct open_count from close_count register to get a clean +/- value. +- __ subptr(close_count.as_register(), open_count); +- } else { +- close_count = close_count.as_constant() - open_count; +- } +- open_count = 0; +- } +- Address old_argv(rax_argv, 0); +- Address new_argv(rax_argv, close_count, Interpreter::stackElementScale(), +- - open_count * Interpreter::stackElementSize); +- +- // First decide if any actual data are to be moved. +- // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change. +- // (As it happens, all movements involve an argument list size change.) +- +- // If there are variable parameters, use dynamic checks to skip around the whole mess. 
+- Label L_done; +- if (!keep3_count.is_constant()) { +- __ testl(keep3_count.as_register(), keep3_count.as_register()); +- __ jcc(Assembler::zero, L_done); +- } +- if (!close_count.is_constant()) { +- __ cmpl(close_count.as_register(), open_count); +- __ jcc(Assembler::equal, L_done); +- } +- +- if (move_keep3 && fix_arg_base) { +- bool emit_move_down = false, emit_move_up = false, emit_guard = false; +- if (!close_count.is_constant()) { +- emit_move_down = emit_guard = !zero_open_count; +- emit_move_up = true; +- } else if (open_count != close_count.as_constant()) { +- emit_move_down = (open_count > close_count.as_constant()); +- emit_move_up = !emit_move_down; +- } +- Label L_move_up; +- if (emit_guard) { +- __ cmpl(close_count.as_register(), open_count); +- __ jcc(Assembler::greater, L_move_up); +- } +- +- if (emit_move_down) { +- // Move arguments down if |+dest+| > |-collect-| +- // (This is rare, except when arguments are retained.) +- // This opens space for the return value. +- if (keep3_count.is_constant()) { +- for (int i = 0; i < keep3_count.as_constant(); i++) { +- __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize)); +- __ movptr( new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp); +- } +- } else { +- Register rbx_argv_top = rbx_temp; +- __ lea(rbx_argv_top, old_argv.plus_disp(keep3_count, Interpreter::stackElementScale())); +- move_arg_slots_down(_masm, +- old_argv, // beginning of old argv +- rbx_argv_top, // end of old argv +- close_count, // distance to move down (must be negative) +- rax_argv, rdx_temp); +- // Used argv as an iteration variable; reload from RF.saved_args_base. +- __ movptr(rax_argv, saved_args_base_addr); +- } +- } +- +- if (emit_guard) { +- __ jmp(L_done); // assumes emit_move_up is true also +- __ BIND(L_move_up); +- } +- +- if (emit_move_up) { +- +- // Move arguments up if |+dest+| < |-collect-| +- // (This is usual, except when |keep3| is empty.) 
+- // This closes up the space occupied by the now-deleted collect values. +- if (keep3_count.is_constant()) { +- for (int i = keep3_count.as_constant() - 1; i >= 0; i--) { +- __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize)); +- __ movptr( new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp); +- } +- } else { +- Address argv_top = old_argv.plus_disp(keep3_count, Interpreter::stackElementScale()); +- move_arg_slots_up(_masm, +- rax_argv, // beginning of old argv +- argv_top, // end of old argv +- close_count, // distance to move up (must be positive) +- rbx_temp, rdx_temp); +- } +- } +- } +- __ BIND(L_done); +- +- if (fix_arg_base) { +- // adjust RF.saved_args_base by adding (close_count - open_count) +- if (!new_argv.is_same_address(Address(rax_argv, 0))) +- __ lea(rax_argv, new_argv); +- __ movptr(saved_args_base_addr, rax_argv); +- } +- +- if (stomp_dest) { +- // Stomp the return slot, so it doesn't hold garbage. +- // This isn't strictly necessary, but it may help detect bugs. +- int forty_two = RicochetFrame::RETURN_VALUE_PLACEHOLDER; +- __ movptr(Address(rax_argv, keep3_count, Address::times_ptr), +- (int32_t) forty_two); +- // uses rsi_keep3_count +- } +- BLOCK_COMMENT("} adjust trailing arguments"); +- +- BLOCK_COMMENT("do_recursive_call"); +- __ mov(saved_last_sp, rsp); // set rsi/r13 for callee +- __ pushptr(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr()).addr()); +- // The globally unique bounce address has two purposes: +- // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame). +- // 2. When returned to, it cuts back the stack and redirects control flow +- // to the return handler. +- // The return handler will further cut back the stack when it takes +- // down the RF. Perhaps there is a way to streamline this further. +- +- if (UseStackBanging) { +- // Bang the stack before recursive call. +- // Even if slots == 0, we are inside a RicochetFrame. 
+- int frame_size = collect_count.is_constant() ? collect_count.as_constant() * wordSize : -1; +- if (frame_size < 0) { +- frame_size = 256 * Interpreter::stackElementSize; // conservative +- } +- __ generate_stack_overflow_check(frame_size + sizeof(RicochetFrame)); +- } +- // State during recursive call: +- // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc | +- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); +- +- break; +- } +- +- case _adapter_opt_return_ref: +- case _adapter_opt_return_int: +- case _adapter_opt_return_long: +- case _adapter_opt_return_float: +- case _adapter_opt_return_double: +- case _adapter_opt_return_void: +- case _adapter_opt_return_S0_ref: +- case _adapter_opt_return_S1_ref: +- case _adapter_opt_return_S2_ref: +- case _adapter_opt_return_S3_ref: +- case _adapter_opt_return_S4_ref: +- case _adapter_opt_return_S5_ref: +- { +- BasicType dest_type_constant = ek_adapter_opt_return_type(ek); +- int dest_slot_constant = ek_adapter_opt_return_slot(ek); +- +- if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); +- +- if (dest_slot_constant == -1) { +- // The current stub is a general handler for this dest_type. +- // It can be called from _adapter_opt_return_any below. +- // Stash the address in a little table. 
+- assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob"); +- address return_handler = __ pc(); +- _adapter_return_handlers[dest_type_constant] = return_handler; +- if (dest_type_constant == T_INT) { +- // do the subword types too +- for (int bt = T_BOOLEAN; bt < T_INT; bt++) { +- if (is_subword_type(BasicType(bt)) && +- _adapter_return_handlers[bt] == NULL) { +- _adapter_return_handlers[bt] = return_handler; +- } +- } +- } +- } +- +- Register rbx_arg_base = rbx_temp; +- assert_different_registers(rax, rdx, // possibly live return value registers +- rdi_temp, rbx_arg_base); +- +- Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); +- Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes()); +- +- __ movptr(rbx_arg_base, saved_args_base_addr); +- RegisterOrConstant dest_slot = dest_slot_constant; +- if (dest_slot_constant == -1) { +- load_conversion_vminfo(_masm, rdi_temp, conversion_addr); +- dest_slot = rdi_temp; +- } +- // Store the result back into the argslot. +- // This code uses the interpreter calling sequence, in which the return value +- // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop. +- // There are certain irregularities with floating point values, which can be seen +- // in TemplateInterpreterGenerator::generate_return_entry_for. +- move_return_value(_masm, dest_type_constant, Address(rbx_arg_base, dest_slot, Interpreter::stackElementScale())); +- +- RicochetFrame::leave_ricochet_frame(_masm, rcx_recv, rbx_arg_base, rdx_temp); +- __ push(rdx_temp); // repush the return PC +- +- // Load the final target and go. 
+- if (VerifyMethodHandles) verify_method_handle(_masm, rcx_recv); +- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); +- __ hlt(); // -------------------- +- break; +- } +- +- case _adapter_opt_return_any: +- { +- if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); +- Register rdi_conv = rdi_temp; +- assert_different_registers(rax, rdx, // possibly live return value registers +- rdi_conv, rbx_temp); +- +- Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); +- load_conversion_dest_type(_masm, rdi_conv, conversion_addr); +- __ lea(rbx_temp, ExternalAddress((address) &_adapter_return_handlers[0])); +- __ movptr(rbx_temp, Address(rbx_temp, rdi_conv, Address::times_ptr)); +- +-#ifdef ASSERT +- { Label L_badconv; +- __ testptr(rbx_temp, rbx_temp); +- __ jccb(Assembler::zero, L_badconv); +- __ jmp(rbx_temp); +- __ bind(L_badconv); +- __ stop("bad method handle return"); +- } +-#else //ASSERT +- __ jmp(rbx_temp); +-#endif //ASSERT +- break; +- } +- +- case _adapter_opt_spread_0: +- case _adapter_opt_spread_1_ref: +- case _adapter_opt_spread_2_ref: +- case _adapter_opt_spread_3_ref: +- case _adapter_opt_spread_4_ref: +- case _adapter_opt_spread_5_ref: +- case _adapter_opt_spread_ref: +- case _adapter_opt_spread_byte: +- case _adapter_opt_spread_char: +- case _adapter_opt_spread_short: +- case _adapter_opt_spread_int: +- case _adapter_opt_spread_long: +- case _adapter_opt_spread_float: +- case _adapter_opt_spread_double: +- { +- // spread an array out into a group of arguments +- int length_constant = ek_adapter_opt_spread_count(ek); +- bool length_can_be_zero = (length_constant == 0); +- if (length_constant < 0) { +- // some adapters with variable length must handle the zero case +- if (!OptimizeMethodHandles || +- ek_adapter_opt_spread_type(ek) != T_OBJECT) +- length_can_be_zero = true; +- } +- +- // find the address of the array argument +- __ movl(rax_argslot, rcx_amh_vmargslot); +- __ lea(rax_argslot, __ 
argument_address(rax_argslot)); +- +- // grab another temp +- Register rsi_temp = rsi; +- +- // arx_argslot points both to the array and to the first output arg +- vmarg = Address(rax_argslot, 0); +- +- // Get the array value. +- Register rdi_array = rdi_temp; +- Register rdx_array_klass = rdx_temp; +- BasicType elem_type = ek_adapter_opt_spread_type(ek); +- int elem_slots = type2size[elem_type]; // 1 or 2 +- int array_slots = 1; // array is always a T_OBJECT +- int length_offset = arrayOopDesc::length_offset_in_bytes(); +- int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type); +- __ movptr(rdi_array, vmarg); +- +- Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done; +- if (length_can_be_zero) { +- // handle the null pointer case, if zero is allowed +- Label L_skip; +- if (length_constant < 0) { +- load_conversion_vminfo(_masm, rbx_temp, rcx_amh_conversion); +- __ testl(rbx_temp, rbx_temp); +- __ jcc(Assembler::notZero, L_skip); +- } +- __ testptr(rdi_array, rdi_array); +- __ jcc(Assembler::notZero, L_skip); +- +- // If 'rsi' contains the 'saved_last_sp' (this is only the +- // case in a 32-bit version of the VM) we have to save 'rsi' +- // on the stack because later on (at 'L_array_is_empty') 'rsi' +- // will be overwritten. +- if (rsi_temp == saved_last_sp) { +- __ push(saved_last_sp); +- // Need to re-push return PC to keep it on stack top. +- __ lea(saved_last_sp, ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr()).addr()); +- __ push(saved_last_sp); +- } +- // Also prepare a handy macro which restores 'rsi' if required. +-#define UNPUSH_RSI \ +- { if (rsi_temp == saved_last_sp) { __ pop(saved_last_sp); __ pop(saved_last_sp); } } +- +- __ jmp(L_array_is_empty); +- __ bind(L_skip); +- } +- __ null_check(rdi_array, oopDesc::klass_offset_in_bytes()); +- __ load_klass(rdx_array_klass, rdi_array); +- +- // Save 'rsi' if required (see comment above). 
Do this only +- // after the null check such that the exception handler which is +- // called in the case of a null pointer exception will not be +- // confused by the extra value on the stack (it expects the +- // return pointer on top of the stack) +- if (rsi_temp == saved_last_sp) { +- __ push(saved_last_sp); +- // Need to re-push return PC to keep it on stack top. +- __ lea(saved_last_sp, ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr()).addr()); +- __ push(saved_last_sp); +- } +- +- // Check the array type. +- Register rbx_klass = rbx_temp; +- __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object! +- load_klass_from_Class(_masm, rbx_klass); +- +- Label ok_array_klass, bad_array_klass, bad_array_length; +- __ check_klass_subtype(rdx_array_klass, rbx_klass, rsi_temp, ok_array_klass); +- // If we get here, the type check failed! +- __ jmp(bad_array_klass); +- __ BIND(ok_array_klass); +- +- // Check length. +- if (length_constant >= 0) { +- __ cmpl(Address(rdi_array, length_offset), length_constant); +- } else { +- Register rbx_vminfo = rbx_temp; +- load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion); +- __ cmpl(rbx_vminfo, Address(rdi_array, length_offset)); +- } +- __ jcc(Assembler::notEqual, bad_array_length); +- +- Register rdx_argslot_limit = rdx_temp; +- +- // Array length checks out. Now insert any required stack slots. +- if (length_constant == -1) { +- // Form a pointer to the end of the affected region. +- __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize)); +- // 'stack_move' is negative number of words to insert +- // This number already accounts for elem_slots. 
+- Register rsi_stack_move = rsi_temp; +- load_stack_move(_masm, rsi_stack_move, rcx_recv, true); +- __ cmpptr(rsi_stack_move, 0); +- assert(stack_move_unit() < 0, "else change this comparison"); +- __ jcc(Assembler::less, L_insert_arg_space); +- __ jcc(Assembler::equal, L_copy_args); +- // single argument case, with no array movement +- __ BIND(L_array_is_empty); +- remove_arg_slots(_masm, -stack_move_unit() * array_slots, +- rax_argslot, rbx_temp, rdx_temp); +- __ jmp(L_args_done); // no spreading to do +- __ BIND(L_insert_arg_space); +- // come here in the usual case, stack_move < 0 (2 or more spread arguments) +- Register rdi_temp = rdi_array; // spill this +- insert_arg_slots(_masm, rsi_stack_move, +- rax_argslot, rbx_temp, rdi_temp); +- // reload the array since rsi was killed +- // reload from rdx_argslot_limit since rax_argslot is now decremented +- __ movptr(rdi_array, Address(rdx_argslot_limit, -Interpreter::stackElementSize)); +- } else if (length_constant >= 1) { +- int new_slots = (length_constant * elem_slots) - array_slots; +- insert_arg_slots(_masm, new_slots * stack_move_unit(), +- rax_argslot, rbx_temp, rdx_temp); +- } else if (length_constant == 0) { +- __ BIND(L_array_is_empty); +- remove_arg_slots(_masm, -stack_move_unit() * array_slots, +- rax_argslot, rbx_temp, rdx_temp); +- } else { +- ShouldNotReachHere(); +- } +- +- // Copy from the array to the new slots. +- // Note: Stack change code preserves integrity of rax_argslot pointer. +- // So even after slot insertions, rax_argslot still points to first argument. +- // Beware: Arguments that are shallow on the stack are deep in the array, +- // and vice versa. So a downward-growing stack (the usual) has to be copied +- // elementwise in reverse order from the source array. +- __ BIND(L_copy_args); +- if (length_constant == -1) { +- // [rax_argslot, rdx_argslot_limit) is the area we are inserting into. +- // Array element [0] goes at rdx_argslot_limit[-wordSize]. 
+- Register rdi_source = rdi_array; +- __ lea(rdi_source, Address(rdi_array, elem0_offset)); +- Register rdx_fill_ptr = rdx_argslot_limit; +- Label loop; +- __ BIND(loop); +- __ addptr(rdx_fill_ptr, -Interpreter::stackElementSize * elem_slots); +- move_typed_arg(_masm, elem_type, true, +- Address(rdx_fill_ptr, 0), Address(rdi_source, 0), +- rbx_temp, rsi_temp); +- __ addptr(rdi_source, type2aelembytes(elem_type)); +- __ cmpptr(rdx_fill_ptr, rax_argslot); +- __ jcc(Assembler::above, loop); +- } else if (length_constant == 0) { +- // nothing to copy +- } else { +- int elem_offset = elem0_offset; +- int slot_offset = length_constant * Interpreter::stackElementSize; +- for (int index = 0; index < length_constant; index++) { +- slot_offset -= Interpreter::stackElementSize * elem_slots; // fill backward +- move_typed_arg(_masm, elem_type, true, +- Address(rax_argslot, slot_offset), Address(rdi_array, elem_offset), +- rbx_temp, rsi_temp); +- elem_offset += type2aelembytes(elem_type); +- } +- } +- __ BIND(L_args_done); +- +- // Arguments are spread. Move to next method handle. +- UNPUSH_RSI; +- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); +- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); +- +- __ bind(bad_array_klass); +- UNPUSH_RSI; +- assert(!vmarg.uses(rarg2_required), "must be different registers"); +- __ load_heap_oop( rarg2_required, Address(rdx_array_klass, java_mirror_offset)); // required type +- __ movptr( rarg1_actual, vmarg); // bad array +- __ movl( rarg0_code, (int) Bytecodes::_aaload); // who is complaining? +- __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); +- +- __ bind(bad_array_length); +- UNPUSH_RSI; +- assert(!vmarg.uses(rarg2_required), "must be different registers"); +- __ mov( rarg2_required, rcx_recv); // AMH requiring a certain length +- __ movptr( rarg1_actual, vmarg); // bad array +- __ movl( rarg0_code, (int) Bytecodes::_arraylength); // who is complaining? 
+- __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); +-#undef UNPUSH_RSI +- +- break; +- } +- +- default: +- // do not require all platforms to recognize all adapter types +- __ nop(); +- return; +- } +- BLOCK_COMMENT(err_msg("} Entry %s", entry_name(ek))); +- __ hlt(); +- +- address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry); +- __ unimplemented(entry_name(ek)); // %%% FIXME: NYI +- +- init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie)); +-} +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/methodHandles_x86.hpp +--- openjdk/hotspot/src/cpu/x86/vm/methodHandles_x86.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/methodHandles_x86.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -27,266 +27,12 @@ + + // Adapters + enum /* platform_dependent_constants */ { +- adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 15000)) LP64_ONLY(32000 DEBUG_ONLY(+ 120000)) +-}; +- +-public: +- +-// The stack just after the recursive call from a ricochet frame +-// looks something like this. Offsets are marked in words, not bytes. +-// rsi (r13 on LP64) is part of the interpreter calling sequence +-// which tells the callee where is my real rsp (for frame walking). +-// (...lower memory addresses) +-// rsp: [ return pc ] always the global RicochetBlob::bounce_addr +-// rsp+1: [ recursive arg N ] +-// rsp+2: [ recursive arg N-1 ] +-// ... +-// rsp+N: [ recursive arg 1 ] +-// rsp+N+1: [ recursive method handle ] +-// ... 
+-// rbp-6: [ cleanup continuation pc ] <-- (struct RicochetFrame) +-// rbp-5: [ saved target MH ] the MH we will call on the saved args +-// rbp-4: [ saved args layout oop ] an int[] array which describes argument layout +-// rbp-3: [ saved args pointer ] address of transformed adapter arg M (slot 0) +-// rbp-2: [ conversion ] information about how the return value is used +-// rbp-1: [ exact sender sp ] exact TOS (rsi/r13) of original sender frame +-// rbp+0: [ saved sender fp ] (for original sender of AMH) +-// rbp+1: [ saved sender pc ] (back to original sender of AMH) +-// rbp+2: [ transformed adapter arg M ] <-- (extended TOS of original sender) +-// rbp+3: [ transformed adapter arg M-1] +-// ... +-// rbp+M+1: [ transformed adapter arg 1 ] +-// rbp+M+2: [ padding ] <-- (rbp + saved args base offset) +-// ... [ optional padding] +-// (higher memory addresses...) +-// +-// The arguments originally passed by the original sender +-// are lost, and arbitrary amounts of stack motion might have +-// happened due to argument transformation. +-// (This is done by C2I/I2C adapters and non-direct method handles.) +-// This is why there is an unpredictable amount of memory between +-// the extended and exact TOS of the sender. +-// The ricochet adapter itself will also (in general) perform +-// transformations before the recursive call. +-// +-// The transformed and saved arguments, immediately above the saved +-// return PC, are a well-formed method handle invocation ready to execute. +-// When the GC needs to walk the stack, these arguments are described +-// via the saved arg types oop, an int[] array with a private format. +-// This array is derived from the type of the transformed adapter +-// method handle, which also sits at the base of the saved argument +-// bundle. Since the GC may not be able to fish out the int[] +-// array, so it is pushed explicitly on the stack. This may be +-// an unnecessary expense. 
+-// +-// The following register conventions are significant at this point: +-// rsp the thread stack, as always; preserved by caller +-// rsi/r13 exact TOS of recursive frame (contents of [rbp-2]) +-// rcx recursive method handle (contents of [rsp+N+1]) +-// rbp preserved by caller (not used by caller) +-// Unless otherwise specified, all registers can be blown by the call. +-// +-// If this frame must be walked, the transformed adapter arguments +-// will be found with the help of the saved arguments descriptor. +-// +-// Therefore, the descriptor must match the referenced arguments. +-// The arguments must be followed by at least one word of padding, +-// which will be necessary to complete the final method handle call. +-// That word is not treated as holding an oop. Neither is the word +-// +-// The word pointed to by the return argument pointer is not +-// treated as an oop, even if points to a saved argument. +-// This allows the saved argument list to have a "hole" in it +-// to receive an oop from the recursive call. +-// (The hole might temporarily contain RETURN_VALUE_PLACEHOLDER.) +-// +-// When the recursive callee returns, RicochetBlob::bounce_addr will +-// immediately jump to the continuation stored in the RF. +-// This continuation will merge the recursive return value +-// into the saved argument list. At that point, the original +-// rsi, rbp, and rsp will be reloaded, the ricochet frame will +-// disappear, and the final target of the adapter method handle +-// will be invoked on the transformed argument list. +- +-class RicochetFrame { +- friend class MethodHandles; +- friend class VMStructs; +- +- private: +- intptr_t* _continuation; // what to do when control gets back here +- oopDesc* _saved_target; // target method handle to invoke on saved_args +- oopDesc* _saved_args_layout; // caching point for MethodTypeForm.vmlayout cookie +- intptr_t* _saved_args_base; // base of pushed arguments (slot 0, arg N) (-3) +- intptr_t _conversion; // misc. 
information from original AdapterMethodHandle (-2) +- intptr_t* _exact_sender_sp; // parallel to interpreter_frame_sender_sp (-1) +- intptr_t* _sender_link; // *must* coincide with frame::link_offset (0) +- address _sender_pc; // *must* coincide with frame::return_addr_offset (1) +- +- public: +- intptr_t* continuation() const { return _continuation; } +- oop saved_target() const { return _saved_target; } +- oop saved_args_layout() const { return _saved_args_layout; } +- intptr_t* saved_args_base() const { return _saved_args_base; } +- intptr_t conversion() const { return _conversion; } +- intptr_t* exact_sender_sp() const { return _exact_sender_sp; } +- intptr_t* sender_link() const { return _sender_link; } +- address sender_pc() const { return _sender_pc; } +- +- intptr_t* extended_sender_sp() const { +- // The extended sender SP is above the current RicochetFrame. +- return (intptr_t*) (((address) this) + sizeof(RicochetFrame)); +- } +- +- intptr_t return_value_slot_number() const { +- return adapter_conversion_vminfo(conversion()); +- } +- BasicType return_value_type() const { +- return adapter_conversion_dest_type(conversion()); +- } +- bool has_return_value_slot() const { +- return return_value_type() != T_VOID; +- } +- intptr_t* return_value_slot_addr() const { +- assert(has_return_value_slot(), ""); +- return saved_arg_slot_addr(return_value_slot_number()); +- } +- intptr_t* saved_target_slot_addr() const { +- return saved_arg_slot_addr(saved_args_length()); +- } +- intptr_t* saved_arg_slot_addr(int slot) const { +- assert(slot >= 0, ""); +- return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) ); +- } +- +- jint saved_args_length() const; +- jint saved_arg_offset(int arg) const; +- +- // GC interface +- oop* saved_target_addr() { return (oop*)&_saved_target; } +- oop* saved_args_layout_addr() { return (oop*)&_saved_args_layout; } +- +- oop compute_saved_args_layout(bool read_cache, bool write_cache); +- +- // 
Compiler/assembler interface. +- static int continuation_offset_in_bytes() { return offset_of(RicochetFrame, _continuation); } +- static int saved_target_offset_in_bytes() { return offset_of(RicochetFrame, _saved_target); } +- static int saved_args_layout_offset_in_bytes(){ return offset_of(RicochetFrame, _saved_args_layout); } +- static int saved_args_base_offset_in_bytes() { return offset_of(RicochetFrame, _saved_args_base); } +- static int conversion_offset_in_bytes() { return offset_of(RicochetFrame, _conversion); } +- static int exact_sender_sp_offset_in_bytes() { return offset_of(RicochetFrame, _exact_sender_sp); } +- static int sender_link_offset_in_bytes() { return offset_of(RicochetFrame, _sender_link); } +- static int sender_pc_offset_in_bytes() { return offset_of(RicochetFrame, _sender_pc); } +- +- // This value is not used for much, but it apparently must be nonzero. +- static int frame_size_in_bytes() { return sender_link_offset_in_bytes(); } +- +-#ifdef ASSERT +- // The magic number is supposed to help find ricochet frames within the bytes of stack dumps. +- enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E }; +- static int magic_number_1_offset_in_bytes() { return -wordSize; } +- static int magic_number_2_offset_in_bytes() { return sizeof(RicochetFrame); } +- intptr_t magic_number_1() const { return *(intptr_t*)((address)this + magic_number_1_offset_in_bytes()); }; +- intptr_t magic_number_2() const { return *(intptr_t*)((address)this + magic_number_2_offset_in_bytes()); }; +-#endif //ASSERT +- +- enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) }; +- +- static void verify_offsets() NOT_DEBUG_RETURN; +- void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc. 
+- void zap_arguments() NOT_DEBUG_RETURN; +- +- static void generate_ricochet_blob(MacroAssembler* _masm, +- // output params: +- int* bounce_offset, +- int* exception_offset, +- int* frame_size_in_words); +- +- static void enter_ricochet_frame(MacroAssembler* _masm, +- Register rcx_recv, +- Register rax_argv, +- address return_handler, +- Register rbx_temp); +- static void leave_ricochet_frame(MacroAssembler* _masm, +- Register rcx_recv, +- Register new_sp_reg, +- Register sender_pc_reg); +- +- static Address frame_address(int offset = 0) { +- // The RicochetFrame is found by subtracting a constant offset from rbp. +- return Address(rbp, - sender_link_offset_in_bytes() + offset); +- } +- +- static RicochetFrame* from_frame(const frame& fr) { +- address bp = (address) fr.fp(); +- RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes()); +- rf->verify(); +- return rf; +- } +- +- static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN; +- +- static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN; ++ adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 25000)) LP64_ONLY(32000 DEBUG_ONLY(+ 150000)) + }; + + // Additional helper methods for MethodHandles code generation: + public: + static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg); +- static void load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr); +- static void load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr); +- +- static void load_stack_move(MacroAssembler* _masm, +- Register rdi_stack_move, +- Register rcx_amh, +- bool might_be_negative); +- +- static void insert_arg_slots(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, +- Register rax_argslot, +- Register rbx_temp, Register rdx_temp); +- +- static void remove_arg_slots(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, +- Register rax_argslot, +- Register rbx_temp, Register rdx_temp); +- 
+- static void push_arg_slots(MacroAssembler* _masm, +- Register rax_argslot, +- RegisterOrConstant slot_count, +- int skip_words_count, +- Register rbx_temp, Register rdx_temp); +- +- static void move_arg_slots_up(MacroAssembler* _masm, +- Register rbx_bottom, // invariant +- Address top_addr, // can use rax_temp +- RegisterOrConstant positive_distance_in_slots, +- Register rax_temp, Register rdx_temp); +- +- static void move_arg_slots_down(MacroAssembler* _masm, +- Address bottom_addr, // can use rax_temp +- Register rbx_top, // invariant +- RegisterOrConstant negative_distance_in_slots, +- Register rax_temp, Register rdx_temp); +- +- static void move_typed_arg(MacroAssembler* _masm, +- BasicType type, bool is_element, +- Address slot_dest, Address value_src, +- Register rbx_temp, Register rdx_temp); +- +- static void move_return_value(MacroAssembler* _masm, BasicType type, +- Address return_slot); +- +- static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, +- const char* error_message) NOT_DEBUG_RETURN; +- +- static void verify_argslots(MacroAssembler* _masm, +- RegisterOrConstant argslot_count, +- Register argslot_reg, +- bool negate_argslot, +- const char* error_message) NOT_DEBUG_RETURN; +- +- static void verify_stack_move(MacroAssembler* _masm, +- RegisterOrConstant arg_slots, +- int direction) NOT_DEBUG_RETURN; + + static void verify_klass(MacroAssembler* _masm, + Register obj, KlassHandle klass, +@@ -297,9 +43,17 @@ + "reference is a MH"); + } + ++ static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN; ++ + // Similar to InterpreterMacroAssembler::jump_from_interpreted. + // Takes care of special dispatch from single stepping too. 
+- static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp); ++ static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, ++ bool for_compiler_entry); ++ ++ static void jump_to_lambda_form(MacroAssembler* _masm, ++ Register recv, Register method_temp, ++ Register temp2, ++ bool for_compiler_entry); + + static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN; + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/sharedRuntime_x86_32.cpp +--- openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -643,6 +643,19 @@ + __ movdbl(r, Address(saved_sp, next_val_off)); + } + ++static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, ++ address code_start, address code_end, ++ Label& L_ok) { ++ Label L_fail; ++ __ lea(temp_reg, ExternalAddress(code_start)); ++ __ cmpptr(pc_reg, temp_reg); ++ __ jcc(Assembler::belowEqual, L_fail); ++ __ lea(temp_reg, ExternalAddress(code_end)); ++ __ cmpptr(pc_reg, temp_reg); ++ __ jcc(Assembler::below, L_ok); ++ __ bind(L_fail); ++} ++ + static void gen_i2c_adapter(MacroAssembler *masm, + int total_args_passed, + int comp_args_on_stack, +@@ -653,9 +666,53 @@ + // we may do a i2c -> c2i transition if we lose a race where compiled + // code goes non-entrant while we get args ready. + ++ // Adapters can be frameless because they do not require the caller ++ // to perform additional cleanup work, such as correcting the stack pointer. ++ // An i2c adapter is frameless because the *caller* frame, which is interpreted, ++ // routinely repairs its own stack pointer (from interpreter_frame_last_sp), ++ // even if a callee has modified the stack pointer. 
++ // A c2i adapter is frameless because the *callee* frame, which is interpreted, ++ // routinely repairs its caller's stack pointer (from sender_sp, which is set ++ // up via the senderSP register). ++ // In other words, if *either* the caller or callee is interpreted, we can ++ // get the stack pointer repaired after a call. ++ // This is why c2i and i2c adapters cannot be indefinitely composed. ++ // In particular, if a c2i adapter were to somehow call an i2c adapter, ++ // both caller and callee would be compiled methods, and neither would ++ // clean up the stack pointer changes performed by the two adapters. ++ // If this happens, control eventually transfers back to the compiled ++ // caller, but with an uncorrected stack, causing delayed havoc. ++ + // Pick up the return address + __ movptr(rax, Address(rsp, 0)); + ++ if (VerifyAdapterCalls && ++ (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) { ++ // So, let's test for cascading c2i/i2c adapters right now. ++ // assert(Interpreter::contains($return_addr) || ++ // StubRoutines::contains($return_addr), ++ // "i2c adapter must return to an interpreter frame"); ++ __ block_comment("verify_i2c { "); ++ Label L_ok; ++ if (Interpreter::code() != NULL) ++ range_check(masm, rax, rdi, ++ Interpreter::code()->code_start(), Interpreter::code()->code_end(), ++ L_ok); ++ if (StubRoutines::code1() != NULL) ++ range_check(masm, rax, rdi, ++ StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(), ++ L_ok); ++ if (StubRoutines::code2() != NULL) ++ range_check(masm, rax, rdi, ++ StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(), ++ L_ok); ++ const char* msg = "i2c adapter must return to an interpreter frame"; ++ __ block_comment(msg); ++ __ stop(msg); ++ __ bind(L_ok); ++ __ block_comment("} verify_i2ce "); ++ } ++ + // Must preserve original SP for loading incoming arguments because + // we need to align the outgoing SP for compiled code. 
+ __ movptr(rdi, rsp); +@@ -1293,6 +1350,89 @@ + __ bind(done); + } + ++static void verify_oop_args(MacroAssembler* masm, ++ int total_args_passed, ++ const BasicType* sig_bt, ++ const VMRegPair* regs) { ++ Register temp_reg = rbx; // not part of any compiled calling seq ++ if (VerifyOops) { ++ for (int i = 0; i < total_args_passed; i++) { ++ if (sig_bt[i] == T_OBJECT || ++ sig_bt[i] == T_ARRAY) { ++ VMReg r = regs[i].first(); ++ assert(r->is_valid(), "bad oop arg"); ++ if (r->is_stack()) { ++ __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); ++ __ verify_oop(temp_reg); ++ } else { ++ __ verify_oop(r->as_Register()); ++ } ++ } ++ } ++ } ++} ++ ++static void gen_special_dispatch(MacroAssembler* masm, ++ int total_args_passed, ++ int comp_args_on_stack, ++ vmIntrinsics::ID special_dispatch, ++ const BasicType* sig_bt, ++ const VMRegPair* regs) { ++ verify_oop_args(masm, total_args_passed, sig_bt, regs); ++ ++ // Now write the args into the outgoing interpreter space ++ bool has_receiver = false; ++ Register receiver_reg = noreg; ++ int member_arg_pos = -1; ++ Register member_reg = noreg; ++ int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch); ++ if (ref_kind != 0) { ++ member_arg_pos = total_args_passed - 1; // trailing MemberName argument ++ member_reg = rbx; // known to be free at this point ++ has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); ++ } else if (special_dispatch == vmIntrinsics::_invokeBasic) { ++ has_receiver = true; ++ } else { ++ guarantee(false, err_msg("special_dispatch=%d", special_dispatch)); ++ } ++ ++ if (member_reg != noreg) { ++ // Load the member_arg into register, if necessary. 
++ assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob"); ++ assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object"); ++ VMReg r = regs[member_arg_pos].first(); ++ assert(r->is_valid(), "bad member arg"); ++ if (r->is_stack()) { ++ __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); ++ } else { ++ // no data motion is needed ++ member_reg = r->as_Register(); ++ } ++ } ++ ++ if (has_receiver) { ++ // Make sure the receiver is loaded into a register. ++ assert(total_args_passed > 0, "oob"); ++ assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object"); ++ VMReg r = regs[0].first(); ++ assert(r->is_valid(), "bad receiver arg"); ++ if (r->is_stack()) { ++ // Porting note: This assumes that compiled calling conventions always ++ // pass the receiver oop in a register. If this is not true on some ++ // platform, pick a temp and load the receiver from stack. ++ assert(false, "receiver always in a register"); ++ receiver_reg = rcx; // known to be free at this point ++ __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); ++ } else { ++ // no data motion is needed ++ receiver_reg = r->as_Register(); ++ } ++ } ++ ++ // Figure out which address we are really jumping to: ++ MethodHandles::generate_method_handle_dispatch(masm, special_dispatch, ++ receiver_reg, member_reg, /*for_compiler_entry:*/ true); ++} + + // --------------------------------------------------------------------------- + // Generate a native wrapper for a given method. 
The method takes arguments +@@ -1323,14 +1463,37 @@ + // transition back to thread_in_Java + // return to caller + // +-nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, ++nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, + methodHandle method, + int compile_id, + int total_in_args, + int comp_args_on_stack, +- BasicType *in_sig_bt, +- VMRegPair *in_regs, ++ BasicType* in_sig_bt, ++ VMRegPair* in_regs, + BasicType ret_type) { ++ if (method->is_method_handle_intrinsic()) { ++ vmIntrinsics::ID iid = method->intrinsic_id(); ++ intptr_t start = (intptr_t)__ pc(); ++ int vep_offset = ((intptr_t)__ pc()) - start; ++ gen_special_dispatch(masm, ++ total_in_args, ++ comp_args_on_stack, ++ method->intrinsic_id(), ++ in_sig_bt, ++ in_regs); ++ int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period ++ __ flush(); ++ int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually ++ return nmethod::new_native_nmethod(method, ++ compile_id, ++ masm->code(), ++ vep_offset, ++ frame_complete, ++ stack_slots / VMRegImpl::slots_per_word, ++ in_ByteSize(-1), ++ in_ByteSize(-1), ++ (OopMapSet*)NULL); ++ } + bool is_critical_native = true; + address native_func = method->critical_native_function(); + if (native_func == NULL) { +@@ -1436,7 +1599,7 @@ + if (in_regs[i].first()->is_Register()) { + const Register reg = in_regs[i].first()->as_Register(); + switch (in_sig_bt[i]) { +- case T_ARRAY: ++ case T_ARRAY: // critical array (uses 2 slots on LP64) + case T_BOOLEAN: + case T_BYTE: + case T_SHORT: +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/sharedRuntime_x86_64.cpp +--- openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -590,6 +590,19 @@ + __ jmp(rcx); + } + ++static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, ++ address 
code_start, address code_end, ++ Label& L_ok) { ++ Label L_fail; ++ __ lea(temp_reg, ExternalAddress(code_start)); ++ __ cmpptr(pc_reg, temp_reg); ++ __ jcc(Assembler::belowEqual, L_fail); ++ __ lea(temp_reg, ExternalAddress(code_end)); ++ __ cmpptr(pc_reg, temp_reg); ++ __ jcc(Assembler::below, L_ok); ++ __ bind(L_fail); ++} ++ + static void gen_i2c_adapter(MacroAssembler *masm, + int total_args_passed, + int comp_args_on_stack, +@@ -605,9 +618,53 @@ + // save code can segv when fxsave instructions find improperly + // aligned stack pointer. + ++ // Adapters can be frameless because they do not require the caller ++ // to perform additional cleanup work, such as correcting the stack pointer. ++ // An i2c adapter is frameless because the *caller* frame, which is interpreted, ++ // routinely repairs its own stack pointer (from interpreter_frame_last_sp), ++ // even if a callee has modified the stack pointer. ++ // A c2i adapter is frameless because the *callee* frame, which is interpreted, ++ // routinely repairs its caller's stack pointer (from sender_sp, which is set ++ // up via the senderSP register). ++ // In other words, if *either* the caller or callee is interpreted, we can ++ // get the stack pointer repaired after a call. ++ // This is why c2i and i2c adapters cannot be indefinitely composed. ++ // In particular, if a c2i adapter were to somehow call an i2c adapter, ++ // both caller and callee would be compiled methods, and neither would ++ // clean up the stack pointer changes performed by the two adapters. ++ // If this happens, control eventually transfers back to the compiled ++ // caller, but with an uncorrected stack, causing delayed havoc. ++ + // Pick up the return address + __ movptr(rax, Address(rsp, 0)); + ++ if (VerifyAdapterCalls && ++ (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) { ++ // So, let's test for cascading c2i/i2c adapters right now. 
++ // assert(Interpreter::contains($return_addr) || ++ // StubRoutines::contains($return_addr), ++ // "i2c adapter must return to an interpreter frame"); ++ __ block_comment("verify_i2c { "); ++ Label L_ok; ++ if (Interpreter::code() != NULL) ++ range_check(masm, rax, r11, ++ Interpreter::code()->code_start(), Interpreter::code()->code_end(), ++ L_ok); ++ if (StubRoutines::code1() != NULL) ++ range_check(masm, rax, r11, ++ StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(), ++ L_ok); ++ if (StubRoutines::code2() != NULL) ++ range_check(masm, rax, r11, ++ StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(), ++ L_ok); ++ const char* msg = "i2c adapter must return to an interpreter frame"; ++ __ block_comment(msg); ++ __ stop(msg); ++ __ bind(L_ok); ++ __ block_comment("} verify_i2ce "); ++ } ++ + // Must preserve original SP for loading incoming arguments because + // we need to align the outgoing SP for compiled code. + __ movptr(r11, rsp); +@@ -1366,6 +1423,14 @@ + } + + ++// Different signatures may require very different orders for the move ++// to avoid clobbering other arguments. There's no simple way to ++// order them safely. Compute a safe order for issuing stores and ++// break any cycles in those stores. This code is fairly general but ++// it's not necessary on the other platforms so we keep it in the ++// platform dependent code instead of moving it into a shared file. ++// (See bugs 7013347 & 7145024.) ++// Note that this code is specific to LP64. 
+ class ComputeMoveOrder: public StackObj { + class MoveOperation: public ResourceObj { + friend class ComputeMoveOrder; +@@ -1532,6 +1597,89 @@ + } + }; + ++static void verify_oop_args(MacroAssembler* masm, ++ int total_args_passed, ++ const BasicType* sig_bt, ++ const VMRegPair* regs) { ++ Register temp_reg = rbx; // not part of any compiled calling seq ++ if (VerifyOops) { ++ for (int i = 0; i < total_args_passed; i++) { ++ if (sig_bt[i] == T_OBJECT || ++ sig_bt[i] == T_ARRAY) { ++ VMReg r = regs[i].first(); ++ assert(r->is_valid(), "bad oop arg"); ++ if (r->is_stack()) { ++ __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); ++ __ verify_oop(temp_reg); ++ } else { ++ __ verify_oop(r->as_Register()); ++ } ++ } ++ } ++ } ++} ++ ++static void gen_special_dispatch(MacroAssembler* masm, ++ int total_args_passed, ++ int comp_args_on_stack, ++ vmIntrinsics::ID special_dispatch, ++ const BasicType* sig_bt, ++ const VMRegPair* regs) { ++ verify_oop_args(masm, total_args_passed, sig_bt, regs); ++ ++ // Now write the args into the outgoing interpreter space ++ bool has_receiver = false; ++ Register receiver_reg = noreg; ++ int member_arg_pos = -1; ++ Register member_reg = noreg; ++ int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch); ++ if (ref_kind != 0) { ++ member_arg_pos = total_args_passed - 1; // trailing MemberName argument ++ member_reg = rbx; // known to be free at this point ++ has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); ++ } else if (special_dispatch == vmIntrinsics::_invokeBasic) { ++ has_receiver = true; ++ } else { ++ guarantee(false, err_msg("special_dispatch=%d", special_dispatch)); ++ } ++ ++ if (member_reg != noreg) { ++ // Load the member_arg into register, if necessary. 
++ assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob"); ++ assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object"); ++ VMReg r = regs[member_arg_pos].first(); ++ assert(r->is_valid(), "bad member arg"); ++ if (r->is_stack()) { ++ __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); ++ } else { ++ // no data motion is needed ++ member_reg = r->as_Register(); ++ } ++ } ++ ++ if (has_receiver) { ++ // Make sure the receiver is loaded into a register. ++ assert(total_args_passed > 0, "oob"); ++ assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object"); ++ VMReg r = regs[0].first(); ++ assert(r->is_valid(), "bad receiver arg"); ++ if (r->is_stack()) { ++ // Porting note: This assumes that compiled calling conventions always ++ // pass the receiver oop in a register. If this is not true on some ++ // platform, pick a temp and load the receiver from stack. ++ assert(false, "receiver always in a register"); ++ receiver_reg = j_rarg0; // known to be free at this point ++ __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); ++ } else { ++ // no data motion is needed ++ receiver_reg = r->as_Register(); ++ } ++ } ++ ++ // Figure out which address we are really jumping to: ++ MethodHandles::generate_method_handle_dispatch(masm, special_dispatch, ++ receiver_reg, member_reg, /*for_compiler_entry:*/ true); ++} + + // --------------------------------------------------------------------------- + // Generate a native wrapper for a given method. The method takes arguments +@@ -1539,14 +1687,60 @@ + // convention (handlizes oops, etc), transitions to native, makes the call, + // returns to java state (possibly blocking), unhandlizes any result and + // returns. 
+-nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, ++// ++// Critical native functions are a shorthand for the use of ++// GetPrimtiveArrayCritical and disallow the use of any other JNI ++// functions. The wrapper is expected to unpack the arguments before ++// passing them to the callee and perform checks before and after the ++// native call to ensure that they GC_locker ++// lock_critical/unlock_critical semantics are followed. Some other ++// parts of JNI setup are skipped like the tear down of the JNI handle ++// block and the check for pending exceptions it's impossible for them ++// to be thrown. ++// ++// They are roughly structured like this: ++// if (GC_locker::needs_gc()) ++// SharedRuntime::block_for_jni_critical(); ++// tranistion to thread_in_native ++// unpack arrray arguments and call native entry point ++// check for safepoint in progress ++// check if any thread suspend flags are set ++// call into JVM and possible unlock the JNI critical ++// if a GC was suppressed while in the critical native. 
++// transition back to thread_in_Java ++// return to caller ++// ++nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, + methodHandle method, + int compile_id, + int total_in_args, + int comp_args_on_stack, +- BasicType *in_sig_bt, +- VMRegPair *in_regs, ++ BasicType* in_sig_bt, ++ VMRegPair* in_regs, + BasicType ret_type) { ++ if (method->is_method_handle_intrinsic()) { ++ vmIntrinsics::ID iid = method->intrinsic_id(); ++ intptr_t start = (intptr_t)__ pc(); ++ int vep_offset = ((intptr_t)__ pc()) - start; ++ gen_special_dispatch(masm, ++ total_in_args, ++ comp_args_on_stack, ++ method->intrinsic_id(), ++ in_sig_bt, ++ in_regs); ++ int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period ++ __ flush(); ++ int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually ++ return nmethod::new_native_nmethod(method, ++ compile_id, ++ masm->code(), ++ vep_offset, ++ frame_complete, ++ stack_slots / VMRegImpl::slots_per_word, ++ in_ByteSize(-1), ++ in_ByteSize(-1), ++ (OopMapSet*)NULL); ++ } + bool is_critical_native = true; + address native_func = method->critical_native_function(); + if (native_func == NULL) { +@@ -1658,7 +1852,7 @@ + case T_SHORT: + case T_CHAR: + case T_INT: single_slots++; break; +- case T_ARRAY: ++ case T_ARRAY: // specific to LP64 (7145024) + case T_LONG: double_slots++; break; + default: ShouldNotReachHere(); + } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/stubGenerator_x86_32.cpp +--- openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -2315,12 +2315,6 @@ + CAST_FROM_FN_PTR(address, SharedRuntime::d2l)); + + // Build this early so it's available for the interpreter +- StubRoutines::_throw_WrongMethodTypeException_entry = +- generate_throw_exception("WrongMethodTypeException throw_exception", +- CAST_FROM_FN_PTR(address, 
SharedRuntime::throw_WrongMethodTypeException), +- rax, rcx); +- +- // Build this early so it's available for the interpreter + StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); + } + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/stubGenerator_x86_64.cpp +--- openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -3063,14 +3063,6 @@ + + StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); + +- // Build this early so it's available for the interpreter. Stub +- // expects the required and actual types as register arguments in +- // j_rarg0 and j_rarg1 respectively. +- StubRoutines::_throw_WrongMethodTypeException_entry = +- generate_throw_exception("WrongMethodTypeException throw_exception", +- CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException), +- rax, rcx); +- + // Build this early so it's available for the interpreter. + StubRoutines::_throw_StackOverflowError_entry = + generate_throw_exception("StackOverflowError throw_exception", +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/templateInterpreter_x86_32.cpp +--- openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -708,9 +708,9 @@ + // Need to differentiate between igetfield, agetfield, bgetfield etc. + // because they are different sizes. 
+ // Use the type from the constant pool cache +- __ shrl(rdx, ConstantPoolCacheEntry::tosBits); +- // Make sure we don't need to mask rdx for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); ++ __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift); ++ // Make sure we don't need to mask rdx after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); + __ cmpl(rdx, btos); + __ jcc(Assembler::notEqual, notByte); + __ load_signed_byte(rax, field_address); +@@ -1510,7 +1510,6 @@ + case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break; + case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break; + case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break; +- case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break; + + case Interpreter::java_lang_math_sin : // fall thru + case Interpreter::java_lang_math_cos : // fall thru +@@ -1521,7 +1520,9 @@ + case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break; + case Interpreter::java_lang_ref_reference_get + : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; +- default : ShouldNotReachHere(); break; ++ default: ++ fatal(err_msg("unexpected method kind: %d", kind)); ++ break; + } + + if (entry_point) return entry_point; +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/templateInterpreter_x86_64.cpp +--- openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -681,9 +681,9 @@ + // Need to differentiate between igetfield, agetfield, bgetfield etc. + // because they are different sizes.
+ // Use the type from the constant pool cache +- __ shrl(rdx, ConstantPoolCacheEntry::tosBits); +- // Make sure we don't need to mask edx for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); ++ __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift); ++ // Make sure we don't need to mask edx after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); + + __ cmpl(rdx, atos); + __ jcc(Assembler::notEqual, notObj); +@@ -1521,12 +1521,11 @@ + switch (kind) { + case Interpreter::zerolocals : break; + case Interpreter::zerolocals_synchronized: synchronized = true; break; +- case Interpreter::native : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break; +- case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true); break; +- case Interpreter::empty : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry(); break; +- case Interpreter::accessor : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry(); break; +- case Interpreter::abstract : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry(); break; +- case Interpreter::method_handle : entry_point = ((InterpreterGenerator*) this)->generate_method_handle_entry();break; ++ case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break; ++ case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break; ++ case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break; ++ case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break; ++ case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break; + + case Interpreter::java_lang_math_sin : // fall thru + case Interpreter::java_lang_math_cos : // fall thru +@@ -1537,7 +1536,9 @@ + 
case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break; + case Interpreter::java_lang_ref_reference_get + : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; +- default : ShouldNotReachHere(); break; ++ default: ++ fatal(err_msg("unexpected method kind: %d", kind)); ++ break; + } + + if (entry_point) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/templateTable_x86_32.cpp +--- openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -446,13 +446,13 @@ + const Register cache = rcx; + const Register index = rdx; + +- resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1)); ++ resolve_cache_and_index(f12_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1)); + if (VerifyOops) { + __ verify_oop(rax); + } + + Label L_done, L_throw_exception; +- const Register con_klass_temp = rcx; // same as Rcache ++ const Register con_klass_temp = rcx; // same as cache + __ load_klass(con_klass_temp, rax); + __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr())); + __ jcc(Assembler::notEqual, L_done); +@@ -2084,15 +2084,15 @@ + Register Rcache, + Register index, + size_t index_size) { +- Register temp = rbx; +- ++ const Register temp = rbx; + assert_different_registers(result, Rcache, index, temp); + + Label resolved; +- if (byte_no == f1_oop) { +- // We are resolved if the f1 field contains a non-null object (CallSite, etc.) +- // This kind of CP cache entry does not need to match the flags byte, because ++ if (byte_no == f12_oop) { ++ // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.) ++ // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because + // there is a 1-1 relation between bytecode type and CP entry type. 
++ // The caller will also load a methodOop from f2. + assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD) + __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); + __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset())); +@@ -2112,15 +2112,18 @@ + case Bytecodes::_getstatic : // fall through + case Bytecodes::_putstatic : // fall through + case Bytecodes::_getfield : // fall through +- case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break; ++ case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break; + case Bytecodes::_invokevirtual : // fall through + case Bytecodes::_invokespecial : // fall through + case Bytecodes::_invokestatic : // fall through +- case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; +- case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; +- case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; +- case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; +- default : ShouldNotReachHere(); break; ++ case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; ++ case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break; ++ case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; ++ case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; ++ case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; ++ default: ++ fatal(err_msg("unexpected bytecode: 
%s", Bytecodes::name(bytecode()))); ++ break; + } + __ movl(temp, (int)bytecode()); + __ call_VM(noreg, entry, temp); +@@ -2149,7 +2152,7 @@ + __ movl(flags, Address(cache, index, Address::times_ptr, + in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()))); + +- // klass overwrite register ++ // klass overwrite register + if (is_static) { + __ movptr(obj, Address(cache, index, Address::times_ptr, + in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset()))); +@@ -2161,7 +2164,7 @@ + Register itable_index, + Register flags, + bool is_invokevirtual, +- bool is_invokevfinal /*unused*/, ++ bool is_invokevfinal, /*unused*/ + bool is_invokedynamic) { + // setup registers + const Register cache = rcx; +@@ -2171,28 +2174,33 @@ + assert_different_registers(itable_index, flags); + assert_different_registers(itable_index, cache, index); + // determine constant pool cache field offsets ++ assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); + const int method_offset = in_bytes( + constantPoolCacheOopDesc::base_offset() + +- (is_invokevirtual ++ ((byte_no == f2_byte) + ? ConstantPoolCacheEntry::f2_offset() +- : ConstantPoolCacheEntry::f1_offset() +- ) +- ); ++ : ConstantPoolCacheEntry::f1_offset())); + const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + + ConstantPoolCacheEntry::flags_offset()); + // access constant pool cache fields + const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + + ConstantPoolCacheEntry::f2_offset()); + +- if (byte_no == f1_oop) { +- // Resolved f1_oop goes directly into 'method' register. +- assert(is_invokedynamic, ""); +- resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4)); ++ if (byte_no == f12_oop) { ++ // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'. ++ // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset). ++ // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle. 
++ size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2)); ++ resolve_cache_and_index(byte_no, itable_index, cache, index, index_size); ++ __ movptr(method, Address(cache, index, Address::times_ptr, index_offset)); ++ itable_index = noreg; // hack to disable load below + } else { + resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2)); + __ movptr(method, Address(cache, index, Address::times_ptr, method_offset)); + } + if (itable_index != noreg) { ++ // pick up itable index from f2 also: ++ assert(byte_no == f1_byte, "already picked up f1"); + __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset)); + } + __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset)); +@@ -2260,10 +2268,10 @@ + + Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble; + +- __ shrl(flags, ConstantPoolCacheEntry::tosBits); ++ __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); + assert(btos == 0, "change code, btos != 0"); + // btos +- __ andptr(flags, 0x0f); ++ __ andptr(flags, ConstantPoolCacheEntry::tos_state_mask); + __ jcc(Assembler::notZero, notByte); + + __ load_signed_byte(rax, lo ); +@@ -2415,9 +2423,9 @@ + __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset + + ConstantPoolCacheEntry::flags_offset()))); + __ mov(rbx, rsp); +- __ shrl(rcx, ConstantPoolCacheEntry::tosBits); +- // Make sure we don't need to mask rcx for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); ++ __ shrl(rcx, ConstantPoolCacheEntry::tos_state_shift); ++ // Make sure we don't need to mask rcx after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); + __ cmpl(rcx, ltos); + __ jccb(Assembler::equal, two_word); + __ cmpl(rcx, dtos); +@@ -2467,7 +2475,7 @@ + + Label notVolatile, Done; + __ movl(rdx, flags); +- __ shrl(rdx, ConstantPoolCacheEntry::volatileField); ++ __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); + __ andl(rdx, 0x1); + + // 
field addresses +@@ -2476,9 +2484,9 @@ + + Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble; + +- __ shrl(flags, ConstantPoolCacheEntry::tosBits); ++ __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); + assert(btos == 0, "change code, btos != 0"); +- __ andl(flags, 0x0f); ++ __ andl(flags, ConstantPoolCacheEntry::tos_state_mask); + __ jcc(Assembler::notZero, notByte); + + // btos +@@ -2726,7 +2734,7 @@ + // volatile_barrier( ); + + Label notVolatile, Done; +- __ shrl(rdx, ConstantPoolCacheEntry::volatileField); ++ __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); + __ andl(rdx, 0x1); + // Check for volatile store + __ testl(rdx, rdx); +@@ -2892,19 +2900,29 @@ + } + + +-void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) { ++void TemplateTable::prepare_invoke(int byte_no, ++ Register method, // linked method (or i-klass) ++ Register index, // itable index, MethodType, etc. ++ Register recv, // if caller wants to see it ++ Register flags // if caller wants to test it ++ ) { + // determine flags +- Bytecodes::Code code = bytecode(); ++ const Bytecodes::Code code = bytecode(); + const bool is_invokeinterface = code == Bytecodes::_invokeinterface; + const bool is_invokedynamic = code == Bytecodes::_invokedynamic; ++ const bool is_invokehandle = code == Bytecodes::_invokehandle; + const bool is_invokevirtual = code == Bytecodes::_invokevirtual; + const bool is_invokespecial = code == Bytecodes::_invokespecial; +- const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic); +- const bool receiver_null_check = is_invokespecial; +- const bool save_flags = is_invokeinterface || is_invokevirtual; ++ const bool load_receiver = (recv != noreg); ++ const bool save_flags = (flags != noreg); ++ assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); ++ assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags 
for vfinal"); ++ assert(flags == noreg || flags == rdx, ""); ++ assert(recv == noreg || recv == rcx, ""); ++ + // setup registers & access constant pool cache +- const Register recv = rcx; +- const Register flags = rdx; ++ if (recv == noreg) recv = rcx; ++ if (flags == noreg) flags = rdx; + assert_different_registers(method, index, recv, flags); + + // save 'interpreter return address' +@@ -2912,37 +2930,43 @@ + + load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); + ++ // maybe push appendix to arguments (just before return address) ++ if (is_invokedynamic || is_invokehandle) { ++ Label L_no_push; ++ __ verify_oop(index); ++ __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift)); ++ __ jccb(Assembler::zero, L_no_push); ++ // Push the appendix as a trailing parameter. ++ // This must be done before we get the receiver, ++ // since the parameter_size includes it. ++ __ push(index); // push appendix (MethodType, CallSite, etc.) ++ __ bind(L_no_push); ++ } ++ + // load receiver if needed (note: no return address pushed yet) + if (load_receiver) { +- assert(!is_invokedynamic, ""); + __ movl(recv, flags); +- __ andl(recv, 0xFF); +- // recv count is 0 based? 
+- Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1)); ++ __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask); ++ const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address ++ const int receiver_is_at_end = -1; // back off one slot to get receiver ++ Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end); + __ movptr(recv, recv_addr); + __ verify_oop(recv); + } + +- // do null check if needed +- if (receiver_null_check) { +- __ null_check(recv); +- } +- + if (save_flags) { + __ mov(rsi, flags); + } + + // compute return type +- __ shrl(flags, ConstantPoolCacheEntry::tosBits); +- // Make sure we don't need to mask flags for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); ++ __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); ++ // Make sure we don't need to mask flags after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); + // load return address + { +- address table_addr; +- if (is_invokeinterface || is_invokedynamic) +- table_addr = (address)Interpreter::return_5_addrs_by_index_table(); +- else +- table_addr = (address)Interpreter::return_3_addrs_by_index_table(); ++ const address table_addr = (is_invokeinterface || is_invokedynamic) ? ++ (address)Interpreter::return_5_addrs_by_index_table() : ++ (address)Interpreter::return_3_addrs_by_index_table(); + ExternalAddress table(table_addr); + __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))); + } +@@ -2950,7 +2974,7 @@ + // push return address + __ push(flags); + +- // Restore flag value from the constant pool cache, and restore rsi ++ // Restore flags value from the constant pool cache, and restore rsi + // for later null checks. 
rsi is the bytecode pointer + if (save_flags) { + __ mov(flags, rsi); +@@ -2959,22 +2983,26 @@ + } + + +-void TemplateTable::invokevirtual_helper(Register index, Register recv, +- Register flags) { +- ++void TemplateTable::invokevirtual_helper(Register index, ++ Register recv, ++ Register flags) { + // Uses temporary registers rax, rdx + assert_different_registers(index, recv, rax, rdx); ++ assert(index == rbx, ""); ++ assert(recv == rcx, ""); + + // Test for an invoke of a final method + Label notFinal; + __ movl(rax, flags); +- __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod)); ++ __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift)); + __ jcc(Assembler::zero, notFinal); + +- Register method = index; // method must be rbx, +- assert(method == rbx, "methodOop must be rbx, for interpreter calling convention"); ++ const Register method = index; // method must be rbx ++ assert(method == rbx, ++ "methodOop must be rbx for interpreter calling convention"); + + // do the call - the index is actually the method to call ++ // that is, f2 is a vtable index if !is_vfinal, else f2 is a methodOop + __ verify_oop(method); + + // It's final, need a null check here! 
+@@ -2989,7 +3017,6 @@ + + // get receiver klass + __ null_check(recv, oopDesc::klass_offset_in_bytes()); +- // Keep recv in rcx for callee expects it there + __ load_klass(rax, recv); + __ verify_oop(rax); + +@@ -2997,9 +3024,7 @@ + __ profile_virtual_call(rax, rdi, rdx); + + // get target methodOop & entry point +- const int base = instanceKlass::vtable_start_offset() * wordSize; +- assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below"); +- __ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes())); ++ __ lookup_virtual_method(rax, index, method); + __ jump_from_interpreted(method, rdx); + } + +@@ -3007,9 +3032,12 @@ + void TemplateTable::invokevirtual(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f2_byte, "use this argument"); +- prepare_invoke(rbx, noreg, byte_no); +- +- // rbx,: index ++ prepare_invoke(byte_no, ++ rbx, // method or vtable index ++ noreg, // unused itable index ++ rcx, rdx); // recv, flags ++ ++ // rbx: index + // rcx: receiver + // rdx: flags + +@@ -3020,7 +3048,10 @@ + void TemplateTable::invokespecial(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f1_byte, "use this argument"); +- prepare_invoke(rbx, noreg, byte_no); ++ prepare_invoke(byte_no, rbx, noreg, // get f1 methodOop ++ rcx); // get receiver also for null check ++ __ verify_oop(rcx); ++ __ null_check(rcx); + // do the call + __ verify_oop(rbx); + __ profile_call(rax); +@@ -3031,7 +3062,7 @@ + void TemplateTable::invokestatic(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f1_byte, "use this argument"); +- prepare_invoke(rbx, noreg, byte_no); ++ prepare_invoke(byte_no, rbx); // get f1 methodOop + // do the call + __ verify_oop(rbx); + __ profile_call(rax); +@@ -3049,10 +3080,11 @@ + void TemplateTable::invokeinterface(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f1_byte, "use this argument"); +- prepare_invoke(rax, rbx, byte_no); +- +- // rax,: Interface 
+- // rbx,: index ++ prepare_invoke(byte_no, rax, rbx, // get f1 klassOop, f2 itable index ++ rcx, rdx); // recv, flags ++ ++ // rax: interface klass (from f1) ++ // rbx: itable index (from f2) + // rcx: receiver + // rdx: flags + +@@ -3062,7 +3094,7 @@ + // another compliant java compiler. + Label notMethod; + __ movl(rdi, rdx); +- __ andl(rdi, (1 << ConstantPoolCacheEntry::methodInterface)); ++ __ andl(rdi, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift)); + __ jcc(Assembler::zero, notMethod); + + invokevirtual_helper(rbx, rcx, rdx); +@@ -3070,6 +3102,7 @@ + + // Get receiver klass into rdx - also a null check + __ restore_locals(); // restore rdi ++ __ null_check(rcx, oopDesc::klass_offset_in_bytes()); + __ load_klass(rdx, rcx); + __ verify_oop(rdx); + +@@ -3084,7 +3117,7 @@ + rbx, rsi, + no_such_interface); + +- // rbx,: methodOop to call ++ // rbx: methodOop to call + // rcx: receiver + // Check for abstract method error + // Note: This should be done more efficiently via a throw_abstract_method_error +@@ -3123,9 +3156,39 @@ + __ should_not_reach_here(); + } + ++void TemplateTable::invokehandle(int byte_no) { ++ transition(vtos, vtos); ++ assert(byte_no == f12_oop, "use this argument"); ++ const Register rbx_method = rbx; // (from f2) ++ const Register rax_mtype = rax; // (from f1) ++ const Register rcx_recv = rcx; ++ const Register rdx_flags = rdx; ++ ++ if (!EnableInvokeDynamic) { ++ // rewriter does not generate this bytecode ++ __ should_not_reach_here(); ++ return; ++ } ++ ++ prepare_invoke(byte_no, ++ rbx_method, rax_mtype, // get f2 methodOop, f1 MethodType ++ rcx_recv); ++ __ verify_oop(rbx_method); ++ __ verify_oop(rcx_recv); ++ __ null_check(rcx_recv); ++ ++ // Note: rax_mtype is already pushed (if necessary) by prepare_invoke ++ ++ // FIXME: profile the LambdaForm also ++ __ profile_final_call(rax); ++ ++ __ jump_from_interpreted(rbx_method, rdx); ++} ++ ++ + void TemplateTable::invokedynamic(int byte_no) { + transition(vtos, vtos); +- 
assert(byte_no == f1_oop, "use this argument"); ++ assert(byte_no == f12_oop, "use this argument"); + + if (!EnableInvokeDynamic) { + // We should not encounter this bytecode if !EnableInvokeDynamic. +@@ -3138,26 +3201,23 @@ + return; + } + +- prepare_invoke(rax, rbx, byte_no); +- +- // rax: CallSite object (f1) +- // rbx: unused (f2) +- // rcx: receiver address +- // rdx: flags (unused) +- +- Register rax_callsite = rax; +- Register rcx_method_handle = rcx; ++ const Register rbx_method = rbx; ++ const Register rax_callsite = rax; ++ ++ prepare_invoke(byte_no, rbx_method, rax_callsite); ++ ++ // rax: CallSite object (from f1) ++ // rbx: MH.linkToCallSite method (from f2) ++ ++ // Note: rax_callsite is already pushed by prepare_invoke + + // %%% should make a type profile for any invokedynamic that takes a ref argument + // profile this call + __ profile_call(rsi); + + __ verify_oop(rax_callsite); +- __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx))); +- __ null_check(rcx_method_handle); +- __ verify_oop(rcx_method_handle); +- __ prepare_to_jump_from_interpreted(); +- __ jump_to_method_handle_entry(rcx_method_handle, rdx); ++ ++ __ jump_from_interpreted(rbx_method, rdx); + } + + //---------------------------------------------------------------------------------------------------- +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/templateTable_x86_32.hpp +--- openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_32.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_32.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -25,10 +25,15 @@ + #ifndef CPU_X86_VM_TEMPLATETABLE_X86_32_HPP + #define CPU_X86_VM_TEMPLATETABLE_X86_32_HPP + +- static void prepare_invoke(Register method, Register index, int byte_no); ++ static void prepare_invoke(int byte_no, ++ Register method, // linked method (or i-klass) ++ Register index = noreg, // itable index, MethodType, etc. 
++ Register recv = noreg, // if caller wants to see it ++ Register flags = noreg // if caller wants to test it ++ ); + static void invokevirtual_helper(Register index, Register recv, + Register flags); +- static void volatile_barrier(Assembler::Membar_mask_bits order_constraint ); ++ static void volatile_barrier(Assembler::Membar_mask_bits order_constraint); + + // Helpers + static void index_check(Register array, Register index); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/templateTable_x86_64.cpp +--- openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -458,7 +458,7 @@ + const Register cache = rcx; + const Register index = rdx; + +- resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1)); ++ resolve_cache_and_index(f12_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1)); + if (VerifyOops) { + __ verify_oop(rax); + } +@@ -2125,10 +2125,11 @@ + assert_different_registers(result, Rcache, index, temp); + + Label resolved; +- if (byte_no == f1_oop) { +- // We are resolved if the f1 field contains a non-null object (CallSite, etc.) +- // This kind of CP cache entry does not need to match the flags byte, because ++ if (byte_no == f12_oop) { ++ // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.) ++ // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because + // there is a 1-1 relation between bytecode type and CP entry type. ++ // The caller will also load a methodOop from f2. 
+ assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD) + __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); + __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset())); +@@ -2157,6 +2158,9 @@ + case Bytecodes::_invokeinterface: + entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); + break; ++ case Bytecodes::_invokehandle: ++ entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); ++ break; + case Bytecodes::_invokedynamic: + entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); + break; +@@ -2167,7 +2171,7 @@ + entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); + break; + default: +- ShouldNotReachHere(); ++ fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode()))); + break; + } + __ movl(temp, (int) bytecode()); +@@ -2180,7 +2184,7 @@ + __ bind(resolved); + } + +-// The Rcache and index registers must be set before call ++// The cache and index registers must be set before call + void TemplateTable::load_field_cp_cache_entry(Register obj, + Register cache, + Register index, +@@ -2191,17 +2195,17 @@ + + ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); + // Field offset +- __ movptr(off, Address(cache, index, Address::times_8, ++ __ movptr(off, Address(cache, index, Address::times_ptr, + in_bytes(cp_base_offset + + ConstantPoolCacheEntry::f2_offset()))); + // Flags +- __ movl(flags, Address(cache, index, Address::times_8, ++ __ movl(flags, Address(cache, index, Address::times_ptr, + in_bytes(cp_base_offset + + ConstantPoolCacheEntry::flags_offset()))); + + // klass overwrite register + if (is_static) { +- __ movptr(obj, Address(cache, index, Address::times_8, ++ __ movptr(obj, Address(cache, index, Address::times_ptr, + in_bytes(cp_base_offset + + ConstantPoolCacheEntry::f1_offset()))); + } +@@ -2222,9 +2226,10 @@ + 
assert_different_registers(itable_index, flags); + assert_different_registers(itable_index, cache, index); + // determine constant pool cache field offsets ++ assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); + const int method_offset = in_bytes( + constantPoolCacheOopDesc::base_offset() + +- (is_invokevirtual ++ ((byte_no == f2_byte) + ? ConstantPoolCacheEntry::f2_offset() + : ConstantPoolCacheEntry::f1_offset())); + const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + +@@ -2233,15 +2238,21 @@ + const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + + ConstantPoolCacheEntry::f2_offset()); + +- if (byte_no == f1_oop) { +- // Resolved f1_oop goes directly into 'method' register. +- assert(is_invokedynamic, ""); +- resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4)); ++ if (byte_no == f12_oop) { ++ // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'. ++ // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset). ++ // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle. ++ size_t index_size = (is_invokedynamic ? 
sizeof(u4) : sizeof(u2)); ++ resolve_cache_and_index(byte_no, itable_index, cache, index, index_size); ++ __ movptr(method, Address(cache, index, Address::times_ptr, index_offset)); ++ itable_index = noreg; // hack to disable load below + } else { + resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2)); + __ movptr(method, Address(cache, index, Address::times_ptr, method_offset)); + } + if (itable_index != noreg) { ++ // pick up itable index from f2 also: ++ assert(byte_no == f1_byte, "already picked up f1"); + __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset)); + } + __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset)); +@@ -2317,10 +2328,11 @@ + Label Done, notByte, notInt, notShort, notChar, + notLong, notFloat, notObj, notDouble; + +- __ shrl(flags, ConstantPoolCacheEntry::tosBits); ++ __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); ++ // Make sure we don't need to mask edx after the above shift + assert(btos == 0, "change code, btos != 0"); + +- __ andl(flags, 0x0F); ++ __ andl(flags, ConstantPoolCacheEntry::tos_state_mask); + __ jcc(Assembler::notZero, notByte); + // btos + __ load_signed_byte(rax, field); +@@ -2466,10 +2478,9 @@ + Address::times_8, + in_bytes(cp_base_offset + + ConstantPoolCacheEntry::flags_offset()))); +- __ shrl(c_rarg3, ConstantPoolCacheEntry::tosBits); +- // Make sure we don't need to mask rcx for tosBits after the +- // above shift +- ConstantPoolCacheEntry::verify_tosBits(); ++ __ shrl(c_rarg3, ConstantPoolCacheEntry::tos_state_shift); ++ // Make sure we don't need to mask rcx after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); + __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue + __ cmpl(c_rarg3, ltos); + __ cmovptr(Assembler::equal, +@@ -2516,7 +2527,7 @@ + + Label notVolatile, Done; + __ movl(rdx, flags); +- __ shrl(rdx, ConstantPoolCacheEntry::volatileField); ++ __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); + 
__ andl(rdx, 0x1); + + // field address +@@ -2525,10 +2536,10 @@ + Label notByte, notInt, notShort, notChar, + notLong, notFloat, notObj, notDouble; + +- __ shrl(flags, ConstantPoolCacheEntry::tosBits); ++ __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); + + assert(btos == 0, "change code, btos != 0"); +- __ andl(flags, 0x0f); ++ __ andl(flags, ConstantPoolCacheEntry::tos_state_mask); + __ jcc(Assembler::notZero, notByte); + + // btos +@@ -2745,7 +2756,7 @@ + // Assembler::StoreStore)); + + Label notVolatile; +- __ shrl(rdx, ConstantPoolCacheEntry::volatileField); ++ __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); + __ andl(rdx, 0x1); + + // Get object from stack +@@ -2826,7 +2837,7 @@ + // __ movl(rdx, Address(rcx, rbx, Address::times_8, + // in_bytes(constantPoolCacheOopDesc::base_offset() + + // ConstantPoolCacheEntry::flags_offset()))); +- // __ shrl(rdx, ConstantPoolCacheEntry::volatileField); ++ // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); + // __ andl(rdx, 0x1); + // } + __ movptr(rbx, Address(rcx, rbx, Address::times_8, +@@ -2914,7 +2925,7 @@ + // __ movl(rdx, Address(rcx, rdx, Address::times_8, + // in_bytes(constantPoolCacheOopDesc::base_offset() + + // ConstantPoolCacheEntry::flags_offset()))); +- // __ shrl(rdx, ConstantPoolCacheEntry::volatileField); ++ // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); + // __ testl(rdx, 0x1); + // __ jcc(Assembler::zero, notVolatile); + // __ membar(Assembler::LoadLoad); +@@ -2934,19 +2945,29 @@ + ShouldNotReachHere(); + } + +-void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) { ++void TemplateTable::prepare_invoke(int byte_no, ++ Register method, // linked method (or i-klass) ++ Register index, // itable index, MethodType, etc. 
++ Register recv, // if caller wants to see it ++ Register flags // if caller wants to test it ++ ) { + // determine flags +- Bytecodes::Code code = bytecode(); ++ const Bytecodes::Code code = bytecode(); + const bool is_invokeinterface = code == Bytecodes::_invokeinterface; + const bool is_invokedynamic = code == Bytecodes::_invokedynamic; ++ const bool is_invokehandle = code == Bytecodes::_invokehandle; + const bool is_invokevirtual = code == Bytecodes::_invokevirtual; + const bool is_invokespecial = code == Bytecodes::_invokespecial; +- const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic); +- const bool receiver_null_check = is_invokespecial; +- const bool save_flags = is_invokeinterface || is_invokevirtual; ++ const bool load_receiver = (recv != noreg); ++ const bool save_flags = (flags != noreg); ++ assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); ++ assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal"); ++ assert(flags == noreg || flags == rdx, ""); ++ assert(recv == noreg || recv == rcx, ""); ++ + // setup registers & access constant pool cache +- const Register recv = rcx; +- const Register flags = rdx; ++ if (recv == noreg) recv = rcx; ++ if (flags == noreg) flags = rdx; + assert_different_registers(method, index, recv, flags); + + // save 'interpreter return address' +@@ -2954,36 +2975,44 @@ + + load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); + +- // load receiver if needed (note: no return address pushed yet) ++ // maybe push appendix to arguments (just before return address) ++ if (is_invokedynamic || is_invokehandle) { ++ Label L_no_push; ++ __ verify_oop(index); ++ __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift)); ++ __ jccb(Assembler::zero, L_no_push); ++ // Push the appendix as a trailing parameter. 
++ // This must be done before we get the receiver, ++ // since the parameter_size includes it. ++ __ push(index); // push appendix (MethodType, CallSite, etc.) ++ __ bind(L_no_push); ++ } ++ ++ // load receiver if needed (after appendix is pushed so parameter size is correct) ++ // Note: no return address pushed yet + if (load_receiver) { +- assert(!is_invokedynamic, ""); + __ movl(recv, flags); +- __ andl(recv, 0xFF); +- Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1)); ++ __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask); ++ const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address ++ const int receiver_is_at_end = -1; // back off one slot to get receiver ++ Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end); + __ movptr(recv, recv_addr); + __ verify_oop(recv); + } + +- // do null check if needed +- if (receiver_null_check) { +- __ null_check(recv); +- } +- + if (save_flags) { + __ movl(r13, flags); + } + + // compute return type +- __ shrl(flags, ConstantPoolCacheEntry::tosBits); +- // Make sure we don't need to mask flags for tosBits after the above shift +- ConstantPoolCacheEntry::verify_tosBits(); ++ __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); ++ // Make sure we don't need to mask flags after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); + // load return address + { +- address table_addr; +- if (is_invokeinterface || is_invokedynamic) +- table_addr = (address)Interpreter::return_5_addrs_by_index_table(); +- else +- table_addr = (address)Interpreter::return_3_addrs_by_index_table(); ++ const address table_addr = (is_invokeinterface || is_invokedynamic) ? 
++ (address)Interpreter::return_5_addrs_by_index_table() : ++ (address)Interpreter::return_3_addrs_by_index_table(); + ExternalAddress table(table_addr); + __ lea(rscratch1, table); + __ movptr(flags, Address(rscratch1, flags, Address::times_ptr)); +@@ -2992,7 +3021,7 @@ + // push return address + __ push(flags); + +- // Restore flag field from the constant pool cache, and restore esi ++ // Restore flags value from the constant pool cache, and restore rsi + // for later null checks. r13 is the bytecode pointer + if (save_flags) { + __ movl(flags, r13); +@@ -3006,11 +3035,13 @@ + Register flags) { + // Uses temporary registers rax, rdx + assert_different_registers(index, recv, rax, rdx); ++ assert(index == rbx, ""); ++ assert(recv == rcx, ""); + + // Test for an invoke of a final method + Label notFinal; + __ movl(rax, flags); +- __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod)); ++ __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift)); + __ jcc(Assembler::zero, notFinal); + + const Register method = index; // method must be rbx +@@ -3018,6 +3049,7 @@ + "methodOop must be rbx for interpreter calling convention"); + + // do the call - the index is actually the method to call ++ // that is, f2 is a vtable index if !is_vfinal, else f2 is a methodOop + __ verify_oop(method); + + // It's final, need a null check here! 
+@@ -3033,20 +3065,13 @@ + // get receiver klass + __ null_check(recv, oopDesc::klass_offset_in_bytes()); + __ load_klass(rax, recv); +- + __ verify_oop(rax); + + // profile this call + __ profile_virtual_call(rax, r14, rdx); + + // get target methodOop & entry point +- const int base = instanceKlass::vtable_start_offset() * wordSize; +- assert(vtableEntry::size() * wordSize == 8, +- "adjust the scaling in the code below"); +- __ movptr(method, Address(rax, index, +- Address::times_8, +- base + vtableEntry::method_offset_in_bytes())); +- __ movptr(rdx, Address(method, methodOopDesc::interpreter_entry_offset())); ++ __ lookup_virtual_method(rax, index, method); + __ jump_from_interpreted(method, rdx); + } + +@@ -3054,7 +3079,10 @@ + void TemplateTable::invokevirtual(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f2_byte, "use this argument"); +- prepare_invoke(rbx, noreg, byte_no); ++ prepare_invoke(byte_no, ++ rbx, // method or vtable index ++ noreg, // unused itable index ++ rcx, rdx); // recv, flags + + // rbx: index + // rcx: receiver +@@ -3067,7 +3095,10 @@ + void TemplateTable::invokespecial(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f1_byte, "use this argument"); +- prepare_invoke(rbx, noreg, byte_no); ++ prepare_invoke(byte_no, rbx, noreg, // get f1 methodOop ++ rcx); // get receiver also for null check ++ __ verify_oop(rcx); ++ __ null_check(rcx); + // do the call + __ verify_oop(rbx); + __ profile_call(rax); +@@ -3078,7 +3109,7 @@ + void TemplateTable::invokestatic(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f1_byte, "use this argument"); +- prepare_invoke(rbx, noreg, byte_no); ++ prepare_invoke(byte_no, rbx); // get f1 methodOop + // do the call + __ verify_oop(rbx); + __ profile_call(rax); +@@ -3094,10 +3125,11 @@ + void TemplateTable::invokeinterface(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f1_byte, "use this argument"); +- prepare_invoke(rax, rbx, byte_no); +- +- // rax: Interface +- 
// rbx: index ++ prepare_invoke(byte_no, rax, rbx, // get f1 klassOop, f2 itable index ++ rcx, rdx); // recv, flags ++ ++ // rax: interface klass (from f1) ++ // rbx: itable index (from f2) + // rcx: receiver + // rdx: flags + +@@ -3107,14 +3139,15 @@ + // another compliant java compiler. + Label notMethod; + __ movl(r14, rdx); +- __ andl(r14, (1 << ConstantPoolCacheEntry::methodInterface)); ++ __ andl(r14, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift)); + __ jcc(Assembler::zero, notMethod); + + invokevirtual_helper(rbx, rcx, rdx); + __ bind(notMethod); + + // Get receiver klass into rdx - also a null check +- __ restore_locals(); // restore r14 ++ __ restore_locals(); // restore r14 ++ __ null_check(rcx, oopDesc::klass_offset_in_bytes()); + __ load_klass(rdx, rcx); + __ verify_oop(rdx); + +@@ -3129,7 +3162,7 @@ + rbx, r13, + no_such_interface); + +- // rbx,: methodOop to call ++ // rbx: methodOop to call + // rcx: receiver + // Check for abstract method error + // Note: This should be done more efficiently via a throw_abstract_method_error +@@ -3166,12 +3199,42 @@ + InterpreterRuntime::throw_IncompatibleClassChangeError)); + // the call_VM checks for exception, so we should never return here. 
+ __ should_not_reach_here(); +- return; + } + ++ ++void TemplateTable::invokehandle(int byte_no) { ++ transition(vtos, vtos); ++ assert(byte_no == f12_oop, "use this argument"); ++ const Register rbx_method = rbx; // f2 ++ const Register rax_mtype = rax; // f1 ++ const Register rcx_recv = rcx; ++ const Register rdx_flags = rdx; ++ ++ if (!EnableInvokeDynamic) { ++ // rewriter does not generate this bytecode ++ __ should_not_reach_here(); ++ return; ++ } ++ ++ prepare_invoke(byte_no, ++ rbx_method, rax_mtype, // get f2 methodOop, f1 MethodType ++ rcx_recv); ++ __ verify_oop(rbx_method); ++ __ verify_oop(rcx_recv); ++ __ null_check(rcx_recv); ++ ++ // Note: rax_mtype is already pushed (if necessary) by prepare_invoke ++ ++ // FIXME: profile the LambdaForm also ++ __ profile_final_call(rax); ++ ++ __ jump_from_interpreted(rbx_method, rdx); ++} ++ ++ + void TemplateTable::invokedynamic(int byte_no) { + transition(vtos, vtos); +- assert(byte_no == f1_oop, "use this argument"); ++ assert(byte_no == f12_oop, "use this argument"); + + if (!EnableInvokeDynamic) { + // We should not encounter this bytecode if !EnableInvokeDynamic. 
+@@ -3184,26 +3247,23 @@ + return; + } + +- prepare_invoke(rax, rbx, byte_no); +- +- // rax: CallSite object (f1) +- // rbx: unused (f2) +- // rcx: receiver address +- // rdx: flags (unused) +- +- Register rax_callsite = rax; +- Register rcx_method_handle = rcx; ++ const Register rbx_method = rbx; ++ const Register rax_callsite = rax; ++ ++ prepare_invoke(byte_no, rbx_method, rax_callsite); ++ ++ // rax: CallSite object (from f1) ++ // rbx: MH.linkToCallSite method (from f2) ++ ++ // Note: rax_callsite is already pushed by prepare_invoke + + // %%% should make a type profile for any invokedynamic that takes a ref argument + // profile this call + __ profile_call(r13); + + __ verify_oop(rax_callsite); +- __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx))); +- __ null_check(rcx_method_handle); +- __ verify_oop(rcx_method_handle); +- __ prepare_to_jump_from_interpreted(); +- __ jump_to_method_handle_entry(rcx_method_handle, rdx); ++ ++ __ jump_from_interpreted(rbx_method, rdx); + } + + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/templateTable_x86_64.hpp +--- openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_64.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_64.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -25,7 +25,12 @@ + #ifndef CPU_X86_VM_TEMPLATETABLE_X86_64_HPP + #define CPU_X86_VM_TEMPLATETABLE_X86_64_HPP + +- static void prepare_invoke(Register method, Register index, int byte_no); ++ static void prepare_invoke(int byte_no, ++ Register method, // linked method (or i-klass) ++ Register index = noreg, // itable index, MethodType, etc. 
++ Register recv = noreg, // if caller wants to see it ++ Register flags = noreg // if caller wants to test it ++ ); + static void invokevirtual_helper(Register index, Register recv, + Register flags); + static void volatile_barrier(Assembler::Membar_mask_bits order_constraint); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/vtableStubs_x86_32.cpp +--- openjdk/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -76,8 +76,7 @@ + // get receiver klass + address npe_addr = __ pc(); + __ movptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes())); +- // compute entry offset (in words) +- int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); ++ + #ifndef PRODUCT + if (DebugVtables) { + Label L; +@@ -93,7 +92,8 @@ + const Register method = rbx; + + // load methodOop and target address +- __ movptr(method, Address(rax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes())); ++ __ lookup_virtual_method(rax, vtable_index, method); ++ + if (DebugVtables) { + Label L; + __ cmpptr(method, (int32_t)NULL_WORD); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/vtableStubs_x86_64.cpp +--- openjdk/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -69,10 +69,6 @@ + address npe_addr = __ pc(); + __ load_klass(rax, j_rarg0); + +- // compute entry offset (in words) +- int entry_offset = +- instanceKlass::vtable_start_offset() + vtable_index * vtableEntry::size(); +- + #ifndef PRODUCT + if (DebugVtables) { + Label L; +@@ -90,9 +86,8 @@ + // load methodOop and target address + const Register method = rbx; + +- __ movptr(method, Address(rax, +- entry_offset * wordSize + +- vtableEntry::method_offset_in_bytes())); ++ __ lookup_virtual_method(rax, vtable_index, method); ++ + if (DebugVtables) { + 
Label L; + __ cmpptr(method, (int32_t)NULL_WORD); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp +--- openjdk/hotspot/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -31,12 +31,17 @@ + return _masm; + } + +- protected: +- address generate_entry(address entry_point) { +- ZeroEntry *entry = (ZeroEntry *) assembler()->pc(); +- assembler()->advance(sizeof(ZeroEntry)); ++ public: ++ static address generate_entry_impl(MacroAssembler* masm, address entry_point) { ++ ZeroEntry *entry = (ZeroEntry *) masm->pc(); ++ masm->advance(sizeof(ZeroEntry)); + entry->set_entry_point(entry_point); + return (address) entry; + } + ++ protected: ++ address generate_entry(address entry_point) { ++ return generate_entry_impl(assembler(), entry_point); ++ } ++ + #endif // CPU_ZERO_VM_CPPINTERPRETERGENERATOR_ZERO_HPP +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/zero/vm/cppInterpreter_zero.cpp +--- openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -188,25 +188,6 @@ + method, istate->osr_entry(), istate->osr_buf(), THREAD); + return; + } +- else if (istate->msg() == BytecodeInterpreter::call_method_handle) { +- oop method_handle = istate->callee(); +- +- // Trim back the stack to put the parameters at the top +- stack->set_sp(istate->stack() + 1); +- +- // Make the call +- process_method_handle(method_handle, THREAD); +- fixup_after_potential_safepoint(); +- +- // Convert the result +- istate->set_stack(stack->sp() - 1); +- +- // Restore the stack +- stack->set_sp(istate->stack_limit() + 1); +- +- // Resume the interpreter +- istate->set_msg(BytecodeInterpreter::method_resume); +- } + else { + ShouldNotReachHere(); + } +@@ -543,35 +524,35 @@ + if (entry->is_volatile()) { + switch 
(entry->flag_state()) { + case ctos: +- SET_LOCALS_INT(object->char_field_acquire(entry->f2()), 0); ++ SET_LOCALS_INT(object->char_field_acquire(entry->f2_as_index()), 0); + break; + + case btos: +- SET_LOCALS_INT(object->byte_field_acquire(entry->f2()), 0); ++ SET_LOCALS_INT(object->byte_field_acquire(entry->f2_as_index()), 0); + break; + + case stos: +- SET_LOCALS_INT(object->short_field_acquire(entry->f2()), 0); ++ SET_LOCALS_INT(object->short_field_acquire(entry->f2_as_index()), 0); + break; + + case itos: +- SET_LOCALS_INT(object->int_field_acquire(entry->f2()), 0); ++ SET_LOCALS_INT(object->int_field_acquire(entry->f2_as_index()), 0); + break; + + case ltos: +- SET_LOCALS_LONG(object->long_field_acquire(entry->f2()), 0); ++ SET_LOCALS_LONG(object->long_field_acquire(entry->f2_as_index()), 0); + break; + + case ftos: +- SET_LOCALS_FLOAT(object->float_field_acquire(entry->f2()), 0); ++ SET_LOCALS_FLOAT(object->float_field_acquire(entry->f2_as_index()), 0); + break; + + case dtos: +- SET_LOCALS_DOUBLE(object->double_field_acquire(entry->f2()), 0); ++ SET_LOCALS_DOUBLE(object->double_field_acquire(entry->f2_as_index()), 0); + break; + + case atos: +- SET_LOCALS_OBJECT(object->obj_field_acquire(entry->f2()), 0); ++ SET_LOCALS_OBJECT(object->obj_field_acquire(entry->f2_as_index()), 0); + break; + + default: +@@ -581,35 +562,35 @@ + else { + switch (entry->flag_state()) { + case ctos: +- SET_LOCALS_INT(object->char_field(entry->f2()), 0); ++ SET_LOCALS_INT(object->char_field(entry->f2_as_index()), 0); + break; + + case btos: +- SET_LOCALS_INT(object->byte_field(entry->f2()), 0); ++ SET_LOCALS_INT(object->byte_field(entry->f2_as_index()), 0); + break; + + case stos: +- SET_LOCALS_INT(object->short_field(entry->f2()), 0); ++ SET_LOCALS_INT(object->short_field(entry->f2_as_index()), 0); + break; + + case itos: +- SET_LOCALS_INT(object->int_field(entry->f2()), 0); ++ SET_LOCALS_INT(object->int_field(entry->f2_as_index()), 0); + break; + + case ltos: +- 
SET_LOCALS_LONG(object->long_field(entry->f2()), 0); ++ SET_LOCALS_LONG(object->long_field(entry->f2_as_index()), 0); + break; + + case ftos: +- SET_LOCALS_FLOAT(object->float_field(entry->f2()), 0); ++ SET_LOCALS_FLOAT(object->float_field(entry->f2_as_index()), 0); + break; + + case dtos: +- SET_LOCALS_DOUBLE(object->double_field(entry->f2()), 0); ++ SET_LOCALS_DOUBLE(object->double_field(entry->f2_as_index()), 0); + break; + + case atos: +- SET_LOCALS_OBJECT(object->obj_field(entry->f2()), 0); ++ SET_LOCALS_OBJECT(object->obj_field(entry->f2_as_index()), 0); + break; + + default: +@@ -637,829 +618,6 @@ + return 0; + } + +-int CppInterpreter::method_handle_entry(methodOop method, +- intptr_t UNUSED, TRAPS) { +- JavaThread *thread = (JavaThread *) THREAD; +- ZeroStack *stack = thread->zero_stack(); +- int argument_slots = method->size_of_parameters(); +- int result_slots = type2size[result_type_of(method)]; +- intptr_t *vmslots = stack->sp(); +- intptr_t *unwind_sp = vmslots + argument_slots; +- +- // Find the MethodType +- address p = (address) method; +- for (jint* pc = method->method_type_offsets_chain(); (*pc) != -1; pc++) { +- p = *(address*)(p + (*pc)); +- } +- oop method_type = (oop) p; +- +- // The MethodHandle is in the slot after the arguments +- oop form = java_lang_invoke_MethodType::form(method_type); +- int num_vmslots = java_lang_invoke_MethodTypeForm::vmslots(form); +- assert(argument_slots == num_vmslots + 1, "should be"); +- oop method_handle = VMSLOTS_OBJECT(num_vmslots); +- +- // InvokeGeneric requires some extra shuffling +- oop mhtype = java_lang_invoke_MethodHandle::type(method_handle); +- bool is_exact = mhtype == method_type; +- if (!is_exact) { +- if (method->intrinsic_id() == vmIntrinsics::_invokeExact) { +- CALL_VM_NOCHECK_NOFIX( +- SharedRuntime::throw_WrongMethodTypeException( +- thread, method_type, mhtype)); +- // NB all oops trashed! 
+- assert(HAS_PENDING_EXCEPTION, "should do"); +- stack->set_sp(unwind_sp); +- return 0; +- } +- assert(method->intrinsic_id() == vmIntrinsics::_invokeGeneric, "should be"); +- +- // Load up an adapter from the calling type +- // NB the x86 code for this (in methodHandles_x86.cpp, search for +- // "genericInvoker") is really really odd. I'm hoping it's trying +- // to accomodate odd VM/class library combinations I can ignore. +- oop adapter = java_lang_invoke_MethodTypeForm::genericInvoker(form); +- if (adapter == NULL) { +- CALL_VM_NOCHECK_NOFIX( +- SharedRuntime::throw_WrongMethodTypeException( +- thread, method_type, mhtype)); +- // NB all oops trashed! +- assert(HAS_PENDING_EXCEPTION, "should do"); +- stack->set_sp(unwind_sp); +- return 0; +- } +- +- // Adapters are shared among form-families of method-type. The +- // type being called is passed as a trusted first argument so that +- // the adapter knows the actual types of its arguments and return +- // values. +- insert_vmslots(num_vmslots + 1, 1, THREAD); +- if (HAS_PENDING_EXCEPTION) { +- // NB all oops trashed! +- stack->set_sp(unwind_sp); +- return 0; +- } +- +- vmslots = stack->sp(); +- num_vmslots++; +- SET_VMSLOTS_OBJECT(method_type, num_vmslots); +- +- method_handle = adapter; +- } +- +- CPPINT_DEBUG( tty->print_cr( "Process method_handle sp: 0x%x unwind_sp: 0x%x result_slots: %d.", \ +- stack->sp(), unwind_sp, result_slots ); ) +- +- // Start processing +- process_method_handle(method_handle, THREAD); +- if (HAS_PENDING_EXCEPTION) +- result_slots = 0; +- +- // If this is an invokeExact then the eventual callee will not +- // have unwound the method handle argument so we have to do it. +- // If a result is being returned the it will be above the method +- // handle argument we're unwinding. 
+- if (is_exact) { +- intptr_t result[2]; +- for (int i = 0; i < result_slots; i++) +- result[i] = stack->pop(); +- stack->pop(); +- for (int i = result_slots - 1; i >= 0; i--) +- stack->push(result[i]); +- } +- +- // Check +- CPPINT_DEBUG( tty->print_cr( "Exiting method_handle_entry, sp: 0x%x unwind_sp: 0x%x result_slots: %d.", \ +- stack->sp(), unwind_sp, result_slots ); ) +- assert(stack->sp() == unwind_sp - result_slots, "should be"); +- +- // No deoptimized frames on the stack +- return 0; +-} +- +-void CppInterpreter::process_method_handle(oop method_handle, TRAPS) { +- +- JavaThread *thread = (JavaThread *) THREAD; +- ZeroStack *stack = thread->zero_stack(); +- intptr_t *vmslots = stack->sp(); +- +- bool direct_to_method = false; +- BasicType src_rtype = T_ILLEGAL; +- BasicType dst_rtype = T_ILLEGAL; +- +- MethodHandleEntry *entry = +- java_lang_invoke_MethodHandle::vmentry(method_handle); +- MethodHandles::EntryKind entry_kind = +- (MethodHandles::EntryKind) (((intptr_t) entry) & 0xffffffff); +- +- methodOop method = NULL; +- CPPINT_DEBUG( tty->print_cr( "\nEntering %s 0x%x.",MethodHandles::entry_name(entry_kind), (char *)vmslots ); ) +- switch (entry_kind) { +- case MethodHandles::_invokestatic_mh: +- direct_to_method = true; +- break; +- +- case MethodHandles::_invokespecial_mh: +- case MethodHandles::_invokevirtual_mh: +- case MethodHandles::_invokeinterface_mh: +- { +- oop receiver = +- VMSLOTS_OBJECT( +- java_lang_invoke_MethodHandle::vmslots(method_handle) - 1); +- if (receiver == NULL) { +- stack->set_sp(calculate_unwind_sp(stack, method_handle)); +- CALL_VM_NOCHECK_NOFIX( +- throw_exception( +- thread, vmSymbols::java_lang_NullPointerException())); +- // NB all oops trashed! 
+- assert(HAS_PENDING_EXCEPTION, "should do"); +- return; +- } +- if (entry_kind != MethodHandles::_invokespecial_mh) { +- int index = java_lang_invoke_DirectMethodHandle::vmindex(method_handle); +- instanceKlass* rcvrKlass = +- (instanceKlass *) receiver->klass()->klass_part(); +- if (entry_kind == MethodHandles::_invokevirtual_mh) { +- method = (methodOop) rcvrKlass->start_of_vtable()[index]; +- } +- else { +- oop iclass = java_lang_invoke_MethodHandle::vmtarget(method_handle); +- itableOffsetEntry* ki = +- (itableOffsetEntry *) rcvrKlass->start_of_itable(); +- int i, length = rcvrKlass->itable_length(); +- for (i = 0; i < length; i++, ki++ ) { +- if (ki->interface_klass() == iclass) +- break; +- } +- if (i == length) { +- stack->set_sp(calculate_unwind_sp(stack, method_handle)); +- CALL_VM_NOCHECK_NOFIX( +- throw_exception( +- thread, vmSymbols::java_lang_IncompatibleClassChangeError())); +- // NB all oops trashed! +- assert(HAS_PENDING_EXCEPTION, "should do"); +- return; +- } +- itableMethodEntry* im = ki->first_method_entry(receiver->klass()); +- method = im[index].method(); +- if (method == NULL) { +- stack->set_sp(calculate_unwind_sp(stack, method_handle)); +- CALL_VM_NOCHECK_NOFIX( +- throw_exception( +- thread, vmSymbols::java_lang_AbstractMethodError())); +- // NB all oops trashed! 
+- assert(HAS_PENDING_EXCEPTION, "should do"); +- return; +- } +- } +- } +- } +- direct_to_method = true; +- break; +- +- case MethodHandles::_bound_ref_direct_mh: +- case MethodHandles::_bound_int_direct_mh: +- case MethodHandles::_bound_long_direct_mh: +- direct_to_method = true; +- // fall through +- case MethodHandles::_bound_ref_mh: +- case MethodHandles::_bound_int_mh: +- case MethodHandles::_bound_long_mh: +- { +- // BasicType arg_type = T_ILLEGAL; +- // int arg_mask = -1; +- // int arg_slots = -1; +- // MethodHandles::get_ek_bound_mh_info( +- // entry_kind, arg_type, arg_mask, arg_slots); +- BasicType arg_type = MethodHandles::ek_bound_mh_arg_type(entry_kind); +- int arg_mask = 0; +- int arg_slots = type2size[arg_type];; +- +- int arg_slot = +- java_lang_invoke_BoundMethodHandle::vmargslot(method_handle); +- +- // Create the new slot(s) +- intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); +- insert_vmslots(arg_slot, arg_slots, THREAD); +- if (HAS_PENDING_EXCEPTION) { +- // all oops trashed +- stack->set_sp(unwind_sp); +- return; +- } +- vmslots = stack->sp(); +- +- // Store bound argument into new stack slot +- oop arg = java_lang_invoke_BoundMethodHandle::argument(method_handle); +- if (arg_type == T_OBJECT) { +- assert(arg_slots == 1, "should be"); +- SET_VMSLOTS_OBJECT(arg, arg_slot); +- } +- else { +- jvalue arg_value; +- arg_type = java_lang_boxing_object::get_value(arg, &arg_value); +- switch (arg_type) { +- case T_BOOLEAN: +- SET_VMSLOTS_INT(arg_value.z, arg_slot); +- break; +- case T_CHAR: +- SET_VMSLOTS_INT(arg_value.c, arg_slot); +- break; +- case T_BYTE: +- SET_VMSLOTS_INT(arg_value.b, arg_slot); +- break; +- case T_SHORT: +- SET_VMSLOTS_INT(arg_value.s, arg_slot); +- break; +- case T_INT: +- SET_VMSLOTS_INT(arg_value.i, arg_slot); +- break; +- case T_FLOAT: +- SET_VMSLOTS_FLOAT(arg_value.f, arg_slot); +- break; +- case T_LONG: +- SET_VMSLOTS_LONG(arg_value.j, arg_slot + 1); +- break; +- case T_DOUBLE: +- 
SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot + 1); +- break; +- default: +- tty->print_cr("unhandled type %s", type2name(arg_type)); +- ShouldNotReachHere(); +- } +- } +- } +- break; +- +- case MethodHandles::_adapter_retype_only: +- case MethodHandles::_adapter_retype_raw: +- src_rtype = result_type_of_handle( +- java_lang_invoke_MethodHandle::vmtarget(method_handle)); +- dst_rtype = result_type_of_handle(method_handle); +- break; +- +- case MethodHandles::_adapter_check_cast: +- { +- int arg_slot = +- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); +- oop arg = VMSLOTS_OBJECT(arg_slot); +- if (arg != NULL) { +- klassOop objKlassOop = arg->klass(); +- klassOop klassOf = java_lang_Class::as_klassOop( +- java_lang_invoke_AdapterMethodHandle::argument(method_handle)); +- +- if (objKlassOop != klassOf && +- !objKlassOop->klass_part()->is_subtype_of(klassOf)) { +- ResourceMark rm(THREAD); +- const char* objName = Klass::cast(objKlassOop)->external_name(); +- const char* klassName = Klass::cast(klassOf)->external_name(); +- char* message = SharedRuntime::generate_class_cast_message( +- objName, klassName); +- +- stack->set_sp(calculate_unwind_sp(stack, method_handle)); +- CALL_VM_NOCHECK_NOFIX( +- throw_exception( +- thread, vmSymbols::java_lang_ClassCastException(), message)); +- // NB all oops trashed! 
+- assert(HAS_PENDING_EXCEPTION, "should do"); +- return; +- } +- } +- } +- break; +- +- case MethodHandles::_adapter_dup_args: +- { +- int arg_slot = +- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); +- int conv = +- java_lang_invoke_AdapterMethodHandle::conversion(method_handle); +- int num_slots = -MethodHandles::adapter_conversion_stack_move(conv); +- assert(num_slots > 0, "should be"); +- +- // Create the new slot(s) +- intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); +- stack->overflow_check(num_slots, THREAD); +- if (HAS_PENDING_EXCEPTION) { +- // all oops trashed +- stack->set_sp(unwind_sp); +- return; +- } +- +- // Duplicate the arguments +- for (int i = num_slots - 1; i >= 0; i--) +- stack->push(*VMSLOTS_SLOT(arg_slot + i)); +- +- vmslots = stack->sp(); // unused, but let the compiler figure that out +- } +- break; +- +- case MethodHandles::_adapter_drop_args: +- { +- int arg_slot = +- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); +- int conv = +- java_lang_invoke_AdapterMethodHandle::conversion(method_handle); +- int num_slots = MethodHandles::adapter_conversion_stack_move(conv); +- assert(num_slots > 0, "should be"); +- +- remove_vmslots(arg_slot, num_slots, THREAD); // doesn't trap +- vmslots = stack->sp(); // unused, but let the compiler figure that out +- } +- break; +- +- case MethodHandles::_adapter_opt_swap_1: +- case MethodHandles::_adapter_opt_swap_2: +- case MethodHandles::_adapter_opt_rot_1_up: +- case MethodHandles::_adapter_opt_rot_1_down: +- case MethodHandles::_adapter_opt_rot_2_up: +- case MethodHandles::_adapter_opt_rot_2_down: +- { +- int arg1 = +- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); +- int conv = +- java_lang_invoke_AdapterMethodHandle::conversion(method_handle); +- int arg2 = MethodHandles::adapter_conversion_vminfo(conv); +- +- // int swap_bytes = 0, rotate = 0; +- // MethodHandles::get_ek_adapter_opt_swap_rot_info( +- // entry_kind, swap_bytes, rotate); 
+- int swap_slots = MethodHandles::ek_adapter_opt_swap_slots(entry_kind); +- int rotate = MethodHandles::ek_adapter_opt_swap_mode(entry_kind); +- int swap_bytes = swap_slots * Interpreter::stackElementSize; +- swap_slots = swap_bytes >> LogBytesPerWord; +- +- intptr_t tmp; +- switch (rotate) { +- case 0: // swap +- for (int i = 0; i < swap_slots; i++) { +- tmp = *VMSLOTS_SLOT(arg1 + i); +- SET_VMSLOTS_SLOT(VMSLOTS_SLOT(arg2 + i), arg1 + i); +- SET_VMSLOTS_SLOT(&tmp, arg2 + i); +- } +- break; +- +- case 1: // up +- assert(arg1 - swap_slots > arg2, "should be"); +- +- tmp = *VMSLOTS_SLOT(arg1); +- for (int i = arg1 - swap_slots; i >= arg2; i--) +- SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i + swap_slots); +- SET_VMSLOTS_SLOT(&tmp, arg2); +- +- break; +- +- case -1: // down +- assert(arg2 - swap_slots > arg1, "should be"); +- +- tmp = *VMSLOTS_SLOT(arg1); +- for (int i = arg1 + swap_slots; i <= arg2; i++) +- SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i - swap_slots); +- SET_VMSLOTS_SLOT(&tmp, arg2); +- break; +- +- default: +- ShouldNotReachHere(); +- } +- } +- break; +- +- case MethodHandles::_adapter_opt_i2l: +- { +- int arg_slot = +- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); +- int arg = VMSLOTS_INT(arg_slot); +- intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); +- insert_vmslots(arg_slot, 1, THREAD); +- if (HAS_PENDING_EXCEPTION) { +- // all oops trashed +- stack->set_sp(unwind_sp); +- return; +- } +- vmslots = stack->sp(); +- arg_slot++; +- SET_VMSLOTS_LONG(arg, arg_slot); +- } +- break; +- +- case MethodHandles::_adapter_opt_unboxi: +- case MethodHandles::_adapter_opt_unboxl: +- { +- int arg_slot = +- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); +- oop arg = VMSLOTS_OBJECT(arg_slot); +- jvalue arg_value; +- if (arg == NULL) { +- // queue a nullpointer exception for the caller +- stack->set_sp(calculate_unwind_sp(stack, method_handle)); +- CALL_VM_NOCHECK_NOFIX( +- throw_exception( +- thread, 
vmSymbols::java_lang_NullPointerException())); +- // NB all oops trashed! +- assert(HAS_PENDING_EXCEPTION, "should do"); +- return; +- } +- BasicType arg_type = java_lang_boxing_object::get_value(arg, &arg_value); +- if (arg_type == T_LONG || arg_type == T_DOUBLE) { +- intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); +- insert_vmslots(arg_slot, 1, THREAD); +- if (HAS_PENDING_EXCEPTION) { +- // all oops trashed +- stack->set_sp(unwind_sp); +- return; +- } +- vmslots = stack->sp(); +- arg_slot++; +- } +- switch (arg_type) { +- case T_BOOLEAN: +- SET_VMSLOTS_INT(arg_value.z, arg_slot); +- break; +- case T_CHAR: +- SET_VMSLOTS_INT(arg_value.c, arg_slot); +- break; +- case T_BYTE: +- SET_VMSLOTS_INT(arg_value.b, arg_slot); +- break; +- case T_SHORT: +- SET_VMSLOTS_INT(arg_value.s, arg_slot); +- break; +- case T_INT: +- SET_VMSLOTS_INT(arg_value.i, arg_slot); +- break; +- case T_FLOAT: +- SET_VMSLOTS_FLOAT(arg_value.f, arg_slot); +- break; +- case T_LONG: +- SET_VMSLOTS_LONG(arg_value.j, arg_slot); +- break; +- case T_DOUBLE: +- SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot); +- break; +- default: +- tty->print_cr("unhandled type %s", type2name(arg_type)); +- ShouldNotReachHere(); +- } +- } +- break; +- +- case MethodHandles::_adapter_opt_spread_0: +- case MethodHandles::_adapter_opt_spread_1_ref: +- case MethodHandles::_adapter_opt_spread_2_ref: +- case MethodHandles::_adapter_opt_spread_3_ref: +- case MethodHandles::_adapter_opt_spread_4_ref: +- case MethodHandles::_adapter_opt_spread_5_ref: +- case MethodHandles::_adapter_opt_spread_ref: +- case MethodHandles::_adapter_opt_spread_byte: +- case MethodHandles::_adapter_opt_spread_char: +- case MethodHandles::_adapter_opt_spread_short: +- case MethodHandles::_adapter_opt_spread_int: +- case MethodHandles::_adapter_opt_spread_long: +- case MethodHandles::_adapter_opt_spread_float: +- case MethodHandles::_adapter_opt_spread_double: +- { +- +- // spread an array out into a group of arguments +- +- int arg_slot = 
+- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); +- // Fetch the argument, which we will cast to the required array type. +- oop arg = VMSLOTS_OBJECT(arg_slot); +- +- BasicType elem_type = +- MethodHandles::ek_adapter_opt_spread_type(entry_kind); +- int elem_slots = +- type2size[elem_type]; // 1 or 2 +- int array_slots = +- 1; // array is always a T_OBJECT +- int length_offset = +- arrayOopDesc::length_offset_in_bytes(); +- int elem0_offset = +- arrayOopDesc::base_offset_in_bytes(elem_type); +- int length_constant = +- MethodHandles::ek_adapter_opt_spread_count(entry_kind); +- int array_length = 0; +- void *array_elem0 = NULL; +- +- CPPINT_DEBUG( tty->print_cr( \ +- "ENTERING _adapter_opt_spread: %s %d %d 0x%x 0x%x", \ +- type2name(elem_type), arg_slot, length_constant, (char *)arg, stack->sp() ); ) +- +- // If the spread count is -1, the length is "variable" ie controlled +- // by the array length. +- // See ek_adapter_opt_spread_count in methodHandles.hpp +- // If array lenth is 0 or spread count is 0 , we will remove the argslot. +- +- bool length_can_be_zero = (length_constant == 0); +- if (length_constant < 0) { +- // some adapters with variable length must handle the zero case +- if (!OptimizeMethodHandles || +- elem_type != T_OBJECT) +- length_can_be_zero = true; +- } +- +- if (arg == NULL) { +- CPPINT_DEBUG( tty->print_cr( \ +- "arg NULL implies Array_length == 0, remove slot." ); ) +- // remove arg slot +- remove_vmslots(arg_slot, 1, THREAD); // doesn't trap +- vmslots = stack->sp(); // unused, but let the compiler figure that out +- CPPINT_DEBUG( tty->print_cr( \ +- " >> Would LEAVE _adapter_opt_spread with NPE." ); ) +-#ifdef _NOT_DEF_ +- // queue a nullpointer exception for the caller +- stack->set_sp(calculate_unwind_sp(stack, method_handle)); +- CALL_VM_NOCHECK_NOFIX( +- throw_exception( +- thread, +- vmSymbols::java_lang_NullPointerException())); +- // NB all oops trashed! 
+- assert(HAS_PENDING_EXCEPTION, "should do"); +- return; +-#endif +- } else { // (arg != NULL) +- klassOop objKlassOop = arg->klass(); +- klassOop klassOf = java_lang_Class::as_klassOop( +- java_lang_invoke_AdapterMethodHandle::argument(method_handle)); +- +- if (objKlassOop != klassOf && +- !objKlassOop->klass_part()->is_subtype_of(klassOf)) { +- CPPINT_DEBUG( tty->print_cr( \ +- "CLASS CAST ERROR #1 in _adapter_opt_spread." ); ) +- ResourceMark rm(THREAD); +- const char* objName = Klass::cast(objKlassOop)->external_name(); +- const char* klassName = Klass::cast(klassOf)->external_name(); +- char* message = SharedRuntime::generate_class_cast_message( +- objName, klassName); +- +- stack->set_sp(calculate_unwind_sp(stack, method_handle)); +- CALL_VM_NOCHECK_NOFIX( +- throw_exception( +- thread, +- vmSymbols::java_lang_ClassCastException(), message)); +- // NB all oops trashed! +- assert(HAS_PENDING_EXCEPTION, "should do"); +- return; +- } +- +- // Check the array type. +- +- klassOop array_klass_oop = NULL; +- BasicType array_type = java_lang_Class::as_BasicType( +- java_lang_invoke_AdapterMethodHandle::argument(method_handle), +- &array_klass_oop); +- arrayKlassHandle array_klass(THREAD, array_klass_oop); +- +- assert(array_type == T_OBJECT, ""); +- assert(Klass::cast(array_klass_oop)->oop_is_array(), ""); +- if (!(array_type == T_OBJECT) || +- !(Klass::cast(array_klass_oop)->oop_is_array())) { +- CPPINT_DEBUG( tty->print_cr( \ +- "CLASS CAST ERROR #2 not an array in _adapter_opt_spread." ); ) +- ResourceMark rm(THREAD); +- const char* objName = Klass::cast(objKlassOop)->external_name(); +- const char* klassName = Klass::cast(klassOf)->external_name(); +- char* message = SharedRuntime::generate_class_cast_message( +- objName, klassName); +- stack->set_sp(calculate_unwind_sp(stack, method_handle)); +- CALL_VM_NOCHECK_NOFIX( +- throw_exception( +- thread, +- vmSymbols::java_lang_ClassCastException(), message)); +- // NB all oops trashed! 
+- assert(HAS_PENDING_EXCEPTION, "should do"); +- return; +- } +- +- klassOop element_klass_oop = NULL; +- BasicType element_type = +- java_lang_Class::as_BasicType(array_klass->component_mirror(), +- &element_klass_oop); +- KlassHandle element_klass(THREAD, element_klass_oop); +- if ((elem_type != T_OBJECT) && (elem_type != element_type)) { +- CPPINT_DEBUG( tty->print_cr( \ +- "CLASS CAST ERROR #3 invalid type %s != %s in _adapter_opt_spread.", \ +- type2name(elem_type), type2name(element_type) ); ) +- ResourceMark rm(THREAD); +- const char* objName = Klass::cast(objKlassOop)->external_name(); +- const char* klassName = Klass::cast(klassOf)->external_name(); +- char* message = SharedRuntime::generate_class_cast_message( +- objName, klassName); +- stack->set_sp(calculate_unwind_sp(stack, method_handle)); +- CALL_VM_NOCHECK_NOFIX( +- throw_exception( +- thread, +- vmSymbols::java_lang_ClassCastException(), message)); +- // NB all oops trashed! +- assert(HAS_PENDING_EXCEPTION, "should do"); +- return; +- } +- +- array_length = arrayOop(arg)->length(); +- +- // Check the required length. +- if (length_constant > 0) { // must match ? +- if ( array_length != length_constant ) { +- CPPINT_DEBUG( tty->print_cr( \ +- "ARRY INDEX ERROR #4 invalid array length in _adapter_opt_spread." ); ) +- //fixme ArrayIndexOutOfBoundsException ? +- ResourceMark rm(THREAD); +- const char* objName = Klass::cast(objKlassOop)->external_name(); +- const char* klassName = Klass::cast(klassOf)->external_name(); +- char* message = SharedRuntime::generate_class_cast_message( +- objName, klassName); +- +- stack->set_sp(calculate_unwind_sp(stack, method_handle)); +- CALL_VM_NOCHECK_NOFIX( +- throw_exception( +- thread, +- vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message)); +- // NB all oops trashed! +- assert(HAS_PENDING_EXCEPTION, "should do"); +- return; +- } +- // use array_length ? 
+- } else { // length_constant == [ -1 or 0 ] +- if ( (array_length > 0) || length_can_be_zero ) { +- // use array_length. +- } else { // array_length 0 and not length_can_be_zero +- CPPINT_DEBUG( tty->print_cr( \ +- "ARRY INDEX ERROR #5 arry length 0 in _adapter_opt_spread." ); ) +- //fixme ArrayIndexOutOfBoundsException ? +- ResourceMark rm(THREAD); +- const char* objName = Klass::cast(objKlassOop)->external_name(); +- const char* klassName = Klass::cast(klassOf)->external_name(); +- char* message = SharedRuntime::generate_class_cast_message( +- objName, klassName); +- +- stack->set_sp(calculate_unwind_sp(stack, method_handle)); +- CALL_VM_NOCHECK_NOFIX( +- throw_exception( +- thread, +- vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message)); +- // NB all oops trashed! +- assert(HAS_PENDING_EXCEPTION, "should do"); +- return; +- } +- } +- +- // Array length checked out. Now insert any required arg slots. +- // array_length - 1 more slots if array_length > 0 +- // otherwise if array_length == 0 remove arg_slot. +- +- if ( array_length > 0 ) { +- int slots = (array_length * elem_slots) - 1; +- CPPINT_DEBUG( tty->print_cr( \ +- "array_length %d %d slots needed in _adapter_opt_spread.",\ +- array_length, slots); ) +- debug_only(if (elem_slots == 2) \ +- assert ((slots % 2 == 1)," bad slots calc")); +- if ( slots > 0 ) { +- intptr_t *unwind_sp = +- calculate_unwind_sp(stack, method_handle); +- insert_vmslots(arg_slot, slots, THREAD); +- if (HAS_PENDING_EXCEPTION) { +- // all oops trashed +- stack->set_sp(unwind_sp); +- return; +- } +- } +- vmslots = stack->sp(); +- arg_slot += slots; +- +- array_elem0 = arrayOop(arg)->base(elem_type); +- +- // Copy from the array to the new arg slots. +- // [from native : Beware: Arguments that are shallow +- // on the stack are deep in the array, +- // and vice versa. So a downward-growing stack (the usual) +- // has to be copied elementwise in reverse order +- // from the source array.] 
+- +- void * array_elem = array_elem0; +- int top_slot = arg_slot; +- +- debug_only(if (elem_slots == 2) \ +- assert ((((ulong)(char *)&vmslots[top_slot]) % \ +- (u_int)type2aelembytes(elem_type) == 0), \ +- " bad arg alignment")); +- +- CPPINT_DEBUG( tty->print_cr( \ +- "BEGIN ARRY LOOP %d %d 0x%x 0x%x _adapter_opt_spread.",\ +- array_length, top_slot, &vmslots[top_slot], array_elem ); ) +- +- for (int index = 0; index < array_length; index++) { +- switch (elem_type) { +- case T_BYTE: +- SET_VMSLOTS_INT(*(jint*)array_elem, top_slot); +- break; +- case T_CHAR: +- SET_VMSLOTS_INT(*(jint*)array_elem, top_slot); +- break; +- case T_SHORT: +- SET_VMSLOTS_INT(*(jint*)array_elem, top_slot); +- break; +- case T_INT: +- SET_VMSLOTS_INT(*(jint*)array_elem, top_slot); +- break; +- case T_FLOAT: +- SET_VMSLOTS_FLOAT(*(jfloat*)array_elem,top_slot); +- break; +- case T_LONG: +- SET_VMSLOTS_LONG(*(jlong*)array_elem, top_slot); +- break; +- case T_DOUBLE: +- SET_VMSLOTS_DOUBLE(*(jdouble*)array_elem, top_slot); +- break; +- case T_OBJECT: +- SET_VMSLOTS_OBJECT(*(oopDesc**)array_elem, top_slot); +- break; +- default: +- tty->print_cr("unhandled type %s", type2name(elem_type)); +- ShouldNotReachHere(); +- } +- array_elem = (void*)((char *)array_elem + +- type2aelembytes(element_type)); +- top_slot -= elem_slots; +- } +- arg_slot++; +- } +- } +- if ((array_length == 0) && (arg != NULL)) { +- CPPINT_DEBUG( tty->print_cr( \ +- "Array_length == 0, will remove slot." 
); ) +- // remove arg slot +- remove_vmslots(arg_slot, 1, THREAD); // doesn't trap +- // unused, but let the compiler figure that out +- vmslots = stack->sp(); +- // +- } +- CPPINT_DEBUG( tty->print_cr( \ +- "LEAVING _adapter_opt_spread: %s 0x%x 0x%x \n", \ +- type2name(elem_type), (char *)arg, (char *)stack->sp() ); ) +- } +- break; +- default: +- tty->print_cr("unhandled entry_kind %s", +- MethodHandles::entry_name(entry_kind)); +- ShouldNotReachHere(); +- } +- +- +- // Continue along the chain +- if (direct_to_method) { +- if (method == NULL) { +- method = +- (methodOop) java_lang_invoke_MethodHandle::vmtarget(method_handle); +- } +- address entry_point = method->from_interpreted_entry(); +- Interpreter::invoke_method(method, entry_point, THREAD); +- } +- else { +- process_method_handle( +- java_lang_invoke_MethodHandle::vmtarget(method_handle), THREAD); +- } +- // NB all oops now trashed +- +- // Adapt the result type, if necessary +- if (src_rtype != dst_rtype && !HAS_PENDING_EXCEPTION) { +- switch (dst_rtype) { +- case T_VOID: +- for (int i = 0; i < type2size[src_rtype]; i++) +- stack->pop(); +- return; +- +- case T_INT: +- switch (src_rtype) { +- case T_VOID: +- stack->overflow_check(1, CHECK); +- stack->push(0); +- return; +- +- case T_BOOLEAN: +- case T_CHAR: +- case T_BYTE: +- case T_SHORT: +- return; +- } +- // INT results sometimes need narrowing +- case T_BOOLEAN: +- case T_CHAR: +- case T_BYTE: +- case T_SHORT: +- switch (src_rtype) { +- case T_INT: +- return; +- } +- } +- +- tty->print_cr("unhandled conversion:"); +- tty->print_cr("src_rtype = %s", type2name(src_rtype)); +- tty->print_cr("dst_rtype = %s", type2name(dst_rtype)); +- ShouldNotReachHere(); +- } +- CPPINT_DEBUG( tty->print_cr( "LEAVING %s\n",MethodHandles::entry_name(entry_kind) ); ) +-} +- + // The new slots will be inserted before slot insert_before. + // Slots < insert_before will have the same slot number after the insert. + // Slots >= insert_before will become old_slot + num_slots. 
+@@ -1499,8 +657,7 @@ + intptr_t* CppInterpreter::calculate_unwind_sp(ZeroStack* stack, + oop method_handle) { + oop method_type = java_lang_invoke_MethodHandle::type(method_handle); +- oop form = java_lang_invoke_MethodType::form(method_type); +- int argument_slots = java_lang_invoke_MethodTypeForm::vmslots(form); ++ int argument_slots = java_lang_invoke_MethodType::ptype_slot_count(method_type); + + return stack->sp() + argument_slots; + } +@@ -1713,10 +870,6 @@ + entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry(); + break; + +- case Interpreter::method_handle: +- entry_point = ((InterpreterGenerator*) this)->generate_method_handle_entry(); +- break; +- + case Interpreter::java_lang_math_sin: + case Interpreter::java_lang_math_cos: + case Interpreter::java_lang_math_tan: +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/zero/vm/interpreterGenerator_zero.hpp +--- openjdk/hotspot/src/cpu/zero/vm/interpreterGenerator_zero.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/zero/vm/interpreterGenerator_zero.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -38,6 +38,5 @@ + address generate_empty_entry(); + address generate_accessor_entry(); + address generate_Reference_get_entry(); +- address generate_method_handle_entry(); + + #endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/zero/vm/interpreter_zero.cpp +--- openjdk/hotspot/src/cpu/zero/vm/interpreter_zero.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/zero/vm/interpreter_zero.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -70,14 +70,6 @@ + return generate_entry((address) ShouldNotCallThisEntry()); + } + +-address InterpreterGenerator::generate_method_handle_entry() { +-#ifdef CC_INTERP +- return generate_entry((address) CppInterpreter::method_handle_entry); +-#else +- return generate_entry((address) ShouldNotCallThisEntry()); +-#endif // CC_INTERP +-} +- + bool AbstractInterpreter::can_be_compiled(methodHandle m) { + return true; + } +diff 
-r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/zero/vm/methodHandles_zero.cpp +--- openjdk/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -24,89 +24,161 @@ + */ + + #include "precompiled.hpp" ++#include "interpreter/interpreterGenerator.hpp" + #include "interpreter/interpreter.hpp" + #include "memory/allocation.inline.hpp" + #include "prims/methodHandles.hpp" + + #define __ _masm-> + +-int MethodHandles::adapter_conversion_ops_supported_mask() { +- return ((1<zero_stack(); ++ InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame(); ++ interpreterState istate = frame->interpreter_state(); ++ ++ // Trim back the stack to put the parameters at the top ++ stack->set_sp(istate->stack() + 1); ++ ++ Interpreter::invoke_method(method, method->from_interpreted_entry(), THREAD); ++ ++ // Convert the result ++ istate->set_stack(stack->sp() - 1); ++ ++ } ++ ++oop MethodHandles::popFromStack(TRAPS) { ++ ++ JavaThread *thread = (JavaThread *) THREAD; ++ InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame(); ++ interpreterState istate = frame->interpreter_state(); ++ intptr_t* topOfStack = istate->stack(); ++ ++ oop top = STACK_OBJECT(-1); ++ MORE_STACK(-1); ++ istate->set_stack(topOfStack); ++ ++ return top; ++ + } + +-void MethodHandles::generate_method_handle_stub(MacroAssembler* masm, +- MethodHandles::EntryKind ek) { +- init_entry(ek, (MethodHandleEntry *) ek); +-} +-void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm, +- // output params: +- int* bounce_offset, +- int* exception_offset, +- int* frame_size_in_words) { +- (*frame_size_in_words) = 0; +- address start = __ pc(); +- (*bounce_offset) = __ pc() - start; +- (*exception_offset) = __ pc() - start; ++int MethodHandles::method_handle_entry_invokeBasic(methodOop method, intptr_t UNUSED, TRAPS) { ++ ++ JavaThread *thread = (JavaThread *) 
THREAD; ++ InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame(); ++ interpreterState istate = frame->interpreter_state(); ++ intptr_t* topOfStack = istate->stack(); ++ ++ // 'this' is a MethodHandle. We resolve the target method by accessing this.form.vmentry.vmtarget. ++ int numArgs = method->size_of_parameters(); ++ oop lform1 = java_lang_invoke_MethodHandle::form(STACK_OBJECT(-numArgs)); // this.form ++ oop vmEntry1 = java_lang_invoke_LambdaForm::vmentry(lform1); ++ methodOop vmtarget = (methodOop) java_lang_invoke_MemberName::vmtarget(vmEntry1); ++ ++ invoke_target(vmtarget, THREAD); ++ ++ // No deoptimized frames on the stack ++ return 0; + } + +-frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) { +- //RicochetFrame* f = RicochetFrame::from_frame(fr); +- // Cf. is_interpreted_frame path of frame::sender +- // intptr_t* younger_sp = fr.sp(); +- // intptr_t* sp = fr.sender_sp(); +- // return frame(sp, younger_sp, this_frame_adjusted_stack); +- ShouldNotCallThis(); ++int MethodHandles::method_handle_entry_linkToStaticOrSpecial(methodOop method, intptr_t UNUSED, TRAPS) { ++ ++ // Pop appendix argument from stack. This is a MemberName which we resolve to the ++ // target method. 
++ oop vmentry = popFromStack(THREAD); ++ ++ methodOop vmtarget = (methodOop) java_lang_invoke_MemberName::vmtarget(vmentry); ++ ++ invoke_target(vmtarget, THREAD); ++ ++ return 0; + } + +-void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) { +- // ResourceMark rm; +- // RicochetFrame* f = RicochetFrame::from_frame(fr); ++int MethodHandles::method_handle_entry_linkToInterface(methodOop method, intptr_t UNUSED, TRAPS) { ++ JavaThread *thread = (JavaThread *) THREAD; ++ InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame(); ++ interpreterState istate = frame->interpreter_state(); + +- // pick up the argument type descriptor: +- // Thread* thread = Thread::current(); +- // process fixed part +- // blk->do_oop((oop*)f->saved_target_addr()); +- // blk->do_oop((oop*)f->saved_args_layout_addr()); ++ // Pop appendix argument from stack. This is a MemberName which we resolve to the ++ // target method. ++ oop vmentry = popFromStack(THREAD); ++ intptr_t* topOfStack = istate->stack(); + +- // process variable arguments: +- // if (cookie.is_null()) return; // no arguments to describe ++ // Resolve target method by looking up in the receiver object's itable. 
++ klassOop clazz = java_lang_Class::as_klassOop(java_lang_invoke_MemberName::clazz(vmentry)); ++ intptr_t vmindex = java_lang_invoke_MemberName::vmindex(vmentry); ++ methodOop target = (methodOop) java_lang_invoke_MemberName::vmtarget(vmentry); + +- // the cookie is actually the invokeExact method for my target +- // his argument signature is what I'm interested in +- // assert(cookie->is_method(), ""); +- // methodHandle invoker(thread, methodOop(cookie())); +- // assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method"); +- // assert(!invoker->is_static(), "must have MH argument"); +- // int slot_count = invoker->size_of_parameters(); +- // assert(slot_count >= 1, "must include 'this'"); +- // intptr_t* base = f->saved_args_base(); +- // intptr_t* retval = NULL; +- // if (f->has_return_value_slot()) +- // retval = f->return_value_slot_addr(); +- // int slot_num = slot_count - 1; +- // intptr_t* loc = &base[slot_num]; +- //blk->do_oop((oop*) loc); // original target, which is irrelevant +- // int arg_num = 0; +- // for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) { +- // if (ss.at_return_type()) continue; +- // BasicType ptype = ss.type(); +- // if (ptype == T_ARRAY) ptype = T_OBJECT; // fold all refs to T_OBJECT +- // assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void"); +- // slot_num -= type2size[ptype]; +- // loc = &base[slot_num]; +- // bool is_oop = (ptype == T_OBJECT && loc != retval); +- // if (is_oop) blk->do_oop((oop*)loc); +- // arg_num += 1; +- // } +- // assert(slot_num == 0, "must have processed all the arguments"); ++ int numArgs = target->size_of_parameters(); ++ oop recv = STACK_OBJECT(-numArgs); ++ ++ instanceKlass* recvKlass = (instanceKlass *) recv->klass()->klass_part(); ++ itableOffsetEntry* ki = (itableOffsetEntry*) recvKlass->start_of_itable(); ++ int i; ++ for ( i = 0 ; i < recvKlass->itable_length() ; i++, ki++ ) { ++ if (ki->interface_klass() == clazz) break; ++ } ++ 
++ itableMethodEntry* im = ki->first_method_entry(recv->klass()); ++ methodOop vmtarget = im[vmindex].method(); ++ ++ invoke_target(vmtarget, THREAD); ++ ++ return 0; + } ++ ++int MethodHandles::method_handle_entry_linkToVirtual(methodOop method, intptr_t UNUSED, TRAPS) { ++ JavaThread *thread = (JavaThread *) THREAD; ++ ++ InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame(); ++ interpreterState istate = frame->interpreter_state(); ++ ++ // Pop appendix argument from stack. This is a MemberName which we resolve to the ++ // target method. ++ oop vmentry = popFromStack(THREAD); ++ intptr_t* topOfStack = istate->stack(); ++ ++ // Resolve target method by looking up in the receiver object's vtable. ++ intptr_t vmindex = java_lang_invoke_MemberName::vmindex(vmentry); ++ methodOop target = (methodOop) java_lang_invoke_MemberName::vmtarget(vmentry); ++ int numArgs = target->size_of_parameters(); ++ oop recv = STACK_OBJECT(-numArgs); ++ instanceKlass* recvKlass_part = (instanceKlass *) recv->klass()->klass_part(); ++ ++ klassVtable* vtable = recvKlass_part->vtable(); ++ methodOop vmtarget = vtable->method_at(vmindex); ++ ++ invoke_target(vmtarget, THREAD); ++ ++ return 0; ++} ++ ++int MethodHandles::method_handle_entry_invalid(methodOop method, intptr_t UNUSED, TRAPS) { ++ ShouldNotReachHere(); ++ return 0; ++} ++ ++address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* masm, ++ vmIntrinsics::ID iid) { ++ switch (iid) { ++ case vmIntrinsics::_invokeGeneric: ++ case vmIntrinsics::_compiledLambdaForm: ++ // Perhaps surprisingly, the symbolic references visible to Java are not directly used. ++ // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod. ++ // They all allow an appendix argument. 
++ return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invalid); ++ case vmIntrinsics::_invokeBasic: ++ return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invokeBasic); ++ case vmIntrinsics::_linkToStatic: ++ case vmIntrinsics::_linkToSpecial: ++ return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToStaticOrSpecial); ++ case vmIntrinsics::_linkToInterface: ++ return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToInterface); ++ case vmIntrinsics::_linkToVirtual: ++ return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToVirtual); ++ default: ++ ShouldNotReachHere(); ++ return NULL; ++ } ++} +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/zero/vm/methodHandles_zero.hpp +--- openjdk/hotspot/src/cpu/zero/vm/methodHandles_zero.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/zero/vm/methodHandles_zero.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -26,29 +26,16 @@ + + // Adapters + enum /* platform_dependent_constants */ { +- adapter_code_size = 0 ++ adapter_code_size = sizeof(ZeroEntry) * (Interpreter::method_handle_invoke_LAST - Interpreter::method_handle_invoke_FIRST + 1) + }; + +-class RicochetFrame : public ResourceObj { +- friend class MethodHandles; +- private: +- /* +- RF field x86 SPARC +- sender_pc *(rsp+0) I7-0x8 +- sender_link rbp I6+BIAS +- exact_sender_sp rsi/r13 I5_savedSP +- conversion *(rcx+&amh_conv) L5_conv +- saved_args_base rax L4_sab (cf. 
Gargs = G4) +- saved_args_layout #NULL L3_sal +- saved_target *(rcx+&mh_vmtgt) L2_stgt +- continuation #STUB_CON L1_cont +- */ +- public: +- +-static void generate_ricochet_blob(MacroAssembler* _masm, +- // output params: +- int* bounce_offset, +- int* exception_offset, +- int* frame_size_in_words); ++private: ++ static oop popFromStack(TRAPS); ++ static void invoke_target(methodOop method, TRAPS); ++ static int method_handle_entry_invokeBasic(methodOop method, intptr_t UNUSED, TRAPS); ++ static int method_handle_entry_linkToStaticOrSpecial(methodOop method, intptr_t UNUSED, TRAPS); ++ static int method_handle_entry_linkToVirtual(methodOop method, intptr_t UNUSED, TRAPS); ++ static int method_handle_entry_linkToInterface(methodOop method, intptr_t UNUSED, TRAPS); ++ static int method_handle_entry_invalid(methodOop method, intptr_t UNUSED, TRAPS); + + }; +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/zero/vm/register_zero.hpp +--- openjdk/hotspot/src/cpu/zero/vm/register_zero.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/cpu/zero/vm/register_zero.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -114,5 +114,8 @@ + }; + + CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1)); ++#ifndef DONT_USE_REGISTER_DEFINES ++#define noreg ((Register)(noreg_RegisterEnumValue)) ++#endif + + #endif // CPU_ZERO_VM_REGISTER_ZERO_HPP +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/adlc/output_h.cpp +--- openjdk/hotspot/src/share/vm/adlc/output_h.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/adlc/output_h.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -674,16 +674,19 @@ + else if( inst.is_ideal_mem() ) { + // Print out the field name if available to improve readability + fprintf(fp, " if (ra->C->alias_type(adr_type())->field() != NULL) {\n"); +- fprintf(fp, " st->print(\" ! 
Field \");\n"); +- fprintf(fp, " if( ra->C->alias_type(adr_type())->is_volatile() )\n"); +- fprintf(fp, " st->print(\" Volatile\");\n"); +- fprintf(fp, " ra->C->alias_type(adr_type())->field()->holder()->name()->print_symbol_on(st);\n"); ++ fprintf(fp, " ciField* f = ra->C->alias_type(adr_type())->field();\n"); ++ fprintf(fp, " st->print(\" ! Field: \");\n"); ++ fprintf(fp, " if (f->is_volatile())\n"); ++ fprintf(fp, " st->print(\"volatile \");\n"); ++ fprintf(fp, " f->holder()->name()->print_symbol_on(st);\n"); + fprintf(fp, " st->print(\".\");\n"); +- fprintf(fp, " ra->C->alias_type(adr_type())->field()->name()->print_symbol_on(st);\n"); ++ fprintf(fp, " f->name()->print_symbol_on(st);\n"); ++ fprintf(fp, " if (f->is_constant())\n"); ++ fprintf(fp, " st->print(\" (constant)\");\n"); + fprintf(fp, " } else\n"); + // Make sure 'Volatile' gets printed out +- fprintf(fp, " if( ra->C->alias_type(adr_type())->is_volatile() )\n"); +- fprintf(fp, " st->print(\" Volatile!\");\n"); ++ fprintf(fp, " if (ra->C->alias_type(adr_type())->is_volatile())\n"); ++ fprintf(fp, " st->print(\" volatile!\");\n"); + } + + // Complete the definition of the format function +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/asm/assembler.cpp +--- openjdk/hotspot/src/share/vm/asm/assembler.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/asm/assembler.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -318,6 +318,16 @@ + } + } + ++RegisterOrConstant AbstractAssembler::delayed_value(int(*value_fn)(), Register tmp, int offset) { ++ intptr_t val = (intptr_t) (*value_fn)(); ++ if (val != 0) return val + offset; ++ return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset); ++} ++RegisterOrConstant AbstractAssembler::delayed_value(address(*value_fn)(), Register tmp, int offset) { ++ intptr_t val = (intptr_t) (*value_fn)(); ++ if (val != 0) return val + offset; ++ return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset); ++} + intptr_t* 
AbstractAssembler::delayed_value_addr(int(*value_fn)()) { + DelayedConstant* dcon = DelayedConstant::add(T_INT, (DelayedConstant::value_fn_t) value_fn); + return &dcon->value; +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/asm/assembler.hpp +--- openjdk/hotspot/src/share/vm/asm/assembler.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/asm/assembler.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -406,12 +406,8 @@ + // offsets in code which must be generated before the object class is loaded. + // Field offsets are never zero, since an object's header (mark word) + // is located at offset zero. +- RegisterOrConstant delayed_value(int(*value_fn)(), Register tmp, int offset = 0) { +- return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset); +- } +- RegisterOrConstant delayed_value(address(*value_fn)(), Register tmp, int offset = 0) { +- return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset); +- } ++ RegisterOrConstant delayed_value(int(*value_fn)(), Register tmp, int offset = 0); ++ RegisterOrConstant delayed_value(address(*value_fn)(), Register tmp, int offset = 0); + virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset) = 0; + // Last overloading is platform-dependent; look in assembler_.cpp. 
+ static intptr_t* delayed_value_addr(int(*constant_fn)()); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/asm/register.hpp +--- openjdk/hotspot/src/share/vm/asm/register.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/asm/register.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -103,7 +103,8 @@ + ) { + assert( + a != b, +- "registers must be different" ++ err_msg("registers must be different: a=%d, b=%d", ++ a, b) + ); + } + +@@ -116,7 +117,8 @@ + assert( + a != b && a != c + && b != c, +- "registers must be different" ++ err_msg("registers must be different: a=%d, b=%d, c=%d", ++ a, b, c) + ); + } + +@@ -131,7 +133,8 @@ + a != b && a != c && a != d + && b != c && b != d + && c != d, +- "registers must be different" ++ err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d", ++ a, b, c, d) + ); + } + +@@ -148,7 +151,8 @@ + && b != c && b != d && b != e + && c != d && c != e + && d != e, +- "registers must be different" ++ err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d", ++ a, b, c, d, e) + ); + } + +@@ -167,7 +171,8 @@ + && c != d && c != e && c != f + && d != e && d != f + && e != f, +- "registers must be different" ++ err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d", ++ a, b, c, d, e, f) + ); + } + +@@ -188,7 +193,8 @@ + && d != e && d != f && d != g + && e != f && e != g + && f != g, +- "registers must be different" ++ err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d", ++ a, b, c, d, e, f, g) + ); + } + +@@ -211,7 +217,34 @@ + && e != f && e != g && e != h + && f != g && f != h + && g != h, +- "registers must be different" ++ err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d", ++ a, b, c, d, e, f, g, h) ++ ); ++} ++ ++ ++inline void assert_different_registers( ++ AbstractRegister a, ++ AbstractRegister b, ++ AbstractRegister c, ++ AbstractRegister d, ++ AbstractRegister e, ++ AbstractRegister f, ++ AbstractRegister g, ++ 
AbstractRegister h, ++ AbstractRegister i ++) { ++ assert( ++ a != b && a != c && a != d && a != e && a != f && a != g && a != h && a != i ++ && b != c && b != d && b != e && b != f && b != g && b != h && b != i ++ && c != d && c != e && c != f && c != g && c != h && c != i ++ && d != e && d != f && d != g && d != h && d != i ++ && e != f && e != g && e != h && e != i ++ && f != g && f != h && f != i ++ && g != h && g != i ++ && h != i, ++ err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d, i=%d", ++ a, b, c, d, e, f, g, h, i) + ); + } + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_Canonicalizer.cpp +--- openjdk/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -540,6 +540,7 @@ + } + } + ++void Canonicalizer::do_TypeCast (TypeCast* x) {} + void Canonicalizer::do_Invoke (Invoke* x) {} + void Canonicalizer::do_NewInstance (NewInstance* x) {} + void Canonicalizer::do_NewTypeArray (NewTypeArray* x) {} +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_Canonicalizer.hpp +--- openjdk/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -74,6 +74,7 @@ + virtual void do_IfInstanceOf (IfInstanceOf* x); + virtual void do_Convert (Convert* x); + virtual void do_NullCheck (NullCheck* x); ++ virtual void do_TypeCast (TypeCast* x); + virtual void do_Invoke (Invoke* x); + virtual void do_NewInstance (NewInstance* x); + virtual void do_NewTypeArray (NewTypeArray* x); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_Compilation.cpp +--- openjdk/hotspot/src/share/vm/c1/c1_Compilation.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_Compilation.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -523,7 +523,7 @@ + assert(msg != NULL, "bailout message must exist"); + if 
(!bailed_out()) { + // keep first bailout message +- if (PrintBailouts) tty->print_cr("compilation bailout: %s", msg); ++ if (PrintCompilation || PrintBailouts) tty->print_cr("compilation bailout: %s", msg); + _bailout_msg = msg; + } + } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_FrameMap.cpp +--- openjdk/hotspot/src/share/vm/c1/c1_FrameMap.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_FrameMap.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -92,7 +92,6 @@ + for (i = 0; i < sizeargs;) { + BasicType t = sig_bt[i]; + assert(t != T_VOID, "should be skipping these"); +- + LIR_Opr opr = map_to_opr(t, regs + i, outgoing); + args->append(opr); + if (opr->is_address()) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_FrameMap.hpp +--- openjdk/hotspot/src/share/vm/c1/c1_FrameMap.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_FrameMap.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -181,8 +181,8 @@ + + // for outgoing calls, these also update the reserved area to + // include space for arguments and any ABI area. 
+- CallingConvention* c_calling_convention (const BasicTypeArray* signature); +- CallingConvention* java_calling_convention (const BasicTypeArray* signature, bool outgoing); ++ CallingConvention* c_calling_convention(const BasicTypeArray* signature); ++ CallingConvention* java_calling_convention(const BasicTypeArray* signature, bool outgoing); + + // deopt support + ByteSize sp_offset_for_orig_pc() { return sp_offset_for_monitor_base(_num_monitors); } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_GraphBuilder.cpp +--- openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -31,7 +31,7 @@ + #include "ci/ciCallSite.hpp" + #include "ci/ciField.hpp" + #include "ci/ciKlass.hpp" +-#include "ci/ciMethodHandle.hpp" ++#include "ci/ciMemberName.hpp" + #include "compiler/compileBroker.hpp" + #include "interpreter/bytecode.hpp" + #include "runtime/sharedRuntime.hpp" +@@ -914,11 +914,11 @@ + + void GraphBuilder::store_local(ValueType* type, int index) { + Value x = pop(type); +- store_local(state(), x, type, index); ++ store_local(state(), x, index); + } + + +-void GraphBuilder::store_local(ValueStack* state, Value x, ValueType* type, int index) { ++void GraphBuilder::store_local(ValueStack* state, Value x, int index) { + if (parsing_jsr()) { + // We need to do additional tracking of the location of the return + // address for jsrs since we don't handle arbitrary jsr/ret +@@ -1533,7 +1533,7 @@ + case T_ARRAY: + case T_OBJECT: + if (field_val.as_object()->should_be_constant()) { +- constant = new Constant(as_ValueType(field_val)); ++ constant = new Constant(as_ValueType(field_val)); + } + break; + +@@ -1560,12 +1560,51 @@ + append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching)); + } + break; +- case Bytecodes::_getfield : +- { ++ case Bytecodes::_getfield: { ++ // Check for compile-time constants, i.e., trusted 
final non-static fields. ++ Instruction* constant = NULL; ++ obj = apop(); ++ ObjectType* obj_type = obj->type()->as_ObjectType(); ++ if (obj_type->is_constant() && !PatchALot) { ++ ciObject* const_oop = obj_type->constant_value(); ++ if (field->is_constant()) { ++ ciConstant field_val = field->constant_value_of(const_oop); ++ BasicType field_type = field_val.basic_type(); ++ switch (field_type) { ++ case T_ARRAY: ++ case T_OBJECT: ++ if (field_val.as_object()->should_be_constant()) { ++ constant = new Constant(as_ValueType(field_val)); ++ } ++ break; ++ default: ++ constant = new Constant(as_ValueType(field_val)); ++ } ++ } else { ++ // For constant CallSites treat the target field as a compile time constant. ++ if (const_oop->is_call_site()) { ++ ciCallSite* call_site = const_oop->as_call_site(); ++ if (field->is_call_site_target()) { ++ ciMethodHandle* target = call_site->get_target(); ++ if (target != NULL) { // just in case ++ ciConstant field_val(T_OBJECT, target); ++ constant = new Constant(as_ValueType(field_val)); ++ // Add a dependence for invalidation of the optimization. ++ if (!call_site->is_constant_call_site()) { ++ dependency_recorder()->assert_call_site_target_value(call_site, target); ++ } ++ } ++ } ++ } ++ } ++ } ++ if (constant != NULL) { ++ push(type, append(constant)); ++ } else { + if (state_before == NULL) { + state_before = copy_state_for_exception(); + } +- LoadField* load = new LoadField(apop(), offset, field, false, state_before, needs_patching); ++ LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching); + Value replacement = !needs_patching ? 
_memory->load(load) : load; + if (replacement != load) { + assert(replacement->is_linked() || !replacement->can_be_linked(), "should already by linked"); +@@ -1573,22 +1612,23 @@ + } else { + push(type, append(load)); + } +- break; +- } +- +- case Bytecodes::_putfield : +- { Value val = pop(type); +- if (state_before == NULL) { +- state_before = copy_state_for_exception(); +- } +- StoreField* store = new StoreField(apop(), offset, field, val, false, state_before, needs_patching); +- if (!needs_patching) store = _memory->store(store); +- if (store != NULL) { +- append(store); +- } + } + break; +- default : ++ } ++ case Bytecodes::_putfield: { ++ Value val = pop(type); ++ obj = apop(); ++ if (state_before == NULL) { ++ state_before = copy_state_for_exception(); ++ } ++ StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching); ++ if (!needs_patching) store = _memory->store(store); ++ if (store != NULL) { ++ append(store); ++ } ++ break; ++ } ++ default: + ShouldNotReachHere(); + break; + } +@@ -1602,38 +1642,73 @@ + + + void GraphBuilder::invoke(Bytecodes::Code code) { ++ const bool has_receiver = ++ code == Bytecodes::_invokespecial || ++ code == Bytecodes::_invokevirtual || ++ code == Bytecodes::_invokeinterface; ++ const bool is_invokedynamic = (code == Bytecodes::_invokedynamic); ++ + bool will_link; +- ciMethod* target = stream()->get_method(will_link); ++ ciMethod* target = stream()->get_method(will_link); ++ ciKlass* holder = stream()->get_declared_method_holder(); ++ const Bytecodes::Code bc_raw = stream()->cur_bc_raw(); ++ ++ // FIXME bail out for now ++ if ((bc_raw == Bytecodes::_invokehandle || is_invokedynamic) && !will_link) { ++ BAILOUT("unlinked call site (FIXME needs patching or recompile support)"); ++ } ++ + // we have to make sure the argument size (incl. 
the receiver) + // is correct for compilation (the call would fail later during + // linkage anyway) - was bug (gri 7/28/99) +- if (target->is_loaded() && target->is_static() != (code == Bytecodes::_invokestatic)) BAILOUT("will cause link error"); ++ { ++ // Use raw to get rewritten bytecode. ++ const bool is_invokestatic = bc_raw == Bytecodes::_invokestatic; ++ const bool allow_static = ++ is_invokestatic || ++ bc_raw == Bytecodes::_invokehandle || ++ bc_raw == Bytecodes::_invokedynamic; ++ if (target->is_loaded()) { ++ if (( target->is_static() && !allow_static) || ++ (!target->is_static() && is_invokestatic)) { ++ BAILOUT("will cause link error"); ++ } ++ } ++ } + ciInstanceKlass* klass = target->holder(); + + // check if CHA possible: if so, change the code to invoke_special + ciInstanceKlass* calling_klass = method()->holder(); +- ciKlass* holder = stream()->get_declared_method_holder(); + ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder); + ciInstanceKlass* actual_recv = callee_holder; + +- // some methods are obviously bindable without any type checks so +- // convert them directly to an invokespecial. +- if (target->is_loaded() && !target->is_abstract() && +- target->can_be_statically_bound() && code == Bytecodes::_invokevirtual) { +- code = Bytecodes::_invokespecial; ++ // Some methods are obviously bindable without any type checks so ++ // convert them directly to an invokespecial or invokestatic. ++ if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) { ++ switch (bc_raw) { ++ case Bytecodes::_invokevirtual: code = Bytecodes::_invokespecial; break; ++ case Bytecodes::_invokehandle: code = Bytecodes::_invokestatic; break; ++ } + } + +- bool is_invokedynamic = code == Bytecodes::_invokedynamic; ++ // Push appendix argument (MethodType, CallSite, etc.), if one. 
++ if (stream()->has_appendix()) { ++ ciObject* appendix = stream()->get_appendix(); ++ Value arg = append(new Constant(new ObjectConstant(appendix))); ++ apush(arg); ++ } + + // NEEDS_CLEANUP +- // I've added the target-is_loaded() test below but I don't really understand ++ // I've added the target->is_loaded() test below but I don't really understand + // how klass->is_loaded() can be true and yet target->is_loaded() is false. + // this happened while running the JCK invokevirtual tests under doit. TKR + ciMethod* cha_monomorphic_target = NULL; + ciMethod* exact_target = NULL; + Value better_receiver = NULL; + if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() && +- !target->is_method_handle_invoke()) { ++ !(// %%% FIXME: Are both of these relevant? ++ target->is_method_handle_intrinsic() || ++ target->is_compiled_lambda_form())) { + Value receiver = NULL; + ciInstanceKlass* receiver_klass = NULL; + bool type_is_exact = false; +@@ -1757,23 +1832,15 @@ + code == Bytecodes::_invokedynamic) { + ciMethod* inline_target = (cha_monomorphic_target != NULL) ? cha_monomorphic_target : target; + bool success = false; +- if (target->is_method_handle_invoke()) { ++ if (target->is_method_handle_intrinsic()) { + // method handle invokes +- success = !is_invokedynamic ? for_method_handle_inline(target) : for_invokedynamic_inline(target); +- } +- if (!success) { ++ success = for_method_handle_inline(target); ++ } else { + // static binding => check if callee is ok +- success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), better_receiver); ++ success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver); + } + CHECK_BAILOUT(); + +-#ifndef PRODUCT +- // printing +- if (PrintInlining && !success) { +- // if it was successfully inlined, then it was already printed. 
+- print_inline_result(inline_target, success); +- } +-#endif + clear_inline_bailout(); + if (success) { + // Register dependence if JVMTI has either breakpoint +@@ -1784,8 +1851,13 @@ + } + return; + } ++ } else { ++ print_inlining(target, "no static binding", /*success*/ false); + } ++ } else { ++ print_inlining(target, "not inlineable", /*success*/ false); + } ++ + // If we attempted an inline which did not succeed because of a + // bailout during construction of the callee graph, the entire + // compilation has to be aborted. This is fairly rare and currently +@@ -1799,10 +1871,6 @@ + + // inlining not successful => standard invoke + bool is_loaded = target->is_loaded(); +- bool has_receiver = +- code == Bytecodes::_invokespecial || +- code == Bytecodes::_invokevirtual || +- code == Bytecodes::_invokeinterface; + ValueType* result_type = as_ValueType(target->return_type()); + + // We require the debug info to be the "state before" because +@@ -1851,7 +1919,7 @@ + } else if (exact_target != NULL) { + target_klass = exact_target->holder(); + } +- profile_call(recv, target_klass); ++ profile_call(target, recv, target_klass); + } + } + +@@ -3088,30 +3156,61 @@ + } + + +-bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Value receiver) { +- // Clear out any existing inline bailout condition ++bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Code bc, Value receiver) { ++ const char* msg = NULL; ++ ++ // clear out any existing inline bailout condition + clear_inline_bailout(); + +- if (callee->should_exclude()) { +- // callee is excluded +- INLINE_BAILOUT("excluded by CompilerOracle") +- } else if (callee->should_not_inline()) { +- // callee is excluded +- INLINE_BAILOUT("disallowed by CompilerOracle") +- } else if (!callee->can_be_compiled()) { +- // callee is not compilable (prob. 
has breakpoints) +- INLINE_BAILOUT("not compilable (disabled)") +- } else if (callee->intrinsic_id() != vmIntrinsics::_none && try_inline_intrinsics(callee)) { +- // intrinsics can be native or not ++ // exclude methods we don't want to inline ++ msg = should_not_inline(callee); ++ if (msg != NULL) { ++ print_inlining(callee, msg, /*success*/ false); ++ return false; ++ } ++ ++ // handle intrinsics ++ if (callee->intrinsic_id() != vmIntrinsics::_none) { ++ if (try_inline_intrinsics(callee)) { ++ print_inlining(callee, "intrinsic"); ++ return true; ++ } ++ // try normal inlining ++ } ++ ++ // certain methods cannot be parsed at all ++ msg = check_can_parse(callee); ++ if (msg != NULL) { ++ print_inlining(callee, msg, /*success*/ false); ++ return false; ++ } ++ ++ // If bytecode not set use the current one. ++ if (bc == Bytecodes::_illegal) { ++ bc = code(); ++ } ++ if (try_inline_full(callee, holder_known, bc, receiver)) + return true; +- } else if (callee->is_native()) { +- // non-intrinsic natives cannot be inlined +- INLINE_BAILOUT("non-intrinsic native") +- } else if (callee->is_abstract()) { +- INLINE_BAILOUT("abstract") +- } else { +- return try_inline_full(callee, holder_known, NULL, receiver); +- } ++ print_inlining(callee, _inline_bailout_msg, /*success*/ false); ++ return false; ++} ++ ++ ++const char* GraphBuilder::check_can_parse(ciMethod* callee) const { ++ // Certain methods cannot be parsed at all: ++ if ( callee->is_native()) return "native method"; ++ if ( callee->is_abstract()) return "abstract method"; ++ if (!callee->can_be_compiled()) return "not compilable (disabled)"; ++ return NULL; ++} ++ ++ ++// negative filter: should callee NOT be inlined? 
returns NULL, ok to inline, or rejection msg ++const char* GraphBuilder::should_not_inline(ciMethod* callee) const { ++ if ( callee->should_exclude()) return "excluded by CompilerOracle"; ++ if ( callee->should_not_inline()) return "disallowed by CompilerOracle"; ++ if ( callee->dont_inline()) return "don't inline by annotation"; ++ return NULL; + } + + +@@ -3286,7 +3385,7 @@ + recv = args->at(0); + null_check(recv); + } +- profile_call(recv, NULL); ++ profile_call(callee, recv, NULL); + } + } + } +@@ -3297,13 +3396,6 @@ + Value value = append_split(result); + if (result_type != voidType) push(result_type, value); + +-#ifndef PRODUCT +- // printing +- if (PrintInlining) { +- print_inline_result(callee, true); +- } +-#endif +- + // done + return true; + } +@@ -3459,7 +3551,7 @@ + } + + +-bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBegin* cont_block, Value receiver) { ++bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecodes::Code bc, Value receiver) { + assert(!callee->is_native(), "callee must not be native"); + if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) { + INLINE_BAILOUT("inlining prohibited by policy"); +@@ -3490,8 +3582,8 @@ + if (callee->should_inline()) { + // ignore heuristic controls on inlining + } else { +- if (inline_level() > MaxInlineLevel ) INLINE_BAILOUT("too-deep inlining"); +- if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining"); ++ if (inline_level() > MaxInlineLevel ) INLINE_BAILOUT("inlining too deep"); ++ if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep"); + if (callee->code_size_for_inlining() > max_inline_size() ) INLINE_BAILOUT("callee is too large"); + + // don't inline throwable methods unless the inlining tree is rooted in a throwable class +@@ -3510,28 +3602,25 @@ + if (compilation()->env()->num_inlined_bytecodes() > 
DesiredMethodLimit) { + INLINE_BAILOUT("total inlining greater than DesiredMethodLimit"); + } ++ // printing ++ print_inlining(callee, ""); + } + +-#ifndef PRODUCT +- // printing +- if (PrintInlining) { +- print_inline_result(callee, true); +- } +-#endif +- + // NOTE: Bailouts from this point on, which occur at the + // GraphBuilder level, do not cause bailout just of the inlining but + // in fact of the entire compilation. + + BlockBegin* orig_block = block(); + ++ const bool is_invokedynamic = bc == Bytecodes::_invokedynamic; ++ const bool has_receiver = (bc != Bytecodes::_invokestatic && !is_invokedynamic); ++ + const int args_base = state()->stack_size() - callee->arg_size(); + assert(args_base >= 0, "stack underflow during inlining"); + + // Insert null check if necessary + Value recv = NULL; +- if (code() != Bytecodes::_invokestatic && +- code() != Bytecodes::_invokedynamic) { ++ if (has_receiver) { + // note: null check must happen even if first instruction of callee does + // an implicit null check since the callee is in a different scope + // and we must make sure exception handling does the right thing +@@ -3547,7 +3636,7 @@ + compilation()->set_would_profile(true); + + if (profile_calls()) { +- profile_call(recv, holder_known ? callee->holder() : NULL); ++ profile_call(callee, recv, holder_known ? callee->holder() : NULL); + } + } + +@@ -3556,7 +3645,7 @@ + // fall-through of control flow, all return instructions of the + // callee will need to be replaced by Goto's pointing to this + // continuation point. +- BlockBegin* cont = cont_block != NULL ? 
cont_block : block_at(next_bci()); ++ BlockBegin* cont = block_at(next_bci()); + bool continuation_existed = true; + if (cont == NULL) { + cont = new BlockBegin(next_bci()); +@@ -3589,17 +3678,10 @@ + // note: this will also ensure that all arguments are computed before being passed + ValueStack* callee_state = state(); + ValueStack* caller_state = state()->caller_state(); +- { int i = args_base; +- while (i < caller_state->stack_size()) { +- const int par_no = i - args_base; +- Value arg = caller_state->stack_at_inc(i); +- // NOTE: take base() of arg->type() to avoid problems storing +- // constants +- if (receiver != NULL && par_no == 0) { +- arg = receiver; +- } +- store_local(callee_state, arg, arg->type()->base(), par_no); +- } ++ for (int i = args_base; i < caller_state->stack_size(); ) { ++ const int arg_no = i - args_base; ++ Value arg = caller_state->stack_at_inc(i); ++ store_local(callee_state, arg, arg_no); + } + + // Remove args from stack. +@@ -3675,29 +3757,27 @@ + // block merging. This allows load elimination and CSE to take place + // across multiple callee scopes if they are relatively simple, and + // is currently essential to making inlining profitable. +- if (cont_block == NULL) { +- if (num_returns() == 1 +- && block() == orig_block +- && block() == inline_cleanup_block()) { +- _last = inline_cleanup_return_prev(); +- _state = inline_cleanup_state(); +- } else if (continuation_preds == cont->number_of_preds()) { +- // Inlining caused that the instructions after the invoke in the +- // caller are not reachable any more. So skip filling this block +- // with instructions! 
+- assert(cont == continuation(), ""); ++ if (num_returns() == 1 ++ && block() == orig_block ++ && block() == inline_cleanup_block()) { ++ _last = inline_cleanup_return_prev(); ++ _state = inline_cleanup_state(); ++ } else if (continuation_preds == cont->number_of_preds()) { ++ // Inlining caused that the instructions after the invoke in the ++ // caller are not reachable any more. So skip filling this block ++ // with instructions! ++ assert(cont == continuation(), ""); ++ assert(_last && _last->as_BlockEnd(), ""); ++ _skip_block = true; ++ } else { ++ // Resume parsing in continuation block unless it was already parsed. ++ // Note that if we don't change _last here, iteration in ++ // iterate_bytecodes_for_block will stop when we return. ++ if (!continuation()->is_set(BlockBegin::was_visited_flag)) { ++ // add continuation to work list instead of parsing it immediately + assert(_last && _last->as_BlockEnd(), ""); ++ scope_data()->parent()->add_to_work_list(continuation()); + _skip_block = true; +- } else { +- // Resume parsing in continuation block unless it was already parsed. +- // Note that if we don't change _last here, iteration in +- // iterate_bytecodes_for_block will stop when we return. 
+- if (!continuation()->is_set(BlockBegin::was_visited_flag)) { +- // add continuation to work list instead of parsing it immediately +- assert(_last && _last->as_BlockEnd(), ""); +- scope_data()->parent()->add_to_work_list(continuation()); +- _skip_block = true; +- } + } + } + +@@ -3715,114 +3795,88 @@ + + + bool GraphBuilder::for_method_handle_inline(ciMethod* callee) { +- assert(!callee->is_static(), "change next line"); +- int index = state()->stack_size() - (callee->arg_size_no_receiver() + 1); +- Value receiver = state()->stack_at(index); +- +- if (receiver->type()->is_constant()) { +- ciMethodHandle* method_handle = receiver->type()->as_ObjectType()->constant_value()->as_method_handle(); +- +- // Set the callee to have access to the class and signature in +- // the MethodHandleCompiler. +- method_handle->set_callee(callee); +- method_handle->set_caller(method()); +- +- // Get an adapter for the MethodHandle. +- ciMethod* method_handle_adapter = method_handle->get_method_handle_adapter(); +- if (method_handle_adapter != NULL) { +- return try_inline(method_handle_adapter, /*holder_known=*/ true); +- } +- } else if (receiver->as_CheckCast()) { +- // Match MethodHandle.selectAlternative idiom +- Phi* phi = receiver->as_CheckCast()->obj()->as_Phi(); +- +- if (phi != NULL && phi->operand_count() == 2) { +- // Get the two MethodHandle inputs from the Phi. +- Value op1 = phi->operand_at(0); +- Value op2 = phi->operand_at(1); +- ObjectType* op1type = op1->type()->as_ObjectType(); +- ObjectType* op2type = op2->type()->as_ObjectType(); +- +- if (op1type->is_constant() && op2type->is_constant()) { +- ciMethodHandle* mh1 = op1type->constant_value()->as_method_handle(); +- ciMethodHandle* mh2 = op2type->constant_value()->as_method_handle(); +- +- // Set the callee to have access to the class and signature in +- // the MethodHandleCompiler. 
+- mh1->set_callee(callee); +- mh1->set_caller(method()); +- mh2->set_callee(callee); +- mh2->set_caller(method()); +- +- // Get adapters for the MethodHandles. +- ciMethod* mh1_adapter = mh1->get_method_handle_adapter(); +- ciMethod* mh2_adapter = mh2->get_method_handle_adapter(); +- +- if (mh1_adapter != NULL && mh2_adapter != NULL) { +- set_inline_cleanup_info(); +- +- // Build the If guard +- BlockBegin* one = new BlockBegin(next_bci()); +- BlockBegin* two = new BlockBegin(next_bci()); +- BlockBegin* end = new BlockBegin(next_bci()); +- Instruction* iff = append(new If(phi, If::eql, false, op1, one, two, NULL, false)); +- block()->set_end(iff->as_BlockEnd()); +- +- // Connect up the states +- one->merge(block()->end()->state()); +- two->merge(block()->end()->state()); +- +- // Save the state for the second inlinee +- ValueStack* state_before = copy_state_before(); +- +- // Parse first adapter +- _last = _block = one; +- if (!try_inline_full(mh1_adapter, /*holder_known=*/ true, end, NULL)) { +- restore_inline_cleanup_info(); +- block()->clear_end(); // remove appended iff +- return false; +- } +- +- // Parse second adapter +- _last = _block = two; +- _state = state_before; +- if (!try_inline_full(mh2_adapter, /*holder_known=*/ true, end, NULL)) { +- restore_inline_cleanup_info(); +- block()->clear_end(); // remove appended iff +- return false; +- } +- +- connect_to_end(end); ++ ValueStack* state_before = state()->copy_for_parsing(); ++ vmIntrinsics::ID iid = callee->intrinsic_id(); ++ switch (iid) { ++ case vmIntrinsics::_invokeBasic: ++ { ++ // get MethodHandle receiver ++ const int args_base = state()->stack_size() - callee->arg_size(); ++ ValueType* type = state()->stack_at(args_base)->type(); ++ if (type->is_constant()) { ++ ciMethod* target = type->as_ObjectType()->constant_value()->as_method_handle()->get_vmtarget(); ++ guarantee(!target->is_method_handle_intrinsic(), "should not happen"); // XXX remove ++ Bytecodes::Code bc = target->is_static() ? 
Bytecodes::_invokestatic : Bytecodes::_invokevirtual; ++ if (try_inline(target, /*holder_known*/ true, bc)) { + return true; + } ++ } else { ++ print_inlining(callee, "receiver not constant", /*success*/ false); + } + } ++ break; ++ ++ case vmIntrinsics::_linkToVirtual: ++ case vmIntrinsics::_linkToStatic: ++ case vmIntrinsics::_linkToSpecial: ++ case vmIntrinsics::_linkToInterface: ++ { ++ // pop MemberName argument ++ const int args_base = state()->stack_size() - callee->arg_size(); ++ ValueType* type = apop()->type(); ++ if (type->is_constant()) { ++ ciMethod* target = type->as_ObjectType()->constant_value()->as_member_name()->get_vmtarget(); ++ // If the target is another method handle invoke try recursivly to get ++ // a better target. ++ if (target->is_method_handle_intrinsic()) { ++ if (for_method_handle_inline(target)) { ++ return true; ++ } ++ } else { ++ ciSignature* signature = target->signature(); ++ const int receiver_skip = target->is_static() ? 0 : 1; ++ // Cast receiver to its type. ++ if (!target->is_static()) { ++ ciKlass* tk = signature->accessing_klass(); ++ Value obj = state()->stack_at(args_base); ++ if (obj->exact_type() == NULL && ++ obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) { ++ TypeCast* c = new TypeCast(tk, obj, state_before); ++ append(c); ++ state()->stack_at_put(args_base, c); ++ } ++ } ++ // Cast reference arguments to its type. ++ for (int i = 0, j = 0; i < signature->count(); i++) { ++ ciType* t = signature->type_at(i); ++ if (t->is_klass()) { ++ ciKlass* tk = t->as_klass(); ++ Value obj = state()->stack_at(args_base + receiver_skip + j); ++ if (obj->exact_type() == NULL && ++ obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) { ++ TypeCast* c = new TypeCast(t, obj, state_before); ++ append(c); ++ state()->stack_at_put(args_base + receiver_skip + j, c); ++ } ++ } ++ j += t->size(); // long and double take two slots ++ } ++ Bytecodes::Code bc = target->is_static() ? 
Bytecodes::_invokestatic : Bytecodes::_invokevirtual; ++ if (try_inline(target, /*holder_known*/ true, bc)) { ++ return true; ++ } ++ } ++ } else { ++ print_inlining(callee, "MemberName not constant", /*success*/ false); ++ } ++ } ++ break; ++ ++ default: ++ fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid))); ++ break; + } +- return false; +-} +- +- +-bool GraphBuilder::for_invokedynamic_inline(ciMethod* callee) { +- // Get the MethodHandle from the CallSite. +- ciCallSite* call_site = stream()->get_call_site(); +- ciMethodHandle* method_handle = call_site->get_target(); +- +- // Set the callee to have access to the class and signature in the +- // MethodHandleCompiler. +- method_handle->set_callee(callee); +- method_handle->set_caller(method()); +- +- // Get an adapter for the MethodHandle. +- ciMethod* method_handle_adapter = method_handle->get_invokedynamic_adapter(); +- if (method_handle_adapter != NULL) { +- if (try_inline(method_handle_adapter, /*holder_known=*/ true)) { +- // Add a dependence for invalidation of the optimization. 
+- if (!call_site->is_constant_call_site()) { +- dependency_recorder()->assert_call_site_target_value(call_site, method_handle); +- } +- return true; +- } +- } ++ set_state(state_before); + return false; + } + +@@ -4014,22 +4068,24 @@ + } + + +-#ifndef PRODUCT +-void GraphBuilder::print_inline_result(ciMethod* callee, bool res) { +- CompileTask::print_inlining(callee, scope()->level(), bci(), _inline_bailout_msg); +- if (res && CIPrintMethodCodes) { ++void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) { ++ if (!PrintInlining) return; ++ assert(msg != NULL, "must be"); ++ CompileTask::print_inlining(callee, scope()->level(), bci(), msg); ++ if (success && CIPrintMethodCodes) { + callee->print_codes(); + } + } + + ++#ifndef PRODUCT + void GraphBuilder::print_stats() { + vmap()->print(); + } + #endif // PRODUCT + +-void GraphBuilder::profile_call(Value recv, ciKlass* known_holder) { +- append(new ProfileCall(method(), bci(), recv, known_holder)); ++void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder) { ++ append(new ProfileCall(method(), bci(), callee, recv, known_holder)); + } + + void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_GraphBuilder.hpp +--- openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -225,7 +225,7 @@ + void load_constant(); + void load_local(ValueType* type, int index); + void store_local(ValueType* type, int index); +- void store_local(ValueStack* state, Value value, ValueType* type, int index); ++ void store_local(ValueStack* state, Value value, int index); + void load_indexed (BasicType type); + void store_indexed(BasicType type); + void stack_op(Bytecodes::Code code); +@@ -337,14 +337,16 @@ + void fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler = 
false); + + // inliners +- bool try_inline( ciMethod* callee, bool holder_known, Value receiver = NULL); ++ bool try_inline( ciMethod* callee, bool holder_known, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = NULL); + bool try_inline_intrinsics(ciMethod* callee); +- bool try_inline_full( ciMethod* callee, bool holder_known, BlockBegin* cont_block, Value receiver); ++ bool try_inline_full( ciMethod* callee, bool holder_known, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = NULL); + bool try_inline_jsr(int jsr_dest_bci); + ++ const char* check_can_parse(ciMethod* callee) const; ++ const char* should_not_inline(ciMethod* callee) const; ++ + // JSR 292 support + bool for_method_handle_inline(ciMethod* callee); +- bool for_invokedynamic_inline(ciMethod* callee); + + // helpers + void inline_bailout(const char* msg); +@@ -366,9 +368,9 @@ + bool append_unsafe_prefetch(ciMethod* callee, bool is_store, bool is_static); + void append_unsafe_CAS(ciMethod* callee); + +- NOT_PRODUCT(void print_inline_result(ciMethod* callee, bool res);) ++ void print_inlining(ciMethod* callee, const char* msg, bool success = true); + +- void profile_call(Value recv, ciKlass* predicted_holder); ++ void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder); + void profile_invocation(ciMethod* inlinee, ValueStack* state); + + // Shortcuts to profiling control. 
+diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_Instruction.cpp +--- openjdk/hotspot/src/share/vm/c1/c1_Instruction.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_Instruction.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -161,6 +161,12 @@ + return NULL; + } + ++ciType* Constant::exact_type() const { ++ if (type()->is_object()) { ++ return type()->as_ObjectType()->exact_type(); ++ } ++ return NULL; ++} + + ciType* LoadIndexed::exact_type() const { + ciType* array_type = array()->exact_type(); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_Instruction.hpp +--- openjdk/hotspot/src/share/vm/c1/c1_Instruction.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_Instruction.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -66,6 +66,7 @@ + class IfOp; + class Convert; + class NullCheck; ++class TypeCast; + class OsrEntry; + class ExceptionObject; + class StateSplit; +@@ -174,6 +175,7 @@ + virtual void do_IfOp (IfOp* x) = 0; + virtual void do_Convert (Convert* x) = 0; + virtual void do_NullCheck (NullCheck* x) = 0; ++ virtual void do_TypeCast (TypeCast* x) = 0; + virtual void do_Invoke (Invoke* x) = 0; + virtual void do_NewInstance (NewInstance* x) = 0; + virtual void do_NewTypeArray (NewTypeArray* x) = 0; +@@ -304,7 +306,8 @@ + + bool has_printable_bci() const { return NOT_PRODUCT(_printable_bci != -99) PRODUCT_ONLY(false); } + +- protected: ++ //protected: ++ public: + void set_type(ValueType* type) { + assert(type != NULL, "type must exist"); + _type = type; +@@ -486,6 +489,7 @@ + virtual TypeCheck* as_TypeCheck() { return NULL; } + virtual CheckCast* as_CheckCast() { return NULL; } + virtual InstanceOf* as_InstanceOf() { return NULL; } ++ virtual TypeCast* as_TypeCast() { return NULL; } + virtual AccessMonitor* as_AccessMonitor() { return NULL; } + virtual MonitorEnter* as_MonitorEnter() { return NULL; } + virtual MonitorExit* as_MonitorExit() { return NULL; } +@@ -636,8 +640,8 @@ + // accessors + int java_index() 
const { return _java_index; } + +- ciType* declared_type() const { return _declared_type; } +- ciType* exact_type() const; ++ virtual ciType* declared_type() const { return _declared_type; } ++ virtual ciType* exact_type() const; + + // generic + virtual void input_values_do(ValueVisitor* f) { /* no values */ } +@@ -648,13 +652,13 @@ + public: + // creation + Constant(ValueType* type): +- Instruction(type, NULL, true) ++ Instruction(type, NULL, /*type_is_constant*/ true) + { + assert(type->is_constant(), "must be a constant"); + } + + Constant(ValueType* type, ValueStack* state_before): +- Instruction(type, state_before, true) ++ Instruction(type, state_before, /*type_is_constant*/ true) + { + assert(state_before != NULL, "only used for constants which need patching"); + assert(type->is_constant(), "must be a constant"); +@@ -668,6 +672,7 @@ + virtual intx hash() const; + virtual bool is_equal(Value v) const; + ++ virtual ciType* exact_type() const; + + enum CompareResult { not_comparable = -1, cond_false, cond_true }; + +@@ -1101,6 +1106,29 @@ + }; + + ++// This node is supposed to cast the type of another node to a more precise ++// declared type. ++LEAF(TypeCast, Instruction) ++ private: ++ ciType* _declared_type; ++ Value _obj; ++ ++ public: ++ // The type of this node is the same type as the object type (and it might be constant). 
++ TypeCast(ciType* type, Value obj, ValueStack* state_before) ++ : Instruction(obj->type(), state_before, obj->type()->is_constant()), ++ _declared_type(type), ++ _obj(obj) {} ++ ++ // accessors ++ ciType* declared_type() const { return _declared_type; } ++ Value obj() const { return _obj; } ++ ++ // generic ++ virtual void input_values_do(ValueVisitor* f) { f->visit(&_obj); } ++}; ++ ++ + BASE(StateSplit, Instruction) + private: + ValueStack* _state; +@@ -1164,6 +1192,7 @@ + + // JSR 292 support + bool is_invokedynamic() const { return code() == Bytecodes::_invokedynamic; } ++ bool is_method_handle_intrinsic() const { return target()->is_method_handle_intrinsic(); } + + virtual bool needs_exception_state() const { return false; } + +@@ -2275,14 +2304,16 @@ + private: + ciMethod* _method; + int _bci_of_invoke; ++ ciMethod* _callee; // the method that is called at the given bci + Value _recv; + ciKlass* _known_holder; + + public: +- ProfileCall(ciMethod* method, int bci, Value recv, ciKlass* known_holder) ++ ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder) + : Instruction(voidType) + , _method(method) + , _bci_of_invoke(bci) ++ , _callee(callee) + , _recv(recv) + , _known_holder(known_holder) + { +@@ -2292,6 +2323,7 @@ + + ciMethod* method() { return _method; } + int bci_of_invoke() { return _bci_of_invoke; } ++ ciMethod* callee() { return _callee; } + Value recv() { return _recv; } + ciKlass* known_holder() { return _known_holder; } + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_InstructionPrinter.cpp +--- openjdk/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -137,12 +137,16 @@ + ciMethod* m = (ciMethod*)value; + output()->print("", m->holder()->name()->as_utf8(), m->name()->as_utf8()); + } else { +- output()->print("", value->constant_encoding()); ++ output()->print(""); + } + } 
else if (type->as_InstanceConstant() != NULL) { + ciInstance* value = type->as_InstanceConstant()->value(); + if (value->is_loaded()) { +- output()->print("", value->constant_encoding()); ++ output()->print(""); + } else { + output()->print("", value); + } +@@ -453,6 +457,14 @@ + } + + ++void InstructionPrinter::do_TypeCast(TypeCast* x) { ++ output()->print("type_cast("); ++ print_value(x->obj()); ++ output()->print(") "); ++ print_klass(x->declared_type()->klass()); ++} ++ ++ + void InstructionPrinter::do_Invoke(Invoke* x) { + if (x->receiver() != NULL) { + print_value(x->receiver()); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_InstructionPrinter.hpp +--- openjdk/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -101,6 +101,7 @@ + virtual void do_IfOp (IfOp* x); + virtual void do_Convert (Convert* x); + virtual void do_NullCheck (NullCheck* x); ++ virtual void do_TypeCast (TypeCast* x); + virtual void do_Invoke (Invoke* x); + virtual void do_NewInstance (NewInstance* x); + virtual void do_NewTypeArray (NewTypeArray* x); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_LIR.hpp +--- openjdk/hotspot/src/share/vm/c1/c1_LIR.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_LIR.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -26,6 +26,7 @@ + #define SHARE_VM_C1_C1_LIR_HPP + + #include "c1/c1_ValueType.hpp" ++#include "oops/methodOop.hpp" + + class BlockBegin; + class BlockList; +@@ -1160,8 +1161,9 @@ + return + is_invokedynamic() // An invokedynamic is always a MethodHandle call site. 
+ || +- (method()->holder()->name() == ciSymbol::java_lang_invoke_MethodHandle() && +- methodOopDesc::is_method_handle_invoke_name(method()->name()->sid())); ++ method()->is_compiled_lambda_form() // Java-generated adapter ++ || ++ method()->is_method_handle_intrinsic(); // JVM-generated MH intrinsic + } + + intptr_t vtable_offset() const { +@@ -1796,18 +1798,20 @@ + + private: + ciMethod* _profiled_method; +- int _profiled_bci; +- LIR_Opr _mdo; +- LIR_Opr _recv; +- LIR_Opr _tmp1; +- ciKlass* _known_holder; ++ int _profiled_bci; ++ ciMethod* _profiled_callee; ++ LIR_Opr _mdo; ++ LIR_Opr _recv; ++ LIR_Opr _tmp1; ++ ciKlass* _known_holder; + + public: + // Destroys recv +- LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder) ++ LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder) + : LIR_Op(code, LIR_OprFact::illegalOpr, NULL) // no result, no info + , _profiled_method(profiled_method) + , _profiled_bci(profiled_bci) ++ , _profiled_callee(profiled_callee) + , _mdo(mdo) + , _recv(recv) + , _tmp1(t1) +@@ -1815,6 +1819,7 @@ + + ciMethod* profiled_method() const { return _profiled_method; } + int profiled_bci() const { return _profiled_bci; } ++ ciMethod* profiled_callee() const { return _profiled_callee; } + LIR_Opr mdo() const { return _mdo; } + LIR_Opr recv() const { return _recv; } + LIR_Opr tmp1() const { return _tmp1; } +@@ -2116,8 +2121,8 @@ + CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, + ciMethod* profiled_method, int profiled_bci); + // methodDataOop profiling +- void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { +- append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass)); ++ void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr 
mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { ++ append(new LIR_OpProfileCall(lir_profile_call, method, bci, callee, mdo, recv, t1, cha_klass)); + } + }; + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_LIRGenerator.cpp +--- openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -1910,6 +1910,14 @@ + } + + ++void LIRGenerator::do_TypeCast(TypeCast* x) { ++ LIRItem value(x->obj(), this); ++ value.load_item(); ++ // the result is the same as from the node we are casting ++ set_result(x, value.result()); ++} ++ ++ + void LIRGenerator::do_Throw(Throw* x) { + LIRItem exception(x->exception(), this); + exception.load_item(); +@@ -2737,7 +2745,10 @@ + // JSR 292 + // Preserve the SP over MethodHandle call sites. + ciMethod* target = x->target(); +- if (target->is_method_handle_invoke()) { ++ bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant? ++ target->is_method_handle_intrinsic() || ++ target->is_compiled_lambda_form()); ++ if (is_method_handle_invoke) { + info->set_is_method_handle_invoke(true); + __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr()); + } +@@ -2813,7 +2824,7 @@ + + // JSR 292 + // Restore the SP after MethodHandle call sites. 
+- if (target->is_method_handle_invoke()) { ++ if (is_method_handle_invoke) { + __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer()); + } + +@@ -2959,7 +2970,7 @@ + recv = new_register(T_OBJECT); + __ move(value.result(), recv); + } +- __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder()); ++ __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder()); + } + + void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_LIRGenerator.hpp +--- openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -494,6 +494,7 @@ + virtual void do_IfOp (IfOp* x); + virtual void do_Convert (Convert* x); + virtual void do_NullCheck (NullCheck* x); ++ virtual void do_TypeCast (TypeCast* x); + virtual void do_Invoke (Invoke* x); + virtual void do_NewInstance (NewInstance* x); + virtual void do_NewTypeArray (NewTypeArray* x); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_Optimizer.cpp +--- openjdk/hotspot/src/share/vm/c1/c1_Optimizer.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_Optimizer.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -478,6 +478,7 @@ + void do_IfOp (IfOp* x); + void do_Convert (Convert* x); + void do_NullCheck (NullCheck* x); ++ void do_TypeCast (TypeCast* x); + void do_Invoke (Invoke* x); + void do_NewInstance (NewInstance* x); + void do_NewTypeArray (NewTypeArray* x); +@@ -648,6 +649,7 @@ + void NullCheckVisitor::do_IfOp (IfOp* x) {} + void NullCheckVisitor::do_Convert (Convert* x) {} + void NullCheckVisitor::do_NullCheck (NullCheck* x) { nce()->handle_NullCheck(x); } ++void NullCheckVisitor::do_TypeCast (TypeCast* x) {} + void NullCheckVisitor::do_Invoke (Invoke* x) { nce()->handle_Invoke(x); } + void NullCheckVisitor::do_NewInstance (NewInstance* x) { 
nce()->handle_NewInstance(x); } + void NullCheckVisitor::do_NewTypeArray (NewTypeArray* x) { nce()->handle_NewArray(x); } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_ValueMap.hpp +--- openjdk/hotspot/src/share/vm/c1/c1_ValueMap.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_ValueMap.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -178,6 +178,7 @@ + void do_IfOp (IfOp* x) { /* nothing to do */ } + void do_Convert (Convert* x) { /* nothing to do */ } + void do_NullCheck (NullCheck* x) { /* nothing to do */ } ++ void do_TypeCast (TypeCast* x) { /* nothing to do */ } + void do_NewInstance (NewInstance* x) { /* nothing to do */ } + void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ } + void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_ValueStack.cpp +--- openjdk/hotspot/src/share/vm/c1/c1_ValueStack.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_ValueStack.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -195,6 +195,7 @@ + + void ValueStack::print() { + scope()->method()->print_name(); ++ tty->cr(); + if (stack_is_empty()) { + tty->print_cr("empty stack"); + } else { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_ValueStack.hpp +--- openjdk/hotspot/src/share/vm/c1/c1_ValueStack.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_ValueStack.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -142,6 +142,10 @@ + return x; + } + ++ void stack_at_put(int i, Value x) { ++ _stack.at_put(i, x); ++ } ++ + // pinning support + void pin_stack_for_linear_scan(); + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_ValueType.cpp +--- openjdk/hotspot/src/share/vm/c1/c1_ValueType.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_ValueType.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -101,6 +101,23 @@ + ciObject* InstanceConstant::constant_value() const { return _value; } + ciObject* 
ClassConstant::constant_value() const { return _value; } + ++ciType* ObjectConstant::exact_type() const { ++ ciObject* c = constant_value(); ++ return (c != NULL && !c->is_null_object()) ? c->klass() : NULL; ++} ++ciType* ArrayConstant::exact_type() const { ++ ciObject* c = constant_value(); ++ return (c != NULL && !c->is_null_object()) ? c->klass() : NULL; ++} ++ciType* InstanceConstant::exact_type() const { ++ ciObject* c = constant_value(); ++ return (c != NULL && !c->is_null_object()) ? c->klass() : NULL; ++} ++ciType* ClassConstant::exact_type() const { ++ ciObject* c = constant_value(); ++ return (c != NULL && !c->is_null_object()) ? c->klass() : NULL; ++} ++ + + ValueType* as_ValueType(BasicType type) { + switch (type) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_ValueType.hpp +--- openjdk/hotspot/src/share/vm/c1/c1_ValueType.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/c1/c1_ValueType.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -297,7 +297,8 @@ + virtual const char tchar() const { return 'a'; } + virtual const char* name() const { return "object"; } + virtual ObjectType* as_ObjectType() { return this; } +- virtual ciObject* constant_value() const { ShouldNotReachHere(); return NULL; } ++ virtual ciObject* constant_value() const { ShouldNotReachHere(); return NULL; } ++ virtual ciType* exact_type() const { return NULL; } + bool is_loaded() const; + jobject encoding() const; + }; +@@ -315,6 +316,7 @@ + virtual bool is_constant() const { return true; } + virtual ObjectConstant* as_ObjectConstant() { return this; } + virtual ciObject* constant_value() const; ++ virtual ciType* exact_type() const; + }; + + +@@ -334,9 +336,9 @@ + ciArray* value() const { return _value; } + + virtual bool is_constant() const { return true; } +- + virtual ArrayConstant* as_ArrayConstant() { return this; } + virtual ciObject* constant_value() const; ++ virtual ciType* exact_type() const; + }; + + +@@ -356,9 +358,9 @@ + ciInstance* value() const { 
return _value; } + + virtual bool is_constant() const { return true; } +- + virtual InstanceConstant* as_InstanceConstant(){ return this; } + virtual ciObject* constant_value() const; ++ virtual ciType* exact_type() const; + }; + + +@@ -378,9 +380,9 @@ + ciInstanceKlass* value() const { return _value; } + + virtual bool is_constant() const { return true; } +- + virtual ClassConstant* as_ClassConstant() { return this; } + virtual ciObject* constant_value() const; ++ virtual ciType* exact_type() const; + }; + + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/bcEscapeAnalyzer.cpp +--- openjdk/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -238,9 +238,11 @@ + + // some methods are obviously bindable without any type checks so + // convert them directly to an invokespecial. +- if (target->is_loaded() && !target->is_abstract() && +- target->can_be_statically_bound() && code == Bytecodes::_invokevirtual) { +- code = Bytecodes::_invokespecial; ++ if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) { ++ switch (code) { ++ case Bytecodes::_invokevirtual: code = Bytecodes::_invokespecial; break; ++ case Bytecodes::_invokehandle: code = Bytecodes::_invokestatic; break; ++ } + } + + // compute size of arguments +@@ -866,7 +868,12 @@ + { bool will_link; + ciMethod* target = s.get_method(will_link); + ciKlass* holder = s.get_declared_method_holder(); +- invoke(state, s.cur_bc(), target, holder); ++ // Push appendix argument, if one. ++ if (s.has_appendix()) { ++ state.apush(unknown_obj); ++ } ++ // Pass in raw bytecode because we need to see invokehandle instructions. 
++ invoke(state, s.cur_bc_raw(), target, holder); + ciType* return_type = target->return_type(); + if (!return_type->is_primitive_type()) { + state.apush(unknown_obj); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciClassList.hpp +--- openjdk/hotspot/src/share/vm/ci/ciClassList.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/ci/ciClassList.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -47,6 +47,7 @@ + class ciNullObject; + class ciInstance; + class ciCallSite; ++class ciMemberName; + class ciMethodHandle; + class ciMethod; + class ciMethodData; +@@ -100,6 +101,7 @@ + friend class ciObject; \ + friend class ciNullObject; \ + friend class ciInstance; \ ++friend class ciMemberName; \ + friend class ciMethod; \ + friend class ciMethodData; \ + friend class ciMethodHandle; \ +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciEnv.cpp +--- openjdk/hotspot/src/share/vm/ci/ciEnv.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/ci/ciEnv.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -50,7 +50,6 @@ + #include "oops/oop.inline.hpp" + #include "oops/oop.inline2.hpp" + #include "prims/jvmtiExport.hpp" +-#include "prims/methodHandleWalk.hpp" + #include "runtime/init.hpp" + #include "runtime/reflection.hpp" + #include "runtime/sharedRuntime.hpp" +@@ -582,7 +581,7 @@ + assert(index < 0, "only one kind of index at a time"); + ConstantPoolCacheEntry* cpc_entry = cpool->cache()->entry_at(cache_index); + index = cpc_entry->constant_pool_index(); +- oop obj = cpc_entry->f1(); ++ oop obj = cpc_entry->f1_as_instance(); + if (obj != NULL) { + assert(obj->is_instance() || obj->is_array(), "must be a Java reference"); + ciObject* ciobj = get_object(obj); +@@ -750,7 +749,7 @@ + + if (cpool->has_preresolution() + || (holder == ciEnv::MethodHandle_klass() && +- methodOopDesc::is_method_handle_invoke_name(name_sym))) { ++ MethodHandles::is_signature_polymorphic_name(holder->get_klassOop(), name_sym))) { + // Short-circuit lookups for JSR 292-related call 
sites. + // That is, do not rely only on name-based lookups, because they may fail + // if the names are not resolvable in the boot class loader (7056328). +@@ -760,11 +759,13 @@ + case Bytecodes::_invokespecial: + case Bytecodes::_invokestatic: + { +- methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index, bc); ++ oop appendix_oop = NULL; ++ methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index); + if (m != NULL) { + return get_object(m)->as_method(); + } + } ++ break; + } + } + +@@ -800,27 +801,28 @@ + // Compare the following logic with InterpreterRuntime::resolve_invokedynamic. + assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic"); + +- bool is_resolved = cpool->cache()->main_entry_at(index)->is_resolved(bc); +- if (is_resolved && cpool->cache()->secondary_entry_at(index)->is_f1_null()) +- // FIXME: code generation could allow for null (unlinked) call site +- is_resolved = false; ++ ConstantPoolCacheEntry* secondary_entry = cpool->cache()->secondary_entry_at(index); ++ bool is_resolved = !secondary_entry->is_f1_null(); ++ // FIXME: code generation could allow for null (unlinked) call site ++ // The call site could be made patchable as follows: ++ // Load the appendix argument from the constant pool. ++ // Test the appendix argument and jump to a known deopt routine if it is null. ++ // Jump through a patchable call site, which is initially a deopt routine. ++ // Patch the call site to the nmethod entry point of the static compiled lambda form. ++ // As with other two-component call sites, both values must be independently verified. + +- // Call site might not be resolved yet. We could create a real invoker method from the +- // compiler, but it is simpler to stop the code path here with an unlinked method. ++ // Call site might not be resolved yet. ++ // Stop the code path here with an unlinked method. 
+ if (!is_resolved) { + ciInstanceKlass* holder = get_object(SystemDictionary::MethodHandle_klass())->as_instance_klass(); +- ciSymbol* name = ciSymbol::invokeExact_name(); ++ ciSymbol* name = ciSymbol::invokeBasic_name(); + ciSymbol* signature = get_symbol(cpool->signature_ref_at(index)); + return get_unloaded_method(holder, name, signature, accessor); + } + +- // Get the invoker methodOop from the constant pool. +- oop f1_value = cpool->cache()->main_entry_at(index)->f1(); +- methodOop signature_invoker = (methodOop) f1_value; +- assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(), +- "correct result from LinkResolver::resolve_invokedynamic"); +- +- return get_object(signature_invoker)->as_method(); ++ // Get the invoker methodOop and the extra argument from the constant pool. ++ methodOop adapter = secondary_entry->f2_as_vfinal_method(); ++ return get_object(adapter)->as_method(); + } + + +@@ -1131,7 +1133,7 @@ + // ------------------------------------------------------------------ + // ciEnv::notice_inlined_method() + void ciEnv::notice_inlined_method(ciMethod* method) { +- _num_inlined_bytecodes += method->code_size(); ++ _num_inlined_bytecodes += method->code_size_for_inlining(); + } + + // ------------------------------------------------------------------ +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciMemberName.cpp +--- /dev/null Thu Jan 01 00:00:00 1970 +0000 ++++ openjdk/hotspot/src/share/vm/ci/ciMemberName.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -0,0 +1,39 @@ ++/* ++ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "ci/ciClassList.hpp" ++#include "ci/ciMemberName.hpp" ++#include "ci/ciUtilities.hpp" ++#include "classfile/javaClasses.hpp" ++ ++// ------------------------------------------------------------------ ++// ciMemberName::get_vmtarget ++// ++// Return: MN.vmtarget ++ciMethod* ciMemberName::get_vmtarget() const { ++ VM_ENTRY_MARK; ++ oop vmtarget_oop = java_lang_invoke_MemberName::vmtarget(get_oop()); ++ return CURRENT_ENV->get_object(vmtarget_oop)->as_method(); ++} +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciMemberName.hpp +--- /dev/null Thu Jan 01 00:00:00 1970 +0000 ++++ openjdk/hotspot/src/share/vm/ci/ciMemberName.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -0,0 +1,44 @@ ++/* ++ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef SHARE_VM_CI_CIMEMBERNAME_HPP ++#define SHARE_VM_CI_CIMEMBERNAME_HPP ++ ++#include "ci/ciCallProfile.hpp" ++#include "ci/ciInstance.hpp" ++ ++// ciMemberName ++// ++// The class represents a java.lang.invoke.MemberName object. ++class ciMemberName : public ciInstance { ++public: ++ ciMemberName(instanceHandle h_i) : ciInstance(h_i) {} ++ ++ // What kind of ciObject is this? ++ bool is_member_name() const { return true; } ++ ++ ciMethod* get_vmtarget() const; ++}; ++ ++#endif // SHARE_VM_CI_CIMEMBERNAME_HPP +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciMethod.cpp +--- openjdk/hotspot/src/share/vm/ci/ciMethod.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/ci/ciMethod.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -770,39 +770,37 @@ + // invokedynamic support + + // ------------------------------------------------------------------ +-// ciMethod::is_method_handle_invoke ++// ciMethod::is_method_handle_intrinsic + // +-// Return true if the method is an instance of one of the two +-// signature-polymorphic MethodHandle methods, invokeExact or invokeGeneric. 
+-bool ciMethod::is_method_handle_invoke() const { +- if (!is_loaded()) { +- bool flag = (holder()->name() == ciSymbol::java_lang_invoke_MethodHandle() && +- methodOopDesc::is_method_handle_invoke_name(name()->sid())); +- return flag; +- } +- VM_ENTRY_MARK; +- return get_methodOop()->is_method_handle_invoke(); ++// Return true if the method is an instance of the JVM-generated ++// signature-polymorphic MethodHandle methods, _invokeBasic, _linkToVirtual, etc. ++bool ciMethod::is_method_handle_intrinsic() const { ++ vmIntrinsics::ID iid = _intrinsic_id; // do not check if loaded ++ return (MethodHandles::is_signature_polymorphic(iid) && ++ MethodHandles::is_signature_polymorphic_intrinsic(iid)); + } + + // ------------------------------------------------------------------ +-// ciMethod::is_method_handle_adapter ++// ciMethod::is_compiled_lambda_form + // + // Return true if the method is a generated MethodHandle adapter. +-// These are built by MethodHandleCompiler. +-bool ciMethod::is_method_handle_adapter() const { +- if (!is_loaded()) return false; +- VM_ENTRY_MARK; +- return get_methodOop()->is_method_handle_adapter(); ++// These are built by Java code. ++bool ciMethod::is_compiled_lambda_form() const { ++ vmIntrinsics::ID iid = _intrinsic_id; // do not check if loaded ++ return iid == vmIntrinsics::_compiledLambdaForm; + } + +-ciInstance* ciMethod::method_handle_type() { +- check_is_loaded(); +- VM_ENTRY_MARK; +- oop mtype = get_methodOop()->method_handle_type(); +- return CURRENT_THREAD_ENV->get_object(mtype)->as_instance(); ++// ------------------------------------------------------------------ ++// ciMethod::has_member_arg ++// ++// Return true if the method is a linker intrinsic like _linkToVirtual. ++// These are built by the JVM. 
++bool ciMethod::has_member_arg() const { ++ vmIntrinsics::ID iid = _intrinsic_id; // do not check if loaded ++ return (MethodHandles::is_signature_polymorphic(iid) && ++ MethodHandles::has_member_arg(iid)); + } + +- + // ------------------------------------------------------------------ + // ciMethod::ensure_method_data + // +@@ -1025,28 +1023,13 @@ + // ------------------------------------------------------------------ + // ciMethod::code_size_for_inlining + // +-// Code size for inlining decisions. +-// +-// Don't fully count method handle adapters against inlining budgets: +-// the metric we use here is the number of call sites in the adapter +-// as they are probably the instructions which generate some code. ++// Code size for inlining decisions. This method returns a code ++// size of 1 for methods which has the ForceInline annotation. + int ciMethod::code_size_for_inlining() { + check_is_loaded(); +- +- // Method handle adapters +- if (is_method_handle_adapter()) { +- // Count call sites +- int call_site_count = 0; +- ciBytecodeStream iter(this); +- while (iter.next() != ciBytecodeStream::EOBC()) { +- if (Bytecodes::is_invoke(iter.cur_bc())) { +- call_site_count++; +- } +- } +- return call_site_count; ++ if (get_methodOop()->force_inline()) { ++ return 1; + } +- +- // Normal method + return code_size(); + } + +@@ -1128,7 +1111,8 @@ + constantPoolHandle pool (THREAD, get_methodOop()->constants()); + methodHandle spec_method; + KlassHandle spec_klass; +- LinkResolver::resolve_method(spec_method, spec_klass, pool, refinfo_index, THREAD); ++ Bytecodes::Code code = (is_static ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual); ++ LinkResolver::resolve_method_statically(spec_method, spec_klass, code, pool, refinfo_index, THREAD); + if (HAS_PENDING_EXCEPTION) { + CLEAR_PENDING_EXCEPTION; + return false; +@@ -1208,8 +1192,16 @@ + // + // Print the name of this method, without signature. 
+ void ciMethod::print_short_name(outputStream* st) { +- check_is_loaded(); +- GUARDED_VM_ENTRY(get_methodOop()->print_short_name(st);) ++ if (is_loaded()) { ++ GUARDED_VM_ENTRY(get_methodOop()->print_short_name(st);); ++ } else { ++ // Fall back if method is not loaded. ++ holder()->print_name_on(st); ++ st->print("::"); ++ name()->print_symbol_on(st); ++ if (WizardMode) ++ signature()->as_symbol()->print_symbol_on(st); ++ } + } + + // ------------------------------------------------------------------ +@@ -1224,6 +1216,7 @@ + holder()->print_name_on(st); + st->print(" signature="); + signature()->as_symbol()->print_symbol_on(st); ++ st->print(" arg_size=%d", arg_size()); + if (is_loaded()) { + st->print(" loaded=true flags="); + flags().print_member_flags(st); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciMethod.hpp +--- openjdk/hotspot/src/share/vm/ci/ciMethod.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/ci/ciMethod.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -133,16 +133,20 @@ + return _signature->size() + (_flags.is_static() ? 0 : 1); + } + // Report the number of elements on stack when invoking this method. +- // This is different than the regular arg_size because invokdynamic ++ // This is different than the regular arg_size because invokedynamic + // has an implicit receiver. + int invoke_arg_size(Bytecodes::Code code) const { +- int arg_size = _signature->size(); +- // Add a receiver argument, maybe: +- if (code != Bytecodes::_invokestatic && +- code != Bytecodes::_invokedynamic) { +- arg_size++; ++ if (is_loaded()) { ++ return arg_size(); ++ } else { ++ int arg_size = _signature->size(); ++ // Add a receiver argument, maybe: ++ if (code != Bytecodes::_invokestatic && ++ code != Bytecodes::_invokedynamic) { ++ arg_size++; ++ } ++ return arg_size; + } +- return arg_size; + } + + +@@ -160,6 +164,9 @@ + // Code size for inlining decisions. 
+ int code_size_for_inlining(); + ++ bool force_inline() { return get_methodOop()->force_inline(); } ++ bool dont_inline() { return get_methodOop()->dont_inline(); } ++ + int comp_level(); + int highest_osr_comp_level(); + +@@ -256,9 +263,9 @@ + int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC + + // JSR 292 support +- bool is_method_handle_invoke() const; +- bool is_method_handle_adapter() const; +- ciInstance* method_handle_type(); ++ bool is_method_handle_intrinsic() const; ++ bool is_compiled_lambda_form() const; ++ bool has_member_arg() const; + + // What kind of ciObject is this? + bool is_method() { return true; } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciMethodHandle.cpp +--- openjdk/hotspot/src/share/vm/ci/ciMethodHandle.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/ci/ciMethodHandle.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -24,84 +24,18 @@ + + #include "precompiled.hpp" + #include "ci/ciClassList.hpp" +-#include "ci/ciInstance.hpp" +-#include "ci/ciMethodData.hpp" + #include "ci/ciMethodHandle.hpp" + #include "ci/ciUtilities.hpp" +-#include "prims/methodHandleWalk.hpp" +-#include "prims/methodHandles.hpp" +- +-// ciMethodHandle ++#include "classfile/javaClasses.hpp" + + // ------------------------------------------------------------------ +-// ciMethodHandle::get_adapter ++// ciMethodHandle::get_vmtarget + // +-// Return an adapter for this MethodHandle. 
+-ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) { ++// Return: MH.form -> LF.vmentry -> MN.vmtarget ++ciMethod* ciMethodHandle::get_vmtarget() const { + VM_ENTRY_MARK; +- Handle h(get_oop()); +- methodHandle callee(_callee->get_methodOop()); +- assert(callee->is_method_handle_invoke(), ""); +- oop mt1 = callee->method_handle_type(); +- oop mt2 = java_lang_invoke_MethodHandle::type(h()); +- if (!java_lang_invoke_MethodType::equals(mt1, mt2)) { +- if (PrintMiscellaneous && (Verbose || WizardMode)) { +- tty->print_cr("ciMethodHandle::get_adapter: types not equal"); +- mt1->print(); mt2->print(); +- } +- return NULL; +- } +- // We catch all exceptions here that could happen in the method +- // handle compiler and stop the VM. +- MethodHandleCompiler mhc(h, callee->name(), callee->signature(), _profile.count(), is_invokedynamic, THREAD); +- if (!HAS_PENDING_EXCEPTION) { +- methodHandle m = mhc.compile(THREAD); +- if (!HAS_PENDING_EXCEPTION) { +- return CURRENT_ENV->get_object(m())->as_method(); +- } +- } +- if (PrintMiscellaneous && (Verbose || WizardMode)) { +- tty->print("*** ciMethodHandle::get_adapter => "); +- PENDING_EXCEPTION->print(); +- tty->print("*** get_adapter (%s): ", is_invokedynamic ? "indy" : "mh"); ((ciObject*)this)->print(); +- } +- CLEAR_PENDING_EXCEPTION; +- return NULL; ++ oop form_oop = java_lang_invoke_MethodHandle::form(get_oop()); ++ oop vmentry_oop = java_lang_invoke_LambdaForm::vmentry(form_oop); ++ oop vmtarget_oop = java_lang_invoke_MemberName::vmtarget(vmentry_oop); ++ return CURRENT_ENV->get_object(vmtarget_oop)->as_method(); + } +- +-// ------------------------------------------------------------------ +-// ciMethodHandle::get_adapter +-// +-// Return an adapter for this MethodHandle. +-ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) { +- ciMethod* result = get_adapter_impl(is_invokedynamic); +- if (result) { +- // Fake up the MDO maturity. 
+- ciMethodData* mdo = result->method_data(); +- if (mdo != NULL && _caller->method_data() != NULL && _caller->method_data()->is_mature()) { +- mdo->set_mature(); +- } +- } +- return result; +-} +- +- +-#ifdef ASSERT +-// ------------------------------------------------------------------ +-// ciMethodHandle::print_chain_impl +-// +-// Implementation of the print method. +-void ciMethodHandle::print_chain_impl() { +- ASSERT_IN_VM; +- MethodHandleChain::print(get_oop()); +-} +- +- +-// ------------------------------------------------------------------ +-// ciMethodHandle::print_chain +-// +-// Implementation of the print_chain method. +-void ciMethodHandle::print_chain() { +- GUARDED_VM_ENTRY(print_chain_impl();); +-} +-#endif +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciMethodHandle.hpp +--- openjdk/hotspot/src/share/vm/ci/ciMethodHandle.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/ci/ciMethodHandle.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -25,61 +25,20 @@ + #ifndef SHARE_VM_CI_CIMETHODHANDLE_HPP + #define SHARE_VM_CI_CIMETHODHANDLE_HPP + +-#include "ci/ciCallProfile.hpp" ++#include "ci/ciClassList.hpp" + #include "ci/ciInstance.hpp" +-#include "prims/methodHandles.hpp" + + // ciMethodHandle + // + // The class represents a java.lang.invoke.MethodHandle object. + class ciMethodHandle : public ciInstance { +-private: +- ciMethod* _callee; +- ciMethod* _caller; +- ciCallProfile _profile; +- ciMethod* _method_handle_adapter; +- ciMethod* _invokedynamic_adapter; +- +- // Return an adapter for this MethodHandle. 
+- ciMethod* get_adapter_impl(bool is_invokedynamic); +- ciMethod* get_adapter( bool is_invokedynamic); +- +-protected: +- void print_chain_impl() NOT_DEBUG_RETURN; +- + public: +- ciMethodHandle(instanceHandle h_i) : +- ciInstance(h_i), +- _callee(NULL), +- _caller(NULL), +- _method_handle_adapter(NULL), +- _invokedynamic_adapter(NULL) +- {} ++ ciMethodHandle(instanceHandle h_i) : ciInstance(h_i) {} + + // What kind of ciObject is this? + bool is_method_handle() const { return true; } + +- void set_callee(ciMethod* m) { _callee = m; } +- void set_caller(ciMethod* m) { _caller = m; } +- void set_call_profile(ciCallProfile profile) { _profile = profile; } +- +- // Return an adapter for a MethodHandle call. +- ciMethod* get_method_handle_adapter() { +- if (_method_handle_adapter == NULL) { +- _method_handle_adapter = get_adapter(false); +- } +- return _method_handle_adapter; +- } +- +- // Return an adapter for an invokedynamic call. +- ciMethod* get_invokedynamic_adapter() { +- if (_invokedynamic_adapter == NULL) { +- _invokedynamic_adapter = get_adapter(true); +- } +- return _invokedynamic_adapter; +- } +- +- void print_chain() NOT_DEBUG_RETURN; ++ ciMethod* get_vmtarget() const; + }; + + #endif // SHARE_VM_CI_CIMETHODHANDLE_HPP +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciObject.hpp +--- openjdk/hotspot/src/share/vm/ci/ciObject.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/ci/ciObject.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -138,13 +138,14 @@ + jobject constant_encoding(); + + // What kind of ciObject is this? 
+- virtual bool is_null_object() const { return false; } +- virtual bool is_call_site() const { return false; } +- virtual bool is_cpcache() const { return false; } ++ virtual bool is_null_object() const { return false; } ++ virtual bool is_call_site() const { return false; } ++ virtual bool is_cpcache() const { return false; } + virtual bool is_instance() { return false; } ++ virtual bool is_member_name() const { return false; } + virtual bool is_method() { return false; } + virtual bool is_method_data() { return false; } +- virtual bool is_method_handle() const { return false; } ++ virtual bool is_method_handle() const { return false; } + virtual bool is_array() { return false; } + virtual bool is_obj_array() { return false; } + virtual bool is_type_array() { return false; } +@@ -208,6 +209,10 @@ + assert(is_instance(), "bad cast"); + return (ciInstance*)this; + } ++ ciMemberName* as_member_name() { ++ assert(is_member_name(), "bad cast"); ++ return (ciMemberName*)this; ++ } + ciMethod* as_method() { + assert(is_method(), "bad cast"); + return (ciMethod*)this; +@@ -290,7 +295,8 @@ + } + + // Print debugging output about this ciObject. +- void print(outputStream* st = tty); ++ void print(outputStream* st); ++ void print() { print(tty); } // GDB cannot handle default arguments + + // Print debugging output about the oop this ciObject represents. 
+ void print_oop(outputStream* st = tty); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciObjectFactory.cpp +--- openjdk/hotspot/src/share/vm/ci/ciObjectFactory.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/ci/ciObjectFactory.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -28,6 +28,7 @@ + #include "ci/ciInstance.hpp" + #include "ci/ciInstanceKlass.hpp" + #include "ci/ciInstanceKlassKlass.hpp" ++#include "ci/ciMemberName.hpp" + #include "ci/ciMethod.hpp" + #include "ci/ciMethodData.hpp" + #include "ci/ciMethodHandle.hpp" +@@ -344,6 +345,8 @@ + instanceHandle h_i(THREAD, (instanceOop)o); + if (java_lang_invoke_CallSite::is_instance(o)) + return new (arena()) ciCallSite(h_i); ++ else if (java_lang_invoke_MemberName::is_instance(o)) ++ return new (arena()) ciMemberName(h_i); + else if (java_lang_invoke_MethodHandle::is_instance(o)) + return new (arena()) ciMethodHandle(h_i); + else +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciSignature.hpp +--- openjdk/hotspot/src/share/vm/ci/ciSignature.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/ci/ciSignature.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -39,10 +39,11 @@ + ciKlass* _accessing_klass; + + GrowableArray* _types; +- int _size; +- int _count; ++ int _size; // number of stack slots required for arguments ++ int _count; // number of parameter types in the signature + + friend class ciMethod; ++ friend class ciBytecodeStream; + friend class ciObjectFactory; + + ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* signature); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciStreams.cpp +--- openjdk/hotspot/src/share/vm/ci/ciStreams.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/ci/ciStreams.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -364,6 +364,29 @@ + } + + // ------------------------------------------------------------------ ++// ciBytecodeStream::has_appendix ++// ++// Returns true if there is an appendix argument stored in 
the ++// constant pool cache at the current bci. ++bool ciBytecodeStream::has_appendix() { ++ VM_ENTRY_MARK; ++ constantPoolHandle cpool(_method->get_methodOop()->constants()); ++ return constantPoolOopDesc::has_appendix_at_if_loaded(cpool, get_method_index()); ++} ++ ++// ------------------------------------------------------------------ ++// ciBytecodeStream::get_appendix ++// ++// Return the appendix argument stored in the constant pool cache at ++// the current bci. ++ciObject* ciBytecodeStream::get_appendix() { ++ VM_ENTRY_MARK; ++ constantPoolHandle cpool(_method->get_methodOop()->constants()); ++ oop appendix_oop = constantPoolOopDesc::appendix_at_if_loaded(cpool, get_method_index()); ++ return CURRENT_ENV->get_object(appendix_oop); ++} ++ ++// ------------------------------------------------------------------ + // ciBytecodeStream::get_declared_method_holder + // + // Get the declared holder of the currently referenced method. +@@ -378,9 +401,9 @@ + VM_ENTRY_MARK; + constantPoolHandle cpool(_method->get_methodOop()->constants()); + bool ignore; +- // report as InvokeDynamic for invokedynamic, which is syntactically classless ++ // report as MethodHandle for invokedynamic, which is syntactically classless + if (cur_bc() == Bytecodes::_invokedynamic) +- return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_lang_invoke_InvokeDynamic(), false); ++ return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_lang_invoke_MethodHandle(), false); + return CURRENT_ENV->get_klass_by_index(cpool, get_method_holder_index(), ignore, _holder); + } + +@@ -396,6 +419,24 @@ + } + + // ------------------------------------------------------------------ ++// ciBytecodeStream::get_declared_method_signature ++// ++// Get the declared signature of the currently referenced method. ++// ++// This is always the same as the signature of the resolved method ++// itself, except for _invokehandle and _invokedynamic calls. 
++// ++ciSignature* ciBytecodeStream::get_declared_method_signature() { ++ int sig_index = get_method_signature_index(); ++ VM_ENTRY_MARK; ++ ciEnv* env = CURRENT_ENV; ++ constantPoolHandle cpool(_method->get_methodOop()->constants()); ++ Symbol* sig_sym = cpool->symbol_at(sig_index); ++ ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass(); ++ return new (env->arena()) ciSignature(pool_holder, cpool, env->get_symbol(sig_sym)); ++} ++ ++// ------------------------------------------------------------------ + // ciBytecodeStream::get_method_signature_index + // + // Get the constant pool index of the signature of the method +@@ -434,7 +475,7 @@ + // Get the CallSite from the constant pool cache. + int method_index = get_method_index(); + ConstantPoolCacheEntry* cpcache_entry = cpcache->secondary_entry_at(method_index); +- oop call_site_oop = cpcache_entry->f1(); ++ oop call_site_oop = cpcache_entry->f1_as_instance(); + + // Create a CallSite object and return it. + return CURRENT_ENV->get_object(call_site_oop)->as_call_site(); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciStreams.hpp +--- openjdk/hotspot/src/share/vm/ci/ciStreams.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/ci/ciStreams.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -259,8 +259,11 @@ + + // If this is a method invocation bytecode, get the invoked method. 
+ ciMethod* get_method(bool& will_link); ++ bool has_appendix(); ++ ciObject* get_appendix(); + ciKlass* get_declared_method_holder(); + int get_method_holder_index(); ++ ciSignature* get_declared_method_signature(); + int get_method_signature_index(); + + ciCPCache* get_cpcache() const; +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciSymbol.cpp +--- openjdk/hotspot/src/share/vm/ci/ciSymbol.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/ci/ciSymbol.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -83,6 +83,10 @@ + GUARDED_VM_ENTRY(return get_symbol()->starts_with(prefix, len);) + } + ++bool ciSymbol::is_signature_polymorphic_name() const { ++ GUARDED_VM_ENTRY(return MethodHandles::is_signature_polymorphic_name(get_symbol());) ++} ++ + // ------------------------------------------------------------------ + // ciSymbol::index_of + // +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciSymbol.hpp +--- openjdk/hotspot/src/share/vm/ci/ciSymbol.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/ci/ciSymbol.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -107,6 +107,8 @@ + + // Are two ciSymbols equal? + bool equals(ciSymbol* obj) { return this->_symbol == obj->get_symbol(); } ++ ++ bool is_signature_polymorphic_name() const; + }; + + #endif // SHARE_VM_CI_CISYMBOL_HPP +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciTypeFlow.cpp +--- openjdk/hotspot/src/share/vm/ci/ciTypeFlow.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/ci/ciTypeFlow.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -643,9 +643,9 @@ + // ------------------------------------------------------------------ + // ciTypeFlow::StateVector::do_invoke + void ciTypeFlow::StateVector::do_invoke(ciBytecodeStream* str, +- bool has_receiver) { ++ bool has_receiver_foo) { + bool will_link; +- ciMethod* method = str->get_method(will_link); ++ ciMethod* callee = str->get_method(will_link); + if (!will_link) { + // We weren't able to find the method. 
+ if (str->cur_bc() == Bytecodes::_invokedynamic) { +@@ -654,12 +654,24 @@ + (Deoptimization::Reason_uninitialized, + Deoptimization::Action_reinterpret)); + } else { +- ciKlass* unloaded_holder = method->holder(); ++ ciKlass* unloaded_holder = callee->holder(); + trap(str, unloaded_holder, str->get_method_holder_index()); + } + } else { +- ciSignature* signature = method->signature(); ++ // TODO Use Bytecode_invoke after metadata changes. ++ //Bytecode_invoke inv(str->method(), str->cur_bci()); ++ //const bool has_receiver = callee->is_loaded() ? !callee->is_static() : inv.has_receiver(); ++ Bytecode inv(str); ++ Bytecodes::Code code = inv.invoke_code(); ++ const bool has_receiver = callee->is_loaded() ? !callee->is_static() : code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic; ++ ++ ciSignature* signature = callee->signature(); + ciSignatureStream sigstr(signature); ++ // Push appendix argument, if one. ++ if (str->has_appendix()) { ++ ciObject* appendix = str->get_appendix(); ++ push_object(appendix->klass()); ++ } + int arg_size = signature->size(); + int stack_base = stack_size() - arg_size; + int i = 0; +@@ -677,6 +689,7 @@ + for (int j = 0; j < arg_size; j++) { + pop(); + } ++ assert(!callee->is_loaded() || has_receiver == !callee->is_static(), "mismatch"); + if (has_receiver) { + // Check this? 
+ pop_object(); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/classfile/classFileParser.cpp +--- openjdk/hotspot/src/share/vm/classfile/classFileParser.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/classfile/classFileParser.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -2128,12 +2128,6 @@ + _has_vanilla_constructor = true; + } + +- if (EnableInvokeDynamic && (m->is_method_handle_invoke() || +- m->is_method_handle_adapter())) { +- THROW_MSG_(vmSymbols::java_lang_VirtualMachineError(), +- "Method handle invokers must be defined internally to the VM", nullHandle); +- } +- + return m; + } + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/classfile/javaClasses.cpp +--- openjdk/hotspot/src/share/vm/classfile/javaClasses.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/classfile/javaClasses.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -126,6 +126,13 @@ + if (!find_field(ik, name_symbol, signature_symbol, &fd, allow_super)) { + ResourceMark rm; + tty->print_cr("Invalid layout of %s at %s", ik->external_name(), name_symbol->as_C_string()); ++#ifndef PRODUCT ++ klass_oop->print(); ++ tty->print_cr("all fields:"); ++ for (AllFieldStream fs(instanceKlass::cast(klass_oop)); !fs.done(); fs.next()) { ++ tty->print_cr(" name: %s, sig: %s, flags: %08x", fs.name()->as_C_string(), fs.signature()->as_C_string(), fs.access_flags().as_int()); ++ } ++#endif //PRODUCT + fatal("Invalid layout of preloaded class"); + } + dest_offset = fd.offset(); +@@ -1455,6 +1462,7 @@ + nmethod* nm = NULL; + bool skip_fillInStackTrace_check = false; + bool skip_throwableInit_check = false; ++ bool skip_hidden = false; + + for (frame fr = thread->last_frame(); max_depth != total_count;) { + methodOop method = NULL; +@@ -1534,6 +1542,12 @@ + skip_throwableInit_check = true; + } + } ++ if (method->is_hidden()) { ++ if (skip_hidden) continue; ++ } else { ++ // start skipping hidden frames after first non-hidden frame ++ skip_hidden = !ShowHiddenFrames; ++ } + 
bt.push(method, bci, CHECK); + total_count++; + } +@@ -1724,6 +1738,8 @@ + java_lang_StackTraceElement::set_methodName(element(), methodname); + // Fill in source file name + Symbol* source = instanceKlass::cast(method->method_holder())->source_file_name(); ++ if (ShowHiddenFrames && source == NULL) ++ source = vmSymbols::unknown_class_name(); + oop filename = StringTable::intern(source, CHECK_0); + java_lang_StackTraceElement::set_fileName(element(), filename); + // File in source line number +@@ -1736,6 +1752,9 @@ + } else { + // Returns -1 if no LineNumberTable, and otherwise actual line number + line_number = method->line_number_from_bci(bci); ++ if (line_number == -1 && ShowHiddenFrames) { ++ line_number = bci + 1000000; ++ } + } + java_lang_StackTraceElement::set_lineNumber(element(), line_number); + +@@ -2377,8 +2396,7 @@ + // Support for java_lang_invoke_MethodHandle + + int java_lang_invoke_MethodHandle::_type_offset; +-int java_lang_invoke_MethodHandle::_vmtarget_offset; +-int java_lang_invoke_MethodHandle::_vmentry_offset; ++int java_lang_invoke_MethodHandle::_form_offset; + + int java_lang_invoke_MemberName::_clazz_offset; + int java_lang_invoke_MemberName::_name_offset; +@@ -2387,21 +2405,16 @@ + int java_lang_invoke_MemberName::_vmtarget_offset; + int java_lang_invoke_MemberName::_vmindex_offset; + +-int java_lang_invoke_DirectMethodHandle::_vmindex_offset; +- +-int java_lang_invoke_BoundMethodHandle::_argument_offset; +-int java_lang_invoke_BoundMethodHandle::_vmargslot_offset; +- +-int java_lang_invoke_AdapterMethodHandle::_conversion_offset; +- +-int java_lang_invoke_CountingMethodHandle::_vmcount_offset; ++int java_lang_invoke_LambdaForm::_vmentry_offset; + + void java_lang_invoke_MethodHandle::compute_offsets() { + klassOop klass_oop = SystemDictionary::MethodHandle_klass(); + if (klass_oop != NULL && EnableInvokeDynamic) { +- bool allow_super = false; +- compute_offset(_type_offset, klass_oop, vmSymbols::type_name(), 
vmSymbols::java_lang_invoke_MethodType_signature(), allow_super); +- METHODHANDLE_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET); ++ compute_offset(_type_offset, klass_oop, vmSymbols::type_name(), vmSymbols::java_lang_invoke_MethodType_signature()); ++ compute_optional_offset(_form_offset, klass_oop, vmSymbols::form_name(), vmSymbols::java_lang_invoke_LambdaForm_signature()); ++ if (_form_offset == 0) { ++ EnableInvokeDynamic = false; ++ } + } + } + +@@ -2412,50 +2425,17 @@ + compute_offset(_name_offset, klass_oop, vmSymbols::name_name(), vmSymbols::string_signature()); + compute_offset(_type_offset, klass_oop, vmSymbols::type_name(), vmSymbols::object_signature()); + compute_offset(_flags_offset, klass_oop, vmSymbols::flags_name(), vmSymbols::int_signature()); +- compute_offset(_vmindex_offset, klass_oop, vmSymbols::vmindex_name(), vmSymbols::int_signature()); + MEMBERNAME_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET); + } + } + +-void java_lang_invoke_DirectMethodHandle::compute_offsets() { +- klassOop k = SystemDictionary::DirectMethodHandle_klass(); +- if (k != NULL && EnableInvokeDynamic) { +- DIRECTMETHODHANDLE_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET); ++void java_lang_invoke_LambdaForm::compute_offsets() { ++ klassOop klass_oop = SystemDictionary::LambdaForm_klass(); ++ if (klass_oop != NULL && EnableInvokeDynamic) { ++ compute_offset(_vmentry_offset, klass_oop, vmSymbols::vmentry_name(), vmSymbols::java_lang_invoke_MemberName_signature()); + } + } + +-void java_lang_invoke_BoundMethodHandle::compute_offsets() { +- klassOop k = SystemDictionary::BoundMethodHandle_klass(); +- if (k != NULL && EnableInvokeDynamic) { +- compute_offset(_vmargslot_offset, k, vmSymbols::vmargslot_name(), vmSymbols::int_signature(), true); +- compute_offset(_argument_offset, k, vmSymbols::argument_name(), vmSymbols::object_signature(), true); +- } +-} +- +-void java_lang_invoke_AdapterMethodHandle::compute_offsets() { +- klassOop k = 
SystemDictionary::AdapterMethodHandle_klass(); +- if (k != NULL && EnableInvokeDynamic) { +- compute_offset(_conversion_offset, k, vmSymbols::conversion_name(), vmSymbols::int_signature(), true); +- } +-} +- +-void java_lang_invoke_CountingMethodHandle::compute_offsets() { +- klassOop k = SystemDictionary::CountingMethodHandle_klass(); +- if (k != NULL && EnableInvokeDynamic) { +- compute_offset(_vmcount_offset, k, vmSymbols::vmcount_name(), vmSymbols::int_signature(), true); +- } +-} +- +-int java_lang_invoke_CountingMethodHandle::vmcount(oop mh) { +- assert(is_instance(mh), "CMH only"); +- return mh->int_field(_vmcount_offset); +-} +- +-void java_lang_invoke_CountingMethodHandle::set_vmcount(oop mh, int count) { +- assert(is_instance(mh), "CMH only"); +- mh->int_field_put(_vmcount_offset, count); +-} +- + oop java_lang_invoke_MethodHandle::type(oop mh) { + return mh->obj_field(_type_offset); + } +@@ -2464,31 +2444,14 @@ + mh->obj_field_put(_type_offset, mtype); + } + +-// fetch type.form.vmslots, which is the number of JVM stack slots +-// required to carry the arguments of this MH +-int java_lang_invoke_MethodHandle::vmslots(oop mh) { +- oop mtype = type(mh); +- if (mtype == NULL) return 0; // Java code would get NPE +- oop form = java_lang_invoke_MethodType::form(mtype); +- if (form == NULL) return 0; // Java code would get NPE +- return java_lang_invoke_MethodTypeForm::vmslots(form); ++oop java_lang_invoke_MethodHandle::form(oop mh) { ++ assert(_form_offset != 0, ""); ++ return mh->obj_field(_form_offset); + } + +-// fetch the low-level entry point for this mh +-MethodHandleEntry* java_lang_invoke_MethodHandle::vmentry(oop mh) { +- return (MethodHandleEntry*) mh->address_field(_vmentry_offset); +-} +- +-void java_lang_invoke_MethodHandle::set_vmentry(oop mh, MethodHandleEntry* me) { +- assert(_vmentry_offset != 0, "must be present"); +- +- // This is always the final step that initializes a valid method handle: +- mh->release_address_field_put(_vmentry_offset, 
(address) me); +- +- // There should be enough memory barriers on exit from native methods +- // to ensure that the MH is fully initialized to all threads before +- // Java code can publish it in global data structures. +- // But just in case, we use release_address_field_put. ++void java_lang_invoke_MethodHandle::set_form(oop mh, oop lform) { ++ assert(_form_offset != 0, ""); ++ mh->obj_field_put(_form_offset, lform); + } + + /// MemberName accessors +@@ -2540,57 +2503,40 @@ + + void java_lang_invoke_MemberName::set_vmtarget(oop mname, oop ref) { + assert(is_instance(mname), "wrong type"); ++#ifdef ASSERT ++ // check the type of the vmtarget ++ if (ref != NULL) { ++ switch (flags(mname) & (MN_IS_METHOD | ++ MN_IS_CONSTRUCTOR | ++ MN_IS_FIELD)) { ++ case MN_IS_METHOD: ++ case MN_IS_CONSTRUCTOR: ++ assert(ref->is_method(), "should be a method"); ++ break; ++ case MN_IS_FIELD: ++ assert(ref->is_klass(), "should be a class"); ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ } ++#endif //ASSERT + mname->obj_field_put(_vmtarget_offset, ref); + } + +-int java_lang_invoke_MemberName::vmindex(oop mname) { ++intptr_t java_lang_invoke_MemberName::vmindex(oop mname) { + assert(is_instance(mname), "wrong type"); +- return mname->int_field(_vmindex_offset); ++ return (intptr_t) mname->address_field(_vmindex_offset); + } + +-void java_lang_invoke_MemberName::set_vmindex(oop mname, int index) { ++void java_lang_invoke_MemberName::set_vmindex(oop mname, intptr_t index) { + assert(is_instance(mname), "wrong type"); +- mname->int_field_put(_vmindex_offset, index); ++ mname->address_field_put(_vmindex_offset, (address) index); + } + +-oop java_lang_invoke_MethodHandle::vmtarget(oop mh) { +- assert(is_instance(mh), "MH only"); +- return mh->obj_field(_vmtarget_offset); +-} +- +-void java_lang_invoke_MethodHandle::set_vmtarget(oop mh, oop ref) { +- assert(is_instance(mh), "MH only"); +- mh->obj_field_put(_vmtarget_offset, ref); +-} +- +-int 
java_lang_invoke_DirectMethodHandle::vmindex(oop mh) { +- assert(is_instance(mh), "DMH only"); +- return mh->int_field(_vmindex_offset); +-} +- +-void java_lang_invoke_DirectMethodHandle::set_vmindex(oop mh, int index) { +- assert(is_instance(mh), "DMH only"); +- mh->int_field_put(_vmindex_offset, index); +-} +- +-int java_lang_invoke_BoundMethodHandle::vmargslot(oop mh) { +- assert(is_instance(mh), "BMH only"); +- return mh->int_field(_vmargslot_offset); +-} +- +-oop java_lang_invoke_BoundMethodHandle::argument(oop mh) { +- assert(is_instance(mh), "BMH only"); +- return mh->obj_field(_argument_offset); +-} +- +-int java_lang_invoke_AdapterMethodHandle::conversion(oop mh) { +- assert(is_instance(mh), "AMH only"); +- return mh->int_field(_conversion_offset); +-} +- +-void java_lang_invoke_AdapterMethodHandle::set_conversion(oop mh, int conv) { +- assert(is_instance(mh), "AMH only"); +- mh->int_field_put(_conversion_offset, conv); ++oop java_lang_invoke_LambdaForm::vmentry(oop lform) { ++ assert(is_instance(lform), "wrong type"); ++ return lform->obj_field(_vmentry_offset); + } + + +@@ -2598,14 +2544,12 @@ + + int java_lang_invoke_MethodType::_rtype_offset; + int java_lang_invoke_MethodType::_ptypes_offset; +-int java_lang_invoke_MethodType::_form_offset; + + void java_lang_invoke_MethodType::compute_offsets() { + klassOop k = SystemDictionary::MethodType_klass(); + if (k != NULL) { + compute_offset(_rtype_offset, k, vmSymbols::rtype_name(), vmSymbols::class_signature()); + compute_offset(_ptypes_offset, k, vmSymbols::ptypes_name(), vmSymbols::class_array_signature()); +- compute_offset(_form_offset, k, vmSymbols::form_name(), vmSymbols::java_lang_invoke_MethodTypeForm_signature()); + } + } + +@@ -2635,6 +2579,8 @@ + } + + bool java_lang_invoke_MethodType::equals(oop mt1, oop mt2) { ++ if (mt1 == mt2) ++ return true; + if (rtype(mt1) != rtype(mt2)) + return false; + if (ptype_count(mt1) != ptype_count(mt2)) +@@ -2656,11 +2602,6 @@ + return (objArrayOop) 
mt->obj_field(_ptypes_offset); + } + +-oop java_lang_invoke_MethodType::form(oop mt) { +- assert(is_instance(mt), "must be a MethodType"); +- return mt->obj_field(_form_offset); +-} +- + oop java_lang_invoke_MethodType::ptype(oop mt, int idx) { + return ptypes(mt)->obj_at(idx); + } +@@ -2669,62 +2610,20 @@ + return ptypes(mt)->length(); + } + +- +- +-// Support for java_lang_invoke_MethodTypeForm +- +-int java_lang_invoke_MethodTypeForm::_vmslots_offset; +-int java_lang_invoke_MethodTypeForm::_vmlayout_offset; +-int java_lang_invoke_MethodTypeForm::_erasedType_offset; +-int java_lang_invoke_MethodTypeForm::_genericInvoker_offset; +- +-void java_lang_invoke_MethodTypeForm::compute_offsets() { +- klassOop k = SystemDictionary::MethodTypeForm_klass(); +- if (k != NULL) { +- compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), true); +- compute_optional_offset(_vmlayout_offset, k, vmSymbols::vmlayout_name(), vmSymbols::object_signature()); +- compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_lang_invoke_MethodType_signature(), true); +- compute_optional_offset(_genericInvoker_offset, k, vmSymbols::genericInvoker_name(), vmSymbols::java_lang_invoke_MethodHandle_signature(), true); +- if (_genericInvoker_offset == 0) _genericInvoker_offset = -1; // set to explicit "empty" value +- METHODTYPEFORM_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET); ++int java_lang_invoke_MethodType::ptype_slot_count(oop mt) { ++ objArrayOop pts = ptypes(mt); ++ int count = pts->length(); ++ int slots = 0; ++ for (int i = 0; i < count; i++) { ++ BasicType bt = java_lang_Class::as_BasicType(pts->obj_at(i)); ++ slots += type2size[bt]; + } ++ return slots; + } + +-int java_lang_invoke_MethodTypeForm::vmslots(oop mtform) { +- assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); +- assert(_vmslots_offset > 0, ""); +- return mtform->int_field(_vmslots_offset); +-} +- +-oop 
java_lang_invoke_MethodTypeForm::vmlayout(oop mtform) { +- assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); +- assert(_vmlayout_offset > 0, ""); +- return mtform->obj_field(_vmlayout_offset); +-} +- +-oop java_lang_invoke_MethodTypeForm::init_vmlayout(oop mtform, oop cookie) { +- assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); +- oop previous = vmlayout(mtform); +- if (previous != NULL) { +- return previous; // someone else beat us to it +- } +- HeapWord* cookie_addr = (HeapWord*) mtform->obj_field_addr(_vmlayout_offset); +- OrderAccess::storestore(); // make sure our copy is fully committed +- previous = oopDesc::atomic_compare_exchange_oop(cookie, cookie_addr, previous); +- if (previous != NULL) { +- return previous; // someone else beat us to it +- } +- return cookie; +-} +- +-oop java_lang_invoke_MethodTypeForm::erasedType(oop mtform) { +- assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); +- return mtform->obj_field(_erasedType_offset); +-} +- +-oop java_lang_invoke_MethodTypeForm::genericInvoker(oop mtform) { +- assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); +- return mtform->obj_field(_genericInvoker_offset); ++int java_lang_invoke_MethodType::rtype_slot_count(oop mt) { ++ BasicType bt = java_lang_Class::as_BasicType(rtype(mt)); ++ return type2size[bt]; + } + + +@@ -2825,10 +2724,26 @@ + } + + oop java_lang_ClassLoader::parent(oop loader) { +- assert(loader->is_oop(), "loader must be oop"); ++ assert(is_instance(loader), "loader must be oop"); + return loader->obj_field(parent_offset); + } + ++bool java_lang_ClassLoader::isAncestor(oop loader, oop cl) { ++ assert(is_instance(loader), "loader must be oop"); ++ assert(cl == NULL || is_instance(cl), "cl argument must be oop"); ++ oop acl = loader; ++ debug_only(jint loop_count = 0); ++ // This loop taken verbatim from ClassLoader.java: ++ do { ++ acl = parent(acl); ++ if 
(cl == acl) { ++ return true; ++ } ++ assert(++loop_count > 0, "loop_count overflow"); ++ } while (acl != NULL); ++ return false; ++} ++ + + // For class loader classes, parallelCapable defined + // based on non-null field +@@ -3115,13 +3030,9 @@ + if (EnableInvokeDynamic) { + java_lang_invoke_MethodHandle::compute_offsets(); + java_lang_invoke_MemberName::compute_offsets(); +- java_lang_invoke_DirectMethodHandle::compute_offsets(); +- java_lang_invoke_BoundMethodHandle::compute_offsets(); +- java_lang_invoke_AdapterMethodHandle::compute_offsets(); ++ java_lang_invoke_LambdaForm::compute_offsets(); + java_lang_invoke_MethodType::compute_offsets(); +- java_lang_invoke_MethodTypeForm::compute_offsets(); + java_lang_invoke_CallSite::compute_offsets(); +- java_lang_invoke_CountingMethodHandle::compute_offsets(); + } + java_security_AccessControlContext::compute_offsets(); + // Initialize reflection classes. The layouts of these classes +@@ -3351,7 +3262,14 @@ + } + } + ResourceMark rm; +- tty->print_cr("Invalid layout of %s at %s", instanceKlass::cast(klass_oop)->external_name(), name()->as_C_string()); ++ tty->print_cr("Invalid layout of %s at %s/%s%s", instanceKlass::cast(klass_oop)->external_name(), name()->as_C_string(), signature()->as_C_string(), may_be_java ? 
" (may_be_java)" : ""); ++#ifndef PRODUCT ++ klass_oop->print(); ++ tty->print_cr("all fields:"); ++ for (AllFieldStream fs(instanceKlass::cast(klass_oop)); !fs.done(); fs.next()) { ++ tty->print_cr(" name: %s, sig: %s, flags: %08x", fs.name()->as_C_string(), fs.signature()->as_C_string(), fs.access_flags().as_int()); ++ } ++#endif //PRODUCT + fatal("Invalid layout of preloaded class"); + return -1; + } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/classfile/javaClasses.hpp +--- openjdk/hotspot/src/share/vm/classfile/javaClasses.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/classfile/javaClasses.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -883,19 +883,14 @@ + + // Interface to java.lang.invoke.MethodHandle objects + +-#define METHODHANDLE_INJECTED_FIELDS(macro) \ +- macro(java_lang_invoke_MethodHandle, vmentry, intptr_signature, false) \ +- macro(java_lang_invoke_MethodHandle, vmtarget, object_signature, true) +- + class MethodHandleEntry; + + class java_lang_invoke_MethodHandle: AllStatic { + friend class JavaClasses; + + private: +- static int _vmentry_offset; // assembly code trampoline for MH +- static int _vmtarget_offset; // class-specific target reference +- static int _type_offset; // the MethodType of this MH ++ static int _type_offset; // the MethodType of this MH ++ static int _form_offset; // the LambdaForm of this MH + + static void compute_offsets(); + +@@ -904,13 +899,8 @@ + static oop type(oop mh); + static void set_type(oop mh, oop mtype); + +- static oop vmtarget(oop mh); +- static void set_vmtarget(oop mh, oop target); +- +- static MethodHandleEntry* vmentry(oop mh); +- static void set_vmentry(oop mh, MethodHandleEntry* data); +- +- static int vmslots(oop mh); ++ static oop form(oop mh); ++ static void set_form(oop mh, oop lform); + + // Testers + static bool is_subclass(klassOop klass) { +@@ -922,149 +912,45 @@ + + // Accessors for code generation: + static int type_offset_in_bytes() { return _type_offset; } +- static int 
vmtarget_offset_in_bytes() { return _vmtarget_offset; } +- static int vmentry_offset_in_bytes() { return _vmentry_offset; } ++ static int form_offset_in_bytes() { return _form_offset; } + }; + +-#define DIRECTMETHODHANDLE_INJECTED_FIELDS(macro) \ +- macro(java_lang_invoke_DirectMethodHandle, vmindex, int_signature, true) ++// Interface to java.lang.invoke.LambdaForm objects ++// (These are a private interface for managing adapter code generation.) + +-class java_lang_invoke_DirectMethodHandle: public java_lang_invoke_MethodHandle { ++class java_lang_invoke_LambdaForm: AllStatic { + friend class JavaClasses; + + private: +- static int _vmindex_offset; // negative or vtable idx or itable idx ++ static int _vmentry_offset; // type is MemberName ++ + static void compute_offsets(); + + public: + // Accessors +- static int vmindex(oop mh); +- static void set_vmindex(oop mh, int index); ++ static oop vmentry(oop lform); ++ static void set_vmentry(oop lform, oop invoker); + + // Testers + static bool is_subclass(klassOop klass) { +- return Klass::cast(klass)->is_subclass_of(SystemDictionary::DirectMethodHandle_klass()); ++ return SystemDictionary::LambdaForm_klass() != NULL && ++ Klass::cast(klass)->is_subclass_of(SystemDictionary::LambdaForm_klass()); + } + static bool is_instance(oop obj) { + return obj != NULL && is_subclass(obj->klass()); + } + + // Accessors for code generation: +- static int vmindex_offset_in_bytes() { return _vmindex_offset; } ++ static int vmentry_offset_in_bytes() { return _vmentry_offset; } + }; + +-class java_lang_invoke_BoundMethodHandle: public java_lang_invoke_MethodHandle { +- friend class JavaClasses; +- +- private: +- static int _argument_offset; // argument value bound into this MH +- static int _vmargslot_offset; // relevant argument slot (<= vmslots) +- static void compute_offsets(); +- +-public: +- static oop argument(oop mh); +- static void set_argument(oop mh, oop ref); +- +- static jint vmargslot(oop mh); +- static void 
set_vmargslot(oop mh, jint slot); +- +- // Testers +- static bool is_subclass(klassOop klass) { +- return Klass::cast(klass)->is_subclass_of(SystemDictionary::BoundMethodHandle_klass()); +- } +- static bool is_instance(oop obj) { +- return obj != NULL && is_subclass(obj->klass()); +- } +- +- static int argument_offset_in_bytes() { return _argument_offset; } +- static int vmargslot_offset_in_bytes() { return _vmargslot_offset; } +-}; +- +-class java_lang_invoke_AdapterMethodHandle: public java_lang_invoke_BoundMethodHandle { +- friend class JavaClasses; +- +- private: +- static int _conversion_offset; // type of conversion to apply +- static void compute_offsets(); +- +- public: +- static int conversion(oop mh); +- static void set_conversion(oop mh, int conv); +- +- // Testers +- static bool is_subclass(klassOop klass) { +- return Klass::cast(klass)->is_subclass_of(SystemDictionary::AdapterMethodHandle_klass()); +- } +- static bool is_instance(oop obj) { +- return obj != NULL && is_subclass(obj->klass()); +- } +- +- // Relevant integer codes (keep these in synch. 
with MethodHandleNatives.Constants): +- enum { +- OP_RETYPE_ONLY = 0x0, // no argument changes; straight retype +- OP_RETYPE_RAW = 0x1, // straight retype, trusted (void->int, Object->T) +- OP_CHECK_CAST = 0x2, // ref-to-ref conversion; requires a Class argument +- OP_PRIM_TO_PRIM = 0x3, // converts from one primitive to another +- OP_REF_TO_PRIM = 0x4, // unboxes a wrapper to produce a primitive +- OP_PRIM_TO_REF = 0x5, // boxes a primitive into a wrapper +- OP_SWAP_ARGS = 0x6, // swap arguments (vminfo is 2nd arg) +- OP_ROT_ARGS = 0x7, // rotate arguments (vminfo is displaced arg) +- OP_DUP_ARGS = 0x8, // duplicates one or more arguments (at TOS) +- OP_DROP_ARGS = 0x9, // remove one or more argument slots +- OP_COLLECT_ARGS = 0xA, // combine arguments using an auxiliary function +- OP_SPREAD_ARGS = 0xB, // expand in place a varargs array (of known size) +- OP_FOLD_ARGS = 0xC, // combine but do not remove arguments; prepend result +- //OP_UNUSED_13 = 0xD, // unused code, perhaps for reified argument lists +- CONV_OP_LIMIT = 0xE, // limit of CONV_OP enumeration +- +- CONV_OP_MASK = 0xF00, // this nybble contains the conversion op field +- CONV_TYPE_MASK = 0x0F, // fits T_ADDRESS and below +- CONV_VMINFO_MASK = 0x0FF, // LSB is reserved for JVM use +- CONV_VMINFO_SHIFT = 0, // position of bits in CONV_VMINFO_MASK +- CONV_OP_SHIFT = 8, // position of bits in CONV_OP_MASK +- CONV_DEST_TYPE_SHIFT = 12, // byte 2 has the adapter BasicType (if needed) +- CONV_SRC_TYPE_SHIFT = 16, // byte 2 has the source BasicType (if needed) +- CONV_STACK_MOVE_SHIFT = 20, // high 12 bits give signed SP change +- CONV_STACK_MOVE_MASK = (1 << (32 - CONV_STACK_MOVE_SHIFT)) - 1 +- }; +- +- static int conversion_offset_in_bytes() { return _conversion_offset; } +-}; +- +- +-// A simple class that maintains an invocation count +-class java_lang_invoke_CountingMethodHandle: public java_lang_invoke_MethodHandle { +- friend class JavaClasses; +- +- private: +- static int _vmcount_offset; +- 
static void compute_offsets(); +- +- public: +- // Accessors +- static int vmcount(oop mh); +- static void set_vmcount(oop mh, int count); +- +- // Testers +- static bool is_subclass(klassOop klass) { +- return SystemDictionary::CountingMethodHandle_klass() != NULL && +- Klass::cast(klass)->is_subclass_of(SystemDictionary::CountingMethodHandle_klass()); +- } +- static bool is_instance(oop obj) { +- return obj != NULL && is_subclass(obj->klass()); +- } +- +- // Accessors for code generation: +- static int vmcount_offset_in_bytes() { return _vmcount_offset; } +-}; +- +- + + // Interface to java.lang.invoke.MemberName objects + // (These are a private interface for Java code to query the class hierarchy.) + +-#define MEMBERNAME_INJECTED_FIELDS(macro) \ +- macro(java_lang_invoke_MemberName, vmtarget, object_signature, true) ++#define MEMBERNAME_INJECTED_FIELDS(macro) \ ++ macro(java_lang_invoke_MemberName, vmindex, intptr_signature, false) \ ++ macro(java_lang_invoke_MemberName, vmtarget, object_signature, false) + + class java_lang_invoke_MemberName: AllStatic { + friend class JavaClasses; +@@ -1076,7 +962,7 @@ + // private Object type; // may be null if not yet materialized + // private int flags; // modifier bits; see reflect.Modifier + // private Object vmtarget; // VM-specific target value +- // private int vmindex; // method index within class or interface ++ // private intptr_t vmindex; // member index within class or interface + static int _clazz_offset; + static int _name_offset; + static int _type_offset; +@@ -1100,15 +986,11 @@ + static int flags(oop mname); + static void set_flags(oop mname, int flags); + +- static int modifiers(oop mname) { return (u2) flags(mname); } +- static void set_modifiers(oop mname, int mods) +- { set_flags(mname, (flags(mname) &~ (u2)-1) | (u2)mods); } +- + static oop vmtarget(oop mname); + static void set_vmtarget(oop mname, oop target); + +- static int vmindex(oop mname); +- static void set_vmindex(oop mname, int index); ++ 
static intptr_t vmindex(oop mname); ++ static void set_vmindex(oop mname, intptr_t index); + + // Testers + static bool is_subclass(klassOop klass) { +@@ -1124,9 +1006,11 @@ + MN_IS_CONSTRUCTOR = 0x00020000, // constructor + MN_IS_FIELD = 0x00040000, // field + MN_IS_TYPE = 0x00080000, // nested type +- MN_SEARCH_SUPERCLASSES = 0x00100000, // for MHN.getMembers +- MN_SEARCH_INTERFACES = 0x00200000, // for MHN.getMembers +- VM_INDEX_UNINITIALIZED = -99 ++ MN_REFERENCE_KIND_SHIFT = 24, // refKind ++ MN_REFERENCE_KIND_MASK = 0x0F000000 >> MN_REFERENCE_KIND_SHIFT, ++ // The SEARCH_* bits are not for MN.flags but for the matchFlags argument of MHN.getMembers: ++ MN_SEARCH_SUPERCLASSES = 0x00100000, // walk super classes ++ MN_SEARCH_INTERFACES = 0x00200000 // walk implemented interfaces + }; + + // Accessors for code generation: +@@ -1147,7 +1031,6 @@ + private: + static int _rtype_offset; + static int _ptypes_offset; +- static int _form_offset; + + static void compute_offsets(); + +@@ -1155,11 +1038,13 @@ + // Accessors + static oop rtype(oop mt); + static objArrayOop ptypes(oop mt); +- static oop form(oop mt); + + static oop ptype(oop mt, int index); + static int ptype_count(oop mt); + ++ static int ptype_slot_count(oop mt); // extra counts for long/double ++ static int rtype_slot_count(oop mt); // extra counts for long/double ++ + static Symbol* as_signature(oop mt, bool intern_if_not_found, TRAPS); + static void print_signature(oop mt, outputStream* st); + +@@ -1172,40 +1057,6 @@ + // Accessors for code generation: + static int rtype_offset_in_bytes() { return _rtype_offset; } + static int ptypes_offset_in_bytes() { return _ptypes_offset; } +- static int form_offset_in_bytes() { return _form_offset; } +-}; +- +-#define METHODTYPEFORM_INJECTED_FIELDS(macro) \ +- macro(java_lang_invoke_MethodTypeForm, vmslots, int_signature, true) \ +- macro(java_lang_invoke_MethodTypeForm, vmlayout, object_signature, true) +- +-class java_lang_invoke_MethodTypeForm: AllStatic { +- 
friend class JavaClasses; +- +- private: +- static int _vmslots_offset; // number of argument slots needed +- static int _vmlayout_offset; // object describing internal calling sequence +- static int _erasedType_offset; // erasedType = canonical MethodType +- static int _genericInvoker_offset; // genericInvoker = adapter for invokeGeneric +- +- static void compute_offsets(); +- +- public: +- // Accessors +- static int vmslots(oop mtform); +- static void set_vmslots(oop mtform, int vmslots); +- +- static oop erasedType(oop mtform); +- static oop genericInvoker(oop mtform); +- +- static oop vmlayout(oop mtform); +- static oop init_vmlayout(oop mtform, oop cookie); +- +- // Accessors for code generation: +- static int vmslots_offset_in_bytes() { return _vmslots_offset; } +- static int vmlayout_offset_in_bytes() { return _vmlayout_offset; } +- static int erasedType_offset_in_bytes() { return _erasedType_offset; } +- static int genericInvoker_offset_in_bytes() { return _genericInvoker_offset; } + }; + + +@@ -1278,6 +1129,7 @@ + + public: + static oop parent(oop loader); ++ static bool isAncestor(oop loader, oop cl); + + // Support for parallelCapable field + static bool parallelCapable(oop the_class_mirror); +@@ -1287,6 +1139,14 @@ + // Fix for 4474172 + static oop non_reflection_class_loader(oop loader); + ++ // Testers ++ static bool is_subclass(klassOop klass) { ++ return Klass::cast(klass)->is_subclass_of(SystemDictionary::ClassLoader_klass()); ++ } ++ static bool is_instance(oop obj) { ++ return obj != NULL && is_subclass(obj->klass()); ++ } ++ + // Debugging + friend class JavaClasses; + }; +@@ -1441,10 +1301,7 @@ + + #define ALL_INJECTED_FIELDS(macro) \ + CLASS_INJECTED_FIELDS(macro) \ +- METHODHANDLE_INJECTED_FIELDS(macro) \ +- DIRECTMETHODHANDLE_INJECTED_FIELDS(macro) \ +- MEMBERNAME_INJECTED_FIELDS(macro) \ +- METHODTYPEFORM_INJECTED_FIELDS(macro) ++ MEMBERNAME_INJECTED_FIELDS(macro) + + // Interface to hard-coded offset checking + +diff -r 3442eb7ef2d2 -r 
19ac51ce4be7 src/share/vm/classfile/symbolTable.hpp +--- openjdk/hotspot/src/share/vm/classfile/symbolTable.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/classfile/symbolTable.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -57,12 +57,15 @@ + + // Operator= increments reference count. + void operator=(const TempNewSymbol &s) { ++ //clear(); //FIXME + _temp = s._temp; + if (_temp !=NULL) _temp->increment_refcount(); + } + + // Decrement reference counter so it can go away if it's unique +- ~TempNewSymbol() { if (_temp != NULL) _temp->decrement_refcount(); } ++ void clear() { if (_temp != NULL) _temp->decrement_refcount(); _temp = NULL; } ++ ++ ~TempNewSymbol() { clear(); } + + // Operators so they can be used like Symbols + Symbol* operator -> () const { return _temp; } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/classfile/systemDictionary.cpp +--- openjdk/hotspot/src/share/vm/classfile/systemDictionary.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/classfile/systemDictionary.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -30,6 +30,7 @@ + #include "classfile/resolutionErrors.hpp" + #include "classfile/systemDictionary.hpp" + #include "classfile/vmSymbols.hpp" ++#include "compiler/compileBroker.hpp" + #include "interpreter/bytecodeStream.hpp" + #include "interpreter/interpreter.hpp" + #include "memory/gcLocker.hpp" +@@ -193,7 +194,10 @@ + // Forwards to resolve_instance_class_or_null + + klassOop SystemDictionary::resolve_or_null(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS) { +- assert(!THREAD->is_Compiler_thread(), "Can not load classes with the Compiler thread"); ++ assert(!THREAD->is_Compiler_thread(), ++ err_msg("can not load classes with compiler thread: class=%s, classloader=%s", ++ class_name->as_C_string(), ++ class_loader.is_null() ? 
"null" : class_loader->klass()->klass_part()->name()->as_C_string())); + if (FieldType::is_array(class_name)) { + return resolve_array_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL); + } else if (FieldType::is_obj(class_name)) { +@@ -2358,72 +2362,134 @@ + } + + +-methodOop SystemDictionary::find_method_handle_invoke(Symbol* name, +- Symbol* signature, +- KlassHandle accessing_klass, +- TRAPS) { +- if (!EnableInvokeDynamic) return NULL; +- vmSymbols::SID name_id = vmSymbols::find_sid(name); +- assert(name_id != vmSymbols::NO_SID, "must be a known name"); +- unsigned int hash = invoke_method_table()->compute_hash(signature, name_id); ++methodHandle SystemDictionary::find_method_handle_intrinsic(vmIntrinsics::ID iid, ++ Symbol* signature, ++ TRAPS) { ++ methodHandle empty; ++ assert(EnableInvokeDynamic, ""); ++ assert(MethodHandles::is_signature_polymorphic(iid) && ++ MethodHandles::is_signature_polymorphic_intrinsic(iid) && ++ iid != vmIntrinsics::_invokeGeneric, ++ err_msg("must be a known MH intrinsic iid=%d: %s", iid, vmIntrinsics::name_at(iid))); ++ ++ unsigned int hash = invoke_method_table()->compute_hash(signature, iid); + int index = invoke_method_table()->hash_to_index(hash); +- SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature, name_id); +- methodHandle non_cached_result; ++ SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature, iid); ++ methodHandle m; + if (spe == NULL || spe->property_oop() == NULL) { + spe = NULL; + // Must create lots of stuff here, but outside of the SystemDictionary lock. 
+- if (THREAD->is_Compiler_thread()) +- return NULL; // do not attempt from within compiler +- bool for_invokeGeneric = (name_id != vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name)); +- bool found_on_bcp = false; +- Handle mt = find_method_handle_type(signature, accessing_klass, +- for_invokeGeneric, +- found_on_bcp, CHECK_NULL); +- KlassHandle mh_klass = SystemDictionaryHandles::MethodHandle_klass(); +- methodHandle m = methodOopDesc::make_invoke_method(mh_klass, name, signature, +- mt, CHECK_NULL); ++ m = methodOopDesc::make_method_handle_intrinsic(iid, signature, CHECK_(empty)); ++ CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_highest_tier, ++ methodHandle(), CompileThreshold, "MH", CHECK_(empty)); ++ + // Now grab the lock. We might have to throw away the new method, + // if a racing thread has managed to install one at the same time. +- if (found_on_bcp) { +- MutexLocker ml(SystemDictionary_lock, Thread::current()); +- spe = invoke_method_table()->find_entry(index, hash, signature, name_id); ++ { ++ MutexLocker ml(SystemDictionary_lock, THREAD); ++ spe = invoke_method_table()->find_entry(index, hash, signature, iid); + if (spe == NULL) +- spe = invoke_method_table()->add_entry(index, hash, signature, name_id); +- if (spe->property_oop() == NULL) { ++ spe = invoke_method_table()->add_entry(index, hash, signature, iid); ++ if (spe->property_oop() == NULL) + spe->set_property_oop(m()); +- // Link m to his method type, if it is suitably generic. 
+- oop mtform = java_lang_invoke_MethodType::form(mt()); +- if (mtform != NULL && mt() == java_lang_invoke_MethodTypeForm::erasedType(mtform) +- // vmlayout must be an invokeExact: +- && name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name) +- && java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() > 0) { +- java_lang_invoke_MethodTypeForm::init_vmlayout(mtform, m()); +- } +- } +- } else { +- non_cached_result = m; + } + } +- if (spe != NULL && spe->property_oop() != NULL) { +- assert(spe->property_oop()->is_method(), ""); +- return (methodOop) spe->property_oop(); +- } else { +- return non_cached_result(); ++ ++ assert(spe != NULL && spe->property_oop() != NULL, ""); ++ m = methodOop(spe->property_oop()); ++ assert(m->is_method(), ""); ++ ++ return m; ++} ++ ++// Helper for unpacking the return value from linkMethod and linkCallSite. ++static methodHandle unpack_method_and_appendix(Handle mname, ++ objArrayHandle appendix_box, ++ Handle* appendix_result, ++ TRAPS) { ++ methodHandle empty; ++ if (mname.not_null()) { ++ oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname()); ++ if (vmtarget != NULL && vmtarget->is_method()) { ++ methodOop m = methodOop(vmtarget); ++ oop appendix = appendix_box->obj_at(0); ++ if (TraceMethodHandles) { ++ #ifndef PRODUCT ++ tty->print("Linked method="INTPTR_FORMAT": ", m); ++ m->print(); ++ if (appendix != NULL) { tty->print("appendix = "); appendix->print(); } ++ tty->cr(); ++ #endif //PRODUCT ++ } ++ (*appendix_result) = Handle(THREAD, appendix); ++ return methodHandle(THREAD, m); ++ } + } ++ THROW_MSG_(vmSymbols::java_lang_LinkageError(), "bad value from MethodHandleNatives", empty); ++ return empty; + } + ++methodHandle SystemDictionary::find_method_handle_invoker(Symbol* name, ++ Symbol* signature, ++ KlassHandle accessing_klass, ++ Handle* appendix_result, ++ TRAPS) { ++ methodHandle empty; ++ assert(EnableInvokeDynamic, ""); ++ assert(!THREAD->is_Compiler_thread(), ""); ++ Handle method_type = ++ 
SystemDictionary::find_method_handle_type(signature, accessing_klass, CHECK_(empty)); ++ if (false) { // FIXME: Decide if the Java upcall should resolve signatures. ++ method_type = java_lang_String::create_from_symbol(signature, CHECK_(empty)); ++ } ++ ++ KlassHandle mh_klass = SystemDictionaryHandles::MethodHandle_klass(); ++ int ref_kind = JVM_REF_invokeVirtual; ++ Handle name_str = StringTable::intern(name, CHECK_(empty)); ++ objArrayHandle appendix_box = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1, CHECK_(empty)); ++ assert(appendix_box->obj_at(0) == NULL, ""); ++ ++ // call java.lang.invoke.MethodHandleNatives::linkMethod(... String, MethodType) -> MemberName ++ JavaCallArguments args; ++ args.push_oop(accessing_klass()->java_mirror()); ++ args.push_int(ref_kind); ++ args.push_oop(mh_klass()->java_mirror()); ++ args.push_oop(name_str()); ++ args.push_oop(method_type()); ++ args.push_oop(appendix_box()); ++ JavaValue result(T_OBJECT); ++ JavaCalls::call_static(&result, ++ SystemDictionary::MethodHandleNatives_klass(), ++ vmSymbols::linkMethod_name(), ++ vmSymbols::linkMethod_signature(), ++ &args, CHECK_(empty)); ++ Handle mname(THREAD, (oop) result.get_jobject()); ++ return unpack_method_and_appendix(mname, appendix_box, appendix_result, THREAD); ++} ++ ++ + // Ask Java code to find or construct a java.lang.invoke.MethodType for the given + // signature, as interpreted relative to the given class loader. + // Because of class loader constraints, all method handle usage must be + // consistent with this loader. 
+ Handle SystemDictionary::find_method_handle_type(Symbol* signature, + KlassHandle accessing_klass, +- bool for_invokeGeneric, +- bool& return_bcp_flag, + TRAPS) { ++ Handle empty; ++ vmIntrinsics::ID null_iid = vmIntrinsics::_none; // distinct from all method handle invoker intrinsics ++ unsigned int hash = invoke_method_table()->compute_hash(signature, null_iid); ++ int index = invoke_method_table()->hash_to_index(hash); ++ SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature, null_iid); ++ if (spe != NULL && spe->property_oop() != NULL) { ++ assert(java_lang_invoke_MethodType::is_instance(spe->property_oop()), ""); ++ return Handle(THREAD, spe->property_oop()); ++ } else if (THREAD->is_Compiler_thread()) { ++ warning("SystemDictionary::find_method_handle_type called from compiler thread"); // FIXME ++ return Handle(); // do not attempt from within compiler, unless it was cached ++ } ++ + Handle class_loader, protection_domain; + bool is_on_bcp = true; // keep this true as long as we can materialize from the boot classloader +- Handle empty; + int npts = ArgumentCount(signature).size(); + objArrayHandle pts = oopFactory::new_objArray(SystemDictionary::Class_klass(), npts, CHECK_(empty)); + int arg = 0; +@@ -2432,6 +2498,7 @@ + for (SignatureStream ss(signature); !ss.is_done(); ss.next()) { + oop mirror = NULL; + if (is_on_bcp) { ++ // Note: class_loader & protection_domain are both null at this point. + mirror = ss.as_java_mirror(class_loader, protection_domain, + SignatureStream::ReturnNull, CHECK_(empty)); + if (mirror == NULL) { +@@ -2452,9 +2519,11 @@ + rt = Handle(THREAD, mirror); + else + pts->obj_at_put(arg++, mirror); ++ + // Check accessibility. + if (ss.is_object() && accessing_klass.not_null()) { + klassOop sel_klass = java_lang_Class::as_klassOop(mirror); ++ mirror = NULL; // safety + // Emulate constantPoolOopDesc::verify_constant_pool_resolve. 
+ if (Klass::cast(sel_klass)->oop_is_objArray()) + sel_klass = objArrayKlass::cast(sel_klass)->bottom_klass(); +@@ -2477,23 +2546,18 @@ + &args, CHECK_(empty)); + Handle method_type(THREAD, (oop) result.get_jobject()); + +- if (for_invokeGeneric) { +- // call java.lang.invoke.MethodHandleNatives::notifyGenericMethodType(MethodType) -> void +- JavaCallArguments args(Handle(THREAD, method_type())); +- JavaValue no_result(T_VOID); +- JavaCalls::call_static(&no_result, +- SystemDictionary::MethodHandleNatives_klass(), +- vmSymbols::notifyGenericMethodType_name(), +- vmSymbols::notifyGenericMethodType_signature(), +- &args, THREAD); +- if (HAS_PENDING_EXCEPTION) { +- // If the notification fails, just kill it. +- CLEAR_PENDING_EXCEPTION; ++ if (is_on_bcp) { ++ // We can cache this MethodType inside the JVM. ++ MutexLocker ml(SystemDictionary_lock, THREAD); ++ spe = invoke_method_table()->find_entry(index, hash, signature, null_iid); ++ if (spe == NULL) ++ spe = invoke_method_table()->add_entry(index, hash, signature, null_iid); ++ if (spe->property_oop() == NULL) { ++ spe->set_property_oop(method_type()); + } + } + +- // report back to the caller with the MethodType and the "on_bcp" flag +- return_bcp_flag = is_on_bcp; ++ // report back to the caller with the MethodType + return method_type; + } + +@@ -2508,8 +2572,7 @@ + Handle name = java_lang_String::create_from_symbol(name_sym, CHECK_(empty)); + Handle type; + if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') { +- bool ignore_is_on_bcp = false; +- type = find_method_handle_type(signature, caller, false, ignore_is_on_bcp, CHECK_(empty)); ++ type = find_method_handle_type(signature, caller, CHECK_(empty)); + } else { + ResourceMark rm(THREAD); + SignatureStream ss(signature, false); +@@ -2543,119 +2606,54 @@ + + // Ask Java code to find or construct a java.lang.invoke.CallSite for the given + // name and signature, as interpreted relative to the given class loader. 
+-Handle SystemDictionary::make_dynamic_call_site(Handle bootstrap_method, +- Symbol* name, +- methodHandle signature_invoker, +- Handle info, +- methodHandle caller_method, +- int caller_bci, +- TRAPS) { +- Handle empty; +- guarantee(bootstrap_method.not_null() && +- java_lang_invoke_MethodHandle::is_instance(bootstrap_method()), ++methodHandle SystemDictionary::find_dynamic_call_site_invoker(KlassHandle caller, ++ Handle bootstrap_specifier, ++ Symbol* name, ++ Symbol* type, ++ Handle* appendix_result, ++ TRAPS) { ++ methodHandle empty; ++ Handle bsm, info; ++ if (java_lang_invoke_MethodHandle::is_instance(bootstrap_specifier())) { ++ bsm = bootstrap_specifier; ++ } else { ++ assert(bootstrap_specifier->is_objArray(), ""); ++ objArrayHandle args(THREAD, (objArrayOop) bootstrap_specifier()); ++ int len = args->length(); ++ assert(len >= 1, ""); ++ bsm = Handle(THREAD, args->obj_at(0)); ++ if (len > 1) { ++ objArrayOop args1 = oopFactory::new_objArray(SystemDictionary::Object_klass(), len-1, CHECK_(empty)); ++ for (int i = 1; i < len; i++) ++ args1->obj_at_put(i-1, args->obj_at(i)); ++ info = Handle(THREAD, args1); ++ } ++ } ++ guarantee(java_lang_invoke_MethodHandle::is_instance(bsm()), + "caller must supply a valid BSM"); + +- Handle caller_mname = MethodHandles::new_MemberName(CHECK_(empty)); +- MethodHandles::init_MemberName(caller_mname(), caller_method()); +- +- // call java.lang.invoke.MethodHandleNatives::makeDynamicCallSite(bootm, name, mtype, info, caller_mname, caller_pos) +- oop name_str_oop = StringTable::intern(name, CHECK_(empty)); // not a handle! 
+- JavaCallArguments args(Handle(THREAD, bootstrap_method())); +- args.push_oop(name_str_oop); +- args.push_oop(signature_invoker->method_handle_type()); ++ Handle method_name = java_lang_String::create_from_symbol(name, CHECK_(empty)); ++ Handle method_type = find_method_handle_type(type, caller, CHECK_(empty)); ++ ++ objArrayHandle appendix_box = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1, CHECK_(empty)); ++ assert(appendix_box->obj_at(0) == NULL, ""); ++ ++ // call java.lang.invoke.MethodHandleNatives::linkCallSite(caller, bsm, name, mtype, info, &appendix) ++ JavaCallArguments args; ++ args.push_oop(caller->java_mirror()); ++ args.push_oop(bsm()); ++ args.push_oop(method_name()); ++ args.push_oop(method_type()); + args.push_oop(info()); +- args.push_oop(caller_mname()); +- args.push_int(caller_bci); ++ args.push_oop(appendix_box); + JavaValue result(T_OBJECT); + JavaCalls::call_static(&result, + SystemDictionary::MethodHandleNatives_klass(), +- vmSymbols::makeDynamicCallSite_name(), +- vmSymbols::makeDynamicCallSite_signature(), ++ vmSymbols::linkCallSite_name(), ++ vmSymbols::linkCallSite_signature(), + &args, CHECK_(empty)); +- oop call_site_oop = (oop) result.get_jobject(); +- assert(call_site_oop->is_oop() +- /*&& java_lang_invoke_CallSite::is_instance(call_site_oop)*/, "must be sane"); +- if (TraceMethodHandles) { +-#ifndef PRODUCT +- tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop); +- call_site_oop->print(); +- tty->cr(); +-#endif //PRODUCT +- } +- return call_site_oop; +-} +- +-Handle SystemDictionary::find_bootstrap_method(methodHandle caller_method, int caller_bci, +- int cache_index, +- Handle& argument_info_result, +- TRAPS) { +- Handle empty; +- +- constantPoolHandle pool; +- { +- klassOop caller = caller_method->method_holder(); +- if (!Klass::cast(caller)->oop_is_instance()) return empty; +- pool = constantPoolHandle(THREAD, instanceKlass::cast(caller)->constants()); +- } +- +- int 
constant_pool_index = pool->cache()->entry_at(cache_index)->constant_pool_index(); +- constantTag tag = pool->tag_at(constant_pool_index); +- +- if (tag.is_invoke_dynamic()) { +- // JVM_CONSTANT_InvokeDynamic is an ordered pair of [bootm, name&type], plus optional arguments +- // The bootm, being a JVM_CONSTANT_MethodHandle, has its own cache entry. +- int bsm_index = pool->invoke_dynamic_bootstrap_method_ref_index_at(constant_pool_index); +- if (bsm_index != 0) { +- int bsm_index_in_cache = pool->cache()->entry_at(cache_index)->bootstrap_method_index_in_cache(); +- DEBUG_ONLY(int bsm_index_2 = pool->cache()->entry_at(bsm_index_in_cache)->constant_pool_index()); +- assert(bsm_index == bsm_index_2, "BSM constant lifted to cache"); +- if (TraceMethodHandles) { +- tty->print_cr("resolving bootstrap method for "PTR_FORMAT" at %d at cache[%d]CP[%d]...", +- (intptr_t) caller_method(), caller_bci, cache_index, constant_pool_index); +- } +- oop bsm_oop = pool->resolve_cached_constant_at(bsm_index_in_cache, CHECK_(empty)); +- if (TraceMethodHandles) { +- tty->print_cr("bootstrap method for "PTR_FORMAT" at %d retrieved as "PTR_FORMAT":", +- (intptr_t) caller_method(), caller_bci, (intptr_t) bsm_oop); +- } +- assert(bsm_oop->is_oop(), "must be sane"); +- // caller must verify that it is of type MethodHandle +- Handle bsm(THREAD, bsm_oop); +- bsm_oop = NULL; // safety +- +- // Extract the optional static arguments. 
+- Handle argument_info; // either null, or one arg, or Object[]{arg...} +- int argc = pool->invoke_dynamic_argument_count_at(constant_pool_index); +- if (TraceInvokeDynamic) { +- tty->print_cr("find_bootstrap_method: [%d/%d] CONSTANT_InvokeDynamic: %d[%d]", +- constant_pool_index, cache_index, bsm_index, argc); +- } +- if (argc > 0) { +- objArrayHandle arg_array; +- if (argc > 1) { +- objArrayOop arg_array_oop = oopFactory::new_objArray(SystemDictionary::Object_klass(), argc, CHECK_(empty)); +- arg_array = objArrayHandle(THREAD, arg_array_oop); +- argument_info = arg_array; +- } +- for (int arg_i = 0; arg_i < argc; arg_i++) { +- int arg_index = pool->invoke_dynamic_argument_index_at(constant_pool_index, arg_i); +- oop arg_oop = pool->resolve_possibly_cached_constant_at(arg_index, CHECK_(empty)); +- if (arg_array.is_null()) { +- argument_info = Handle(THREAD, arg_oop); +- } else { +- arg_array->obj_at_put(arg_i, arg_oop); +- } +- } +- } +- +- argument_info_result = argument_info; // return argument_info to caller +- return bsm; +- } +- } else { +- ShouldNotReachHere(); // verifier does not allow this +- } +- +- return empty; ++ Handle mname(THREAD, (oop) result.get_jobject()); ++ return unpack_method_and_appendix(mname, appendix_box, appendix_result, THREAD); + } + + // Since the identity hash code for symbols changes when the symbols are +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/classfile/systemDictionary.hpp +--- openjdk/hotspot/src/share/vm/classfile/systemDictionary.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/classfile/systemDictionary.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -147,15 +147,10 @@ + template(MethodHandle_klass, java_lang_invoke_MethodHandle, Pre_JSR292) \ + template(MemberName_klass, java_lang_invoke_MemberName, Pre_JSR292) \ + template(MethodHandleNatives_klass, java_lang_invoke_MethodHandleNatives, Pre_JSR292) \ +- template(AdapterMethodHandle_klass, java_lang_invoke_AdapterMethodHandle, Pre_JSR292) \ +- 
template(BoundMethodHandle_klass, java_lang_invoke_BoundMethodHandle, Pre_JSR292) \ +- template(DirectMethodHandle_klass, java_lang_invoke_DirectMethodHandle, Pre_JSR292) \ ++ template(LambdaForm_klass, java_lang_invoke_LambdaForm, Opt) \ + template(MethodType_klass, java_lang_invoke_MethodType, Pre_JSR292) \ +- template(MethodTypeForm_klass, java_lang_invoke_MethodTypeForm, Pre_JSR292) \ + template(BootstrapMethodError_klass, java_lang_BootstrapMethodError, Pre_JSR292) \ +- template(WrongMethodTypeException_klass, java_lang_invoke_WrongMethodTypeException, Pre_JSR292) \ + template(CallSite_klass, java_lang_invoke_CallSite, Pre_JSR292) \ +- template(CountingMethodHandle_klass, java_lang_invoke_CountingMethodHandle, Opt) \ + template(ConstantCallSite_klass, java_lang_invoke_ConstantCallSite, Pre_JSR292) \ + template(MutableCallSite_klass, java_lang_invoke_MutableCallSite, Pre_JSR292) \ + template(VolatileCallSite_klass, java_lang_invoke_VolatileCallSite, Pre_JSR292) \ +@@ -487,17 +482,24 @@ + Handle loader2, bool is_method, TRAPS); + + // JSR 292 +- // find the java.lang.invoke.MethodHandles::invoke method for a given signature +- static methodOop find_method_handle_invoke(Symbol* name, +- Symbol* signature, +- KlassHandle accessing_klass, +- TRAPS); +- // ask Java to compute a java.lang.invoke.MethodType object for a given signature ++ // find a java.lang.invoke.MethodHandle.invoke* method for a given signature ++ // (asks Java to compute it if necessary, except in a compiler thread) ++ static methodHandle find_method_handle_invoker(Symbol* name, ++ Symbol* signature, ++ KlassHandle accessing_klass, ++ Handle *appendix_result, ++ TRAPS); ++ // for a given signature, find the internal MethodHandle method (linkTo* or invokeBasic) ++ // (does not ask Java, since this is a low-level intrinsic defined by the JVM) ++ static methodHandle find_method_handle_intrinsic(vmIntrinsics::ID iid, ++ Symbol* signature, ++ TRAPS); ++ // find a java.lang.invoke.MethodType object for 
a given signature ++ // (asks Java to compute it if necessary, except in a compiler thread) + static Handle find_method_handle_type(Symbol* signature, + KlassHandle accessing_klass, +- bool for_invokeGeneric, +- bool& return_bcp_flag, + TRAPS); ++ + // ask Java to compute a java.lang.invoke.MethodHandle object for a given CP entry + static Handle link_method_handle_constant(KlassHandle caller, + int ref_kind, //e.g., JVM_REF_invokeVirtual +@@ -505,23 +507,14 @@ + Symbol* name, + Symbol* signature, + TRAPS); ++ + // ask Java to create a dynamic call site, while linking an invokedynamic op +- static Handle make_dynamic_call_site(Handle bootstrap_method, +- // Callee information: +- Symbol* name, +- methodHandle signature_invoker, +- Handle info, +- // Caller information: +- methodHandle caller_method, +- int caller_bci, +- TRAPS); +- +- // coordinate with Java about bootstrap methods +- static Handle find_bootstrap_method(methodHandle caller_method, +- int caller_bci, // N.B. must be an invokedynamic +- int cache_index, // must be corresponding main_entry +- Handle &argument_info_result, // static BSM arguments, if any +- TRAPS); ++ static methodHandle find_dynamic_call_site_invoker(KlassHandle caller, ++ Handle bootstrap_method, ++ Symbol* name, ++ Symbol* type, ++ Handle *appendix_result, ++ TRAPS); + + // Utility for printing loader "name" as part of tracing constraints + static const char* loader_name(oop loader) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/classfile/vmSymbols.cpp +--- openjdk/hotspot/src/share/vm/classfile/vmSymbols.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/classfile/vmSymbols.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -332,7 +332,14 @@ + if (cname == NULL || mname == NULL || msig == NULL) return NULL; + klassOop k = SystemDictionary::find_well_known_klass(cname); + if (k == NULL) return NULL; +- return instanceKlass::cast(k)->find_method(mname, msig); ++ methodOop m = instanceKlass::cast(k)->find_method(mname, 
msig); ++ if (m == NULL && ++ cname == vmSymbols::java_lang_invoke_MethodHandle() && ++ msig == vmSymbols::star_name()) { ++ // Any signature polymorphic method is represented by a fixed concrete signature: ++ m = instanceKlass::cast(k)->find_method(mname, vmSymbols::object_array_object_signature()); ++ } ++ return m; + } + + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/classfile/vmSymbols.hpp +--- openjdk/hotspot/src/share/vm/classfile/vmSymbols.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/classfile/vmSymbols.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -208,10 +208,12 @@ + template(newField_signature, "(Lsun/reflect/FieldInfo;)Ljava/lang/reflect/Field;") \ + template(newMethod_name, "newMethod") \ + template(newMethod_signature, "(Lsun/reflect/MethodInfo;)Ljava/lang/reflect/Method;") \ +- /* the following two names must be in order: */ \ +- template(invokeExact_name, "invokeExact") \ +- template(invokeGeneric_name, "invokeGeneric") \ +- template(invokeVarargs_name, "invokeVarargs") \ ++ template(invokeBasic_name, "invokeBasic") \ ++ template(linkToVirtual_name, "linkToVirtual") \ ++ template(linkToStatic_name, "linkToStatic") \ ++ template(linkToSpecial_name, "linkToSpecial") \ ++ template(linkToInterface_name, "linkToInterface") \ ++ template(compiledLambdaForm_name, "") /*fake name*/ \ + template(star_name, "*") /*not really a name*/ \ + template(invoke_name, "invoke") \ + template(override_name, "override") \ +@@ -232,36 +234,33 @@ + template(base_name, "base") \ + \ + /* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */ \ +- template(java_lang_invoke_InvokeDynamic, "java/lang/invoke/InvokeDynamic") \ +- template(java_lang_invoke_Linkage, "java/lang/invoke/Linkage") \ + template(java_lang_invoke_CallSite, "java/lang/invoke/CallSite") \ + template(java_lang_invoke_ConstantCallSite, "java/lang/invoke/ConstantCallSite") \ + template(java_lang_invoke_MutableCallSite, "java/lang/invoke/MutableCallSite") \ + 
template(java_lang_invoke_VolatileCallSite, "java/lang/invoke/VolatileCallSite") \ + template(java_lang_invoke_MethodHandle, "java/lang/invoke/MethodHandle") \ + template(java_lang_invoke_MethodType, "java/lang/invoke/MethodType") \ +- template(java_lang_invoke_WrongMethodTypeException, "java/lang/invoke/WrongMethodTypeException") \ + template(java_lang_invoke_MethodType_signature, "Ljava/lang/invoke/MethodType;") \ ++ template(java_lang_invoke_MemberName_signature, "Ljava/lang/invoke/MemberName;") \ ++ template(java_lang_invoke_LambdaForm_signature, "Ljava/lang/invoke/LambdaForm;") \ + template(java_lang_invoke_MethodHandle_signature, "Ljava/lang/invoke/MethodHandle;") \ + /* internal classes known only to the JVM: */ \ +- template(java_lang_invoke_MethodTypeForm, "java/lang/invoke/MethodTypeForm") \ +- template(java_lang_invoke_MethodTypeForm_signature, "Ljava/lang/invoke/MethodTypeForm;") \ + template(java_lang_invoke_MemberName, "java/lang/invoke/MemberName") \ + template(java_lang_invoke_MethodHandleNatives, "java/lang/invoke/MethodHandleNatives") \ +- template(java_lang_invoke_MethodHandleImpl, "java/lang/invoke/MethodHandleImpl") \ +- template(java_lang_invoke_AdapterMethodHandle, "java/lang/invoke/AdapterMethodHandle") \ +- template(java_lang_invoke_BoundMethodHandle, "java/lang/invoke/BoundMethodHandle") \ +- template(java_lang_invoke_DirectMethodHandle, "java/lang/invoke/DirectMethodHandle") \ +- template(java_lang_invoke_CountingMethodHandle, "java/lang/invoke/CountingMethodHandle") \ ++ template(java_lang_invoke_LambdaForm, "java/lang/invoke/LambdaForm") \ ++ template(java_lang_invoke_ForceInline_signature, "Ljava/lang/invoke/ForceInline;") \ ++ template(java_lang_invoke_DontInline_signature, "Ljava/lang/invoke/DontInline;") \ ++ template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \ ++ template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;") \ + /* internal up-calls 
made only by the JVM, via class sun.invoke.MethodHandleNatives: */ \ + template(findMethodHandleType_name, "findMethodHandleType") \ + template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \ +- template(notifyGenericMethodType_name, "notifyGenericMethodType") \ +- template(notifyGenericMethodType_signature, "(Ljava/lang/invoke/MethodType;)V") \ + template(linkMethodHandleConstant_name, "linkMethodHandleConstant") \ + template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/lang/invoke/MethodHandle;") \ +- template(makeDynamicCallSite_name, "makeDynamicCallSite") \ +- template(makeDynamicCallSite_signature, "(Ljava/lang/invoke/MethodHandle;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/Object;Ljava/lang/invoke/MemberName;I)Ljava/lang/invoke/CallSite;") \ ++ template(linkMethod_name, "linkMethod") \ ++ template(linkMethod_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/invoke/MemberName;") \ ++ template(linkCallSite_name, "linkCallSite") \ ++ template(linkCallSite_signature, "(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/invoke/MemberName;") \ + template(setTargetNormal_name, "setTargetNormal") \ + template(setTargetVolatile_name, "setTargetVolatile") \ + template(setTarget_signature, "(Ljava/lang/invoke/MethodHandle;)V") \ +@@ -355,22 +354,15 @@ + template(toString_name, "toString") \ + template(values_name, "values") \ + template(receiver_name, "receiver") \ +- template(vmmethod_name, "vmmethod") \ + template(vmtarget_name, "vmtarget") \ ++ template(vmindex_name, "vmindex") \ ++ template(vmcount_name, "vmcount") \ + template(vmentry_name, "vmentry") \ +- template(vmcount_name, "vmcount") \ +- template(vmslots_name, "vmslots") \ +- template(vmlayout_name, "vmlayout") \ +- 
template(vmindex_name, "vmindex") \ +- template(vmargslot_name, "vmargslot") \ + template(flags_name, "flags") \ +- template(argument_name, "argument") \ +- template(conversion_name, "conversion") \ + template(rtype_name, "rtype") \ + template(ptypes_name, "ptypes") \ + template(form_name, "form") \ +- template(erasedType_name, "erasedType") \ +- template(genericInvoker_name, "genericInvoker") \ ++ template(basicType_name, "basicType") \ + template(append_name, "append") \ + template(klass_name, "klass") \ + template(resolved_constructor_name, "resolved_constructor") \ +@@ -922,15 +914,15 @@ + \ + do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_object_array_object_signature, F_R) \ + /* (symbols invoke_name and invoke_signature defined above) */ \ +- do_intrinsic(_checkSpreadArgument, java_lang_invoke_MethodHandleNatives, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) \ +- do_name( checkSpreadArgument_name, "checkSpreadArgument") \ +- do_name( checkSpreadArgument_signature, "(Ljava/lang/Object;I)V") \ +- do_intrinsic(_invokeExact, java_lang_invoke_MethodHandle, invokeExact_name, object_array_object_signature, F_RN) \ +- do_intrinsic(_invokeGeneric, java_lang_invoke_MethodHandle, invokeGeneric_name, object_array_object_signature, F_RN) \ +- do_intrinsic(_invokeVarargs, java_lang_invoke_MethodHandle, invokeVarargs_name, object_array_object_signature, F_R) \ +- do_intrinsic(_invokeDynamic, java_lang_invoke_InvokeDynamic, star_name, object_array_object_signature, F_SN) \ +- \ +- do_intrinsic(_selectAlternative, java_lang_invoke_MethodHandleImpl, selectAlternative_name, selectAlternative_signature, F_S) \ ++ /* the polymorphic MH intrinsics must be in compact order, with _invokeGeneric first and _linkToInterface last */ \ ++ do_intrinsic(_invokeGeneric, java_lang_invoke_MethodHandle, invoke_name, star_name, F_RN) \ ++ do_intrinsic(_invokeBasic, java_lang_invoke_MethodHandle, invokeBasic_name, star_name, F_RN) \ ++ 
do_intrinsic(_linkToVirtual, java_lang_invoke_MethodHandle, linkToVirtual_name, star_name, F_SN) \ ++ do_intrinsic(_linkToStatic, java_lang_invoke_MethodHandle, linkToStatic_name, star_name, F_SN) \ ++ do_intrinsic(_linkToSpecial, java_lang_invoke_MethodHandle, linkToSpecial_name, star_name, F_SN) \ ++ do_intrinsic(_linkToInterface, java_lang_invoke_MethodHandle, linkToInterface_name, star_name, F_SN) \ ++ /* special marker for bytecode generated for the JVM from a LambdaForm: */ \ ++ do_intrinsic(_compiledLambdaForm, java_lang_invoke_MethodHandle, compiledLambdaForm_name, star_name, F_RN) \ + \ + /* unboxing methods: */ \ + do_intrinsic(_booleanValue, java_lang_Boolean, booleanValue_name, void_boolean_signature, F_R) \ +@@ -1063,6 +1055,10 @@ + + ID_LIMIT, + LAST_COMPILER_INLINE = _prefetchWriteStatic, ++ FIRST_MH_SIG_POLY = _invokeGeneric, ++ FIRST_MH_STATIC = _linkToVirtual, ++ LAST_MH_SIG_POLY = _linkToInterface, ++ + FIRST_ID = _none + 1 + }; + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/code/codeBlob.cpp +--- openjdk/hotspot/src/share/vm/code/codeBlob.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/code/codeBlob.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -359,43 +359,6 @@ + + + //---------------------------------------------------------------------------------------------------- +-// Implementation of RicochetBlob +- +-RicochetBlob::RicochetBlob( +- CodeBuffer* cb, +- int size, +- int bounce_offset, +- int exception_offset, +- int frame_size +-) +-: SingletonBlob("RicochetBlob", cb, sizeof(RicochetBlob), size, frame_size, (OopMapSet*) NULL) +-{ +- _bounce_offset = bounce_offset; +- _exception_offset = exception_offset; +-} +- +- +-RicochetBlob* RicochetBlob::create( +- CodeBuffer* cb, +- int bounce_offset, +- int exception_offset, +- int frame_size) +-{ +- RicochetBlob* blob = NULL; +- ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock +- { +- MutexLockerEx mu(CodeCache_lock, 
Mutex::_no_safepoint_check_flag); +- unsigned int size = allocation_size(cb, sizeof(RicochetBlob)); +- blob = new (size) RicochetBlob(cb, size, bounce_offset, exception_offset, frame_size); +- } +- +- trace_new_stub(blob, "RicochetBlob"); +- +- return blob; +-} +- +- +-//---------------------------------------------------------------------------------------------------- + // Implementation of DeoptimizationBlob + + DeoptimizationBlob::DeoptimizationBlob( +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/code/codeBlob.hpp +--- openjdk/hotspot/src/share/vm/code/codeBlob.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/code/codeBlob.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -35,7 +35,6 @@ + // Suptypes are: + // nmethod : Compiled Java methods (include method that calls to native code) + // RuntimeStub : Call to VM runtime methods +-// RicochetBlob : Used for blocking MethodHandle adapters + // DeoptimizationBlob : Used for deoptimizatation + // ExceptionBlob : Used for stack unrolling + // SafepointBlob : Used to handle illegal instruction exceptions +@@ -99,7 +98,6 @@ + virtual bool is_buffer_blob() const { return false; } + virtual bool is_nmethod() const { return false; } + virtual bool is_runtime_stub() const { return false; } +- virtual bool is_ricochet_stub() const { return false; } + virtual bool is_deoptimization_stub() const { return false; } + virtual bool is_uncommon_trap_stub() const { return false; } + virtual bool is_exception_stub() const { return false; } +@@ -350,50 +348,6 @@ + + + //---------------------------------------------------------------------------------------------------- +-// RicochetBlob +-// Holds an arbitrary argument list indefinitely while Java code executes recursively. 
+- +-class RicochetBlob: public SingletonBlob { +- friend class VMStructs; +- private: +- +- int _bounce_offset; +- int _exception_offset; +- +- // Creation support +- RicochetBlob( +- CodeBuffer* cb, +- int size, +- int bounce_offset, +- int exception_offset, +- int frame_size +- ); +- +- public: +- // Creation +- static RicochetBlob* create( +- CodeBuffer* cb, +- int bounce_offset, +- int exception_offset, +- int frame_size +- ); +- +- // Typing +- bool is_ricochet_stub() const { return true; } +- +- // GC for args +- void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ } +- +- address bounce_addr() const { return code_begin() + _bounce_offset; } +- address exception_addr() const { return code_begin() + _exception_offset; } +- bool returns_to_bounce_addr(address pc) const { +- address bounce_pc = bounce_addr(); +- return (pc == bounce_pc || (pc + frame::pc_return_offset) == bounce_pc); +- } +-}; +- +- +-//---------------------------------------------------------------------------------------------------- + // DeoptimizationBlob + + class DeoptimizationBlob: public SingletonBlob { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/code/codeCache.cpp +--- openjdk/hotspot/src/share/vm/code/codeCache.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/code/codeCache.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -796,7 +796,6 @@ + int nmethodCount = 0; + int runtimeStubCount = 0; + int adapterCount = 0; +- int ricochetStubCount = 0; + int deoptimizationStubCount = 0; + int uncommonTrapStubCount = 0; + int bufferBlobCount = 0; +@@ -841,8 +840,6 @@ + } + } else if (cb->is_runtime_stub()) { + runtimeStubCount++; +- } else if (cb->is_ricochet_stub()) { +- ricochetStubCount++; + } else if (cb->is_deoptimization_stub()) { + deoptimizationStubCount++; + } else if (cb->is_uncommon_trap_stub()) { +@@ -879,7 +876,6 @@ + tty->print_cr("runtime_stubs: %d",runtimeStubCount); + tty->print_cr("adapters: 
%d",adapterCount); + tty->print_cr("buffer blobs: %d",bufferBlobCount); +- tty->print_cr("ricochet_stubs: %d",ricochetStubCount); + tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount); + tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount); + tty->print_cr("\nnmethod size distribution (non-zombie java)"); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/code/debugInfoRec.cpp +--- openjdk/hotspot/src/share/vm/code/debugInfoRec.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/code/debugInfoRec.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -311,6 +311,7 @@ + assert(method == NULL || + (method->is_native() && bci == 0) || + (!method->is_native() && 0 <= bci && bci < method->code_size()) || ++ (method->is_compiled_lambda_form() && bci == -99) || // this might happen in C1 + bci == -1, "illegal bci"); + + // serialize the locals/expressions/monitors +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/code/nmethod.cpp +--- openjdk/hotspot/src/share/vm/code/nmethod.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/code/nmethod.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -945,8 +945,12 @@ + void nmethod::print_on(outputStream* st, const char* msg) const { + if (st != NULL) { + ttyLocker ttyl; +- CompileTask::print_compilation(st, this, msg); +- if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this); ++ if (WizardMode) { ++ CompileTask::print_compilation(st, this, msg, /*short_form:*/ true); ++ st->print_cr(" (" INTPTR_FORMAT ")", this); ++ } else { ++ CompileTask::print_compilation(st, this, msg, /*short_form:*/ false); ++ } + } + } + +@@ -964,7 +968,9 @@ + if (printmethod) { + print_code(); + print_pcs(); +- oop_maps()->print(); ++ if (oop_maps()) { ++ oop_maps()->print(); ++ } + } + if (PrintDebugInfo) { + print_scopes(); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/code/vtableStubs.hpp +--- openjdk/hotspot/src/share/vm/code/vtableStubs.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ 
openjdk/hotspot/src/share/vm/code/vtableStubs.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -55,6 +55,8 @@ + int index() const { return _index; } + static VMReg receiver_location() { return _receiver_location; } + void set_next(VtableStub* n) { _next = n; } ++ ++ public: + address code_begin() const { return (address)(this + 1); } + address code_end() const { return code_begin() + pd_code_size_limit(_is_vtable_stub); } + address entry_point() const { return code_begin(); } +@@ -65,6 +67,7 @@ + } + bool contains(address pc) const { return code_begin() <= pc && pc < code_end(); } + ++ private: + void set_exception_points(address npe_addr, address ame_addr) { + _npe_offset = npe_addr - code_begin(); + _ame_offset = ame_addr - code_begin(); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/compiler/compileBroker.cpp +--- openjdk/hotspot/src/share/vm/compiler/compileBroker.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/compiler/compileBroker.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -407,7 +407,10 @@ + if (is_osr_method) { + st->print(" @ %d", osr_bci); + } +- st->print(" (%d bytes)", method->code_size()); ++ if (method->is_native()) ++ st->print(" (native)"); ++ else ++ st->print(" (%d bytes)", method->code_size()); + } + + if (msg != NULL) { +@@ -427,12 +430,17 @@ + st->print(" "); // print compilation number + + // method attributes +- const char sync_char = method->is_synchronized() ? 's' : ' '; +- const char exception_char = method->has_exception_handlers() ? '!' : ' '; +- const char monitors_char = method->has_monitor_bytecodes() ? 'm' : ' '; ++ if (method->is_loaded()) { ++ const char sync_char = method->is_synchronized() ? 's' : ' '; ++ const char exception_char = method->has_exception_handlers() ? '!' : ' '; ++ const char monitors_char = method->has_monitor_bytecodes() ? 
'm' : ' '; + +- // print method attributes +- st->print(" %c%c%c ", sync_char, exception_char, monitors_char); ++ // print method attributes ++ st->print(" %c%c%c ", sync_char, exception_char, monitors_char); ++ } else { ++ // %s!bn ++ st->print(" "); // print method attributes ++ } + + if (TieredCompilation) { + st->print(" "); +@@ -444,7 +452,10 @@ + + st->print("@ %d ", bci); // print bci + method->print_short_name(st); +- st->print(" (%d bytes)", method->code_size()); ++ if (method->is_loaded()) ++ st->print(" (%d bytes)", method->code_size()); ++ else ++ st->print(" (not loaded)"); + + if (msg != NULL) { + st->print(" %s", msg); +@@ -1018,6 +1029,7 @@ + "sanity check"); + assert(!instanceKlass::cast(method->method_holder())->is_not_initialized(), + "method holder must be initialized"); ++ assert(!method->is_method_handle_intrinsic(), "do not enqueue these guys"); + + if (CIPrintRequests) { + tty->print("request: "); +@@ -1231,7 +1243,7 @@ + // + // Note: A native method implies non-osr compilation which is + // checked with an assertion at the entry of this method. +- if (method->is_native()) { ++ if (method->is_native() && !method->is_method_handle_intrinsic()) { + bool in_base_library; + address adr = NativeLookup::lookup(method, in_base_library, THREAD); + if (HAS_PENDING_EXCEPTION) { +@@ -1264,7 +1276,7 @@ + + // do the compilation + if (method->is_native()) { +- if (!PreferInterpreterNativeStubs) { ++ if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) { + // Acquire our lock. 
+ int compile_id; + { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/compiler/compileBroker.hpp +--- openjdk/hotspot/src/share/vm/compiler/compileBroker.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/compiler/compileBroker.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -104,10 +104,10 @@ + + public: + void print_compilation(outputStream* st = tty, bool short_form = false); +- static void print_compilation(outputStream* st, const nmethod* nm, const char* msg = NULL) { ++ static void print_compilation(outputStream* st, const nmethod* nm, const char* msg = NULL, bool short_form = false) { + print_compilation_impl(st, nm->method(), nm->compile_id(), nm->comp_level(), + nm->is_osr_method(), nm->is_osr_method() ? nm->osr_entry_bci() : -1, /*is_blocking*/ false, +- msg); ++ msg, short_form); + } + + static void print_inlining(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg = NULL); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/abstractInterpreter.hpp +--- openjdk/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -99,7 +99,10 @@ + empty, // empty method (code: _return) + accessor, // accessor method (code: _aload_0, _getfield, _(a|i)return) + abstract, // abstract method (throws an AbstractMethodException) +- method_handle, // java.lang.invoke.MethodHandles::invoke ++ method_handle_invoke_FIRST, // java.lang.invoke.MethodHandles::invokeExact, etc. 
++ method_handle_invoke_LAST = (method_handle_invoke_FIRST ++ + (vmIntrinsics::LAST_MH_SIG_POLY ++ - vmIntrinsics::FIRST_MH_SIG_POLY)), + java_lang_math_sin, // implementation of java.lang.Math.sin (x) + java_lang_math_cos, // implementation of java.lang.Math.cos (x) + java_lang_math_tan, // implementation of java.lang.Math.tan (x) +@@ -112,6 +115,14 @@ + invalid = -1 + }; + ++ // Conversion from the part of the above enum to vmIntrinsics::_invokeExact, etc. ++ static vmIntrinsics::ID method_handle_intrinsic(MethodKind kind) { ++ if (kind >= method_handle_invoke_FIRST && kind <= method_handle_invoke_LAST) ++ return (vmIntrinsics::ID)( vmIntrinsics::FIRST_MH_SIG_POLY + (kind - method_handle_invoke_FIRST) ); ++ else ++ return vmIntrinsics::_none; ++ } ++ + enum SomeConstants { + number_of_result_handlers = 10 // number of result handlers for native calls + }; +@@ -146,6 +157,9 @@ + static address entry_for_kind(MethodKind k) { assert(0 <= k && k < number_of_method_entries, "illegal kind"); return _entry_table[k]; } + static address entry_for_method(methodHandle m) { return entry_for_kind(method_kind(m)); } + ++ // used for bootstrapping method handles: ++ static void set_entry_for_kind(MethodKind k, address e); ++ + static void print_method_kind(MethodKind kind) PRODUCT_RETURN; + + static bool can_be_compiled(methodHandle m); +@@ -304,6 +318,7 @@ + void bang_stack_shadow_pages(bool native_call); + + void generate_all(); ++ void initialize_method_handle_entries(); + + public: + AbstractInterpreterGenerator(StubQueue* _code); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/bytecode.cpp +--- openjdk/hotspot/src/share/vm/interpreter/bytecode.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/bytecode.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -120,19 +120,22 @@ + + void Bytecode_invoke::verify() const { + assert(is_valid(), "check invoke"); +- assert(method()->constants()->cache() != NULL, "do not call this from verifier or 
rewriter"); ++ assert(cpcache() != NULL, "do not call this from verifier or rewriter"); + } + + +-Symbol* Bytecode_member_ref::signature() const { +- constantPoolOop constants = method()->constants(); +- return constants->signature_ref_at(index()); ++Symbol* Bytecode_member_ref::klass() const { ++ return constants()->klass_ref_at_noresolve(index()); + } + + + Symbol* Bytecode_member_ref::name() const { +- constantPoolOop constants = method()->constants(); +- return constants->name_ref_at(index()); ++ return constants()->name_ref_at(index()); ++} ++ ++ ++Symbol* Bytecode_member_ref::signature() const { ++ return constants()->signature_ref_at(index()); + } + + +@@ -146,18 +149,19 @@ + methodHandle Bytecode_invoke::static_target(TRAPS) { + methodHandle m; + KlassHandle resolved_klass; +- constantPoolHandle constants(THREAD, _method->constants()); ++ constantPoolHandle constants(THREAD, this->constants()); + +- if (java_code() == Bytecodes::_invokedynamic) { +- LinkResolver::resolve_dynamic_method(m, resolved_klass, constants, index(), CHECK_(methodHandle())); +- } else if (java_code() != Bytecodes::_invokeinterface) { +- LinkResolver::resolve_method(m, resolved_klass, constants, index(), CHECK_(methodHandle())); +- } else { +- LinkResolver::resolve_interface_method(m, resolved_klass, constants, index(), CHECK_(methodHandle())); +- } ++ Bytecodes::Code bc = invoke_code(); ++ LinkResolver::resolve_method_statically(m, resolved_klass, bc, constants, index(), CHECK_(methodHandle())); + return m; + } + ++Handle Bytecode_invoke::appendix(TRAPS) { ++ ConstantPoolCacheEntry* cpce = cpcache_entry(); ++ if (cpce->has_appendix()) ++ return Handle(THREAD, cpce->f1_appendix()); ++ return Handle(); // usual case ++} + + int Bytecode_member_ref::index() const { + // Note: Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4, +@@ -170,12 +174,16 @@ + } + + int Bytecode_member_ref::pool_index() const { ++ return cpcache_entry()->constant_pool_index(); ++} ++ 
++ConstantPoolCacheEntry* Bytecode_member_ref::cpcache_entry() const { + int index = this->index(); + DEBUG_ONLY({ + if (!has_index_u4(code())) +- index -= constantPoolOopDesc::CPCACHE_INDEX_TAG; ++ index = constantPoolOopDesc::get_cpcache_index(index); + }); +- return _method->constants()->cache()->entry_at(index)->constant_pool_index(); ++ return cpcache()->entry_at(index); + } + + // Implementation of Bytecode_field +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/bytecode.hpp +--- openjdk/hotspot/src/share/vm/interpreter/bytecode.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/bytecode.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -80,6 +80,7 @@ + + Bytecodes::Code code() const { return _code; } + Bytecodes::Code java_code() const { return Bytecodes::java_code(code()); } ++ Bytecodes::Code invoke_code() const { return (code() == Bytecodes::_invokehandle) ? code() : java_code(); } + + // Static functions for parsing bytecodes in place. + int get_index_u1(Bytecodes::Code bc) const { +@@ -195,10 +196,14 @@ + Bytecode_member_ref(methodHandle method, int bci) : Bytecode(method(), method()->bcp_from(bci)), _method(method) {} + + methodHandle method() const { return _method; } ++ constantPoolOop constants() const { return _method->constants(); } ++ constantPoolCacheOop cpcache() const { return _method->constants()->cache(); } ++ ConstantPoolCacheEntry* cpcache_entry() const; + + public: + int index() const; // cache index (loaded from instruction) + int pool_index() const; // constant pool index ++ Symbol* klass() const; // returns the klass of the method or field + Symbol* name() const; // returns the name of the method or field + Symbol* signature() const; // returns the signature of the method or field + +@@ -218,13 +223,15 @@ + + // Attributes + methodHandle static_target(TRAPS); // "specified" method (from constant pool) ++ Handle appendix(TRAPS); // if CPCE::has_appendix (from constant pool) + + // Testers +- bool 
is_invokeinterface() const { return java_code() == Bytecodes::_invokeinterface; } +- bool is_invokevirtual() const { return java_code() == Bytecodes::_invokevirtual; } +- bool is_invokestatic() const { return java_code() == Bytecodes::_invokestatic; } +- bool is_invokespecial() const { return java_code() == Bytecodes::_invokespecial; } +- bool is_invokedynamic() const { return java_code() == Bytecodes::_invokedynamic; } ++ bool is_invokeinterface() const { return invoke_code() == Bytecodes::_invokeinterface; } ++ bool is_invokevirtual() const { return invoke_code() == Bytecodes::_invokevirtual; } ++ bool is_invokestatic() const { return invoke_code() == Bytecodes::_invokestatic; } ++ bool is_invokespecial() const { return invoke_code() == Bytecodes::_invokespecial; } ++ bool is_invokedynamic() const { return invoke_code() == Bytecodes::_invokedynamic; } ++ bool is_invokehandle() const { return invoke_code() == Bytecodes::_invokehandle; } + + bool has_receiver() const { return !is_invokestatic() && !is_invokedynamic(); } + +@@ -232,15 +239,12 @@ + is_invokevirtual() || + is_invokestatic() || + is_invokespecial() || +- is_invokedynamic(); } ++ is_invokedynamic() || ++ is_invokehandle(); } + +- bool is_method_handle_invoke() const { +- return (is_invokedynamic() || +- (is_invokevirtual() && +- method()->constants()->klass_ref_at_noresolve(index()) == vmSymbols::java_lang_invoke_MethodHandle() && +- methodOopDesc::is_method_handle_invoke_name(name()))); +- } ++ bool has_appendix() { return cpcache_entry()->has_appendix(); } + ++ private: + // Helper to skip verification. 
Used is_valid() to check if the result is really an invoke + inline friend Bytecode_invoke Bytecode_invoke_check(methodHandle method, int bci); + }; +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/bytecodeInterpreter.cpp +--- openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -235,10 +235,6 @@ + #endif + #endif + +-// JavaStack Implementation +-#define MORE_STACK(count) \ +- (topOfStack -= ((count) * Interpreter::stackElementWords)) +- + + #define UPDATE_PC(opsize) {pc += opsize; } + /* +@@ -575,7 +571,7 @@ + + /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, + /* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer, +-/* 0xE8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, ++/* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default, + /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, + + /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, +@@ -1774,7 +1770,7 @@ + + oop obj; + if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { +- obj = (oop) cache->f1(); ++ obj = (oop) cache->f1_as_instance(); + MORE_STACK(1); // Assume single slot push + } else { + obj = (oop) STACK_OBJECT(-1); +@@ -1785,7 +1781,7 @@ + // Now store the result on the stack + // + TosState tos_type = cache->flag_state(); +- int field_offset = cache->f2(); ++ int field_offset = cache->f2_as_index(); + if (cache->is_volatile()) { + if (tos_type == atos) { + VERIFY_OOP(obj->obj_field_acquire(field_offset)); +@@ -1885,7 +1881,7 @@ + --count; + } + if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { +- obj = (oop) cache->f1(); ++ obj = (oop) cache->f1_as_instance(); + } else { + --count; + obj = (oop) STACK_OBJECT(count); +@@ -1895,7 +1891,7 @@ + // + // Now store the result + // +- int field_offset = 
cache->f2(); ++ int field_offset = cache->f2_as_index(); + if (cache->is_volatile()) { + if (tos_type == itos) { + obj->release_int_field_put(field_offset, STACK_INT(-1)); +@@ -2177,13 +2173,15 @@ + // This kind of CP cache entry does not need to match the flags byte, because + // there is a 1-1 relation between bytecode type and CP entry type. + ConstantPoolCacheEntry* cache = cp->entry_at(index); +- if (cache->is_f1_null()) { ++ oop result = cache->f1_as_instance(); ++ if (result == NULL) { + CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), + handle_exception); ++ result = cache->f1_as_instance(); + } + +- VERIFY_OOP(cache->f1()); +- SET_STACK_OBJECT(cache->f1(), 0); ++ VERIFY_OOP(result); ++ SET_STACK_OBJECT(result, 0); + UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); + } + +@@ -2197,28 +2195,74 @@ + ShouldNotReachHere(); + } + +- int index = Bytes::get_native_u4(pc+1); ++ u4 index = Bytes::get_native_u4(pc+1); ++ ConstantPoolCacheEntry* cache = cp->secondary_entry_at(index); ++ oop result = cache->f1_as_instance(); + + // We are resolved if the f1 field contains a non-null object (CallSite, etc.) + // This kind of CP cache entry does not need to match the flags byte, because + // there is a 1-1 relation between bytecode type and CP entry type. + assert(constantPoolCacheOopDesc::is_secondary_index(index), "incorrect format"); +- ConstantPoolCacheEntry* cache = cp->secondary_entry_at(index); +- if (cache->is_f1_null()) { ++ if (! 
cache->is_resolved((Bytecodes::Code) opcode)) { + CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD), + handle_exception); ++ result = cache->f1_as_instance(); + } + +- VERIFY_OOP(cache->f1()); +- oop method_handle = java_lang_invoke_CallSite::target(cache->f1()); ++ VERIFY_OOP(result); ++ oop method_handle = java_lang_invoke_CallSite::target(result); + CHECK_NULL(method_handle); + +- istate->set_msg(call_method_handle); +- istate->set_callee((methodOop) method_handle); ++ methodOop method = cache->f1_as_method(); ++ VERIFY_OOP(method); ++ ++ /** Re-enabled in 7200949 ++ if (cache->has_appendix()) { ++ constantPoolOop constants = METHOD->constants(); ++ SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); ++ MORE_STACK(1); ++ } **/ ++ ++ istate->set_msg(call_method); ++ istate->set_callee(method); ++ istate->set_callee_entry_point(method->from_interpreted_entry()); + istate->set_bcp_advance(5); + + UPDATE_PC_AND_RETURN(0); // I'll be back... + } ++ CASE(_invokehandle): { ++ ++ if (!EnableInvokeDynamic) { ++ ShouldNotReachHere(); ++ } ++ ++ u2 index = Bytes::get_native_u2(pc+1); ++ ConstantPoolCacheEntry* cache = cp->entry_at(index); ++ ++ if (! cache->is_resolved((Bytecodes::Code) opcode)) { ++ CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD), ++ handle_exception); ++ cache = cp->entry_at(index); ++ } ++ ++ methodOop method = cache->f1_as_method(); ++ ++ VERIFY_OOP(method); ++ ++ /** Re-enabled in 7200949 ++ if (cache->has_appendix()) { ++ constantPoolOop constants = METHOD->constants(); ++ SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); ++ MORE_STACK(1); ++ } **/ ++ ++ istate->set_msg(call_method); ++ istate->set_callee(method); ++ istate->set_callee_entry_point(method->from_interpreted_entry()); ++ istate->set_bcp_advance(3); ++ ++ UPDATE_PC_AND_RETURN(0); // I'll be back... ++ } + + CASE(_invokeinterface): { + u2 index = Bytes::get_native_u2(pc+1); +@@ -2239,11 +2283,11 @@ + // java.lang.Object. See cpCacheOop.cpp for details. 
+ // This code isn't produced by javac, but could be produced by + // another compliant java compiler. +- if (cache->is_methodInterface()) { ++ if (cache->is_forced_virtual()) { + methodOop callee; + CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); + if (cache->is_vfinal()) { +- callee = (methodOop) cache->f2(); ++ callee = cache->f2_as_vfinal_method(); + } else { + // get receiver + int parms = cache->parameter_size(); +@@ -2251,7 +2295,7 @@ + VERIFY_OOP(STACK_OBJECT(-parms)); + instanceKlass* rcvrKlass = (instanceKlass*) + STACK_OBJECT(-parms)->klass()->klass_part(); +- callee = (methodOop) rcvrKlass->start_of_vtable()[ cache->f2()]; ++ callee = (methodOop) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; + } + istate->set_callee(callee); + istate->set_callee_entry_point(callee->from_interpreted_entry()); +@@ -2266,7 +2310,7 @@ + + // this could definitely be cleaned up QQQ + methodOop callee; +- klassOop iclass = (klassOop)cache->f1(); ++ klassOop iclass = cache->f1_as_klass(); + // instanceKlass* interface = (instanceKlass*) iclass->klass_part(); + // get receiver + int parms = cache->parameter_size(); +@@ -2284,7 +2328,7 @@ + if (i == int2->itable_length()) { + VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), ""); + } +- int mindex = cache->f2(); ++ int mindex = cache->f2_as_index(); + itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); + callee = im[mindex].method(); + if (callee == NULL) { +@@ -2322,12 +2366,12 @@ + methodOop callee; + if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { + CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); +- if (cache->is_vfinal()) callee = (methodOop) cache->f2(); ++ if (cache->is_vfinal()) callee = cache->f2_as_vfinal_method(); + else { + // get receiver + int parms = cache->parameter_size(); + // this works but needs a resourcemark and seems to create a vtable on every call: +- // methodOop callee = rcvr->klass()->klass_part()->vtable()->method_at(cache->f2()); ++ // 
methodOop callee = rcvr->klass()->klass_part()->vtable()->method_at(cache->f2_as_index()); + // + // this fails with an assert + // instanceKlass* rcvrKlass = instanceKlass::cast(STACK_OBJECT(-parms)->klass()); +@@ -2350,13 +2394,13 @@ + However it seems to have a vtable in the right location. Huh? + + */ +- callee = (methodOop) rcvrKlass->start_of_vtable()[ cache->f2()]; ++ callee = (methodOop) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; + } + } else { + if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { + CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); + } +- callee = (methodOop) cache->f1(); ++ callee = cache->f1_as_method(); + } + + istate->set_callee(callee); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/bytecodeInterpreter.hpp +--- openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -50,6 +50,10 @@ + + #ifdef CC_INTERP + ++// JavaStack Implementation ++#define MORE_STACK(count) \ ++ (topOfStack -= ((count) * Interpreter::stackElementWords)) ++ + // CVM definitions find hotspot equivalents... 
+ + union VMJavaVal64 { +@@ -107,7 +111,6 @@ + rethrow_exception, // unwinding and throwing exception + // requests to frame manager from C++ interpreter + call_method, // request for new frame from interpreter, manager responds with method_entry +- call_method_handle, // like the above, except the callee is a method handle + return_from_method, // request from interpreter to unwind, manager responds with method_continue + more_monitors, // need a new monitor + throwing_exception, // unwind stack and rethrow +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/bytecodes.cpp +--- openjdk/hotspot/src/share/vm/interpreter/bytecodes.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/bytecodes.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -534,6 +534,8 @@ + + def(_return_register_finalizer , "return_register_finalizer" , "b" , NULL , T_VOID , 0, true, _return); + ++ def(_invokehandle , "invokehandle" , "bJJ" , NULL , T_ILLEGAL, -1, true, _invokevirtual ); ++ + def(_fast_aldc , "fast_aldc" , "bj" , NULL , T_OBJECT, 1, true, _ldc ); + def(_fast_aldc_w , "fast_aldc_w" , "bJJ" , NULL , T_OBJECT, 1, true, _ldc_w ); + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/bytecodes.hpp +--- openjdk/hotspot/src/share/vm/interpreter/bytecodes.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/bytecodes.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -282,6 +282,9 @@ + + _return_register_finalizer , + ++ // special handling of signature-polymorphic methods: ++ _invokehandle , ++ + _shouldnotreachhere, // For debugging + + // Platform specific JVM bytecodes +@@ -356,8 +359,8 @@ + + public: + // Conversion +- static void check (Code code) { assert(is_defined(code), "illegal code"); } +- static void wide_check (Code code) { assert(wide_is_defined(code), "illegal code"); } ++ static void check (Code code) { assert(is_defined(code), err_msg("illegal code: %d", (int)code)); } ++ static void wide_check (Code code) { 
assert(wide_is_defined(code), err_msg("illegal code: %d", (int)code)); } + static Code cast (int code) { return (Code)code; } + + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/cppInterpreter.cpp +--- openjdk/hotspot/src/share/vm/interpreter/cppInterpreter.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/cppInterpreter.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -117,7 +117,6 @@ + method_entry(empty); + method_entry(accessor); + method_entry(abstract); +- method_entry(method_handle); + method_entry(java_lang_math_sin ); + method_entry(java_lang_math_cos ); + method_entry(java_lang_math_tan ); +@@ -126,6 +125,9 @@ + method_entry(java_lang_math_log ); + method_entry(java_lang_math_log10 ); + method_entry(java_lang_ref_reference_get); ++ ++ initialize_method_handle_entries(); ++ + Interpreter::_native_entry_begin = Interpreter::code()->code_end(); + method_entry(native); + method_entry(native_synchronized); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/interpreter.cpp +--- openjdk/hotspot/src/share/vm/interpreter/interpreter.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/interpreter.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -37,6 +37,7 @@ + #include "oops/oop.inline.hpp" + #include "prims/forte.hpp" + #include "prims/jvmtiExport.hpp" ++#include "prims/methodHandles.hpp" + #include "runtime/handles.inline.hpp" + #include "runtime/sharedRuntime.hpp" + #include "runtime/stubRoutines.hpp" +@@ -180,14 +181,21 @@ + // Abstract method? + if (m->is_abstract()) return abstract; + +- // Invoker for method handles? +- if (m->is_method_handle_invoke()) return method_handle; ++ // Method handle primitive? 
++ if (m->is_method_handle_intrinsic()) { ++ vmIntrinsics::ID id = m->intrinsic_id(); ++ assert(MethodHandles::is_signature_polymorphic(id), "must match an intrinsic"); ++ MethodKind kind = (MethodKind)( method_handle_invoke_FIRST + ++ ((int)id - vmIntrinsics::FIRST_MH_SIG_POLY) ); ++ assert(kind <= method_handle_invoke_LAST, "parallel enum ranges"); ++ return kind; ++ } + + // Native method? + // Note: This test must come _before_ the test for intrinsic + // methods. See also comments below. + if (m->is_native()) { +- assert(!m->is_method_handle_invoke(), "overlapping bits here, watch out"); ++ assert(!m->is_method_handle_intrinsic(), "overlapping bits here, watch out"); + return m->is_synchronized() ? native_synchronized : native; + } + +@@ -237,6 +245,14 @@ + } + + ++void AbstractInterpreter::set_entry_for_kind(AbstractInterpreter::MethodKind kind, address entry) { ++ assert(kind >= method_handle_invoke_FIRST && ++ kind <= method_handle_invoke_LAST, "late initialization only for MH entry points"); ++ assert(_entry_table[kind] == _entry_table[abstract], "previous value must be AME entry"); ++ _entry_table[kind] = entry; ++} ++ ++ + // Return true if the interpreter can prove that the given bytecode has + // not yet been executed (in Java semantics, not in actual operation). 
+ bool AbstractInterpreter::is_not_reached(methodHandle method, int bci) { +@@ -268,7 +284,6 @@ + case empty : tty->print("empty" ); break; + case accessor : tty->print("accessor" ); break; + case abstract : tty->print("abstract" ); break; +- case method_handle : tty->print("method_handle" ); break; + case java_lang_math_sin : tty->print("java_lang_math_sin" ); break; + case java_lang_math_cos : tty->print("java_lang_math_cos" ); break; + case java_lang_math_tan : tty->print("java_lang_math_tan" ); break; +@@ -276,7 +291,16 @@ + case java_lang_math_sqrt : tty->print("java_lang_math_sqrt" ); break; + case java_lang_math_log : tty->print("java_lang_math_log" ); break; + case java_lang_math_log10 : tty->print("java_lang_math_log10" ); break; +- default : ShouldNotReachHere(); ++ default: ++ if (kind >= method_handle_invoke_FIRST && ++ kind <= method_handle_invoke_LAST) { ++ const char* kind_name = vmIntrinsics::name_at(method_handle_intrinsic(kind)); ++ if (kind_name[0] == '_') kind_name = &kind_name[1]; // '_invokeExact' => 'invokeExact' ++ tty->print("method_handle_%s", kind_name); ++ break; ++ } ++ ShouldNotReachHere(); ++ break; + } + } + #endif // PRODUCT +@@ -436,3 +460,11 @@ + } + } + } ++ ++void AbstractInterpreterGenerator::initialize_method_handle_entries() { ++ // method handle entry kinds are generated later in MethodHandlesAdapterGenerator::generate: ++ for (int i = Interpreter::method_handle_invoke_FIRST; i <= Interpreter::method_handle_invoke_LAST; i++) { ++ Interpreter::MethodKind kind = (Interpreter::MethodKind) i; ++ Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract]; ++ } ++} +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/interpreterRuntime.cpp +--- openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -145,7 +145,7 @@ + // The bytecode wrappers aren't GC-safe 
so construct a new one + Bytecode_loadconstant ldc2(m, bci(thread)); + ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc2.cache_index()); +- assert(result == cpce->f1(), "expected result for assembly code"); ++ assert(result == cpce->f1_as_instance(), "expected result for assembly code"); + } + #endif + } +@@ -674,7 +674,7 @@ + JvmtiExport::post_raw_breakpoint(thread, method, bcp); + IRT_END + +-IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode)) ++IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode)) { + // extract receiver from the outgoing argument list if necessary + Handle receiver(thread, NULL); + if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface) { +@@ -742,86 +742,54 @@ + info.resolved_method(), + info.vtable_index()); + } ++} ++IRT_END ++ ++ ++// First time execution: Resolve symbols, create a permanent MethodType object. ++IRT_ENTRY(void, InterpreterRuntime::resolve_invokehandle(JavaThread* thread)) { ++ assert(EnableInvokeDynamic, ""); ++ const Bytecodes::Code bytecode = Bytecodes::_invokehandle; ++ ++ // resolve method ++ CallInfo info; ++ constantPoolHandle pool(thread, method(thread)->constants()); ++ ++ { ++ JvmtiHideSingleStepping jhss(thread); ++ LinkResolver::resolve_invoke(info, Handle(), pool, ++ get_index_u2_cpcache(thread, bytecode), bytecode, CHECK); ++ } // end JvmtiHideSingleStepping ++ ++ cache_entry(thread)->set_method_handle( ++ info.resolved_method(), ++ info.resolved_appendix()); ++} + IRT_END + + + // First time execution: Resolve symbols, create a permanent CallSite object. + IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) { +- ResourceMark rm(thread); +- + assert(EnableInvokeDynamic, ""); +- + const Bytecodes::Code bytecode = Bytecodes::_invokedynamic; + +- methodHandle caller_method(thread, method(thread)); ++ //TO DO: consider passing BCI to Java. 
++ // int caller_bci = method(thread)->bci_from(bcp(thread)); + +- constantPoolHandle pool(thread, caller_method->constants()); +- pool->set_invokedynamic(); // mark header to flag active call sites ++ // resolve method ++ CallInfo info; ++ constantPoolHandle pool(thread, method(thread)->constants()); ++ int index = get_index_u4(thread, bytecode); + +- int caller_bci = 0; +- int site_index = 0; +- { address caller_bcp = bcp(thread); +- caller_bci = caller_method->bci_from(caller_bcp); +- site_index = Bytes::get_native_u4(caller_bcp+1); +- } +- assert(site_index == InterpreterRuntime::bytecode(thread).get_index_u4(bytecode), ""); +- assert(constantPoolCacheOopDesc::is_secondary_index(site_index), "proper format"); +- // there is a second CPC entries that is of interest; it caches signature info: +- int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index(); +- int pool_index = pool->cache()->entry_at(main_index)->constant_pool_index(); ++ { ++ JvmtiHideSingleStepping jhss(thread); ++ LinkResolver::resolve_invoke(info, Handle(), pool, ++ index, bytecode, CHECK); ++ } // end JvmtiHideSingleStepping + +- // first resolve the signature to a MH.invoke methodOop +- if (!pool->cache()->entry_at(main_index)->is_resolved(bytecode)) { +- JvmtiHideSingleStepping jhss(thread); +- CallInfo callinfo; +- LinkResolver::resolve_invoke(callinfo, Handle(), pool, +- site_index, bytecode, CHECK); +- // The main entry corresponds to a JVM_CONSTANT_InvokeDynamic, and serves +- // as a common reference point for all invokedynamic call sites with +- // that exact call descriptor. We will link it in the CP cache exactly +- // as if it were an invokevirtual of MethodHandle.invoke. +- pool->cache()->entry_at(main_index)->set_method( +- bytecode, +- callinfo.resolved_method(), +- callinfo.vtable_index()); +- } +- +- // The method (f2 entry) of the main entry is the MH.invoke for the +- // invokedynamic target call signature. 
+- oop f1_value = pool->cache()->entry_at(main_index)->f1(); +- methodHandle signature_invoker(THREAD, (methodOop) f1_value); +- assert(signature_invoker.not_null() && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(), +- "correct result from LinkResolver::resolve_invokedynamic"); +- +- Handle info; // optional argument(s) in JVM_CONSTANT_InvokeDynamic +- Handle bootm = SystemDictionary::find_bootstrap_method(caller_method, caller_bci, +- main_index, info, CHECK); +- if (!java_lang_invoke_MethodHandle::is_instance(bootm())) { +- THROW_MSG(vmSymbols::java_lang_IllegalStateException(), +- "no bootstrap method found for invokedynamic"); +- } +- +- // Short circuit if CallSite has been bound already: +- if (!pool->cache()->secondary_entry_at(site_index)->is_f1_null()) +- return; +- +- Symbol* call_site_name = pool->name_ref_at(site_index); +- +- Handle call_site +- = SystemDictionary::make_dynamic_call_site(bootm, +- // Callee information: +- call_site_name, +- signature_invoker, +- info, +- // Caller information: +- caller_method, +- caller_bci, +- CHECK); +- +- // In the secondary entry, the f1 field is the call site, and the f2 (index) +- // field is some data about the invoke site. Currently, it is just the BCI. +- // Later, it might be changed to help manage inlining dependencies. 
+- pool->cache()->secondary_entry_at(site_index)->set_dynamic_call(call_site, signature_invoker); ++ pool->cache()->secondary_entry_at(index)->set_dynamic_call( ++ info.resolved_method(), ++ info.resolved_appendix()); + } + IRT_END + +@@ -993,7 +961,7 @@ + + // check the access_flags for the field in the klass + +- instanceKlass* ik = instanceKlass::cast(java_lang_Class::as_klassOop(cp_entry->f1())); ++ instanceKlass* ik = instanceKlass::cast(java_lang_Class::as_klassOop(cp_entry->f1_as_klass_mirror())); + int index = cp_entry->field_index(); + if ((ik->field_access_flags(index) & JVM_ACC_FIELD_ACCESS_WATCHED) == 0) return; + +@@ -1016,15 +984,15 @@ + // non-static field accessors have an object, but we need a handle + h_obj = Handle(thread, obj); + } +- instanceKlassHandle h_cp_entry_f1(thread, java_lang_Class::as_klassOop(cp_entry->f1())); +- jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_cp_entry_f1, cp_entry->f2(), is_static); ++ instanceKlassHandle h_cp_entry_f1(thread, java_lang_Class::as_klassOop(cp_entry->f1_as_klass_mirror())); ++ jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_cp_entry_f1, cp_entry->f2_as_index(), is_static); + JvmtiExport::post_field_access(thread, method(thread), bcp(thread), h_cp_entry_f1, h_obj, fid); + IRT_END + + IRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread *thread, + oopDesc* obj, ConstantPoolCacheEntry *cp_entry, jvalue *value)) + +- klassOop k = java_lang_Class::as_klassOop(cp_entry->f1()); ++ klassOop k = java_lang_Class::as_klassOop(cp_entry->f1_as_klass_mirror()); + + // check the access_flags for the field in the klass + instanceKlass* ik = instanceKlass::cast(k); +@@ -1049,7 +1017,7 @@ + + HandleMark hm(thread); + instanceKlassHandle h_klass(thread, k); +- jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_klass, cp_entry->f2(), is_static); ++ jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_klass, cp_entry->f2_as_index(), is_static); + jvalue fvalue; + #ifdef _LP64 + fvalue = *value; +diff -r 
3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/interpreterRuntime.hpp +--- openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -71,6 +71,8 @@ + { return bytecode(thread).get_index_u2(bc); } + static int get_index_u2_cpcache(JavaThread *thread, Bytecodes::Code bc) + { return bytecode(thread).get_index_u2_cpcache(bc); } ++ static int get_index_u4(JavaThread *thread, Bytecodes::Code bc) ++ { return bytecode(thread).get_index_u4(bc); } + static int number_of_dimensions(JavaThread *thread) { return bcp(thread)[3]; } + + static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i) { return method(thread)->constants()->cache()->entry_at(i); } +@@ -118,6 +120,7 @@ + + // Calls + static void resolve_invoke (JavaThread* thread, Bytecodes::Code bytecode); ++ static void resolve_invokehandle (JavaThread* thread); + static void resolve_invokedynamic(JavaThread* thread); + + // Breakpoints +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/linkResolver.cpp +--- openjdk/hotspot/src/share/vm/interpreter/linkResolver.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/linkResolver.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -96,15 +96,21 @@ + void CallInfo::set_virtual(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) { + assert(vtable_index >= 0 || vtable_index == methodOopDesc::nonvirtual_vtable_index, "valid index"); + set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK); ++ assert(!resolved_method->is_compiled_lambda_form(), "these must be handled via an invokehandle call"); + } + +-void CallInfo::set_dynamic(methodHandle resolved_method, TRAPS) { +- assert(resolved_method->is_method_handle_invoke(), ""); ++void CallInfo::set_handle(methodHandle 
resolved_method, Handle resolved_appendix, TRAPS) { ++ if (resolved_method.is_null()) { ++ THROW_MSG(vmSymbols::java_lang_InternalError(), "resolved method is null"); ++ } + KlassHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass(); +- assert(resolved_klass == resolved_method->method_holder(), ""); ++ assert(resolved_method->intrinsic_id() == vmIntrinsics::_invokeBasic || ++ resolved_method->is_compiled_lambda_form(), ++ "linkMethod must return one of these"); + int vtable_index = methodOopDesc::nonvirtual_vtable_index; + assert(resolved_method->vtable_index() == vtable_index, ""); +- set_common(resolved_klass, KlassHandle(), resolved_method, resolved_method, vtable_index, CHECK); ++ set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK); ++ _resolved_appendix = resolved_appendix; + } + + void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) { +@@ -114,6 +120,7 @@ + _resolved_method = resolved_method; + _selected_method = selected_method; + _vtable_index = vtable_index; ++ _resolved_appendix = Handle(); + if (CompilationPolicy::must_be_compiled(selected_method)) { + // This path is unusual, mostly used by the '-Xcomp' stress test mode. + +@@ -180,11 +187,9 @@ + void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) { + methodOop result_oop = klass->uncached_lookup_method(name, signature); + if (EnableInvokeDynamic && result_oop != NULL) { +- switch (result_oop->intrinsic_id()) { +- case vmIntrinsics::_invokeExact: +- case vmIntrinsics::_invokeGeneric: +- case vmIntrinsics::_invokeDynamic: +- // Do not link directly to these. The VM must produce a synthetic one using lookup_implicit_method. 
++ vmIntrinsics::ID iid = result_oop->intrinsic_id(); ++ if (MethodHandles::is_signature_polymorphic(iid)) { ++ // Do not link directly to these. The VM must produce a synthetic one using lookup_polymorphic_method. + return; + } + } +@@ -213,31 +218,97 @@ + result = methodHandle(THREAD, ik->lookup_method_in_all_interfaces(name, signature)); + } + +-void LinkResolver::lookup_implicit_method(methodHandle& result, +- KlassHandle klass, Symbol* name, Symbol* signature, +- KlassHandle current_klass, +- TRAPS) { ++void LinkResolver::lookup_polymorphic_method(methodHandle& result, ++ KlassHandle klass, Symbol* name, Symbol* full_signature, ++ KlassHandle current_klass, ++ Handle* appendix_result_or_null, ++ TRAPS) { ++ vmIntrinsics::ID iid = MethodHandles::signature_polymorphic_name_id(name); ++ if (TraceMethodHandles) { ++ tty->print_cr("lookup_polymorphic_method iid=%s %s.%s%s", ++ vmIntrinsics::name_at(iid), klass->external_name(), ++ name->as_C_string(), full_signature->as_C_string()); ++ } + if (EnableInvokeDynamic && + klass() == SystemDictionary::MethodHandle_klass() && +- methodOopDesc::is_method_handle_invoke_name(name)) { +- if (!THREAD->is_Compiler_thread() && !MethodHandles::enabled()) { +- // Make sure the Java part of the runtime has been booted up. +- klassOop natives = SystemDictionary::MethodHandleNatives_klass(); +- if (natives == NULL || instanceKlass::cast(natives)->is_not_initialized()) { +- SystemDictionary::resolve_or_fail(vmSymbols::java_lang_invoke_MethodHandleNatives(), +- Handle(), +- Handle(), +- true, +- CHECK); ++ iid != vmIntrinsics::_none) { ++ if (MethodHandles::is_signature_polymorphic_intrinsic(iid)) { ++ // Most of these do not need an up-call to Java to resolve, so can be done anywhere. ++ // Do not erase last argument type (MemberName) if it is a static linkTo method. 
++ bool keep_last_arg = MethodHandles::is_signature_polymorphic_static(iid); ++ TempNewSymbol basic_signature = ++ MethodHandles::lookup_basic_type_signature(full_signature, keep_last_arg, CHECK); ++ if (TraceMethodHandles) { ++ tty->print_cr("lookup_polymorphic_method %s %s => basic %s", ++ name->as_C_string(), ++ full_signature->as_C_string(), ++ basic_signature->as_C_string()); + } +- } +- methodOop result_oop = SystemDictionary::find_method_handle_invoke(name, +- signature, +- current_klass, +- CHECK); +- if (result_oop != NULL) { +- assert(result_oop->is_method_handle_invoke() && result_oop->signature() == signature, "consistent"); +- result = methodHandle(THREAD, result_oop); ++ result = SystemDictionary::find_method_handle_intrinsic(iid, ++ basic_signature, ++ CHECK); ++ if (result.not_null()) { ++ assert(result->is_method_handle_intrinsic(), "MH.invokeBasic or MH.linkTo* intrinsic"); ++ assert(result->intrinsic_id() != vmIntrinsics::_invokeGeneric, "wrong place to find this"); ++ assert(basic_signature == result->signature(), "predict the result signature"); ++ if (TraceMethodHandles) { ++ tty->print("lookup_polymorphic_method => intrinsic "); ++ result->print_on(tty); ++ } ++ return; ++ } ++ } else if (iid == vmIntrinsics::_invokeGeneric ++ && !THREAD->is_Compiler_thread() ++ && appendix_result_or_null != NULL) { ++ // This is a method with type-checking semantics. ++ // We will ask Java code to spin an adapter method for it. ++ if (!MethodHandles::enabled()) { ++ // Make sure the Java part of the runtime has been booted up. 
++ klassOop natives = SystemDictionary::MethodHandleNatives_klass(); ++ if (natives == NULL || instanceKlass::cast(natives)->is_not_initialized()) { ++ SystemDictionary::resolve_or_fail(vmSymbols::java_lang_invoke_MethodHandleNatives(), ++ Handle(), ++ Handle(), ++ true, ++ CHECK); ++ } ++ } ++ ++ Handle appendix; ++ result = SystemDictionary::find_method_handle_invoker(name, ++ full_signature, ++ current_klass, ++ &appendix, ++ CHECK); ++ if (TraceMethodHandles) { ++ tty->print("lookup_polymorphic_method => (via Java) "); ++ result->print_on(tty); ++ tty->print(" lookup_polymorphic_method => appendix = "); ++ if (appendix.is_null()) tty->print_cr("(none)"); ++ else appendix->print_on(tty); ++ } ++ if (result.not_null()) { ++#ifdef ASSERT ++ TempNewSymbol basic_signature = ++ MethodHandles::lookup_basic_type_signature(full_signature, CHECK); ++ int actual_size_of_params = result->size_of_parameters(); ++ int expected_size_of_params = ArgumentSizeComputer(basic_signature).size(); ++ // +1 for MethodHandle.this, +1 for trailing MethodType ++ if (!MethodHandles::is_signature_polymorphic_static(iid)) expected_size_of_params += 1; ++ if (appendix.not_null()) expected_size_of_params += 1; ++ if (actual_size_of_params != expected_size_of_params) { ++ tty->print_cr("*** basic_signature=%s", basic_signature->as_C_string()); ++ tty->print_cr("*** result for %s: ", vmIntrinsics::name_at(iid)); ++ result->print(); ++ } ++ assert(actual_size_of_params == expected_size_of_params, ++ err_msg("%d != %d", actual_size_of_params, expected_size_of_params)); ++#endif //ASSERT ++ ++ assert(appendix_result_or_null != NULL, ""); ++ (*appendix_result_or_null) = appendix; ++ return; ++ } + } + } + } +@@ -267,6 +338,7 @@ + new_flags = new_flags | JVM_ACC_PUBLIC; + flags.set_flags(new_flags); + } ++// assert(extra_arg_result_or_null != NULL, "must be able to return extra argument"); + + if (!Reflection::verify_field_access(ref_klass->as_klassOop(), + resolved_klass->as_klassOop(), +@@ -287,10 
+359,19 @@ + } + } + +-void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle& resolved_klass, +- constantPoolHandle pool, int index, TRAPS) { ++void LinkResolver::resolve_method_statically(methodHandle& resolved_method, KlassHandle& resolved_klass, ++ Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS) { + + // resolve klass ++ if (code == Bytecodes::_invokedynamic) { ++ resolved_klass = SystemDictionaryHandles::MethodHandle_klass(); ++ Symbol* method_name = vmSymbols::invoke_name(); ++ Symbol* method_signature = pool->signature_ref_at(index); ++ KlassHandle current_klass(THREAD, pool->pool_holder()); ++ resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK); ++ return; ++ } ++ + resolve_klass(resolved_klass, pool, index, CHECK); + + Symbol* method_name = pool->name_ref_at(index); +@@ -299,7 +380,7 @@ + + if (pool->has_preresolution() + || (resolved_klass() == SystemDictionary::MethodHandle_klass() && +- methodOopDesc::is_method_handle_invoke_name(method_name))) { ++ MethodHandles::is_signature_polymorphic_name(resolved_klass(), method_name))) { + methodOop result_oop = constantPoolOopDesc::method_at_if_loaded(pool, index); + if (result_oop != NULL) { + resolved_method = methodHandle(THREAD, result_oop); +@@ -307,33 +388,13 @@ + } + } + +- resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK); ++ if (code == Bytecodes::_invokeinterface) { ++ resolve_interface_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK); ++ } else { ++ resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK); ++ } + } + +-void LinkResolver::resolve_dynamic_method(methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS) { +- // The class is java.lang.invoke.MethodHandle +- resolved_klass = 
SystemDictionaryHandles::MethodHandle_klass(); +- +- Symbol* method_name = vmSymbols::invokeExact_name(); +- +- Symbol* method_signature = pool->signature_ref_at(index); +- KlassHandle current_klass (THREAD, pool->pool_holder()); +- +- resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK); +-} +- +-void LinkResolver::resolve_interface_method(methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS) { +- +- // resolve klass +- resolve_klass(resolved_klass, pool, index, CHECK); +- Symbol* method_name = pool->name_ref_at(index); +- Symbol* method_signature = pool->signature_ref_at(index); +- KlassHandle current_klass(THREAD, pool->pool_holder()); +- +- resolve_interface_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK); +-} +- +- + void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle resolved_klass, + Symbol* method_name, Symbol* method_signature, + KlassHandle current_klass, bool check_access, TRAPS) { +@@ -346,6 +407,8 @@ + THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); + } + ++ Handle nested_exception; ++ + // 2. lookup method in resolved klass and its super klasses + lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK); + +@@ -354,17 +417,23 @@ + lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK); + + if (resolved_method.is_null()) { +- // JSR 292: see if this is an implicitly generated method MethodHandle.invoke(*...) 
+- lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, CHECK); ++ // JSR 292: see if this is an implicitly generated method MethodHandle.linkToVirtual(*...), etc ++ lookup_polymorphic_method(resolved_method, resolved_klass, method_name, method_signature, ++ current_klass, (Handle*)NULL, THREAD); ++ if (HAS_PENDING_EXCEPTION) { ++ nested_exception = Handle(THREAD, PENDING_EXCEPTION); ++ CLEAR_PENDING_EXCEPTION; ++ } + } + + if (resolved_method.is_null()) { + // 4. method lookup failed + ResourceMark rm(THREAD); +- THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(), +- methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), +- method_name, +- method_signature)); ++ THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(), ++ methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), ++ method_name, ++ method_signature), ++ nested_exception); + } + } + +@@ -1053,6 +1122,7 @@ + case Bytecodes::_invokestatic : resolve_invokestatic (result, pool, index, CHECK); break; + case Bytecodes::_invokespecial : resolve_invokespecial (result, pool, index, CHECK); break; + case Bytecodes::_invokevirtual : resolve_invokevirtual (result, recv, pool, index, CHECK); break; ++ case Bytecodes::_invokehandle : resolve_invokehandle (result, pool, index, CHECK); break; + case Bytecodes::_invokedynamic : resolve_invokedynamic (result, pool, index, CHECK); break; + case Bytecodes::_invokeinterface: resolve_invokeinterface(result, recv, pool, index, CHECK); break; + } +@@ -1116,22 +1186,91 @@ + } + + +-void LinkResolver::resolve_invokedynamic(CallInfo& result, constantPoolHandle pool, int raw_index, TRAPS) { ++void LinkResolver::resolve_invokehandle(CallInfo& result, constantPoolHandle pool, int index, TRAPS) { + assert(EnableInvokeDynamic, ""); ++ // This guy is reached from InterpreterRuntime::resolve_invokehandle. 
++ KlassHandle resolved_klass; ++ Symbol* method_name = NULL; ++ Symbol* method_signature = NULL; ++ KlassHandle current_klass; ++ resolve_pool(resolved_klass, method_name, method_signature, current_klass, pool, index, CHECK); ++ if (TraceMethodHandles) ++ tty->print_cr("resolve_invokehandle %s %s", method_name->as_C_string(), method_signature->as_C_string()); ++ resolve_handle_call(result, resolved_klass, method_name, method_signature, current_klass, CHECK); ++} + +- // This guy is reached from InterpreterRuntime::resolve_invokedynamic. ++void LinkResolver::resolve_handle_call(CallInfo& result, KlassHandle resolved_klass, ++ Symbol* method_name, Symbol* method_signature, ++ KlassHandle current_klass, ++ TRAPS) { ++ // JSR 292: this must be an implicitly generated method MethodHandle.invokeExact(*...) or similar ++ assert(resolved_klass() == SystemDictionary::MethodHandle_klass(), ""); ++ assert(MethodHandles::is_signature_polymorphic_name(method_name), ""); ++ methodHandle resolved_method; ++ Handle resolved_appendix; ++ lookup_polymorphic_method(resolved_method, resolved_klass, ++ method_name, method_signature, ++ current_klass, &resolved_appendix, CHECK); ++ result.set_handle(resolved_method, resolved_appendix, CHECK); ++} + +- // At this point, we only need the signature, and can ignore the name. +- Symbol* method_signature = pool->signature_ref_at(raw_index); // raw_index works directly +- Symbol* method_name = vmSymbols::invokeExact_name(); +- KlassHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass(); + +- // JSR 292: this must be an implicitly generated method MethodHandle.invokeExact(*...) +- // The extra MH receiver will be inserted into the stack on every call. 
+- methodHandle resolved_method; +- KlassHandle current_klass(THREAD, pool->pool_holder()); +- lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, THREAD); ++void LinkResolver::resolve_invokedynamic(CallInfo& result, constantPoolHandle pool, int index, TRAPS) { ++ assert(EnableInvokeDynamic, ""); ++ pool->set_invokedynamic(); // mark header to flag active call sites ++ ++ //resolve_pool(, method_name, method_signature, current_klass, pool, index, CHECK); ++ Symbol* method_name = pool->name_ref_at(index); ++ Symbol* method_signature = pool->signature_ref_at(index); ++ KlassHandle current_klass = KlassHandle(THREAD, pool->pool_holder()); ++ ++ // Resolve the bootstrap specifier (BSM + optional arguments). ++ Handle bootstrap_specifier; ++ // Check if CallSite has been bound already: ++ ConstantPoolCacheEntry* cpce = pool->cache()->secondary_entry_at(index); ++ if (cpce->is_f1_null()) { ++ int pool_index = pool->cache()->main_entry_at(index)->constant_pool_index(); ++ oop bsm_info = pool->resolve_bootstrap_specifier_at(pool_index, CHECK); ++ assert(bsm_info != NULL, ""); ++ // FIXME: Cache this once per BootstrapMethods entry, not once per CONSTANT_InvokeDynamic. ++ bootstrap_specifier = Handle(THREAD, bsm_info); ++ } ++ if (!cpce->is_f1_null()) { ++ methodHandle method(THREAD, cpce->f2_as_vfinal_method()); ++ Handle appendix(THREAD, cpce->has_appendix() ? 
cpce->f1_appendix() : (oop)NULL); ++ result.set_handle(method, appendix, CHECK); ++ return; ++ } ++ ++ if (TraceMethodHandles) { ++ tty->print_cr("resolve_invokedynamic #%d %s %s", ++ constantPoolCacheOopDesc::decode_secondary_index(index), ++ method_name->as_C_string(), method_signature->as_C_string()); ++ tty->print(" BSM info: "); bootstrap_specifier->print(); ++ } ++ ++ resolve_dynamic_call(result, bootstrap_specifier, method_name, method_signature, current_klass, CHECK); ++} ++ ++void LinkResolver::resolve_dynamic_call(CallInfo& result, ++ Handle bootstrap_specifier, ++ Symbol* method_name, Symbol* method_signature, ++ KlassHandle current_klass, ++ TRAPS) { ++ // JSR 292: this must resolve to an implicitly generated method MH.linkToCallSite(*...) ++ // The appendix argument is likely to be a freshly-created CallSite. ++ Handle resolved_appendix; ++ methodHandle resolved_method = ++ SystemDictionary::find_dynamic_call_site_invoker(current_klass, ++ bootstrap_specifier, ++ method_name, method_signature, ++ &resolved_appendix, ++ CHECK); + if (HAS_PENDING_EXCEPTION) { ++ if (TraceMethodHandles) { ++ tty->print_cr("invokedynamic throws BSME for "INTPTR_FORMAT, PENDING_EXCEPTION); ++ PENDING_EXCEPTION->print(); ++ } + if (PENDING_EXCEPTION->is_a(SystemDictionary::BootstrapMethodError_klass())) { + // throw these guys, since they are already wrapped + return; +@@ -1141,17 +1280,12 @@ + return; + } + // See the "Linking Exceptions" section for the invokedynamic instruction in the JVMS. 
+- Handle ex(THREAD, PENDING_EXCEPTION); ++ Handle nested_exception(THREAD, PENDING_EXCEPTION); + CLEAR_PENDING_EXCEPTION; +- oop bsme = Klass::cast(SystemDictionary::BootstrapMethodError_klass())->java_mirror(); +- MethodHandles::raise_exception(Bytecodes::_athrow, ex(), bsme, CHECK); +- // java code should not return, but if it does throw out anyway +- THROW(vmSymbols::java_lang_InternalError()); ++ THROW_MSG_CAUSE(vmSymbols::java_lang_BootstrapMethodError(), ++ "BootstrapMethodError", nested_exception) + } +- if (resolved_method.is_null()) { +- THROW(vmSymbols::java_lang_InternalError()); +- } +- result.set_dynamic(resolved_method, CHECK); ++ result.set_handle(resolved_method, resolved_appendix, CHECK); + } + + //------------------------------------------------------------------------------------------------------------------------ +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/linkResolver.hpp +--- openjdk/hotspot/src/share/vm/interpreter/linkResolver.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/linkResolver.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -75,11 +75,12 @@ + methodHandle _resolved_method; // static target method + methodHandle _selected_method; // dynamic (actual) target method + int _vtable_index; // vtable index of selected method ++ Handle _resolved_appendix; // extra argument in constant pool (if CPCE::has_appendix) + + void set_static( KlassHandle resolved_klass, methodHandle resolved_method , TRAPS); + void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method , TRAPS); + void set_virtual( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS); +- void set_dynamic( methodHandle resolved_method, TRAPS); ++ void set_handle( methodHandle resolved_method, Handle resolved_appendix, TRAPS); + void set_common( KlassHandle resolved_klass, 
KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS); + + friend class LinkResolver; +@@ -89,6 +90,7 @@ + KlassHandle selected_klass() const { return _selected_klass; } + methodHandle resolved_method() const { return _resolved_method; } + methodHandle selected_method() const { return _selected_method; } ++ Handle resolved_appendix() const { return _resolved_appendix; } + + BasicType result_type() const { return selected_method()->result_type(); } + bool has_vtable_index() const { return _vtable_index >= 0; } +@@ -110,8 +112,8 @@ + static void lookup_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); + static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); + static void lookup_method_in_interfaces (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); +- static void lookup_implicit_method (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, +- KlassHandle current_klass, TRAPS); ++ static void lookup_polymorphic_method (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, ++ KlassHandle current_klass, Handle* appendix_result_or_null, TRAPS); + + static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); + +@@ -139,10 +141,9 @@ + // constant pool resolving + static void check_klass_accessability(KlassHandle ref_klass, KlassHandle sel_klass, TRAPS); + +- // static resolving for all calls except interface calls +- static void resolve_method (methodHandle& method_result, KlassHandle& klass_result, constantPoolHandle pool, int index, TRAPS); +- static void resolve_dynamic_method (methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS); +- static void resolve_interface_method(methodHandle& method_result, KlassHandle& klass_result, 
constantPoolHandle pool, int index, TRAPS); ++ // static resolving calls (will not run any Java code); used only from Bytecode_invoke::static_target ++ static void resolve_method_statically(methodHandle& method_result, KlassHandle& klass_result, ++ Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS); + + // runtime/static resolving for fields + static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS); +@@ -156,6 +157,8 @@ + static void resolve_special_call (CallInfo& result, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS); + static void resolve_virtual_call (CallInfo& result, Handle recv, KlassHandle recv_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, bool check_null_and_abstract, TRAPS); + static void resolve_interface_call(CallInfo& result, Handle recv, KlassHandle recv_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, bool check_null_and_abstract, TRAPS); ++ static void resolve_handle_call (CallInfo& result, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, TRAPS); ++ static void resolve_dynamic_call (CallInfo& result, Handle bootstrap_specifier, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, TRAPS); + + // same as above for compile-time resolution; but returns null handle instead of throwing an exception on error + // also, does not initialize klass (i.e., no side effects) +@@ -177,6 +180,7 @@ + static void resolve_invokevirtual (CallInfo& result, Handle recv, constantPoolHandle pool, int index, TRAPS); + static void resolve_invokeinterface(CallInfo& result, Handle recv, constantPoolHandle pool, int index, TRAPS); + static void resolve_invokedynamic (CallInfo& 
result, constantPoolHandle pool, int index, TRAPS); ++ static void resolve_invokehandle (CallInfo& result, constantPoolHandle pool, int index, TRAPS); + + static void resolve_invoke (CallInfo& result, Handle recv, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS); + }; +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/rewriter.cpp +--- openjdk/hotspot/src/share/vm/interpreter/rewriter.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/rewriter.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -33,6 +33,7 @@ + #include "oops/objArrayOop.hpp" + #include "oops/oop.inline.hpp" + #include "prims/methodComparator.hpp" ++#include "prims/methodHandles.hpp" + + // Computes a CPC map (new_index -> original_index) for constant pool entries + // that are referred to by the interpreter at runtime via the constant pool cache. +@@ -41,10 +42,9 @@ + void Rewriter::compute_index_maps() { + const int length = _pool->length(); + init_cp_map(length); +- jint tag_mask = 0; ++ bool saw_mh_symbol = false; + for (int i = 0; i < length; i++) { + int tag = _pool->tag_at(i).value(); +- tag_mask |= (1 << tag); + switch (tag) { + case JVM_CONSTANT_InterfaceMethodref: + case JVM_CONSTANT_Fieldref : // fall through +@@ -54,13 +54,18 @@ + case JVM_CONSTANT_InvokeDynamic : // fall through + add_cp_cache_entry(i); + break; ++ case JVM_CONSTANT_Utf8: ++ if (_pool->symbol_at(i) == vmSymbols::java_lang_invoke_MethodHandle()) ++ saw_mh_symbol = true; ++ break; + } + } + + guarantee((int)_cp_cache_map.length()-1 <= (int)((u2)-1), + "all cp cache indexes fit in a u2"); + +- _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0); ++ if (saw_mh_symbol) ++ _method_handle_invokers.initialize(length, (int)0); + } + + // Unrewrite the bytecodes if an error occurs. 
+@@ -80,22 +85,6 @@ + oopFactory::new_constantPoolCache(length, CHECK); + No_Safepoint_Verifier nsv; + cache->initialize(_cp_cache_map); +- +- // Don't bother with the next pass if there is no JVM_CONSTANT_InvokeDynamic. +- if (_have_invoke_dynamic) { +- for (int i = 0; i < length; i++) { +- int pool_index = cp_cache_entry_pool_index(i); +- if (pool_index >= 0 && +- _pool->tag_at(pool_index).is_invoke_dynamic()) { +- int bsm_index = _pool->invoke_dynamic_bootstrap_method_ref_index_at(pool_index); +- assert(_pool->tag_at(bsm_index).is_method_handle(), "must be a MH constant"); +- // There is a CP cache entry holding the BSM for these calls. +- int bsm_cache_index = cp_entry_to_cp_cache(bsm_index); +- cache->entry_at(i)->initialize_bootstrap_method_index_in_cache(bsm_cache_index); +- } +- } +- } +- + _pool->set_cache(cache); + cache->set_constant_pool(_pool()); + } +@@ -148,10 +137,53 @@ + int cp_index = Bytes::get_Java_u2(p); + int cache_index = cp_entry_to_cp_cache(cp_index); + Bytes::put_native_u2(p, cache_index); ++ if (!_method_handle_invokers.is_empty()) ++ maybe_rewrite_invokehandle(p - 1, cp_index, reverse); + } else { + int cache_index = Bytes::get_native_u2(p); + int pool_index = cp_cache_entry_pool_index(cache_index); + Bytes::put_Java_u2(p, pool_index); ++ if (!_method_handle_invokers.is_empty()) ++ maybe_rewrite_invokehandle(p - 1, pool_index, reverse); ++ } ++} ++ ++ ++// Adjust the invocation bytecode for a signature-polymorphic method (MethodHandle.invoke, etc.) ++void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, bool reverse) { ++ if (!reverse) { ++ if ((*opc) == (u1)Bytecodes::_invokevirtual || ++ // allow invokespecial as an alias, although it would be very odd: ++ (*opc) == (u1)Bytecodes::_invokespecial) { ++ assert(_pool->tag_at(cp_index).is_method(), "wrong index"); ++ // Determine whether this is a signature-polymorphic method. 
++ if (cp_index >= _method_handle_invokers.length()) return; ++ int status = _method_handle_invokers[cp_index]; ++ assert(status >= -1 && status <= 1, "oob tri-state"); ++ if (status == 0) { ++ if (_pool->klass_ref_at_noresolve(cp_index) == vmSymbols::java_lang_invoke_MethodHandle() && ++ MethodHandles::is_signature_polymorphic_name(SystemDictionary::MethodHandle_klass(), ++ _pool->name_ref_at(cp_index))) ++ status = +1; ++ else ++ status = -1; ++ _method_handle_invokers[cp_index] = status; ++ } ++ // We use a special internal bytecode for such methods (if non-static). ++ // The basic reason for this is that such methods need an extra "appendix" argument ++ // to transmit the call site's intended call type. ++ if (status > 0) { ++ (*opc) = (u1)Bytecodes::_invokehandle; ++ } ++ } ++ } else { ++ // Do not need to look at cp_index. ++ if ((*opc) == (u1)Bytecodes::_invokehandle) { ++ (*opc) = (u1)Bytecodes::_invokevirtual; ++ // Ignore corner case of original _invokespecial instruction. ++ // This is safe because (a) the signature polymorphic method was final, and ++ // (b) the implementation of MethodHandle will not call invokespecial on it. 
++ } + } + } + +@@ -297,17 +329,18 @@ + case Bytecodes::_invokespecial : // fall through + case Bytecodes::_invokestatic : + case Bytecodes::_invokeinterface: ++ case Bytecodes::_invokehandle : // if reverse=true + rewrite_member_reference(bcp, prefix_length+1, reverse); + break; + case Bytecodes::_invokedynamic: + rewrite_invokedynamic(bcp, prefix_length+1, reverse); + break; + case Bytecodes::_ldc: +- case Bytecodes::_fast_aldc: ++ case Bytecodes::_fast_aldc: // if reverse=true + maybe_rewrite_ldc(bcp, prefix_length+1, false, reverse); + break; + case Bytecodes::_ldc_w: +- case Bytecodes::_fast_aldc_w: ++ case Bytecodes::_fast_aldc_w: // if reverse=true + maybe_rewrite_ldc(bcp, prefix_length+1, true, reverse); + break; + case Bytecodes::_jsr : // fall through +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/rewriter.hpp +--- openjdk/hotspot/src/share/vm/interpreter/rewriter.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/rewriter.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -39,7 +39,7 @@ + objArrayHandle _methods; + intArray _cp_map; + intStack _cp_cache_map; +- bool _have_invoke_dynamic; ++ intArray _method_handle_invokers; + + void init_cp_map(int length) { + _cp_map.initialize(length, -1); +@@ -88,6 +88,7 @@ + void scan_method(methodOop m, bool reverse = false); + void rewrite_Object_init(methodHandle m, TRAPS); + void rewrite_member_reference(address bcp, int offset, bool reverse = false); ++ void maybe_rewrite_invokehandle(address opc, int cp_index, bool reverse = false); + void rewrite_invokedynamic(address bcp, int offset, bool reverse = false); + void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false); + // Revert bytecodes in case of an exception. 
+diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/templateInterpreter.cpp +--- openjdk/hotspot/src/share/vm/interpreter/templateInterpreter.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/templateInterpreter.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -362,7 +362,6 @@ + method_entry(empty) + method_entry(accessor) + method_entry(abstract) +- method_entry(method_handle) + method_entry(java_lang_math_sin ) + method_entry(java_lang_math_cos ) + method_entry(java_lang_math_tan ) +@@ -372,6 +371,8 @@ + method_entry(java_lang_math_log10) + method_entry(java_lang_ref_reference_get) + ++ initialize_method_handle_entries(); ++ + // all native method kinds (must be one contiguous block) + Interpreter::_native_entry_begin = Interpreter::code()->code_end(); + method_entry(native) +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/templateTable.cpp +--- openjdk/hotspot/src/share/vm/interpreter/templateTable.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/templateTable.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -444,7 +444,7 @@ + def(Bytecodes::_invokespecial , ubcp|disp|clvm|____, vtos, vtos, invokespecial , f1_byte ); + def(Bytecodes::_invokestatic , ubcp|disp|clvm|____, vtos, vtos, invokestatic , f1_byte ); + def(Bytecodes::_invokeinterface , ubcp|disp|clvm|____, vtos, vtos, invokeinterface , f1_byte ); +- def(Bytecodes::_invokedynamic , ubcp|disp|clvm|____, vtos, vtos, invokedynamic , f1_oop ); ++ def(Bytecodes::_invokedynamic , ubcp|disp|clvm|____, vtos, vtos, invokedynamic , f12_oop ); + def(Bytecodes::_new , ubcp|____|clvm|____, vtos, atos, _new , _ ); + def(Bytecodes::_newarray , ubcp|____|clvm|____, itos, atos, newarray , _ ); + def(Bytecodes::_anewarray , ubcp|____|clvm|____, itos, atos, anewarray , _ ); +@@ -514,6 +514,8 @@ + + def(Bytecodes::_return_register_finalizer , ____|disp|clvm|____, vtos, vtos, _return , vtos ); + ++ def(Bytecodes::_invokehandle , ubcp|disp|clvm|____, vtos, 
vtos, invokehandle , f12_oop ); ++ + def(Bytecodes::_shouldnotreachhere , ____|____|____|____, vtos, vtos, shouldnotreachhere , _ ); + // platform specific bytecodes + pd_initialize(); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/templateTable.hpp +--- openjdk/hotspot/src/share/vm/interpreter/templateTable.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/interpreter/templateTable.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -98,7 +98,7 @@ + public: + enum Operation { add, sub, mul, div, rem, _and, _or, _xor, shl, shr, ushr }; + enum Condition { equal, not_equal, less, less_equal, greater, greater_equal }; +- enum CacheByte { f1_byte = 1, f2_byte = 2, f1_oop = 0x11 }; // byte_no codes ++ enum CacheByte { f1_byte = 1, f2_byte = 2, f12_oop = 0x12 }; // byte_no codes + + private: + static bool _is_initialized; // true if TemplateTable has been initialized +@@ -294,6 +294,7 @@ + static void invokestatic(int byte_no); + static void invokeinterface(int byte_no); + static void invokedynamic(int byte_no); ++ static void invokehandle(int byte_no); + static void fast_invokevfinal(int byte_no); + + static void getfield_or_static(int byte_no, bool is_static); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/constantPoolOop.cpp +--- openjdk/hotspot/src/share/vm/oops/constantPoolOop.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/oops/constantPoolOop.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -267,25 +267,61 @@ + + + methodOop constantPoolOopDesc::method_at_if_loaded(constantPoolHandle cpool, +- int which, Bytecodes::Code invoke_code) { ++ int which) { + assert(!constantPoolCacheOopDesc::is_secondary_index(which), "no indy instruction here"); + if (cpool->cache() == NULL) return NULL; // nothing to load yet +- int cache_index = which - CPCACHE_INDEX_TAG; ++ int cache_index = get_cpcache_index(which); + if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) { + if (PrintMiscellaneous && (Verbose||WizardMode)) { +- 
tty->print_cr("bad operand %d for %d in:", which, invoke_code); cpool->print(); ++ tty->print_cr("bad operand %d in:", which); cpool->print(); + } + return NULL; + } + ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); +- if (invoke_code != Bytecodes::_illegal) +- return e->get_method_if_resolved(invoke_code, cpool); +- Bytecodes::Code bc; +- if ((bc = e->bytecode_1()) != (Bytecodes::Code)0) +- return e->get_method_if_resolved(bc, cpool); +- if ((bc = e->bytecode_2()) != (Bytecodes::Code)0) +- return e->get_method_if_resolved(bc, cpool); +- return NULL; ++ return e->method_if_resolved(cpool); ++} ++ ++ ++bool constantPoolOopDesc::has_appendix_at_if_loaded(constantPoolHandle cpool, int which) { ++ if (cpool->cache() == NULL) return false; // nothing to load yet ++ // XXX Is there a simpler way to get to the secondary entry? ++ ConstantPoolCacheEntry* e; ++ if (constantPoolCacheOopDesc::is_secondary_index(which)) { ++ e = cpool->cache()->secondary_entry_at(which); ++ } else { ++ int cache_index = get_cpcache_index(which); ++ if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) { ++ if (PrintMiscellaneous && (Verbose||WizardMode)) { ++ tty->print_cr("bad operand %d in:", which); cpool->print(); ++ } ++ return false; ++ } ++ e = cpool->cache()->entry_at(cache_index); ++ } ++ return e->has_appendix(); ++} ++ ++ ++oop constantPoolOopDesc::appendix_at_if_loaded(constantPoolHandle cpool, int which) { ++ if (cpool->cache() == NULL) return NULL; // nothing to load yet ++ // XXX Is there a simpler way to get to the secondary entry? 
++ ConstantPoolCacheEntry* e; ++ if (constantPoolCacheOopDesc::is_secondary_index(which)) { ++ e = cpool->cache()->secondary_entry_at(which); ++ } else { ++ int cache_index = get_cpcache_index(which); ++ if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) { ++ if (PrintMiscellaneous && (Verbose||WizardMode)) { ++ tty->print_cr("bad operand %d in:", which); cpool->print(); ++ } ++ return NULL; ++ } ++ e = cpool->cache()->entry_at(cache_index); ++ } ++ if (!e->has_appendix()) { ++ return NULL; ++ } ++ return e->f1_as_instance(); + } + + +@@ -481,7 +517,7 @@ + if (cache_index >= 0) { + assert(index == _no_index_sentinel, "only one kind of index at a time"); + ConstantPoolCacheEntry* cpc_entry = this_oop->cache()->entry_at(cache_index); +- result_oop = cpc_entry->f1(); ++ result_oop = cpc_entry->f1_as_instance(); + if (result_oop != NULL) { + return decode_exception_from_f1(result_oop, THREAD); + // That was easy... +@@ -553,12 +589,7 @@ + index, this_oop->method_type_index_at(index), + signature->as_C_string()); + KlassHandle klass(THREAD, this_oop->pool_holder()); +- bool ignore_is_on_bcp = false; +- Handle value = SystemDictionary::find_method_handle_type(signature, +- klass, +- false, +- ignore_is_on_bcp, +- THREAD); ++ Handle value = SystemDictionary::find_method_handle_type(signature, klass, THREAD); + if (HAS_PENDING_EXCEPTION) { + throw_exception = Handle(THREAD, PENDING_EXCEPTION); + CLEAR_PENDING_EXCEPTION; +@@ -608,7 +639,7 @@ + result_oop = NULL; // safety + ObjectLocker ol(this_oop, THREAD); + ConstantPoolCacheEntry* cpc_entry = this_oop->cache()->entry_at(cache_index); +- result_oop = cpc_entry->f1(); ++ result_oop = cpc_entry->f1_as_instance(); + // Benign race condition: f1 may already be filled in while we were trying to lock. + // The important thing here is that all threads pick up the same result. 
+ // It doesn't matter which racing thread wins, as long as only one +@@ -627,6 +658,45 @@ + } + } + ++ ++oop constantPoolOopDesc::resolve_bootstrap_specifier_at_impl(constantPoolHandle this_oop, int index, TRAPS) { ++ assert(this_oop->tag_at(index).is_invoke_dynamic(), "Corrupted constant pool"); ++ ++ Handle bsm; ++ int argc; ++ { ++ // JVM_CONSTANT_InvokeDynamic is an ordered pair of [bootm, name&type], plus optional arguments ++ // The bootm, being a JVM_CONSTANT_MethodHandle, has its own cache entry. ++ // It is accompanied by the optional arguments. ++ int bsm_index = this_oop->invoke_dynamic_bootstrap_method_ref_index_at(index); ++ oop bsm_oop = this_oop->resolve_possibly_cached_constant_at(bsm_index, CHECK_NULL); ++ if (!java_lang_invoke_MethodHandle::is_instance(bsm_oop)) { ++ THROW_MSG_NULL(vmSymbols::java_lang_LinkageError(), "BSM not an MethodHandle"); ++ } ++ ++ // Extract the optional static arguments. ++ argc = this_oop->invoke_dynamic_argument_count_at(index); ++ if (argc == 0) return bsm_oop; ++ ++ bsm = Handle(THREAD, bsm_oop); ++ } ++ ++ objArrayHandle info; ++ { ++ objArrayOop info_oop = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1+argc, CHECK_NULL); ++ info = objArrayHandle(THREAD, info_oop); ++ } ++ ++ info->obj_at_put(0, bsm()); ++ for (int i = 0; i < argc; i++) { ++ int arg_index = this_oop->invoke_dynamic_argument_index_at(index, i); ++ oop arg_oop = this_oop->resolve_possibly_cached_constant_at(arg_index, CHECK_NULL); ++ info->obj_at_put(1+i, arg_oop); ++ } ++ ++ return info(); ++} ++ + oop constantPoolOopDesc::string_at_impl(constantPoolHandle this_oop, int which, TRAPS) { + oop str = NULL; + CPSlot entry = this_oop->slot_at(which); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/constantPoolOop.hpp +--- openjdk/hotspot/src/share/vm/oops/constantPoolOop.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/oops/constantPoolOop.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -642,6 +642,11 @@ + return 
resolve_constant_at_impl(h_this, pool_index, _possible_index_sentinel, THREAD); + } + ++ oop resolve_bootstrap_specifier_at(int index, TRAPS) { ++ constantPoolHandle h_this(THREAD, this); ++ return resolve_bootstrap_specifier_at_impl(h_this, index, THREAD); ++ } ++ + // Klass name matches name at offset + bool klass_name_at_matches(instanceKlassHandle k, int which); + +@@ -666,12 +671,13 @@ + friend class SystemDictionary; + + // Used by compiler to prevent classloading. +- static methodOop method_at_if_loaded (constantPoolHandle this_oop, int which, +- Bytecodes::Code bc = Bytecodes::_illegal); +- static klassOop klass_at_if_loaded (constantPoolHandle this_oop, int which); +- static klassOop klass_ref_at_if_loaded (constantPoolHandle this_oop, int which); ++ static methodOop method_at_if_loaded (constantPoolHandle this_oop, int which); ++ static bool has_appendix_at_if_loaded (constantPoolHandle this_oop, int which); ++ static oop appendix_at_if_loaded (constantPoolHandle this_oop, int which); ++ static klassOop klass_at_if_loaded (constantPoolHandle this_oop, int which); ++ static klassOop klass_ref_at_if_loaded (constantPoolHandle this_oop, int which); + // Same as above - but does LinkResolving. +- static klassOop klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS); ++ static klassOop klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS); + + // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the + // future by other Java code. 
These take constant pool indices rather than +@@ -697,6 +703,8 @@ + enum { CPCACHE_INDEX_TAG = 0 }; // in product mode, this zero value is a no-op + #endif //ASSERT + ++ static int get_cpcache_index(int index) { return index - CPCACHE_INDEX_TAG; } ++ + private: + + Symbol* impl_name_ref_at(int which, bool uncached); +@@ -729,6 +737,7 @@ + static void resolve_string_constants_impl(constantPoolHandle this_oop, TRAPS); + + static oop resolve_constant_at_impl(constantPoolHandle this_oop, int index, int cache_index, TRAPS); ++ static oop resolve_bootstrap_specifier_at_impl(constantPoolHandle this_oop, int index, TRAPS); + + public: + // Merging constantPoolOop support: +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/cpCacheOop.cpp +--- openjdk/hotspot/src/share/vm/oops/cpCacheOop.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/oops/cpCacheOop.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -31,6 +31,7 @@ + #include "oops/objArrayOop.hpp" + #include "oops/oop.inline.hpp" + #include "prims/jvmtiRedefineClassesTrace.hpp" ++#include "prims/methodHandles.hpp" + #include "runtime/handles.inline.hpp" + + +@@ -44,68 +45,61 @@ + + void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) { + assert(0 <= main_index && main_index < 0x10000, "sanity check"); +- _indices = (main_index << 16); ++ _indices = (main_index << main_cp_index_bits); + assert(main_entry_index() == main_index, ""); + } + +-int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final, +- bool is_vfinal, bool is_volatile, +- bool is_method_interface, bool is_method) { +- int f = state; +- +- assert( state < number_of_states, "Invalid state in as_flags"); +- +- f <<= 1; +- if (is_final) f |= 1; +- f <<= 1; +- if (is_vfinal) f |= 1; +- f <<= 1; +- if (is_volatile) f |= 1; +- f <<= 1; +- if (is_method_interface) f |= 1; +- f <<= 1; +- if (is_method) f |= 1; +- f <<= ConstantPoolCacheEntry::hotSwapBit; ++int ConstantPoolCacheEntry::make_flags(TosState state, ++ int 
option_bits, ++ int field_index_or_method_params) { ++ assert(state < number_of_states, "Invalid state in make_flags"); ++ int f = ((int)state << tos_state_shift) | option_bits | field_index_or_method_params; + // Preserve existing flag bit values ++ // The low bits are a field offset, or else the method parameter size. + #ifdef ASSERT +- int old_state = ((_flags >> tosBits) & 0x0F); +- assert(old_state == 0 || old_state == state, ++ TosState old_state = flag_state(); ++ assert(old_state == (TosState)0 || old_state == state, + "inconsistent cpCache flags state"); + #endif + return (_flags | f) ; + } + + void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) { ++ assert(!is_secondary_entry(), "must not overwrite main_entry_index"); + #ifdef ASSERT + // Read once. + volatile Bytecodes::Code c = bytecode_1(); + assert(c == 0 || c == code || code == 0, "update must be consistent"); + #endif + // Need to flush pending stores here before bytecode is written. +- OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16)); ++ OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_1_shift)); + } + + void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) { ++ assert(!is_secondary_entry(), "must not overwrite main_entry_index"); + #ifdef ASSERT + // Read once. + volatile Bytecodes::Code c = bytecode_2(); + assert(c == 0 || c == code || code == 0, "update must be consistent"); + #endif + // Need to flush pending stores here before bytecode is written. +- OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24)); ++ OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_2_shift)); + } + +-// Atomically sets f1 if it is still NULL, otherwise it keeps the +-// current value. +-void ConstantPoolCacheEntry::set_f1_if_null_atomic(oop f1) { ++// Sets f1, ordering with previous writes. 
++void ConstantPoolCacheEntry::release_set_f1(oop f1) { + // Use barriers as in oop_store ++ assert(f1 != NULL, ""); + oop* f1_addr = (oop*) &_f1; + update_barrier_set_pre(f1_addr, f1); +- void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL); +- bool success = (result == NULL); +- if (success) { +- update_barrier_set((void*) f1_addr, f1); +- } ++ OrderAccess::release_store_ptr((intptr_t*)f1_addr, f1); ++ update_barrier_set((void*) f1_addr, f1); ++} ++ ++// Sets flags, but only if the value was previously zero. ++bool ConstantPoolCacheEntry::init_flags_atomic(intptr_t flags) { ++ intptr_t result = Atomic::cmpxchg_ptr(flags, &_flags, 0); ++ return (result == 0); + } + + #ifdef ASSERT +@@ -135,17 +129,32 @@ + bool is_volatile) { + set_f1(field_holder()->java_mirror()); + set_f2(field_offset); +- assert(field_index <= field_index_mask, ++ assert((field_index & field_index_mask) == field_index, + "field index does not fit in low flag bits"); +- set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) | +- (field_index & field_index_mask)); ++ set_field_flags(field_type, ++ ((is_volatile ? 1 : 0) << is_volatile_shift) | ++ ((is_final ? 1 : 0) << is_final_shift), ++ field_index); + set_bytecode_1(get_code); + set_bytecode_2(put_code); + NOT_PRODUCT(verify(tty)); + } + +-int ConstantPoolCacheEntry::field_index() const { +- return (_flags & field_index_mask); ++void ConstantPoolCacheEntry::set_parameter_size(int value) { ++ // This routine is called only in corner cases where the CPCE is not yet initialized. ++ // See AbstractInterpreter::deopt_continue_after_entry. ++ assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value, ++ err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value)); ++ // Setting the parameter size by itself is only safe if the ++ // current value of _flags is 0, otherwise another thread may have ++ // updated it and we don't want to overwrite that value. 
Don't ++ // bother trying to update it once it's nonzero but always make ++ // sure that the final parameter size agrees with what was passed. ++ if (_flags == 0) { ++ Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0); ++ } ++ guarantee(parameter_size() == value, ++ err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value)); + } + + void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code, +@@ -154,51 +163,51 @@ + assert(!is_secondary_entry(), ""); + assert(method->interpreter_entry() != NULL, "should have been set at this point"); + assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache"); +- bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface); + + int byte_no = -1; +- bool needs_vfinal_flag = false; ++ bool change_to_virtual = false; ++ + switch (invoke_code) { ++ case Bytecodes::_invokeinterface: ++ // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface ++ // instruction somehow links to a non-interface method (in Object). ++ // In that case, the method has no itable index and must be invoked as a virtual. ++ // Set a flag to keep track of this corner case. ++ change_to_virtual = true; ++ ++ // ...and fall through as if we were handling invokevirtual: + case Bytecodes::_invokevirtual: +- case Bytecodes::_invokeinterface: { ++ { + if (method->can_be_statically_bound()) { +- set_f2((intptr_t)method()); +- needs_vfinal_flag = true; ++ // set_f2_as_vfinal_method checks if is_vfinal flag is true. ++ set_method_flags(as_TosState(method->result_type()), ++ ( 1 << is_vfinal_shift) | ++ ((method->is_final_method() ? 1 : 0) << is_final_shift) | ++ ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift), ++ method()->size_of_parameters()); ++ set_f2_as_vfinal_method(method()); + } else { + assert(vtable_index >= 0, "valid index"); ++ assert(!method->is_final_method(), "sanity"); ++ set_method_flags(as_TosState(method->result_type()), ++ ((change_to_virtual ? 
1 : 0) << is_forced_virtual_shift), ++ method()->size_of_parameters()); + set_f2(vtable_index); + } + byte_no = 2; + break; +- } +- +- case Bytecodes::_invokedynamic: // similar to _invokevirtual +- if (TraceInvokeDynamic) { +- tty->print_cr("InvokeDynamic set_method%s method="PTR_FORMAT" index=%d", +- (is_secondary_entry() ? " secondary" : ""), +- (intptr_t)method(), vtable_index); +- method->print(); +- this->print(tty, 0); + } +- assert(method->can_be_statically_bound(), "must be a MH invoker method"); +- assert(_f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized"); +- // SystemDictionary::find_method_handle_invoke only caches +- // methods which signature classes are on the boot classpath, +- // otherwise the newly created method is returned. To avoid +- // races in that case we store the first one coming in into the +- // cp-cache atomically if it's still unset. +- set_f1_if_null_atomic(method()); +- needs_vfinal_flag = false; // _f2 is not an oop +- assert(!is_vfinal(), "f2 not an oop"); +- byte_no = 1; // coordinate this with bytecode_number & is_resolved +- break; + + case Bytecodes::_invokespecial: +- // Preserve the value of the vfinal flag on invokevirtual bytecode +- // which may be shared with this constant pool cache entry. +- needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal(); +- // fall through + case Bytecodes::_invokestatic: ++ // Note: Read and preserve the value of the is_vfinal flag on any ++ // invokevirtual bytecode shared with this constant pool cache entry. ++ // It is cheap and safe to consult is_vfinal() at all times. ++ // Once is_vfinal is set, it must stay that way, lest we get a dangling oop. ++ set_method_flags(as_TosState(method->result_type()), ++ ((is_vfinal() ? 1 : 0) << is_vfinal_shift) | ++ ((method->is_final_method() ? 
1 : 0) << is_final_shift), ++ method()->size_of_parameters()); + set_f1(method()); + byte_no = 1; + break; +@@ -207,19 +216,14 @@ + break; + } + +- set_flags(as_flags(as_TosState(method->result_type()), +- method->is_final_method(), +- needs_vfinal_flag, +- false, +- change_to_virtual, +- true)| +- method()->size_of_parameters()); +- + // Note: byte_no also appears in TemplateTable::resolve. + if (byte_no == 1) { ++ assert(invoke_code != Bytecodes::_invokevirtual && ++ invoke_code != Bytecodes::_invokeinterface, ""); + set_bytecode_1(invoke_code); + } else if (byte_no == 2) { + if (change_to_virtual) { ++ assert(invoke_code == Bytecodes::_invokeinterface, ""); + // NOTE: THIS IS A HACK - BE VERY CAREFUL!!! + // + // Workaround for the case where we encounter an invokeinterface, but we +@@ -235,10 +239,11 @@ + // Otherwise, the method needs to be reresolved with caller for each + // interface call. + if (method->is_public()) set_bytecode_1(invoke_code); +- set_bytecode_2(Bytecodes::_invokevirtual); + } else { +- set_bytecode_2(invoke_code); ++ assert(invoke_code == Bytecodes::_invokevirtual, ""); + } ++ // set up for invokevirtual, even if linking for invokeinterface also: ++ set_bytecode_2(Bytecodes::_invokevirtual); + } else { + ShouldNotReachHere(); + } +@@ -250,73 +255,129 @@ + assert(!is_secondary_entry(), ""); + klassOop interf = method->method_holder(); + assert(instanceKlass::cast(interf)->is_interface(), "must be an interface"); ++ assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here"); + set_f1(interf); + set_f2(index); +- set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters()); ++ set_method_flags(as_TosState(method->result_type()), ++ 0, // no option bits ++ method()->size_of_parameters()); + set_bytecode_1(Bytecodes::_invokeinterface); + } + + +-void ConstantPoolCacheEntry::initialize_bootstrap_method_index_in_cache(int 
bsm_cache_index) { +- assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry"); +- assert(_f2 == 0, "initialize once"); +- assert(bsm_cache_index == (int)(u2)bsm_cache_index, "oob"); +- set_f2(bsm_cache_index + constantPoolOopDesc::CPCACHE_INDEX_TAG); ++void ConstantPoolCacheEntry::set_method_handle(methodHandle adapter, Handle appendix) { ++ assert(!is_secondary_entry(), ""); ++ set_method_handle_common(Bytecodes::_invokehandle, adapter, appendix); + } + +-int ConstantPoolCacheEntry::bootstrap_method_index_in_cache() { +- assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry"); +- intptr_t bsm_cache_index = (intptr_t) _f2 - constantPoolOopDesc::CPCACHE_INDEX_TAG; +- assert(bsm_cache_index == (intptr_t)(u2)bsm_cache_index, "oob"); +- return (int) bsm_cache_index; ++void ConstantPoolCacheEntry::set_dynamic_call(methodHandle adapter, Handle appendix) { ++ assert(is_secondary_entry(), ""); ++ set_method_handle_common(Bytecodes::_invokedynamic, adapter, appendix); + } + +-void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, methodHandle signature_invoker) { +- assert(is_secondary_entry(), ""); +- // NOTE: it's important that all other values are set before f1 is +- // set since some users short circuit on f1 being set +- // (i.e. non-null) and that may result in uninitialized values for +- // other racing threads (e.g. flags). 
+- int param_size = signature_invoker->size_of_parameters(); +- assert(param_size >= 1, "method argument size must include MH.this"); +- param_size -= 1; // do not count MH.this; it is not stacked for invokedynamic +- bool is_final = true; +- assert(signature_invoker->is_final_method(), "is_final"); +- int flags = as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size; +- assert(_flags == 0 || _flags == flags, "flags should be the same"); +- set_flags(flags); +- // do not do set_bytecode on a secondary CP cache entry +- //set_bytecode_1(Bytecodes::_invokedynamic); +- set_f1_if_null_atomic(call_site()); // This must be the last one to set (see NOTE above)! ++void ConstantPoolCacheEntry::set_method_handle_common(Bytecodes::Code invoke_code, methodHandle adapter, Handle appendix) { ++ // NOTE: This CPCE can be the subject of data races. ++ // There are three words to update: flags, f2, f1 (in that order). ++ // Writers must store all other values before f1. ++ // Readers must test f1 first for non-null before reading other fields. ++ // Competing writers must acquire exclusive access on the first ++ // write, to flags, using a compare/exchange. ++ // A losing writer must spin until the winner writes f1, ++ // so that when he returns, he can use the linked cache entry. ++ ++ bool has_appendix = appendix.not_null(); ++ if (!has_appendix) { ++ // The extra argument is not used, but we need a non-null value to signify linkage state. ++ // Set it to something benign that will never leak memory. ++ appendix = Universe::void_mirror(); ++ } ++ ++ bool owner = ++ init_method_flags_atomic(as_TosState(adapter->result_type()), ++ ((has_appendix ? 1 : 0) << has_appendix_shift) | ++ ( 1 << is_vfinal_shift) | ++ ( 1 << is_final_shift), ++ adapter->size_of_parameters()); ++ if (!owner) { ++ while (is_f1_null()) { ++ // Pause momentarily on a low-level lock, to allow racing thread to win. 
++ MutexLockerEx mu(Patching_lock, Mutex::_no_safepoint_check_flag); ++ os::yield(); ++ } ++ return; ++ } ++ ++ if (TraceInvokeDynamic) { ++ tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method="PTR_FORMAT" ", ++ invoke_code, ++ (intptr_t)appendix(), (has_appendix ? "" : " (unused)"), ++ (intptr_t)adapter()); ++ adapter->print(); ++ if (has_appendix) appendix()->print(); ++ } ++ ++ // Method handle invokes and invokedynamic sites use both cp cache words. ++ // f1, if not null, contains a value passed as a trailing argument to the adapter. ++ // In the general case, this could be the call site's MethodType, ++ // for use with java.lang.Invokers.checkExactType, or else a CallSite object. ++ // f2 contains the adapter method which manages the actual call. ++ // In the general case, this is a compiled LambdaForm. ++ // (The Java code is free to optimize these calls by binding other ++ // sorts of methods and appendices to call sites.) ++ // JVM-level linking is via f2, as if for invokevfinal, and signatures are erased. ++ // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits. ++ // In principle this means that the method (with appendix) could take up to 256 parameter slots. ++ // ++ // This means that given a call site like (List)mh.invoke("foo"), ++ // the f2 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;', ++ // not '(Ljava/lang/String;)Ljava/util/List;'. ++ // The fact that String and List are involved is encoded in the MethodType in f1. ++ // This allows us to create fewer method oops, while keeping type safety. ++ // ++ set_f2_as_vfinal_method(adapter()); ++ assert(appendix.not_null(), "needed for linkage state"); ++ release_set_f1(appendix()); // This must be the last one to set (see NOTE above)! ++ if (!is_secondary_entry()) { ++ // The interpreter assembly code does not check byte_2, ++ // but it is used by is_resolved, method_if_resolved, etc. 
++ set_bytecode_2(invoke_code); ++ } ++ NOT_PRODUCT(verify(tty)); ++ if (TraceInvokeDynamic) { ++ this->print(tty, 0); ++ } + } + +- +-methodOop ConstantPoolCacheEntry::get_method_if_resolved(Bytecodes::Code invoke_code, constantPoolHandle cpool) { +- assert(invoke_code > (Bytecodes::Code)0, "bad query"); ++methodOop ConstantPoolCacheEntry::method_if_resolved(constantPoolHandle cpool) { + if (is_secondary_entry()) { +- return cpool->cache()->entry_at(main_entry_index())->get_method_if_resolved(invoke_code, cpool); ++ if (!is_f1_null()) ++ return f2_as_vfinal_method(); ++ return NULL; + } + // Decode the action of set_method and set_interface_call +- if (bytecode_1() == invoke_code) { ++ Bytecodes::Code invoke_code = bytecode_1(); ++ if (invoke_code != (Bytecodes::Code)0) { + oop f1 = _f1; + if (f1 != NULL) { + switch (invoke_code) { + case Bytecodes::_invokeinterface: + assert(f1->is_klass(), ""); +- return klassItable::method_for_itable_index(klassOop(f1), (int) f2()); ++ return klassItable::method_for_itable_index(klassOop(f1), f2_as_index()); + case Bytecodes::_invokestatic: + case Bytecodes::_invokespecial: ++ assert(!has_appendix(), ""); + assert(f1->is_method(), ""); + return methodOop(f1); + } + } + } +- if (bytecode_2() == invoke_code) { ++ invoke_code = bytecode_2(); ++ if (invoke_code != (Bytecodes::Code)0) { + switch (invoke_code) { + case Bytecodes::_invokevirtual: + if (is_vfinal()) { + // invokevirtual +- methodOop m = methodOop((intptr_t) f2()); ++ methodOop m = f2_as_vfinal_method(); + assert(m->is_method(), ""); + return m; + } else { +@@ -325,16 +386,19 @@ + klassOop klass = cpool->resolved_klass_at(holder_index); + if (!Klass::cast(klass)->oop_is_instance()) + klass = SystemDictionary::Object_klass(); +- return instanceKlass::cast(klass)->method_at_vtable((int) f2()); ++ return instanceKlass::cast(klass)->method_at_vtable(f2_as_index()); + } + } ++ break; ++ case Bytecodes::_invokehandle: ++ case Bytecodes::_invokedynamic: ++ return 
f2_as_vfinal_method(); + } + } + return NULL; + } + + +- + class LocalOopClosure: public OopClosure { + private: + void (*_f)(oop*); +@@ -419,9 +483,10 @@ + methodOop new_method, bool * trace_name_printed) { + + if (is_vfinal()) { +- // virtual and final so f2() contains method ptr instead of vtable index +- if (f2() == (intptr_t)old_method) { ++ // virtual and final so _f2 contains method ptr instead of vtable index ++ if (f2_as_vfinal_method() == old_method) { + // match old_method so need an update ++ // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values + _f2 = (intptr_t)new_method; + if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { + if (!(*trace_name_printed)) { +@@ -497,16 +562,17 @@ + methodOop m = NULL; + if (is_vfinal()) { + // virtual and final so _f2 contains method ptr instead of vtable index +- m = (methodOop)_f2; +- } else if ((oop)_f1 == NULL) { ++ m = f2_as_vfinal_method(); ++ } else if (is_f1_null()) { + // NULL _f1 means this is a virtual entry so also not interesting + return false; + } else { +- if (!((oop)_f1)->is_method()) { ++ oop f1 = _f1; // _f1 is volatile ++ if (!f1->is_method()) { + // _f1 can also contain a klassOop for an interface + return false; + } +- m = (methodOop)_f1; ++ m = f1_as_method(); + } + + assert(m != NULL && m->is_method(), "sanity check"); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/cpCacheOop.hpp +--- openjdk/hotspot/src/share/vm/oops/cpCacheOop.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/oops/cpCacheOop.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -38,13 +38,14 @@ + // bit number |31 0| + // bit length |-8--|-8--|---16----| + // -------------------------------- +-// _indices [ b2 | b1 | index ] +-// _f1 [ entry specific ] +-// _f2 [ entry specific ] +-// _flags [t|f|vf|v|m|h|unused|field_index] (for field entries) +-// bit length |4|1|1 |1|1|0|---7--|----16-----] +-// _flags [t|f|vf|v|m|h|unused|eidx|psze] (for method entries) +-// bit length |4|1|1 
|1|1|1|---7--|-8--|-8--] ++// _indices [ b2 | b1 | index ] index = constant_pool_index (!= 0, normal entries only) ++// _indices [ index | 00000 ] index = main_entry_index (secondary entries only) ++// _f1 [ entry specific ] method, klass, or oop (MethodType or CallSite) ++// _f2 [ entry specific ] vtable index or vfinal method ++// _flags [tos|0|00|00|00|f|v|f2|unused|field_index] (for field entries) ++// bit length [ 4 |1|1 |1 | 1|1|1| 1|---5--|----16-----] ++// _flags [tos|M|vf|fv|ea|f|0|f2|unused|00000|psize] (for method entries) ++// bit length [ 4 |1|1 |1 | 1|1|1| 1|---5--|--8--|--8--] + + // -------------------------------- + // +@@ -52,24 +53,23 @@ + // index = original constant pool index + // b1 = bytecode 1 + // b2 = bytecode 2 +-// psze = parameters size (method entries only) +-// eidx = interpreter entry index (method entries only) ++// psize = parameters size (method entries only) + // field_index = index into field information in holder instanceKlass + // The index max is 0xffff (max number of fields in constant pool) + // and is multiplied by (instanceKlass::next_offset) when accessing. 
+ // t = TosState (see below) + // f = field is marked final (see below) +-// vf = virtual, final (method entries only : is_vfinal()) ++// f2 = virtual but final (method entries only: is_vfinal()) + // v = field is volatile (see below) + // m = invokeinterface used for method in class Object (see below) + // h = RedefineClasses/Hotswap bit (see below) + // + // The flags after TosState have the following interpretation: +-// bit 27: f flag true if field is marked final +-// bit 26: vf flag true if virtual final method +-// bit 25: v flag true if field is volatile (only for fields) +-// bit 24: m flag true if invokeinterface used for method in class Object +-// bit 23: 0 for fields, 1 for methods ++// bit 27: 0 for fields, 1 for methods ++// f flag true if field is marked final ++// v flag true if field is volatile (only for fields) ++// f2 flag true if f2 contains an oop (e.g., virtual final method) ++// fv flag true if invokeinterface used for method in class Object + // + // The flags 31, 30, 29, 28 together build a 4 bit number 0 to 8 with the + // following mapping to the TosState states: +@@ -86,25 +86,26 @@ + // + // Entry specific: field entries: + // _indices = get (b1 section) and put (b2 section) bytecodes, original constant pool index +-// _f1 = field holder +-// _f2 = field offset in words +-// _flags = field type information, original field index in field holder ++// _f1 = field holder (as a java.lang.Class, not a klassOop) ++// _f2 = field offset in bytes ++// _flags = field type information, original FieldInfo index in field holder + // (field_index section) + // + // Entry specific: method entries: + // _indices = invoke code for f1 (b1 section), invoke code for f2 (b2 section), + // original constant pool index +-// _f1 = method for all but virtual calls, unused by virtual calls +-// (note: for interface calls, which are essentially virtual, +-// contains klassOop for the corresponding interface. 
+-// for invokedynamic, f1 contains the CallSite object for the invocation +-// _f2 = method/vtable index for virtual calls only, unused by all other +-// calls. The vf flag indicates this is a method pointer not an +-// index. +-// _flags = field type info (f section), +-// virtual final entry (vf), +-// interpreter entry index (eidx section), +-// parameter size (psze section) ++// _f1 = methodOop for non-virtual calls, unused by virtual calls. ++// for interface calls, which are essentially virtual but need a klass, ++// contains klassOop for the corresponding interface. ++// for invokedynamic, f1 contains a site-specific CallSite object (as an appendix) ++// for invokehandle, f1 contains a site-specific MethodType object (as an appendix) ++// (upcoming metadata changes will move the appendix to a separate array) ++// _f2 = vtable/itable index (or final methodOop) for virtual calls only, ++// unused by non-virtual. The is_vfinal flag indicates this is a ++// method pointer for a final method, not an index. ++// _flags = method type info (t section), ++// virtual final bit (vfinal), ++// parameter size (psize section) + // + // Note: invokevirtual & invokespecial bytecodes can share the same constant + // pool entry and thus the same constant pool cache entry. 
All invoke +@@ -138,30 +139,61 @@ + assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change"); + oop_store(&_f1, f1); + } +- void set_f1_if_null_atomic(oop f1); +- void set_f2(intx f2) { assert(_f2 == 0 || _f2 == f2, "illegal field change"); _f2 = f2; } +- int as_flags(TosState state, bool is_final, bool is_vfinal, bool is_volatile, +- bool is_method_interface, bool is_method); ++ void release_set_f1(oop f1); ++ void set_f2(intx f2) { assert(_f2 == 0 || _f2 == f2, "illegal field change"); _f2 = f2; } ++ void set_f2_as_vfinal_method(methodOop f2) { assert(_f2 == 0 || _f2 == (intptr_t) f2, "illegal field change"); assert(is_vfinal(), "flags must be set"); _f2 = (intptr_t) f2; } ++ int make_flags(TosState state, int option_bits, int field_index_or_method_params); + void set_flags(intx flags) { _flags = flags; } ++ bool init_flags_atomic(intx flags); ++ void set_field_flags(TosState field_type, int option_bits, int field_index) { ++ assert((field_index & field_index_mask) == field_index, "field_index in range"); ++ set_flags(make_flags(field_type, option_bits | (1 << is_field_entry_shift), field_index)); ++ } ++ void set_method_flags(TosState return_type, int option_bits, int method_params) { ++ assert((method_params & parameter_size_mask) == method_params, "method_params in range"); ++ set_flags(make_flags(return_type, option_bits, method_params)); ++ } ++ bool init_method_flags_atomic(TosState return_type, int option_bits, int method_params) { ++ assert((method_params & parameter_size_mask) == method_params, "method_params in range"); ++ return init_flags_atomic(make_flags(return_type, option_bits, method_params)); ++ } + + public: +- // specific bit values in flag field +- // Note: the interpreter knows this layout! 
+- enum FlagBitValues { +- hotSwapBit = 23, +- methodInterface = 24, +- volatileField = 25, +- vfinalMethod = 26, +- finalField = 27 ++ // specific bit definitions for the flags field: ++ // (Note: the interpreter must use these definitions to access the CP cache.) ++ enum { ++ // high order bits are the TosState corresponding to field type or method return type ++ tos_state_bits = 4, ++ tos_state_mask = right_n_bits(tos_state_bits), ++ tos_state_shift = BitsPerInt - tos_state_bits, // see verify_tos_state_shift below ++ // misc. option bits; can be any bit position in [16..27] ++ is_vfinal_shift = 21, ++ is_volatile_shift = 22, ++ is_final_shift = 23, ++ has_appendix_shift = 24, ++ is_forced_virtual_shift = 25, ++ is_field_entry_shift = 26, ++ // low order bits give field index (for FieldInfo) or method parameter size: ++ field_index_bits = 16, ++ field_index_mask = right_n_bits(field_index_bits), ++ parameter_size_bits = 8, // subset of field_index_mask, range is 0..255 ++ parameter_size_mask = right_n_bits(parameter_size_bits), ++ option_bits_mask = ~(((-1) << tos_state_shift) | (field_index_mask | parameter_size_mask)) + }; + +- enum { field_index_mask = 0xFFFF }; ++ // specific bit definitions for the indices field: ++ enum { ++ main_cp_index_bits = 2*BitsPerByte, ++ main_cp_index_mask = right_n_bits(main_cp_index_bits), ++ bytecode_1_shift = main_cp_index_bits, ++ bytecode_1_mask = right_n_bits(BitsPerByte), // == (u1)0xFF ++ bytecode_2_shift = main_cp_index_bits + BitsPerByte, ++ bytecode_2_mask = right_n_bits(BitsPerByte), // == (u1)0xFF ++ // the secondary cp index overlaps with bytecodes 1 and 2: ++ secondary_cp_index_shift = bytecode_1_shift, ++ secondary_cp_index_bits = BitsPerInt - main_cp_index_bits ++ }; + +- // start of type bits in flags +- // Note: the interpreter knows this layout! 
+- enum FlagValues { +- tosBits = 28 +- }; + + // Initialization + void initialize_entry(int original_index); // initialize primary entry +@@ -189,30 +221,40 @@ + int index // Method index into interface + ); + +- void set_dynamic_call( +- Handle call_site, // Resolved java.lang.invoke.CallSite (f1) +- methodHandle signature_invoker // determines signature information ++ void set_method_handle( ++ methodHandle method, // adapter for invokeExact, etc. ++ Handle appendix // stored in f1; could be a java.lang.invoke.MethodType + ); + +- methodOop get_method_if_resolved(Bytecodes::Code invoke_code, constantPoolHandle cpool); ++ void set_dynamic_call( ++ methodHandle method, // adapter for this call site ++ Handle appendix // stored in f1; could be a java.lang.invoke.CallSite ++ ); + +- // For JVM_CONSTANT_InvokeDynamic cache entries: +- void initialize_bootstrap_method_index_in_cache(int bsm_cache_index); +- int bootstrap_method_index_in_cache(); ++ // Common code for invokedynamic and MH invocations. + +- void set_parameter_size(int value) { +- assert(parameter_size() == 0 || parameter_size() == value, +- "size must not change"); +- // Setting the parameter size by itself is only safe if the +- // current value of _flags is 0, otherwise another thread may have +- // updated it and we don't want to overwrite that value. Don't +- // bother trying to update it once it's nonzero but always make +- // sure that the final parameter size agrees with what was passed. +- if (_flags == 0) { +- Atomic::cmpxchg_ptr((value & 0xFF), &_flags, 0); +- } +- guarantee(parameter_size() == value, "size must not change"); +- } ++ // The "appendix" is an optional call-site-specific parameter which is ++ // pushed by the JVM at the end of the argument list. This argument may ++ // be a MethodType for the MH.invokes and a CallSite for an invokedynamic ++ // instruction. 
However, its exact type and use depends on the Java upcall, ++ // which simply returns a compiled LambdaForm along with any reference ++ // that LambdaForm needs to complete the call. If the upcall returns a ++ // null appendix, the argument is not passed at all. ++ // ++ // The appendix is *not* represented in the signature of the symbolic ++ // reference for the call site, but (if present) it *is* represented in ++ // the methodOop bound to the site. This means that static and dynamic ++ // resolution logic needs to make slightly different assessments about the ++ // number and types of arguments. ++ void set_method_handle_common( ++ Bytecodes::Code invoke_code, // _invokehandle or _invokedynamic ++ methodHandle adapter, // invoker method (f2) ++ Handle appendix // appendix such as CallSite, MethodType, etc. (f1) ++ ); ++ ++ methodOop method_if_resolved(constantPoolHandle cpool); ++ ++ void set_parameter_size(int value); + + // Which bytecode number (1 or 2) in the index field is valid for this bytecode? + // Returns -1 if neither is valid. 
+@@ -222,10 +264,11 @@ + case Bytecodes::_getfield : // fall through + case Bytecodes::_invokespecial : // fall through + case Bytecodes::_invokestatic : // fall through +- case Bytecodes::_invokedynamic : // fall through + case Bytecodes::_invokeinterface : return 1; + case Bytecodes::_putstatic : // fall through + case Bytecodes::_putfield : // fall through ++ case Bytecodes::_invokehandle : // fall through ++ case Bytecodes::_invokedynamic : // fall through + case Bytecodes::_invokevirtual : return 2; + default : break; + } +@@ -242,31 +285,43 @@ + } + + // Accessors +- bool is_secondary_entry() const { return (_indices & 0xFFFF) == 0; } +- int constant_pool_index() const { assert((_indices & 0xFFFF) != 0, "must be main entry"); +- return (_indices & 0xFFFF); } +- int main_entry_index() const { assert((_indices & 0xFFFF) == 0, "must be secondary entry"); +- return ((uintx)_indices >> 16); } +- Bytecodes::Code bytecode_1() const { return Bytecodes::cast((_indices >> 16) & 0xFF); } +- Bytecodes::Code bytecode_2() const { return Bytecodes::cast((_indices >> 24) & 0xFF); } +- volatile oop f1() const { return _f1; } +- bool is_f1_null() const { return (oop)_f1 == NULL; } // classifies a CPC entry as unbound +- intx f2() const { return _f2; } +- int field_index() const; +- int parameter_size() const { return _flags & 0xFF; } +- bool is_vfinal() const { return ((_flags & (1 << vfinalMethod)) == (1 << vfinalMethod)); } +- bool is_volatile() const { return ((_flags & (1 << volatileField)) == (1 << volatileField)); } +- bool is_methodInterface() const { return ((_flags & (1 << methodInterface)) == (1 << methodInterface)); } +- bool is_byte() const { return (((uintx) _flags >> tosBits) == btos); } +- bool is_char() const { return (((uintx) _flags >> tosBits) == ctos); } +- bool is_short() const { return (((uintx) _flags >> tosBits) == stos); } +- bool is_int() const { return (((uintx) _flags >> tosBits) == itos); } +- bool is_long() const { return (((uintx) _flags >> 
tosBits) == ltos); } +- bool is_float() const { return (((uintx) _flags >> tosBits) == ftos); } +- bool is_double() const { return (((uintx) _flags >> tosBits) == dtos); } +- bool is_object() const { return (((uintx) _flags >> tosBits) == atos); } +- TosState flag_state() const { assert( ( (_flags >> tosBits) & 0x0F ) < number_of_states, "Invalid state in as_flags"); +- return (TosState)((_flags >> tosBits) & 0x0F); } ++ bool is_secondary_entry() const { return (_indices & main_cp_index_mask) == 0; } ++ int main_entry_index() const { assert(is_secondary_entry(), "must be secondary entry"); ++ return ((uintx)_indices >> secondary_cp_index_shift); } ++ int primary_entry_indices() const { assert(!is_secondary_entry(), "must be main entry"); ++ return _indices; } ++ int constant_pool_index() const { return (primary_entry_indices() & main_cp_index_mask); } ++ Bytecodes::Code bytecode_1() const { return Bytecodes::cast((primary_entry_indices() >> bytecode_1_shift) ++ & bytecode_1_mask); } ++ Bytecodes::Code bytecode_2() const { return Bytecodes::cast((primary_entry_indices() >> bytecode_2_shift) ++ & bytecode_2_mask); } ++ methodOop f1_as_method() const { oop f1 = _f1; assert(f1 == NULL || f1->is_method(), ""); return methodOop(f1); } ++ klassOop f1_as_klass() const { oop f1 = _f1; assert(f1 == NULL || f1->is_klass(), ""); return klassOop(f1); } ++ oop f1_as_klass_mirror() const { oop f1 = f1_as_instance(); return f1; } // i.e., return a java_mirror ++ oop f1_as_instance() const { oop f1 = _f1; assert(f1 == NULL || f1->is_instance() || f1->is_array(), ""); return f1; } ++ oop f1_appendix() const { assert(has_appendix(), ""); return f1_as_instance(); } ++ bool is_f1_null() const { oop f1 = _f1; return f1 == NULL; } // classifies a CPC entry as unbound ++ int f2_as_index() const { assert(!is_vfinal(), ""); return (int) _f2; } ++ methodOop f2_as_vfinal_method() const { assert(is_vfinal(), ""); return methodOop(_f2); } ++ int field_index() const { assert(is_field_entry(), 
""); return (_flags & field_index_mask); } ++ int parameter_size() const { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); } ++ bool is_volatile() const { return (_flags & (1 << is_volatile_shift)) != 0; } ++ bool is_final() const { return (_flags & (1 << is_final_shift)) != 0; } ++ bool has_appendix() const { return (_flags & (1 << has_appendix_shift)) != 0; } ++ bool is_forced_virtual() const { return (_flags & (1 << is_forced_virtual_shift)) != 0; } ++ bool is_vfinal() const { return (_flags & (1 << is_vfinal_shift)) != 0; } ++ bool is_method_entry() const { return (_flags & (1 << is_field_entry_shift)) == 0; } ++ bool is_field_entry() const { return (_flags & (1 << is_field_entry_shift)) != 0; } ++ bool is_byte() const { return flag_state() == btos; } ++ bool is_char() const { return flag_state() == ctos; } ++ bool is_short() const { return flag_state() == stos; } ++ bool is_int() const { return flag_state() == itos; } ++ bool is_long() const { return flag_state() == ltos; } ++ bool is_float() const { return flag_state() == ftos; } ++ bool is_double() const { return flag_state() == dtos; } ++ bool is_object() const { return flag_state() == atos; } ++ TosState flag_state() const { assert((uint)number_of_states <= (uint)tos_state_mask+1, ""); ++ return (TosState)((_flags >> tos_state_shift) & tos_state_mask); } + + // Code generation support + static WordSize size() { return in_WordSize(sizeof(ConstantPoolCacheEntry) / HeapWordSize); } +@@ -300,15 +355,14 @@ + bool * trace_name_printed); + bool check_no_old_or_obsolete_entries(); + bool is_interesting_method_entry(klassOop k); +- bool is_field_entry() const { return (_flags & (1 << hotSwapBit)) == 0; } +- bool is_method_entry() const { return (_flags & (1 << hotSwapBit)) != 0; } + + // Debugging & Printing + void print (outputStream* st, int index) const; + void verify(outputStream* st) const; + +- static void verify_tosBits() { +- assert(tosBits == 28, "interpreter now assumes tosBits is 
28"); ++ static void verify_tos_state_shift() { ++ // When shifting flags as a 32-bit int, make sure we don't need an extra mask for tos_state: ++ assert((((u4)-1 >> tos_state_shift) & ~tos_state_mask) == 0, "no need for tos_state mask"); + } + }; + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/generateOopMap.cpp +--- openjdk/hotspot/src/share/vm/oops/generateOopMap.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/oops/generateOopMap.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -31,6 +31,7 @@ + #include "runtime/java.hpp" + #include "runtime/relocator.hpp" + #include "utilities/bitMap.inline.hpp" ++#include "prims/methodHandles.hpp" + + // + // +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/instanceKlass.cpp +--- openjdk/hotspot/src/share/vm/oops/instanceKlass.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/oops/instanceKlass.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -2389,6 +2389,22 @@ + } else if (java_lang_boxing_object::is_instance(obj)) { + st->print(" = "); + java_lang_boxing_object::print(obj, st); ++ } else if (as_klassOop() == SystemDictionary::LambdaForm_klass()) { ++ oop vmentry = java_lang_invoke_LambdaForm::vmentry(obj); ++ if (vmentry != NULL) { ++ st->print(" => "); ++ vmentry->print_value_on(st); ++ } ++ } else if (as_klassOop() == SystemDictionary::MemberName_klass()) { ++ oop vmtarget = java_lang_invoke_MemberName::vmtarget(obj); ++ if (vmtarget != NULL) { ++ st->print(" = "); ++ vmtarget->print_value_on(st); ++ } else { ++ java_lang_invoke_MemberName::clazz(obj)->print_value_on(st); ++ st->print("."); ++ java_lang_invoke_MemberName::name(obj)->print_value_on(st); ++ } + } + } + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/methodKlass.cpp +--- openjdk/hotspot/src/share/vm/oops/methodKlass.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/oops/methodKlass.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -250,7 +250,11 @@ + st->print_cr(" - highest level: %d", 
m->highest_comp_level()); + st->print_cr(" - vtable index: %d", m->_vtable_index); + st->print_cr(" - i2i entry: " INTPTR_FORMAT, m->interpreter_entry()); +- st->print_cr(" - adapter: " INTPTR_FORMAT, m->adapter()); ++ st->print( " - adapters: "); ++ if (m->adapter() == NULL) ++ st->print_cr(INTPTR_FORMAT, m->adapter()); ++ else ++ m->adapter()->print_adapter_on(st); + st->print_cr(" - compiled entry " INTPTR_FORMAT, m->from_compiled_entry()); + st->print_cr(" - code size: %d", m->code_size()); + if (m->code_size() != 0) { +@@ -298,13 +302,8 @@ + if (m->code() != NULL) { + st->print (" - compiled code: "); + m->code()->print_value_on(st); +- st->cr(); + } +- if (m->is_method_handle_invoke()) { +- st->print_cr(" - invoke method type: " INTPTR_FORMAT, (address) m->method_handle_type()); +- // m is classified as native, but it does not have an interesting +- // native_function or signature handler +- } else if (m->is_native()) { ++ if (m->is_native()) { + st->print_cr(" - native function: " INTPTR_FORMAT, m->native_function()); + st->print_cr(" - signature handler: " INTPTR_FORMAT, m->signature_handler()); + } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/methodOop.cpp +--- openjdk/hotspot/src/share/vm/oops/methodOop.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/oops/methodOop.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -40,7 +40,7 @@ + #include "oops/oop.inline.hpp" + #include "oops/symbol.hpp" + #include "prims/jvmtiExport.hpp" +-#include "prims/methodHandleWalk.hpp" ++#include "prims/methodHandles.hpp" + #include "prims/nativeLookup.hpp" + #include "runtime/arguments.hpp" + #include "runtime/compilationPolicy.hpp" +@@ -556,6 +556,7 @@ + + void methodOopDesc::set_native_function(address function, bool post_event_flag) { + assert(function != NULL, "use clear_native_function to unregister natives"); ++ assert(!is_method_handle_intrinsic() || function == SharedRuntime::native_method_throw_unsatisfied_link_error_entry(), ""); + address* 
native_function = native_function_addr(); + + // We can see racers trying to place the same native function into place. Once +@@ -585,12 +586,14 @@ + + + bool methodOopDesc::has_native_function() const { ++ assert(!is_method_handle_intrinsic(), ""); + address func = native_function(); + return (func != NULL && func != SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); + } + + + void methodOopDesc::clear_native_function() { ++ // Note: is_method_handle_intrinsic() is allowed here. + set_native_function( + SharedRuntime::native_method_throw_unsatisfied_link_error_entry(), + !native_bind_event_is_interesting); +@@ -610,10 +613,6 @@ + + + bool methodOopDesc::is_not_compilable(int comp_level) const { +- if (is_method_handle_invoke()) { +- // compilers must recognize this method specially, or not at all +- return true; +- } + if (number_of_breakpoints() > 0) { + return true; + } +@@ -713,7 +712,7 @@ + assert(entry != NULL, "interpreter entry must be non-null"); + // Sets both _i2i_entry and _from_interpreted_entry + set_interpreter_entry(entry); +- if (is_native() && !is_method_handle_invoke()) { ++ if (is_native() && !is_method_handle_intrinsic()) { + set_native_function( + SharedRuntime::native_method_throw_unsatisfied_link_error_entry(), + !native_bind_event_is_interesting); +@@ -801,13 +800,13 @@ + OrderAccess::storestore(); + #ifdef SHARK + mh->_from_interpreted_entry = code->insts_begin(); +-#else ++#else //!SHARK + mh->_from_compiled_entry = code->verified_entry_point(); + OrderAccess::storestore(); + // Instantly compiled code can execute. 
+- mh->_from_interpreted_entry = mh->get_i2c_entry(); +-#endif // SHARK +- ++ if (!mh->is_method_handle_intrinsic()) ++ mh->_from_interpreted_entry = mh->get_i2c_entry(); ++#endif //!SHARK + } + + +@@ -859,104 +858,51 @@ + return false; + } + +-bool methodOopDesc::is_method_handle_invoke_name(vmSymbols::SID name_sid) { +- switch (name_sid) { +- case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name): +- case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name): +- return true; +- } +- if (AllowInvokeGeneric +- && name_sid == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name)) +- return true; +- return false; +-} +- + // Constant pool structure for invoke methods: + enum { +- _imcp_invoke_name = 1, // utf8: 'invokeExact' or 'invokeGeneric' ++ _imcp_invoke_name = 1, // utf8: 'invokeExact', etc. + _imcp_invoke_signature, // utf8: (variable Symbol*) +- _imcp_method_type_value, // string: (variable java/lang/invoke/MethodType, sic) + _imcp_limit + }; + +-oop methodOopDesc::method_handle_type() const { +- if (!is_method_handle_invoke()) { assert(false, "caller resp."); return NULL; } +- oop mt = constants()->resolved_string_at(_imcp_method_type_value); +- assert(mt->klass() == SystemDictionary::MethodType_klass(), ""); +- return mt; ++// Test if this method is an MH adapter frame generated by Java code. ++// Cf. java/lang/invoke/InvokerBytecodeGenerator ++bool methodOopDesc::is_compiled_lambda_form() const { ++ return intrinsic_id() == vmIntrinsics::_compiledLambdaForm; + } + +-jint* methodOopDesc::method_type_offsets_chain() { +- static jint pchase[] = { -1, -1, -1 }; +- if (pchase[0] == -1) { +- jint step0 = in_bytes(constants_offset()); +- jint step1 = (constantPoolOopDesc::header_size() + _imcp_method_type_value) * HeapWordSize; +- // do this in reverse to avoid races: +- OrderAccess::release_store(&pchase[1], step1); +- OrderAccess::release_store(&pchase[0], step0); +- } +- return pchase; ++// Test if this method is an internal MH primitive method. 
++bool methodOopDesc::is_method_handle_intrinsic() const { ++ vmIntrinsics::ID iid = intrinsic_id(); ++ return (MethodHandles::is_signature_polymorphic(iid) && ++ MethodHandles::is_signature_polymorphic_intrinsic(iid)); + } + +-//------------------------------------------------------------------------------ +-// methodOopDesc::is_method_handle_adapter +-// +-// Tests if this method is an internal adapter frame from the +-// MethodHandleCompiler. +-// Must be consistent with MethodHandleCompiler::get_method_oop(). +-bool methodOopDesc::is_method_handle_adapter() const { +- if (is_synthetic() && +- !is_native() && // has code from MethodHandleCompiler +- is_method_handle_invoke_name(name()) && +- MethodHandleCompiler::klass_is_method_handle_adapter_holder(method_holder())) { +- assert(!is_method_handle_invoke(), "disjoint"); +- return true; +- } else { +- return false; +- } ++bool methodOopDesc::has_member_arg() const { ++ vmIntrinsics::ID iid = intrinsic_id(); ++ return (MethodHandles::is_signature_polymorphic(iid) && ++ MethodHandles::has_member_arg(iid)); + } + +-methodHandle methodOopDesc::make_invoke_method(KlassHandle holder, +- Symbol* name, +- Symbol* signature, +- Handle method_type, TRAPS) { ++// Make an instance of a signature-polymorphic internal MH primitive. 
++methodHandle methodOopDesc::make_method_handle_intrinsic(vmIntrinsics::ID iid, ++ Symbol* signature, ++ TRAPS) { + ResourceMark rm; + methodHandle empty; + +- assert(holder() == SystemDictionary::MethodHandle_klass(), +- "must be a JSR 292 magic type"); +- ++ KlassHandle holder = SystemDictionary::MethodHandle_klass(); ++ Symbol* name = MethodHandles::signature_polymorphic_intrinsic_name(iid); ++ assert(iid == MethodHandles::signature_polymorphic_name_id(name), ""); + if (TraceMethodHandles) { +- tty->print("Creating invoke method for "); +- signature->print_value(); +- tty->cr(); ++ tty->print_cr("make_method_handle_intrinsic MH.%s%s", name->as_C_string(), signature->as_C_string()); + } + + // invariant: cp->symbol_at_put is preceded by a refcount increment (more usually a lookup) + name->increment_refcount(); + signature->increment_refcount(); + +- // record non-BCP method types in the constant pool +- GrowableArray* extra_klasses = NULL; +- for (int i = -1, len = java_lang_invoke_MethodType::ptype_count(method_type()); i < len; i++) { +- oop ptype = (i == -1 +- ? java_lang_invoke_MethodType::rtype(method_type()) +- : java_lang_invoke_MethodType::ptype(method_type(), i)); +- klassOop klass = check_non_bcp_klass(java_lang_Class::as_klassOop(ptype)); +- if (klass != NULL) { +- if (extra_klasses == NULL) +- extra_klasses = new GrowableArray(len+1); +- bool dup = false; +- for (int j = 0; j < extra_klasses->length(); j++) { +- if (extra_klasses->at(j) == klass) { dup = true; break; } +- } +- if (!dup) +- extra_klasses->append(KlassHandle(THREAD, klass)); +- } +- } +- +- int extra_klass_count = (extra_klasses == NULL ? 
0 : extra_klasses->length()); +- int cp_length = _imcp_limit + extra_klass_count; ++ int cp_length = _imcp_limit; + constantPoolHandle cp; + { + constantPoolOop cp_oop = oopFactory::new_constantPool(cp_length, IsSafeConc, CHECK_(empty)); +@@ -964,19 +910,17 @@ + } + cp->symbol_at_put(_imcp_invoke_name, name); + cp->symbol_at_put(_imcp_invoke_signature, signature); +- cp->string_at_put(_imcp_method_type_value, Universe::the_null_string()); +- for (int j = 0; j < extra_klass_count; j++) { +- KlassHandle klass = extra_klasses->at(j); +- cp->klass_at_put(_imcp_limit + j, klass()); +- } + cp->set_preresolution(); + cp->set_pool_holder(holder()); + +- // set up the fancy stuff: +- cp->pseudo_string_at_put(_imcp_method_type_value, method_type()); ++ // decide on access bits: public or not? ++ int flags_bits = (JVM_ACC_NATIVE | JVM_ACC_SYNTHETIC | JVM_ACC_FINAL); ++ bool must_be_static = MethodHandles::is_signature_polymorphic_static(iid); ++ if (must_be_static) flags_bits |= JVM_ACC_STATIC; ++ assert((flags_bits & JVM_ACC_PUBLIC) == 0, "do not expose these methods"); ++ + methodHandle m; + { +- int flags_bits = (JVM_MH_INVOKE_BITS | JVM_ACC_PUBLIC | JVM_ACC_FINAL); + methodOop m_oop = oopFactory::new_method(0, accessFlags_from(flags_bits), + 0, 0, 0, IsSafeConc, CHECK_(empty)); + m = methodHandle(THREAD, m_oop); +@@ -984,9 +928,8 @@ + m->set_constants(cp()); + m->set_name_index(_imcp_invoke_name); + m->set_signature_index(_imcp_invoke_signature); +- assert(is_method_handle_invoke_name(m->name()), ""); ++ assert(MethodHandles::is_signature_polymorphic_name(m->name()), ""); + assert(m->signature() == signature, ""); +- assert(m->is_method_handle_invoke(), ""); + #ifdef CC_INTERP + ResultTypeFinder rtf(signature); + m->set_result_index(rtf.type()); +@@ -994,24 +937,18 @@ + m->compute_size_of_parameters(THREAD); + m->set_exception_table(Universe::the_empty_int_array()); + m->init_intrinsic_id(); +- assert(m->intrinsic_id() == vmIntrinsics::_invokeExact || +- m->intrinsic_id() 
== vmIntrinsics::_invokeGeneric, "must be an invoker"); ++ assert(m->is_method_handle_intrinsic(), ""); ++#ifdef ASSERT ++ if (!MethodHandles::is_signature_polymorphic(m->intrinsic_id())) m->print(); ++ assert(MethodHandles::is_signature_polymorphic(m->intrinsic_id()), "must be an invoker"); ++ assert(m->intrinsic_id() == iid, "correctly predicted iid"); ++#endif //ASSERT + + // Finally, set up its entry points. +- assert(m->method_handle_type() == method_type(), ""); + assert(m->can_be_statically_bound(), ""); + m->set_vtable_index(methodOopDesc::nonvirtual_vtable_index); + m->link_method(m, CHECK_(empty)); + +-#ifdef ASSERT +- // Make sure the pointer chase works. +- address p = (address) m(); +- for (jint* pchase = method_type_offsets_chain(); (*pchase) != -1; pchase++) { +- p = *(address*)(p + (*pchase)); +- } +- assert((oop)p == method_type(), "pointer chase is correct"); +-#endif +- + if (TraceMethodHandles && (Verbose || WizardMode)) + m->print_on(tty); + +@@ -1028,7 +965,7 @@ + } + + +-methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length, ++methodHandle methodOopDesc::clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length, + u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS) { + // Code below does not work for native methods - they should never get rewritten anyway + assert(!m->is_native(), "cannot rewrite native methods"); +@@ -1138,7 +1075,9 @@ + + // ditto for method and signature: + vmSymbols::SID name_id = vmSymbols::find_sid(name()); +- if (name_id == vmSymbols::NO_SID) return; ++ if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle) ++ && name_id == vmSymbols::NO_SID) ++ return; + vmSymbols::SID sig_id = vmSymbols::find_sid(signature()); + if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle) + && sig_id == vmSymbols::NO_SID) return; +@@ -1167,21 +1106,10 @@ + + // Signature-polymorphic methods: 
MethodHandle.invoke*, InvokeDynamic.*. + case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle): +- if (is_static() || !is_native()) break; +- switch (name_id) { +- case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name): +- if (!AllowInvokeGeneric) break; +- case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name): +- id = vmIntrinsics::_invokeGeneric; +- break; +- case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name): +- id = vmIntrinsics::_invokeExact; +- break; +- } +- break; +- case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_InvokeDynamic): +- if (!is_static() || !is_native()) break; +- id = vmIntrinsics::_invokeDynamic; ++ if (!is_native()) break; ++ id = MethodHandles::signature_polymorphic_name_id(method_holder(), name()); ++ if (is_static() != MethodHandles::is_signature_polymorphic_static(id)) ++ id = vmIntrinsics::_none; + break; + } + +@@ -1194,6 +1122,12 @@ + + // These two methods are static since a GC may move the methodOopDesc + bool methodOopDesc::load_signature_classes(methodHandle m, TRAPS) { ++ if (THREAD->is_Compiler_thread()) { ++ // There is nothing useful this routine can do from within the Compile thread. ++ // Hopefully, the signature contains only well-known classes. ++ // We could scan for this and return true/false, but the caller won't care. 
++ return false; ++ } + bool sig_is_loaded = true; + Handle class_loader(THREAD, instanceKlass::cast(m->method_holder())->class_loader()); + Handle protection_domain(THREAD, Klass::cast(m->method_holder())->protection_domain()); +@@ -1247,6 +1181,8 @@ + #endif + name()->print_symbol_on(st); + if (WizardMode) signature()->print_symbol_on(st); ++ else if (MethodHandles::is_signature_polymorphic(intrinsic_id())) ++ MethodHandles::print_as_basic_type_signature_on(st, signature(), true); + } + + // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/methodOop.hpp +--- openjdk/hotspot/src/share/vm/oops/methodOop.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/oops/methodOop.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -125,7 +125,10 @@ + u2 _size_of_parameters; // size of the parameter block (receiver + arguments) in words + u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none) + u1 _jfr_towrite : 1, // Flags +- : 7; ++ _force_inline : 1, ++ _hidden : 1, ++ _dont_inline : 1, ++ : 4; + u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting + u2 _number_of_breakpoints; // fullspeed debugging support + InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations +@@ -246,7 +249,7 @@ + void set_constants(constantPoolOop c) { oop_store_without_check((oop*)&_constants, c); } + + // max stack +- int max_stack() const { return _max_stack; } ++ int max_stack() const { return _max_stack + extra_stack_entries(); } + void set_max_stack(int size) { _max_stack = size; } + + // max locals +@@ -592,28 +595,19 @@ + bool is_overridden_in(klassOop k) const; + + // JSR 292 support +- bool is_method_handle_invoke() const { return access_flags().is_method_handle_invoke(); } +- static bool is_method_handle_invoke_name(vmSymbols::SID name_sid); +- 
static bool is_method_handle_invoke_name(Symbol* name) { +- return is_method_handle_invoke_name(vmSymbols::find_sid(name)); +- } +- // Tests if this method is an internal adapter frame from the +- // MethodHandleCompiler. +- bool is_method_handle_adapter() const; +- static methodHandle make_invoke_method(KlassHandle holder, +- Symbol* name, //invokeExact or invokeGeneric +- Symbol* signature, //anything at all +- Handle method_type, +- TRAPS); ++ bool is_method_handle_intrinsic() const; // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id) ++ bool is_compiled_lambda_form() const; // intrinsic_id() == vmIntrinsics::_compiledLambdaForm ++ bool has_member_arg() const; // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc. ++ static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual ++ Symbol* signature, //anything at all ++ TRAPS); + static klassOop check_non_bcp_klass(klassOop klass); + // these operate only on invoke methods: +- oop method_handle_type() const; +- static jint* method_type_offsets_chain(); // series of pointer-offsets, terminated by -1 + // presize interpreter frames for extra interpreter stack entries, if needed + // method handles want to be able to push a few extra values (e.g., a bound receiver), and + // invokedynamic sometimes needs to push a bootstrap method, call site, and arglist, + // all without checking for a stack overflow +- static int extra_stack_entries() { return EnableInvokeDynamic ? (int) MethodHandlePushLimit + 3 : 0; } ++ static int extra_stack_entries() { return EnableInvokeDynamic ? 
2 : 0; } + static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize() + + // RedefineClasses() support: +@@ -658,6 +652,13 @@ + bool jfr_towrite() { return _jfr_towrite; } + void set_jfr_towrite(bool towrite) { _jfr_towrite = towrite; } + ++ bool force_inline() { return _force_inline; } ++ void set_force_inline(bool x) { _force_inline = x; } ++ bool dont_inline() { return _dont_inline; } ++ void set_dont_inline(bool x) { _dont_inline = x; } ++ bool is_hidden() { return _hidden; } ++ void set_hidden(bool x) { _hidden = x; } ++ + // On-stack replacement support + bool has_osr_nmethod(int level, bool match_level) { + return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL; +@@ -703,8 +704,8 @@ + static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS); + + // Printing +- void print_short_name(outputStream* st); // prints as klassname::methodname; Exposed so field engineers can debug VM +- void print_name(outputStream* st); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses ++ void print_short_name(outputStream* st = tty); // prints as klassname::methodname; Exposed so field engineers can debug VM ++ void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses + + // Helper routine used for method sorting + static void sort_methods(objArrayOop methods, +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/symbol.cpp +--- openjdk/hotspot/src/share/vm/oops/symbol.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/oops/symbol.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -86,7 +86,7 @@ + address scan = bytes + i; + if (scan > limit) + return -1; +- for (;;) { ++ for (; scan <= limit; scan++) { + scan = (address) memchr(scan, first_char, (limit + 1 - scan)); + if (scan == NULL) + return -1; // not found +@@ -94,6 +94,7 @@ + if (memcmp(scan, str, len) == 0) + return (int)(scan - 
bytes); + } ++ return -1; + } + + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/bytecodeInfo.cpp +--- openjdk/hotspot/src/share/vm/opto/bytecodeInfo.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/opto/bytecodeInfo.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -93,7 +93,7 @@ + ); + } + +-// positive filter: should send be inlined? returns NULL, if yes, or rejection msg ++// positive filter: should callee be inlined? returns NULL, if yes, or rejection msg + const char* InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const { + // Allows targeted inlining + if(callee_method->should_inline()) { +@@ -131,33 +131,6 @@ + int call_site_count = method()->scale_count(profile.count()); + int invoke_count = method()->interpreter_invocation_count(); + +- // Bytecoded method handle adapters do not have interpreter +- // profiling data but only made up MDO data. Get the counter from +- // there. 
+- if (caller_method->is_method_handle_adapter()) { +- assert(method()->method_data_or_null(), "must have an MDO"); +- ciMethodData* mdo = method()->method_data(); +- ciProfileData* mha_profile = mdo->bci_to_data(caller_bci); +- assert(mha_profile, "must exist"); +- CounterData* cd = mha_profile->as_CounterData(); +- invoke_count = cd->count(); +- if (invoke_count == 0) { +- return "method handle not reached"; +- } +- +- if (_caller_jvms != NULL && _caller_jvms->method() != NULL && +- _caller_jvms->method()->method_data() != NULL && +- !_caller_jvms->method()->method_data()->is_empty()) { +- ciMethodData* mdo = _caller_jvms->method()->method_data(); +- ciProfileData* mha_profile = mdo->bci_to_data(_caller_jvms->bci()); +- assert(mha_profile, "must exist"); +- CounterData* cd = mha_profile->as_CounterData(); +- call_site_count = cd->count(); +- } else { +- call_site_count = invoke_count; // use the same value +- } +- } +- + assert(invoke_count != 0, "require invocation count greater than zero"); + int freq = call_site_count / invoke_count; + +@@ -189,15 +162,16 @@ + } + + +-// negative filter: should send NOT be inlined? returns NULL, ok to inline, or rejection msg ++// negative filter: should callee NOT be inlined? returns NULL, ok to inline, or rejection msg + const char* InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const { + // negative filter: should send NOT be inlined? 
returns NULL (--> inline) or rejection msg + if (!UseOldInlining) { + const char* fail = NULL; +- if (callee_method->is_abstract()) fail = "abstract method"; ++ if ( callee_method->is_abstract()) fail = "abstract method"; + // note: we allow ik->is_abstract() +- if (!callee_method->holder()->is_initialized()) fail = "method holder not initialized"; +- if (callee_method->is_native()) fail = "native method"; ++ if (!callee_method->holder()->is_initialized()) fail = "method holder not initialized"; ++ if ( callee_method->is_native()) fail = "native method"; ++ if ( callee_method->dont_inline()) fail = "don't inline by annotation"; + + if (fail) { + *wci_result = *(WarmCallInfo::always_cold()); +@@ -217,7 +191,8 @@ + } + } + +- if (callee_method->has_compiled_code() && callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode) { ++ if (callee_method->has_compiled_code() && ++ callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode) { + wci_result->set_profit(wci_result->profit() * 0.1); + // %%% adjust wci_result->size()? + } +@@ -225,26 +200,25 @@ + return NULL; + } + +- // Always inline MethodHandle methods and generated MethodHandle adapters. 
+- if (callee_method->is_method_handle_invoke() || callee_method->is_method_handle_adapter()) +- return NULL; ++ // First check all inlining restrictions which are required for correctness ++ if ( callee_method->is_abstract()) return "abstract method"; ++ // note: we allow ik->is_abstract() ++ if (!callee_method->holder()->is_initialized()) return "method holder not initialized"; ++ if ( callee_method->is_native()) return "native method"; ++ if ( callee_method->dont_inline()) return "don't inline by annotation"; ++ if ( callee_method->has_unloaded_classes_in_signature()) return "unloaded signature classes"; + +- // First check all inlining restrictions which are required for correctness +- if (callee_method->is_abstract()) return "abstract method"; +- // note: we allow ik->is_abstract() +- if (!callee_method->holder()->is_initialized()) return "method holder not initialized"; +- if (callee_method->is_native()) return "native method"; +- if (callee_method->has_unloaded_classes_in_signature()) return "unloaded signature classes"; +- +- if (callee_method->should_inline()) { ++ if (callee_method->force_inline() || callee_method->should_inline()) { + // ignore heuristic controls on inlining + return NULL; + } + + // Now perform checks which are heuristic + +- if( callee_method->has_compiled_code() && callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode ) ++ if (callee_method->has_compiled_code() && ++ callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode) { + return "already compiled into a big method"; ++ } + + // don't inline exception code unless the top method belongs to an + // exception class +@@ -258,7 +232,7 @@ + } + + // use frequency-based objections only for non-trivial methods +- if (callee_method->code_size_for_inlining() <= MaxTrivialSize) return NULL; ++ if (callee_method->code_size() <= MaxTrivialSize) return NULL; + + // don't use counts with -Xcomp or CTW + if (UseInterpreter && !CompileTheWorld) { 
+@@ -319,7 +293,7 @@ + } + + // suppress a few checks for accessors and trivial methods +- if (callee_method->code_size_for_inlining() > MaxTrivialSize) { ++ if (callee_method->code_size() > MaxTrivialSize) { + + // don't inline into giant methods + if (C->unique() > (uint)NodeCountInliningCutoff) { +@@ -346,7 +320,7 @@ + } + + // detect direct and indirect recursive inlining +- { ++ if (!callee_method->is_compiled_lambda_form()) { + // count the current method and the callee + int inline_level = (method() == callee_method) ? 1 : 0; + if (inline_level > MaxRecursiveInlineLevel) +@@ -412,6 +386,7 @@ + const char* InlineTree::check_can_parse(ciMethod* callee) { + // Certain methods cannot be parsed at all: + if ( callee->is_native()) return "native method"; ++ if ( callee->is_abstract()) return "abstract method"; + if (!callee->can_be_compiled()) return "not compilable (disabled)"; + if (!callee->has_balanced_monitors()) return "not compilable (unbalanced monitors)"; + if ( callee->get_flow_analysis()->failing()) return "not compilable (flow analysis failed)"; +@@ -426,7 +401,7 @@ + if (Verbose && callee_method) { + const InlineTree *top = this; + while( top->caller_tree() != NULL ) { top = top->caller_tree(); } +- tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count()); ++ //tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count()); + } + } + +@@ -449,10 +424,7 @@ + + // Do some initial checks. 
+ if (!pass_initial_checks(caller_method, caller_bci, callee_method)) { +- if (PrintInlining) { +- failure_msg = "failed_initial_checks"; +- print_inlining(callee_method, caller_bci, failure_msg); +- } ++ if (PrintInlining) print_inlining(callee_method, caller_bci, "failed initial checks"); + return NULL; + } + +@@ -539,9 +511,10 @@ + } + int max_inline_level_adjust = 0; + if (caller_jvms->method() != NULL) { +- if (caller_jvms->method()->is_method_handle_adapter()) ++ if (caller_jvms->method()->is_compiled_lambda_form()) + max_inline_level_adjust += 1; // don't count actions in MH or indy adapter frames +- else if (callee_method->is_method_handle_invoke()) { ++ else if (callee_method->is_method_handle_intrinsic() || ++ callee_method->is_compiled_lambda_form()) { + max_inline_level_adjust += 1; // don't count method handle calls from java.lang.invoke implem + } + if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) { +@@ -590,7 +563,7 @@ + // Given a jvms, which determines a call chain from the root method, + // find the corresponding inline tree. + // Note: This method will be removed or replaced as InlineTree goes away. +-InlineTree* InlineTree::find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found) { ++InlineTree* InlineTree::find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee) { + InlineTree* iltp = root; + uint depth = jvms && jvms->has_method() ? jvms->depth() : 0; + for (uint d = 1; d <= depth; d++) { +@@ -599,12 +572,12 @@ + assert(jvmsp->method() == iltp->method(), "tree still in sync"); + ciMethod* d_callee = (d == depth) ? 
callee : jvms->of_depth(d+1)->method(); + InlineTree* sub = iltp->callee_at(jvmsp->bci(), d_callee); +- if (!sub) { +- if (create_if_not_found && d == depth) { +- return iltp->build_inline_tree_for_callee(d_callee, jvmsp, jvmsp->bci()); ++ if (sub == NULL) { ++ if (d == depth) { ++ sub = iltp->build_inline_tree_for_callee(d_callee, jvmsp, jvmsp->bci()); + } +- assert(sub != NULL, "should be a sub-ilt here"); +- return NULL; ++ guarantee(sub != NULL, "should be a sub-ilt here"); ++ return sub; + } + iltp = sub; + } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/callGenerator.cpp +--- openjdk/hotspot/src/share/vm/opto/callGenerator.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/opto/callGenerator.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -26,6 +26,7 @@ + #include "ci/bcEscapeAnalyzer.hpp" + #include "ci/ciCallSite.hpp" + #include "ci/ciCPCache.hpp" ++#include "ci/ciMemberName.hpp" + #include "ci/ciMethodHandle.hpp" + #include "classfile/javaClasses.hpp" + #include "compiler/compileLog.hpp" +@@ -39,9 +40,6 @@ + #include "opto/runtime.hpp" + #include "opto/subnode.hpp" + +-CallGenerator::CallGenerator(ciMethod* method) { +- _method = method; +-} + + // Utility function. 
+ const TypeFunc* CallGenerator::tf() const { +@@ -147,7 +145,8 @@ + } + // Mark the call node as virtual, sort of: + call->set_optimized_virtual(true); +- if (method()->is_method_handle_invoke()) { ++ if (method()->is_method_handle_intrinsic() || ++ method()->is_compiled_lambda_form()) { + call->set_method_handle_invoke(true); + } + } +@@ -325,12 +324,13 @@ + + CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) { + assert(!m->is_static(), "for_virtual_call mismatch"); +- assert(!m->is_method_handle_invoke(), "should be a direct call"); ++ assert(!m->is_method_handle_intrinsic(), "should be a direct call"); + return new VirtualCallGenerator(m, vtable_index); + } + + CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) { +- assert(m->is_method_handle_invoke() || m->is_method_handle_adapter(), "for_dynamic_call mismatch"); ++ assert(m->is_compiled_lambda_form(), "for_dynamic_call mismatch"); ++ //@@ FIXME: this should be done via a direct call + return new DynamicCallGenerator(m); + } + +@@ -654,271 +654,95 @@ + } + + +-//------------------------PredictedDynamicCallGenerator----------------------- +-// Internal class which handles all out-of-line calls checking receiver type. 
+-class PredictedDynamicCallGenerator : public CallGenerator { +- ciMethodHandle* _predicted_method_handle; +- CallGenerator* _if_missed; +- CallGenerator* _if_hit; +- float _hit_prob; +- +-public: +- PredictedDynamicCallGenerator(ciMethodHandle* predicted_method_handle, +- CallGenerator* if_missed, +- CallGenerator* if_hit, +- float hit_prob) +- : CallGenerator(if_missed->method()), +- _predicted_method_handle(predicted_method_handle), +- _if_missed(if_missed), +- _if_hit(if_hit), +- _hit_prob(hit_prob) +- {} +- +- virtual bool is_inline() const { return _if_hit->is_inline(); } +- virtual bool is_deferred() const { return _if_hit->is_deferred(); } +- +- virtual JVMState* generate(JVMState* jvms); +-}; +- +- +-CallGenerator* CallGenerator::for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle, +- CallGenerator* if_missed, +- CallGenerator* if_hit, +- float hit_prob) { +- return new PredictedDynamicCallGenerator(predicted_method_handle, if_missed, if_hit, hit_prob); +-} +- +- +-CallGenerator* CallGenerator::for_method_handle_call(Node* method_handle, JVMState* jvms, +- ciMethod* caller, ciMethod* callee, ciCallProfile profile) { +- assert(callee->is_method_handle_invoke() || callee->is_method_handle_adapter(), "for_method_handle_call mismatch"); +- CallGenerator* cg = CallGenerator::for_method_handle_inline(method_handle, jvms, caller, callee, profile); ++CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee) { ++ assert(callee->is_method_handle_intrinsic() || ++ callee->is_compiled_lambda_form(), "for_method_handle_call mismatch"); ++ CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee); + if (cg != NULL) + return cg; + return CallGenerator::for_direct_call(callee); + } + +-CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMState* jvms, +- ciMethod* caller, ciMethod* callee, ciCallProfile profile) { +- if (method_handle->Opcode() == Op_ConP) { +- 
const TypeOopPtr* oop_ptr = method_handle->bottom_type()->is_oopptr(); +- ciObject* const_oop = oop_ptr->const_oop(); +- ciMethodHandle* method_handle = const_oop->as_method_handle(); +- +- // Set the callee to have access to the class and signature in +- // the MethodHandleCompiler. +- method_handle->set_callee(callee); +- method_handle->set_caller(caller); +- method_handle->set_call_profile(profile); +- +- // Get an adapter for the MethodHandle. +- ciMethod* target_method = method_handle->get_method_handle_adapter(); +- if (target_method != NULL) { +- CallGenerator* cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS); +- if (cg != NULL && cg->is_inline()) +- return cg; +- } +- } else if (method_handle->Opcode() == Op_Phi && method_handle->req() == 3 && +- method_handle->in(1)->Opcode() == Op_ConP && method_handle->in(2)->Opcode() == Op_ConP) { +- float prob = PROB_FAIR; +- Node* meth_region = method_handle->in(0); +- if (meth_region->is_Region() && +- meth_region->in(1)->is_Proj() && meth_region->in(2)->is_Proj() && +- meth_region->in(1)->in(0) == meth_region->in(2)->in(0) && +- meth_region->in(1)->in(0)->is_If()) { +- // If diamond, so grab the probability of the test to drive the inlining below +- prob = meth_region->in(1)->in(0)->as_If()->_prob; +- if (meth_region->in(1)->is_IfTrue()) { +- prob = 1 - prob; ++CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee) { ++ GraphKit kit(jvms); ++ PhaseGVN& gvn = kit.gvn(); ++ Compile* C = kit.C; ++ vmIntrinsics::ID iid = callee->intrinsic_id(); ++ switch (iid) { ++ case vmIntrinsics::_invokeBasic: ++ { ++ // get MethodHandle receiver ++ Node* receiver = kit.argument(0); ++ if (receiver->Opcode() == Op_ConP) { ++ const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr(); ++ ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget(); ++ guarantee(!target->is_method_handle_intrinsic(), "should not happen"); 
// XXX remove ++ const int vtable_index = methodOopDesc::invalid_vtable_index; ++ CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS); ++ if (cg != NULL && cg->is_inline()) ++ return cg; ++ } else { ++ if (PrintInlining) CompileTask::print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant"); + } + } ++ break; + +- // selectAlternative idiom merging two constant MethodHandles. +- // Generate a guard so that each can be inlined. We might want to +- // do more inputs at later point but this gets the most common +- // case. +- CallGenerator* cg1 = for_method_handle_call(method_handle->in(1), jvms, caller, callee, profile.rescale(1.0 - prob)); +- CallGenerator* cg2 = for_method_handle_call(method_handle->in(2), jvms, caller, callee, profile.rescale(prob)); +- if (cg1 != NULL && cg2 != NULL) { +- const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr(); +- ciObject* const_oop = oop_ptr->const_oop(); +- ciMethodHandle* mh = const_oop->as_method_handle(); +- return new PredictedDynamicCallGenerator(mh, cg2, cg1, prob); ++ case vmIntrinsics::_linkToVirtual: ++ case vmIntrinsics::_linkToStatic: ++ case vmIntrinsics::_linkToSpecial: ++ case vmIntrinsics::_linkToInterface: ++ { ++ // pop MemberName argument ++ Node* member_name = kit.argument(callee->arg_size() - 1); ++ if (member_name->Opcode() == Op_ConP) { ++ const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr(); ++ ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget(); ++ ++ // In lambda forms we erase signature types to avoid resolving issues ++ // involving class loaders. When we optimize a method handle invoke ++ // to a direct call we must cast the receiver and arguments to their ++ // actual types. ++ ciSignature* signature = target->signature(); ++ const int receiver_skip = target->is_static() ? 0 : 1; ++ // Cast receiver to its type. 
++ if (!target->is_static()) { ++ Node* arg = kit.argument(0); ++ const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr(); ++ const Type* sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass()); ++ if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { ++ Node* cast_obj = gvn.transform(new (C, 2) CheckCastPPNode(kit.control(), arg, sig_type)); ++ kit.set_argument(0, cast_obj); ++ } ++ } ++ // Cast reference arguments to its type. ++ for (int i = 0; i < signature->count(); i++) { ++ ciType* t = signature->type_at(i); ++ if (t->is_klass()) { ++ Node* arg = kit.argument(receiver_skip + i); ++ const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr(); ++ const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass()); ++ if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { ++ Node* cast_obj = gvn.transform(new (C, 2) CheckCastPPNode(kit.control(), arg, sig_type)); ++ kit.set_argument(receiver_skip + i, cast_obj); ++ } ++ } ++ } ++ const int vtable_index = methodOopDesc::invalid_vtable_index; ++ const bool call_is_virtual = target->is_abstract(); // FIXME workaround ++ CallGenerator* cg = C->call_generator(target, vtable_index, call_is_virtual, jvms, true, PROB_ALWAYS); ++ if (cg != NULL && cg->is_inline()) ++ return cg; ++ } + } ++ break; ++ ++ default: ++ fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid))); ++ break; + } + return NULL; + } + +-CallGenerator* CallGenerator::for_invokedynamic_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile) { +- assert(callee->is_method_handle_invoke() || callee->is_method_handle_adapter(), "for_invokedynamic_call mismatch"); +- // Get the CallSite object. +- ciBytecodeStream str(caller); +- str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci. 
+- ciCallSite* call_site = str.get_call_site(); +- CallGenerator* cg = CallGenerator::for_invokedynamic_inline(call_site, jvms, caller, callee, profile); +- if (cg != NULL) +- return cg; +- return CallGenerator::for_dynamic_call(callee); +-} +- +-CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms, +- ciMethod* caller, ciMethod* callee, ciCallProfile profile) { +- ciMethodHandle* method_handle = call_site->get_target(); +- +- // Set the callee to have access to the class and signature in the +- // MethodHandleCompiler. +- method_handle->set_callee(callee); +- method_handle->set_caller(caller); +- method_handle->set_call_profile(profile); +- +- // Get an adapter for the MethodHandle. +- ciMethod* target_method = method_handle->get_invokedynamic_adapter(); +- if (target_method != NULL) { +- Compile *C = Compile::current(); +- CallGenerator* cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS); +- if (cg != NULL && cg->is_inline()) { +- // Add a dependence for invalidation of the optimization. +- if (!call_site->is_constant_call_site()) { +- C->dependencies()->assert_call_site_target_value(call_site, method_handle); +- } +- return cg; +- } +- } +- return NULL; +-} +- +- +-JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) { +- GraphKit kit(jvms); +- Compile* C = kit.C; +- PhaseGVN& gvn = kit.gvn(); +- +- CompileLog* log = C->log(); +- if (log != NULL) { +- log->elem("predicted_dynamic_call bci='%d'", jvms->bci()); +- } +- +- const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true); +- Node* predicted_mh = kit.makecon(predicted_mh_ptr); +- +- Node* bol = NULL; +- int bc = jvms->method()->java_code_at_bci(jvms->bci()); +- if (bc != Bytecodes::_invokedynamic) { +- // This is the selectAlternative idiom for guardWithTest or +- // similar idioms. 
+- Node* receiver = kit.argument(0); +- +- // Check if the MethodHandle is the expected one +- Node* cmp = gvn.transform(new (C, 3) CmpPNode(receiver, predicted_mh)); +- bol = gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq) ); +- } else { +- // Get the constant pool cache from the caller class. +- ciMethod* caller_method = jvms->method(); +- ciBytecodeStream str(caller_method); +- str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci. +- ciCPCache* cpcache = str.get_cpcache(); +- +- // Get the offset of the CallSite from the constant pool cache +- // pointer. +- int index = str.get_method_index(); +- size_t call_site_offset = cpcache->get_f1_offset(index); +- +- // Load the CallSite object from the constant pool cache. +- const TypeOopPtr* cpcache_type = TypeOopPtr::make_from_constant(cpcache); // returns TypeAryPtr of type T_OBJECT +- const TypeOopPtr* call_site_type = TypeOopPtr::make_from_klass(C->env()->CallSite_klass()); +- Node* cpcache_adr = kit.makecon(cpcache_type); +- Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, call_site_offset); +- // The oops in the constant pool cache are not compressed; load then as raw pointers. +- Node* call_site = kit.make_load(kit.control(), call_site_adr, call_site_type, T_ADDRESS, Compile::AliasIdxRaw); +- +- // Load the target MethodHandle from the CallSite object. +- const TypeOopPtr* target_type = TypeOopPtr::make_from_klass(C->env()->MethodHandle_klass()); +- Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes()); +- Node* target_mh = kit.make_load(kit.control(), target_adr, target_type, T_OBJECT); +- +- // Check if the MethodHandle is still the same. 
+- Node* cmp = gvn.transform(new (C, 3) CmpPNode(target_mh, predicted_mh)); +- bol = gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq) ); +- } +- IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN); +- kit.set_control( gvn.transform(new (C, 1) IfTrueNode (iff))); +- Node* slow_ctl = gvn.transform(new (C, 1) IfFalseNode(iff)); +- +- SafePointNode* slow_map = NULL; +- JVMState* slow_jvms; +- { PreserveJVMState pjvms(&kit); +- kit.set_control(slow_ctl); +- if (!kit.stopped()) { +- slow_jvms = _if_missed->generate(kit.sync_jvms()); +- if (kit.failing()) +- return NULL; // might happen because of NodeCountInliningCutoff +- assert(slow_jvms != NULL, "must be"); +- kit.add_exception_states_from(slow_jvms); +- kit.set_map(slow_jvms->map()); +- if (!kit.stopped()) +- slow_map = kit.stop(); +- } +- } +- +- if (kit.stopped()) { +- // Instance exactly does not matches the desired type. +- kit.set_jvms(slow_jvms); +- return kit.transfer_exceptions_into_jvms(); +- } +- +- // Make the hot call: +- JVMState* new_jvms = _if_hit->generate(kit.sync_jvms()); +- if (new_jvms == NULL) { +- // Inline failed, so make a direct call. +- assert(_if_hit->is_inline(), "must have been a failed inline"); +- CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method()); +- new_jvms = cg->generate(kit.sync_jvms()); +- } +- kit.add_exception_states_from(new_jvms); +- kit.set_jvms(new_jvms); +- +- // Need to merge slow and fast? +- if (slow_map == NULL) { +- // The fast path is the only path remaining. +- return kit.transfer_exceptions_into_jvms(); +- } +- +- if (kit.stopped()) { +- // Inlined method threw an exception, so it's just the slow path after all. +- kit.set_jvms(slow_jvms); +- return kit.transfer_exceptions_into_jvms(); +- } +- +- // Finish the diamond. 
+- kit.C->set_has_split_ifs(true); // Has chance for split-if optimization +- RegionNode* region = new (C, 3) RegionNode(3); +- region->init_req(1, kit.control()); +- region->init_req(2, slow_map->control()); +- kit.set_control(gvn.transform(region)); +- Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO); +- iophi->set_req(2, slow_map->i_o()); +- kit.set_i_o(gvn.transform(iophi)); +- kit.merge_memory(slow_map->merged_memory(), region, 2); +- uint tos = kit.jvms()->stkoff() + kit.sp(); +- uint limit = slow_map->req(); +- for (uint i = TypeFunc::Parms; i < limit; i++) { +- // Skip unused stack slots; fast forward to monoff(); +- if (i == tos) { +- i = kit.jvms()->monoff(); +- if( i >= limit ) break; +- } +- Node* m = kit.map()->in(i); +- Node* n = slow_map->in(i); +- if (m != n) { +- const Type* t = gvn.type(m)->meet(gvn.type(n)); +- Node* phi = PhiNode::make(region, m, t); +- phi->set_req(2, n); +- kit.map()->set_req(i, gvn.transform(phi)); +- } +- } +- return kit.transfer_exceptions_into_jvms(); +-} +- + + //-------------------------UncommonTrapCallGenerator----------------------------- + // Internal class which handles all out-of-line calls checking receiver type. +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/callGenerator.hpp +--- openjdk/hotspot/src/share/vm/opto/callGenerator.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/opto/callGenerator.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -25,6 +25,7 @@ + #ifndef SHARE_VM_OPTO_CALLGENERATOR_HPP + #define SHARE_VM_OPTO_CALLGENERATOR_HPP + ++#include "compiler/compileBroker.hpp" + #include "opto/callnode.hpp" + #include "opto/compile.hpp" + #include "opto/type.hpp" +@@ -44,7 +45,7 @@ + ciMethod* _method; // The method being called. 
+ + protected: +- CallGenerator(ciMethod* method); ++ CallGenerator(ciMethod* method) : _method(method) {} + + public: + // Accessors +@@ -111,11 +112,8 @@ + static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index); // virtual, interface + static CallGenerator* for_dynamic_call(ciMethod* m); // invokedynamic + +- static CallGenerator* for_method_handle_call(Node* method_handle, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile); +- static CallGenerator* for_invokedynamic_call( JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile); +- +- static CallGenerator* for_method_handle_inline(Node* method_handle, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile); +- static CallGenerator* for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile); ++ static CallGenerator* for_method_handle_call( JVMState* jvms, ciMethod* caller, ciMethod* callee); ++ static CallGenerator* for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee); + + // How to generate a replace a direct call with an inline version + static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg); +@@ -145,13 +143,21 @@ + // Registry for intrinsics: + static CallGenerator* for_intrinsic(ciMethod* m); + static void register_intrinsic(ciMethod* m, CallGenerator* cg); ++ ++ static void print_inlining(ciMethod* callee, int inline_level, int bci, const char* msg) { ++ if (PrintInlining) ++ CompileTask::print_inlining(callee, inline_level, bci, msg); ++ } + }; + ++ ++//------------------------InlineCallGenerator---------------------------------- + class InlineCallGenerator : public CallGenerator { ++ protected: ++ InlineCallGenerator(ciMethod* method) : CallGenerator(method) {} ++ ++ public: + virtual bool is_inline() const { return true; } +- +- protected: +- InlineCallGenerator(ciMethod* method) : CallGenerator(method) { } + }; + + +diff 
-r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/callnode.cpp +--- openjdk/hotspot/src/share/vm/opto/callnode.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/opto/callnode.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -231,9 +231,9 @@ + } + + //============================================================================= +-JVMState::JVMState(ciMethod* method, JVMState* caller) { ++JVMState::JVMState(ciMethod* method, JVMState* caller) : ++ _method(method) { + assert(method != NULL, "must be valid call site"); +- _method = method; + _reexecute = Reexecute_Undefined; + debug_only(_bci = -99); // random garbage value + debug_only(_map = (SafePointNode*)-1); +@@ -246,8 +246,8 @@ + _endoff = _monoff; + _sp = 0; + } +-JVMState::JVMState(int stack_size) { +- _method = NULL; ++JVMState::JVMState(int stack_size) : ++ _method(NULL) { + _bci = InvocationEntryBci; + _reexecute = Reexecute_Undefined; + debug_only(_map = (SafePointNode*)-1); +@@ -526,8 +526,8 @@ + } + _map->dump(2); + } +- st->print("JVMS depth=%d loc=%d stk=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=", +- depth(), locoff(), stkoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false"); ++ st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=", ++ depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false"); + if (_method == NULL) { + st->print_cr("(none)"); + } else { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/callnode.hpp +--- openjdk/hotspot/src/share/vm/opto/callnode.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/opto/callnode.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -197,7 +197,7 @@ + + private: + JVMState* _caller; // List pointer for forming scope chains +- uint _depth; // One mroe than caller depth, or one. 
++ uint _depth; // One more than caller depth, or one. + uint _locoff; // Offset to locals in input edge mapping + uint _stkoff; // Offset to stack in input edge mapping + uint _monoff; // Offset to monitors in input edge mapping +@@ -223,6 +223,8 @@ + JVMState(int stack_size); // root state; has a null method + + // Access functions for the JVM ++ // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---| ++ // \ locoff \ stkoff \ argoff \ monoff \ scloff \ endoff + uint locoff() const { return _locoff; } + uint stkoff() const { return _stkoff; } + uint argoff() const { return _stkoff + _sp; } +@@ -231,15 +233,16 @@ + uint endoff() const { return _endoff; } + uint oopoff() const { return debug_end(); } + +- int loc_size() const { return _stkoff - _locoff; } +- int stk_size() const { return _monoff - _stkoff; } +- int mon_size() const { return _scloff - _monoff; } +- int scl_size() const { return _endoff - _scloff; } ++ int loc_size() const { return stkoff() - locoff(); } ++ int stk_size() const { return monoff() - stkoff(); } ++ int arg_size() const { return monoff() - argoff(); } ++ int mon_size() const { return scloff() - monoff(); } ++ int scl_size() const { return endoff() - scloff(); } + +- bool is_loc(uint i) const { return i >= _locoff && i < _stkoff; } +- bool is_stk(uint i) const { return i >= _stkoff && i < _monoff; } +- bool is_mon(uint i) const { return i >= _monoff && i < _scloff; } +- bool is_scl(uint i) const { return i >= _scloff && i < _endoff; } ++ bool is_loc(uint i) const { return locoff() <= i && i < stkoff(); } ++ bool is_stk(uint i) const { return stkoff() <= i && i < monoff(); } ++ bool is_mon(uint i) const { return monoff() <= i && i < scloff(); } ++ bool is_scl(uint i) const { return scloff() <= i && i < endoff(); } + + uint sp() const { return _sp; } + int bci() const { return _bci; } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/doCall.cpp +--- openjdk/hotspot/src/share/vm/opto/doCall.cpp Tue Jan 14 20:24:44 2014 
-0500 ++++ openjdk/hotspot/src/share/vm/opto/doCall.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -59,13 +59,13 @@ + } + #endif + +-CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, ++CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_is_virtual, + JVMState* jvms, bool allow_inline, + float prof_factor) { + ciMethod* caller = jvms->method(); + int bci = jvms->bci(); + Bytecodes::Code bytecode = caller->java_code_at_bci(bci); +- guarantee(call_method != NULL, "failed method resolution"); ++ guarantee(callee != NULL, "failed method resolution"); + + // Dtrace currently doesn't work unless all calls are vanilla + if (env()->dtrace_method_probes()) { +@@ -91,7 +91,7 @@ + int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1; + int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1; + log->begin_elem("call method='%d' count='%d' prof_factor='%g'", +- log->identify(call_method), site_count, prof_factor); ++ log->identify(callee), site_count, prof_factor); + if (call_is_virtual) log->print(" virtual='1'"); + if (allow_inline) log->print(" inline='1'"); + if (receiver_count >= 0) { +@@ -109,7 +109,7 @@ + // We do this before the strict f.p. check below because the + // intrinsics handle strict f.p. correctly. + if (allow_inline) { +- CallGenerator* cg = find_intrinsic(call_method, call_is_virtual); ++ CallGenerator* cg = find_intrinsic(callee, call_is_virtual); + if (cg != NULL) return cg; + } + +@@ -117,19 +117,12 @@ + // NOTE: This must happen before normal inlining logic below since + // MethodHandle.invoke* are native methods which obviously don't + // have bytecodes and so normal inlining fails. 
+- if (call_method->is_method_handle_invoke()) { +- if (bytecode != Bytecodes::_invokedynamic) { +- GraphKit kit(jvms); +- Node* method_handle = kit.argument(0); +- return CallGenerator::for_method_handle_call(method_handle, jvms, caller, call_method, profile); +- } +- else { +- return CallGenerator::for_invokedynamic_call(jvms, caller, call_method, profile); +- } ++ if (callee->is_method_handle_intrinsic()) { ++ return CallGenerator::for_method_handle_call(jvms, caller, callee); + } + + // Do not inline strict fp into non-strict code, or the reverse +- if (caller->is_strict() ^ call_method->is_strict()) { ++ if (caller->is_strict() ^ callee->is_strict()) { + allow_inline = false; + } + +@@ -155,26 +148,26 @@ + } + WarmCallInfo scratch_ci; + if (!UseOldInlining) +- scratch_ci.init(jvms, call_method, profile, prof_factor); +- WarmCallInfo* ci = ilt->ok_to_inline(call_method, jvms, profile, &scratch_ci); ++ scratch_ci.init(jvms, callee, profile, prof_factor); ++ WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci); + assert(ci != &scratch_ci, "do not let this pointer escape"); + bool allow_inline = (ci != NULL && !ci->is_cold()); + bool require_inline = (allow_inline && ci->is_hot()); + + if (allow_inline) { +- CallGenerator* cg = CallGenerator::for_inline(call_method, expected_uses); +- if (require_inline && cg != NULL && should_delay_inlining(call_method, jvms)) { ++ CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses); ++ if (require_inline && cg != NULL && should_delay_inlining(callee, jvms)) { + // Delay the inlining of this method to give us the + // opportunity to perform some high level optimizations + // first. +- return CallGenerator::for_late_inline(call_method, cg); ++ return CallGenerator::for_late_inline(callee, cg); + } + if (cg == NULL) { + // Fall through. 
+ } else if (require_inline || !InlineWarmCalls) { + return cg; + } else { +- CallGenerator* cold_cg = call_generator(call_method, vtable_index, call_is_virtual, jvms, false, prof_factor); ++ CallGenerator* cold_cg = call_generator(callee, vtable_index, call_is_virtual, jvms, false, prof_factor); + return CallGenerator::for_warm_call(ci, cold_cg, cg); + } + } +@@ -189,7 +182,7 @@ + (profile.morphism() == 2 && UseBimorphicInlining)) { + // receiver_method = profile.method(); + // Profiles do not suggest methods now. Look it up in the major receiver. +- receiver_method = call_method->resolve_invoke(jvms->method()->holder(), ++ receiver_method = callee->resolve_invoke(jvms->method()->holder(), + profile.receiver(0)); + } + if (receiver_method != NULL) { +@@ -201,7 +194,7 @@ + CallGenerator* next_hit_cg = NULL; + ciMethod* next_receiver_method = NULL; + if (profile.morphism() == 2 && UseBimorphicInlining) { +- next_receiver_method = call_method->resolve_invoke(jvms->method()->holder(), ++ next_receiver_method = callee->resolve_invoke(jvms->method()->holder(), + profile.receiver(1)); + if (next_receiver_method != NULL) { + next_hit_cg = this->call_generator(next_receiver_method, +@@ -224,12 +217,12 @@ + ) { + // Generate uncommon trap for class check failure path + // in case of monomorphic or bimorphic virtual call site. +- miss_cg = CallGenerator::for_uncommon_trap(call_method, reason, ++ miss_cg = CallGenerator::for_uncommon_trap(callee, reason, + Deoptimization::Action_maybe_recompile); + } else { + // Generate virtual call for class check failure path + // in case of polymorphic virtual call site. +- miss_cg = CallGenerator::for_virtual_call(call_method, vtable_index); ++ miss_cg = CallGenerator::for_virtual_call(callee, vtable_index); + } + if (miss_cg != NULL) { + if (next_hit_cg != NULL) { +@@ -252,11 +245,11 @@ + // There was no special inlining tactic, or it bailed out. + // Use a more generic tactic, like a simple call. 
+ if (call_is_virtual) { +- return CallGenerator::for_virtual_call(call_method, vtable_index); ++ return CallGenerator::for_virtual_call(callee, vtable_index); + } else { + // Class Hierarchy Analysis or Type Profile reveals a unique target, + // or it is a static or special call. +- return CallGenerator::for_direct_call(call_method, should_delay_inlining(call_method, jvms)); ++ return CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms)); + } + } + +@@ -355,33 +348,40 @@ + + // Find target being called + bool will_link; +- ciMethod* dest_method = iter().get_method(will_link); +- ciInstanceKlass* holder_klass = dest_method->holder(); ++ ciMethod* bc_callee = iter().get_method(will_link); // actual callee from bytecode ++ ciInstanceKlass* holder_klass = bc_callee->holder(); + ciKlass* holder = iter().get_declared_method_holder(); + ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder); + +- int nargs = dest_method->arg_size(); +- if (is_invokedynamic) nargs -= 1; +- + // uncommon-trap when callee is unloaded, uninitialized or will not link + // bailout when too many arguments for register representation +- if (!will_link || can_not_compile_call_site(dest_method, klass)) { ++ if (!will_link || can_not_compile_call_site(bc_callee, klass)) { + #ifndef PRODUCT + if (PrintOpto && (Verbose || WizardMode)) { + method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci()); +- dest_method->print_name(); tty->cr(); ++ bc_callee->print_name(); tty->cr(); + } + #endif + return; + } + assert(holder_klass->is_loaded(), ""); +- assert((dest_method->is_static() || is_invokedynamic) == !has_receiver , "must match bc"); ++ //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc"); // XXX invokehandle (cur_bc_raw) + // Note: this takes into account invokeinterface of methods declared in java/lang/Object, + // which should be invokevirtuals but according to the VM spec may be 
invokeinterfaces + assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc"); + // Note: In the absence of miranda methods, an abstract class K can perform + // an invokevirtual directly on an interface method I.m if K implements I. + ++ const int nargs = bc_callee->arg_size(); ++ ++ // Push appendix argument (MethodType, CallSite, etc.), if one. ++ if (iter().has_appendix()) { ++ ciObject* appendix_arg = iter().get_appendix(); ++ const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg); ++ Node* appendix_arg_node = _gvn.makecon(appendix_arg_type); ++ push(appendix_arg_node); ++ } ++ + // --------------------- + // Does Class Hierarchy Analysis reveal only a single target of a v-call? + // Then we may inline or make a static call, but become dependent on there being only 1 target. +@@ -392,21 +392,21 @@ + // Choose call strategy. + bool call_is_virtual = is_virtual_or_interface; + int vtable_index = methodOopDesc::invalid_vtable_index; +- ciMethod* call_method = dest_method; ++ ciMethod* callee = bc_callee; + + // Try to get the most accurate receiver type + if (is_virtual_or_interface) { + Node* receiver_node = stack(sp() - nargs); + const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr(); +- ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type); ++ ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, bc_callee, receiver_type); + + // Have the call been sufficiently improved such that it is no longer a virtual? 
+ if (optimized_virtual_method != NULL) { +- call_method = optimized_virtual_method; ++ callee = optimized_virtual_method; + call_is_virtual = false; +- } else if (!UseInlineCaches && is_virtual && call_method->is_loaded()) { ++ } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) { + // We can make a vtable call at this site +- vtable_index = call_method->resolve_vtable_index(method()->holder(), klass); ++ vtable_index = callee->resolve_vtable_index(method()->holder(), klass); + } + } + +@@ -416,22 +416,24 @@ + bool try_inline = (C->do_inlining() || InlineAccessors); + + // --------------------- +- inc_sp(- nargs); // Temporarily pop args for JVM state of call ++ dec_sp(nargs); // Temporarily pop args for JVM state of call + JVMState* jvms = sync_jvms(); + + // --------------------- + // Decide call tactic. + // This call checks with CHA, the interpreter profile, intrinsics table, etc. + // It decides whether inlining is desirable or not. +- CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor()); ++ CallGenerator* cg = C->call_generator(callee, vtable_index, call_is_virtual, jvms, try_inline, prof_factor()); ++ ++ bc_callee = callee = NULL; // don't use bc_callee and callee after this point + + // --------------------- + // Round double arguments before call +- round_double_arguments(dest_method); ++ round_double_arguments(cg->method()); + + #ifndef PRODUCT + // bump global counters for calls +- count_compiled_calls(false/*at_method_entry*/, cg->is_inline()); ++ count_compiled_calls(/*at_method_entry*/ false, cg->is_inline()); + + // Record first part of parsing work for this call + parse_histogram()->record_change(); +@@ -447,8 +449,8 @@ + // because exceptions don't return to the call site.) 
+ profile_call(receiver); + +- JVMState* new_jvms; +- if ((new_jvms = cg->generate(jvms)) == NULL) { ++ JVMState* new_jvms = cg->generate(jvms); ++ if (new_jvms == NULL) { + // When inlining attempt fails (e.g., too many arguments), + // it may contaminate the current compile state, making it + // impossible to pull back and try again. Once we call +@@ -469,7 +471,7 @@ + // intrinsic was expecting to optimize. The fallback position is + // to call out-of-line. + try_inline = false; // Inline tactic bailed out. +- cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor()); ++ cg = C->call_generator(cg->method(), vtable_index, call_is_virtual, jvms, try_inline, prof_factor()); + if ((new_jvms = cg->generate(jvms)) == NULL) { + guarantee(failing(), "call failed to generate: calls should work"); + return; +@@ -478,8 +480,8 @@ + + if (cg->is_inline()) { + // Accumulate has_loops estimate +- C->set_has_loops(C->has_loops() || call_method->has_loops()); +- C->env()->notice_inlined_method(call_method); ++ C->set_has_loops(C->has_loops() || cg->method()->has_loops()); ++ C->env()->notice_inlined_method(cg->method()); + } + + // Reset parser state from [new_]jvms, which now carries results of the call. +@@ -501,20 +503,74 @@ + } + + // Round double result after a call from strict to non-strict code +- round_double_result(dest_method); ++ round_double_result(cg->method()); ++ ++ ciType* rtype = cg->method()->return_type(); ++ if (iter().cur_bc_raw() == Bytecodes::_invokehandle || is_invokedynamic) { ++ // Be careful here with return types. ++ ciType* ctype = iter().get_declared_method_signature()->return_type(); ++ if (ctype != rtype) { ++ BasicType rt = rtype->basic_type(); ++ BasicType ct = ctype->basic_type(); ++ Node* retnode = peek(); ++ if (ct == T_VOID) { ++ // It's OK for a method to return a value that is discarded. ++ // The discarding does not require any special action from the caller. 
++ // The Java code knows this, at VerifyType.isNullConversion. ++ pop_node(rt); // whatever it was, pop it ++ retnode = top(); ++ } else if (rt == T_INT || is_subword_type(rt)) { ++ // FIXME: This logic should be factored out. ++ if (ct == T_BOOLEAN) { ++ retnode = _gvn.transform( new (C, 3) AndINode(retnode, intcon(0x1)) ); ++ } else if (ct == T_CHAR) { ++ retnode = _gvn.transform( new (C, 3) AndINode(retnode, intcon(0xFFFF)) ); ++ } else if (ct == T_BYTE) { ++ retnode = _gvn.transform( new (C, 3) LShiftINode(retnode, intcon(24)) ); ++ retnode = _gvn.transform( new (C, 3) RShiftINode(retnode, intcon(24)) ); ++ } else if (ct == T_SHORT) { ++ retnode = _gvn.transform( new (C, 3) LShiftINode(retnode, intcon(16)) ); ++ retnode = _gvn.transform( new (C, 3) RShiftINode(retnode, intcon(16)) ); ++ } else { ++ assert(ct == T_INT, err_msg("rt=%d, ct=%d", rt, ct)); ++ } ++ } else if (rt == T_OBJECT) { ++ assert(ct == T_OBJECT, err_msg("rt=T_OBJECT, ct=%d", ct)); ++ if (ctype->is_loaded()) { ++ Node* if_fail = top(); ++ retnode = gen_checkcast(retnode, makecon(TypeKlassPtr::make(ctype->as_klass())), &if_fail); ++ if (if_fail != top()) { ++ PreserveJVMState pjvms(this); ++ set_control(if_fail); ++ builtin_throw(Deoptimization::Reason_class_check); ++ } ++ pop(); ++ push(retnode); ++ } ++ } else { ++ assert(ct == rt, err_msg("unexpected mismatch rt=%d, ct=%d", rt, ct)); ++ // push a zero; it's better than getting an oop/int mismatch ++ retnode = pop_node(rt); ++ retnode = zerocon(ct); ++ push_node(ct, retnode); ++ } ++ // Now that the value is well-behaved, continue with the call-site type. ++ rtype = ctype; ++ } ++ } + + // If the return type of the method is not loaded, assert that the + // value we got is a null. Otherwise, we need to recompile. 
+- if (!dest_method->return_type()->is_loaded()) { ++ if (!rtype->is_loaded()) { + #ifndef PRODUCT + if (PrintOpto && (Verbose || WizardMode)) { + method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci()); +- dest_method->print_name(); tty->cr(); ++ cg->method()->print_name(); tty->cr(); + } + #endif + if (C->log() != NULL) { + C->log()->elem("assert_null reason='return' klass='%d'", +- C->log()->identify(dest_method->return_type())); ++ C->log()->identify(rtype)); + } + // If there is going to be a trap, put it at the next bytecode: + set_bci(iter().next_bci()); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/graphKit.cpp +--- openjdk/hotspot/src/share/vm/opto/graphKit.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/opto/graphKit.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -965,7 +965,7 @@ + assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, ""); + } + +-bool GraphKit::compute_stack_effects(int& inputs, int& depth) { ++bool GraphKit::compute_stack_effects(int& inputs, int& depth, bool for_parse) { + Bytecodes::Code code = java_bc(); + if (code == Bytecodes::_wide) { + code = method()->java_code_at_bci(bci() + 1); +@@ -1032,12 +1032,21 @@ + ciBytecodeStream iter(method()); + iter.reset_to_bci(bci()); + iter.next(); +- ciMethod* method = iter.get_method(ignore); ++ ciMethod* callee = iter.get_method(ignore); + // (Do not use ciMethod::arg_size(), because + // it might be an unloaded method, which doesn't + // know whether it is static or not.) +- inputs = method->invoke_arg_size(code); +- int size = method->return_type()->size(); ++ if (for_parse) { ++ // Case 1: When called from parse we are *before* the invoke (in the ++ // caller) and need to to adjust the inputs by an appendix ++ // argument that will be pushed implicitly. ++ inputs = callee->invoke_arg_size(code) - (iter.has_appendix() ? 
1 : 0); ++ } else { ++ // Case 2: Here we are *after* the invoke (in the callee) and need to ++ // remove any appendix arguments that were popped. ++ inputs = callee->invoke_arg_size(code) - (callee->has_member_arg() ? 1 : 0); ++ } ++ int size = callee->return_type()->size(); + depth = size - inputs; + } + break; +@@ -1373,7 +1382,6 @@ + } + + +- + //============================================================================= + //--------------------------------memory--------------------------------------- + Node* GraphKit::memory(uint alias_idx) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/graphKit.hpp +--- openjdk/hotspot/src/share/vm/opto/graphKit.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/opto/graphKit.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -145,6 +145,7 @@ + void clean_stack(int from_sp); // clear garbage beyond from_sp to top + + void inc_sp(int i) { set_sp(sp() + i); } ++ void dec_sp(int i) { set_sp(sp() - i); } + void set_bci(int bci) { _bci = bci; } + + // Make sure jvms has current bci & sp. +@@ -285,7 +286,7 @@ + // How many stack inputs does the current BC consume? + // And, how does the stack change after the bytecode? + // Returns false if unknown. +- bool compute_stack_effects(int& inputs, int& depth); ++ bool compute_stack_effects(int& inputs, int& depth, bool for_parse = false); + + // Add a fixed offset to a pointer + Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) { +@@ -370,9 +371,9 @@ + // Replace all occurrences of one node by another. 
+ void replace_in_map(Node* old, Node* neww); + +- void push(Node* n) { map_not_null(); _map->set_stack(_map->_jvms,_sp++,n); } +- Node* pop() { map_not_null(); return _map->stack(_map->_jvms,--_sp); } +- Node* peek(int off=0) { map_not_null(); return _map->stack(_map->_jvms, _sp - off - 1); } ++ void push(Node* n) { map_not_null(); _map->set_stack(_map->_jvms, _sp++, n); } ++ Node* pop() { map_not_null(); return _map->stack( _map->_jvms, --_sp); } ++ Node* peek(int off = 0) { map_not_null(); return _map->stack( _map->_jvms, _sp - off - 1); } + + void push_pair(Node* ldval) { + push(ldval); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/library_call.cpp +--- openjdk/hotspot/src/share/vm/opto/library_call.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/opto/library_call.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -2104,7 +2104,7 @@ + if (id == vmIntrinsics::_reverseBytes_l && !Matcher::has_match_rule(Op_ReverseBytesL)) return false; + if (id == vmIntrinsics::_reverseBytes_c && !Matcher::has_match_rule(Op_ReverseBytesUS)) return false; + if (id == vmIntrinsics::_reverseBytes_s && !Matcher::has_match_rule(Op_ReverseBytesS)) return false; +- _sp += arg_size(); // restore stack pointer ++ _sp += arg_size(); // restore stack pointer + switch (id) { + case vmIntrinsics::_reverseBytes_i: + push(_gvn.transform(new (C, 2) ReverseBytesINode(0, pop()))); +@@ -2277,6 +2277,7 @@ + + // Argument words: "this" plus (oop/offset) or (lo/hi) args plus maybe 1 or 2 value words + int nargs = 1 + (is_native_ptr ? 2 : 3) + (is_store ? 
type_words : 0); ++ assert(callee()->arg_size() == nargs, "must be"); + + debug_only(int saved_sp = _sp); + _sp += nargs; +@@ -3932,7 +3933,8 @@ + } + } + +- if (method->is_method_handle_adapter()) { ++ if (method->is_method_handle_intrinsic() || ++ method->is_compiled_lambda_form()) { + // This is an internal adapter frame from the MethodHandleCompiler -- skip it + return true; + } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/matcher.cpp +--- openjdk/hotspot/src/share/vm/opto/matcher.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/opto/matcher.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -1231,8 +1231,9 @@ + if (is_method_handle_invoke) { + // Kill some extra stack space in case method handles want to do + // a little in-place argument insertion. ++ // FIXME: Is this still necessary? + int regs_per_word = NOT_LP64(1) LP64_ONLY(2); // %%% make a global const! +- out_arg_limit_per_call += MethodHandlePushLimit * regs_per_word; ++ out_arg_limit_per_call += methodOopDesc::extra_stack_entries() * regs_per_word; + // Do not update mcall->_argsize because (a) the extra space is not + // pushed as arguments and (b) _argsize is dead (not used anywhere). + } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/node.hpp +--- openjdk/hotspot/src/share/vm/opto/node.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/opto/node.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -362,7 +362,7 @@ + #endif + + // Reference to the i'th input Node. Error if out of bounds. +- Node* in(uint i) const { assert(i < _max,"oob"); return _in[i]; } ++ Node* in(uint i) const { assert(i < _max, err_msg("oob: i=%d, _max=%d", i, _max)); return _in[i]; } + // Reference to the i'th output Node. Error if out of bounds. + // Use this accessor sparingly. We are going trying to use iterators instead. 
+ Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; } +@@ -393,7 +393,7 @@ + void ins_req( uint i, Node *n ); // Insert a NEW required input + void set_req( uint i, Node *n ) { + assert( is_not_dead(n), "can not use dead node"); +- assert( i < _cnt, "oob"); ++ assert( i < _cnt, err_msg("oob: i=%d, _cnt=%d", i, _cnt)); + assert( !VerifyHashTableKeys || _hash_lock == 0, + "remove node from hash table before modifying it"); + Node** p = &_in[i]; // cache this._in, across the del_out call +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/parse.hpp +--- openjdk/hotspot/src/share/vm/opto/parse.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/opto/parse.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -84,7 +84,7 @@ + static const char* check_can_parse(ciMethod* callee); + + static InlineTree* build_inline_tree_root(); +- static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false); ++ static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee); + + // For temporary (stack-allocated, stateless) ilts: + InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/parse1.cpp +--- openjdk/hotspot/src/share/vm/opto/parse1.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/opto/parse1.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -398,7 +398,7 @@ + if (PrintCompilation || PrintOpto) { + // Make sure I have an inline tree, so I can print messages about it. + JVMState* ilt_caller = is_osr_parse() ? 
caller->caller() : caller; +- InlineTree::find_subtree_from_root(C->ilt(), ilt_caller, parse_method, true); ++ InlineTree::find_subtree_from_root(C->ilt(), ilt_caller, parse_method); + } + _max_switch_depth = 0; + _est_switch_depth = 0; +@@ -1398,8 +1398,8 @@ + #ifdef ASSERT + int pre_bc_sp = sp(); + int inputs, depth; +- bool have_se = !stopped() && compute_stack_effects(inputs, depth); +- assert(!have_se || pre_bc_sp >= inputs, "have enough stack to execute this BC"); ++ bool have_se = !stopped() && compute_stack_effects(inputs, depth, /*for_parse*/ true); ++ assert(!have_se || pre_bc_sp >= inputs, err_msg("have enough stack to execute this BC: pre_bc_sp=%d, inputs=%d", pre_bc_sp, inputs)); + #endif //ASSERT + + do_one_bytecode(); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/phaseX.hpp +--- openjdk/hotspot/src/share/vm/opto/phaseX.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/opto/phaseX.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -193,6 +193,7 @@ + // If you want the type of a very new (untransformed) node, + // you must use type_or_null, and test the result for NULL. 
+ const Type* type(const Node* n) const { ++ assert(n != NULL, "must not be null"); + const Type* t = _types.fast_lookup(n->_idx); + assert(t != NULL, "must set before get"); + return t; +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/prims/jvmtiTagMap.cpp +--- openjdk/hotspot/src/share/vm/prims/jvmtiTagMap.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/prims/jvmtiTagMap.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -3162,9 +3162,6 @@ + if (fr->is_entry_frame()) { + last_entry_frame = fr; + } +- if (fr->is_ricochet_frame()) { +- fr->oops_ricochet_do(blk, vf->register_map()); +- } + } + + vf = vf->sender(); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/prims/methodHandleWalk.cpp +--- openjdk/hotspot/src/share/vm/prims/methodHandleWalk.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ /dev/null Thu Jan 01 00:00:00 1970 +0000 +@@ -1,2089 +0,0 @@ +-/* +- * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +-#include "interpreter/rewriter.hpp" +-#include "memory/oopFactory.hpp" +-#include "prims/methodHandleWalk.hpp" +- +-/* +- * JSR 292 reference implementation: method handle structure analysis +- */ +- +-#ifdef PRODUCT +-#define print_method_handle(mh) {} +-#else //PRODUCT +-extern "C" void print_method_handle(oop mh); +-#endif //PRODUCT +- +-// ----------------------------------------------------------------------------- +-// MethodHandleChain +- +-void MethodHandleChain::set_method_handle(Handle mh, TRAPS) { +- if (!java_lang_invoke_MethodHandle::is_instance(mh())) lose("bad method handle", CHECK); +- +- // set current method handle and unpack partially +- _method_handle = mh; +- _is_last = false; +- _is_bound = false; +- _arg_slot = -1; +- _arg_type = T_VOID; +- _conversion = -1; +- _last_invoke = Bytecodes::_nop; //arbitrary non-garbage +- +- if (java_lang_invoke_DirectMethodHandle::is_instance(mh())) { +- set_last_method(mh(), THREAD); +- return; +- } +- if (java_lang_invoke_AdapterMethodHandle::is_instance(mh())) { +- _conversion = AdapterMethodHandle_conversion(); +- assert(_conversion != -1, "bad conv value"); +- assert(java_lang_invoke_BoundMethodHandle::is_instance(mh()), "also BMH"); +- } +- if (java_lang_invoke_BoundMethodHandle::is_instance(mh())) { +- if (!is_adapter()) // keep AMH and BMH separate in this model +- _is_bound = true; +- _arg_slot = BoundMethodHandle_vmargslot(); +- oop target = MethodHandle_vmtarget_oop(); +- if (!is_bound() || java_lang_invoke_MethodHandle::is_instance(target)) { +- _arg_type = compute_bound_arg_type(target, NULL, _arg_slot, CHECK); +- } else if (target != NULL && target->is_method()) { +- methodOop m = (methodOop) target; +- _arg_type = compute_bound_arg_type(NULL, m, _arg_slot, CHECK); +- set_last_method(mh(), CHECK); +- } else { +- _is_bound = false; // lose! 
+- } +- } +- if (is_bound() && _arg_type == T_VOID) { +- lose("bad vmargslot", CHECK); +- } +- if (!is_bound() && !is_adapter()) { +- lose("unrecognized MH type", CHECK); +- } +-} +- +- +-void MethodHandleChain::set_last_method(oop target, TRAPS) { +- _is_last = true; +- KlassHandle receiver_limit; int flags = 0; +- _last_method = MethodHandles::decode_method(target, receiver_limit, flags); +- if ((flags & MethodHandles::_dmf_has_receiver) == 0) +- _last_invoke = Bytecodes::_invokestatic; +- else if ((flags & MethodHandles::_dmf_does_dispatch) == 0) +- _last_invoke = Bytecodes::_invokespecial; +- else if ((flags & MethodHandles::_dmf_from_interface) != 0) +- _last_invoke = Bytecodes::_invokeinterface; +- else +- _last_invoke = Bytecodes::_invokevirtual; +-} +- +- +-BasicType MethodHandleChain::compute_bound_arg_type(oop target, methodOop m, int arg_slot, TRAPS) { +- // There is no direct indication of whether the argument is primitive or not. +- // It is implied by the _vmentry code, and by the MethodType of the target. 
+- BasicType arg_type = T_VOID; +- if (target != NULL) { +- oop mtype = java_lang_invoke_MethodHandle::type(target); +- int arg_num = MethodHandles::argument_slot_to_argnum(mtype, arg_slot); +- if (arg_num >= 0) { +- oop ptype = java_lang_invoke_MethodType::ptype(mtype, arg_num); +- arg_type = java_lang_Class::as_BasicType(ptype); +- } +- } else if (m != NULL) { +- // figure out the argument type from the slot +- // FIXME: make this explicit in the MH +- int cur_slot = m->size_of_parameters(); +- if (arg_slot >= cur_slot) +- return T_VOID; +- if (!m->is_static()) { +- cur_slot -= type2size[T_OBJECT]; +- if (cur_slot == arg_slot) +- return T_OBJECT; +- } +- ResourceMark rm(THREAD); +- for (SignatureStream ss(m->signature()); !ss.is_done(); ss.next()) { +- BasicType bt = ss.type(); +- cur_slot -= type2size[bt]; +- if (cur_slot <= arg_slot) { +- if (cur_slot == arg_slot) +- arg_type = bt; +- break; +- } +- } +- } +- if (arg_type == T_ARRAY) +- arg_type = T_OBJECT; +- return arg_type; +-} +- +- +-void MethodHandleChain::lose(const char* msg, TRAPS) { +- _lose_message = msg; +-#ifdef ASSERT +- if (Verbose) { +- tty->print_cr(INTPTR_FORMAT " lose: %s", _method_handle(), msg); +- print(); +- } +-#endif +- if (!THREAD->is_Java_thread() || ((JavaThread*)THREAD)->thread_state() != _thread_in_vm) { +- // throw a preallocated exception +- THROW_OOP(Universe::virtual_machine_error_instance()); +- } +- THROW_MSG(vmSymbols::java_lang_InternalError(), msg); +-} +- +- +-#ifdef ASSERT +-static const char* adapter_ops[] = { +- "retype_only" , +- "retype_raw" , +- "check_cast" , +- "prim_to_prim" , +- "ref_to_prim" , +- "prim_to_ref" , +- "swap_args" , +- "rot_args" , +- "dup_args" , +- "drop_args" , +- "collect_args" , +- "spread_args" , +- "fold_args" +-}; +- +-static const char* adapter_op_to_string(int op) { +- if (op >= 0 && op < (int)ARRAY_SIZE(adapter_ops)) +- return adapter_ops[op]; +- return "unknown_op"; +-} +- +-void MethodHandleChain::print(oopDesc* m) { +- HandleMark hm; 
+- ResourceMark rm; +- Handle mh(m); +- EXCEPTION_MARK; +- MethodHandleChain mhc(mh, THREAD); +- if (HAS_PENDING_EXCEPTION) { +- oop ex = THREAD->pending_exception(); +- CLEAR_PENDING_EXCEPTION; +- ex->print(); +- return; +- } +- mhc.print(); +-} +- +- +-void MethodHandleChain::print() { +- EXCEPTION_MARK; +- print_impl(THREAD); +- if (HAS_PENDING_EXCEPTION) { +- oop ex = THREAD->pending_exception(); +- CLEAR_PENDING_EXCEPTION; +- ex->print(); +- } +-} +- +-void MethodHandleChain::print_impl(TRAPS) { +- ResourceMark rm; +- +- MethodHandleChain chain(_root, CHECK); +- for (;;) { +- tty->print(INTPTR_FORMAT ": ", chain.method_handle()()); +- if (chain.is_bound()) { +- tty->print("bound: arg_type %s arg_slot %d", +- type2name(chain.bound_arg_type()), +- chain.bound_arg_slot()); +- oop o = chain.bound_arg_oop(); +- if (o != NULL) { +- if (o->is_instance()) { +- tty->print(" instance %s", o->klass()->klass_part()->internal_name()); +- if (java_lang_invoke_CountingMethodHandle::is_instance(o)) { +- tty->print(" vmcount: %d", java_lang_invoke_CountingMethodHandle::vmcount(o)); +- } +- } else { +- o->print(); +- } +- } +- oop vmt = chain.vmtarget_oop(); +- if (vmt != NULL) { +- if (vmt->is_method()) { +- tty->print(" "); +- methodOop(vmt)->print_short_name(tty); +- } else if (java_lang_invoke_MethodHandle::is_instance(vmt)) { +- tty->print(" method handle " INTPTR_FORMAT, vmt); +- } else { +- ShouldNotReachHere(); +- } +- } +- } else if (chain.is_adapter()) { +- tty->print("adapter: arg_slot %d conversion op %s", +- chain.adapter_arg_slot(), +- adapter_op_to_string(chain.adapter_conversion_op())); +- switch (chain.adapter_conversion_op()) { +- case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY: +- if (java_lang_invoke_CountingMethodHandle::is_instance(chain.method_handle_oop())) { +- tty->print(" vmcount: %d", java_lang_invoke_CountingMethodHandle::vmcount(chain.method_handle_oop())); +- } +- case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW: +- case 
java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST: +- case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM: +- case java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM: +- break; +- +- case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF: { +- tty->print(" src_type = %s", type2name(chain.adapter_conversion_src_type())); +- break; +- } +- +- case java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS: +- case java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS: { +- int dest_arg_slot = chain.adapter_conversion_vminfo(); +- tty->print(" dest_arg_slot %d type %s", dest_arg_slot, type2name(chain.adapter_conversion_src_type())); +- break; +- } +- +- case java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS: +- case java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS: { +- int dup_slots = chain.adapter_conversion_stack_pushes(); +- tty->print(" pushes %d", dup_slots); +- break; +- } +- +- case java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS: +- case java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS: { +- int coll_slots = chain.MethodHandle_vmslots(); +- tty->print(" coll_slots %d", coll_slots); +- break; +- } +- +- case java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS: { +- // Check the required length. 
+- int spread_slots = 1 + chain.adapter_conversion_stack_pushes(); +- tty->print(" spread_slots %d", spread_slots); +- break; +- } +- +- default: +- tty->print_cr("bad adapter conversion"); +- break; +- } +- } else { +- // DMH +- tty->print("direct: "); +- chain.last_method_oop()->print_short_name(tty); +- } +- +- tty->print(" ("); +- objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(chain.method_type_oop()); +- for (int i = ptypes->length() - 1; i >= 0; i--) { +- BasicType t = java_lang_Class::as_BasicType(ptypes->obj_at(i)); +- if (t == T_ARRAY) t = T_OBJECT; +- tty->print("%c", type2char(t)); +- if (t == T_LONG || t == T_DOUBLE) tty->print("_"); +- } +- tty->print(")"); +- BasicType rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(chain.method_type_oop())); +- if (rtype == T_ARRAY) rtype = T_OBJECT; +- tty->print("%c", type2char(rtype)); +- tty->cr(); +- if (!chain.is_last()) { +- chain.next(CHECK); +- } else { +- break; +- } +- } +-} +-#endif +- +- +-// ----------------------------------------------------------------------------- +-// MethodHandleWalker +- +-Bytecodes::Code MethodHandleWalker::conversion_code(BasicType src, BasicType dest) { +- if (is_subword_type(src)) { +- src = T_INT; // all subword src types act like int +- } +- if (src == dest) { +- return Bytecodes::_nop; +- } +- +-#define SRC_DEST(s,d) (((int)(s) << 4) + (int)(d)) +- switch (SRC_DEST(src, dest)) { +- case SRC_DEST(T_INT, T_LONG): return Bytecodes::_i2l; +- case SRC_DEST(T_INT, T_FLOAT): return Bytecodes::_i2f; +- case SRC_DEST(T_INT, T_DOUBLE): return Bytecodes::_i2d; +- case SRC_DEST(T_INT, T_BYTE): return Bytecodes::_i2b; +- case SRC_DEST(T_INT, T_CHAR): return Bytecodes::_i2c; +- case SRC_DEST(T_INT, T_SHORT): return Bytecodes::_i2s; +- +- case SRC_DEST(T_LONG, T_INT): return Bytecodes::_l2i; +- case SRC_DEST(T_LONG, T_FLOAT): return Bytecodes::_l2f; +- case SRC_DEST(T_LONG, T_DOUBLE): return Bytecodes::_l2d; +- +- case SRC_DEST(T_FLOAT, T_INT): return 
Bytecodes::_f2i; +- case SRC_DEST(T_FLOAT, T_LONG): return Bytecodes::_f2l; +- case SRC_DEST(T_FLOAT, T_DOUBLE): return Bytecodes::_f2d; +- +- case SRC_DEST(T_DOUBLE, T_INT): return Bytecodes::_d2i; +- case SRC_DEST(T_DOUBLE, T_LONG): return Bytecodes::_d2l; +- case SRC_DEST(T_DOUBLE, T_FLOAT): return Bytecodes::_d2f; +- } +-#undef SRC_DEST +- +- // cannot do it in one step, or at all +- return Bytecodes::_illegal; +-} +- +- +-// ----------------------------------------------------------------------------- +-// MethodHandleWalker::walk +-// +-MethodHandleWalker::ArgToken +-MethodHandleWalker::walk(TRAPS) { +- ArgToken empty = ArgToken(); // Empty return value. +- +- walk_incoming_state(CHECK_(empty)); +- +- for (;;) { +- set_method_handle(chain().method_handle_oop()); +- +- assert(_outgoing_argc == argument_count_slow(), "empty slots under control"); +- +- if (chain().is_adapter()) { +- int conv_op = chain().adapter_conversion_op(); +- int arg_slot = chain().adapter_arg_slot(); +- +- // Check that the arg_slot is valid. In most cases it must be +- // within range of the current arguments but there are some +- // exceptions. Those are sanity checked in their implemention +- // below. +- if ((arg_slot < 0 || arg_slot >= _outgoing.length()) && +- conv_op > java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW && +- conv_op != java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS && +- conv_op != java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS) { +- lose(err_msg("bad argument index %d", arg_slot), CHECK_(empty)); +- } +- +- bool retain_original_args = false; // used by fold/collect logic +- +- // perform the adapter action +- switch (conv_op) { +- case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY: +- // No changes to arguments; pass the bits through. +- break; +- +- case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW: { +- // To keep the verifier happy, emit bitwise ("raw") conversions as needed. 
+- // See MethodHandles::same_basic_type_for_arguments for allowed conversions. +- Handle incoming_mtype(THREAD, chain().method_type_oop()); +- Handle outgoing_mtype; +- { +- oop outgoing_mh_oop = chain().vmtarget_oop(); +- if (!java_lang_invoke_MethodHandle::is_instance(outgoing_mh_oop)) +- lose("outgoing target not a MethodHandle", CHECK_(empty)); +- outgoing_mtype = Handle(THREAD, java_lang_invoke_MethodHandle::type(outgoing_mh_oop)); +- } +- +- int nptypes = java_lang_invoke_MethodType::ptype_count(outgoing_mtype()); +- if (nptypes != java_lang_invoke_MethodType::ptype_count(incoming_mtype())) +- lose("incoming and outgoing parameter count do not agree", CHECK_(empty)); +- +- // Argument types. +- for (int i = 0, slot = _outgoing.length() - 1; slot >= 0; slot--) { +- if (arg_type(slot) == T_VOID) continue; +- +- klassOop src_klass = NULL; +- klassOop dst_klass = NULL; +- BasicType src = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(incoming_mtype(), i), &src_klass); +- BasicType dst = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(outgoing_mtype(), i), &dst_klass); +- retype_raw_argument_type(src, dst, slot, CHECK_(empty)); +- i++; // We need to skip void slots at the top of the loop. +- } +- +- // Return type. 
+- { +- BasicType src = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(incoming_mtype())); +- BasicType dst = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(outgoing_mtype())); +- retype_raw_return_type(src, dst, CHECK_(empty)); +- } +- break; +- } +- +- case java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST: { +- // checkcast the Nth outgoing argument in place +- klassOop dest_klass = NULL; +- BasicType dest = java_lang_Class::as_BasicType(chain().adapter_arg_oop(), &dest_klass); +- assert(dest == T_OBJECT, ""); +- ArgToken arg = _outgoing.at(arg_slot); +- assert(dest == arg.basic_type(), ""); +- arg = make_conversion(T_OBJECT, dest_klass, Bytecodes::_checkcast, arg, CHECK_(empty)); +- // replace the object by the result of the cast, to make the compiler happy: +- change_argument(T_OBJECT, arg_slot, T_OBJECT, arg); +- debug_only(dest_klass = (klassOop)badOop); +- break; +- } +- +- case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM: { +- // i2l, etc., on the Nth outgoing argument in place +- BasicType src = chain().adapter_conversion_src_type(), +- dest = chain().adapter_conversion_dest_type(); +- ArgToken arg = _outgoing.at(arg_slot); +- Bytecodes::Code bc = conversion_code(src, dest); +- if (bc == Bytecodes::_nop) { +- break; +- } else if (bc != Bytecodes::_illegal) { +- arg = make_conversion(dest, NULL, bc, arg, CHECK_(empty)); +- } else if (is_subword_type(dest)) { +- bc = conversion_code(src, T_INT); +- if (bc != Bytecodes::_illegal) { +- arg = make_conversion(dest, NULL, bc, arg, CHECK_(empty)); +- bc = conversion_code(T_INT, dest); +- arg = make_conversion(dest, NULL, bc, arg, CHECK_(empty)); +- } +- } +- if (bc == Bytecodes::_illegal) { +- lose(err_msg("bad primitive conversion for %s -> %s", type2name(src), type2name(dest)), CHECK_(empty)); +- } +- change_argument(src, arg_slot, dest, arg); +- break; +- } +- +- case java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM: { +- // checkcast to wrapper type & call 
intValue, etc. +- BasicType dest = chain().adapter_conversion_dest_type(); +- ArgToken arg = _outgoing.at(arg_slot); +- arg = make_conversion(T_OBJECT, SystemDictionary::box_klass(dest), +- Bytecodes::_checkcast, arg, CHECK_(empty)); +- vmIntrinsics::ID unboxer = vmIntrinsics::for_unboxing(dest); +- if (unboxer == vmIntrinsics::_none) { +- lose("no unboxing method", CHECK_(empty)); +- } +- ArgToken arglist[2]; +- arglist[0] = arg; // outgoing 'this' +- arglist[1] = ArgToken(); // sentinel +- arg = make_invoke(methodHandle(), unboxer, Bytecodes::_invokevirtual, false, 1, &arglist[0], CHECK_(empty)); +- change_argument(T_OBJECT, arg_slot, dest, arg); +- break; +- } +- +- case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF: { +- // call wrapper type.valueOf +- BasicType src = chain().adapter_conversion_src_type(); +- vmIntrinsics::ID boxer = vmIntrinsics::for_boxing(src); +- if (boxer == vmIntrinsics::_none) { +- lose("no boxing method", CHECK_(empty)); +- } +- ArgToken arg = _outgoing.at(arg_slot); +- ArgToken arglist[2]; +- arglist[0] = arg; // outgoing value +- arglist[1] = ArgToken(); // sentinel +- arg = make_invoke(methodHandle(), boxer, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(empty)); +- change_argument(src, arg_slot, T_OBJECT, arg); +- break; +- } +- +- case java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS: { +- int dest_arg_slot = chain().adapter_conversion_vminfo(); +- if (!has_argument(dest_arg_slot)) { +- lose("bad swap index", CHECK_(empty)); +- } +- // a simple swap between two arguments +- if (arg_slot > dest_arg_slot) { +- int tmp = arg_slot; +- arg_slot = dest_arg_slot; +- dest_arg_slot = tmp; +- } +- ArgToken a1 = _outgoing.at(arg_slot); +- ArgToken a2 = _outgoing.at(dest_arg_slot); +- change_argument(a2.basic_type(), dest_arg_slot, a1); +- change_argument(a1.basic_type(), arg_slot, a2); +- break; +- } +- +- case java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS: { +- int limit_raw = chain().adapter_conversion_vminfo(); +- 
bool rot_down = (arg_slot < limit_raw); +- int limit_bias = (rot_down ? MethodHandles::OP_ROT_ARGS_DOWN_LIMIT_BIAS : 0); +- int limit_slot = limit_raw - limit_bias; +- if ((uint)limit_slot > (uint)_outgoing.length()) { +- lose("bad rotate index", CHECK_(empty)); +- } +- // Rotate the source argument (plus following N slots) into the +- // position occupied by the dest argument (plus following N slots). +- int rotate_count = type2size[chain().adapter_conversion_src_type()]; +- // (no other rotate counts are currently supported) +- if (rot_down) { +- for (int i = 0; i < rotate_count; i++) { +- ArgToken temp = _outgoing.at(arg_slot); +- _outgoing.remove_at(arg_slot); +- _outgoing.insert_before(limit_slot - 1, temp); +- } +- } else { // arg_slot > limit_slot => rotate_up +- for (int i = 0; i < rotate_count; i++) { +- ArgToken temp = _outgoing.at(arg_slot + rotate_count - 1); +- _outgoing.remove_at(arg_slot + rotate_count - 1); +- _outgoing.insert_before(limit_slot, temp); +- } +- } +- assert(_outgoing_argc == argument_count_slow(), "empty slots under control"); +- break; +- } +- +- case java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS: { +- int dup_slots = chain().adapter_conversion_stack_pushes(); +- if (dup_slots <= 0) { +- lose("bad dup count", CHECK_(empty)); +- } +- for (int i = 0; i < dup_slots; i++) { +- ArgToken dup = _outgoing.at(arg_slot + 2*i); +- if (dup.basic_type() != T_VOID) _outgoing_argc += 1; +- _outgoing.insert_before(i, dup); +- } +- assert(_outgoing_argc == argument_count_slow(), "empty slots under control"); +- break; +- } +- +- case java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS: { +- int drop_slots = -chain().adapter_conversion_stack_pushes(); +- if (drop_slots <= 0) { +- lose("bad drop count", CHECK_(empty)); +- } +- for (int i = 0; i < drop_slots; i++) { +- ArgToken drop = _outgoing.at(arg_slot); +- if (drop.basic_type() != T_VOID) _outgoing_argc -= 1; +- _outgoing.remove_at(arg_slot); +- } +- assert(_outgoing_argc == 
argument_count_slow(), "empty slots under control"); +- break; +- } +- +- case java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS: +- retain_original_args = true; // and fall through: +- case java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS: { +- // call argument MH recursively +- //{static int x; if (!x++) print_method_handle(chain().method_handle_oop()); --x;} +- Handle recursive_mh(THREAD, chain().adapter_arg_oop()); +- if (!java_lang_invoke_MethodHandle::is_instance(recursive_mh())) { +- lose("recursive target not a MethodHandle", CHECK_(empty)); +- } +- Handle recursive_mtype(THREAD, java_lang_invoke_MethodHandle::type(recursive_mh())); +- int argc = java_lang_invoke_MethodType::ptype_count(recursive_mtype()); +- int coll_slots = java_lang_invoke_MethodHandle::vmslots(recursive_mh()); +- BasicType rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(recursive_mtype())); +- ArgToken* arglist = NEW_RESOURCE_ARRAY(ArgToken, 1 + argc + 1); // 1+: mh, +1: sentinel +- arglist[0] = make_oop_constant(recursive_mh(), CHECK_(empty)); +- if (arg_slot < 0 || coll_slots < 0 || arg_slot + coll_slots > _outgoing.length()) { +- lose("bad fold/collect arg slot", CHECK_(empty)); +- } +- for (int i = 0, slot = arg_slot + coll_slots - 1; slot >= arg_slot; slot--) { +- ArgToken arg_state = _outgoing.at(slot); +- BasicType arg_type = arg_state.basic_type(); +- if (arg_type == T_VOID) continue; +- ArgToken arg = _outgoing.at(slot); +- if (i >= argc) { lose("bad fold/collect arg", CHECK_(empty)); } +- arglist[1+i] = arg; +- if (!retain_original_args) +- change_argument(arg_type, slot, T_VOID, ArgToken(tt_void)); +- i++; +- } +- arglist[1+argc] = ArgToken(); // sentinel +- oop invoker = java_lang_invoke_MethodTypeForm::vmlayout( +- java_lang_invoke_MethodType::form(recursive_mtype()) ); +- if (invoker == NULL || !invoker->is_method()) { +- lose("bad vmlayout slot", CHECK_(empty)); +- } +- // FIXME: consider inlining the invokee at the bytecode level +- 
ArgToken ret = make_invoke(methodHandle(THREAD, methodOop(invoker)), vmIntrinsics::_invokeGeneric, +- Bytecodes::_invokevirtual, false, 1+argc, &arglist[0], CHECK_(empty)); +- // The iid = _invokeGeneric really means to adjust reference types as needed. +- DEBUG_ONLY(invoker = NULL); +- if (rtype == T_OBJECT) { +- klassOop rklass = java_lang_Class::as_klassOop( java_lang_invoke_MethodType::rtype(recursive_mtype()) ); +- if (rklass != SystemDictionary::Object_klass() && +- !Klass::cast(rklass)->is_interface()) { +- // preserve type safety +- ret = make_conversion(T_OBJECT, rklass, Bytecodes::_checkcast, ret, CHECK_(empty)); +- } +- } +- if (rtype != T_VOID) { +- int ret_slot = arg_slot + (retain_original_args ? coll_slots : 0); +- change_argument(T_VOID, ret_slot, rtype, ret); +- } +- break; +- } +- +- case java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS: { +- klassOop array_klass_oop = NULL; +- BasicType array_type = java_lang_Class::as_BasicType(chain().adapter_arg_oop(), +- &array_klass_oop); +- assert(array_type == T_OBJECT, ""); +- assert(Klass::cast(array_klass_oop)->oop_is_array(), ""); +- arrayKlassHandle array_klass(THREAD, array_klass_oop); +- debug_only(array_klass_oop = (klassOop)badOop); +- +- klassOop element_klass_oop = NULL; +- BasicType element_type = java_lang_Class::as_BasicType(array_klass->component_mirror(), +- &element_klass_oop); +- KlassHandle element_klass(THREAD, element_klass_oop); +- debug_only(element_klass_oop = (klassOop)badOop); +- +- // Fetch the argument, which we will cast to the required array type. +- ArgToken arg = _outgoing.at(arg_slot); +- assert(arg.basic_type() == T_OBJECT, ""); +- ArgToken array_arg = arg; +- array_arg = make_conversion(T_OBJECT, array_klass(), Bytecodes::_checkcast, array_arg, CHECK_(empty)); +- change_argument(T_OBJECT, arg_slot, T_VOID, ArgToken(tt_void)); +- +- // Check the required length. 
+- int spread_slots = 1 + chain().adapter_conversion_stack_pushes(); +- int spread_length = spread_slots; +- if (type2size[element_type] == 2) { +- if (spread_slots % 2 != 0) spread_slots = -1; // force error +- spread_length = spread_slots / 2; +- } +- if (spread_slots < 0) { +- lose("bad spread length", CHECK_(empty)); +- } +- +- jvalue length_jvalue; length_jvalue.i = spread_length; +- ArgToken length_arg = make_prim_constant(T_INT, &length_jvalue, CHECK_(empty)); +- // Call a built-in method known to the JVM to validate the length. +- ArgToken arglist[3]; +- arglist[0] = array_arg; // value to check +- arglist[1] = length_arg; // length to check +- arglist[2] = ArgToken(); // sentinel +- make_invoke(methodHandle(), vmIntrinsics::_checkSpreadArgument, +- Bytecodes::_invokestatic, false, 2, &arglist[0], CHECK_(empty)); +- +- // Spread out the array elements. +- Bytecodes::Code aload_op = Bytecodes::_nop; +- switch (element_type) { +- case T_INT: aload_op = Bytecodes::_iaload; break; +- case T_LONG: aload_op = Bytecodes::_laload; break; +- case T_FLOAT: aload_op = Bytecodes::_faload; break; +- case T_DOUBLE: aload_op = Bytecodes::_daload; break; +- case T_OBJECT: aload_op = Bytecodes::_aaload; break; +- case T_BOOLEAN: // fall through: +- case T_BYTE: aload_op = Bytecodes::_baload; break; +- case T_CHAR: aload_op = Bytecodes::_caload; break; +- case T_SHORT: aload_op = Bytecodes::_saload; break; +- default: lose("primitive array NYI", CHECK_(empty)); +- } +- int ap = arg_slot; +- for (int i = 0; i < spread_length; i++) { +- jvalue offset_jvalue; offset_jvalue.i = i; +- ArgToken offset_arg = make_prim_constant(T_INT, &offset_jvalue, CHECK_(empty)); +- ArgToken element_arg = make_fetch(element_type, element_klass(), aload_op, array_arg, offset_arg, CHECK_(empty)); +- change_argument(T_VOID, ap, element_type, element_arg); +- //ap += type2size[element_type]; // don't do this; insert next arg to *right* of previous +- } +- break; +- } +- +- default: +- lose("bad 
adapter conversion", CHECK_(empty)); +- break; +- } +- } +- +- if (chain().is_bound()) { +- // push a new argument +- BasicType arg_type = chain().bound_arg_type(); +- jint arg_slot = chain().bound_arg_slot(); +- oop arg_oop = chain().bound_arg_oop(); +- ArgToken arg; +- if (arg_type == T_OBJECT) { +- arg = make_oop_constant(arg_oop, CHECK_(empty)); +- } else { +- jvalue arg_value; +- BasicType bt = java_lang_boxing_object::get_value(arg_oop, &arg_value); +- if (bt == arg_type || (bt == T_INT && is_subword_type(arg_type))) { +- arg = make_prim_constant(arg_type, &arg_value, CHECK_(empty)); +- } else { +- lose(err_msg("bad bound value: arg_type %s boxing %s", type2name(arg_type), type2name(bt)), CHECK_(empty)); +- } +- } +- DEBUG_ONLY(arg_oop = badOop); +- change_argument(T_VOID, arg_slot, arg_type, arg); +- } +- +- // this test must come after the body of the loop +- if (!chain().is_last()) { +- chain().next(CHECK_(empty)); +- } else { +- break; +- } +- } +- +- // finish the sequence with a tail-call to the ultimate target +- // parameters are passed in logical order (recv 1st), not slot order +- ArgToken* arglist = NEW_RESOURCE_ARRAY(ArgToken, _outgoing.length() + 1); +- int ap = 0; +- for (int i = _outgoing.length() - 1; i >= 0; i--) { +- ArgToken arg_state = _outgoing.at(i); +- if (arg_state.basic_type() == T_VOID) continue; +- arglist[ap++] = _outgoing.at(i); +- } +- assert(ap == _outgoing_argc, ""); +- arglist[ap] = ArgToken(); // add a sentinel, for the sake of asserts +- return make_invoke(chain().last_method(), +- vmIntrinsics::_none, +- chain().last_invoke_code(), true, +- ap, arglist, THREAD); +-} +- +- +-// ----------------------------------------------------------------------------- +-// MethodHandleWalker::walk_incoming_state +-// +-void MethodHandleWalker::walk_incoming_state(TRAPS) { +- Handle mtype(THREAD, chain().method_type_oop()); +- int nptypes = java_lang_invoke_MethodType::ptype_count(mtype()); +- _outgoing_argc = nptypes; +- int argp = 
nptypes - 1; +- if (argp >= 0) { +- _outgoing.at_grow(argp, ArgToken(tt_void)); // presize +- } +- for (int i = 0; i < nptypes; i++) { +- klassOop arg_type_klass = NULL; +- BasicType arg_type = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(mtype(), i), &arg_type_klass); +- int index = new_local_index(arg_type); +- ArgToken arg = make_parameter(arg_type, arg_type_klass, index, CHECK); +- DEBUG_ONLY(arg_type_klass = (klassOop) NULL); +- _outgoing.at_put(argp, arg); +- if (type2size[arg_type] == 2) { +- // add the extra slot, so we can model the JVM stack +- _outgoing.insert_before(argp+1, ArgToken(tt_void)); +- } +- --argp; +- } +- // call make_parameter at the end of the list for the return type +- klassOop ret_type_klass = NULL; +- BasicType ret_type = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(mtype()), &ret_type_klass); +- ArgToken ret = make_parameter(ret_type, ret_type_klass, -1, CHECK); +- // ignore ret; client can catch it if needed +- +- assert(_outgoing_argc == argument_count_slow(), "empty slots under control"); +- +- verify_args_and_signature(CHECK); +-} +- +- +-#ifdef ASSERT +-void MethodHandleWalker::verify_args_and_signature(TRAPS) { +- int index = _outgoing.length() - 1; +- objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(chain().method_type_oop()); +- for (int i = 0, limit = ptypes->length(); i < limit; i++) { +- BasicType t = java_lang_Class::as_BasicType(ptypes->obj_at(i)); +- if (t == T_ARRAY) t = T_OBJECT; +- if (t == T_LONG || t == T_DOUBLE) { +- assert(T_VOID == _outgoing.at(index).basic_type(), "types must match"); +- index--; +- } +- assert(t == _outgoing.at(index).basic_type(), "types must match"); +- index--; +- } +-} +-#endif +- +- +-// ----------------------------------------------------------------------------- +-// MethodHandleWalker::change_argument +-// +-// This is messy because some kinds of arguments are paired with +-// companion slots containing an empty value. 
+-void MethodHandleWalker::change_argument(BasicType old_type, int slot, const ArgToken& new_arg) { +- BasicType new_type = new_arg.basic_type(); +- int old_size = type2size[old_type]; +- int new_size = type2size[new_type]; +- if (old_size == new_size) { +- // simple case first +- _outgoing.at_put(slot, new_arg); +- } else if (old_size > new_size) { +- for (int i = old_size - 1; i >= new_size; i--) { +- assert((i != 0) == (_outgoing.at(slot + i).basic_type() == T_VOID), ""); +- _outgoing.remove_at(slot + i); +- } +- if (new_size > 0) +- _outgoing.at_put(slot, new_arg); +- else +- _outgoing_argc -= 1; // deleted a real argument +- } else { +- for (int i = old_size; i < new_size; i++) { +- _outgoing.insert_before(slot + i, ArgToken(tt_void)); +- } +- _outgoing.at_put(slot, new_arg); +- if (old_size == 0) +- _outgoing_argc += 1; // inserted a real argument +- } +- assert(_outgoing_argc == argument_count_slow(), "empty slots under control"); +-} +- +- +-#ifdef ASSERT +-int MethodHandleWalker::argument_count_slow() { +- int args_seen = 0; +- for (int i = _outgoing.length() - 1; i >= 0; i--) { +- if (_outgoing.at(i).basic_type() != T_VOID) { +- ++args_seen; +- if (_outgoing.at(i).basic_type() == T_LONG || +- _outgoing.at(i).basic_type() == T_DOUBLE) { +- assert(_outgoing.at(i + 1).basic_type() == T_VOID, "should only follow two word"); +- } +- } else { +- assert(_outgoing.at(i - 1).basic_type() == T_LONG || +- _outgoing.at(i - 1).basic_type() == T_DOUBLE, "should only follow two word"); +- } +- } +- return args_seen; +-} +-#endif +- +- +-// ----------------------------------------------------------------------------- +-// MethodHandleWalker::retype_raw_conversion +-// +-// Do the raw retype conversions for OP_RETYPE_RAW. 
+-void MethodHandleWalker::retype_raw_conversion(BasicType src, BasicType dst, bool for_return, int slot, TRAPS) { +- if (src != dst) { +- if (MethodHandles::same_basic_type_for_returns(src, dst, /*raw*/ true)) { +- if (MethodHandles::is_float_fixed_reinterpretation_cast(src, dst)) { +- vmIntrinsics::ID iid = vmIntrinsics::for_raw_conversion(src, dst); +- if (iid == vmIntrinsics::_none) { +- lose("no raw conversion method", CHECK); +- } +- ArgToken arglist[2]; +- if (!for_return) { +- // argument type conversion +- ArgToken arg = _outgoing.at(slot); +- assert(arg.token_type() >= tt_symbolic || src == arg.basic_type(), "sanity"); +- arglist[0] = arg; // outgoing 'this' +- arglist[1] = ArgToken(); // sentinel +- arg = make_invoke(methodHandle(), iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK); +- change_argument(src, slot, dst, arg); +- } else { +- // return type conversion +- if (_return_conv == vmIntrinsics::_none) { +- _return_conv = iid; +- } else if (_return_conv == vmIntrinsics::for_raw_conversion(dst, src)) { +- _return_conv = vmIntrinsics::_none; +- } else if (_return_conv != zero_return_conv()) { +- lose(err_msg("requested raw return conversion not allowed: %s -> %s (before %s)", type2name(src), type2name(dst), vmIntrinsics::name_at(_return_conv)), CHECK); +- } +- } +- } else { +- // Nothing to do. +- } +- } else if (for_return && (!is_subword_type(src) || !is_subword_type(dst))) { +- // This can occur in exception-throwing MHs, which have a fictitious return value encoded as Void or Empty. 
+- _return_conv = zero_return_conv(); +- } else if (src == T_OBJECT && is_java_primitive(dst)) { +- // ref-to-prim: discard ref, push zero +- lose("requested ref-to-prim conversion not expected", CHECK); +- } else { +- lose(err_msg("requested raw conversion not allowed: %s -> %s", type2name(src), type2name(dst)), CHECK); +- } +- } +-} +- +- +-// ----------------------------------------------------------------------------- +-// MethodHandleCompiler +- +-MethodHandleCompiler::MethodHandleCompiler(Handle root, Symbol* name, Symbol* signature, int invoke_count, bool is_invokedynamic, TRAPS) +- : MethodHandleWalker(root, is_invokedynamic, THREAD), +- _invoke_count(invoke_count), +- _thread(THREAD), +- _bytecode(THREAD, 50), +- _constants(THREAD, 10), +- _non_bcp_klasses(THREAD, 5), +- _cur_stack(0), +- _max_stack(0), +- _rtype(T_ILLEGAL), +- _selectAlternative_bci(-1), +- _taken_count(0), +- _not_taken_count(0) +-{ +- +- // Element zero is always the null constant. +- (void) _constants.append(NULL); +- +- // Set name and signature index. +- _name_index = cpool_symbol_put(name); +- _signature_index = cpool_symbol_put(signature); +- +- // To make the resulting methods more recognizable by +- // stack walkers and compiler heuristics, +- // we put them in holder class MethodHandle. +- // See klass_is_method_handle_adapter_holder +- // and methodOopDesc::is_method_handle_adapter. +- _target_klass = SystemDictionaryHandles::MethodHandle_klass(); +- +- check_non_bcp_klasses(java_lang_invoke_MethodHandle::type(root()), CHECK); +- +- // Get return type klass. +- Handle first_mtype(THREAD, chain().method_type_oop()); +- // _rklass is NULL for primitives. +- _rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(first_mtype()), &_rklass); +- if (_rtype == T_ARRAY) _rtype = T_OBJECT; +- +- ArgumentSizeComputer args(signature); +- int params = args.size() + 1; // Incoming arguments plus receiver. +- _num_params = for_invokedynamic() ? 
params - 1 : params; // XXX Check if callee is static? +-} +- +- +-// ----------------------------------------------------------------------------- +-// MethodHandleCompiler::compile +-// +-// Compile this MethodHandle into a bytecode adapter and return a +-// methodOop. +-methodHandle MethodHandleCompiler::compile(TRAPS) { +- assert(_thread == THREAD, "must be same thread"); +- methodHandle nullHandle; +- (void) walk(CHECK_(nullHandle)); +- record_non_bcp_klasses(); +- return get_method_oop(CHECK_(nullHandle)); +-} +- +- +-void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index, int args_size) { +- Bytecodes::check(op); // Are we legal? +- +- switch (op) { +- // b +- case Bytecodes::_aconst_null: +- case Bytecodes::_iconst_m1: +- case Bytecodes::_iconst_0: +- case Bytecodes::_iconst_1: +- case Bytecodes::_iconst_2: +- case Bytecodes::_iconst_3: +- case Bytecodes::_iconst_4: +- case Bytecodes::_iconst_5: +- case Bytecodes::_lconst_0: +- case Bytecodes::_lconst_1: +- case Bytecodes::_fconst_0: +- case Bytecodes::_fconst_1: +- case Bytecodes::_fconst_2: +- case Bytecodes::_dconst_0: +- case Bytecodes::_dconst_1: +- case Bytecodes::_iload_0: +- case Bytecodes::_iload_1: +- case Bytecodes::_iload_2: +- case Bytecodes::_iload_3: +- case Bytecodes::_lload_0: +- case Bytecodes::_lload_1: +- case Bytecodes::_lload_2: +- case Bytecodes::_lload_3: +- case Bytecodes::_fload_0: +- case Bytecodes::_fload_1: +- case Bytecodes::_fload_2: +- case Bytecodes::_fload_3: +- case Bytecodes::_dload_0: +- case Bytecodes::_dload_1: +- case Bytecodes::_dload_2: +- case Bytecodes::_dload_3: +- case Bytecodes::_aload_0: +- case Bytecodes::_aload_1: +- case Bytecodes::_aload_2: +- case Bytecodes::_aload_3: +- case Bytecodes::_istore_0: +- case Bytecodes::_istore_1: +- case Bytecodes::_istore_2: +- case Bytecodes::_istore_3: +- case Bytecodes::_lstore_0: +- case Bytecodes::_lstore_1: +- case Bytecodes::_lstore_2: +- case Bytecodes::_lstore_3: +- case Bytecodes::_fstore_0: +- case 
Bytecodes::_fstore_1: +- case Bytecodes::_fstore_2: +- case Bytecodes::_fstore_3: +- case Bytecodes::_dstore_0: +- case Bytecodes::_dstore_1: +- case Bytecodes::_dstore_2: +- case Bytecodes::_dstore_3: +- case Bytecodes::_astore_0: +- case Bytecodes::_astore_1: +- case Bytecodes::_astore_2: +- case Bytecodes::_astore_3: +- case Bytecodes::_iand: +- case Bytecodes::_i2l: +- case Bytecodes::_i2f: +- case Bytecodes::_i2d: +- case Bytecodes::_i2b: +- case Bytecodes::_i2c: +- case Bytecodes::_i2s: +- case Bytecodes::_l2i: +- case Bytecodes::_l2f: +- case Bytecodes::_l2d: +- case Bytecodes::_f2i: +- case Bytecodes::_f2l: +- case Bytecodes::_f2d: +- case Bytecodes::_d2i: +- case Bytecodes::_d2l: +- case Bytecodes::_d2f: +- case Bytecodes::_iaload: +- case Bytecodes::_laload: +- case Bytecodes::_faload: +- case Bytecodes::_daload: +- case Bytecodes::_aaload: +- case Bytecodes::_baload: +- case Bytecodes::_caload: +- case Bytecodes::_saload: +- case Bytecodes::_ireturn: +- case Bytecodes::_lreturn: +- case Bytecodes::_freturn: +- case Bytecodes::_dreturn: +- case Bytecodes::_areturn: +- case Bytecodes::_return: +- assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_b, "wrong bytecode format"); +- _bytecode.push(op); +- break; +- +- // bi +- case Bytecodes::_ldc: +- assert(Bytecodes::format_bits(op, false) == (Bytecodes::_fmt_b|Bytecodes::_fmt_has_k), "wrong bytecode format"); +- if (index == (index & 0xff)) { +- _bytecode.push(op); +- _bytecode.push(index); +- } else { +- _bytecode.push(Bytecodes::_ldc_w); +- _bytecode.push(index >> 8); +- _bytecode.push(index); +- } +- break; +- +- case Bytecodes::_iload: +- case Bytecodes::_lload: +- case Bytecodes::_fload: +- case Bytecodes::_dload: +- case Bytecodes::_aload: +- case Bytecodes::_istore: +- case Bytecodes::_lstore: +- case Bytecodes::_fstore: +- case Bytecodes::_dstore: +- case Bytecodes::_astore: +- assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bi, "wrong bytecode format"); +- if (index == (index & 
0xff)) { +- _bytecode.push(op); +- _bytecode.push(index); +- } else { +- // doesn't fit in a u2 +- _bytecode.push(Bytecodes::_wide); +- _bytecode.push(op); +- _bytecode.push(index >> 8); +- _bytecode.push(index); +- } +- break; +- +- // bkk +- case Bytecodes::_ldc_w: +- case Bytecodes::_ldc2_w: +- case Bytecodes::_checkcast: +- assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bkk, "wrong bytecode format"); +- assert((unsigned short) index == index, "index does not fit in 16-bit"); +- _bytecode.push(op); +- _bytecode.push(index >> 8); +- _bytecode.push(index); +- break; +- +- // bJJ +- case Bytecodes::_invokestatic: +- case Bytecodes::_invokespecial: +- case Bytecodes::_invokevirtual: +- assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format"); +- assert((unsigned short) index == index, "index does not fit in 16-bit"); +- _bytecode.push(op); +- _bytecode.push(index >> 8); +- _bytecode.push(index); +- break; +- +- case Bytecodes::_invokeinterface: +- assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format"); +- assert((unsigned short) index == index, "index does not fit in 16-bit"); +- assert(args_size > 0, "valid args_size"); +- _bytecode.push(op); +- _bytecode.push(index >> 8); +- _bytecode.push(index); +- _bytecode.push(args_size); +- _bytecode.push(0); +- break; +- +- case Bytecodes::_ifeq: +- assert((unsigned short) index == index, "index does not fit in 16-bit"); +- _bytecode.push(op); +- _bytecode.push(index >> 8); +- _bytecode.push(index); +- break; +- +- default: +- ShouldNotReachHere(); +- } +-} +- +-void MethodHandleCompiler::update_branch_dest(int src, int dst) { +- switch (_bytecode.at(src)) { +- case Bytecodes::_ifeq: +- dst -= src; // compute the offset +- assert((unsigned short) dst == dst, "index does not fit in 16-bit"); +- _bytecode.at_put(src + 1, dst >> 8); +- _bytecode.at_put(src + 2, dst); +- break; +- default: +- ShouldNotReachHere(); +- } +-} +- +-void 
MethodHandleCompiler::emit_load(ArgToken arg) { +- TokenType tt = arg.token_type(); +- BasicType bt = arg.basic_type(); +- +- switch (tt) { +- case tt_parameter: +- case tt_temporary: +- emit_load(bt, arg.index()); +- break; +- case tt_constant: +- emit_load_constant(arg); +- break; +- case tt_illegal: +- case tt_void: +- default: +- ShouldNotReachHere(); +- } +-} +- +- +-void MethodHandleCompiler::emit_load(BasicType bt, int index) { +- if (index <= 3) { +- switch (bt) { +- case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT: +- case T_INT: emit_bc(Bytecodes::cast(Bytecodes::_iload_0 + index)); break; +- case T_LONG: emit_bc(Bytecodes::cast(Bytecodes::_lload_0 + index)); break; +- case T_FLOAT: emit_bc(Bytecodes::cast(Bytecodes::_fload_0 + index)); break; +- case T_DOUBLE: emit_bc(Bytecodes::cast(Bytecodes::_dload_0 + index)); break; +- case T_OBJECT: emit_bc(Bytecodes::cast(Bytecodes::_aload_0 + index)); break; +- default: +- ShouldNotReachHere(); +- } +- } +- else { +- switch (bt) { +- case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT: +- case T_INT: emit_bc(Bytecodes::_iload, index); break; +- case T_LONG: emit_bc(Bytecodes::_lload, index); break; +- case T_FLOAT: emit_bc(Bytecodes::_fload, index); break; +- case T_DOUBLE: emit_bc(Bytecodes::_dload, index); break; +- case T_OBJECT: emit_bc(Bytecodes::_aload, index); break; +- default: +- ShouldNotReachHere(); +- } +- } +- stack_push(bt); +-} +- +-void MethodHandleCompiler::emit_store(BasicType bt, int index) { +- if (index <= 3) { +- switch (bt) { +- case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT: +- case T_INT: emit_bc(Bytecodes::cast(Bytecodes::_istore_0 + index)); break; +- case T_LONG: emit_bc(Bytecodes::cast(Bytecodes::_lstore_0 + index)); break; +- case T_FLOAT: emit_bc(Bytecodes::cast(Bytecodes::_fstore_0 + index)); break; +- case T_DOUBLE: emit_bc(Bytecodes::cast(Bytecodes::_dstore_0 + index)); break; +- case T_OBJECT: emit_bc(Bytecodes::cast(Bytecodes::_astore_0 + index)); break; +- 
default: +- ShouldNotReachHere(); +- } +- } +- else { +- switch (bt) { +- case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT: +- case T_INT: emit_bc(Bytecodes::_istore, index); break; +- case T_LONG: emit_bc(Bytecodes::_lstore, index); break; +- case T_FLOAT: emit_bc(Bytecodes::_fstore, index); break; +- case T_DOUBLE: emit_bc(Bytecodes::_dstore, index); break; +- case T_OBJECT: emit_bc(Bytecodes::_astore, index); break; +- default: +- ShouldNotReachHere(); +- } +- } +- stack_pop(bt); +-} +- +- +-void MethodHandleCompiler::emit_load_constant(ArgToken arg) { +- BasicType bt = arg.basic_type(); +- if (is_subword_type(bt)) bt = T_INT; +- switch (bt) { +- case T_INT: { +- jint value = arg.get_jint(); +- if (-1 <= value && value <= 5) +- emit_bc(Bytecodes::cast(Bytecodes::_iconst_0 + value)); +- else +- emit_bc(Bytecodes::_ldc, cpool_int_put(value)); +- break; +- } +- case T_LONG: { +- jlong value = arg.get_jlong(); +- if (0 <= value && value <= 1) +- emit_bc(Bytecodes::cast(Bytecodes::_lconst_0 + (int) value)); +- else +- emit_bc(Bytecodes::_ldc2_w, cpool_long_put(value)); +- break; +- } +- case T_FLOAT: { +- jfloat value = arg.get_jfloat(); +- if (value == 0.0 || value == 1.0 || value == 2.0) +- emit_bc(Bytecodes::cast(Bytecodes::_fconst_0 + (int) value)); +- else +- emit_bc(Bytecodes::_ldc, cpool_float_put(value)); +- break; +- } +- case T_DOUBLE: { +- jdouble value = arg.get_jdouble(); +- if (value == 0.0 || value == 1.0) +- emit_bc(Bytecodes::cast(Bytecodes::_dconst_0 + (int) value)); +- else +- emit_bc(Bytecodes::_ldc2_w, cpool_double_put(value)); +- break; +- } +- case T_OBJECT: { +- Handle value = arg.object(); +- if (value.is_null()) { +- emit_bc(Bytecodes::_aconst_null); +- break; +- } +- if (java_lang_Class::is_instance(value())) { +- klassOop k = java_lang_Class::as_klassOop(value()); +- if (k != NULL) { +- emit_bc(Bytecodes::_ldc, cpool_klass_put(k)); +- break; +- } +- } +- emit_bc(Bytecodes::_ldc, cpool_object_put(value)); +- break; +- } +- default: +- 
ShouldNotReachHere(); +- } +- stack_push(bt); +-} +- +- +-MethodHandleWalker::ArgToken +-MethodHandleCompiler::make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, +- const ArgToken& src, TRAPS) { +- +- BasicType srctype = src.basic_type(); +- TokenType tt = src.token_type(); +- int index = -1; +- +- switch (op) { +- case Bytecodes::_i2l: +- case Bytecodes::_i2f: +- case Bytecodes::_i2d: +- case Bytecodes::_i2b: +- case Bytecodes::_i2c: +- case Bytecodes::_i2s: +- +- case Bytecodes::_l2i: +- case Bytecodes::_l2f: +- case Bytecodes::_l2d: +- +- case Bytecodes::_f2i: +- case Bytecodes::_f2l: +- case Bytecodes::_f2d: +- +- case Bytecodes::_d2i: +- case Bytecodes::_d2l: +- case Bytecodes::_d2f: +- if (tt == tt_constant) { +- emit_load_constant(src); +- } else { +- emit_load(srctype, src.index()); +- } +- stack_pop(srctype); // pop the src type +- emit_bc(op); +- stack_push(type); // push the dest value +- if (tt != tt_constant) +- index = src.index(); +- if (srctype != type || index == -1) +- index = new_local_index(type); +- emit_store(type, index); +- break; +- +- case Bytecodes::_checkcast: +- if (tt == tt_constant) { +- emit_load_constant(src); +- } else { +- emit_load(srctype, src.index()); +- index = src.index(); +- } +- emit_bc(op, cpool_klass_put(tk)); +- check_non_bcp_klass(tk, CHECK_(src)); +- // Allocate a new local for the type so that we don't hide the +- // previous type from the verifier. 
+- index = new_local_index(type); +- emit_store(srctype, index); +- break; +- +- case Bytecodes::_nop: +- // nothing to do +- return src; +- +- default: +- if (op == Bytecodes::_illegal) +- lose(err_msg("no such primitive conversion: %s -> %s", type2name(src.basic_type()), type2name(type)), THREAD); +- else +- lose(err_msg("bad primitive conversion op: %s", Bytecodes::name(op)), THREAD); +- return make_prim_constant(type, &zero_jvalue, THREAD); +- } +- +- return make_parameter(type, tk, index, THREAD); +-} +- +- +-// ----------------------------------------------------------------------------- +-// MethodHandleCompiler +-// +- +-// Values used by the compiler. +-jvalue MethodHandleCompiler::zero_jvalue = { 0 }; +-jvalue MethodHandleCompiler::one_jvalue = { 1 }; +- +-// Fetch any values from CountingMethodHandles and capture them for profiles +-bool MethodHandleCompiler::fetch_counts(ArgToken arg1, ArgToken arg2) { +- int count1 = -1, count2 = -1; +- if (arg1.token_type() == tt_constant && arg1.basic_type() == T_OBJECT && +- java_lang_invoke_CountingMethodHandle::is_instance(arg1.object()())) { +- count1 = java_lang_invoke_CountingMethodHandle::vmcount(arg1.object()()); +- } +- if (arg2.token_type() == tt_constant && arg2.basic_type() == T_OBJECT && +- java_lang_invoke_CountingMethodHandle::is_instance(arg2.object()())) { +- count2 = java_lang_invoke_CountingMethodHandle::vmcount(arg2.object()()); +- } +- int total = count1 + count2; +- if (count1 != -1 && count2 != -1 && total != 0) { +- // Normalize the collect counts to the invoke_count +- if (count1 != 0) _not_taken_count = (int)(_invoke_count * count1 / (double)total); +- if (count2 != 0) _taken_count = (int)(_invoke_count * count2 / (double)total); +- return true; +- } +- return false; +-} +- +-// Emit bytecodes for the given invoke instruction. 
+-MethodHandleWalker::ArgToken +-MethodHandleCompiler::make_invoke(methodHandle m, vmIntrinsics::ID iid, +- Bytecodes::Code op, bool tailcall, +- int argc, MethodHandleWalker::ArgToken* argv, +- TRAPS) { +- ArgToken zero; +- if (m.is_null()) { +- // Get the intrinsic methodOop. +- m = methodHandle(THREAD, vmIntrinsics::method_for(iid)); +- if (m.is_null()) { +- lose(vmIntrinsics::name_at(iid), CHECK_(zero)); +- } +- } +- +- klassOop klass = m->method_holder(); +- Symbol* name = m->name(); +- Symbol* signature = m->signature(); +- +- if (iid == vmIntrinsics::_invokeGeneric && +- argc >= 1 && argv[0].token_type() == tt_constant) { +- assert(m->intrinsic_id() == vmIntrinsics::_invokeExact, ""); +- Handle receiver = argv[0].object(); +- Handle rtype(THREAD, java_lang_invoke_MethodHandle::type(receiver())); +- Handle mtype(THREAD, m->method_handle_type()); +- if (rtype() != mtype()) { +- assert(java_lang_invoke_MethodType::form(rtype()) == +- java_lang_invoke_MethodType::form(mtype()), +- "must be the same shape"); +- // customize m to the exact required rtype +- bool has_non_bcp_klass = check_non_bcp_klasses(rtype(), CHECK_(zero)); +- TempNewSymbol sig2 = java_lang_invoke_MethodType::as_signature(rtype(), true, CHECK_(zero)); +- methodHandle m2; +- if (!has_non_bcp_klass) { +- methodOop m2_oop = SystemDictionary::find_method_handle_invoke(m->name(), sig2, +- KlassHandle(), CHECK_(zero)); +- m2 = methodHandle(THREAD, m2_oop); +- } +- if (m2.is_null()) { +- // just build it fresh +- m2 = methodOopDesc::make_invoke_method(klass, m->name(), sig2, rtype, CHECK_(zero)); +- if (m2.is_null()) +- lose(err_msg("no customized invoker %s", sig2->as_utf8()), CHECK_(zero)); +- } +- m = m2; +- signature = m->signature(); +- } +- } +- +- if (m->intrinsic_id() == vmIntrinsics::_selectAlternative && +- fetch_counts(argv[1], argv[2])) { +- assert(argc == 3, "three arguments"); +- assert(tailcall, "only"); +- +- // do inline bytecodes so we can drop profile data into it, +- // 0: iload_0 
+- emit_load(argv[0]); +- // 1: ifeq 8 +- _selectAlternative_bci = _bytecode.length(); +- emit_bc(Bytecodes::_ifeq, 0); // emit placeholder offset +- // 4: aload_1 +- emit_load(argv[1]); +- // 5: areturn; +- emit_bc(Bytecodes::_areturn); +- // 8: aload_2 +- update_branch_dest(_selectAlternative_bci, cur_bci()); +- emit_load(argv[2]); +- // 9: areturn +- emit_bc(Bytecodes::_areturn); +- return ArgToken(); // Dummy return value. +- } +- +- check_non_bcp_klass(klass, CHECK_(zero)); +- if (m->is_method_handle_invoke()) { +- check_non_bcp_klasses(m->method_handle_type(), CHECK_(zero)); +- } +- +- // Count the number of arguments, not the size +- ArgumentCount asc(signature); +- assert(argc == asc.size() + ((op == Bytecodes::_invokestatic || op == Bytecodes::_invokedynamic) ? 0 : 1), +- "argc mismatch"); +- +- for (int i = 0; i < argc; i++) { +- ArgToken arg = argv[i]; +- TokenType tt = arg.token_type(); +- BasicType bt = arg.basic_type(); +- +- switch (tt) { +- case tt_parameter: +- case tt_temporary: +- emit_load(bt, arg.index()); +- break; +- case tt_constant: +- emit_load_constant(arg); +- break; +- case tt_illegal: +- // Sentinel. +- assert(i == (argc - 1), "sentinel must be last entry"); +- break; +- case tt_void: +- default: +- ShouldNotReachHere(); +- } +- } +- +- // Populate constant pool. +- int name_index = cpool_symbol_put(name); +- int signature_index = cpool_symbol_put(signature); +- int name_and_type_index = cpool_name_and_type_put(name_index, signature_index); +- int klass_index = cpool_klass_put(klass); +- int methodref_index = cpool_methodref_put(op, klass_index, name_and_type_index, m); +- +- // Generate invoke. 
+- switch (op) { +- case Bytecodes::_invokestatic: +- case Bytecodes::_invokespecial: +- case Bytecodes::_invokevirtual: +- emit_bc(op, methodref_index); +- break; +- +- case Bytecodes::_invokeinterface: { +- ArgumentSizeComputer asc(signature); +- emit_bc(op, methodref_index, asc.size() + 1); +- break; +- } +- +- default: +- ShouldNotReachHere(); +- } +- +- // If tailcall, we have walked all the way to a direct method handle. +- // Otherwise, make a recursive call to some helper routine. +- BasicType rbt = m->result_type(); +- if (rbt == T_ARRAY) rbt = T_OBJECT; +- stack_push(rbt); // The return value is already pushed onto the stack. +- ArgToken ret; +- if (tailcall) { +- if (return_conv() == zero_return_conv()) { +- rbt = T_VOID; // discard value +- } else if (return_conv() != vmIntrinsics::_none) { +- // return value conversion +- int index = new_local_index(rbt); +- emit_store(rbt, index); +- ArgToken arglist[2]; +- arglist[0] = ArgToken(tt_temporary, rbt, index); +- arglist[1] = ArgToken(); // sentinel +- ret = make_invoke(methodHandle(), return_conv(), Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(zero)); +- set_return_conv(vmIntrinsics::_none); +- rbt = ret.basic_type(); +- emit_load(rbt, ret.index()); +- } +- if (rbt != _rtype) { +- if (rbt == T_VOID) { +- // push a zero of the right sort +- if (_rtype == T_OBJECT) { +- zero = make_oop_constant(NULL, CHECK_(zero)); +- } else { +- zero = make_prim_constant(_rtype, &zero_jvalue, CHECK_(zero)); +- } +- emit_load_constant(zero); +- } else if (_rtype == T_VOID) { +- // We'll emit a _return with something on the stack. +- // It's OK to ignore what's on the stack. +- } else if (rbt == T_INT && is_subword_type(_rtype)) { +- // Convert value to match return type. +- switch (_rtype) { +- case T_BOOLEAN: { +- // boolean is treated as a one-bit unsigned integer. +- // Cf. 
API documentation: java/lang/invoke/MethodHandles.html#explicitCastArguments +- ArgToken one = make_prim_constant(T_INT, &one_jvalue, CHECK_(zero)); +- emit_load_constant(one); +- emit_bc(Bytecodes::_iand); +- break; +- } +- case T_BYTE: emit_bc(Bytecodes::_i2b); break; +- case T_CHAR: emit_bc(Bytecodes::_i2c); break; +- case T_SHORT: emit_bc(Bytecodes::_i2s); break; +- default: ShouldNotReachHere(); +- } +- } else if (is_subword_type(rbt) && (is_subword_type(_rtype) || (_rtype == T_INT))) { +- // The subword type was returned as an int and will be passed +- // on as an int. +- } else { +- lose("unknown conversion", CHECK_(zero)); +- } +- } +- switch (_rtype) { +- case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT: +- case T_INT: emit_bc(Bytecodes::_ireturn); break; +- case T_LONG: emit_bc(Bytecodes::_lreturn); break; +- case T_FLOAT: emit_bc(Bytecodes::_freturn); break; +- case T_DOUBLE: emit_bc(Bytecodes::_dreturn); break; +- case T_VOID: emit_bc(Bytecodes::_return); break; +- case T_OBJECT: +- if (_rklass.not_null() && _rklass() != SystemDictionary::Object_klass() && !Klass::cast(_rklass())->is_interface()) { +- emit_bc(Bytecodes::_checkcast, cpool_klass_put(_rklass())); +- check_non_bcp_klass(_rklass(), CHECK_(zero)); +- } +- emit_bc(Bytecodes::_areturn); +- break; +- default: ShouldNotReachHere(); +- } +- ret = ArgToken(); // Dummy return value. 
+- } +- else { +- int index = new_local_index(rbt); +- switch (rbt) { +- case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT: +- case T_INT: case T_LONG: case T_FLOAT: case T_DOUBLE: +- case T_OBJECT: +- emit_store(rbt, index); +- ret = ArgToken(tt_temporary, rbt, index); +- break; +- case T_VOID: +- ret = ArgToken(tt_void); +- break; +- default: +- ShouldNotReachHere(); +- } +- } +- +- return ret; +-} +- +-MethodHandleWalker::ArgToken +-MethodHandleCompiler::make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, +- const MethodHandleWalker::ArgToken& base, +- const MethodHandleWalker::ArgToken& offset, +- TRAPS) { +- switch (base.token_type()) { +- case tt_parameter: +- case tt_temporary: +- emit_load(base.basic_type(), base.index()); +- break; +- case tt_constant: +- emit_load_constant(base); +- break; +- default: +- ShouldNotReachHere(); +- } +- switch (offset.token_type()) { +- case tt_parameter: +- case tt_temporary: +- emit_load(offset.basic_type(), offset.index()); +- break; +- case tt_constant: +- emit_load_constant(offset); +- break; +- default: +- ShouldNotReachHere(); +- } +- emit_bc(op); +- int index = new_local_index(type); +- emit_store(type, index); +- return ArgToken(tt_temporary, type, index); +-} +- +- +-int MethodHandleCompiler::cpool_primitive_put(BasicType bt, jvalue* con) { +- jvalue con_copy; +- assert(bt < T_OBJECT, ""); +- if (type2aelembytes(bt) < jintSize) { +- // widen to int +- con_copy = (*con); +- con = &con_copy; +- switch (bt) { +- case T_BOOLEAN: con->i = (con->z ? 
1 : 0); break; +- case T_BYTE: con->i = con->b; break; +- case T_CHAR: con->i = con->c; break; +- case T_SHORT: con->i = con->s; break; +- default: ShouldNotReachHere(); +- } +- bt = T_INT; +- } +- +-// for (int i = 1, imax = _constants.length(); i < imax; i++) { +-// ConstantValue* con = _constants.at(i); +-// if (con != NULL && con->is_primitive() && con.basic_type() == bt) { +-// bool match = false; +-// switch (type2size[bt]) { +-// case 1: if (pcon->_value.i == con->i) match = true; break; +-// case 2: if (pcon->_value.j == con->j) match = true; break; +-// } +-// if (match) +-// return i; +-// } +-// } +- ConstantValue* cv = new ConstantValue(bt, *con); +- int index = _constants.append(cv); +- +- // long and double entries take 2 slots, we add another empty entry. +- if (type2size[bt] == 2) +- (void) _constants.append(NULL); +- +- return index; +-} +- +-bool MethodHandleCompiler::check_non_bcp_klasses(Handle method_type, TRAPS) { +- bool res = false; +- for (int i = -1, len = java_lang_invoke_MethodType::ptype_count(method_type()); i < len; i++) { +- oop ptype = (i == -1 +- ? 
java_lang_invoke_MethodType::rtype(method_type()) +- : java_lang_invoke_MethodType::ptype(method_type(), i)); +- res |= check_non_bcp_klass(java_lang_Class::as_klassOop(ptype), CHECK_(false)); +- } +- return res; +-} +- +-bool MethodHandleCompiler::check_non_bcp_klass(klassOop klass, TRAPS) { +- klass = methodOopDesc::check_non_bcp_klass(klass); +- if (klass != NULL) { +- Symbol* name = Klass::cast(klass)->name(); +- for (int i = _non_bcp_klasses.length() - 1; i >= 0; i--) { +- klassOop k2 = _non_bcp_klasses.at(i)(); +- if (Klass::cast(k2)->name() == name) { +- if (k2 != klass) { +- lose(err_msg("unsupported klass name alias %s", name->as_utf8()), THREAD); +- } +- return true; +- } +- } +- _non_bcp_klasses.append(KlassHandle(THREAD, klass)); +- return true; +- } +- return false; +-} +- +-void MethodHandleCompiler::record_non_bcp_klasses() { +- // Append extra klasses to constant pool, to guide klass lookup. +- for (int k = 0; k < _non_bcp_klasses.length(); k++) { +- klassOop non_bcp_klass = _non_bcp_klasses.at(k)(); +- bool add_to_cp = true; +- for (int j = 1; j < _constants.length(); j++) { +- ConstantValue* cv = _constants.at(j); +- if (cv != NULL && cv->tag() == JVM_CONSTANT_Class +- && cv->klass_oop() == non_bcp_klass) { +- add_to_cp = false; +- break; +- } +- } +- if (add_to_cp) cpool_klass_put(non_bcp_klass); +- } +-} +- +-constantPoolHandle MethodHandleCompiler::get_constant_pool(TRAPS) const { +- constantPoolHandle nullHandle; +- constantPoolOop cpool_oop = oopFactory::new_constantPool(_constants.length(), +- oopDesc::IsSafeConc, +- CHECK_(nullHandle)); +- constantPoolHandle cpool(THREAD, cpool_oop); +- +- // Fill the real constant pool skipping the zero element. 
+- for (int i = 1; i < _constants.length(); i++) { +- ConstantValue* cv = _constants.at(i); +- switch (cv->tag()) { +- case JVM_CONSTANT_Utf8: cpool->symbol_at_put( i, cv->symbol() ); break; +- case JVM_CONSTANT_Integer: cpool->int_at_put( i, cv->get_jint() ); break; +- case JVM_CONSTANT_Float: cpool->float_at_put( i, cv->get_jfloat() ); break; +- case JVM_CONSTANT_Long: cpool->long_at_put( i, cv->get_jlong() ); break; +- case JVM_CONSTANT_Double: cpool->double_at_put( i, cv->get_jdouble() ); break; +- case JVM_CONSTANT_Class: cpool->klass_at_put( i, cv->klass_oop() ); break; +- case JVM_CONSTANT_Methodref: cpool->method_at_put( i, cv->first_index(), cv->second_index()); break; +- case JVM_CONSTANT_InterfaceMethodref: +- cpool->interface_method_at_put(i, cv->first_index(), cv->second_index()); break; +- case JVM_CONSTANT_NameAndType: cpool->name_and_type_at_put(i, cv->first_index(), cv->second_index()); break; +- case JVM_CONSTANT_Object: cpool->object_at_put( i, cv->object_oop() ); break; +- default: ShouldNotReachHere(); +- } +- +- switch (cv->tag()) { +- case JVM_CONSTANT_Long: +- case JVM_CONSTANT_Double: +- i++; // Skip empty entry. +- assert(_constants.at(i) == NULL, "empty entry"); +- break; +- } +- } +- +- cpool->set_preresolution(); +- +- // Set the constant pool holder to the target method's class. +- cpool->set_pool_holder(_target_klass()); +- +- return cpool; +-} +- +- +-methodHandle MethodHandleCompiler::get_method_oop(TRAPS) { +- methodHandle empty; +- // Create a method that holds the generated bytecode. invokedynamic +- // has no receiver, normal MH calls do. 
+- int flags_bits; +- if (for_invokedynamic()) +- flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL | JVM_ACC_SYNTHETIC | JVM_ACC_STATIC); +- else +- flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL | JVM_ACC_SYNTHETIC); +- +- // Create a new method +- methodHandle m; +- { +- methodOop m_oop = oopFactory::new_method(bytecode_length(), +- accessFlags_from(flags_bits), +- 0, 0, 0, oopDesc::IsSafeConc, CHECK_(empty)); +- m = methodHandle(THREAD, m_oop); +- } +- +- constantPoolHandle cpool = get_constant_pool(CHECK_(empty)); +- m->set_constants(cpool()); +- +- m->set_name_index(_name_index); +- m->set_signature_index(_signature_index); +- +- m->set_code((address) bytecode()); +- +- m->set_max_stack(_max_stack); +- m->set_max_locals(max_locals()); +- m->set_size_of_parameters(_num_params); +- +- typeArrayHandle exception_handlers(THREAD, Universe::the_empty_int_array()); +- m->set_exception_table(exception_handlers()); +- +- // Rewrite the method and set up the constant pool cache. +- objArrayOop m_array = oopFactory::new_system_objArray(1, CHECK_(empty)); +- objArrayHandle methods(THREAD, m_array); +- methods->obj_at_put(0, m()); +- Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(empty)); // Use fake class. +- Rewriter::relocate_and_link(_target_klass(), methods, CHECK_(empty)); // Use fake class. +- +- // Pre-resolve selected CP cache entries, to avoid problems with class loader scoping. 
+- constantPoolCacheHandle cpc(THREAD, cpool->cache()); +- for (int i = 0; i < cpc->length(); i++) { +- ConstantPoolCacheEntry* e = cpc->entry_at(i); +- assert(!e->is_secondary_entry(), "no indy instructions in here, yet"); +- int constant_pool_index = e->constant_pool_index(); +- ConstantValue* cv = _constants.at(constant_pool_index); +- if (!cv->has_linkage()) continue; +- methodHandle m = cv->linkage(); +- int index; +- switch (cv->tag()) { +- case JVM_CONSTANT_Methodref: +- index = m->vtable_index(); +- if (m->is_static()) { +- e->set_method(Bytecodes::_invokestatic, m, index); +- } else { +- e->set_method(Bytecodes::_invokespecial, m, index); +- e->set_method(Bytecodes::_invokevirtual, m, index); +- } +- break; +- case JVM_CONSTANT_InterfaceMethodref: +- index = klassItable::compute_itable_index(m()); +- e->set_interface_call(m, index); +- break; +- } +- } +- +- // Set the invocation counter's count to the invoke count of the +- // original call site. +- InvocationCounter* ic = m->invocation_counter(); +- ic->set(InvocationCounter::wait_for_compile, _invoke_count); +- +- // Create a new MDO +- { +- methodDataOop mdo = oopFactory::new_methodData(m, CHECK_(empty)); +- assert(m->method_data() == NULL, "there should not be an MDO yet"); +- m->set_method_data(mdo); +- +- bool found_selectAlternative = false; +- // Iterate over all profile data and set the count of the counter +- // data entries to the original call site counter. 
+- for (ProfileData* profile_data = mdo->first_data(); +- mdo->is_valid(profile_data); +- profile_data = mdo->next_data(profile_data)) { +- if (profile_data->is_CounterData()) { +- CounterData* counter_data = profile_data->as_CounterData(); +- counter_data->set_count(_invoke_count); +- } +- if (profile_data->is_BranchData() && +- profile_data->bci() == _selectAlternative_bci) { +- BranchData* bd = profile_data->as_BranchData(); +- bd->set_taken(_taken_count); +- bd->set_not_taken(_not_taken_count); +- found_selectAlternative = true; +- } +- } +- assert(_selectAlternative_bci == -1 || found_selectAlternative, "must have found profile entry"); +- } +- +-#ifndef PRODUCT +- if (TraceMethodHandles) { +- m->print(); +- m->print_codes(); +- } +-#endif //PRODUCT +- +- assert(m->is_method_handle_adapter(), "must be recognized as an adapter"); +- return m; +-} +- +- +-#ifndef PRODUCT +- +-// MH printer for debugging. +- +-class MethodHandlePrinter : public MethodHandleWalker { +-private: +- outputStream* _out; +- bool _verbose; +- int _temp_num; +- int _param_state; +- stringStream _strbuf; +- const char* strbuf() { +- const char* s = _strbuf.as_string(); +- _strbuf.reset(); +- return s; +- } +- ArgToken token(const char* str, BasicType type) { +- return ArgToken(str, type); +- } +- const char* string(ArgToken token) { +- return token.str(); +- } +- void start_params() { +- _param_state <<= 1; +- _out->print("("); +- } +- void end_params() { +- if (_verbose) _out->print("\n"); +- _out->print(") => {"); +- _param_state >>= 1; +- } +- void put_type_name(BasicType type, klassOop tk, outputStream* s) { +- const char* kname = NULL; +- if (tk != NULL) +- kname = Klass::cast(tk)->external_name(); +- s->print("%s", (kname != NULL) ? 
kname : type2name(type)); +- } +- ArgToken maybe_make_temp(const char* statement_op, BasicType type, const char* temp_name) { +- const char* value = strbuf(); +- if (!_verbose) return token(value, type); +- // make an explicit binding for each separate value +- _strbuf.print("%s%d", temp_name, ++_temp_num); +- const char* temp = strbuf(); +- _out->print("\n %s %s %s = %s;", statement_op, type2name(type), temp, value); +- return token(temp, type); +- } +- +-public: +- MethodHandlePrinter(Handle root, bool verbose, outputStream* out, TRAPS) +- : MethodHandleWalker(root, false, THREAD), +- _out(out), +- _verbose(verbose), +- _param_state(0), +- _temp_num(0) +- { +- out->print("MethodHandle:"); +- java_lang_invoke_MethodType::print_signature(java_lang_invoke_MethodHandle::type(root()), out); +- out->print(" : #"); +- start_params(); +- } +- virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) { +- if (argnum < 0) { +- end_params(); +- return token("return", type); +- } +- if ((_param_state & 1) == 0) { +- _param_state |= 1; +- _out->print(_verbose ? "\n " : ""); +- } else { +- _out->print(_verbose ? 
",\n " : ", "); +- } +- if (argnum >= _temp_num) +- _temp_num = argnum; +- // generate an argument name +- _strbuf.print("a%d", argnum); +- const char* arg = strbuf(); +- put_type_name(type, tk, _out); +- _out->print(" %s", arg); +- return token(arg, type); +- } +- virtual ArgToken make_oop_constant(oop con, TRAPS) { +- if (con == NULL) +- _strbuf.print("null"); +- else +- con->print_value_on(&_strbuf); +- if (_strbuf.size() == 0) { // yuck +- _strbuf.print("(a "); +- put_type_name(T_OBJECT, con->klass(), &_strbuf); +- _strbuf.print(")"); +- } +- return maybe_make_temp("constant", T_OBJECT, "k"); +- } +- virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) { +- java_lang_boxing_object::print(type, con, &_strbuf); +- return maybe_make_temp("constant", type, "k"); +- } +- void print_bytecode_name(Bytecodes::Code op) { +- if (Bytecodes::is_defined(op)) +- _strbuf.print("%s", Bytecodes::name(op)); +- else +- _strbuf.print("bytecode_%d", (int) op); +- } +- virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS) { +- print_bytecode_name(op); +- _strbuf.print("(%s", string(src)); +- if (tk != NULL) { +- _strbuf.print(", "); +- put_type_name(type, tk, &_strbuf); +- } +- _strbuf.print(")"); +- return maybe_make_temp("convert", type, "v"); +- } +- virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS) { +- _strbuf.print("%s(%s, %s", Bytecodes::name(op), string(base), string(offset)); +- if (tk != NULL) { +- _strbuf.print(", "); +- put_type_name(type, tk, &_strbuf); +- } +- _strbuf.print(")"); +- return maybe_make_temp("fetch", type, "x"); +- } +- virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid, +- Bytecodes::Code op, bool tailcall, +- int argc, ArgToken* argv, TRAPS) { +- Symbol* name; +- Symbol* sig; +- if (m.not_null()) { +- name = m->name(); +- sig = m->signature(); +- } else { +- name = 
vmSymbols::symbol_at(vmIntrinsics::name_for(iid)); +- sig = vmSymbols::symbol_at(vmIntrinsics::signature_for(iid)); +- } +- _strbuf.print("%s %s%s(", Bytecodes::name(op), name->as_C_string(), sig->as_C_string()); +- for (int i = 0; i < argc; i++) { +- _strbuf.print("%s%s", (i > 0 ? ", " : ""), string(argv[i])); +- } +- _strbuf.print(")"); +- if (!tailcall) { +- BasicType rt = char2type(sig->byte_at(sig->utf8_length()-1)); +- if (rt == T_ILLEGAL) rt = T_OBJECT; // ';' at the end of '(...)L...;' +- return maybe_make_temp("invoke", rt, "x"); +- } else { +- const char* ret = strbuf(); +- _out->print(_verbose ? "\n return " : " "); +- _out->print("%s", ret); +- _out->print(_verbose ? "\n}\n" : " }"); +- } +- return ArgToken(); +- } +- +- virtual void set_method_handle(oop mh) { +- if (WizardMode && Verbose) { +- tty->print("\n--- next target: "); +- mh->print(); +- } +- } +- +- static void print(Handle root, bool verbose, outputStream* out, TRAPS) { +- ResourceMark rm; +- MethodHandlePrinter printer(root, verbose, out, CHECK); +- printer.walk(CHECK); +- out->print("\n"); +- } +- static void print(Handle root, bool verbose = Verbose, outputStream* out = tty) { +- Thread* THREAD = Thread::current(); +- ResourceMark rm; +- MethodHandlePrinter printer(root, verbose, out, THREAD); +- if (!HAS_PENDING_EXCEPTION) +- printer.walk(THREAD); +- if (HAS_PENDING_EXCEPTION) { +- oop ex = PENDING_EXCEPTION; +- CLEAR_PENDING_EXCEPTION; +- out->print(" *** "); +- if (printer.lose_message() != NULL) out->print("%s ", printer.lose_message()); +- out->print("}"); +- } +- out->print("\n"); +- } +-}; +- +-extern "C" +-void print_method_handle(oop mh) { +- if (!mh->is_oop()) { +- tty->print_cr("*** not a method handle: "PTR_FORMAT, (intptr_t)mh); +- } else if (java_lang_invoke_MethodHandle::is_instance(mh)) { +- MethodHandlePrinter::print(mh); +- } else { +- tty->print("*** not a method handle: "); +- mh->print(); +- } +-} +- +-#endif // PRODUCT +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 
src/share/vm/prims/methodHandleWalk.hpp +--- openjdk/hotspot/src/share/vm/prims/methodHandleWalk.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ /dev/null Thu Jan 01 00:00:00 1970 +0000 +@@ -1,486 +0,0 @@ +-/* +- * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_PRIMS_METHODHANDLEWALK_HPP +-#define SHARE_VM_PRIMS_METHODHANDLEWALK_HPP +- +-#include "prims/methodHandles.hpp" +- +-// Low-level parser for method handle chains. 
+-class MethodHandleChain : StackObj { +-public: +- typedef MethodHandles::EntryKind EntryKind; +- +-private: +- Handle _root; // original target +- Handle _method_handle; // current target +- bool _is_last; // final guy in chain +- bool _is_bound; // has a bound argument +- BasicType _arg_type; // if is_bound, the bound argument type +- int _arg_slot; // if is_bound or is_adapter, affected argument slot +- jint _conversion; // conversion field of AMH or -1 +- methodHandle _last_method; // if is_last, which method we target +- Bytecodes::Code _last_invoke; // if is_last, type of invoke +- const char* _lose_message; // saved argument to lose() +- +- void set_method_handle(Handle target, TRAPS); +- void set_last_method(oop target, TRAPS); +- static BasicType compute_bound_arg_type(oop target, methodOop m, int arg_slot, TRAPS); +- +- oop MethodHandle_type_oop() { return java_lang_invoke_MethodHandle::type(method_handle_oop()); } +- oop MethodHandle_vmtarget_oop() { return java_lang_invoke_MethodHandle::vmtarget(method_handle_oop()); } +- int MethodHandle_vmslots() { return java_lang_invoke_MethodHandle::vmslots(method_handle_oop()); } +- int DirectMethodHandle_vmindex() { return java_lang_invoke_DirectMethodHandle::vmindex(method_handle_oop()); } +- oop BoundMethodHandle_argument_oop() { return java_lang_invoke_BoundMethodHandle::argument(method_handle_oop()); } +- int BoundMethodHandle_vmargslot() { return java_lang_invoke_BoundMethodHandle::vmargslot(method_handle_oop()); } +- int AdapterMethodHandle_conversion() { return java_lang_invoke_AdapterMethodHandle::conversion(method_handle_oop()); } +- +-#ifdef ASSERT +- void print_impl(TRAPS); +-#endif +- +-public: +- MethodHandleChain(Handle root, TRAPS) +- : _root(root) +- { set_method_handle(root, THREAD); } +- +- bool is_adapter() { return _conversion != -1; } +- bool is_bound() { return _is_bound; } +- bool is_last() { return _is_last; } +- +- void next(TRAPS) { +- assert(!is_last(), ""); +- 
set_method_handle(MethodHandle_vmtarget_oop(), THREAD); +- } +- +- Handle root() { return _root; } +- Handle method_handle() { return _method_handle; } +- oop method_handle_oop() { return _method_handle(); } +- oop method_type_oop() { return MethodHandle_type_oop(); } +- oop vmtarget_oop() { return MethodHandle_vmtarget_oop(); } +- +- jint adapter_conversion() { assert(is_adapter(), ""); return _conversion; } +- int adapter_conversion_op() { return MethodHandles::adapter_conversion_op(adapter_conversion()); } +- BasicType adapter_conversion_src_type() +- { return MethodHandles::adapter_conversion_src_type(adapter_conversion()); } +- BasicType adapter_conversion_dest_type() +- { return MethodHandles::adapter_conversion_dest_type(adapter_conversion()); } +- int adapter_conversion_stack_move() +- { return MethodHandles::adapter_conversion_stack_move(adapter_conversion()); } +- int adapter_conversion_stack_pushes() +- { return adapter_conversion_stack_move() / MethodHandles::stack_move_unit(); } +- int adapter_conversion_vminfo() +- { return MethodHandles::adapter_conversion_vminfo(adapter_conversion()); } +- int adapter_arg_slot() { assert(is_adapter(), ""); return _arg_slot; } +- oop adapter_arg_oop() { assert(is_adapter(), ""); return BoundMethodHandle_argument_oop(); } +- +- BasicType bound_arg_type() { assert(is_bound(), ""); return _arg_type; } +- int bound_arg_slot() { assert(is_bound(), ""); return _arg_slot; } +- oop bound_arg_oop() { assert(is_bound(), ""); return BoundMethodHandle_argument_oop(); } +- +- methodHandle last_method() { assert(is_last(), ""); return _last_method; } +- methodOop last_method_oop() { assert(is_last(), ""); return _last_method(); } +- Bytecodes::Code last_invoke_code() { assert(is_last(), ""); return _last_invoke; } +- +- void lose(const char* msg, TRAPS); +- const char* lose_message() { return _lose_message; } +- +-#ifdef ASSERT +- // Print a symbolic description of a method handle chain, including +- // the signature for each 
method. The signatures are printed in +- // slot order to make it easier to understand. +- void print(); +- static void print(oopDesc* mh); +-#endif +-}; +- +- +-// Structure walker for method handles. +-// Does abstract interpretation on top of low-level parsing. +-// You supply the tokens shuffled by the abstract interpretation. +-class MethodHandleWalker : StackObj { +-public: +- // Stack values: +- enum TokenType { +- tt_void, +- tt_parameter, +- tt_temporary, +- tt_constant, +- tt_symbolic, +- tt_illegal +- }; +- +- // Argument token: +- class ArgToken { +- private: +- TokenType _tt; +- BasicType _bt; +- jvalue _value; +- Handle _handle; +- +- public: +- ArgToken(TokenType tt = tt_illegal) : _tt(tt), _bt(tt == tt_void ? T_VOID : T_ILLEGAL) { +- assert(tt == tt_illegal || tt == tt_void, "invalid token type"); +- } +- +- ArgToken(TokenType tt, BasicType bt, int index) : _tt(tt), _bt(bt) { +- assert(_tt == tt_parameter || _tt == tt_temporary, "must have index"); +- _value.i = index; +- } +- +- ArgToken(BasicType bt, jvalue value) : _tt(tt_constant), _bt(bt), _value(value) { assert(_bt != T_OBJECT, "wrong constructor"); } +- ArgToken(Handle handle) : _tt(tt_constant), _bt(T_OBJECT), _handle(handle) {} +- +- +- ArgToken(const char* str, BasicType type) : _tt(tt_symbolic), _bt(type) { +- _value.j = (intptr_t)str; +- } +- +- TokenType token_type() const { return _tt; } +- BasicType basic_type() const { return _bt; } +- bool has_index() const { return _tt == tt_parameter || _tt == tt_temporary; } +- int index() const { assert(has_index(), "must have index");; return _value.i; } +- Handle object() const { assert(_bt == T_OBJECT, "wrong accessor"); assert(_tt == tt_constant, "value type"); return _handle; } +- const char* str() const { assert(_tt == tt_symbolic, "string type"); return (const char*)(intptr_t)_value.j; } +- +- jint get_jint() const { assert(_bt == T_INT || is_subword_type(_bt), "wrong accessor"); assert(_tt == tt_constant, "value types"); return _value.i; 
} +- jlong get_jlong() const { assert(_bt == T_LONG, "wrong accessor"); assert(_tt == tt_constant, "value types"); return _value.j; } +- jfloat get_jfloat() const { assert(_bt == T_FLOAT, "wrong accessor"); assert(_tt == tt_constant, "value types"); return _value.f; } +- jdouble get_jdouble() const { assert(_bt == T_DOUBLE, "wrong accessor"); assert(_tt == tt_constant, "value types"); return _value.d; } +- }; +- +-private: +- MethodHandleChain _chain; +- bool _for_invokedynamic; +- int _local_index; +- +- // This array is kept in an unusual order, indexed by low-level "slot number". +- // TOS is always _outgoing.at(0), so simple pushes and pops shift the whole _outgoing array. +- // If there is a receiver in the current argument list, it is at _outgoing.at(_outgoing.length()-1). +- // If a value at _outgoing.at(n) is T_LONG or T_DOUBLE, the value at _outgoing.at(n+1) is T_VOID. +- GrowableArray _outgoing; // current outgoing parameter slots +- int _outgoing_argc; // # non-empty outgoing slots +- +- vmIntrinsics::ID _return_conv; // Return conversion required by raw retypes. +- +- // Replace a value of type old_type at slot (and maybe slot+1) with the new value. +- // If old_type != T_VOID, remove the old argument at that point. +- // If new_type != T_VOID, insert the new argument at that point. +- // Insert or delete a second empty slot as needed. +- void change_argument(BasicType old_type, int slot, const ArgToken& new_arg); +- void change_argument(BasicType old_type, int slot, BasicType type, const ArgToken& new_arg) { +- assert(type == new_arg.basic_type(), "must agree"); +- change_argument(old_type, slot, new_arg); +- } +- +- // Raw retype conversions for OP_RAW_RETYPE. 
+- void retype_raw_conversion(BasicType src, BasicType dst, bool for_return, int slot, TRAPS); +- void retype_raw_argument_type(BasicType src, BasicType dst, int slot, TRAPS) { retype_raw_conversion(src, dst, false, slot, CHECK); } +- void retype_raw_return_type( BasicType src, BasicType dst, TRAPS) { retype_raw_conversion(src, dst, true, -1, CHECK); } +- +- BasicType arg_type(int slot) { +- return _outgoing.at(slot).basic_type(); +- } +- bool has_argument(int slot) { +- return arg_type(slot) < T_VOID; +- } +- +-#ifdef ASSERT +- int argument_count_slow(); +-#endif +- +- // Return a bytecode for converting src to dest, if one exists. +- Bytecodes::Code conversion_code(BasicType src, BasicType dest); +- +- void walk_incoming_state(TRAPS); +- +- void verify_args_and_signature(TRAPS) NOT_DEBUG_RETURN; +- +-public: +- MethodHandleWalker(Handle root, bool for_invokedynamic, TRAPS) +- : _chain(root, THREAD), +- _for_invokedynamic(for_invokedynamic), +- _outgoing(THREAD, 10), +- _outgoing_argc(0), +- _return_conv(vmIntrinsics::_none) +- { +- _local_index = for_invokedynamic ? 0 : 1; +- } +- +- MethodHandleChain& chain() { return _chain; } +- +- bool for_invokedynamic() const { return _for_invokedynamic; } +- +- vmIntrinsics::ID return_conv() const { return _return_conv; } +- void set_return_conv(vmIntrinsics::ID c) { _return_conv = c; } +- static vmIntrinsics::ID zero_return_conv() { return vmIntrinsics::_min; } +- +- int new_local_index(BasicType bt) { +- //int index = _for_invokedynamic ? 
_local_index : _local_index - 1; +- int index = _local_index; +- _local_index += type2size[bt]; +- return index; +- } +- +- int max_locals() const { return _local_index; } +- +- // plug-in abstract interpretation steps: +- virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) = 0; +- virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) = 0; +- virtual ArgToken make_oop_constant(oop con, TRAPS) = 0; +- virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS) = 0; +- virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS) = 0; +- virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS) = 0; +- +- // For make_invoke, the methodHandle can be NULL if the intrinsic ID +- // is something other than vmIntrinsics::_none. +- +- // and in case anyone cares to related the previous actions to the chain: +- virtual void set_method_handle(oop mh) { } +- +- void lose(const char* msg, TRAPS) { chain().lose(msg, THREAD); } +- const char* lose_message() { return chain().lose_message(); } +- +- ArgToken walk(TRAPS); +-}; +- +- +-// An abstract interpreter for method handle chains. +-// Produces an account of the semantics of a chain, in terms of a static IR. +-// The IR happens to be JVM bytecodes. +-class MethodHandleCompiler : public MethodHandleWalker { +-private: +- int _invoke_count; // count the original call site has been executed +- KlassHandle _rklass; // Return type for casting. +- BasicType _rtype; +- KlassHandle _target_klass; +- Thread* _thread; +- +- int _selectAlternative_bci; // These are used for capturing profiles from GWTs +- int _taken_count; +- int _not_taken_count; +- +- // Values used by the compiler. +- static jvalue zero_jvalue; +- static jvalue one_jvalue; +- +- // Fake constant pool entry. 
+- class ConstantValue : public ResourceObj { +- private: +- int _tag; // Constant pool tag type. +- JavaValue _value; +- Handle _handle; +- Symbol* _sym; +- methodHandle _method; // pre-linkage +- +- public: +- // Constructor for oop types. +- ConstantValue(int tag, Handle con) : _tag(tag), _handle(con) { +- assert(tag == JVM_CONSTANT_Class || +- tag == JVM_CONSTANT_String || +- tag == JVM_CONSTANT_Object, "must be oop type"); +- } +- +- ConstantValue(int tag, Symbol* con) : _tag(tag), _sym(con) { +- assert(tag == JVM_CONSTANT_Utf8, "must be symbol type"); +- } +- +- // Constructor for oop reference types. +- ConstantValue(int tag, int index) : _tag(tag) { +- assert(JVM_CONSTANT_Fieldref <= tag && tag <= JVM_CONSTANT_NameAndType, "must be ref type"); +- _value.set_jint(index); +- } +- ConstantValue(int tag, int first_index, int second_index) : _tag(tag) { +- assert(JVM_CONSTANT_Fieldref <= tag && tag <= JVM_CONSTANT_NameAndType, "must be ref type"); +- _value.set_jint(first_index << 16 | second_index); +- } +- +- // Constructor for primitive types. 
+- ConstantValue(BasicType bt, jvalue con) { +- _value.set_type(bt); +- switch (bt) { +- case T_INT: _tag = JVM_CONSTANT_Integer; _value.set_jint( con.i); break; +- case T_LONG: _tag = JVM_CONSTANT_Long; _value.set_jlong( con.j); break; +- case T_FLOAT: _tag = JVM_CONSTANT_Float; _value.set_jfloat( con.f); break; +- case T_DOUBLE: _tag = JVM_CONSTANT_Double; _value.set_jdouble(con.d); break; +- default: ShouldNotReachHere(); +- } +- } +- +- int tag() const { return _tag; } +- Symbol* symbol() const { return _sym; } +- klassOop klass_oop() const { return (klassOop) _handle(); } +- oop object_oop() const { return _handle(); } +- int index() const { return _value.get_jint(); } +- int first_index() const { return _value.get_jint() >> 16; } +- int second_index() const { return _value.get_jint() & 0x0000FFFF; } +- +- bool is_primitive() const { return is_java_primitive(_value.get_type()); } +- jint get_jint() const { return _value.get_jint(); } +- jlong get_jlong() const { return _value.get_jlong(); } +- jfloat get_jfloat() const { return _value.get_jfloat(); } +- jdouble get_jdouble() const { return _value.get_jdouble(); } +- +- void set_linkage(methodHandle method) { +- assert(_method.is_null(), ""); +- _method = method; +- } +- bool has_linkage() const { return _method.not_null(); } +- methodHandle linkage() const { return _method; } +- }; +- +- // Fake constant pool. +- GrowableArray _constants; +- +- // Non-BCP classes that appear in associated MethodTypes (require special handling). 
+- GrowableArray _non_bcp_klasses; +- +- // Accumulated compiler state: +- GrowableArray _bytecode; +- +- int _cur_stack; +- int _max_stack; +- int _num_params; +- int _name_index; +- int _signature_index; +- +- void stack_push(BasicType bt) { +- _cur_stack += type2size[bt]; +- if (_cur_stack > _max_stack) _max_stack = _cur_stack; +- } +- void stack_pop(BasicType bt) { +- _cur_stack -= type2size[bt]; +- assert(_cur_stack >= 0, "sanity"); +- } +- +- unsigned char* bytecode() const { return _bytecode.adr_at(0); } +- int bytecode_length() const { return _bytecode.length(); } +- int cur_bci() const { return _bytecode.length(); } +- +- // Fake constant pool. +- int cpool_oop_put(int tag, Handle con) { +- if (con.is_null()) return 0; +- ConstantValue* cv = new ConstantValue(tag, con); +- return _constants.append(cv); +- } +- +- int cpool_symbol_put(int tag, Symbol* con) { +- if (con == NULL) return 0; +- ConstantValue* cv = new ConstantValue(tag, con); +- con->increment_refcount(); +- return _constants.append(cv); +- } +- +- int cpool_oop_reference_put(int tag, int first_index, int second_index, methodHandle method) { +- if (first_index == 0 && second_index == 0) return 0; +- assert(first_index != 0 && second_index != 0, "no zero indexes"); +- ConstantValue* cv = new ConstantValue(tag, first_index, second_index); +- if (method.not_null()) cv->set_linkage(method); +- return _constants.append(cv); +- } +- +- int cpool_primitive_put(BasicType type, jvalue* con); +- +- bool check_non_bcp_klasses(Handle method_type, TRAPS); +- bool check_non_bcp_klass(klassOop klass, TRAPS); +- void record_non_bcp_klasses(); +- +- int cpool_int_put(jint value) { +- jvalue con; con.i = value; +- return cpool_primitive_put(T_INT, &con); +- } +- int cpool_long_put(jlong value) { +- jvalue con; con.j = value; +- return cpool_primitive_put(T_LONG, &con); +- } +- int cpool_float_put(jfloat value) { +- jvalue con; con.f = value; +- return cpool_primitive_put(T_FLOAT, &con); +- } +- int 
cpool_double_put(jdouble value) { +- jvalue con; con.d = value; +- return cpool_primitive_put(T_DOUBLE, &con); +- } +- +- int cpool_object_put(Handle obj) { +- return cpool_oop_put(JVM_CONSTANT_Object, obj); +- } +- int cpool_symbol_put(Symbol* sym) { +- return cpool_symbol_put(JVM_CONSTANT_Utf8, sym); +- } +- int cpool_klass_put(klassOop klass) { +- return cpool_oop_put(JVM_CONSTANT_Class, klass); +- } +- int cpool_methodref_put(Bytecodes::Code op, int class_index, int name_and_type_index, methodHandle method) { +- int tag = (op == Bytecodes::_invokeinterface ? JVM_CONSTANT_InterfaceMethodref : JVM_CONSTANT_Methodref); +- return cpool_oop_reference_put(tag, class_index, name_and_type_index, method); +- } +- int cpool_name_and_type_put(int name_index, int signature_index) { +- return cpool_oop_reference_put(JVM_CONSTANT_NameAndType, name_index, signature_index, methodHandle()); +- } +- +- void emit_bc(Bytecodes::Code op, int index = 0, int args_size = -1); +- void update_branch_dest(int src, int dst); +- void emit_load(ArgToken arg); +- void emit_load(BasicType bt, int index); +- void emit_store(BasicType bt, int index); +- void emit_load_constant(ArgToken arg); +- +- virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) { +- return ArgToken(tt_parameter, type, argnum); +- } +- virtual ArgToken make_oop_constant(oop con, TRAPS) { +- Handle h(THREAD, con); +- return ArgToken(h); +- } +- virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) { +- return ArgToken(type, *con); +- } +- +- virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS); +- virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS); +- virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS); +- +- // Check for profiling information on a GWT and return 
true if it's found +- bool fetch_counts(ArgToken a1, ArgToken a2); +- +- // Get a real constant pool. +- constantPoolHandle get_constant_pool(TRAPS) const; +- +- // Get a real methodOop. +- methodHandle get_method_oop(TRAPS); +- +-public: +- MethodHandleCompiler(Handle root, Symbol* name, Symbol* signature, int invoke_count, bool for_invokedynamic, TRAPS); +- +- // Compile the given MH chain into bytecode. +- methodHandle compile(TRAPS); +- +- // Tests if the given class is a MH adapter holder. +- static bool klass_is_method_handle_adapter_holder(klassOop klass) { +- return (klass == SystemDictionary::MethodHandle_klass()); +- } +-}; +- +-#endif // SHARE_VM_PRIMS_METHODHANDLEWALK_HPP +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/prims/methodHandles.cpp +--- openjdk/hotspot/src/share/vm/prims/methodHandles.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/prims/methodHandles.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -30,166 +30,30 @@ + #include "memory/allocation.inline.hpp" + #include "memory/oopFactory.hpp" + #include "prims/methodHandles.hpp" +-#include "prims/methodHandleWalk.hpp" + #include "runtime/compilationPolicy.hpp" + #include "runtime/javaCalls.hpp" + #include "runtime/reflection.hpp" + #include "runtime/signature.hpp" + #include "runtime/stubRoutines.hpp" + ++ + /* + * JSR 292 reference implementation: method handles ++ * The JDK 7 reference implementation represented method handle ++ * combinations as chains. Each link in the chain had a "vmentry" ++ * field which pointed at a bit of assembly code which performed ++ * one transformation before dispatching to the next link in the chain. ++ * ++ * The current reference implementation pushes almost all code generation ++ * responsibility to (trusted) Java code. A method handle contains a ++ * pointer to its "LambdaForm", which embodies all details of the method ++ * handle's behavior. The LambdaForm is a normal Java object, managed ++ * by a runtime coded in Java. 
+ */ + + bool MethodHandles::_enabled = false; // set true after successful native linkage +- +-MethodHandleEntry* MethodHandles::_entries[MethodHandles::_EK_LIMIT] = {NULL}; +-const char* MethodHandles::_entry_names[_EK_LIMIT+1] = { +- "raise_exception", +- "invokestatic", // how a MH emulates invokestatic +- "invokespecial", // ditto for the other invokes... +- "invokevirtual", +- "invokeinterface", +- "bound_ref", // these are for BMH... +- "bound_int", +- "bound_long", +- "bound_ref_direct", // (direct versions have a direct methodOop) +- "bound_int_direct", +- "bound_long_direct", +- +- // starting at _adapter_mh_first: +- "adapter_retype_only", // these are for AMH... +- "adapter_retype_raw", +- "adapter_check_cast", +- "adapter_prim_to_prim", +- "adapter_ref_to_prim", +- "adapter_prim_to_ref", +- "adapter_swap_args", +- "adapter_rot_args", +- "adapter_dup_args", +- "adapter_drop_args", +- "adapter_collect_args", +- "adapter_spread_args", +- "adapter_fold_args", +- "adapter_unused_13", +- +- // optimized adapter types: +- "adapter_swap_args/1", +- "adapter_swap_args/2", +- "adapter_rot_args/1,up", +- "adapter_rot_args/1,down", +- "adapter_rot_args/2,up", +- "adapter_rot_args/2,down", +- "adapter_prim_to_prim/i2i", +- "adapter_prim_to_prim/l2i", +- "adapter_prim_to_prim/d2f", +- "adapter_prim_to_prim/i2l", +- "adapter_prim_to_prim/f2d", +- "adapter_ref_to_prim/unboxi", +- "adapter_ref_to_prim/unboxl", +- +- // return value handlers for collect/filter/fold adapters: +- "return/ref", +- "return/int", +- "return/long", +- "return/float", +- "return/double", +- "return/void", +- "return/S0/ref", +- "return/S1/ref", +- "return/S2/ref", +- "return/S3/ref", +- "return/S4/ref", +- "return/S5/ref", +- "return/any", +- +- // spreading (array length cases 0, 1, ...) 
+- "adapter_spread/0", +- "adapter_spread/1/ref", +- "adapter_spread/2/ref", +- "adapter_spread/3/ref", +- "adapter_spread/4/ref", +- "adapter_spread/5/ref", +- "adapter_spread/ref", +- "adapter_spread/byte", +- "adapter_spread/char", +- "adapter_spread/short", +- "adapter_spread/int", +- "adapter_spread/long", +- "adapter_spread/float", +- "adapter_spread/double", +- +- // blocking filter/collect conversions: +- "adapter_collect/ref", +- "adapter_collect/int", +- "adapter_collect/long", +- "adapter_collect/float", +- "adapter_collect/double", +- "adapter_collect/void", +- "adapter_collect/0/ref", +- "adapter_collect/1/ref", +- "adapter_collect/2/ref", +- "adapter_collect/3/ref", +- "adapter_collect/4/ref", +- "adapter_collect/5/ref", +- "adapter_filter/S0/ref", +- "adapter_filter/S1/ref", +- "adapter_filter/S2/ref", +- "adapter_filter/S3/ref", +- "adapter_filter/S4/ref", +- "adapter_filter/S5/ref", +- "adapter_collect/2/S0/ref", +- "adapter_collect/2/S1/ref", +- "adapter_collect/2/S2/ref", +- "adapter_collect/2/S3/ref", +- "adapter_collect/2/S4/ref", +- "adapter_collect/2/S5/ref", +- +- // blocking fold conversions: +- "adapter_fold/ref", +- "adapter_fold/int", +- "adapter_fold/long", +- "adapter_fold/float", +- "adapter_fold/double", +- "adapter_fold/void", +- "adapter_fold/1/ref", +- "adapter_fold/2/ref", +- "adapter_fold/3/ref", +- "adapter_fold/4/ref", +- "adapter_fold/5/ref", +- +- "adapter_opt_profiling", +- +- NULL +-}; +- +-// Adapters. 
+ MethodHandlesAdapterBlob* MethodHandles::_adapter_code = NULL; + +-jobject MethodHandles::_raise_exception_method; +- +-address MethodHandles::_adapter_return_handlers[CONV_TYPE_MASK+1]; +- +-#ifdef ASSERT +-bool MethodHandles::spot_check_entry_names() { +- assert(!strcmp(entry_name(_invokestatic_mh), "invokestatic"), ""); +- assert(!strcmp(entry_name(_bound_ref_mh), "bound_ref"), ""); +- assert(!strcmp(entry_name(_adapter_retype_only), "adapter_retype_only"), ""); +- assert(!strcmp(entry_name(_adapter_fold_args), "adapter_fold_args"), ""); +- assert(!strcmp(entry_name(_adapter_opt_unboxi), "adapter_ref_to_prim/unboxi"), ""); +- assert(!strcmp(entry_name(_adapter_opt_spread_char), "adapter_spread/char"), ""); +- assert(!strcmp(entry_name(_adapter_opt_spread_double), "adapter_spread/double"), ""); +- assert(!strcmp(entry_name(_adapter_opt_collect_int), "adapter_collect/int"), ""); +- assert(!strcmp(entry_name(_adapter_opt_collect_0_ref), "adapter_collect/0/ref"), ""); +- assert(!strcmp(entry_name(_adapter_opt_collect_2_S3_ref), "adapter_collect/2/S3/ref"), ""); +- assert(!strcmp(entry_name(_adapter_opt_filter_S5_ref), "adapter_filter/S5/ref"), ""); +- assert(!strcmp(entry_name(_adapter_opt_fold_3_ref), "adapter_fold/3/ref"), ""); +- assert(!strcmp(entry_name(_adapter_opt_fold_void), "adapter_fold/void"), ""); +- return true; +-} +-#endif +- +- + //------------------------------------------------------------------------------ + // MethodHandles::generate_adapters + // +@@ -216,36 +80,20 @@ + // + void MethodHandlesAdapterGenerator::generate() { + // Generate generic method handle adapters. 
+- for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST; +- ek < MethodHandles::_EK_LIMIT; +- ek = MethodHandles::EntryKind(1 + (int)ek)) { +- if (MethodHandles::ek_supported(ek)) { +- StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek)); +- MethodHandles::generate_method_handle_stub(_masm, ek); ++ // Generate interpreter entries ++ for (Interpreter::MethodKind mk = Interpreter::method_handle_invoke_FIRST; ++ mk <= Interpreter::method_handle_invoke_LAST; ++ mk = Interpreter::MethodKind(1 + (int)mk)) { ++ vmIntrinsics::ID iid = Interpreter::method_handle_intrinsic(mk); ++ StubCodeMark mark(this, "MethodHandle::interpreter_entry", vmIntrinsics::name_at(iid)); ++ address entry = MethodHandles::generate_method_handle_interpreter_entry(_masm, iid); ++ if (entry != NULL) { ++ Interpreter::set_entry_for_kind(mk, entry); + } ++ // If the entry is not set, it will throw AbstractMethodError. + } + } + +- +-//------------------------------------------------------------------------------ +-// MethodHandles::ek_supported +-// +-bool MethodHandles::ek_supported(MethodHandles::EntryKind ek) { +- MethodHandles::EntryKind ek_orig = MethodHandles::ek_original_kind(ek); +- switch (ek_orig) { +- case _adapter_unused_13: +- return false; // not defined yet +- case _adapter_prim_to_ref: +- return conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF); +- case _adapter_collect_args: +- return conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS); +- case _adapter_fold_args: +- return conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS); +- } +- return true; +-} +- +- + void MethodHandles::set_enabled(bool z) { + if (_enabled != z) { + guarantee(z && EnableInvokeDynamic, "can only enable once, and only if -XX:+EnableInvokeDynamic"); +@@ -253,217 +101,6 @@ + } + } + +-// Note: A method which does not have a TRAPS argument cannot block in the GC +-// or throw exceptions. 
Such methods are used in this file to do something quick +-// and local, like parse a data structure. For speed, such methods work on plain +-// oops, not handles. Trapping methods uniformly operate on handles. +- +-methodHandle MethodHandles::decode_vmtarget(oop vmtarget, int vmindex, oop mtype, +- KlassHandle& receiver_limit_result, int& decode_flags_result) { +- if (vmtarget == NULL) return methodHandle(); +- assert(methodOopDesc::nonvirtual_vtable_index < 0, "encoding"); +- if (vmindex < 0) { +- // this DMH performs no dispatch; it is directly bound to a methodOop +- // A MemberName may either be directly bound to a methodOop, +- // or it may use the klass/index form; both forms mean the same thing. +- methodOop m = decode_methodOop(methodOop(vmtarget), decode_flags_result); +- if ((decode_flags_result & _dmf_has_receiver) != 0 +- && java_lang_invoke_MethodType::is_instance(mtype)) { +- // Extract receiver type restriction from mtype.ptypes[0]. +- objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(mtype); +- oop ptype0 = (ptypes == NULL || ptypes->length() < 1) ? 
oop(NULL) : ptypes->obj_at(0); +- if (java_lang_Class::is_instance(ptype0)) +- receiver_limit_result = java_lang_Class::as_klassOop(ptype0); +- } +- if (vmindex == methodOopDesc::nonvirtual_vtable_index) { +- // this DMH can be an "invokespecial" version +- decode_flags_result &= ~_dmf_does_dispatch; +- } else { +- assert(vmindex == methodOopDesc::invalid_vtable_index, "random vmindex?"); +- } +- return m; +- } else { +- assert(vmtarget->is_klass(), "must be class or interface"); +- decode_flags_result |= MethodHandles::_dmf_does_dispatch; +- decode_flags_result |= MethodHandles::_dmf_has_receiver; +- receiver_limit_result = (klassOop)vmtarget; +- Klass* tk = Klass::cast((klassOop)vmtarget); +- if (tk->is_interface()) { +- // an itable linkage is +- decode_flags_result |= MethodHandles::_dmf_from_interface; +- return klassItable::method_for_itable_index((klassOop)vmtarget, vmindex); +- } else { +- if (!tk->oop_is_instance()) +- tk = instanceKlass::cast(SystemDictionary::Object_klass()); +- return ((instanceKlass*)tk)->method_at_vtable(vmindex); +- } +- } +-} +- +-// MemberName and DirectMethodHandle have the same linkage to the JVM internals. +-// (MemberName is the non-operational name used for queries and setup.) 
+- +-methodHandle MethodHandles::decode_DirectMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) { +- oop vmtarget = java_lang_invoke_DirectMethodHandle::vmtarget(mh); +- int vmindex = java_lang_invoke_DirectMethodHandle::vmindex(mh); +- oop mtype = java_lang_invoke_DirectMethodHandle::type(mh); +- return decode_vmtarget(vmtarget, vmindex, mtype, receiver_limit_result, decode_flags_result); +-} +- +-methodHandle MethodHandles::decode_BoundMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) { +- assert(java_lang_invoke_BoundMethodHandle::is_instance(mh), ""); +- assert(mh->klass() != SystemDictionary::AdapterMethodHandle_klass(), ""); +- for (oop bmh = mh;;) { +- // Bound MHs can be stacked to bind several arguments. +- oop target = java_lang_invoke_MethodHandle::vmtarget(bmh); +- if (target == NULL) return methodHandle(); +- decode_flags_result |= MethodHandles::_dmf_binds_argument; +- klassOop tk = target->klass(); +- if (tk == SystemDictionary::BoundMethodHandle_klass()) { +- bmh = target; +- continue; +- } else { +- if (java_lang_invoke_MethodHandle::is_subclass(tk)) { +- //assert(tk == SystemDictionary::DirectMethodHandle_klass(), "end of BMH chain must be DMH"); +- return decode_MethodHandle(target, receiver_limit_result, decode_flags_result); +- } else { +- // Optimized case: binding a receiver to a non-dispatched DMH +- // short-circuits directly to the methodOop. +- // (It might be another argument besides a receiver also.) 
+- assert(target->is_method(), "must be a simple method"); +- decode_flags_result |= MethodHandles::_dmf_binds_method; +- methodOop m = (methodOop) target; +- if (!m->is_static()) +- decode_flags_result |= MethodHandles::_dmf_has_receiver; +- return m; +- } +- } +- } +-} +- +-methodHandle MethodHandles::decode_AdapterMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) { +- assert(mh->klass() == SystemDictionary::AdapterMethodHandle_klass(), ""); +- for (oop amh = mh;;) { +- // Adapter MHs can be stacked to convert several arguments. +- int conv_op = adapter_conversion_op(java_lang_invoke_AdapterMethodHandle::conversion(amh)); +- decode_flags_result |= (_dmf_adapter_lsb << conv_op) & _DMF_ADAPTER_MASK; +- oop target = java_lang_invoke_MethodHandle::vmtarget(amh); +- if (target == NULL) return methodHandle(); +- klassOop tk = target->klass(); +- if (tk == SystemDictionary::AdapterMethodHandle_klass()) { +- amh = target; +- continue; +- } else { +- // must be a BMH (which will bind some more arguments) or a DMH (for the final call) +- return MethodHandles::decode_MethodHandle(target, receiver_limit_result, decode_flags_result); +- } +- } +-} +- +-methodHandle MethodHandles::decode_MethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) { +- if (mh == NULL) return methodHandle(); +- klassOop mhk = mh->klass(); +- assert(java_lang_invoke_MethodHandle::is_subclass(mhk), "must be a MethodHandle"); +- if (mhk == SystemDictionary::DirectMethodHandle_klass()) { +- return decode_DirectMethodHandle(mh, receiver_limit_result, decode_flags_result); +- } else if (mhk == SystemDictionary::BoundMethodHandle_klass()) { +- return decode_BoundMethodHandle(mh, receiver_limit_result, decode_flags_result); +- } else if (mhk == SystemDictionary::AdapterMethodHandle_klass()) { +- return decode_AdapterMethodHandle(mh, receiver_limit_result, decode_flags_result); +- } else if (java_lang_invoke_BoundMethodHandle::is_subclass(mhk)) { +- 
// could be a JavaMethodHandle (but not an adapter MH) +- return decode_BoundMethodHandle(mh, receiver_limit_result, decode_flags_result); +- } else { +- assert(false, "cannot parse this MH"); +- return methodHandle(); // random MH? +- } +-} +- +-methodOop MethodHandles::decode_methodOop(methodOop m, int& decode_flags_result) { +- assert(m->is_method(), ""); +- if (m->is_static()) { +- // check that signature begins '(L' or '([' (not '(I', '()', etc.) +- Symbol* sig = m->signature(); +- BasicType recv_bt = char2type(sig->byte_at(1)); +- // Note: recv_bt might be T_ILLEGAL if byte_at(2) is ')' +- assert(sig->byte_at(0) == '(', "must be method sig"); +-// if (recv_bt == T_OBJECT || recv_bt == T_ARRAY) +-// decode_flags_result |= _dmf_has_receiver; +- } else { +- // non-static method +- decode_flags_result |= _dmf_has_receiver; +- if (!m->can_be_statically_bound() && !m->is_initializer()) { +- decode_flags_result |= _dmf_does_dispatch; +- if (Klass::cast(m->method_holder())->is_interface()) +- decode_flags_result |= _dmf_from_interface; +- } +- } +- return m; +-} +- +- +-// A trusted party is handing us a cookie to determine a method. +-// Let's boil it down to the method oop they really want. +-methodHandle MethodHandles::decode_method(oop x, KlassHandle& receiver_limit_result, int& decode_flags_result) { +- decode_flags_result = 0; +- receiver_limit_result = KlassHandle(); +- klassOop xk = x->klass(); +- if (xk == Universe::methodKlassObj()) { +- return decode_methodOop((methodOop) x, decode_flags_result); +- } else if (xk == SystemDictionary::MemberName_klass()) { +- // Note: This only works if the MemberName has already been resolved. 
+- return decode_MemberName(x, receiver_limit_result, decode_flags_result); +- } else if (java_lang_invoke_MethodHandle::is_subclass(xk)) { +- return decode_MethodHandle(x, receiver_limit_result, decode_flags_result); +- } else if (xk == SystemDictionary::reflect_Method_klass()) { +- oop clazz = java_lang_reflect_Method::clazz(x); +- int slot = java_lang_reflect_Method::slot(x); +- klassOop k = java_lang_Class::as_klassOop(clazz); +- if (k != NULL && Klass::cast(k)->oop_is_instance()) +- return decode_methodOop(instanceKlass::cast(k)->method_with_idnum(slot), +- decode_flags_result); +- } else if (xk == SystemDictionary::reflect_Constructor_klass()) { +- oop clazz = java_lang_reflect_Constructor::clazz(x); +- int slot = java_lang_reflect_Constructor::slot(x); +- klassOop k = java_lang_Class::as_klassOop(clazz); +- if (k != NULL && Klass::cast(k)->oop_is_instance()) +- return decode_methodOop(instanceKlass::cast(k)->method_with_idnum(slot), +- decode_flags_result); +- } else { +- // unrecognized object +- assert(!x->is_method(), "already checked"); +- assert(!java_lang_invoke_MemberName::is_instance(x), "already checked"); +- } +- return methodHandle(); +-} +- +- +-int MethodHandles::decode_MethodHandle_stack_pushes(oop mh) { +- if (mh->klass() == SystemDictionary::DirectMethodHandle_klass()) +- return 0; // no push/pop +- int this_vmslots = java_lang_invoke_MethodHandle::vmslots(mh); +- int last_vmslots = 0; +- oop last_mh = mh; +- for (;;) { +- oop target = java_lang_invoke_MethodHandle::vmtarget(last_mh); +- if (target->klass() == SystemDictionary::DirectMethodHandle_klass()) { +- last_vmslots = java_lang_invoke_MethodHandle::vmslots(target); +- break; +- } else if (!java_lang_invoke_MethodHandle::is_instance(target)) { +- // might be klass or method +- assert(target->is_method(), "must get here with a direct ref to method"); +- last_vmslots = methodOop(target)->size_of_parameters(); +- break; +- } +- last_mh = target; +- } +- // If I am called with fewer VM 
slots than my ultimate callee, +- // it must be that I push the additionally needed slots. +- // Likewise if am called with more VM slots, I will pop them. +- return (last_vmslots - this_vmslots); +-} +- +- + // MemberName support + + // import java_lang_invoke_MemberName.* +@@ -472,10 +109,11 @@ + IS_CONSTRUCTOR = java_lang_invoke_MemberName::MN_IS_CONSTRUCTOR, + IS_FIELD = java_lang_invoke_MemberName::MN_IS_FIELD, + IS_TYPE = java_lang_invoke_MemberName::MN_IS_TYPE, ++ REFERENCE_KIND_SHIFT = java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT, ++ REFERENCE_KIND_MASK = java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK, + SEARCH_SUPERCLASSES = java_lang_invoke_MemberName::MN_SEARCH_SUPERCLASSES, + SEARCH_INTERFACES = java_lang_invoke_MemberName::MN_SEARCH_INTERFACES, +- ALL_KINDS = IS_METHOD | IS_CONSTRUCTOR | IS_FIELD | IS_TYPE, +- VM_INDEX_UNINITIALIZED = java_lang_invoke_MemberName::VM_INDEX_UNINITIALIZED ++ ALL_KINDS = IS_METHOD | IS_CONSTRUCTOR | IS_FIELD | IS_TYPE + }; + + Handle MethodHandles::new_MemberName(TRAPS) { +@@ -485,72 +123,265 @@ + return Handle(THREAD, k->allocate_instance(THREAD)); + } + +-void MethodHandles::init_MemberName(oop mname_oop, oop target_oop) { +- if (target_oop->klass() == SystemDictionary::reflect_Field_klass()) { ++oop MethodHandles::init_MemberName(oop mname_oop, oop target_oop) { ++ klassOop target_klass = target_oop->klass(); ++ if (target_klass == SystemDictionary::reflect_Field_klass()) { + oop clazz = java_lang_reflect_Field::clazz(target_oop); // fd.field_holder() + int slot = java_lang_reflect_Field::slot(target_oop); // fd.index() + int mods = java_lang_reflect_Field::modifiers(target_oop); ++ oop type = java_lang_reflect_Field::type(target_oop); ++ oop name = java_lang_reflect_Field::name(target_oop); + klassOop k = java_lang_Class::as_klassOop(clazz); +- int offset = instanceKlass::cast(k)->field_offset(slot); +- init_MemberName(mname_oop, k, accessFlags_from(mods), offset); +- } else { +- KlassHandle 
receiver_limit; int decode_flags = 0; +- methodHandle m = MethodHandles::decode_method(target_oop, receiver_limit, decode_flags); +- bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0); +- init_MemberName(mname_oop, m(), do_dispatch); ++ intptr_t offset = instanceKlass::cast(k)->field_offset(slot); ++ return init_field_MemberName(mname_oop, k, accessFlags_from(mods), type, name, offset); ++ } else if (target_klass == SystemDictionary::reflect_Method_klass()) { ++ oop clazz = java_lang_reflect_Method::clazz(target_oop); ++ int slot = java_lang_reflect_Method::slot(target_oop); ++ klassOop k = java_lang_Class::as_klassOop(clazz); ++ if (k != NULL && Klass::cast(k)->oop_is_instance()) { ++ methodOop m = instanceKlass::cast(k)->method_with_idnum(slot); ++ return init_method_MemberName(mname_oop, m, true, k); ++ } ++ } else if (target_klass == SystemDictionary::reflect_Constructor_klass()) { ++ oop clazz = java_lang_reflect_Constructor::clazz(target_oop); ++ int slot = java_lang_reflect_Constructor::slot(target_oop); ++ klassOop k = java_lang_Class::as_klassOop(clazz); ++ if (k != NULL && Klass::cast(k)->oop_is_instance()) { ++ methodOop m = instanceKlass::cast(k)->method_with_idnum(slot); ++ return init_method_MemberName(mname_oop, m, false, k); ++ } ++ } else if (target_klass == SystemDictionary::MemberName_klass()) { ++ // Note: This only works if the MemberName has already been resolved. 
++ oop clazz = java_lang_invoke_MemberName::clazz(target_oop); ++ int flags = java_lang_invoke_MemberName::flags(target_oop); ++ oop vmtarget = java_lang_invoke_MemberName::vmtarget(target_oop); ++ intptr_t vmindex = java_lang_invoke_MemberName::vmindex(target_oop); ++ klassOop k = java_lang_Class::as_klassOop(clazz); ++ int ref_kind = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK; ++ if (vmtarget == NULL) return NULL; // not resolved ++ if ((flags & IS_FIELD) != 0) { ++ assert(vmtarget->is_klass(), "field vmtarget is klassOop"); ++ int basic_mods = (ref_kind_is_static(ref_kind) ? JVM_ACC_STATIC : 0); ++ // FIXME: how does k (receiver_limit) contribute? ++ return init_field_MemberName(mname_oop, klassOop(vmtarget), accessFlags_from(basic_mods), NULL, NULL, vmindex); ++ } else if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) != 0) { ++ assert(vmtarget->is_method(), "method or constructor vmtarget is methodOop"); ++ return init_method_MemberName(mname_oop, methodOop(vmtarget), ref_kind_does_dispatch(ref_kind), k); ++ } else { ++ return NULL; ++ } + } ++ return NULL; + } + +-void MethodHandles::init_MemberName(oop mname_oop, methodOop m, bool do_dispatch) { +- int flags = ((m->is_initializer() ? 
IS_CONSTRUCTOR : IS_METHOD) +- | (jushort)( m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS )); +- oop vmtarget = m; +- int vmindex = methodOopDesc::invalid_vtable_index; // implies no info yet +- if (!do_dispatch || (flags & IS_CONSTRUCTOR) || m->can_be_statically_bound()) +- vmindex = methodOopDesc::nonvirtual_vtable_index; // implies never any dispatch +- assert(vmindex != VM_INDEX_UNINITIALIZED, "Java sentinel value"); ++oop MethodHandles::init_method_MemberName(oop mname_oop, methodOop m, bool do_dispatch, ++ klassOop receiver_limit) { ++ AccessFlags mods = m->access_flags(); ++ int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS ); ++ int vmindex = methodOopDesc::nonvirtual_vtable_index; // implies never any dispatch ++ klassOop mklass = m->method_holder(); ++ if (receiver_limit == NULL) ++ receiver_limit = mklass; ++ if (m->is_initializer()) { ++ flags |= IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT); ++ } else if (mods.is_static()) { ++ flags |= IS_METHOD | (JVM_REF_invokeStatic << REFERENCE_KIND_SHIFT); ++ } else if (receiver_limit != mklass && ++ !Klass::cast(receiver_limit)->is_subtype_of(mklass)) { ++ return NULL; // bad receiver limit ++ } else if (Klass::cast(receiver_limit)->is_interface() && ++ Klass::cast(mklass)->is_interface()) { ++ flags |= IS_METHOD | (JVM_REF_invokeInterface << REFERENCE_KIND_SHIFT); ++ receiver_limit = mklass; // ignore passed-in limit; interfaces are interconvertible ++ vmindex = klassItable::compute_itable_index(m); ++ } else if (mklass != receiver_limit && Klass::cast(mklass)->is_interface()) { ++ flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT); ++ // it is a miranda method, so m->vtable_index is not what we want ++ ResourceMark rm; ++ klassVtable* vt = instanceKlass::cast(receiver_limit)->vtable(); ++ vmindex = vt->index_of_miranda(m->name(), m->signature()); ++ } else if (!do_dispatch || m->can_be_statically_bound()) { ++ flags |= IS_METHOD | 
(JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT); ++ } else { ++ flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT); ++ vmindex = m->vtable_index(); ++ } ++ ++ java_lang_invoke_MemberName::set_flags(mname_oop, flags); ++ java_lang_invoke_MemberName::set_vmtarget(mname_oop, m); ++ java_lang_invoke_MemberName::set_vmindex(mname_oop, vmindex); // vtable/itable index ++ java_lang_invoke_MemberName::set_clazz(mname_oop, Klass::cast(receiver_limit)->java_mirror()); ++ // Note: name and type can be lazily computed by resolve_MemberName, ++ // if Java code needs them as resolved String and MethodType objects. ++ // The clazz must be eagerly stored, because it provides a GC ++ // root to help keep alive the methodOop. ++ // If relevant, the vtable or itable value is stored as vmindex. ++ // This is done eagerly, since it is readily available without ++ // constructing any new objects. ++ // TO DO: maybe intern mname_oop ++ return mname_oop; ++} ++ ++Handle MethodHandles::init_method_MemberName(oop mname_oop, CallInfo& info, TRAPS) { ++ Handle empty; ++ if (info.resolved_appendix().not_null()) { ++ // The resolved MemberName must not be accompanied by an appendix argument, ++ // since there is no way to bind this value into the MemberName. ++ // Caller is responsible to prevent this from happening. ++ THROW_MSG_(vmSymbols::java_lang_InternalError(), "appendix", empty); ++ } ++ methodHandle m = info.resolved_method(); ++ KlassHandle defc = info.resolved_klass(); ++ int vmindex = -1; ++ if (defc->is_interface() && Klass::cast(m->method_holder())->is_interface()) { ++ // LinkResolver does not report itable indexes! (fix this?) ++ vmindex = klassItable::compute_itable_index(m()); ++ } else if (m->can_be_statically_bound()) { ++ // LinkResolver reports vtable index even for final methods! 
++ vmindex = methodOopDesc::nonvirtual_vtable_index; ++ } else { ++ vmindex = info.vtable_index(); ++ } ++ oop res = init_method_MemberName(mname_oop, m(), (vmindex >= 0), defc()); ++ assert(res == NULL || (java_lang_invoke_MemberName::vmindex(res) == vmindex), ""); ++ return Handle(THREAD, res); ++} ++ ++oop MethodHandles::init_field_MemberName(oop mname_oop, klassOop field_holder, ++ AccessFlags mods, oop type, oop name, ++ intptr_t offset, bool is_setter) { ++ int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS ); ++ flags |= IS_FIELD | ((mods.is_static() ? JVM_REF_getStatic : JVM_REF_getField) << REFERENCE_KIND_SHIFT); ++ if (is_setter) flags += ((JVM_REF_putField - JVM_REF_getField) << REFERENCE_KIND_SHIFT); ++ oop vmtarget = field_holder; ++ int vmindex = offset; // determines the field uniquely when combined with static bit ++ java_lang_invoke_MemberName::set_flags(mname_oop, flags); + java_lang_invoke_MemberName::set_vmtarget(mname_oop, vmtarget); + java_lang_invoke_MemberName::set_vmindex(mname_oop, vmindex); +- java_lang_invoke_MemberName::set_flags(mname_oop, flags); +- java_lang_invoke_MemberName::set_clazz(mname_oop, Klass::cast(m->method_holder())->java_mirror()); ++ java_lang_invoke_MemberName::set_clazz(mname_oop, Klass::cast(field_holder)->java_mirror()); ++ if (name != NULL) ++ java_lang_invoke_MemberName::set_name(mname_oop, name); ++ if (type != NULL) ++ java_lang_invoke_MemberName::set_type(mname_oop, type); ++ // Note: name and type can be lazily computed by resolve_MemberName, ++ // if Java code needs them as resolved String and Class objects. ++ // Note that the incoming type oop might be pre-resolved (non-null). ++ // The base clazz and field offset (vmindex) must be eagerly stored, ++ // because they unambiguously identify the field. ++ // Although the fieldDescriptor::_index would also identify the field, ++ // we do not use it, because it is harder to decode. 
++ // TO DO: maybe intern mname_oop ++ return mname_oop; + } + +-void MethodHandles::init_MemberName(oop mname_oop, klassOop field_holder, AccessFlags mods, int offset) { +- int flags = (IS_FIELD | (jushort)( mods.as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS )); +- oop vmtarget = field_holder; +- int vmindex = offset; // determines the field uniquely when combined with static bit +- assert(vmindex != VM_INDEX_UNINITIALIZED, "bad alias on vmindex"); +- java_lang_invoke_MemberName::set_vmtarget(mname_oop, vmtarget); +- java_lang_invoke_MemberName::set_vmindex(mname_oop, vmindex); +- java_lang_invoke_MemberName::set_flags(mname_oop, flags); +- java_lang_invoke_MemberName::set_clazz(mname_oop, Klass::cast(field_holder)->java_mirror()); ++Handle MethodHandles::init_field_MemberName(oop mname_oop, FieldAccessInfo& info, TRAPS) { ++ return Handle(); ++#if 0 ++ KlassHandle field_holder = info.klass(); ++ intptr_t field_offset = info.field_offset(); ++ return init_field_MemberName(mname_oop, field_holder(), ++ info.access_flags(), ++ type, name, ++ field_offset, false /*is_setter*/); ++#endif + } + + +-methodHandle MethodHandles::decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result) { +- methodHandle empty; +- int flags = java_lang_invoke_MemberName::flags(mname); +- if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0) return empty; // not invocable +- oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname); +- int vmindex = java_lang_invoke_MemberName::vmindex(mname); +- if (vmindex == VM_INDEX_UNINITIALIZED) return empty; // not resolved +- methodHandle m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result); +- oop clazz = java_lang_invoke_MemberName::clazz(mname); +- if (clazz != NULL && java_lang_Class::is_instance(clazz)) { +- klassOop klass = java_lang_Class::as_klassOop(clazz); +- if (klass != NULL) receiver_limit_result = klass; +- } +- return m; ++// JVM 2.9 Special Methods: ++// A method is 
signature polymorphic if and only if all of the following conditions hold : ++// * It is declared in the java.lang.invoke.MethodHandle class. ++// * It has a single formal parameter of type Object[]. ++// * It has a return type of Object. ++// * It has the ACC_VARARGS and ACC_NATIVE flags set. ++bool MethodHandles::is_method_handle_invoke_name(klassOop klass, Symbol* name) { ++ if (klass == NULL) ++ return false; ++ // The following test will fail spuriously during bootstrap of MethodHandle itself: ++ // if (klass != SystemDictionary::MethodHandle_klass()) ++ // Test the name instead: ++ if (Klass::cast(klass)->name() != vmSymbols::java_lang_invoke_MethodHandle()) ++ return false; ++ Symbol* poly_sig = vmSymbols::object_array_object_signature(); ++ methodOop m = instanceKlass::cast(klass)->find_method(name, poly_sig); ++ if (m == NULL) return false; ++ int required = JVM_ACC_NATIVE | JVM_ACC_VARARGS; ++ int flags = m->access_flags().as_int(); ++ return (flags & required) == required; + } + ++ ++Symbol* MethodHandles::signature_polymorphic_intrinsic_name(vmIntrinsics::ID iid) { ++ assert(is_signature_polymorphic_intrinsic(iid), err_msg("iid=%d", iid)); ++ switch (iid) { ++ case vmIntrinsics::_invokeBasic: return vmSymbols::invokeBasic_name(); ++ case vmIntrinsics::_linkToVirtual: return vmSymbols::linkToVirtual_name(); ++ case vmIntrinsics::_linkToStatic: return vmSymbols::linkToStatic_name(); ++ case vmIntrinsics::_linkToSpecial: return vmSymbols::linkToSpecial_name(); ++ case vmIntrinsics::_linkToInterface: return vmSymbols::linkToInterface_name(); ++ } ++ assert(false, ""); ++ return 0; ++} ++ ++int MethodHandles::signature_polymorphic_intrinsic_ref_kind(vmIntrinsics::ID iid) { ++ switch (iid) { ++ case vmIntrinsics::_invokeBasic: return 0; ++ case vmIntrinsics::_linkToVirtual: return JVM_REF_invokeVirtual; ++ case vmIntrinsics::_linkToStatic: return JVM_REF_invokeStatic; ++ case vmIntrinsics::_linkToSpecial: return JVM_REF_invokeSpecial; ++ case 
vmIntrinsics::_linkToInterface: return JVM_REF_invokeInterface; ++ } ++ assert(false, err_msg("iid=%d", iid)); ++ return 0; ++} ++ ++vmIntrinsics::ID MethodHandles::signature_polymorphic_name_id(Symbol* name) { ++ vmSymbols::SID name_id = vmSymbols::find_sid(name); ++ switch (name_id) { ++ // The ID _invokeGeneric stands for all non-static signature-polymorphic methods, except built-ins. ++ case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name): return vmIntrinsics::_invokeGeneric; ++ // The only built-in non-static signature-polymorphic method is MethodHandle.invokeBasic: ++ case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeBasic_name): return vmIntrinsics::_invokeBasic; ++ ++ // There is one static signature-polymorphic method for each JVM invocation mode. ++ case vmSymbols::VM_SYMBOL_ENUM_NAME(linkToVirtual_name): return vmIntrinsics::_linkToVirtual; ++ case vmSymbols::VM_SYMBOL_ENUM_NAME(linkToStatic_name): return vmIntrinsics::_linkToStatic; ++ case vmSymbols::VM_SYMBOL_ENUM_NAME(linkToSpecial_name): return vmIntrinsics::_linkToSpecial; ++ case vmSymbols::VM_SYMBOL_ENUM_NAME(linkToInterface_name): return vmIntrinsics::_linkToInterface; ++ } ++ ++ // Cover the case of invokeExact and any future variants of invokeFoo. ++ klassOop mh_klass = SystemDictionary::well_known_klass( ++ SystemDictionary::WK_KLASS_ENUM_NAME(MethodHandle_klass) ); ++ if (mh_klass != NULL && is_method_handle_invoke_name(mh_klass, name)) ++ return vmIntrinsics::_invokeGeneric; ++ ++ // Note: The pseudo-intrinsic _compiledLambdaForm is never linked against. ++ // Instead it is used to mark lambda forms bound to invokehandle or invokedynamic. 
++ return vmIntrinsics::_none; ++} ++ ++vmIntrinsics::ID MethodHandles::signature_polymorphic_name_id(klassOop klass, Symbol* name) { ++ if (klass != NULL && ++ Klass::cast(klass)->name() == vmSymbols::java_lang_invoke_MethodHandle()) { ++ vmIntrinsics::ID iid = signature_polymorphic_name_id(name); ++ if (iid != vmIntrinsics::_none) ++ return iid; ++ if (is_method_handle_invoke_name(klass, name)) ++ return vmIntrinsics::_invokeGeneric; ++ } ++ return vmIntrinsics::_none; ++} ++ ++ + // convert the external string or reflective type to an internal signature +-Symbol* MethodHandles::convert_to_signature(oop type_str, bool polymorphic, TRAPS) { ++Symbol* MethodHandles::lookup_signature(oop type_str, bool intern_if_not_found, TRAPS) { + if (java_lang_invoke_MethodType::is_instance(type_str)) { +- return java_lang_invoke_MethodType::as_signature(type_str, polymorphic, CHECK_NULL); ++ return java_lang_invoke_MethodType::as_signature(type_str, intern_if_not_found, CHECK_NULL); + } else if (java_lang_Class::is_instance(type_str)) { + return java_lang_Class::as_signature(type_str, false, CHECK_NULL); + } else if (java_lang_String::is_instance(type_str)) { +- if (polymorphic) { ++ if (intern_if_not_found) { + return java_lang_String::as_symbol(type_str, CHECK_NULL); + } else { + return java_lang_String::as_symbol_or_null(type_str); +@@ -560,91 +391,297 @@ + } + } + ++static const char OBJ_SIG[] = "Ljava/lang/Object;"; ++enum { OBJ_SIG_LEN = 18 }; ++ ++bool MethodHandles::is_basic_type_signature(Symbol* sig) { ++ assert(vmSymbols::object_signature()->utf8_length() == (int)OBJ_SIG_LEN, ""); ++ assert(vmSymbols::object_signature()->equals(OBJ_SIG), ""); ++ const int len = sig->utf8_length(); ++ for (int i = 0; i < len; i++) { ++ switch (sig->byte_at(i)) { ++ case 'L': ++ // only java/lang/Object is valid here ++ if (sig->index_of_at(i, OBJ_SIG, OBJ_SIG_LEN) != i) ++ return false; ++ i += OBJ_SIG_LEN-1; //-1 because of i++ in loop ++ continue; ++ case '(': case ')': case 'V': ++ 
case 'I': case 'J': case 'F': case 'D': ++ continue; ++ //case '[': ++ //case 'Z': case 'B': case 'C': case 'S': ++ default: ++ return false; ++ } ++ } ++ return true; ++} ++ ++Symbol* MethodHandles::lookup_basic_type_signature(Symbol* sig, bool keep_last_arg, TRAPS) { ++ Symbol* bsig = NULL; ++ if (sig == NULL) { ++ return sig; ++ } else if (is_basic_type_signature(sig)) { ++ sig->increment_refcount(); ++ return sig; // that was easy ++ } else if (sig->byte_at(0) != '(') { ++ BasicType bt = char2type(sig->byte_at(0)); ++ if (is_subword_type(bt)) { ++ bsig = vmSymbols::int_signature(); ++ } else { ++ assert(bt == T_OBJECT || bt == T_ARRAY, "is_basic_type_signature was false"); ++ bsig = vmSymbols::object_signature(); ++ } ++ } else { ++ ResourceMark rm; ++ stringStream buffer(128); ++ buffer.put('('); ++ int arg_pos = 0, keep_arg_pos = -1; ++ if (keep_last_arg) ++ keep_arg_pos = ArgumentCount(sig).size() - 1; ++ for (SignatureStream ss(sig); !ss.is_done(); ss.next()) { ++ BasicType bt = ss.type(); ++ size_t this_arg_pos = buffer.size(); ++ if (ss.at_return_type()) { ++ buffer.put(')'); ++ } ++ if (arg_pos == keep_arg_pos) { ++ buffer.write((char*) ss.raw_bytes(), ++ (int) ss.raw_length()); ++ } else if (bt == T_OBJECT || bt == T_ARRAY) { ++ buffer.write(OBJ_SIG, OBJ_SIG_LEN); ++ } else { ++ if (is_subword_type(bt)) ++ bt = T_INT; ++ buffer.put(type2char(bt)); ++ } ++ arg_pos++; ++ } ++ const char* sigstr = buffer.base(); ++ int siglen = (int) buffer.size(); ++ bsig = SymbolTable::new_symbol(sigstr, siglen, THREAD); ++ } ++ assert(is_basic_type_signature(bsig) || ++ // detune assert in case the injected argument is not a basic type: ++ keep_last_arg, ""); ++ return bsig; ++} ++ ++void MethodHandles::print_as_basic_type_signature_on(outputStream* st, ++ Symbol* sig, ++ bool keep_arrays, ++ bool keep_basic_names) { ++ st = st ? 
st : tty; ++ int len = sig->utf8_length(); ++ int array = 0; ++ bool prev_type = false; ++ for (int i = 0; i < len; i++) { ++ char ch = sig->byte_at(i); ++ switch (ch) { ++ case '(': case ')': ++ prev_type = false; ++ st->put(ch); ++ continue; ++ case '[': ++ if (!keep_basic_names && keep_arrays) ++ st->put(ch); ++ array++; ++ continue; ++ case 'L': ++ { ++ if (prev_type) st->put(','); ++ int start = i+1, slash = start; ++ while (++i < len && (ch = sig->byte_at(i)) != ';') { ++ if (ch == '/' || ch == '.' || ch == '$') slash = i+1; ++ } ++ if (slash < i) start = slash; ++ if (!keep_basic_names) { ++ st->put('L'); ++ } else { ++ for (int j = start; j < i; j++) ++ st->put(sig->byte_at(j)); ++ prev_type = true; ++ } ++ break; ++ } ++ default: ++ { ++ if (array && char2type(ch) != T_ILLEGAL && !keep_arrays) { ++ ch = '['; ++ array = 0; ++ } ++ if (prev_type) st->put(','); ++ const char* n = NULL; ++ if (keep_basic_names) ++ n = type2name(char2type(ch)); ++ if (n == NULL) { ++ // unknown letter, or we don't want to know its name ++ st->put(ch); ++ } else { ++ st->print(n); ++ prev_type = true; ++ } ++ break; ++ } ++ } ++ // Switch break goes here to take care of array suffix: ++ if (prev_type) { ++ while (array > 0) { ++ st->print("[]"); ++ --array; ++ } ++ } ++ array = 0; ++ } ++} ++ ++ ++ ++static oop object_java_mirror() { ++ return Klass::cast(SystemDictionary::Object_klass())->java_mirror(); ++} ++ ++static oop field_name_or_null(Symbol* s) { ++ if (s == NULL) return NULL; ++ return StringTable::lookup(s); ++} ++ ++static oop field_signature_type_or_null(Symbol* s) { ++ if (s == NULL) return NULL; ++ BasicType bt = FieldType::basic_type(s); ++ if (is_java_primitive(bt)) { ++ assert(s->utf8_length() == 1, ""); ++ return java_lang_Class::primitive_mirror(bt); ++ } ++ // Here are some more short cuts for common types. ++ // They are optional, since reference types can be resolved lazily. 
++ if (bt == T_OBJECT) { ++ if (s == vmSymbols::object_signature()) { ++ return object_java_mirror(); ++ } else if (s == vmSymbols::class_signature()) { ++ return Klass::cast(SystemDictionary::Class_klass())->java_mirror(); ++ } else if (s == vmSymbols::string_signature()) { ++ return Klass::cast(SystemDictionary::String_klass())->java_mirror(); ++ } else { ++ int len = s->utf8_length(); ++ if (s->byte_at(0) == 'L' && s->byte_at(len-1) == ';') { ++ TempNewSymbol cname = SymbolTable::probe((const char*)&s->bytes()[1], len-2); ++ if (cname == NULL) return NULL; ++ klassOop wkk = SystemDictionary::find_well_known_klass(cname); ++ if (wkk == NULL) return NULL; ++ return Klass::cast(wkk)->java_mirror(); ++ } ++ } ++ } ++ return NULL; ++} ++ + // An unresolved member name is a mere symbolic reference. + // Resolving it plants a vmtarget/vmindex in it, + // which refers dirctly to JVM internals. +-void MethodHandles::resolve_MemberName(Handle mname, TRAPS) { ++Handle MethodHandles::resolve_MemberName(Handle mname, TRAPS) { ++ Handle empty; + assert(java_lang_invoke_MemberName::is_instance(mname()), ""); +-#ifdef ASSERT +- // If this assert throws, renegotiate the sentinel value used by the Java code, +- // so that it is distinct from any valid vtable index value, and any special +- // values defined in methodOopDesc::VtableIndexFlag. +- // The point of the slop is to give the Java code and the JVM some room +- // to independently specify sentinel values. +- const int sentinel_slop = 10; +- const int sentinel_limit = methodOopDesc::highest_unused_vtable_index_value - sentinel_slop; +- assert(VM_INDEX_UNINITIALIZED < sentinel_limit, "Java sentinel != JVM sentinels"); +-#endif +- if (java_lang_invoke_MemberName::vmindex(mname()) != VM_INDEX_UNINITIALIZED) +- return; // already resolved ++ ++ if (java_lang_invoke_MemberName::vmtarget(mname()) != NULL) { ++ // Already resolved. 
++ DEBUG_ONLY(int vmindex = java_lang_invoke_MemberName::vmindex(mname())); ++ assert(vmindex >= methodOopDesc::nonvirtual_vtable_index, ""); ++ return mname; ++ } ++ + Handle defc_oop(THREAD, java_lang_invoke_MemberName::clazz(mname())); + Handle name_str(THREAD, java_lang_invoke_MemberName::name( mname())); + Handle type_str(THREAD, java_lang_invoke_MemberName::type( mname())); + int flags = java_lang_invoke_MemberName::flags(mname()); ++ int ref_kind = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK; ++ if (!ref_kind_is_valid(ref_kind)) { ++ THROW_MSG_(vmSymbols::java_lang_InternalError(), "obsolete MemberName format", empty); ++ } ++ ++ DEBUG_ONLY(int old_vmindex); ++ assert((old_vmindex = java_lang_invoke_MemberName::vmindex(mname())) == 0, "clean input"); + + if (defc_oop.is_null() || name_str.is_null() || type_str.is_null()) { +- THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "nothing to resolve"); ++ THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), "nothing to resolve", empty); + } + + instanceKlassHandle defc; + { + klassOop defc_klassOop = java_lang_Class::as_klassOop(defc_oop()); +- if (defc_klassOop == NULL) return; // a primitive; no resolution possible ++ if (defc_klassOop == NULL) return empty; // a primitive; no resolution possible + if (!Klass::cast(defc_klassOop)->oop_is_instance()) { +- if (!Klass::cast(defc_klassOop)->oop_is_array()) return; ++ if (!Klass::cast(defc_klassOop)->oop_is_array()) return empty; + defc_klassOop = SystemDictionary::Object_klass(); + } + defc = instanceKlassHandle(THREAD, defc_klassOop); + } + if (defc.is_null()) { +- THROW_MSG(vmSymbols::java_lang_InternalError(), "primitive class"); ++ THROW_MSG_(vmSymbols::java_lang_InternalError(), "primitive class", empty); + } +- defc->link_class(CHECK); // possible safepoint ++ defc->link_class(CHECK_(empty)); // possible safepoint + + // convert the external string name to an internal symbol + TempNewSymbol name = 
java_lang_String::as_symbol_or_null(name_str()); +- if (name == NULL) return; // no such name ++ if (name == NULL) return empty; // no such name + if (name == vmSymbols::class_initializer_name()) +- return; // illegal name ++ return empty; // illegal name + +- Handle polymorphic_method_type; +- bool polymorphic_signature = false; ++ vmIntrinsics::ID mh_invoke_id = vmIntrinsics::_none; + if ((flags & ALL_KINDS) == IS_METHOD && +- (defc() == SystemDictionary::MethodHandle_klass() && +- methodOopDesc::is_method_handle_invoke_name(name))) { +- polymorphic_signature = true; ++ (defc() == SystemDictionary::MethodHandle_klass()) && ++ (ref_kind == JVM_REF_invokeVirtual || ++ ref_kind == JVM_REF_invokeSpecial || ++ // static invocation mode is required for _linkToVirtual, etc.: ++ ref_kind == JVM_REF_invokeStatic)) { ++ vmIntrinsics::ID iid = signature_polymorphic_name_id(name); ++ if (iid != vmIntrinsics::_none && ++ ((ref_kind == JVM_REF_invokeStatic) == is_signature_polymorphic_static(iid))) { ++ // Virtual methods invoke and invokeExact, plus internal invokers like _invokeBasic. ++ // For a static reference it could an internal linkage routine like _linkToVirtual, etc. ++ mh_invoke_id = iid; ++ } + } + + // convert the external string or reflective type to an internal signature +- TempNewSymbol type = convert_to_signature(type_str(), polymorphic_signature, CHECK); +- if (java_lang_invoke_MethodType::is_instance(type_str()) && polymorphic_signature) { +- polymorphic_method_type = type_str; // preserve exactly +- } +- if (type == NULL) return; // no such signature exists in the VM ++ TempNewSymbol type = lookup_signature(type_str(), (mh_invoke_id != vmIntrinsics::_none), CHECK_(empty)); ++ if (type == NULL) return empty; // no such signature exists in the VM + + // Time to do the lookup. 
+ switch (flags & ALL_KINDS) { + case IS_METHOD: + { + CallInfo result; ++ bool do_dispatch = true; // default, neutral setting + { +- EXCEPTION_MARK; +- if ((flags & JVM_ACC_STATIC) != 0) { ++ assert(!HAS_PENDING_EXCEPTION, ""); ++ if (ref_kind == JVM_REF_invokeStatic) { ++ //do_dispatch = false; // no need, since statics are never dispatched + LinkResolver::resolve_static_call(result, ++ defc, name, type, KlassHandle(), false, false, THREAD); ++ } else if (ref_kind == JVM_REF_invokeInterface) { ++ LinkResolver::resolve_interface_call(result, Handle(), defc, + defc, name, type, KlassHandle(), false, false, THREAD); +- } else if (defc->is_interface()) { +- LinkResolver::resolve_interface_call(result, Handle(), defc, ++ } else if (mh_invoke_id != vmIntrinsics::_none) { ++ assert(!is_signature_polymorphic_static(mh_invoke_id), ""); ++ LinkResolver::resolve_handle_call(result, ++ defc, name, type, KlassHandle(), THREAD); ++ } else if (ref_kind == JVM_REF_invokeSpecial) { ++ do_dispatch = false; // force non-virtual linkage ++ LinkResolver::resolve_special_call(result, ++ defc, name, type, KlassHandle(), false, THREAD); ++ } else if (ref_kind == JVM_REF_invokeVirtual) { ++ LinkResolver::resolve_virtual_call(result, Handle(), defc, + defc, name, type, KlassHandle(), false, false, THREAD); + } else { +- LinkResolver::resolve_virtual_call(result, Handle(), defc, +- defc, name, type, KlassHandle(), false, false, THREAD); ++ assert(false, err_msg("ref_kind=%d", ref_kind)); + } + if (HAS_PENDING_EXCEPTION) { +- CLEAR_PENDING_EXCEPTION; +- break; // go to second chance ++ return empty; + } + } +- methodHandle m = result.resolved_method(); ++ /* + KlassHandle mklass = m->method_holder(); + KlassHandle receiver_limit = result.resolved_klass(); + if (receiver_limit.is_null() || +@@ -652,37 +689,15 @@ + receiver_limit->is_interface() && mklass->is_interface()) { + receiver_limit = mklass; + } +- oop vmtarget = NULL; +- int vmindex = methodOopDesc::nonvirtual_vtable_index; +- if 
(defc->is_interface()) { +- vmindex = klassItable::compute_itable_index(m()); +- assert(vmindex >= 0, ""); +- } else if (result.has_vtable_index()) { +- vmindex = result.vtable_index(); +- assert(vmindex >= 0, ""); +- } +- assert(vmindex != VM_INDEX_UNINITIALIZED, ""); +- if (vmindex < 0) { +- assert(result.is_statically_bound(), ""); +- vmtarget = m(); +- } else { +- vmtarget = result.resolved_klass()->as_klassOop(); +- } +- int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS); +- java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget); +- java_lang_invoke_MemberName::set_vmindex(mname(), vmindex); +- java_lang_invoke_MemberName::set_modifiers(mname(), mods); + java_lang_invoke_MemberName::set_clazz(mname(), receiver_limit->java_mirror()); +- DEBUG_ONLY(KlassHandle junk1; int junk2); +- assert(decode_MemberName(mname(), junk1, junk2) == result.resolved_method(), +- "properly stored for later decoding"); +- return; ++ */ ++ return init_method_MemberName(mname(), result, THREAD); + } + case IS_CONSTRUCTOR: + { + CallInfo result; + { +- EXCEPTION_MARK; ++ assert(!HAS_PENDING_EXCEPTION, ""); + if (name == vmSymbols::object_initializer_name()) { + LinkResolver::resolve_special_call(result, + defc, name, type, KlassHandle(), false, THREAD); +@@ -690,22 +705,11 @@ + break; // will throw after end of switch + } + if (HAS_PENDING_EXCEPTION) { +- CLEAR_PENDING_EXCEPTION; +- return; ++ return empty; + } + } + assert(result.is_statically_bound(), ""); +- methodHandle m = result.resolved_method(); +- oop vmtarget = m(); +- int vmindex = methodOopDesc::nonvirtual_vtable_index; +- int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS); +- java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget); +- java_lang_invoke_MemberName::set_vmindex(mname(), vmindex); +- java_lang_invoke_MemberName::set_modifiers(mname(), mods); +- DEBUG_ONLY(KlassHandle junk1; int junk2); +- assert(decode_MemberName(mname(), junk1, junk2) == 
result.resolved_method(), +- "properly stored for later decoding"); +- return; ++ return init_method_MemberName(mname(), result, THREAD); + } + case IS_FIELD: + { +@@ -713,54 +717,20 @@ + fieldDescriptor fd; // find_field initializes fd if found + KlassHandle sel_klass(THREAD, instanceKlass::cast(defc())->find_field(name, type, &fd)); + // check if field exists; i.e., if a klass containing the field def has been selected +- if (sel_klass.is_null()) return; +- oop vmtarget = sel_klass->as_klassOop(); +- int vmindex = fd.offset(); +- int mods = (fd.access_flags().as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS); +- if (vmindex == VM_INDEX_UNINITIALIZED) break; // should not happen +- java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget); +- java_lang_invoke_MemberName::set_vmindex(mname(), vmindex); +- java_lang_invoke_MemberName::set_modifiers(mname(), mods); +- return; ++ if (sel_klass.is_null()) return empty; // should not happen ++ oop type = field_signature_type_or_null(fd.signature()); ++ oop name = field_name_or_null(fd.name()); ++ bool is_setter = (ref_kind_is_valid(ref_kind) && ref_kind_is_setter(ref_kind)); ++ mname = Handle(THREAD, ++ init_field_MemberName(mname(), sel_klass->as_klassOop(), ++ fd.access_flags(), type, name, fd.offset(), is_setter)); ++ return mname; + } + default: +- THROW_MSG(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format"); ++ THROW_MSG_(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format", empty); + } + +- // Second chance. +- if (polymorphic_method_type.not_null()) { +- // Look on a non-null class loader. 
+- Handle cur_class_loader; +- const int nptypes = java_lang_invoke_MethodType::ptype_count(polymorphic_method_type()); +- for (int i = 0; i <= nptypes; i++) { +- oop type_mirror; +- if (i < nptypes) type_mirror = java_lang_invoke_MethodType::ptype(polymorphic_method_type(), i); +- else type_mirror = java_lang_invoke_MethodType::rtype(polymorphic_method_type()); +- klassOop example_type = java_lang_Class::as_klassOop(type_mirror); +- if (example_type == NULL) continue; +- oop class_loader = Klass::cast(example_type)->class_loader(); +- if (class_loader == NULL || class_loader == cur_class_loader()) continue; +- cur_class_loader = Handle(THREAD, class_loader); +- methodOop m = SystemDictionary::find_method_handle_invoke(name, +- type, +- KlassHandle(THREAD, example_type), +- THREAD); +- if (HAS_PENDING_EXCEPTION) { +- CLEAR_PENDING_EXCEPTION; +- m = NULL; +- // try again with a different class loader... +- } +- if (m != NULL && +- m->is_method_handle_invoke() && +- java_lang_invoke_MethodType::equals(polymorphic_method_type(), m->method_handle_type())) { +- int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS); +- java_lang_invoke_MemberName::set_vmtarget(mname(), m); +- java_lang_invoke_MemberName::set_vmindex(mname(), m->vtable_index()); +- java_lang_invoke_MemberName::set_modifiers(mname(), mods); +- return; +- } +- } +- } ++ return empty; + } + + // Conversely, a member name which is only initialized from JVM internals +@@ -771,7 +741,7 @@ + assert(java_lang_invoke_MemberName::is_instance(mname()), ""); + oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname()); + int vmindex = java_lang_invoke_MemberName::vmindex(mname()); +- if (vmtarget == NULL || vmindex == VM_INDEX_UNINITIALIZED) { ++ if (vmtarget == NULL) { + THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "nothing to expand"); + } + +@@ -792,14 +762,12 @@ + case IS_METHOD: + case IS_CONSTRUCTOR: + { +- KlassHandle receiver_limit; int decode_flags = 0; +- methodHandle 
m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit, decode_flags); ++ assert(vmtarget->is_method(), "method or constructor vmtarget is methodOop"); ++ methodHandle m(THREAD, methodOop(vmtarget)); ++ DEBUG_ONLY(vmtarget = NULL); // safety + if (m.is_null()) break; + if (!have_defc) { + klassOop defc = m->method_holder(); +- if (receiver_limit.not_null() && receiver_limit() != defc +- && Klass::cast(receiver_limit())->is_subtype_of(defc)) +- defc = receiver_limit(); + java_lang_invoke_MemberName::set_clazz(mname(), Klass::cast(defc)->java_mirror()); + } + if (!have_name) { +@@ -816,9 +784,10 @@ + case IS_FIELD: + { + // This is taken from LinkResolver::resolve_field, sans access checks. +- if (!vmtarget->is_klass()) break; ++ assert(vmtarget->is_klass(), "field vmtarget is klassOop"); + if (!Klass::cast((klassOop) vmtarget)->oop_is_instance()) break; + instanceKlassHandle defc(THREAD, (klassOop) vmtarget); ++ DEBUG_ONLY(vmtarget = NULL); // safety + bool is_static = ((flags & JVM_ACC_STATIC) != 0); + fieldDescriptor fd; // find_field initializes fd if found + if (!defc->find_field_from_offset(vmindex, is_static, &fd)) +@@ -832,7 +801,11 @@ + java_lang_invoke_MemberName::set_name(mname(), name()); + } + if (!have_type) { +- Handle type = java_lang_String::create_from_symbol(fd.signature(), CHECK); ++ // If it is a primitive field type, don't mess with short strings like "I". ++ Handle type = field_signature_type_or_null(fd.signature()); ++ if (type.is_null()) { ++ java_lang_String::create_from_symbol(fd.signature(), CHECK); ++ } + java_lang_invoke_MemberName::set_type(mname(), type()); + } + return; +@@ -890,7 +863,13 @@ + oop result = results->obj_at(rfill++); + if (!java_lang_invoke_MemberName::is_instance(result)) + return -99; // caller bug! 
+- MethodHandles::init_MemberName(result, st.klass()->as_klassOop(), st.access_flags(), st.offset()); ++ oop type = field_signature_type_or_null(st.signature()); ++ oop name = field_name_or_null(st.name()); ++ oop saved = MethodHandles::init_field_MemberName(result, st.klass()->as_klassOop(), ++ st.access_flags(), type, name, ++ st.offset()); ++ if (saved != result) ++ results->obj_at_put(rfill-1, saved); // show saved instance to user + } else if (++overflow >= overflow_limit) { + match_flags = 0; break; // got tired of looking at overflow + } +@@ -938,7 +917,9 @@ + oop result = results->obj_at(rfill++); + if (!java_lang_invoke_MemberName::is_instance(result)) + return -99; // caller bug! +- MethodHandles::init_MemberName(result, m, true); ++ oop saved = MethodHandles::init_method_MemberName(result, m, true, NULL); ++ if (saved != result) ++ results->obj_at_put(rfill-1, saved); // show saved instance to user + } else if (++overflow >= overflow_limit) { + match_flags = 0; break; // got tired of looking at overflow + } +@@ -949,1925 +930,16 @@ + return rfill + overflow; + } + +- +-// Decode this java.lang.Class object into an instanceKlass, if possible. +-// Throw IAE if not +-instanceKlassHandle MethodHandles::resolve_instance_klass(oop java_mirror_oop, TRAPS) { +- instanceKlassHandle empty; +- klassOop caller = NULL; +- if (java_lang_Class::is_instance(java_mirror_oop)) { +- caller = java_lang_Class::as_klassOop(java_mirror_oop); +- } +- if (caller == NULL || !Klass::cast(caller)->oop_is_instance()) { +- THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), "not a class", empty); +- } +- return instanceKlassHandle(THREAD, caller); +-} +- +- +- +-// Decode the vmtarget field of a method handle. +-// Sanitize out methodOops, klassOops, and any other non-Java data. +-// This is for debugging and reflection. 
+-oop MethodHandles::encode_target(Handle mh, int format, TRAPS) { +- assert(java_lang_invoke_MethodHandle::is_instance(mh()), "must be a MH"); +- if (format == ETF_FORCE_DIRECT_HANDLE || +- format == ETF_COMPILE_DIRECT_HANDLE) { +- // Internal function for stress testing. +- Handle mt = java_lang_invoke_MethodHandle::type(mh()); +- int invocation_count = 10000; +- TempNewSymbol signature = java_lang_invoke_MethodType::as_signature(mt(), true, CHECK_NULL); +- bool omit_receiver_argument = true; +- MethodHandleCompiler mhc(mh, vmSymbols::invoke_name(), signature, invocation_count, omit_receiver_argument, CHECK_NULL); +- methodHandle m = mhc.compile(CHECK_NULL); +- if (StressMethodHandleWalk && Verbose || PrintMiscellaneous) { +- tty->print_cr("MethodHandleNatives.getTarget(%s)", +- format == ETF_FORCE_DIRECT_HANDLE ? "FORCE_DIRECT" : "COMPILE_DIRECT"); +- if (Verbose) { +- m->print_codes(); +- } +- } +- if (StressMethodHandleWalk) { +- InterpreterOopMap mask; +- OopMapCache::compute_one_oop_map(m, m->code_size() - 1, &mask); +- } +- if ((format == ETF_COMPILE_DIRECT_HANDLE || +- CompilationPolicy::must_be_compiled(m)) +- && !instanceKlass::cast(m->method_holder())->is_not_initialized() +- && CompilationPolicy::can_be_compiled(m)) { +- // Force compilation +- CompileBroker::compile_method(m, InvocationEntryBci, +- CompilationPolicy::policy()->initial_compile_level(), +- methodHandle(), 0, "MethodHandleNatives.getTarget", +- CHECK_NULL); +- } +- // Now wrap m in a DirectMethodHandle. 
+- instanceKlassHandle dmh_klass(THREAD, SystemDictionary::DirectMethodHandle_klass()); +- Handle dmh = dmh_klass->allocate_instance_handle(CHECK_NULL); +- JavaValue ignore_result(T_VOID); +- Symbol* init_name = vmSymbols::object_initializer_name(); +- Symbol* init_sig = vmSymbols::notifyGenericMethodType_signature(); +- JavaCalls::call_special(&ignore_result, dmh, +- SystemDictionaryHandles::MethodHandle_klass(), init_name, init_sig, +- java_lang_invoke_MethodHandle::type(mh()), CHECK_NULL); +- MethodHandles::init_DirectMethodHandle(dmh, m, false, CHECK_NULL); +- return dmh(); +- } +- if (format == ETF_HANDLE_OR_METHOD_NAME) { +- oop target = java_lang_invoke_MethodHandle::vmtarget(mh()); +- if (target == NULL) { +- return NULL; // unformed MH +- } +- klassOop tklass = target->klass(); +- if (Klass::cast(tklass)->is_subclass_of(SystemDictionary::Object_klass())) { +- return target; // target is another MH (or something else?) +- } +- } +- if (format == ETF_DIRECT_HANDLE) { +- oop target = mh(); +- for (;;) { +- if (target->klass() == SystemDictionary::DirectMethodHandle_klass()) { +- return target; +- } +- if (!java_lang_invoke_MethodHandle::is_instance(target)){ +- return NULL; // unformed MH +- } +- target = java_lang_invoke_MethodHandle::vmtarget(target); +- } +- } +- // cases of metadata in MH.vmtarget: +- // - AMH can have methodOop for static invoke with bound receiver +- // - DMH can have methodOop for static invoke (on variable receiver) +- // - DMH can have klassOop for dispatched (non-static) invoke +- KlassHandle receiver_limit; int decode_flags = 0; +- methodHandle m = decode_MethodHandle(mh(), receiver_limit, decode_flags); +- if (m.is_null()) return NULL; +- switch (format) { +- case ETF_REFLECT_METHOD: +- // same as jni_ToReflectedMethod: +- if (m->is_initializer()) { +- return Reflection::new_constructor(m, THREAD); +- } else { +- return Reflection::new_method(m, UseNewReflection, false, THREAD); +- } +- +- case ETF_HANDLE_OR_METHOD_NAME: // 
method, not handle +- case ETF_METHOD_NAME: +- { +- if (SystemDictionary::MemberName_klass() == NULL) break; +- instanceKlassHandle mname_klass(THREAD, SystemDictionary::MemberName_klass()); +- mname_klass->initialize(CHECK_NULL); +- Handle mname = mname_klass->allocate_instance_handle(CHECK_NULL); // possible safepoint +- java_lang_invoke_MemberName::set_vmindex(mname(), VM_INDEX_UNINITIALIZED); +- bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0); +- init_MemberName(mname(), m(), do_dispatch); +- expand_MemberName(mname, 0, CHECK_NULL); +- return mname(); +- } +- } +- +- // Unknown format code. +- char msg[50]; +- jio_snprintf(msg, sizeof(msg), "unknown getTarget format=%d", format); +- THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(), msg); +-} +- +-static const char* always_null_names[] = { +- "java/lang/Void", +- "java/lang/Null", +- //"java/lang/Nothing", +- "sun/dyn/empty/Empty", +- "sun/invoke/empty/Empty", +- NULL +-}; +- +-static bool is_always_null_type(klassOop klass) { +- if (klass == NULL) return false; // safety +- if (!Klass::cast(klass)->oop_is_instance()) return false; +- instanceKlass* ik = instanceKlass::cast(klass); +- // Must be on the boot class path: +- if (ik->class_loader() != NULL) return false; +- // Check the name. 
+- Symbol* name = ik->name(); +- for (int i = 0; ; i++) { +- const char* test_name = always_null_names[i]; +- if (test_name == NULL) break; +- if (name->equals(test_name)) +- return true; +- } +- return false; +-} +- +-bool MethodHandles::class_cast_needed(klassOop src, klassOop dst) { +- if (dst == NULL) return true; +- if (src == NULL) return (dst != SystemDictionary::Object_klass()); +- if (src == dst || dst == SystemDictionary::Object_klass()) +- return false; // quickest checks +- Klass* srck = Klass::cast(src); +- Klass* dstk = Klass::cast(dst); +- if (dstk->is_interface()) { +- // interface receivers can safely be viewed as untyped, +- // because interface calls always include a dynamic check +- //dstk = Klass::cast(SystemDictionary::Object_klass()); +- return false; +- } +- if (srck->is_interface()) { +- // interface arguments must be viewed as untyped +- //srck = Klass::cast(SystemDictionary::Object_klass()); +- return true; +- } +- if (is_always_null_type(src)) { +- // some source types are known to be never instantiated; +- // they represent references which are always null +- // such null references never fail to convert safely +- return false; +- } +- return !srck->is_subclass_of(dstk->as_klassOop()); +-} +- +-static oop object_java_mirror() { +- return Klass::cast(SystemDictionary::Object_klass())->java_mirror(); +-} +- +-bool MethodHandles::is_float_fixed_reinterpretation_cast(BasicType src, BasicType dst) { +- if (src == T_FLOAT) return dst == T_INT; +- if (src == T_INT) return dst == T_FLOAT; +- if (src == T_DOUBLE) return dst == T_LONG; +- if (src == T_LONG) return dst == T_DOUBLE; +- return false; +-} +- +-bool MethodHandles::same_basic_type_for_arguments(BasicType src, +- BasicType dst, +- bool raw, +- bool for_return) { +- if (for_return) { +- // return values can always be forgotten: +- if (dst == T_VOID) return true; +- if (src == T_VOID) return raw && (dst == T_INT); +- // We allow caller to receive a garbage int, which is harmless. 
+- // This trick is pulled by trusted code (see VerifyType.canPassRaw). +- } +- assert(src != T_VOID && dst != T_VOID, "should not be here"); +- if (src == dst) return true; +- if (type2size[src] != type2size[dst]) return false; +- if (src == T_OBJECT || dst == T_OBJECT) return false; +- if (raw) return true; // bitwise reinterpretation; caller guarantees safety +- // allow reinterpretation casts for integral widening +- if (is_subword_type(src)) { // subwords can fit in int or other subwords +- if (dst == T_INT) // any subword fits in an int +- return true; +- if (src == T_BOOLEAN) // boolean fits in any subword +- return is_subword_type(dst); +- if (src == T_BYTE && dst == T_SHORT) +- return true; // remaining case: byte fits in short +- } +- // allow float/fixed reinterpretation casts +- if (is_float_fixed_reinterpretation_cast(src, dst)) +- return true; +- return false; +-} +- +-const char* MethodHandles::check_method_receiver(methodOop m, +- klassOop passed_recv_type) { +- assert(!m->is_static(), "caller resp."); +- if (passed_recv_type == NULL) +- return "receiver type is primitive"; +- if (class_cast_needed(passed_recv_type, m->method_holder())) { +- Klass* formal = Klass::cast(m->method_holder()); +- return SharedRuntime::generate_class_cast_message("receiver type", +- formal->external_name()); +- } +- return NULL; // checks passed +-} +- +-// Verify that m's signature can be called type-safely by a method handle +-// of the given method type 'mtype'. +-// It takes a TRAPS argument because it must perform symbol lookups. 
+-void MethodHandles::verify_method_signature(methodHandle m, +- Handle mtype, +- int first_ptype_pos, +- KlassHandle insert_ptype, +- TRAPS) { +- Handle mhi_type; +- if (m->is_method_handle_invoke()) { +- // use this more exact typing instead of the symbolic signature: +- mhi_type = Handle(THREAD, m->method_handle_type()); +- } +- objArrayHandle ptypes(THREAD, java_lang_invoke_MethodType::ptypes(mtype())); +- int pnum = first_ptype_pos; +- int pmax = ptypes->length(); +- int anum = 0; // method argument +- const char* err = NULL; +- ResourceMark rm(THREAD); +- for (SignatureStream ss(m->signature()); !ss.is_done(); ss.next()) { +- oop ptype_oop = NULL; +- if (ss.at_return_type()) { +- if (pnum != pmax) +- { err = "too many arguments"; break; } +- ptype_oop = java_lang_invoke_MethodType::rtype(mtype()); +- } else { +- if (pnum >= pmax) +- { err = "not enough arguments"; break; } +- if (pnum >= 0) +- ptype_oop = ptypes->obj_at(pnum); +- else if (insert_ptype.is_null()) +- ptype_oop = NULL; +- else +- ptype_oop = insert_ptype->java_mirror(); +- pnum += 1; +- anum += 1; +- } +- KlassHandle pklass; +- BasicType ptype = T_OBJECT; +- bool have_ptype = false; +- // missing ptype_oop does not match any non-reference; use Object to report the error +- pklass = SystemDictionaryHandles::Object_klass(); +- if (ptype_oop != NULL) { +- have_ptype = true; +- klassOop pklass_oop = NULL; +- ptype = java_lang_Class::as_BasicType(ptype_oop, &pklass_oop); +- pklass = KlassHandle(THREAD, pklass_oop); +- } +- ptype_oop = NULL; //done with this +- KlassHandle aklass; +- BasicType atype = ss.type(); +- if (atype == T_ARRAY) atype = T_OBJECT; // fold all refs to T_OBJECT +- if (atype == T_OBJECT) { +- if (!have_ptype) { +- // null matches any reference +- continue; +- } +- if (mhi_type.is_null()) { +- // If we fail to resolve types at this point, we will usually throw an error. 
+- TempNewSymbol name = ss.as_symbol_or_null(); +- if (name != NULL) { +- instanceKlass* mk = instanceKlass::cast(m->method_holder()); +- Handle loader(THREAD, mk->class_loader()); +- Handle domain(THREAD, mk->protection_domain()); +- klassOop aklass_oop = SystemDictionary::resolve_or_null(name, loader, domain, CHECK); +- if (aklass_oop != NULL) +- aklass = KlassHandle(THREAD, aklass_oop); +- if (aklass.is_null() && +- pklass.not_null() && +- loader.is_null() && +- pklass->name() == name) +- // accept name equivalence here, since that's the best we can do +- aklass = pklass; +- } +- } else { +- // for method handle invokers we don't look at the name in the signature +- oop atype_oop; +- if (ss.at_return_type()) +- atype_oop = java_lang_invoke_MethodType::rtype(mhi_type()); +- else +- atype_oop = java_lang_invoke_MethodType::ptype(mhi_type(), anum-1); +- klassOop aklass_oop = NULL; +- atype = java_lang_Class::as_BasicType(atype_oop, &aklass_oop); +- aklass = KlassHandle(THREAD, aklass_oop); +- } +- } +- if (!ss.at_return_type()) { +- err = check_argument_type_change(ptype, pklass(), atype, aklass(), anum); +- } else { +- err = check_return_type_change(atype, aklass(), ptype, pklass()); // note reversal! +- } +- if (err != NULL) break; +- } +- +- if (err != NULL) { +-#ifndef PRODUCT +- if (PrintMiscellaneous && (Verbose || WizardMode)) { +- tty->print("*** verify_method_signature failed: "); +- java_lang_invoke_MethodType::print_signature(mtype(), tty); +- tty->cr(); +- tty->print_cr(" first_ptype_pos = %d, insert_ptype = "UINTX_FORMAT, first_ptype_pos, insert_ptype()); +- tty->print(" Failing method: "); +- m->print(); +- } +-#endif //PRODUCT +- THROW_MSG(vmSymbols::java_lang_InternalError(), err); +- } +-} +- +-// Main routine for verifying the MethodHandle.type of a proposed +-// direct or bound-direct method handle. 
+-void MethodHandles::verify_method_type(methodHandle m, +- Handle mtype, +- bool has_bound_recv, +- KlassHandle bound_recv_type, +- TRAPS) { +- bool m_needs_receiver = !m->is_static(); +- +- const char* err = NULL; +- +- int first_ptype_pos = m_needs_receiver ? 1 : 0; +- if (has_bound_recv) { +- first_ptype_pos -= 1; // ptypes do not include the bound argument; start earlier in them +- if (m_needs_receiver && bound_recv_type.is_null()) +- { err = "bound receiver is not an object"; goto die; } +- } +- +- if (m_needs_receiver && err == NULL) { +- objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(mtype()); +- if (ptypes->length() < first_ptype_pos) +- { err = "receiver argument is missing"; goto die; } +- if (has_bound_recv) +- err = check_method_receiver(m(), bound_recv_type->as_klassOop()); +- else +- err = check_method_receiver(m(), java_lang_Class::as_klassOop(ptypes->obj_at(first_ptype_pos-1))); +- if (err != NULL) goto die; +- } +- +- // Check the other arguments for mistypes. +- verify_method_signature(m, mtype, first_ptype_pos, bound_recv_type, CHECK); +- return; +- +- die: +- THROW_MSG(vmSymbols::java_lang_InternalError(), err); +-} +- +-void MethodHandles::verify_vmslots(Handle mh, TRAPS) { +- // Verify vmslots. +- int check_slots = argument_slot_count(java_lang_invoke_MethodHandle::type(mh())); +- if (java_lang_invoke_MethodHandle::vmslots(mh()) != check_slots) { +- THROW_MSG(vmSymbols::java_lang_InternalError(), "bad vmslots in BMH"); +- } +-} +- +-void MethodHandles::verify_vmargslot(Handle mh, int argnum, int argslot, TRAPS) { +- // Verify that argslot points at the given argnum. 
+- int check_slot = argument_slot(java_lang_invoke_MethodHandle::type(mh()), argnum); +- if (argslot != check_slot || argslot < 0) { +- ResourceMark rm; +- const char* fmt = "for argnum of %d, vmargslot is %d, should be %d"; +- size_t msglen = strlen(fmt) + 3*11 + 1; +- char* msg = NEW_RESOURCE_ARRAY(char, msglen); +- jio_snprintf(msg, msglen, fmt, argnum, argslot, check_slot); +- THROW_MSG(vmSymbols::java_lang_InternalError(), msg); +- } +-} +- +-// Verify the correspondence between two method types. +-// Apart from the advertised changes, caller method type X must +-// be able to invoke the callee method Y type with no violations +-// of type integrity. +-// Return NULL if all is well, else a short error message. +-const char* MethodHandles::check_method_type_change(oop src_mtype, int src_beg, int src_end, +- int insert_argnum, oop insert_type, +- int change_argnum, oop change_type, +- int delete_argnum, +- oop dst_mtype, int dst_beg, int dst_end, +- bool raw) { +- objArrayOop src_ptypes = java_lang_invoke_MethodType::ptypes(src_mtype); +- objArrayOop dst_ptypes = java_lang_invoke_MethodType::ptypes(dst_mtype); +- +- int src_max = src_ptypes->length(); +- int dst_max = dst_ptypes->length(); +- +- if (src_end == -1) src_end = src_max; +- if (dst_end == -1) dst_end = dst_max; +- +- assert(0 <= src_beg && src_beg <= src_end && src_end <= src_max, "oob"); +- assert(0 <= dst_beg && dst_beg <= dst_end && dst_end <= dst_max, "oob"); +- +- // pending actions; set to -1 when done: +- int ins_idx = insert_argnum, chg_idx = change_argnum, del_idx = delete_argnum; +- +- const char* err = NULL; +- +- // Walk along each array of parameter types, including a virtual +- // NULL end marker at the end of each. +- for (int src_idx = src_beg, dst_idx = dst_beg; +- (src_idx <= src_end && dst_idx <= dst_end); +- src_idx++, dst_idx++) { +- oop src_type = (src_idx == src_end) ? oop(NULL) : src_ptypes->obj_at(src_idx); +- oop dst_type = (dst_idx == dst_end) ? 
oop(NULL) : dst_ptypes->obj_at(dst_idx); +- bool fix_null_src_type = false; +- +- // Perform requested edits. +- if (ins_idx == src_idx) { +- // note that the inserted guy is never affected by a change or deletion +- ins_idx = -1; +- src_type = insert_type; +- fix_null_src_type = true; +- --src_idx; // back up to process src type on next loop +- src_idx = src_end; +- } else { +- // note that the changed guy can be immediately deleted +- if (chg_idx == src_idx) { +- chg_idx = -1; +- assert(src_idx < src_end, "oob"); +- src_type = change_type; +- fix_null_src_type = true; +- } +- if (del_idx == src_idx) { +- del_idx = -1; +- assert(src_idx < src_end, "oob"); +- --dst_idx; +- continue; // rerun loop after skipping this position +- } +- } +- +- if (src_type == NULL && fix_null_src_type) +- // explicit null in this case matches any dest reference +- src_type = (java_lang_Class::is_primitive(dst_type) ? object_java_mirror() : dst_type); +- +- // Compare the two argument types. +- if (src_type != dst_type) { +- if (src_type == NULL) return "not enough arguments"; +- if (dst_type == NULL) return "too many arguments"; +- err = check_argument_type_change(src_type, dst_type, dst_idx, raw); +- if (err != NULL) return err; +- } +- } +- +- // Now compare return types also. +- oop src_rtype = java_lang_invoke_MethodType::rtype(src_mtype); +- oop dst_rtype = java_lang_invoke_MethodType::rtype(dst_mtype); +- if (src_rtype != dst_rtype) { +- err = check_return_type_change(dst_rtype, src_rtype, raw); // note reversal! 
+- if (err != NULL) return err; +- } +- +- assert(err == NULL, ""); +- return NULL; // all is well +-} +- +- +-const char* MethodHandles::check_argument_type_change(BasicType src_type, +- klassOop src_klass, +- BasicType dst_type, +- klassOop dst_klass, +- int argnum, +- bool raw) { +- const char* err = NULL; +- const bool for_return = (argnum < 0); +- +- // just in case: +- if (src_type == T_ARRAY) src_type = T_OBJECT; +- if (dst_type == T_ARRAY) dst_type = T_OBJECT; +- +- // Produce some nice messages if VerifyMethodHandles is turned on: +- if (!same_basic_type_for_arguments(src_type, dst_type, raw, for_return)) { +- if (src_type == T_OBJECT) { +- if (raw && is_java_primitive(dst_type)) +- return NULL; // ref-to-prim discards ref and returns zero +- err = (!for_return +- ? "type mismatch: passing a %s for method argument #%d, which expects primitive %s" +- : "type mismatch: returning a %s, but caller expects primitive %s"); +- } else if (dst_type == T_OBJECT) { +- err = (!for_return +- ? "type mismatch: passing a primitive %s for method argument #%d, which expects %s" +- : "type mismatch: returning a primitive %s, but caller expects %s"); +- } else { +- err = (!for_return +- ? "type mismatch: passing a %s for method argument #%d, which expects %s" +- : "type mismatch: returning a %s, but caller expects %s"); +- } +- } else if (src_type == T_OBJECT && dst_type == T_OBJECT && +- class_cast_needed(src_klass, dst_klass)) { +- if (!class_cast_needed(dst_klass, src_klass)) { +- if (raw) +- return NULL; // reverse cast is OK; the MH target is trusted to enforce it +- err = (!for_return +- ? "cast required: passing a %s for method argument #%d, which expects %s" +- : "cast required: returning a %s, but caller expects %s"); +- } else { +- err = (!for_return +- ? 
"reference mismatch: passing a %s for method argument #%d, which expects %s" +- : "reference mismatch: returning a %s, but caller expects %s"); +- } +- } else { +- // passed the obstacle course +- return NULL; +- } +- +- // format, format, format +- const char* src_name = type2name(src_type); +- const char* dst_name = type2name(dst_type); +- if (src_name == NULL) src_name = "unknown type"; +- if (dst_name == NULL) dst_name = "unknown type"; +- if (src_type == T_OBJECT) +- src_name = (src_klass != NULL) ? Klass::cast(src_klass)->external_name() : "an unresolved class"; +- if (dst_type == T_OBJECT) +- dst_name = (dst_klass != NULL) ? Klass::cast(dst_klass)->external_name() : "an unresolved class"; +- +- size_t msglen = strlen(err) + strlen(src_name) + strlen(dst_name) + (argnum < 10 ? 1 : 11); +- char* msg = NEW_RESOURCE_ARRAY(char, msglen + 1); +- if (!for_return) { +- assert(strstr(err, "%d") != NULL, ""); +- jio_snprintf(msg, msglen, err, src_name, argnum, dst_name); +- } else { +- assert(strstr(err, "%d") == NULL, ""); +- jio_snprintf(msg, msglen, err, src_name, dst_name); +- } +- return msg; +-} +- +-// Compute the depth within the stack of the given argument, i.e., +-// the combined size of arguments to the right of the given argument. +-// For the last argument (ptypes.length-1) this will be zero. +-// For the first argument (0) this will be the size of all +-// arguments but that one. For the special number -1, this +-// will be the size of all arguments, including the first. +-// If the argument is neither -1 nor a valid argument index, +-// then return a negative number. Otherwise, the result +-// is in the range [0..vmslots] inclusive. 
+-int MethodHandles::argument_slot(oop method_type, int arg) { +- objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(method_type); +- int argslot = 0; +- int len = ptypes->length(); +- if (arg < -1 || arg >= len) return -99; +- for (int i = len-1; i > arg; i--) { +- BasicType bt = java_lang_Class::as_BasicType(ptypes->obj_at(i)); +- argslot += type2size[bt]; +- } +- assert(argument_slot_to_argnum(method_type, argslot) == arg, "inverse works"); +- return argslot; +-} +- +-// Given a slot number, return the argument number. +-int MethodHandles::argument_slot_to_argnum(oop method_type, int query_argslot) { +- objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(method_type); +- int argslot = 0; +- int len = ptypes->length(); +- for (int i = len-1; i >= 0; i--) { +- if (query_argslot == argslot) return i; +- BasicType bt = java_lang_Class::as_BasicType(ptypes->obj_at(i)); +- argslot += type2size[bt]; +- } +- // return pseudo-arg deepest in stack: +- if (query_argslot == argslot) return -1; +- return -99; // oob slot, or splitting a double-slot arg +-} +- +-methodHandle MethodHandles::dispatch_decoded_method(methodHandle m, +- KlassHandle receiver_limit, +- int decode_flags, +- KlassHandle receiver_klass, +- TRAPS) { +- assert((decode_flags & ~_DMF_DIRECT_MASK) == 0, "must be direct method reference"); +- assert((decode_flags & _dmf_has_receiver) != 0, "must have a receiver or first reference argument"); +- +- if (!m->is_static() && +- (receiver_klass.is_null() || !receiver_klass->is_subtype_of(m->method_holder()))) +- // given type does not match class of method, or receiver is null! +- // caller should have checked this, but let's be extra careful... 
+- return methodHandle(); +- +- if (receiver_limit.not_null() && +- (receiver_klass.not_null() && !receiver_klass->is_subtype_of(receiver_limit()))) +- // given type is not limited to the receiver type +- // note that a null receiver can match any reference value, for a static method +- return methodHandle(); +- +- if (!(decode_flags & MethodHandles::_dmf_does_dispatch)) { +- // pre-dispatched or static method (null receiver is OK for static) +- return m; +- +- } else if (receiver_klass.is_null()) { +- // null receiver value; cannot dispatch +- return methodHandle(); +- +- } else if (!(decode_flags & MethodHandles::_dmf_from_interface)) { +- // perform virtual dispatch +- int vtable_index = m->vtable_index(); +- guarantee(vtable_index >= 0, "valid vtable index"); +- +- // receiver_klass might be an arrayKlassOop but all vtables start at +- // the same place. The cast is to avoid virtual call and assertion. +- // See also LinkResolver::runtime_resolve_virtual_method. +- instanceKlass* inst = (instanceKlass*)Klass::cast(receiver_klass()); +- DEBUG_ONLY(inst->verify_vtable_index(vtable_index)); +- methodOop m_oop = inst->method_at_vtable(vtable_index); +- return methodHandle(THREAD, m_oop); +- +- } else { +- // perform interface dispatch +- int itable_index = klassItable::compute_itable_index(m()); +- guarantee(itable_index >= 0, "valid itable index"); +- instanceKlass* inst = instanceKlass::cast(receiver_klass()); +- methodOop m_oop = inst->method_at_itable(m->method_holder(), itable_index, THREAD); +- return methodHandle(THREAD, m_oop); +- } +-} +- +-void MethodHandles::verify_DirectMethodHandle(Handle mh, methodHandle m, TRAPS) { +- // Verify type. +- Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(mh())); +- verify_method_type(m, mtype, false, KlassHandle(), CHECK); +- +- // Verify vmslots. 
+- if (java_lang_invoke_MethodHandle::vmslots(mh()) != m->size_of_parameters()) { +- THROW_MSG(vmSymbols::java_lang_InternalError(), "bad vmslots in DMH"); +- } +-} +- +-void MethodHandles::init_DirectMethodHandle(Handle mh, methodHandle m, bool do_dispatch, TRAPS) { +- // Check arguments. +- if (mh.is_null() || m.is_null() || +- (!do_dispatch && m->is_abstract())) { +- THROW(vmSymbols::java_lang_InternalError()); +- } +- +- if (VerifyMethodHandles) { +- // The privileged code which invokes this routine should not make +- // a mistake about types, but it's better to verify. +- verify_DirectMethodHandle(mh, m, CHECK); +- } +- +- // Finally, after safety checks are done, link to the target method. +- // We will follow the same path as the latter part of +- // InterpreterRuntime::resolve_invoke(), which first finds the method +- // and then decides how to populate the constant pool cache entry +- // that links the interpreter calls to the method. We need the same +- // bits, and will use the same calling sequence code. +- +- int vmindex = methodOopDesc::garbage_vtable_index; +- Handle vmtarget; +- +- instanceKlass::cast(m->method_holder())->link_class(CHECK); +- +- MethodHandleEntry* me = NULL; +- if (do_dispatch && Klass::cast(m->method_holder())->is_interface()) { +- // We are simulating an invokeinterface instruction. +- // (We might also be simulating an invokevirtual on a miranda method, +- // but it is safe to treat it as an invokeinterface.) +- assert(!m->can_be_statically_bound(), "no final methods on interfaces"); +- vmindex = klassItable::compute_itable_index(m()); +- assert(vmindex >= 0, "(>=0) == do_dispatch"); +- // Set up same bits as ConstantPoolCacheEntry::set_interface_call(). +- vmtarget = m->method_holder(); // the interface +- me = MethodHandles::entry(MethodHandles::_invokeinterface_mh); +- } else if (!do_dispatch || m->can_be_statically_bound()) { +- // We are simulating an invokestatic or invokespecial instruction. 
+- // Set up the method pointer, just like ConstantPoolCacheEntry::set_method(). +- vmtarget = m; +- // this does not help dispatch, but it will make it possible to parse this MH: +- vmindex = methodOopDesc::nonvirtual_vtable_index; +- assert(vmindex < 0, "(>=0) == do_dispatch"); +- if (!m->is_static()) { +- me = MethodHandles::entry(MethodHandles::_invokespecial_mh); +- } else { +- me = MethodHandles::entry(MethodHandles::_invokestatic_mh); +- // Part of the semantics of a static call is an initialization barrier. +- // For a DMH, it is done now, when the handle is created. +- Klass* k = Klass::cast(m->method_holder()); +- if (k->should_be_initialized()) { +- k->initialize(CHECK); // possible safepoint +- } +- } +- } else { +- // We are simulating an invokevirtual instruction. +- // Set up the vtable index, just like ConstantPoolCacheEntry::set_method(). +- // The key logic is LinkResolver::runtime_resolve_virtual_method. +- vmindex = m->vtable_index(); +- vmtarget = m->method_holder(); +- me = MethodHandles::entry(MethodHandles::_invokevirtual_mh); +- } +- +- if (me == NULL) { THROW(vmSymbols::java_lang_InternalError()); } +- +- java_lang_invoke_DirectMethodHandle::set_vmtarget(mh(), vmtarget()); +- java_lang_invoke_DirectMethodHandle::set_vmindex( mh(), vmindex); +- DEBUG_ONLY(KlassHandle rlimit; int flags); +- assert(MethodHandles::decode_method(mh(), rlimit, flags) == m, +- "properly stored for later decoding"); +- DEBUG_ONLY(bool actual_do_dispatch = ((flags & _dmf_does_dispatch) != 0)); +- assert(!(actual_do_dispatch && !do_dispatch), +- "do not perform dispatch if !do_dispatch specified"); +- assert(actual_do_dispatch == (vmindex >= 0), "proper later decoding of do_dispatch"); +- assert(decode_MethodHandle_stack_pushes(mh()) == 0, "DMH does not move stack"); +- +- // Done! +- java_lang_invoke_MethodHandle::set_vmentry(mh(), me); +-} +- +-void MethodHandles::verify_BoundMethodHandle_with_receiver(Handle mh, +- methodHandle m, +- TRAPS) { +- // Verify type. 
+- KlassHandle bound_recv_type; +- { +- oop receiver = java_lang_invoke_BoundMethodHandle::argument(mh()); +- if (receiver != NULL) +- bound_recv_type = KlassHandle(THREAD, receiver->klass()); +- } +- Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(mh())); +- verify_method_type(m, mtype, true, bound_recv_type, CHECK); +- +- int receiver_pos = m->size_of_parameters() - 1; +- +- // Verify MH.vmargslot, which should point at the bound receiver. +- verify_vmargslot(mh, -1, java_lang_invoke_BoundMethodHandle::vmargslot(mh()), CHECK); +- //verify_vmslots(mh, CHECK); +- +- // Verify vmslots. +- if (java_lang_invoke_MethodHandle::vmslots(mh()) != receiver_pos) { +- THROW_MSG(vmSymbols::java_lang_InternalError(), "bad vmslots in BMH (receiver)"); +- } +-} +- +-// Initialize a BMH with a receiver bound directly to a methodOop. +-void MethodHandles::init_BoundMethodHandle_with_receiver(Handle mh, +- methodHandle original_m, +- KlassHandle receiver_limit, +- int decode_flags, +- TRAPS) { +- // Check arguments. 
+- if (mh.is_null() || original_m.is_null()) { +- THROW(vmSymbols::java_lang_InternalError()); +- } +- +- KlassHandle receiver_klass; +- { +- oop receiver_oop = java_lang_invoke_BoundMethodHandle::argument(mh()); +- if (receiver_oop != NULL) +- receiver_klass = KlassHandle(THREAD, receiver_oop->klass()); +- } +- methodHandle m = dispatch_decoded_method(original_m, +- receiver_limit, decode_flags, +- receiver_klass, +- CHECK); +- if (m.is_null()) { THROW(vmSymbols::java_lang_InternalError()); } +- if (m->is_abstract()) { THROW(vmSymbols::java_lang_AbstractMethodError()); } +- +- int vmargslot = m->size_of_parameters() - 1; +- assert(java_lang_invoke_BoundMethodHandle::vmargslot(mh()) == vmargslot, ""); +- +- if (VerifyMethodHandles) { +- verify_BoundMethodHandle_with_receiver(mh, m, CHECK); +- } +- +- java_lang_invoke_BoundMethodHandle::set_vmtarget(mh(), m()); +- +- DEBUG_ONLY(KlassHandle junk1; int junk2); +- assert(MethodHandles::decode_method(mh(), junk1, junk2) == m, "properly stored for later decoding"); +- assert(decode_MethodHandle_stack_pushes(mh()) == 1, "BMH pushes one stack slot"); +- +- // Done! 
+- java_lang_invoke_MethodHandle::set_vmentry(mh(), MethodHandles::entry(MethodHandles::_bound_ref_direct_mh)); +-} +- +-void MethodHandles::verify_BoundMethodHandle(Handle mh, Handle target, int argnum, +- bool direct_to_method, TRAPS) { +- ResourceMark rm; +- Handle ptype_handle(THREAD, +- java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum)); +- KlassHandle ptype_klass; +- BasicType ptype = java_lang_Class::as_BasicType(ptype_handle(), &ptype_klass); +- int slots_pushed = type2size[ptype]; +- +- oop argument = java_lang_invoke_BoundMethodHandle::argument(mh()); +- +- const char* err = NULL; +- +- switch (ptype) { +- case T_OBJECT: +- if (argument != NULL) +- // we must implicitly convert from the arg type to the outgoing ptype +- err = check_argument_type_change(T_OBJECT, argument->klass(), ptype, ptype_klass(), argnum); +- break; +- +- case T_ARRAY: case T_VOID: +- assert(false, "array, void do not appear here"); +- default: +- if (ptype != T_INT && !is_subword_type(ptype)) { +- err = "unexpected parameter type"; +- break; +- } +- // check subrange of Integer.value, if necessary +- if (argument == NULL || argument->klass() != SystemDictionary::Integer_klass()) { +- err = "bound integer argument must be of type java.lang.Integer"; +- break; +- } +- if (ptype != T_INT) { +- int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT); +- jint value = argument->int_field(value_offset); +- int vminfo = adapter_unbox_subword_vminfo(ptype); +- jint subword = truncate_subword_from_vminfo(value, vminfo); +- if (value != subword) { +- err = "bound subword value does not fit into the subword type"; +- break; +- } +- } +- break; +- case T_FLOAT: +- case T_DOUBLE: +- case T_LONG: +- { +- // we must implicitly convert from the unboxed arg type to the outgoing ptype +- BasicType argbox = java_lang_boxing_object::basic_type(argument); +- if (argbox != ptype) { +- err = check_argument_type_change(T_OBJECT, (argument == NULL +- ? 
SystemDictionary::Object_klass() +- : argument->klass()), +- ptype, ptype_klass(), argnum); +- assert(err != NULL, "this must be an error"); +- } +- break; +- } +- } +- +- if (err == NULL) { +- DEBUG_ONLY(int this_pushes = decode_MethodHandle_stack_pushes(mh())); +- if (direct_to_method) { +- assert(this_pushes == slots_pushed, "BMH pushes one or two stack slots"); +- } else { +- int target_pushes = decode_MethodHandle_stack_pushes(target()); +- assert(this_pushes == slots_pushed + target_pushes, "BMH stack motion must be correct"); +- } +- } +- +- if (err == NULL) { +- // Verify the rest of the method type. +- err = check_method_type_insertion(java_lang_invoke_MethodHandle::type(mh()), +- argnum, ptype_handle(), +- java_lang_invoke_MethodHandle::type(target())); +- } +- +- if (err != NULL) { +- THROW_MSG(vmSymbols::java_lang_InternalError(), err); +- } +-} +- +-void MethodHandles::init_BoundMethodHandle(Handle mh, Handle target, int argnum, TRAPS) { +- // Check arguments. +- if (mh.is_null() || target.is_null() || !java_lang_invoke_MethodHandle::is_instance(target())) { +- THROW(vmSymbols::java_lang_InternalError()); +- } +- +- int argslot = java_lang_invoke_BoundMethodHandle::vmargslot(mh()); +- +- if (VerifyMethodHandles) { +- int insert_after = argnum - 1; +- verify_vmargslot(mh, insert_after, argslot, CHECK); +- verify_vmslots(mh, CHECK); +- } +- +- // Get bound type and required slots. +- BasicType ptype; +- { +- oop ptype_oop = java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum); +- ptype = java_lang_Class::as_BasicType(ptype_oop); +- } +- int slots_pushed = type2size[ptype]; +- +- // If (a) the target is a direct non-dispatched method handle, +- // or (b) the target is a dispatched direct method handle and we +- // are binding the receiver, cut out the middle-man. +- // Do this by decoding the DMH and using its methodOop directly as vmtarget. 
+- bool direct_to_method = false; +- if (OptimizeMethodHandles && +- target->klass() == SystemDictionary::DirectMethodHandle_klass() && +- (argnum != 0 || java_lang_invoke_BoundMethodHandle::argument(mh()) != NULL) && +- (argnum == 0 || java_lang_invoke_DirectMethodHandle::vmindex(target()) < 0)) { +- KlassHandle receiver_limit; int decode_flags = 0; +- methodHandle m = decode_method(target(), receiver_limit, decode_flags); +- if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "DMH failed to decode"); } +- DEBUG_ONLY(int m_vmslots = m->size_of_parameters() - slots_pushed); // pos. of 1st arg. +- assert(java_lang_invoke_BoundMethodHandle::vmslots(mh()) == m_vmslots, "type w/ m sig"); +- if (argnum == 0 && (decode_flags & _dmf_has_receiver) != 0) { +- init_BoundMethodHandle_with_receiver(mh, m, +- receiver_limit, decode_flags, +- CHECK); +- return; +- } +- +- // Even if it is not a bound receiver, we still might be able +- // to bind another argument and still invoke the methodOop directly. +- if (!(decode_flags & _dmf_does_dispatch)) { +- direct_to_method = true; +- java_lang_invoke_BoundMethodHandle::set_vmtarget(mh(), m()); +- } +- } +- if (!direct_to_method) +- java_lang_invoke_BoundMethodHandle::set_vmtarget(mh(), target()); +- +- if (VerifyMethodHandles) { +- verify_BoundMethodHandle(mh, target, argnum, direct_to_method, CHECK); +- } +- +- // Next question: Is this a ref, int, or long bound value? +- MethodHandleEntry* me = NULL; +- if (ptype == T_OBJECT) { +- if (direct_to_method) me = MethodHandles::entry(_bound_ref_direct_mh); +- else me = MethodHandles::entry(_bound_ref_mh); +- } else if (slots_pushed == 2) { +- if (direct_to_method) me = MethodHandles::entry(_bound_long_direct_mh); +- else me = MethodHandles::entry(_bound_long_mh); +- } else if (slots_pushed == 1) { +- if (direct_to_method) me = MethodHandles::entry(_bound_int_direct_mh); +- else me = MethodHandles::entry(_bound_int_mh); +- } else { +- assert(false, ""); +- } +- +- // Done! 
+- java_lang_invoke_MethodHandle::set_vmentry(mh(), me); +-} +- +-static void throw_InternalError_for_bad_conversion(int conversion, const char* err, TRAPS) { +- char msg[200]; +- jio_snprintf(msg, sizeof(msg), "bad adapter (conversion=0x%08x): %s", conversion, err); +- THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), msg); +-} +- +-void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) { +- ResourceMark rm; +- jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh()); +- int argslot = java_lang_invoke_AdapterMethodHandle::vmargslot(mh()); +- +- verify_vmargslot(mh, argnum, argslot, CHECK); +- verify_vmslots(mh, CHECK); +- +- jint conv_op = adapter_conversion_op(conversion); +- if (!conv_op_valid(conv_op)) { +- throw_InternalError_for_bad_conversion(conversion, "unknown conversion op", THREAD); +- return; +- } +- EntryKind ek = adapter_entry_kind(conv_op); +- +- int stack_move = adapter_conversion_stack_move(conversion); +- BasicType src = adapter_conversion_src_type(conversion); +- BasicType dest = adapter_conversion_dest_type(conversion); +- int vminfo = adapter_conversion_vminfo(conversion); // should be zero +- +- Handle argument(THREAD, java_lang_invoke_AdapterMethodHandle::argument(mh())); +- Handle target(THREAD, java_lang_invoke_AdapterMethodHandle::vmtarget(mh())); +- Handle src_mtype(THREAD, java_lang_invoke_MethodHandle::type(mh())); +- Handle dst_mtype(THREAD, java_lang_invoke_MethodHandle::type(target())); +- Handle arg_mtype; +- +- const char* err = NULL; +- +- if (err == NULL) { +- // Check that the correct argument is supplied, but only if it is required. 
+- switch (ek) { +- case _adapter_check_cast: // target type of cast +- case _adapter_ref_to_prim: // wrapper type from which to unbox +- case _adapter_spread_args: // array type to spread from +- if (!java_lang_Class::is_instance(argument()) +- || java_lang_Class::is_primitive(argument())) +- { err = "adapter requires argument of type java.lang.Class"; break; } +- if (ek == _adapter_spread_args) { +- // Make sure it is a suitable collection type. (Array, for now.) +- Klass* ak = Klass::cast(java_lang_Class::as_klassOop(argument())); +- if (!ak->oop_is_array()) +- { err = "spread adapter requires argument representing an array class"; break; } +- BasicType et = arrayKlass::cast(ak->as_klassOop())->element_type(); +- if (et != dest && stack_move <= 0) +- { err = "spread adapter requires array class argument of correct type"; break; } +- } +- break; +- case _adapter_prim_to_ref: // boxer MH to use +- case _adapter_collect_args: // method handle which collects the args +- case _adapter_fold_args: // method handle which collects the args +- if (!java_lang_invoke_MethodHandle::is_instance(argument())) +- { err = "MethodHandle adapter argument required"; break; } +- arg_mtype = Handle(THREAD, java_lang_invoke_MethodHandle::type(argument())); +- break; +- default: +- if (argument.not_null()) +- { err = "adapter has spurious argument"; break; } +- break; +- } +- } +- +- if (err == NULL) { +- // Check that the src/dest types are supplied if needed. +- // Also check relevant parameter or return types. 
+- switch (ek) { +- case _adapter_check_cast: +- if (src != T_OBJECT || dest != T_OBJECT) { +- err = "adapter requires object src/dest conversion subfields"; +- } +- break; +- case _adapter_prim_to_prim: +- if (!is_java_primitive(src) || !is_java_primitive(dest) || src == dest) { +- err = "adapter requires primitive src/dest conversion subfields"; break; +- } +- if ( (src == T_FLOAT || src == T_DOUBLE) && !(dest == T_FLOAT || dest == T_DOUBLE) || +- !(src == T_FLOAT || src == T_DOUBLE) && (dest == T_FLOAT || dest == T_DOUBLE)) { +- err = "adapter cannot convert beween floating and fixed-point"; break; +- } +- break; +- case _adapter_ref_to_prim: +- if (src != T_OBJECT || !is_java_primitive(dest) +- || argument() != Klass::cast(SystemDictionary::box_klass(dest))->java_mirror()) { +- err = "adapter requires primitive dest conversion subfield"; break; +- } +- break; +- case _adapter_prim_to_ref: +- if (!is_java_primitive(src) || dest != T_OBJECT) { +- err = "adapter requires primitive src conversion subfield"; break; +- } +- break; +- case _adapter_swap_args: +- { +- if (!src || !dest) { +- err = "adapter requires src/dest conversion subfields for swap"; break; +- } +- int src_size = type2size[src]; +- if (src_size != type2size[dest]) { +- err = "adapter requires equal sizes for src/dest"; break; +- } +- int src_slot = argslot; +- int dest_slot = vminfo; +- int src_arg = argnum; +- int dest_arg = argument_slot_to_argnum(src_mtype(), dest_slot); +- verify_vmargslot(mh, dest_arg, dest_slot, CHECK); +- if (!(dest_slot >= src_slot + src_size) && +- !(src_slot >= dest_slot + src_size)) { +- err = "source, destination slots must be distinct"; break; +- } else if (!(src_slot > dest_slot)) { +- err = "source of swap must be deeper in stack"; break; +- } +- err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), dest_arg), +- java_lang_invoke_MethodType::ptype(dst_mtype(), src_arg), +- dest_arg); +- if (err == NULL) +- err = 
check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), src_arg), +- java_lang_invoke_MethodType::ptype(dst_mtype(), dest_arg), +- src_arg); +- break; +- } +- case _adapter_rot_args: +- { +- if (!src || !dest) { +- err = "adapter requires src/dest conversion subfields for rotate"; break; +- } +- int src_slot = argslot; +- int limit_raw = vminfo; +- bool rot_down = (src_slot < limit_raw); +- int limit_bias = (rot_down ? MethodHandles::OP_ROT_ARGS_DOWN_LIMIT_BIAS : 0); +- int limit_slot = limit_raw - limit_bias; +- int src_arg = argnum; +- int limit_arg = argument_slot_to_argnum(src_mtype(), limit_slot); +- verify_vmargslot(mh, limit_arg, limit_slot, CHECK); +- if (src_slot == limit_slot) { +- err = "source, destination slots must be distinct"; break; +- } +- if (!rot_down) { // rotate slots up == shift arguments left +- // limit_slot is an inclusive lower limit +- assert((src_slot > limit_slot) && (src_arg < limit_arg), ""); +- // rotate up: [limit_slot..src_slot-ss] --> [limit_slot+ss..src_slot] +- // that is: [src_arg+1..limit_arg] --> [src_arg..limit_arg-1] +- for (int i = src_arg+1; i <= limit_arg && err == NULL; i++) { +- err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), i), +- java_lang_invoke_MethodType::ptype(dst_mtype(), i-1), +- i); +- } +- } else { // rotate slots down == shfit arguments right +- // limit_slot is an exclusive upper limit +- assert((src_slot < limit_slot - limit_bias) && (src_arg > limit_arg + limit_bias), ""); +- // rotate down: [src_slot+ss..limit_slot) --> [src_slot..limit_slot-ss) +- // that is: (limit_arg..src_arg-1] --> (dst_arg+1..src_arg] +- for (int i = limit_arg+1; i <= src_arg-1 && err == NULL; i++) { +- err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), i), +- java_lang_invoke_MethodType::ptype(dst_mtype(), i+1), +- i); +- } +- } +- if (err == NULL) { +- int dest_arg = (rot_down ? 
limit_arg+1 : limit_arg); +- err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), src_arg), +- java_lang_invoke_MethodType::ptype(dst_mtype(), dest_arg), +- src_arg); +- } +- } +- break; +- case _adapter_spread_args: +- case _adapter_collect_args: +- case _adapter_fold_args: +- { +- bool is_spread = (ek == _adapter_spread_args); +- bool is_fold = (ek == _adapter_fold_args); +- BasicType coll_type = is_spread ? src : dest; +- BasicType elem_type = is_spread ? dest : src; +- // coll_type is type of args in collected form (or T_VOID if none) +- // elem_type is common type of args in spread form (or T_VOID if missing or heterogeneous) +- if (coll_type == 0 || elem_type == 0) { +- err = "adapter requires src/dest subfields for spread or collect"; break; +- } +- if (is_spread && coll_type != T_OBJECT) { +- err = "spread adapter requires object type for argument bundle"; break; +- } +- Handle spread_mtype = (is_spread ? dst_mtype : src_mtype); +- int spread_slot = argslot; +- int spread_arg = argnum; +- int slots_pushed = stack_move / stack_move_unit(); +- int coll_slot_count = type2size[coll_type]; +- int spread_slot_count = (is_spread ? slots_pushed : -slots_pushed) + coll_slot_count; +- if (is_fold) spread_slot_count = argument_slot_count(arg_mtype()); +- if (!is_spread) { +- int init_slots = argument_slot_count(src_mtype()); +- int coll_slots = argument_slot_count(arg_mtype()); +- if (spread_slot_count > init_slots || +- spread_slot_count != coll_slots) { +- err = "collect adapter has inconsistent arg counts"; break; +- } +- int next_slots = argument_slot_count(dst_mtype()); +- int unchanged_slots_in = (init_slots - spread_slot_count); +- int unchanged_slots_out = (next_slots - coll_slot_count - (is_fold ? 
spread_slot_count : 0)); +- if (unchanged_slots_in != unchanged_slots_out) { +- err = "collect adapter continuation has inconsistent arg counts"; break; +- } +- } +- } +- break; +- default: +- if (src != 0 || dest != 0) { +- err = "adapter has spurious src/dest conversion subfields"; break; +- } +- break; +- } +- } +- +- if (err == NULL) { +- // Check the stack_move subfield. +- // It must always report the net change in stack size, positive or negative. +- int slots_pushed = stack_move / stack_move_unit(); +- switch (ek) { +- case _adapter_prim_to_prim: +- case _adapter_ref_to_prim: +- case _adapter_prim_to_ref: +- if (slots_pushed != type2size[dest] - type2size[src]) { +- err = "wrong stack motion for primitive conversion"; +- } +- break; +- case _adapter_dup_args: +- if (slots_pushed <= 0) { +- err = "adapter requires conversion subfield slots_pushed > 0"; +- } +- break; +- case _adapter_drop_args: +- if (slots_pushed >= 0) { +- err = "adapter requires conversion subfield slots_pushed < 0"; +- } +- break; +- case _adapter_collect_args: +- case _adapter_fold_args: +- if (slots_pushed > 2) { +- err = "adapter requires conversion subfield slots_pushed <= 2"; +- } +- break; +- case _adapter_spread_args: +- if (slots_pushed < -1) { +- err = "adapter requires conversion subfield slots_pushed >= -1"; +- } +- break; +- default: +- if (stack_move != 0) { +- err = "adapter has spurious stack_move conversion subfield"; +- } +- break; +- } +- if (err == NULL && stack_move != slots_pushed * stack_move_unit()) { +- err = "stack_move conversion subfield must be multiple of stack_move_unit"; +- } +- } +- +- if (err == NULL) { +- // Make sure this adapter's stack pushing is accurately recorded. 
+- int slots_pushed = stack_move / stack_move_unit(); +- int this_vmslots = java_lang_invoke_MethodHandle::vmslots(mh()); +- int target_vmslots = java_lang_invoke_MethodHandle::vmslots(target()); +- int target_pushes = decode_MethodHandle_stack_pushes(target()); +- if (slots_pushed != (target_vmslots - this_vmslots)) { +- err = "stack_move inconsistent with previous and current MethodType vmslots"; +- } else { +- int this_pushes = decode_MethodHandle_stack_pushes(mh()); +- if (slots_pushed + target_pushes != this_pushes) { +- if (this_pushes == 0) +- err = "adapter push count not initialized"; +- else +- err = "adapter push count is wrong"; +- } +- } +- +- // While we're at it, check that the stack motion decoder works: +- DEBUG_ONLY(int this_pushes = decode_MethodHandle_stack_pushes(mh())); +- assert(this_pushes == slots_pushed + target_pushes, "AMH stack motion must be correct"); +- } +- +- if (err == NULL && vminfo != 0) { +- switch (ek) { +- case _adapter_swap_args: +- case _adapter_rot_args: +- case _adapter_prim_to_ref: +- case _adapter_collect_args: +- case _adapter_fold_args: +- break; // OK +- default: +- err = "vminfo subfield is reserved to the JVM"; +- } +- } +- +- // Do additional ad hoc checks. 
+- if (err == NULL) { +- switch (ek) { +- case _adapter_retype_only: +- err = check_method_type_passthrough(src_mtype(), dst_mtype(), false); +- break; +- +- case _adapter_retype_raw: +- err = check_method_type_passthrough(src_mtype(), dst_mtype(), true); +- break; +- +- case _adapter_check_cast: +- { +- // The actual value being checked must be a reference: +- err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), argnum), +- object_java_mirror(), argnum); +- if (err != NULL) break; +- +- // The output of the cast must fit with the destination argument: +- Handle cast_class = argument; +- err = check_method_type_conversion(src_mtype(), +- argnum, cast_class(), +- dst_mtype()); +- } +- break; +- +- // %%% TO DO: continue in remaining cases to verify src/dst_mtype if VerifyMethodHandles +- } +- } +- +- if (err != NULL) { +- throw_InternalError_for_bad_conversion(conversion, err, THREAD); +- return; +- } +- +-} +- +-void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS) { +- Handle argument = java_lang_invoke_AdapterMethodHandle::argument(mh()); +- int argslot = java_lang_invoke_AdapterMethodHandle::vmargslot(mh()); +- jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh()); +- jint conv_op = adapter_conversion_op(conversion); +- +- // adjust the adapter code to the internal EntryKind enumeration: +- EntryKind ek_orig = adapter_entry_kind(conv_op); +- EntryKind ek_opt = ek_orig; // may be optimized +- EntryKind ek_try; // temp +- +- // Finalize the vmtarget field (Java initialized it to null). 
+- if (!java_lang_invoke_MethodHandle::is_instance(target())) { +- throw_InternalError_for_bad_conversion(conversion, "bad target", THREAD); +- return; +- } +- java_lang_invoke_AdapterMethodHandle::set_vmtarget(mh(), target()); +- +- int stack_move = adapter_conversion_stack_move(conversion); +- BasicType src = adapter_conversion_src_type(conversion); +- BasicType dest = adapter_conversion_dest_type(conversion); +- int vminfo = adapter_conversion_vminfo(conversion); // should be zero +- +- int slots_pushed = stack_move / stack_move_unit(); +- +- if (VerifyMethodHandles) { +- verify_AdapterMethodHandle(mh, argnum, CHECK); +- } +- +- const char* err = NULL; +- +- if (!conv_op_supported(conv_op)) { +- err = "adapter not yet implemented in the JVM"; +- } +- +- // Now it's time to finish the case analysis and pick a MethodHandleEntry. +- switch (ek_orig) { +- case _adapter_retype_only: +- case _adapter_retype_raw: +- case _adapter_check_cast: +- case _adapter_dup_args: +- case _adapter_drop_args: +- // these work fine via general case code +- break; +- +- case _adapter_prim_to_prim: +- { +- // Non-subword cases are {int,float,long,double} -> {int,float,long,double}. +- // And, the {float,double} -> {int,long} cases must be handled by Java. +- switch (type2size[src] *4+ type2size[dest]) { +- case 1 *4+ 1: +- assert(src == T_INT || is_subword_type(src), "source is not float"); +- // Subword-related cases are int -> {boolean,byte,char,short}. 
+- ek_opt = _adapter_opt_i2i; +- vminfo = adapter_prim_to_prim_subword_vminfo(dest); +- break; +- case 2 *4+ 1: +- if (src == T_LONG && (dest == T_INT || is_subword_type(dest))) { +- ek_opt = _adapter_opt_l2i; +- vminfo = adapter_prim_to_prim_subword_vminfo(dest); +- } else if (src == T_DOUBLE && dest == T_FLOAT) { +- ek_opt = _adapter_opt_d2f; +- } else { +- goto throw_not_impl; // runs user code, hence could block +- } +- break; +- case 1 *4+ 2: +- if ((src == T_INT || is_subword_type(src)) && dest == T_LONG) { +- ek_opt = _adapter_opt_i2l; +- } else if (src == T_FLOAT && dest == T_DOUBLE) { +- ek_opt = _adapter_opt_f2d; +- } else { +- goto throw_not_impl; // runs user code, hence could block +- } +- break; +- default: +- goto throw_not_impl; // runs user code, hence could block +- break; +- } +- } +- break; +- +- case _adapter_ref_to_prim: +- { +- switch (type2size[dest]) { +- case 1: +- ek_opt = _adapter_opt_unboxi; +- vminfo = adapter_unbox_subword_vminfo(dest); +- break; +- case 2: +- ek_opt = _adapter_opt_unboxl; +- break; +- default: +- goto throw_not_impl; +- break; +- } +- } +- break; +- +- case _adapter_prim_to_ref: +- { +- // vminfo will be the location to insert the return value +- vminfo = argslot; +- ek_opt = _adapter_opt_collect_ref; +- ensure_vmlayout_field(target, CHECK); +- // for MethodHandleWalk: +- if (java_lang_invoke_AdapterMethodHandle::is_instance(argument())) +- ensure_vmlayout_field(argument, CHECK); +- if (!OptimizeMethodHandles) break; +- switch (type2size[src]) { +- case 1: +- ek_try = EntryKind(_adapter_opt_filter_S0_ref + argslot); +- if (ek_try < _adapter_opt_collect_LAST && +- ek_adapter_opt_collect_slot(ek_try) == argslot) { +- assert(ek_adapter_opt_collect_count(ek_try) == 1 && +- ek_adapter_opt_collect_type(ek_try) == T_OBJECT, ""); +- ek_opt = ek_try; +- break; +- } +- // else downgrade to variable slot: +- ek_opt = _adapter_opt_collect_1_ref; +- break; +- case 2: +- ek_try = EntryKind(_adapter_opt_collect_2_S0_ref + argslot); 
+- if (ek_try < _adapter_opt_collect_LAST && +- ek_adapter_opt_collect_slot(ek_try) == argslot) { +- assert(ek_adapter_opt_collect_count(ek_try) == 2 && +- ek_adapter_opt_collect_type(ek_try) == T_OBJECT, ""); +- ek_opt = ek_try; +- break; +- } +- // else downgrade to variable slot: +- ek_opt = _adapter_opt_collect_2_ref; +- break; +- default: +- goto throw_not_impl; +- break; +- } +- } +- break; +- +- case _adapter_swap_args: +- case _adapter_rot_args: +- { +- int swap_slots = type2size[src]; +- int src_slot = argslot; +- int dest_slot = vminfo; +- int rotate = (ek_orig == _adapter_swap_args) ? 0 : (src_slot > dest_slot) ? 1 : -1; +- switch (swap_slots) { +- case 1: +- ek_opt = (!rotate ? _adapter_opt_swap_1 : +- rotate > 0 ? _adapter_opt_rot_1_up : _adapter_opt_rot_1_down); +- break; +- case 2: +- ek_opt = (!rotate ? _adapter_opt_swap_2 : +- rotate > 0 ? _adapter_opt_rot_2_up : _adapter_opt_rot_2_down); +- break; +- default: +- goto throw_not_impl; +- break; +- } +- } +- break; +- +- case _adapter_spread_args: +- { +- // vminfo will be the required length of the array +- int array_size = (slots_pushed + 1) / (type2size[dest] == 2 ? 
2 : 1); +- vminfo = array_size; +- // general case +- switch (dest) { +- case T_BOOLEAN : // fall through to T_BYTE: +- case T_BYTE : ek_opt = _adapter_opt_spread_byte; break; +- case T_CHAR : ek_opt = _adapter_opt_spread_char; break; +- case T_SHORT : ek_opt = _adapter_opt_spread_short; break; +- case T_INT : ek_opt = _adapter_opt_spread_int; break; +- case T_LONG : ek_opt = _adapter_opt_spread_long; break; +- case T_FLOAT : ek_opt = _adapter_opt_spread_float; break; +- case T_DOUBLE : ek_opt = _adapter_opt_spread_double; break; +- case T_OBJECT : ek_opt = _adapter_opt_spread_ref; break; +- case T_VOID : if (array_size != 0) goto throw_not_impl; +- ek_opt = _adapter_opt_spread_ref; break; +- default : goto throw_not_impl; +- } +- assert(array_size == 0 || // it doesn't matter what the spreader is +- (ek_adapter_opt_spread_count(ek_opt) == -1 && +- (ek_adapter_opt_spread_type(ek_opt) == dest || +- (ek_adapter_opt_spread_type(ek_opt) == T_BYTE && dest == T_BOOLEAN))), +- err_msg("dest=%d ek_opt=%d", dest, ek_opt)); +- +- if (array_size <= 0) { +- // since the general case does not handle length 0, this case is required: +- ek_opt = _adapter_opt_spread_0; +- break; +- } +- if (dest == T_OBJECT) { +- ek_try = EntryKind(_adapter_opt_spread_1_ref - 1 + array_size); +- if (ek_try < _adapter_opt_spread_LAST && +- ek_adapter_opt_spread_count(ek_try) == array_size) { +- assert(ek_adapter_opt_spread_type(ek_try) == dest, ""); +- ek_opt = ek_try; +- break; +- } +- } +- break; +- } +- break; +- +- case _adapter_collect_args: +- { +- int elem_slots = argument_slot_count(java_lang_invoke_MethodHandle::type(argument())); +- // vminfo will be the location to insert the return value +- vminfo = argslot; +- ensure_vmlayout_field(target, CHECK); +- ensure_vmlayout_field(argument, CHECK); +- +- // general case: +- switch (dest) { +- default : if (!is_subword_type(dest)) goto throw_not_impl; +- // else fall through: +- case T_INT : ek_opt = _adapter_opt_collect_int; break; +- case 
T_LONG : ek_opt = _adapter_opt_collect_long; break; +- case T_FLOAT : ek_opt = _adapter_opt_collect_float; break; +- case T_DOUBLE : ek_opt = _adapter_opt_collect_double; break; +- case T_OBJECT : ek_opt = _adapter_opt_collect_ref; break; +- case T_VOID : ek_opt = _adapter_opt_collect_void; break; +- } +- assert(ek_adapter_opt_collect_slot(ek_opt) == -1 && +- ek_adapter_opt_collect_count(ek_opt) == -1 && +- (ek_adapter_opt_collect_type(ek_opt) == dest || +- ek_adapter_opt_collect_type(ek_opt) == T_INT && is_subword_type(dest)), +- ""); +- +- if (dest == T_OBJECT && elem_slots == 1 && OptimizeMethodHandles) { +- // filter operation on a ref +- ek_try = EntryKind(_adapter_opt_filter_S0_ref + argslot); +- if (ek_try < _adapter_opt_collect_LAST && +- ek_adapter_opt_collect_slot(ek_try) == argslot) { +- assert(ek_adapter_opt_collect_count(ek_try) == elem_slots && +- ek_adapter_opt_collect_type(ek_try) == dest, ""); +- ek_opt = ek_try; +- break; +- } +- ek_opt = _adapter_opt_collect_1_ref; +- break; +- } +- +- if (dest == T_OBJECT && elem_slots == 2 && OptimizeMethodHandles) { +- // filter of two arguments +- ek_try = EntryKind(_adapter_opt_collect_2_S0_ref + argslot); +- if (ek_try < _adapter_opt_collect_LAST && +- ek_adapter_opt_collect_slot(ek_try) == argslot) { +- assert(ek_adapter_opt_collect_count(ek_try) == elem_slots && +- ek_adapter_opt_collect_type(ek_try) == dest, ""); +- ek_opt = ek_try; +- break; +- } +- ek_opt = _adapter_opt_collect_2_ref; +- break; +- } +- +- if (dest == T_OBJECT && OptimizeMethodHandles) { +- // try to use a fixed length adapter +- ek_try = EntryKind(_adapter_opt_collect_0_ref + elem_slots); +- if (ek_try < _adapter_opt_collect_LAST && +- ek_adapter_opt_collect_count(ek_try) == elem_slots) { +- assert(ek_adapter_opt_collect_slot(ek_try) == -1 && +- ek_adapter_opt_collect_type(ek_try) == dest, ""); +- ek_opt = ek_try; +- break; +- } +- } +- +- break; +- } +- +- case _adapter_fold_args: +- { +- int elem_slots = 
argument_slot_count(java_lang_invoke_MethodHandle::type(argument())); +- // vminfo will be the location to insert the return value +- vminfo = argslot + elem_slots; +- ensure_vmlayout_field(target, CHECK); +- ensure_vmlayout_field(argument, CHECK); +- +- switch (dest) { +- default : if (!is_subword_type(dest)) goto throw_not_impl; +- // else fall through: +- case T_INT : ek_opt = _adapter_opt_fold_int; break; +- case T_LONG : ek_opt = _adapter_opt_fold_long; break; +- case T_FLOAT : ek_opt = _adapter_opt_fold_float; break; +- case T_DOUBLE : ek_opt = _adapter_opt_fold_double; break; +- case T_OBJECT : ek_opt = _adapter_opt_fold_ref; break; +- case T_VOID : ek_opt = _adapter_opt_fold_void; break; +- } +- assert(ek_adapter_opt_collect_slot(ek_opt) == -1 && +- ek_adapter_opt_collect_count(ek_opt) == -1 && +- (ek_adapter_opt_collect_type(ek_opt) == dest || +- ek_adapter_opt_collect_type(ek_opt) == T_INT && is_subword_type(dest)), +- ""); +- +- if (dest == T_OBJECT && elem_slots == 0 && OptimizeMethodHandles) { +- // if there are no args, just pretend it's a collect +- ek_opt = _adapter_opt_collect_0_ref; +- break; +- } +- +- if (dest == T_OBJECT && OptimizeMethodHandles) { +- // try to use a fixed length adapter +- ek_try = EntryKind(_adapter_opt_fold_1_ref - 1 + elem_slots); +- if (ek_try < _adapter_opt_fold_LAST && +- ek_adapter_opt_collect_count(ek_try) == elem_slots) { +- assert(ek_adapter_opt_collect_slot(ek_try) == -1 && +- ek_adapter_opt_collect_type(ek_try) == dest, ""); +- ek_opt = ek_try; +- break; +- } +- } +- +- break; +- } +- +- default: +- // should have failed much earlier; must be a missing case here +- assert(false, "incomplete switch"); +- // and fall through: +- +- throw_not_impl: +- if (err == NULL) +- err = "unknown adapter type"; +- break; +- } +- +- if (err == NULL && (vminfo & CONV_VMINFO_MASK) != vminfo) { +- // should not happen, since vminfo is used to encode arg/slot indexes < 255 +- err = "vminfo overflow"; +- } +- +- if (err == NULL && 
!have_entry(ek_opt)) { +- err = "adapter stub for this kind of method handle is missing"; +- } +- +- if (err == NULL && ek_opt == ek_orig) { +- switch (ek_opt) { +- case _adapter_prim_to_prim: +- case _adapter_ref_to_prim: +- case _adapter_prim_to_ref: +- case _adapter_swap_args: +- case _adapter_rot_args: +- case _adapter_collect_args: +- case _adapter_fold_args: +- case _adapter_spread_args: +- // should be handled completely by optimized cases; see above +- err = "init_AdapterMethodHandle should not issue this"; +- break; +- } +- } +- +- if (err != NULL) { +- throw_InternalError_for_bad_conversion(conversion, err_msg("%s: conv_op %d ek_opt %d", err, conv_op, ek_opt), THREAD); +- return; +- } +- +- // Rebuild the conversion value; maybe parts of it were changed. +- jint new_conversion = adapter_conversion(conv_op, src, dest, stack_move, vminfo); +- +- // Finalize the conversion field. (Note that it is final to Java code.) +- java_lang_invoke_AdapterMethodHandle::set_conversion(mh(), new_conversion); +- +- if (java_lang_invoke_CountingMethodHandle::is_instance(mh())) { +- assert(ek_orig == _adapter_retype_only, "only one handled"); +- ek_opt = _adapter_opt_profiling; +- } +- +- // Done! +- java_lang_invoke_MethodHandle::set_vmentry(mh(), entry(ek_opt)); +- +- // There should be enough memory barriers on exit from native methods +- // to ensure that the MH is fully initialized to all threads before +- // Java code can publish it in global data structures. 
+-} +- +-void MethodHandles::ensure_vmlayout_field(Handle target, TRAPS) { +- Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(target())); +- Handle mtform(THREAD, java_lang_invoke_MethodType::form(mtype())); +- if (mtform.is_null()) { THROW(vmSymbols::java_lang_InternalError()); } +- if (java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() > 0) { +- if (java_lang_invoke_MethodTypeForm::vmlayout(mtform()) == NULL) { +- // fill it in +- Handle erased_mtype(THREAD, java_lang_invoke_MethodTypeForm::erasedType(mtform())); +- TempNewSymbol erased_signature +- = java_lang_invoke_MethodType::as_signature(erased_mtype(), /*intern:*/true, CHECK); +- methodOop cookie +- = SystemDictionary::find_method_handle_invoke(vmSymbols::invokeExact_name(), +- erased_signature, +- SystemDictionaryHandles::Object_klass(), +- THREAD); +- java_lang_invoke_MethodTypeForm::init_vmlayout(mtform(), cookie); +- } +- } +- assert(java_lang_invoke_MethodTypeForm::vmslots(mtform()) == argument_slot_count(mtype()), "must agree"); +-} +- +-#ifdef ASSERT +- +-extern "C" +-void print_method_handle(oop mh); +- +-static void stress_method_handle_walk_impl(Handle mh, TRAPS) { +- if (StressMethodHandleWalk) { +- // Exercise the MethodHandleWalk code in various ways and validate +- // the resulting method oop. Some of these produce output so they +- // are guarded under Verbose. 
+- ResourceMark rm; +- HandleMark hm; +- if (Verbose) { +- print_method_handle(mh()); +- } +- TempNewSymbol name = SymbolTable::new_symbol("invoke", CHECK); +- Handle mt = java_lang_invoke_MethodHandle::type(mh()); +- TempNewSymbol signature = java_lang_invoke_MethodType::as_signature(mt(), true, CHECK); +- MethodHandleCompiler mhc(mh, name, signature, 10000, false, CHECK); +- methodHandle m = mhc.compile(CHECK); +- if (Verbose) { +- m->print_codes(); +- } +- InterpreterOopMap mask; +- OopMapCache::compute_one_oop_map(m, m->code_size() - 1, &mask); +- // compile to object code if -Xcomp or WizardMode +- if ((WizardMode || +- CompilationPolicy::must_be_compiled(m)) +- && !instanceKlass::cast(m->method_holder())->is_not_initialized() +- && CompilationPolicy::can_be_compiled(m)) { +- // Force compilation +- CompileBroker::compile_method(m, InvocationEntryBci, +- CompilationPolicy::policy()->initial_compile_level(), +- methodHandle(), 0, "StressMethodHandleWalk", +- CHECK); +- } +- } +-} +- +-static void stress_method_handle_walk(Handle mh, TRAPS) { +- stress_method_handle_walk_impl(mh, THREAD); +- if (HAS_PENDING_EXCEPTION) { +- oop ex = PENDING_EXCEPTION; +- CLEAR_PENDING_EXCEPTION; +- tty->print("StressMethodHandleWalk: "); +- java_lang_Throwable::print(ex, tty); +- tty->cr(); +- } +-} +-#else +- +-static void stress_method_handle_walk(Handle mh, TRAPS) {} +- +-#endif +- + // +-// Here are the native methods on sun.invoke.MethodHandleImpl. ++// Here are the native methods in java.lang.invoke.MethodHandleNatives + // They are the private interface between this JVM and the HotSpot-specific + // Java code that implements JSR 292 method handles. + // + // Note: We use a JVM_ENTRY macro to define each of these, for this is the way + // that intrinsic (non-JNI) native methods are defined in HotSpot. 
+ // +- +-// direct method handles for invokestatic or invokespecial +-// void init(DirectMethodHandle self, MemberName ref, boolean doDispatch, Class caller); +-JVM_ENTRY(void, MHN_init_DMH(JNIEnv *env, jobject igcls, jobject mh_jh, +- jobject target_jh, jboolean do_dispatch, jobject caller_jh)) { +- ResourceMark rm; // for error messages +- +- // This is the guy we are initializing: +- if (mh_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "self is null"); } +- Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh)); +- +- // Early returns out of this method leave the DMH in an unfinished state. +- assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); +- +- // which method are we really talking about? +- if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); } +- Handle target(THREAD, JNIHandles::resolve_non_null(target_jh)); +- if (java_lang_invoke_MemberName::is_instance(target()) && +- java_lang_invoke_MemberName::vmindex(target()) == VM_INDEX_UNINITIALIZED) { +- MethodHandles::resolve_MemberName(target, CHECK); +- } +- +- KlassHandle receiver_limit; int decode_flags = 0; +- methodHandle m = MethodHandles::decode_method(target(), receiver_limit, decode_flags); +- if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "no such method"); } +- +- // The trusted Java code that calls this method should already have performed +- // access checks on behalf of the given caller. But, we can verify this. +- if (VerifyMethodHandles && caller_jh != NULL) { +- KlassHandle caller(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(caller_jh))); +- // If this were a bytecode, the first access check would be against +- // the "reference class" mentioned in the CONSTANT_Methodref. +- // We don't know at this point which class that was, and if we +- // check against m.method_holder we might get the wrong answer. 
+- // So we just make sure to handle this check when the resolution +- // happens, when we call resolve_MemberName. +- // +- // (A public class can inherit public members from private supers, +- // and it would be wrong to check access against the private super +- // if the original symbolic reference was against the public class.) +- // +- // If there were a bytecode, the next step would be to lookup the method +- // in the reference class, then then check the method's access bits. +- // Emulate LinkResolver::check_method_accessability. +- klassOop resolved_klass = m->method_holder(); +- if (!Reflection::verify_field_access(caller->as_klassOop(), +- resolved_klass, resolved_klass, +- m->access_flags(), +- true)) { +- // %%% following cutout belongs in Reflection::verify_field_access? +- bool same_pm = Reflection::is_same_package_member(caller->as_klassOop(), +- resolved_klass, THREAD); +- if (!same_pm) { +- THROW_MSG(vmSymbols::java_lang_InternalError(), m->name_and_sig_as_C_string()); +- } +- } +- } +- +- MethodHandles::init_DirectMethodHandle(mh, m, (do_dispatch != JNI_FALSE), CHECK); +- stress_method_handle_walk(mh, CHECK); +-} +-JVM_END +- +-// bound method handles +-JVM_ENTRY(void, MHN_init_BMH(JNIEnv *env, jobject igcls, jobject mh_jh, +- jobject target_jh, int argnum)) { +- ResourceMark rm; // for error messages +- +- // This is the guy we are initializing: +- if (mh_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "self is null"); } +- Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh)); +- +- // Early returns out of this method leave the BMH in an unfinished state. +- assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); +- +- if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); } +- Handle target(THREAD, JNIHandles::resolve_non_null(target_jh)); +- +- if (!java_lang_invoke_MethodHandle::is_instance(target())) { +- // Target object is a reflective method. 
(%%% Do we need this alternate path?) +- Untested("init_BMH of non-MH"); +- if (argnum != 0) { THROW(vmSymbols::java_lang_InternalError()); } +- KlassHandle receiver_limit; int decode_flags = 0; +- methodHandle m = MethodHandles::decode_method(target(), receiver_limit, decode_flags); +- MethodHandles::init_BoundMethodHandle_with_receiver(mh, m, +- receiver_limit, +- decode_flags, +- CHECK); +- } else { +- // Build a BMH on top of a DMH or another BMH: +- MethodHandles::init_BoundMethodHandle(mh, target, argnum, CHECK); +- } +- +- if (StressMethodHandleWalk) { +- if (mh->klass() == SystemDictionary::BoundMethodHandle_klass()) +- stress_method_handle_walk(mh, CHECK); +- // else don't, since the subclass has not yet initialized its own fields +- } +-} +-JVM_END +- +-// adapter method handles +-JVM_ENTRY(void, MHN_init_AMH(JNIEnv *env, jobject igcls, jobject mh_jh, +- jobject target_jh, int argnum)) { +- // This is the guy we are initializing: +- if (mh_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "self is null"); } +- if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); } +- Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh)); +- Handle target(THREAD, JNIHandles::resolve_non_null(target_jh)); +- +- // Early returns out of this method leave the AMH in an unfinished state. +- assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); +- +- MethodHandles::init_AdapterMethodHandle(mh, target, argnum, CHECK); +- stress_method_handle_walk(mh, CHECK); +-} +-JVM_END +- +-// method type forms +-JVM_ENTRY(void, MHN_init_MT(JNIEnv *env, jobject igcls, jobject erased_jh)) { +- if (erased_jh == NULL) return; +- if (TraceMethodHandles) { +- tty->print("creating MethodType form "); +- if (WizardMode || Verbose) { // Warning: this calls Java code on the MH! 
+- // call Object.toString() +- Symbol* name = vmSymbols::toString_name(); +- Symbol* sig = vmSymbols::void_string_signature(); +- JavaCallArguments args(Handle(THREAD, JNIHandles::resolve_non_null(erased_jh))); +- JavaValue result(T_OBJECT); +- JavaCalls::call_virtual(&result, SystemDictionary::Object_klass(), name, sig, +- &args, CHECK); +- Handle str(THREAD, (oop)result.get_jobject()); +- java_lang_String::print(str, tty); +- } +- tty->cr(); +- } +-} +-JVM_END +- +-// debugging and reflection +-JVM_ENTRY(jobject, MHN_getTarget(JNIEnv *env, jobject igcls, jobject mh_jh, jint format)) { +- Handle mh(THREAD, JNIHandles::resolve(mh_jh)); +- if (!java_lang_invoke_MethodHandle::is_instance(mh())) { +- THROW_NULL(vmSymbols::java_lang_IllegalArgumentException()); +- } +- oop target = MethodHandles::encode_target(mh, format, CHECK_NULL); +- return JNIHandles::make_local(THREAD, target); +-} +-JVM_END +- + JVM_ENTRY(jint, MHN_getConstant(JNIEnv *env, jobject igcls, jint which)) { + switch (which) { +- case MethodHandles::GC_JVM_PUSH_LIMIT: +- guarantee(MethodHandlePushLimit >= 2 && MethodHandlePushLimit <= 0xFF, +- "MethodHandlePushLimit parameter must be in valid range"); +- return MethodHandlePushLimit; +- case MethodHandles::GC_JVM_STACK_MOVE_UNIT: +- // return number of words per slot, signed according to stack direction +- return MethodHandles::stack_move_unit(); +- case MethodHandles::GC_CONV_OP_IMPLEMENTED_MASK: +- return MethodHandles::adapter_conversion_ops_supported_mask(); + case MethodHandles::GC_COUNT_GWT: + #ifdef COMPILER2 + return true; +@@ -2880,64 +952,54 @@ + JVM_END + + #ifndef PRODUCT +-#define EACH_NAMED_CON(template) \ +- /* hold back this one until JDK stabilizes */ \ +- /* template(MethodHandles,GC_JVM_PUSH_LIMIT) */ \ +- /* hold back this one until JDK stabilizes */ \ +- /* template(MethodHandles,GC_JVM_STACK_MOVE_UNIT) */ \ +- /* hold back this one until JDK stabilizes */ \ +- /* template(MethodHandles,GC_OP_ROT_ARGS_DOWN_LIMIT_BIAS) */ \ +- 
template(MethodHandles,ETF_HANDLE_OR_METHOD_NAME) \ +- template(MethodHandles,ETF_DIRECT_HANDLE) \ +- template(MethodHandles,ETF_METHOD_NAME) \ +- template(MethodHandles,ETF_REFLECT_METHOD) \ ++#define EACH_NAMED_CON(template, requirement) \ ++ template(MethodHandles,GC_COUNT_GWT) \ + template(java_lang_invoke_MemberName,MN_IS_METHOD) \ + template(java_lang_invoke_MemberName,MN_IS_CONSTRUCTOR) \ + template(java_lang_invoke_MemberName,MN_IS_FIELD) \ + template(java_lang_invoke_MemberName,MN_IS_TYPE) \ + template(java_lang_invoke_MemberName,MN_SEARCH_SUPERCLASSES) \ + template(java_lang_invoke_MemberName,MN_SEARCH_INTERFACES) \ +- template(java_lang_invoke_MemberName,VM_INDEX_UNINITIALIZED) \ +- template(java_lang_invoke_AdapterMethodHandle,OP_RETYPE_ONLY) \ +- template(java_lang_invoke_AdapterMethodHandle,OP_RETYPE_RAW) \ +- template(java_lang_invoke_AdapterMethodHandle,OP_CHECK_CAST) \ +- template(java_lang_invoke_AdapterMethodHandle,OP_PRIM_TO_PRIM) \ +- template(java_lang_invoke_AdapterMethodHandle,OP_REF_TO_PRIM) \ +- template(java_lang_invoke_AdapterMethodHandle,OP_PRIM_TO_REF) \ +- template(java_lang_invoke_AdapterMethodHandle,OP_SWAP_ARGS) \ +- template(java_lang_invoke_AdapterMethodHandle,OP_ROT_ARGS) \ +- template(java_lang_invoke_AdapterMethodHandle,OP_DUP_ARGS) \ +- template(java_lang_invoke_AdapterMethodHandle,OP_DROP_ARGS) \ +- template(java_lang_invoke_AdapterMethodHandle,OP_COLLECT_ARGS) \ +- template(java_lang_invoke_AdapterMethodHandle,OP_SPREAD_ARGS) \ +- /* hold back this one until JDK stabilizes */ \ +- /*template(java_lang_invoke_AdapterMethodHandle,CONV_OP_LIMIT)*/ \ +- template(java_lang_invoke_AdapterMethodHandle,CONV_OP_MASK) \ +- template(java_lang_invoke_AdapterMethodHandle,CONV_VMINFO_MASK) \ +- template(java_lang_invoke_AdapterMethodHandle,CONV_VMINFO_SHIFT) \ +- template(java_lang_invoke_AdapterMethodHandle,CONV_OP_SHIFT) \ +- template(java_lang_invoke_AdapterMethodHandle,CONV_DEST_TYPE_SHIFT) \ +- 
template(java_lang_invoke_AdapterMethodHandle,CONV_SRC_TYPE_SHIFT) \ +- template(java_lang_invoke_AdapterMethodHandle,CONV_STACK_MOVE_SHIFT) \ +- template(java_lang_invoke_AdapterMethodHandle,CONV_STACK_MOVE_MASK) \ ++ template(java_lang_invoke_MemberName,MN_REFERENCE_KIND_SHIFT) \ ++ template(java_lang_invoke_MemberName,MN_REFERENCE_KIND_MASK) \ ++ template(MethodHandles,GC_LAMBDA_SUPPORT) \ + /*end*/ + ++#define IGNORE_REQ(req_expr) /* req_expr */ + #define ONE_PLUS(scope,value) 1+ +-static const int con_value_count = EACH_NAMED_CON(ONE_PLUS) 0; ++static const int con_value_count = EACH_NAMED_CON(ONE_PLUS, IGNORE_REQ) 0; + #define VALUE_COMMA(scope,value) scope::value, +-static const int con_values[con_value_count+1] = { EACH_NAMED_CON(VALUE_COMMA) 0 }; ++static const int con_values[con_value_count+1] = { EACH_NAMED_CON(VALUE_COMMA, IGNORE_REQ) 0 }; + #define STRING_NULL(scope,value) #value "\0" +-static const char con_names[] = { EACH_NAMED_CON(STRING_NULL) }; ++static const char con_names[] = { EACH_NAMED_CON(STRING_NULL, IGNORE_REQ) }; ++ ++static bool advertise_con_value(int which) { ++ if (which < 0) return false; ++ bool ok = true; ++ int count = 0; ++#define INC_COUNT(scope,value) \ ++ ++count; ++#define CHECK_REQ(req_expr) \ ++ if (which < count) return ok; \ ++ ok = (req_expr); ++ EACH_NAMED_CON(INC_COUNT, CHECK_REQ); ++#undef INC_COUNT ++#undef CHECK_REQ ++ assert(count == con_value_count, ""); ++ if (which < count) return ok; ++ return false; ++} + + #undef ONE_PLUS + #undef VALUE_COMMA + #undef STRING_NULL + #undef EACH_NAMED_CON +-#endif ++#endif // PRODUCT + + JVM_ENTRY(jint, MHN_getNamedCon(JNIEnv *env, jobject igcls, jint which, jobjectArray box_jh)) { + #ifndef PRODUCT +- if (which >= 0 && which < con_value_count) { ++ if (advertise_con_value(which)) { ++ assert(which >= 0 && which < con_value_count, ""); + int con = con_values[which]; + objArrayHandle box(THREAD, (objArrayOop) JNIHandles::resolve(box_jh)); + if (box.not_null() && box->klass() == 
Universe::objectArrayKlassObj() && box->length() > 0) { +@@ -2973,13 +1035,14 @@ + JVM_END + + // void resolve(MemberName self, Class caller) +-JVM_ENTRY(void, MHN_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh)) { +- if (mname_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "mname is null"); } ++JVM_ENTRY(jobject, MHN_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh)) { ++ if (mname_jh == NULL) { THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "mname is null"); } + Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh)); + + // The trusted Java code that calls this method should already have performed + // access checks on behalf of the given caller. But, we can verify this. +- if (VerifyMethodHandles && caller_jh != NULL) { ++ if (VerifyMethodHandles && caller_jh != NULL && ++ java_lang_invoke_MemberName::clazz(mname()) != NULL) { + klassOop reference_klass = java_lang_Class::as_klassOop(java_lang_invoke_MemberName::clazz(mname())); + if (reference_klass != NULL) { + // Emulate LinkResolver::check_klass_accessability. 
+@@ -2987,15 +1050,97 @@ + if (!Reflection::verify_class_access(caller, + reference_klass, + true)) { +- THROW_MSG(vmSymbols::java_lang_InternalError(), Klass::cast(reference_klass)->external_name()); ++ THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), Klass::cast(reference_klass)->external_name()); + } + } + } + +- MethodHandles::resolve_MemberName(mname, CHECK); ++ Handle resolved = MethodHandles::resolve_MemberName(mname, CHECK_NULL); ++ if (resolved.is_null()) { ++ int flags = java_lang_invoke_MemberName::flags(mname()); ++ int ref_kind = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK; ++ if (!MethodHandles::ref_kind_is_valid(ref_kind)) { ++ THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "obsolete MemberName format"); ++ } ++ if ((flags & ALL_KINDS) == IS_FIELD) { ++ THROW_MSG_NULL(vmSymbols::java_lang_NoSuchMethodError(), "field resolution failed"); ++ } else if ((flags & ALL_KINDS) == IS_METHOD || ++ (flags & ALL_KINDS) == IS_CONSTRUCTOR) { ++ THROW_MSG_NULL(vmSymbols::java_lang_NoSuchFieldError(), "method resolution failed"); ++ } else { ++ THROW_MSG_NULL(vmSymbols::java_lang_LinkageError(), "resolution failed"); ++ } ++ } ++ ++ return JNIHandles::make_local(THREAD, resolved()); + } + JVM_END + ++static jlong find_member_field_offset(oop mname, bool must_be_static, TRAPS) { ++ if (mname == NULL || ++ java_lang_invoke_MemberName::vmtarget(mname) == NULL) { ++ THROW_MSG_0(vmSymbols::java_lang_InternalError(), "mname not resolved"); ++ } else { ++ int flags = java_lang_invoke_MemberName::flags(mname); ++ if ((flags & IS_FIELD) != 0 && ++ (must_be_static ++ ? (flags & JVM_ACC_STATIC) != 0 ++ : (flags & JVM_ACC_STATIC) == 0)) { ++ int vmindex = java_lang_invoke_MemberName::vmindex(mname); ++ return (jlong) vmindex; ++ } ++ } ++ const char* msg = (must_be_static ? 
"static field required" : "non-static field required"); ++ THROW_MSG_0(vmSymbols::java_lang_InternalError(), msg); ++ return 0; ++} ++ ++JVM_ENTRY(jlong, MHN_objectFieldOffset(JNIEnv *env, jobject igcls, jobject mname_jh)) { ++ return find_member_field_offset(JNIHandles::resolve(mname_jh), false, THREAD); ++} ++JVM_END ++ ++JVM_ENTRY(jlong, MHN_staticFieldOffset(JNIEnv *env, jobject igcls, jobject mname_jh)) { ++ return find_member_field_offset(JNIHandles::resolve(mname_jh), true, THREAD); ++} ++JVM_END ++ ++JVM_ENTRY(jobject, MHN_staticFieldBase(JNIEnv *env, jobject igcls, jobject mname_jh)) { ++ // use the other function to perform sanity checks: ++ jlong ignore = find_member_field_offset(JNIHandles::resolve(mname_jh), true, CHECK_NULL); ++ oop clazz = java_lang_invoke_MemberName::clazz(JNIHandles::resolve_non_null(mname_jh)); ++ return JNIHandles::make_local(THREAD, clazz); ++} ++JVM_END ++ ++JVM_ENTRY(jobject, MHN_getMemberVMInfo(JNIEnv *env, jobject igcls, jobject mname_jh)) { ++ if (mname_jh == NULL) return NULL; ++ Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh)); ++ intptr_t vmindex = java_lang_invoke_MemberName::vmindex(mname()); ++ Handle vmtarget = java_lang_invoke_MemberName::vmtarget(mname()); ++ objArrayHandle result = oopFactory::new_objArray(SystemDictionary::Object_klass(), 2, CHECK_NULL); ++ jvalue vmindex_value; vmindex_value.j = (long)vmindex; ++ oop x = java_lang_boxing_object::create(T_LONG, &vmindex_value, CHECK_NULL); ++ result->obj_at_put(0, x); ++ x = NULL; ++ if (vmtarget.is_null() || vmtarget->is_instance()) { ++ x = vmtarget(); ++ } else if (vmtarget->is_klass()) { ++ x = Klass::cast((klassOop) vmtarget())->java_mirror(); ++ } else { ++ Handle mname2 = MethodHandles::new_MemberName(CHECK_NULL); ++ if (vmtarget->is_method()) ++ x = MethodHandles::init_method_MemberName(mname2(), methodOop(vmtarget()), false, NULL); ++ else ++ x = MethodHandles::init_MemberName(mname2(), vmtarget()); ++ } ++ result->obj_at_put(1, x); ++ return 
JNIHandles::make_local(env, result()); ++} ++JVM_END ++ ++ ++ + // static native int getMembers(Class defc, String matchName, String matchSig, + // int matchFlags, Class caller, int skip, MemberName[] results); + JVM_ENTRY(jint, MHN_getMembers(JNIEnv *env, jobject igcls, +@@ -3061,45 +1206,6 @@ + } + JVM_END + +-methodOop MethodHandles::resolve_raise_exception_method(TRAPS) { +- if (_raise_exception_method != NULL) { +- // no need to do it twice +- return raise_exception_method(); +- } +- // LinkResolver::resolve_invokedynamic can reach this point +- // because an invokedynamic has failed very early (7049415) +- KlassHandle MHN_klass = SystemDictionaryHandles::MethodHandleNatives_klass(); +- if (MHN_klass.not_null()) { +- TempNewSymbol raiseException_name = SymbolTable::new_symbol("raiseException", CHECK_NULL); +- TempNewSymbol raiseException_sig = SymbolTable::new_symbol("(ILjava/lang/Object;Ljava/lang/Object;)V", CHECK_NULL); +- methodOop raiseException_method = instanceKlass::cast(MHN_klass->as_klassOop()) +- ->find_method(raiseException_name, raiseException_sig); +- if (raiseException_method != NULL && raiseException_method->is_static()) { +- return raiseException_method; +- } +- } +- // not found; let the caller deal with it +- return NULL; +-} +-void MethodHandles::raise_exception(int code, oop actual, oop required, TRAPS) { +- methodOop raiseException_method = resolve_raise_exception_method(CHECK); +- if (raiseException_method != NULL && +- instanceKlass::cast(raiseException_method->method_holder())->is_not_initialized()) { +- instanceKlass::cast(raiseException_method->method_holder())->initialize(CHECK); +- // it had better be resolved by now, or maybe JSR 292 failed to load +- raiseException_method = raise_exception_method(); +- } +- if (raiseException_method == NULL) { +- THROW_MSG(vmSymbols::java_lang_InternalError(), "no raiseException method"); +- } +- JavaCallArguments args; +- args.push_int(code); +- args.push_oop(actual); +- args.push_oop(required); 
+- JavaValue result(T_VOID); +- JavaCalls::call(&result, raiseException_method, &args, CHECK); +-} +- + JVM_ENTRY(jobject, MH_invoke_UOE(JNIEnv *env, jobject igmh, jobjectArray igargs)) { + TempNewSymbol UOE_name = SymbolTable::new_symbol("java/lang/UnsupportedOperationException", CHECK_NULL); + THROW_MSG_NULL(UOE_name, "MethodHandle.invoke cannot be invoked reflectively"); +@@ -3129,39 +1235,30 @@ + #define MT JLINV"MethodType;" + #define MH JLINV"MethodHandle;" + #define MEM JLINV"MemberName;" +-#define AMH JLINV"AdapterMethodHandle;" +-#define BMH JLINV"BoundMethodHandle;" +-#define DMH JLINV"DirectMethodHandle;" + + #define CC (char*) /*cast a literal from (const char*)*/ + #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f) + + // These are the native methods on java.lang.invoke.MethodHandleNatives. +-static JNINativeMethod methods[] = { +- // void init(MemberName self, AccessibleObject ref) +- {CC"init", CC"("AMH""MH"I)V", FN_PTR(MHN_init_AMH)}, +- {CC"init", CC"("BMH""OBJ"I)V", FN_PTR(MHN_init_BMH)}, +- {CC"init", CC"("DMH""OBJ"Z"CLS")V", FN_PTR(MHN_init_DMH)}, +- {CC"init", CC"("MT")V", FN_PTR(MHN_init_MT)}, ++static JNINativeMethod required_methods_JDK8[] = { + {CC"init", CC"("MEM""OBJ")V", FN_PTR(MHN_init_Mem)}, + {CC"expand", CC"("MEM")V", FN_PTR(MHN_expand_Mem)}, +- {CC"resolve", CC"("MEM""CLS")V", FN_PTR(MHN_resolve_Mem)}, +- {CC"getTarget", CC"("MH"I)"OBJ, FN_PTR(MHN_getTarget)}, ++ {CC"resolve", CC"("MEM""CLS")"MEM, FN_PTR(MHN_resolve_Mem)}, + {CC"getConstant", CC"(I)I", FN_PTR(MHN_getConstant)}, + // static native int getNamedCon(int which, Object[] name) + {CC"getNamedCon", CC"(I["OBJ")I", FN_PTR(MHN_getNamedCon)}, + // static native int getMembers(Class defc, String matchName, String matchSig, + // int matchFlags, Class caller, int skip, MemberName[] results); +- {CC"getMembers", CC"("CLS""STRG""STRG"I"CLS"I["MEM")I", FN_PTR(MHN_getMembers)} +-}; +- +-static JNINativeMethod call_site_methods[] = { ++ {CC"getMembers", 
CC"("CLS""STRG""STRG"I"CLS"I["MEM")I", FN_PTR(MHN_getMembers)}, ++ {CC"objectFieldOffset", CC"("MEM")J", FN_PTR(MHN_objectFieldOffset)}, + {CC"setCallSiteTargetNormal", CC"("CS""MH")V", FN_PTR(MHN_setCallSiteTargetNormal)}, +- {CC"setCallSiteTargetVolatile", CC"("CS""MH")V", FN_PTR(MHN_setCallSiteTargetVolatile)} ++ {CC"setCallSiteTargetVolatile", CC"("CS""MH")V", FN_PTR(MHN_setCallSiteTargetVolatile)}, ++ {CC"staticFieldOffset", CC"("MEM")J", FN_PTR(MHN_staticFieldOffset)}, ++ {CC"staticFieldBase", CC"("MEM")"OBJ, FN_PTR(MHN_staticFieldBase)}, ++ {CC"getMemberVMInfo", CC"("MEM")"OBJ, FN_PTR(MHN_getMemberVMInfo)} + }; + + static JNINativeMethod invoke_methods[] = { +- // void init(MemberName self, AccessibleObject ref) + {CC"invoke", CC"(["OBJ")"OBJ, FN_PTR(MH_invoke_UOE)}, + {CC"invokeExact", CC"(["OBJ")"OBJ, FN_PTR(MH_invokeExact_UOE)} + }; +@@ -3169,8 +1266,6 @@ + // This one function is exported, used by NativeLookup. + + JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class)) { +- assert(MethodHandles::spot_check_entry_names(), "entry enum is OK"); +- + if (!EnableInvokeDynamic) { + warning("JSR 292 is disabled in this JVM. 
Use -XX:+UnlockDiagnosticVMOptions -XX:+EnableInvokeDynamic to enable."); + return; // bind nothing +@@ -3179,36 +1274,32 @@ + assert(!MethodHandles::enabled(), "must not be enabled"); + bool enable_MH = true; + +- { ++ jclass MH_class = NULL; ++ if (SystemDictionary::MethodHandle_klass() == NULL) { ++ enable_MH = false; ++ } else { ++ oop mirror = Klass::cast(SystemDictionary::MethodHandle_klass())->java_mirror(); ++ MH_class = (jclass) JNIHandles::make_local(env, mirror); ++ } ++ ++ int status; ++ ++ if (enable_MH) { + ThreadToNativeFromVM ttnfv(thread); +- int status = env->RegisterNatives(MHN_class, methods, sizeof(methods)/sizeof(JNINativeMethod)); +- if (!env->ExceptionOccurred()) { +- const char* L_MH_name = (JLINV "MethodHandle"); +- const char* MH_name = L_MH_name+1; +- jclass MH_class = env->FindClass(MH_name); ++ ++ status = env->RegisterNatives(MHN_class, required_methods_JDK8, sizeof(required_methods_JDK8)/sizeof(JNINativeMethod)); ++ if (status == JNI_OK && !env->ExceptionOccurred()) { + status = env->RegisterNatives(MH_class, invoke_methods, sizeof(invoke_methods)/sizeof(JNINativeMethod)); + } +- if (env->ExceptionOccurred()) { ++ if (status != JNI_OK || env->ExceptionOccurred()) { + warning("JSR 292 method handle code is mismatched to this JVM. Disabling support."); + enable_MH = false; + env->ExceptionClear(); + } +- +- status = env->RegisterNatives(MHN_class, call_site_methods, sizeof(call_site_methods)/sizeof(JNINativeMethod)); +- if (env->ExceptionOccurred()) { +- // Exception is okay until 7087357 +- env->ExceptionClear(); +- } + } + +- if (enable_MH) { +- methodOop raiseException_method = MethodHandles::resolve_raise_exception_method(CHECK); +- if (raiseException_method != NULL) { +- MethodHandles::set_raise_exception_method(raiseException_method); +- } else { +- warning("JSR 292 method handle code is mismatched to this JVM. 
Disabling support."); +- enable_MH = false; +- } ++ if (TraceInvokeDynamic) { ++ tty->print_cr("MethodHandle support loaded (using LambdaForms)"); + } + + if (enable_MH) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/prims/methodHandles.hpp +--- openjdk/hotspot/src/share/vm/prims/methodHandles.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/prims/methodHandles.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -33,523 +33,36 @@ + + class MacroAssembler; + class Label; +-class MethodHandleEntry; + + class MethodHandles: AllStatic { + // JVM support for MethodHandle, MethodType, and related types + // in java.lang.invoke and sun.invoke. + // See also javaClasses for layouts java_lang_invoke_Method{Handle,Type,Type::Form}. + public: +- enum EntryKind { +- _raise_exception, // stub for error generation from other stubs +- _invokestatic_mh, // how a MH emulates invokestatic +- _invokespecial_mh, // ditto for the other invokes... +- _invokevirtual_mh, +- _invokeinterface_mh, +- _bound_ref_mh, // reference argument is bound +- _bound_int_mh, // int argument is bound (via an Integer or Float) +- _bound_long_mh, // long argument is bound (via a Long or Double) +- _bound_ref_direct_mh, // same as above, with direct linkage to methodOop +- _bound_int_direct_mh, +- _bound_long_direct_mh, +- +- _adapter_mh_first, // adapter sequence goes here... 
+- _adapter_retype_only = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY, +- _adapter_retype_raw = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW, +- _adapter_check_cast = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST, +- _adapter_prim_to_prim = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM, +- _adapter_ref_to_prim = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM, +- _adapter_prim_to_ref = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF, +- _adapter_swap_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS, +- _adapter_rot_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS, +- _adapter_dup_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS, +- _adapter_drop_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS, +- _adapter_collect_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS, +- _adapter_spread_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS, +- _adapter_fold_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS, +- _adapter_unused_13 = _adapter_mh_first + 13, //hole in the CONV_OP enumeration +- _adapter_mh_last = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::CONV_OP_LIMIT - 1, +- +- // Optimized adapter types +- +- // argument list reordering +- _adapter_opt_swap_1, +- _adapter_opt_swap_2, +- _adapter_opt_rot_1_up, +- _adapter_opt_rot_1_down, +- _adapter_opt_rot_2_up, +- _adapter_opt_rot_2_down, +- // primitive single to single: +- _adapter_opt_i2i, // i2c, i2z, i2b, i2s +- // primitive double to single: +- _adapter_opt_l2i, +- _adapter_opt_d2f, +- // primitive single to double: +- _adapter_opt_i2l, +- _adapter_opt_f2d, +- // conversion between floating point and integer type is handled by Java +- +- // 
reference to primitive: +- _adapter_opt_unboxi, +- _adapter_opt_unboxl, +- +- // %% Maybe tame the following with a VM_SYMBOLS_DO type macro? +- +- // how a blocking adapter returns (platform-dependent) +- _adapter_opt_return_ref, +- _adapter_opt_return_int, +- _adapter_opt_return_long, +- _adapter_opt_return_float, +- _adapter_opt_return_double, +- _adapter_opt_return_void, +- _adapter_opt_return_S0_ref, // return ref to S=0 (last slot) +- _adapter_opt_return_S1_ref, // return ref to S=1 (2nd-to-last slot) +- _adapter_opt_return_S2_ref, +- _adapter_opt_return_S3_ref, +- _adapter_opt_return_S4_ref, +- _adapter_opt_return_S5_ref, +- _adapter_opt_return_any, // dynamically select r/i/l/f/d +- _adapter_opt_return_FIRST = _adapter_opt_return_ref, +- _adapter_opt_return_LAST = _adapter_opt_return_any, +- +- // spreading (array length cases 0, 1, ...) +- _adapter_opt_spread_0, // spread empty array to N=0 arguments +- _adapter_opt_spread_1_ref, // spread Object[] to N=1 argument +- _adapter_opt_spread_2_ref, // spread Object[] to N=2 arguments +- _adapter_opt_spread_3_ref, // spread Object[] to N=3 arguments +- _adapter_opt_spread_4_ref, // spread Object[] to N=4 arguments +- _adapter_opt_spread_5_ref, // spread Object[] to N=5 arguments +- _adapter_opt_spread_ref, // spread Object[] to N arguments +- _adapter_opt_spread_byte, // spread byte[] or boolean[] to N arguments +- _adapter_opt_spread_char, // spread char[], etc., to N arguments +- _adapter_opt_spread_short, // spread short[], etc., to N arguments +- _adapter_opt_spread_int, // spread int[], short[], etc., to N arguments +- _adapter_opt_spread_long, // spread long[] to N arguments +- _adapter_opt_spread_float, // spread float[] to N arguments +- _adapter_opt_spread_double, // spread double[] to N arguments +- _adapter_opt_spread_FIRST = _adapter_opt_spread_0, +- _adapter_opt_spread_LAST = _adapter_opt_spread_double, +- +- // blocking filter/collect conversions +- // These collect N arguments and replace them (at 
slot S) by a return value +- // which is passed to the final target, along with the unaffected arguments. +- // collect_{N}_{T} collects N arguments at any position into a T value +- // collect_{N}_S{S}_{T} collects N arguments at slot S into a T value +- // collect_{T} collects any number of arguments at any position +- // filter_S{S}_{T} is the same as collect_1_S{S}_{T} (a unary collection) +- // (collect_2 is also usable as a filter, with long or double arguments) +- _adapter_opt_collect_ref, // combine N arguments, replace with a reference +- _adapter_opt_collect_int, // combine N arguments, replace with an int, short, etc. +- _adapter_opt_collect_long, // combine N arguments, replace with a long +- _adapter_opt_collect_float, // combine N arguments, replace with a float +- _adapter_opt_collect_double, // combine N arguments, replace with a double +- _adapter_opt_collect_void, // combine N arguments, replace with nothing +- // if there is a small fixed number to push, do so without a loop: +- _adapter_opt_collect_0_ref, // collect N=0 arguments, insert a reference +- _adapter_opt_collect_1_ref, // collect N=1 argument, replace with a reference +- _adapter_opt_collect_2_ref, // combine N=2 arguments, replace with a reference +- _adapter_opt_collect_3_ref, // combine N=3 arguments, replace with a reference +- _adapter_opt_collect_4_ref, // combine N=4 arguments, replace with a reference +- _adapter_opt_collect_5_ref, // combine N=5 arguments, replace with a reference +- // filters are an important special case because they never move arguments: +- _adapter_opt_filter_S0_ref, // filter N=1 argument at S=0, replace with a reference +- _adapter_opt_filter_S1_ref, // filter N=1 argument at S=1, replace with a reference +- _adapter_opt_filter_S2_ref, // filter N=1 argument at S=2, replace with a reference +- _adapter_opt_filter_S3_ref, // filter N=1 argument at S=3, replace with a reference +- _adapter_opt_filter_S4_ref, // filter N=1 argument at S=4, replace with a 
reference +- _adapter_opt_filter_S5_ref, // filter N=1 argument at S=5, replace with a reference +- // these move arguments, but they are important for boxing +- _adapter_opt_collect_2_S0_ref, // combine last N=2 arguments, replace with a reference +- _adapter_opt_collect_2_S1_ref, // combine N=2 arguments at S=1, replace with a reference +- _adapter_opt_collect_2_S2_ref, // combine N=2 arguments at S=2, replace with a reference +- _adapter_opt_collect_2_S3_ref, // combine N=2 arguments at S=3, replace with a reference +- _adapter_opt_collect_2_S4_ref, // combine N=2 arguments at S=4, replace with a reference +- _adapter_opt_collect_2_S5_ref, // combine N=2 arguments at S=5, replace with a reference +- _adapter_opt_collect_FIRST = _adapter_opt_collect_ref, +- _adapter_opt_collect_LAST = _adapter_opt_collect_2_S5_ref, +- +- // blocking folding conversions +- // these are like collects, but retain all the N arguments for the final target +- //_adapter_opt_fold_0_ref, // same as _adapter_opt_collect_0_ref +- // fold_{N}_{T} processes N arguments at any position into a T value, which it inserts +- // fold_{T} processes any number of arguments at any position +- _adapter_opt_fold_ref, // process N arguments, prepend a reference +- _adapter_opt_fold_int, // process N arguments, prepend an int, short, etc. 
+- _adapter_opt_fold_long, // process N arguments, prepend a long +- _adapter_opt_fold_float, // process N arguments, prepend a float +- _adapter_opt_fold_double, // process N arguments, prepend a double +- _adapter_opt_fold_void, // process N arguments, but leave the list unchanged +- _adapter_opt_fold_1_ref, // process N=1 argument, prepend a reference +- _adapter_opt_fold_2_ref, // process N=2 arguments, prepend a reference +- _adapter_opt_fold_3_ref, // process N=3 arguments, prepend a reference +- _adapter_opt_fold_4_ref, // process N=4 arguments, prepend a reference +- _adapter_opt_fold_5_ref, // process N=5 arguments, prepend a reference +- _adapter_opt_fold_FIRST = _adapter_opt_fold_ref, +- _adapter_opt_fold_LAST = _adapter_opt_fold_5_ref, +- +- _adapter_opt_profiling, +- +- _EK_LIMIT, +- _EK_FIRST = 0 +- }; +- + public: + static bool enabled() { return _enabled; } + static void set_enabled(bool z); + + private: +- enum { // import java_lang_invoke_AdapterMethodHandle::CONV_OP_* +- CONV_OP_LIMIT = java_lang_invoke_AdapterMethodHandle::CONV_OP_LIMIT, +- CONV_OP_MASK = java_lang_invoke_AdapterMethodHandle::CONV_OP_MASK, +- CONV_TYPE_MASK = java_lang_invoke_AdapterMethodHandle::CONV_TYPE_MASK, +- CONV_VMINFO_MASK = java_lang_invoke_AdapterMethodHandle::CONV_VMINFO_MASK, +- CONV_VMINFO_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_VMINFO_SHIFT, +- CONV_OP_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_OP_SHIFT, +- CONV_DEST_TYPE_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_DEST_TYPE_SHIFT, +- CONV_SRC_TYPE_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_SRC_TYPE_SHIFT, +- CONV_STACK_MOVE_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_STACK_MOVE_SHIFT, +- CONV_STACK_MOVE_MASK = java_lang_invoke_AdapterMethodHandle::CONV_STACK_MOVE_MASK +- }; +- + static bool _enabled; +- static MethodHandleEntry* _entries[_EK_LIMIT]; +- static const char* _entry_names[_EK_LIMIT+1]; +- static jobject _raise_exception_method; +- static address 
_adapter_return_handlers[CONV_TYPE_MASK+1]; + + // Adapters. + static MethodHandlesAdapterBlob* _adapter_code; + +- static bool ek_valid(EntryKind ek) { return (uint)ek < (uint)_EK_LIMIT; } +- static bool conv_op_valid(int op) { return (uint)op < (uint)CONV_OP_LIMIT; } +- +- public: +- static bool have_entry(EntryKind ek) { return ek_valid(ek) && _entries[ek] != NULL; } +- static MethodHandleEntry* entry(EntryKind ek) { assert(ek_valid(ek), "initialized"); +- return _entries[ek]; } +- static const char* entry_name(EntryKind ek) { assert(ek_valid(ek), "oob"); +- return _entry_names[ek]; } +- static EntryKind adapter_entry_kind(int op) { assert(conv_op_valid(op), "oob"); +- return EntryKind(_adapter_mh_first + op); } +- +- static void init_entry(EntryKind ek, MethodHandleEntry* me) { +- assert(ek_valid(ek), "oob"); +- assert(_entries[ek] == NULL, "no double initialization"); +- _entries[ek] = me; +- } +- +- // Some adapter helper functions. +- static EntryKind ek_original_kind(EntryKind ek) { +- if (ek <= _adapter_mh_last) return ek; +- switch (ek) { +- case _adapter_opt_swap_1: +- case _adapter_opt_swap_2: +- return _adapter_swap_args; +- case _adapter_opt_rot_1_up: +- case _adapter_opt_rot_1_down: +- case _adapter_opt_rot_2_up: +- case _adapter_opt_rot_2_down: +- return _adapter_rot_args; +- case _adapter_opt_i2i: +- case _adapter_opt_l2i: +- case _adapter_opt_d2f: +- case _adapter_opt_i2l: +- case _adapter_opt_f2d: +- return _adapter_prim_to_prim; +- case _adapter_opt_unboxi: +- case _adapter_opt_unboxl: +- return _adapter_ref_to_prim; +- } +- if (ek >= _adapter_opt_spread_FIRST && ek <= _adapter_opt_spread_LAST) +- return _adapter_spread_args; +- if (ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST) +- return _adapter_collect_args; +- if (ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST) +- return _adapter_fold_args; +- if (ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST) +- return _adapter_opt_return_any; +- if 
(ek == _adapter_opt_profiling) +- return _adapter_retype_only; +- assert(false, "oob"); +- return _EK_LIMIT; +- } +- +- static bool ek_supported(MethodHandles::EntryKind ek); +- +- static BasicType ek_bound_mh_arg_type(EntryKind ek) { +- switch (ek) { +- case _bound_int_mh : // fall-thru +- case _bound_int_direct_mh : return T_INT; +- case _bound_long_mh : // fall-thru +- case _bound_long_direct_mh : return T_LONG; +- default : return T_OBJECT; +- } +- } +- +- static int ek_adapter_opt_swap_slots(EntryKind ek) { +- switch (ek) { +- case _adapter_opt_swap_1 : return 1; +- case _adapter_opt_swap_2 : return 2; +- case _adapter_opt_rot_1_up : return 1; +- case _adapter_opt_rot_1_down : return 1; +- case _adapter_opt_rot_2_up : return 2; +- case _adapter_opt_rot_2_down : return 2; +- default : ShouldNotReachHere(); return -1; +- } +- } +- +- static int ek_adapter_opt_swap_mode(EntryKind ek) { +- switch (ek) { +- case _adapter_opt_swap_1 : return 0; +- case _adapter_opt_swap_2 : return 0; +- case _adapter_opt_rot_1_up : return 1; +- case _adapter_opt_rot_1_down : return -1; +- case _adapter_opt_rot_2_up : return 1; +- case _adapter_opt_rot_2_down : return -1; +- default : ShouldNotReachHere(); return 0; +- } +- } +- +- static int ek_adapter_opt_collect_count(EntryKind ek) { +- assert(ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST || +- ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST, ""); +- switch (ek) { +- case _adapter_opt_collect_0_ref : return 0; +- case _adapter_opt_filter_S0_ref : +- case _adapter_opt_filter_S1_ref : +- case _adapter_opt_filter_S2_ref : +- case _adapter_opt_filter_S3_ref : +- case _adapter_opt_filter_S4_ref : +- case _adapter_opt_filter_S5_ref : +- case _adapter_opt_fold_1_ref : +- case _adapter_opt_collect_1_ref : return 1; +- case _adapter_opt_collect_2_S0_ref : +- case _adapter_opt_collect_2_S1_ref : +- case _adapter_opt_collect_2_S2_ref : +- case _adapter_opt_collect_2_S3_ref : +- case 
_adapter_opt_collect_2_S4_ref : +- case _adapter_opt_collect_2_S5_ref : +- case _adapter_opt_fold_2_ref : +- case _adapter_opt_collect_2_ref : return 2; +- case _adapter_opt_fold_3_ref : +- case _adapter_opt_collect_3_ref : return 3; +- case _adapter_opt_fold_4_ref : +- case _adapter_opt_collect_4_ref : return 4; +- case _adapter_opt_fold_5_ref : +- case _adapter_opt_collect_5_ref : return 5; +- default : return -1; // sentinel value for "variable" +- } +- } +- +- static int ek_adapter_opt_collect_slot(EntryKind ek) { +- assert(ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST || +- ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST, ""); +- switch (ek) { +- case _adapter_opt_collect_2_S0_ref : +- case _adapter_opt_filter_S0_ref : return 0; +- case _adapter_opt_collect_2_S1_ref : +- case _adapter_opt_filter_S1_ref : return 1; +- case _adapter_opt_collect_2_S2_ref : +- case _adapter_opt_filter_S2_ref : return 2; +- case _adapter_opt_collect_2_S3_ref : +- case _adapter_opt_filter_S3_ref : return 3; +- case _adapter_opt_collect_2_S4_ref : +- case _adapter_opt_filter_S4_ref : return 4; +- case _adapter_opt_collect_2_S5_ref : +- case _adapter_opt_filter_S5_ref : return 5; +- default : return -1; // sentinel value for "variable" +- } +- } +- +- static BasicType ek_adapter_opt_collect_type(EntryKind ek) { +- assert(ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST || +- ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST, ""); +- switch (ek) { +- case _adapter_opt_fold_int : +- case _adapter_opt_collect_int : return T_INT; +- case _adapter_opt_fold_long : +- case _adapter_opt_collect_long : return T_LONG; +- case _adapter_opt_fold_float : +- case _adapter_opt_collect_float : return T_FLOAT; +- case _adapter_opt_fold_double : +- case _adapter_opt_collect_double : return T_DOUBLE; +- case _adapter_opt_fold_void : +- case _adapter_opt_collect_void : return T_VOID; +- default : return T_OBJECT; +- } +- } +- +- static 
int ek_adapter_opt_return_slot(EntryKind ek) { +- assert(ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST, ""); +- switch (ek) { +- case _adapter_opt_return_S0_ref : return 0; +- case _adapter_opt_return_S1_ref : return 1; +- case _adapter_opt_return_S2_ref : return 2; +- case _adapter_opt_return_S3_ref : return 3; +- case _adapter_opt_return_S4_ref : return 4; +- case _adapter_opt_return_S5_ref : return 5; +- default : return -1; // sentinel value for "variable" +- } +- } +- +- static BasicType ek_adapter_opt_return_type(EntryKind ek) { +- assert(ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST, ""); +- switch (ek) { +- case _adapter_opt_return_int : return T_INT; +- case _adapter_opt_return_long : return T_LONG; +- case _adapter_opt_return_float : return T_FLOAT; +- case _adapter_opt_return_double : return T_DOUBLE; +- case _adapter_opt_return_void : return T_VOID; +- case _adapter_opt_return_any : return T_CONFLICT; // sentinel value for "variable" +- default : return T_OBJECT; +- } +- } +- +- static int ek_adapter_opt_spread_count(EntryKind ek) { +- assert(ek >= _adapter_opt_spread_FIRST && ek <= _adapter_opt_spread_LAST, ""); +- switch (ek) { +- case _adapter_opt_spread_0 : return 0; +- case _adapter_opt_spread_1_ref : return 1; +- case _adapter_opt_spread_2_ref : return 2; +- case _adapter_opt_spread_3_ref : return 3; +- case _adapter_opt_spread_4_ref : return 4; +- case _adapter_opt_spread_5_ref : return 5; +- default : return -1; // sentinel value for "variable" +- } +- } +- +- static BasicType ek_adapter_opt_spread_type(EntryKind ek) { +- assert(ek >= _adapter_opt_spread_FIRST && ek <= _adapter_opt_spread_LAST, ""); +- switch (ek) { +- // (there is no _adapter_opt_spread_boolean; we use byte) +- case _adapter_opt_spread_byte : return T_BYTE; +- case _adapter_opt_spread_char : return T_CHAR; +- case _adapter_opt_spread_short : return T_SHORT; +- case _adapter_opt_spread_int : return T_INT; +- case _adapter_opt_spread_long : 
return T_LONG; +- case _adapter_opt_spread_float : return T_FLOAT; +- case _adapter_opt_spread_double : return T_DOUBLE; +- default : return T_OBJECT; +- } +- } +- +- static methodOop raise_exception_method() { +- oop rem = JNIHandles::resolve(_raise_exception_method); +- assert(rem == NULL || rem->is_method(), ""); +- return (methodOop) rem; +- } +- static void set_raise_exception_method(methodOop rem) { +- assert(_raise_exception_method == NULL, ""); +- _raise_exception_method = JNIHandles::make_global(Handle(rem)); +- } +- static methodOop resolve_raise_exception_method(TRAPS); +- // call raise_exception_method from C code: +- static void raise_exception(int code, oop actual, oop required, TRAPS); +- +- static jint adapter_conversion(int conv_op, BasicType src, BasicType dest, +- int stack_move = 0, int vminfo = 0) { +- assert(conv_op_valid(conv_op), "oob"); +- jint conv = ((conv_op << CONV_OP_SHIFT) +- | (src << CONV_SRC_TYPE_SHIFT) +- | (dest << CONV_DEST_TYPE_SHIFT) +- | (stack_move << CONV_STACK_MOVE_SHIFT) +- | (vminfo << CONV_VMINFO_SHIFT) +- ); +- assert(adapter_conversion_op(conv) == conv_op, "decode conv_op"); +- assert(adapter_conversion_src_type(conv) == src, "decode src"); +- assert(adapter_conversion_dest_type(conv) == dest, "decode dest"); +- assert(adapter_conversion_stack_move(conv) == stack_move, "decode stack_move"); +- assert(adapter_conversion_vminfo(conv) == vminfo, "decode vminfo"); +- return conv; +- } +- static int adapter_conversion_op(jint conv) { +- return ((conv >> CONV_OP_SHIFT) & 0xF); +- } +- static BasicType adapter_conversion_src_type(jint conv) { +- return (BasicType)((conv >> CONV_SRC_TYPE_SHIFT) & 0xF); +- } +- static BasicType adapter_conversion_dest_type(jint conv) { +- return (BasicType)((conv >> CONV_DEST_TYPE_SHIFT) & 0xF); +- } +- static int adapter_conversion_stack_move(jint conv) { +- return (conv >> CONV_STACK_MOVE_SHIFT); +- } +- static int adapter_conversion_vminfo(jint conv) { +- return (conv >> CONV_VMINFO_SHIFT) 
& CONV_VMINFO_MASK; +- } +- +- // Bit mask of conversion_op values. May vary by platform. +- static int adapter_conversion_ops_supported_mask(); +- +- static bool conv_op_supported(int conv_op) { +- assert(conv_op_valid(conv_op), ""); +- return ((adapter_conversion_ops_supported_mask() & nth_bit(conv_op)) != 0); +- } +- +- // Offset in words that the interpreter stack pointer moves when an argument is pushed. +- // The stack_move value must always be a multiple of this. +- static int stack_move_unit() { +- return frame::interpreter_frame_expression_stack_direction() * Interpreter::stackElementWords; +- } +- +- // Adapter frame traversal. (Implementation-specific.) +- static frame ricochet_frame_sender(const frame& fr, RegisterMap* reg_map); +- static void ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map); +- +- enum { CONV_VMINFO_SIGN_FLAG = 0x80 }; +- // Shift values for prim-to-prim conversions. +- static int adapter_prim_to_prim_subword_vminfo(BasicType dest) { +- if (dest == T_BOOLEAN) return (BitsPerInt - 1); // boolean is 1 bit +- if (dest == T_CHAR) return (BitsPerInt - BitsPerShort); +- if (dest == T_BYTE) return (BitsPerInt - BitsPerByte ) | CONV_VMINFO_SIGN_FLAG; +- if (dest == T_SHORT) return (BitsPerInt - BitsPerShort) | CONV_VMINFO_SIGN_FLAG; +- return 0; // case T_INT +- } +- // Shift values for unboxing a primitive. 
+- static int adapter_unbox_subword_vminfo(BasicType dest) { +- if (dest == T_BOOLEAN) return (BitsPerInt - BitsPerByte ); // implemented as 1 byte +- if (dest == T_CHAR) return (BitsPerInt - BitsPerShort); +- if (dest == T_BYTE) return (BitsPerInt - BitsPerByte ) | CONV_VMINFO_SIGN_FLAG; +- if (dest == T_SHORT) return (BitsPerInt - BitsPerShort) | CONV_VMINFO_SIGN_FLAG; +- return 0; // case T_INT +- } +- // Here is the transformation the i2i adapter must perform: +- static int truncate_subword_from_vminfo(jint value, int vminfo) { +- int shift = vminfo & ~CONV_VMINFO_SIGN_FLAG; +- jint tem = value << shift; +- if ((vminfo & CONV_VMINFO_SIGN_FLAG) != 0) { +- return (jint)tem >> shift; +- } else { +- return (juint)tem >> shift; +- } +- } +- +- static inline address from_compiled_entry(EntryKind ek); +- static inline address from_interpreted_entry(EntryKind ek); +- +- // helpers for decode_method. +- static methodOop decode_methodOop(methodOop m, int& decode_flags_result); +- static methodHandle decode_vmtarget(oop vmtarget, int vmindex, oop mtype, KlassHandle& receiver_limit_result, int& decode_flags_result); +- static methodHandle decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result); +- static methodHandle decode_MethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result); +- static methodHandle decode_DirectMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result); +- static methodHandle decode_BoundMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result); +- static methodHandle decode_AdapterMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result); +- +- // Find out how many stack slots an mh pushes or pops. +- // The result is *not* reported as a multiple of stack_move_unit(); +- // It is a signed net number of pushes (a difference in vmslots). +- // To compare with a stack_move value, first multiply by stack_move_unit(). 
+- static int decode_MethodHandle_stack_pushes(oop mh); +- + public: + // working with member names +- static void resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type ++ static Handle resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type + static void expand_MemberName(Handle mname, int suppress, TRAPS); // expand defc/name/type if missing + static Handle new_MemberName(TRAPS); // must be followed by init_MemberName +- static void init_MemberName(oop mname_oop, oop target); // compute vmtarget/vmindex from target +- static void init_MemberName(oop mname_oop, methodOop m, bool do_dispatch = true); +- static void init_MemberName(oop mname_oop, klassOop field_holder, AccessFlags mods, int offset); ++ static oop init_MemberName(oop mname_oop, oop target_oop); // compute vmtarget/vmindex from target ++ static oop init_method_MemberName(oop mname_oop, methodOop m, bool do_dispatch, ++ klassOop receiver_limit); ++ static oop init_field_MemberName(oop mname_oop, klassOop field_holder, ++ AccessFlags mods, oop type, oop name, ++ intptr_t offset, bool is_setter = false); ++ static Handle init_method_MemberName(oop mname_oop, CallInfo& info, TRAPS); ++ static Handle init_field_MemberName(oop mname_oop, FieldAccessInfo& info, TRAPS); ++ static int method_ref_kind(methodOop m, bool do_dispatch_if_possible = true); + static int find_MemberNames(klassOop k, Symbol* name, Symbol* sig, + int mflags, klassOop caller, + int skip, objArrayOop results); +@@ -559,169 +72,113 @@ + // Generate MethodHandles adapters. + static void generate_adapters(); + +- // Called from InterpreterGenerator and MethodHandlesAdapterGenerator. +- static address generate_method_handle_interpreter_entry(MacroAssembler* _masm); +- static void generate_method_handle_stub(MacroAssembler* _masm, EntryKind ek); ++ // Called from MethodHandlesAdapterGenerator. 
++ static address generate_method_handle_interpreter_entry(MacroAssembler* _masm, vmIntrinsics::ID iid); ++ static void generate_method_handle_dispatch(MacroAssembler* _masm, ++ vmIntrinsics::ID iid, ++ Register receiver_reg, ++ Register member_reg, ++ bool for_compiler_entry); + +- // argument list parsing +- static int argument_slot(oop method_type, int arg); +- static int argument_slot_count(oop method_type) { return argument_slot(method_type, -1); } +- static int argument_slot_to_argnum(oop method_type, int argslot); ++ // Queries ++ static bool is_signature_polymorphic(vmIntrinsics::ID iid) { ++ return (iid >= vmIntrinsics::FIRST_MH_SIG_POLY && ++ iid <= vmIntrinsics::LAST_MH_SIG_POLY); ++ } + +- // Runtime support +- enum { // bit-encoded flags from decode_method or decode_vmref +- _dmf_has_receiver = 0x01, // target method has leading reference argument +- _dmf_does_dispatch = 0x02, // method handle performs virtual or interface dispatch +- _dmf_from_interface = 0x04, // peforms interface dispatch +- _DMF_DIRECT_MASK = (_dmf_from_interface*2 - _dmf_has_receiver), +- _dmf_binds_method = 0x08, +- _dmf_binds_argument = 0x10, +- _DMF_BOUND_MASK = (_dmf_binds_argument*2 - _dmf_binds_method), +- _dmf_adapter_lsb = 0x20, +- _DMF_ADAPTER_MASK = (_dmf_adapter_lsb << CONV_OP_LIMIT) - _dmf_adapter_lsb +- }; +- static methodHandle decode_method(oop x, KlassHandle& receiver_limit_result, int& decode_flags_result); ++ static bool is_signature_polymorphic_intrinsic(vmIntrinsics::ID iid) { ++ assert(is_signature_polymorphic(iid), ""); ++ // Most sig-poly methods are intrinsics which do not require an ++ // appeal to Java for adapter code. 
++ return (iid != vmIntrinsics::_invokeGeneric); ++ } ++ ++ static bool is_signature_polymorphic_static(vmIntrinsics::ID iid) { ++ assert(is_signature_polymorphic(iid), ""); ++ return (iid >= vmIntrinsics::FIRST_MH_STATIC && ++ iid <= vmIntrinsics::LAST_MH_SIG_POLY); ++ } ++ ++ static bool has_member_arg(vmIntrinsics::ID iid) { ++ assert(is_signature_polymorphic(iid), ""); ++ return (iid >= vmIntrinsics::_linkToVirtual && ++ iid <= vmIntrinsics::_linkToInterface); ++ } ++ static bool has_member_arg(Symbol* klass, Symbol* name) { ++ if ((klass == vmSymbols::java_lang_invoke_MethodHandle()) && ++ is_signature_polymorphic_name(name)) { ++ vmIntrinsics::ID iid = signature_polymorphic_name_id(name); ++ return has_member_arg(iid); ++ } ++ return false; ++ } ++ ++ static Symbol* signature_polymorphic_intrinsic_name(vmIntrinsics::ID iid); ++ static int signature_polymorphic_intrinsic_ref_kind(vmIntrinsics::ID iid); ++ ++ static vmIntrinsics::ID signature_polymorphic_name_id(klassOop klass, Symbol* name); ++ static vmIntrinsics::ID signature_polymorphic_name_id(Symbol* name); ++ static bool is_signature_polymorphic_name(Symbol* name) { ++ return signature_polymorphic_name_id(name) != vmIntrinsics::_none; ++ } ++ static bool is_method_handle_invoke_name(klassOop klass, Symbol* name); ++ static bool is_signature_polymorphic_name(klassOop klass, Symbol* name) { ++ return signature_polymorphic_name_id(klass, name) != vmIntrinsics::_none; ++ } ++ + enum { + // format of query to getConstant: +- GC_JVM_PUSH_LIMIT = 0, +- GC_JVM_STACK_MOVE_UNIT = 1, +- GC_CONV_OP_IMPLEMENTED_MASK = 2, +- GC_OP_ROT_ARGS_DOWN_LIMIT_BIAS = 3, + GC_COUNT_GWT = 4, +- +- // format of result from getTarget / encode_target: +- ETF_HANDLE_OR_METHOD_NAME = 0, // all available data (immediate MH or method) +- ETF_DIRECT_HANDLE = 1, // ultimate method handle (will be a DMH, may be self) +- ETF_METHOD_NAME = 2, // ultimate method as MemberName +- ETF_REFLECT_METHOD = 3, // ultimate method as java.lang.reflect 
object (sans refClass) +- ETF_FORCE_DIRECT_HANDLE = 64, +- ETF_COMPILE_DIRECT_HANDLE = 65, +- +- // ad hoc constants +- OP_ROT_ARGS_DOWN_LIMIT_BIAS = -1 ++ GC_LAMBDA_SUPPORT = 5 + }; + static int get_named_constant(int which, Handle name_box, TRAPS); +- static oop encode_target(Handle mh, int format, TRAPS); // report vmtarget (to Java code) +- static bool class_cast_needed(klassOop src, klassOop dst); + +- static instanceKlassHandle resolve_instance_klass(oop java_mirror_oop, TRAPS); +- static instanceKlassHandle resolve_instance_klass(jclass java_mirror_jh, TRAPS) { +- return resolve_instance_klass(JNIHandles::resolve(java_mirror_jh), THREAD); ++public: ++ static Symbol* lookup_signature(oop type_str, bool polymorphic, TRAPS); // use TempNewSymbol ++ static Symbol* lookup_basic_type_signature(Symbol* sig, bool keep_last_arg, TRAPS); // use TempNewSymbol ++ static Symbol* lookup_basic_type_signature(Symbol* sig, TRAPS) { ++ return lookup_basic_type_signature(sig, false, THREAD); ++ } ++ static bool is_basic_type_signature(Symbol* sig); ++ ++ static Symbol* lookup_method_type(Symbol* msig, Handle mtype, TRAPS); ++ ++ static void print_as_method_type_on(outputStream* st, Symbol* sig) { ++ print_as_basic_type_signature_on(st, sig, true, true); ++ } ++ static void print_as_basic_type_signature_on(outputStream* st, Symbol* sig, bool keep_arrays = false, bool keep_basic_names = false); ++ ++ // decoding CONSTANT_MethodHandle constants ++ enum { JVM_REF_MIN = JVM_REF_getField, JVM_REF_MAX = JVM_REF_invokeInterface }; ++ static bool ref_kind_is_valid(int ref_kind) { ++ return (ref_kind >= JVM_REF_MIN && ref_kind <= JVM_REF_MAX); ++ } ++ static bool ref_kind_is_field(int ref_kind) { ++ assert(ref_kind_is_valid(ref_kind), ""); ++ return (ref_kind <= JVM_REF_putStatic); ++ } ++ static bool ref_kind_is_getter(int ref_kind) { ++ assert(ref_kind_is_valid(ref_kind), ""); ++ return (ref_kind <= JVM_REF_getStatic); ++ } ++ static bool ref_kind_is_setter(int ref_kind) { ++ return 
ref_kind_is_field(ref_kind) && !ref_kind_is_getter(ref_kind); ++ } ++ static bool ref_kind_is_method(int ref_kind) { ++ return !ref_kind_is_field(ref_kind) && (ref_kind != JVM_REF_newInvokeSpecial); ++ } ++ static bool ref_kind_has_receiver(int ref_kind) { ++ assert(ref_kind_is_valid(ref_kind), ""); ++ return (ref_kind & 1) != 0; ++ } ++ static bool ref_kind_is_static(int ref_kind) { ++ return !ref_kind_has_receiver(ref_kind) && (ref_kind != JVM_REF_newInvokeSpecial); ++ } ++ static bool ref_kind_does_dispatch(int ref_kind) { ++ return (ref_kind == JVM_REF_invokeVirtual || ++ ref_kind == JVM_REF_invokeInterface); + } + +- private: +- // These checkers operate on a pair of whole MethodTypes: +- static const char* check_method_type_change(oop src_mtype, int src_beg, int src_end, +- int insert_argnum, oop insert_type, +- int change_argnum, oop change_type, +- int delete_argnum, +- oop dst_mtype, int dst_beg, int dst_end, +- bool raw = false); +- static const char* check_method_type_insertion(oop src_mtype, +- int insert_argnum, oop insert_type, +- oop dst_mtype) { +- oop no_ref = NULL; +- return check_method_type_change(src_mtype, 0, -1, +- insert_argnum, insert_type, +- -1, no_ref, -1, dst_mtype, 0, -1); +- } +- static const char* check_method_type_conversion(oop src_mtype, +- int change_argnum, oop change_type, +- oop dst_mtype) { +- oop no_ref = NULL; +- return check_method_type_change(src_mtype, 0, -1, -1, no_ref, +- change_argnum, change_type, +- -1, dst_mtype, 0, -1); +- } +- static const char* check_method_type_passthrough(oop src_mtype, oop dst_mtype, bool raw) { +- oop no_ref = NULL; +- return check_method_type_change(src_mtype, 0, -1, +- -1, no_ref, -1, no_ref, -1, +- dst_mtype, 0, -1, raw); +- } +- +- // These checkers operate on pairs of argument or return types: +- static const char* check_argument_type_change(BasicType src_type, klassOop src_klass, +- BasicType dst_type, klassOop dst_klass, +- int argnum, bool raw = false); +- +- static const char* 
check_argument_type_change(oop src_type, oop dst_type, +- int argnum, bool raw = false) { +- klassOop src_klass = NULL, dst_klass = NULL; +- BasicType src_bt = java_lang_Class::as_BasicType(src_type, &src_klass); +- BasicType dst_bt = java_lang_Class::as_BasicType(dst_type, &dst_klass); +- return check_argument_type_change(src_bt, src_klass, +- dst_bt, dst_klass, argnum, raw); +- } +- +- static const char* check_return_type_change(oop src_type, oop dst_type, bool raw = false) { +- return check_argument_type_change(src_type, dst_type, -1, raw); +- } +- +- static const char* check_return_type_change(BasicType src_type, klassOop src_klass, +- BasicType dst_type, klassOop dst_klass) { +- return check_argument_type_change(src_type, src_klass, dst_type, dst_klass, -1); +- } +- +- static const char* check_method_receiver(methodOop m, klassOop passed_recv_type); +- +- // These verifiers can block, and will throw an error if the checking fails: +- static void verify_vmslots(Handle mh, TRAPS); +- static void verify_vmargslot(Handle mh, int argnum, int argslot, TRAPS); +- +- static void verify_method_type(methodHandle m, Handle mtype, +- bool has_bound_oop, +- KlassHandle bound_oop_type, +- TRAPS); +- +- static void verify_method_signature(methodHandle m, Handle mtype, +- int first_ptype_pos, +- KlassHandle insert_ptype, TRAPS); +- +- static void verify_DirectMethodHandle(Handle mh, methodHandle m, TRAPS); +- static void verify_BoundMethodHandle(Handle mh, Handle target, int argnum, +- bool direct_to_method, TRAPS); +- static void verify_BoundMethodHandle_with_receiver(Handle mh, methodHandle m, TRAPS); +- static void verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS); +- +- public: +- +- // Fill in the fields of a DirectMethodHandle mh. (MH.type must be pre-filled.) +- static void init_DirectMethodHandle(Handle mh, methodHandle method, bool do_dispatch, TRAPS); +- +- // Fill in the fields of a BoundMethodHandle mh. (MH.type, BMH.argument must be pre-filled.) 
+- static void init_BoundMethodHandle(Handle mh, Handle target, int argnum, TRAPS); +- static void init_BoundMethodHandle_with_receiver(Handle mh, +- methodHandle original_m, +- KlassHandle receiver_limit, +- int decode_flags, +- TRAPS); +- +- // Fill in the fields of an AdapterMethodHandle mh. (MH.type must be pre-filled.) +- static void init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS); +- static void ensure_vmlayout_field(Handle target, TRAPS); +- +-#ifdef ASSERT +- static bool spot_check_entry_names(); +-#endif +- +- private: +- static methodHandle dispatch_decoded_method(methodHandle m, +- KlassHandle receiver_limit, +- int decode_flags, +- KlassHandle receiver_klass, +- TRAPS); +- +-public: +- static bool is_float_fixed_reinterpretation_cast(BasicType src, BasicType dst); +- static bool same_basic_type_for_arguments(BasicType src, BasicType dst, +- bool raw = false, +- bool for_return = false); +- static bool same_basic_type_for_returns(BasicType src, BasicType dst, bool raw = false) { +- return same_basic_type_for_arguments(src, dst, raw, true); +- } +- +- static Symbol* convert_to_signature(oop type_str, bool polymorphic, TRAPS); + + #ifdef TARGET_ARCH_x86 + # include "methodHandles_x86.hpp" +@@ -738,61 +195,6 @@ + #ifdef TARGET_ARCH_ppc + # include "methodHandles_ppc.hpp" + #endif +-}; +- +- +-// Access methods for the "entry" field of a java.lang.invoke.MethodHandle. +-// The field is primarily a jump target for compiled calls. +-// However, we squirrel away some nice pointers for other uses, +-// just before the jump target. 
+-// Aspects of a method handle entry: +-// - from_compiled_entry - stub used when compiled code calls the MH +-// - from_interpreted_entry - stub used when the interpreter calls the MH +-// - type_checking_entry - stub for runtime casting between MHForm siblings (NYI) +-class MethodHandleEntry { +- public: +- class Data { +- friend class MethodHandleEntry; +- size_t _total_size; // size including Data and code stub +- MethodHandleEntry* _type_checking_entry; +- address _from_interpreted_entry; +- MethodHandleEntry* method_entry() { return (MethodHandleEntry*)(this + 1); } +- }; +- +- Data* data() { return (Data*)this - 1; } +- +- address start_address() { return (address) data(); } +- address end_address() { return start_address() + data()->_total_size; } +- +- address from_compiled_entry() { return (address) this; } +- +- address from_interpreted_entry() { return data()->_from_interpreted_entry; } +- void set_from_interpreted_entry(address e) { data()->_from_interpreted_entry = e; } +- +- MethodHandleEntry* type_checking_entry() { return data()->_type_checking_entry; } +- void set_type_checking_entry(MethodHandleEntry* e) { data()->_type_checking_entry = e; } +- +- void set_end_address(address end_addr) { +- size_t total_size = end_addr - start_address(); +- assert(total_size > 0 && total_size < 0x1000, "reasonable end address"); +- data()->_total_size = total_size; +- } +- +- // Compiler support: +- static int from_interpreted_entry_offset_in_bytes() { +- return (int)( offset_of(Data, _from_interpreted_entry) - sizeof(Data) ); +- } +- static int type_checking_entry_offset_in_bytes() { +- return (int)( offset_of(Data, _from_interpreted_entry) - sizeof(Data) ); +- } +- +- static address start_compiled_entry(MacroAssembler* _masm, +- address interpreted_entry = NULL); +- static MethodHandleEntry* finish_compiled_entry(MacroAssembler* masm, address start_addr); +-}; +- +-address MethodHandles::from_compiled_entry(EntryKind ek) { return 
entry(ek)->from_compiled_entry(); } +-address MethodHandles::from_interpreted_entry(EntryKind ek) { return entry(ek)->from_interpreted_entry(); } + + + //------------------------------------------------------------------------------ +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/prims/unsafe.cpp +--- openjdk/hotspot/src/share/vm/prims/unsafe.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/prims/unsafe.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -779,16 +779,33 @@ + return JNIHandles::make_local(env, JNIHandles::resolve_non_null(clazz)); + UNSAFE_END + +-UNSAFE_ENTRY(void, Unsafe_EnsureClassInitialized(JNIEnv *env, jobject unsafe, jobject clazz)) ++UNSAFE_ENTRY(void, Unsafe_EnsureClassInitialized(JNIEnv *env, jobject unsafe, jobject clazz)) { + UnsafeWrapper("Unsafe_EnsureClassInitialized"); + if (clazz == NULL) { + THROW(vmSymbols::java_lang_NullPointerException()); + } + oop mirror = JNIHandles::resolve_non_null(clazz); +- instanceKlass* k = instanceKlass::cast(java_lang_Class::as_klassOop(mirror)); +- if (k != NULL) { ++ ++ klassOop klass = java_lang_Class::as_klassOop(mirror); ++ if (klass != NULL && Klass::cast(klass)->should_be_initialized()) { ++ instanceKlass* k = instanceKlass::cast(klass); + k->initialize(CHECK); + } ++} ++UNSAFE_END ++ ++UNSAFE_ENTRY(jboolean, Unsafe_ShouldBeInitialized(JNIEnv *env, jobject unsafe, jobject clazz)) { ++ UnsafeWrapper("Unsafe_ShouldBeInitialized"); ++ if (clazz == NULL) { ++ THROW_(vmSymbols::java_lang_NullPointerException(), false); ++ } ++ oop mirror = JNIHandles::resolve_non_null(clazz); ++ klassOop klass = java_lang_Class::as_klassOop(mirror); ++ if (klass != NULL && Klass::cast(klass)->should_be_initialized()) { ++ return true; ++ } ++ return false; ++} + UNSAFE_END + + static void getBaseAndScale(int& base, int& scale, jclass acls, TRAPS) { +@@ -1584,6 +1601,10 @@ + {CC"defineAnonymousClass", CC"("DAC_Args")"CLS, FN_PTR(Unsafe_DefineAnonymousClass)}, + }; + ++JNINativeMethod lform_methods[] = { ++ 
{CC"shouldBeInitialized",CC"("CLS")Z", FN_PTR(Unsafe_ShouldBeInitialized)}, ++}; ++ + #undef CC + #undef FN_PTR + +@@ -1654,6 +1675,15 @@ + env->ExceptionClear(); + } + } ++ if (EnableInvokeDynamic) { ++ env->RegisterNatives(unsafecls, lform_methods, sizeof(lform_methods)/sizeof(JNINativeMethod)); ++ if (env->ExceptionOccurred()) { ++ if (PrintMiscellaneous && (Verbose || WizardMode)) { ++ tty->print_cr("Warning: SDK 1.7 LambdaForm support in Unsafe not found."); ++ } ++ env->ExceptionClear(); ++ } ++ } + int status = env->RegisterNatives(unsafecls, methods, sizeof(methods)/sizeof(JNINativeMethod)); + if (env->ExceptionOccurred()) { + if (PrintMiscellaneous && (Verbose || WizardMode)) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/arguments.cpp +--- openjdk/hotspot/src/share/vm/runtime/arguments.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/arguments.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -3057,15 +3057,6 @@ + } + #endif // PRODUCT + +- // Transitional +- if (EnableMethodHandles || AnonymousClasses) { +- if (!EnableInvokeDynamic && !FLAG_IS_DEFAULT(EnableInvokeDynamic)) { +- warning("EnableMethodHandles and AnonymousClasses are obsolete. 
Keeping EnableInvokeDynamic disabled."); +- } else { +- EnableInvokeDynamic = true; +- } +- } +- + // JSR 292 is not supported before 1.7 + if (!JDK_Version::is_gte_jdk17x_version()) { + if (EnableInvokeDynamic) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/deoptimization.cpp +--- openjdk/hotspot/src/share/vm/runtime/deoptimization.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/deoptimization.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -388,7 +388,7 @@ + if (deopt_sender.is_interpreted_frame()) { + methodHandle method = deopt_sender.interpreter_frame_method(); + Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci()); +- if (cur.is_method_handle_invoke()) { ++ if (cur.is_invokedynamic() || cur.is_invokehandle()) { + // Method handle invokes may involve fairly arbitrary chains of + // calls so it's impossible to know how much actual space the + // caller has for locals. +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/fieldDescriptor.hpp +--- openjdk/hotspot/src/share/vm/runtime/fieldDescriptor.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/fieldDescriptor.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -115,6 +115,7 @@ + void initialize(klassOop k, int index); + + // Print ++ void print() { print_on(tty); } + void print_on(outputStream* st) const PRODUCT_RETURN; + void print_on_for(outputStream* st, oop obj) PRODUCT_RETURN; + }; +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/fprofiler.cpp +--- openjdk/hotspot/src/share/vm/runtime/fprofiler.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/fprofiler.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -337,11 +337,13 @@ + char c = (char) n->byte_at(i); + st->print("%c", c); + } +- if( Verbose ) { ++ if (Verbose || WizardMode) { + // Disambiguate overloaded methods + Symbol* sig = m->signature(); + sig->print_symbol_on(st); +- } ++ } else if 
(MethodHandles::is_signature_polymorphic(m->intrinsic_id())) ++ // compare with methodOopDesc::print_short_name ++ MethodHandles::print_as_basic_type_signature_on(st, m->signature(), true); + } + + virtual void print(outputStream* st, int total_ticks) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/frame.cpp +--- openjdk/hotspot/src/share/vm/runtime/frame.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/frame.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -170,11 +170,9 @@ + } + + // type testers +-bool frame::is_ricochet_frame() const { +- RicochetBlob* rcb = SharedRuntime::ricochet_blob(); +- return (_cb == rcb && rcb != NULL && rcb->returns_to_bounce_addr(_pc)); ++bool frame::is_ignored_frame() const { ++ return false; // FIXME: some LambdaForm frames should be ignored + } +- + bool frame::is_deoptimized_frame() const { + assert(_deopt_state != unknown, "not answerable"); + return _deopt_state == is_deoptimized; +@@ -348,17 +346,12 @@ + frame frame::real_sender(RegisterMap* map) const { + frame result = sender(map); + while (result.is_runtime_frame() || +- result.is_ricochet_frame()) { ++ result.is_ignored_frame()) { + result = result.sender(map); + } + return result; + } + +-frame frame::sender_for_ricochet_frame(RegisterMap* map) const { +- assert(is_ricochet_frame(), ""); +- return MethodHandles::ricochet_frame_sender(*this, map); +-} +- + // Note: called by profiler - NOT for current thread + frame frame::profile_find_Java_sender_frame(JavaThread *thread) { + // If we don't recognize this frame, walk back up the stack until we do +@@ -541,7 +534,6 @@ + const char* frame::print_name() const { + if (is_native_frame()) return "Native"; + if (is_interpreted_frame()) return "Interpreted"; +- if (is_ricochet_frame()) return "Ricochet"; + if (is_compiled_frame()) { + if (is_deoptimized_frame()) return "Deoptimized"; + return "Compiled"; +@@ -728,8 +720,6 @@ + st->print("v ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name()); + } else if 
(_cb->is_deoptimization_stub()) { + st->print("v ~DeoptimizationBlob"); +- } else if (_cb->is_ricochet_stub()) { +- st->print("v ~RichochetBlob"); + } else if (_cb->is_exception_stub()) { + st->print("v ~ExceptionBlob"); + } else if (_cb->is_safepoint_stub()) { +@@ -993,9 +983,6 @@ + + void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) { + assert(_cb != NULL, "sanity check"); +- if (_cb == SharedRuntime::ricochet_blob()) { +- oops_ricochet_do(f, reg_map); +- } + if (_cb->oop_maps() != NULL) { + OopMapSet::oops_do(this, reg_map, f); + +@@ -1014,11 +1001,6 @@ + cf->do_code_blob(_cb); + } + +-void frame::oops_ricochet_do(OopClosure* f, const RegisterMap* map) { +- assert(is_ricochet_frame(), ""); +- MethodHandles::ricochet_frame_oops_do(*this, f, map); +-} +- + class CompiledArgumentOopFinder: public SignatureInfo { + protected: + OopClosure* _f; +@@ -1087,7 +1069,7 @@ + // First consult the ADLC on where it puts parameter 0 for this signature. + VMReg reg = SharedRuntime::name_for_receiver(); + oop r = *caller.oopmapreg_to_location(reg, reg_map); +- assert( Universe::heap()->is_in_or_null(r), "bad receiver" ); ++ assert(Universe::heap()->is_in_or_null(r), err_msg("bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", (intptr_t) r, (intptr_t) r)); + return r; + } + +@@ -1407,8 +1389,6 @@ + values.describe(-1, info_address, + FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no, + nm, nm->method()->name_and_sig_as_C_string()), 2); +- } else if (is_ricochet_frame()) { +- values.describe(-1, info_address, err_msg("#%d ricochet frame", frame_no), 2); + } else { + // provide default info if not handled before + char *info = (char *) "special frame"; +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/frame.hpp +--- openjdk/hotspot/src/share/vm/runtime/frame.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/frame.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -135,7 +135,7 @@ + 
bool is_interpreted_frame() const; + bool is_java_frame() const; + bool is_entry_frame() const; // Java frame called from C? +- bool is_ricochet_frame() const; ++ bool is_ignored_frame() const; + bool is_native_frame() const; + bool is_runtime_frame() const; + bool is_compiled_frame() const; +@@ -176,7 +176,6 @@ + // Helper methods for better factored code in frame::sender + frame sender_for_compiled_frame(RegisterMap* map) const; + frame sender_for_entry_frame(RegisterMap* map) const; +- frame sender_for_ricochet_frame(RegisterMap* map) const; + frame sender_for_interpreter_frame(RegisterMap* map) const; + frame sender_for_native_frame(RegisterMap* map) const; + +@@ -415,7 +414,6 @@ + // Oops-do's + void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f); + void oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache = true); +- void oops_ricochet_do(OopClosure* f, const RegisterMap* map); + + private: + void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/globals.hpp +--- openjdk/hotspot/src/share/vm/runtime/globals.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/globals.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -931,6 +931,9 @@ + diagnostic(bool, PrintAdapterHandlers, false, \ + "Print code generated for i2c/c2i adapters") \ + \ ++ diagnostic(bool, VerifyAdapterCalls, trueInDebug, \ ++ "Verify that i2c/c2i adapters are called properly") \ ++ \ + develop(bool, VerifyAdapterSharing, false, \ + "Verify that the code for shared adapters is the equivalent") \ + \ +@@ -3836,12 +3839,6 @@ + product(bool, AnonymousClasses, false, \ + "support sun.misc.Unsafe.defineAnonymousClass (deprecated)") \ + \ +- experimental(bool, EnableMethodHandles, false, \ +- "support method handles (deprecated)") \ +- \ +- diagnostic(intx, MethodHandlePushLimit, 3, \ +- "number of 
additional stack slots a method handle may push") \ +- \ + diagnostic(bool, PrintMethodHandleStubs, false, \ + "Print generated stub code for method handles") \ + \ +@@ -3851,19 +3848,12 @@ + diagnostic(bool, VerifyMethodHandles, trueInDebug, \ + "perform extra checks when constructing method handles") \ + \ +- diagnostic(bool, OptimizeMethodHandles, true, \ +- "when constructing method handles, try to improve them") \ +- \ +- develop(bool, StressMethodHandleWalk, false, \ +- "Process all method handles with MethodHandleWalk") \ ++ diagnostic(bool, ShowHiddenFrames, false, \ ++ "show method handle implementation frames (usually hidden)") \ + \ + experimental(bool, TrustFinalNonStaticFields, false, \ + "trust final non-static declarations for constant folding") \ + \ +- experimental(bool, AllowInvokeGeneric, false, \ +- "accept MethodHandle.invoke and MethodHandle.invokeGeneric " \ +- "as equivalent methods") \ +- \ + develop(bool, TraceInvokeDynamic, false, \ + "trace internal invoke dynamic operations") \ + \ +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/os.cpp +--- openjdk/hotspot/src/share/vm/runtime/os.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/os.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -807,7 +807,7 @@ + // the interpreter is generated into a buffer blob + InterpreterCodelet* i = Interpreter::codelet_containing(addr); + if (i != NULL) { +- st->print_cr(INTPTR_FORMAT " is an Interpreter codelet", addr); ++ st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", addr, (int)(addr - i->code_begin())); + i->print_on(st); + return; + } +@@ -818,14 +818,15 @@ + } + // + if (AdapterHandlerLibrary::contains(b)) { +- st->print_cr(INTPTR_FORMAT " is an AdapterHandler", addr); ++ st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", addr, (int)(addr - b->code_begin())); + AdapterHandlerLibrary::print_handler_on(st, b); + } + // the stubroutines are generated into a buffer blob + 
StubCodeDesc* d = StubCodeDesc::desc_for(addr); + if (d != NULL) { ++ st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", addr, (int)(addr - d->begin())); + d->print_on(st); +- if (verbose) st->cr(); ++ st->cr(); + return; + } + if (StubRoutines::contains(addr)) { +@@ -840,26 +841,25 @@ + } + VtableStub* v = VtableStubs::stub_containing(addr); + if (v != NULL) { ++ st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", addr, (int)(addr - v->entry_point())); + v->print_on(st); ++ st->cr(); + return; + } + } +- if (verbose && b->is_nmethod()) { ++ nmethod* nm = b->as_nmethod_or_null(); ++ if (nm != NULL) { + ResourceMark rm; +- st->print("%#p: Compiled ", addr); +- ((nmethod*)b)->method()->print_value_on(st); +- st->print(" = (CodeBlob*)" INTPTR_FORMAT, b); +- st->cr(); ++ st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT, ++ addr, (int)(addr - nm->entry_point()), nm); ++ if (verbose) { ++ st->print(" for "); ++ nm->method()->print_value_on(st); ++ } ++ nm->print_nmethod(verbose); + return; + } +- st->print(INTPTR_FORMAT " ", b); +- if ( b->is_nmethod()) { +- if (b->is_zombie()) { +- st->print_cr("is zombie nmethod"); +- } else if (b->is_not_entrant()) { +- st->print_cr("is non-entrant nmethod"); +- } +- } ++ st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", addr, (int)(addr - b->code_begin())); + b->print_on(st); + return; + } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/reflection.cpp +--- openjdk/hotspot/src/share/vm/runtime/reflection.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/reflection.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -36,7 +36,6 @@ + #include "oops/objArrayKlass.hpp" + #include "oops/objArrayOop.hpp" + #include "prims/jvm.h" +-#include "prims/methodHandleWalk.hpp" + #include "runtime/arguments.hpp" + #include "runtime/handles.inline.hpp" + #include "runtime/javaCalls.hpp" +@@ -502,11 +501,6 @@ + under_host_klass(accessee_ik, accessor)) + return true; + +- // 
Adapter frames can access anything. +- if (MethodHandleCompiler::klass_is_method_handle_adapter_holder(accessor)) +- // This is an internal adapter frame from the MethodHandleCompiler. +- return true; +- + if (RelaxAccessControlCheck || + (accessor_ik->major_version() < JAVA_1_5_VERSION && + accessee_ik->major_version() < JAVA_1_5_VERSION)) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/sharedRuntime.cpp +--- openjdk/hotspot/src/share/vm/runtime/sharedRuntime.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/sharedRuntime.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -88,8 +88,6 @@ + RuntimeStub* SharedRuntime::_resolve_static_call_blob; + + DeoptimizationBlob* SharedRuntime::_deopt_blob; +-RicochetBlob* SharedRuntime::_ricochet_blob; +- + SafepointBlob* SharedRuntime::_polling_page_safepoint_handler_blob; + SafepointBlob* SharedRuntime::_polling_page_return_handler_blob; + +@@ -109,7 +107,6 @@ + _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), false); + _polling_page_return_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), true); + +- generate_ricochet_blob(); + generate_deopt_blob(); + + #ifdef COMPILER2 +@@ -117,33 +114,6 @@ + #endif // COMPILER2 + } + +-//----------------------------generate_ricochet_blob--------------------------- +-void SharedRuntime::generate_ricochet_blob() { +- if (!EnableInvokeDynamic) return; // leave it as a null +- +- // allocate space for the code +- ResourceMark rm; +- // setup code generation tools +- CodeBuffer buffer("ricochet_blob", 256 LP64_ONLY(+ 256), 256); // XXX x86 LP64L: 512, 512 +- MacroAssembler* masm = new MacroAssembler(&buffer); +- +- int bounce_offset = -1, exception_offset = -1, frame_size_in_words = -1; +- MethodHandles::RicochetFrame::generate_ricochet_blob(masm, &bounce_offset, &exception_offset, &frame_size_in_words); +- 
+- // ------------- +- // make sure all code is generated +- masm->flush(); +- +- // failed to generate? +- if (bounce_offset < 0 || exception_offset < 0 || frame_size_in_words < 0) { +- assert(false, "bad ricochet blob"); +- return; +- } +- +- _ricochet_blob = RicochetBlob::create(&buffer, bounce_offset, exception_offset, frame_size_in_words); +-} +- +- + #include + + #ifndef USDT2 +@@ -527,10 +497,6 @@ + if (Interpreter::contains(return_address)) { + return Interpreter::rethrow_exception_entry(); + } +- // Ricochet frame unwind code +- if (SharedRuntime::ricochet_blob() != NULL && SharedRuntime::ricochet_blob()->returns_to_bounce_addr(return_address)) { +- return SharedRuntime::ricochet_blob()->exception_addr(); +- } + + guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub"); + guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!"); +@@ -768,13 +734,6 @@ + throw_and_post_jvmti_exception(thread, exception); + JRT_END + +-JRT_ENTRY(void, SharedRuntime::throw_WrongMethodTypeException(JavaThread* thread, oopDesc* required, oopDesc* actual)) +- assert(thread == JavaThread::current() && required->is_oop() && actual->is_oop(), "bad args"); +- ResourceMark rm; +- char* message = SharedRuntime::generate_wrong_method_type_message(thread, required, actual); +- throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_invoke_WrongMethodTypeException(), message); +-JRT_END +- + address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread, + address pc, + SharedRuntime::ImplicitExceptionKind exception_kind) +@@ -857,6 +816,12 @@ + return StubRoutines::throw_NullPointerException_at_call_entry(); + } + ++ if (nm->method()->is_method_handle_intrinsic()) { ++ // exception happened inside MH dispatch code, similar to a vtable stub ++ Events::log_exception(thread, "NullPointerException in MH adapter " INTPTR_FORMAT, pc); ++ return 
StubRoutines::throw_NullPointerException_at_call_entry(); ++ } ++ + #ifndef PRODUCT + _implicit_null_throws++; + #endif +@@ -1045,16 +1010,17 @@ + assert(!vfst.at_end(), "Java frame must exist"); + + // Find caller and bci from vframe +- methodHandle caller (THREAD, vfst.method()); +- int bci = vfst.bci(); ++ methodHandle caller(THREAD, vfst.method()); ++ int bci = vfst.bci(); + + // Find bytecode + Bytecode_invoke bytecode(caller, bci); +- bc = bytecode.java_code(); ++ bc = bytecode.invoke_code(); + int bytecode_index = bytecode.index(); + + // Find receiver for non-static call +- if (bc != Bytecodes::_invokestatic) { ++ if (bc != Bytecodes::_invokestatic && ++ bc != Bytecodes::_invokedynamic) { + // This register map must be update since we need to find the receiver for + // compiled frames. The receiver might be in a register. + RegisterMap reg_map2(thread); +@@ -1075,25 +1041,32 @@ + } + + // Resolve method. This is parameterized by bytecode. +- constantPoolHandle constants (THREAD, caller->constants()); +- assert (receiver.is_null() || receiver->is_oop(), "wrong receiver"); ++ constantPoolHandle constants(THREAD, caller->constants()); ++ assert(receiver.is_null() || receiver->is_oop(), "wrong receiver"); + LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle)); + + #ifdef ASSERT + // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls + if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) { + assert(receiver.not_null(), "should have thrown exception"); +- KlassHandle receiver_klass (THREAD, receiver->klass()); ++ KlassHandle receiver_klass(THREAD, receiver->klass()); + klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle)); + // klass is already loaded +- KlassHandle static_receiver_klass (THREAD, rk); +- assert(receiver_klass->is_subtype_of(static_receiver_klass()), "actual receiver must be subclass of static receiver klass"); ++ 
KlassHandle static_receiver_klass(THREAD, rk); ++ // Method handle invokes might have been optimized to a direct call ++ // so don't check for the receiver class. ++ // FIXME this weakens the assert too much ++ methodHandle callee = callinfo.selected_method(); ++ assert(receiver_klass->is_subtype_of(static_receiver_klass()) || ++ callee->is_method_handle_intrinsic() || ++ callee->is_compiled_lambda_form(), ++ "actual receiver must be subclass of static receiver klass"); + if (receiver_klass->oop_is_instance()) { + if (instanceKlass::cast(receiver_klass())->is_not_initialized()) { + tty->print_cr("ERROR: Klass not yet initialized!!"); + receiver_klass.print(); + } +- assert (!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized"); ++ assert(!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized"); + } + } + #endif +@@ -1186,8 +1159,10 @@ + call_info, CHECK_(methodHandle())); + methodHandle callee_method = call_info.selected_method(); + +- assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) || +- ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode"); ++ assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) || ++ (!is_virtual && invoke_code == Bytecodes::_invokehandle ) || ++ (!is_virtual && invoke_code == Bytecodes::_invokedynamic) || ++ ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode"); + + #ifndef PRODUCT + // tracing/debugging/statistics +@@ -1202,16 +1177,17 @@ + (is_optimized) ? "optimized " : "", (is_virtual) ? 
"virtual" : "static", + Bytecodes::name(invoke_code)); + callee_method->print_short_name(tty); +- tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code()); ++ tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT, caller_frame.pc(), callee_method->code()); + } + #endif + +- // JSR 292 ++ // JSR 292 key invariant: + // If the resolved method is a MethodHandle invoke target the call +- // site must be a MethodHandle call site. +- if (callee_method->is_method_handle_invoke()) { +- assert(caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site"); +- } ++ // site must be a MethodHandle call site, because the lambda form might tail-call ++ // leaving the stack in a state unknown to either caller or callee ++ // TODO detune for now but we might need it again ++// assert(!callee_method->is_compiled_lambda_form() || ++// caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site"); + + // Compute entry points. This might require generation of C2I converter + // frames, so we cannot be holding any locks here. Furthermore, the +@@ -1284,7 +1260,6 @@ + assert(stub_frame.is_runtime_frame(), "sanity check"); + frame caller_frame = stub_frame.sender(®_map); + assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame"); +- assert(!caller_frame.is_ricochet_frame(), "unexpected frame"); + #endif /* ASSERT */ + + methodHandle callee_method; +@@ -1320,21 +1295,9 @@ + address sender_pc = caller_frame.pc(); + CodeBlob* sender_cb = caller_frame.cb(); + nmethod* sender_nm = sender_cb->as_nmethod_or_null(); +- bool is_mh_invoke_via_adapter = false; // Direct c2c call or via adapter? +- if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) { +- // If the callee_target is set, then we have come here via an i2c +- // adapter. 
+- methodOop callee = thread->callee_target(); +- if (callee != NULL) { +- assert(callee->is_method(), "sanity"); +- is_mh_invoke_via_adapter = true; +- } +- } + + if (caller_frame.is_interpreted_frame() || +- caller_frame.is_entry_frame() || +- caller_frame.is_ricochet_frame() || +- is_mh_invoke_via_adapter) { ++ caller_frame.is_entry_frame()) { + methodOop callee = thread->callee_target(); + guarantee(callee != NULL && callee->is_method(), "bad handshake"); + thread->set_vm_result(callee); +@@ -1677,12 +1640,6 @@ + // Get the return PC for the passed caller PC. + address return_pc = caller_pc + frame::pc_return_offset; + +- // Don't fixup method handle call sites as the executed method +- // handle adapters are doing the required MethodHandle chain work. +- if (nm->is_method_handle_return(return_pc)) { +- return; +- } +- + // There is a benign race here. We could be attempting to patch to a compiled + // entry point at the same time the callee is being deoptimized. If that is + // the case then entry_point may in fact point to a c2i and we'd patch the +@@ -1788,97 +1745,6 @@ + return generate_class_cast_message(objName, targetKlass->external_name()); + } + +-char* SharedRuntime::generate_wrong_method_type_message(JavaThread* thread, +- oopDesc* required, +- oopDesc* actual) { +- if (TraceMethodHandles) { +- tty->print_cr("WrongMethodType thread="PTR_FORMAT" req="PTR_FORMAT" act="PTR_FORMAT"", +- thread, required, actual); +- } +- assert(EnableInvokeDynamic, ""); +- oop singleKlass = wrong_method_type_is_for_single_argument(thread, required); +- char* message = NULL; +- if (singleKlass != NULL) { +- const char* objName = "argument or return value"; +- if (actual != NULL) { +- // be flexible about the junk passed in: +- klassOop ak = (actual->is_klass() +- ? (klassOop)actual +- : actual->klass()); +- objName = Klass::cast(ak)->external_name(); +- } +- Klass* targetKlass = Klass::cast(required->is_klass() +- ? 
(klassOop)required +- : java_lang_Class::as_klassOop(required)); +- message = generate_class_cast_message(objName, targetKlass->external_name()); +- } else { +- // %%% need to get the MethodType string, without messing around too much +- const char* desc = NULL; +- // Get a signature from the invoke instruction +- const char* mhName = "method handle"; +- const char* targetType = "the required signature"; +- int targetArity = -1, mhArity = -1; +- vframeStream vfst(thread, true); +- if (!vfst.at_end()) { +- Bytecode_invoke call(vfst.method(), vfst.bci()); +- methodHandle target; +- { +- EXCEPTION_MARK; +- target = call.static_target(THREAD); +- if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; } +- } +- if (target.not_null() +- && target->is_method_handle_invoke() +- && required == target->method_handle_type()) { +- targetType = target->signature()->as_C_string(); +- targetArity = ArgumentCount(target->signature()).size(); +- } +- } +- KlassHandle kignore; int dmf_flags = 0; +- methodHandle actual_method = MethodHandles::decode_method(actual, kignore, dmf_flags); +- if ((dmf_flags & ~(MethodHandles::_dmf_has_receiver | +- MethodHandles::_dmf_does_dispatch | +- MethodHandles::_dmf_from_interface)) != 0) +- actual_method = methodHandle(); // MH does extra binds, drops, etc. 
+- bool has_receiver = ((dmf_flags & MethodHandles::_dmf_has_receiver) != 0); +- if (actual_method.not_null()) { +- mhName = actual_method->signature()->as_C_string(); +- mhArity = ArgumentCount(actual_method->signature()).size(); +- if (!actual_method->is_static()) mhArity += 1; +- } else if (java_lang_invoke_MethodHandle::is_instance(actual)) { +- oopDesc* mhType = java_lang_invoke_MethodHandle::type(actual); +- mhArity = java_lang_invoke_MethodType::ptype_count(mhType); +- stringStream st; +- java_lang_invoke_MethodType::print_signature(mhType, &st); +- mhName = st.as_string(); +- } +- if (targetArity != -1 && targetArity != mhArity) { +- if (has_receiver && targetArity == mhArity-1) +- desc = " cannot be called without a receiver argument as "; +- else +- desc = " cannot be called with a different arity as "; +- } +- message = generate_class_cast_message(mhName, targetType, +- desc != NULL ? desc : +- " cannot be called as "); +- } +- if (TraceMethodHandles) { +- tty->print_cr("WrongMethodType => message=%s", message); +- } +- return message; +-} +- +-oop SharedRuntime::wrong_method_type_is_for_single_argument(JavaThread* thr, +- oopDesc* required) { +- if (required == NULL) return NULL; +- if (required->klass() == SystemDictionary::Class_klass()) +- return required; +- if (required->is_klass()) +- return Klass::cast(klassOop(required))->java_mirror(); +- return NULL; +-} +- +- + char* SharedRuntime::generate_class_cast_message( + const char* objName, const char* targetKlassName, const char* desc) { + size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1; +@@ -2119,8 +1985,17 @@ + // that allows sharing of adapters for the same calling convention. 
+ class AdapterFingerPrint : public CHeapObj { + private: ++ enum { ++ _basic_type_bits = 4, ++ _basic_type_mask = right_n_bits(_basic_type_bits), ++ _basic_types_per_int = BitsPerInt / _basic_type_bits, ++ _compact_int_count = 3 ++ }; ++ // TO DO: Consider integrating this with a more global scheme for compressing signatures. ++ // For now, 4 bits per components (plus T_VOID gaps after double/long) is not excessive. ++ + union { +- int _compact[3]; ++ int _compact[_compact_int_count]; + int* _fingerprint; + } _value; + int _length; // A negative length indicates the fingerprint is in the compact form, +@@ -2129,8 +2004,7 @@ + // Remap BasicTypes that are handled equivalently by the adapters. + // These are correct for the current system but someday it might be + // necessary to make this mapping platform dependent. +- static BasicType adapter_encoding(BasicType in) { +- assert((~0xf & in) == 0, "must fit in 4 bits"); ++ static int adapter_encoding(BasicType in) { + switch(in) { + case T_BOOLEAN: + case T_BYTE: +@@ -2141,6 +2015,8 @@ + + case T_OBJECT: + case T_ARRAY: ++ // In other words, we assume that any register good enough for ++ // an int or long is good enough for a managed pointer. + #ifdef _LP64 + return T_LONG; + #else +@@ -2165,8 +2041,9 @@ + // The fingerprint is based on the BasicType signature encoded + // into an array of ints with eight entries per int. + int* ptr; +- int len = (total_args_passed + 7) >> 3; +- if (len <= (int)(sizeof(_value._compact) / sizeof(int))) { ++ int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int; ++ if (len <= _compact_int_count) { ++ assert(_compact_int_count == 3, "else change next line"); + _value._compact[0] = _value._compact[1] = _value._compact[2] = 0; + // Storing the signature encoded as signed chars hits about 98% + // of the time. 
+@@ -2182,10 +2059,12 @@ + int sig_index = 0; + for (int index = 0; index < len; index++) { + int value = 0; +- for (int byte = 0; byte < 8; byte++) { +- if (sig_index < total_args_passed) { +- value = (value << 4) | adapter_encoding(sig_bt[sig_index++]); +- } ++ for (int byte = 0; byte < _basic_types_per_int; byte++) { ++ int bt = ((sig_index < total_args_passed) ++ ? adapter_encoding(sig_bt[sig_index++]) ++ : 0); ++ assert((bt & _basic_type_mask) == bt, "must fit in 4 bits"); ++ value = (value << _basic_type_bits) | bt; + } + ptr[index] = value; + } +@@ -2235,6 +2114,7 @@ + return false; + } + if (_length < 0) { ++ assert(_compact_int_count == 3, "else change next line"); + return _value._compact[0] == other->_value._compact[0] && + _value._compact[1] == other->_value._compact[1] && + _value._compact[2] == other->_value._compact[2]; +@@ -2531,13 +2411,17 @@ + entry->relocate(B->content_begin()); + #ifndef PRODUCT + // debugging suppport +- if (PrintAdapterHandlers) { +- tty->cr(); +- tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = %s, %d bytes generated)", ++ if (PrintAdapterHandlers || PrintStubCode) { ++ entry->print_adapter_on(tty); ++ tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)", + _adapters->number_of_entries(), (method->is_static() ? 
"static" : "receiver"), +- method->signature()->as_C_string(), fingerprint->as_string(), insts_size ); ++ method->signature()->as_C_string(), insts_size); + tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry()); +- Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + insts_size); ++ if (Verbose || PrintStubCode) { ++ address first_pc = entry->base_address(); ++ if (first_pc != NULL) ++ Disassembler::decode(first_pc, first_pc + insts_size); ++ } + } + #endif + +@@ -2561,11 +2445,25 @@ + return entry; + } + ++address AdapterHandlerEntry::base_address() { ++ address base = _i2c_entry; ++ if (base == NULL) base = _c2i_entry; ++ assert(base <= _c2i_entry || _c2i_entry == NULL, ""); ++ assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == NULL, ""); ++ return base; ++} ++ + void AdapterHandlerEntry::relocate(address new_base) { +- ptrdiff_t delta = new_base - _i2c_entry; ++ address old_base = base_address(); ++ assert(old_base != NULL, ""); ++ ptrdiff_t delta = new_base - old_base; ++ if (_i2c_entry != NULL) + _i2c_entry += delta; ++ if (_c2i_entry != NULL) + _c2i_entry += delta; ++ if (_c2i_unverified_entry != NULL) + _c2i_unverified_entry += delta; ++ assert(base_address() == new_base, ""); + } + + +@@ -2614,7 +2512,9 @@ + ResourceMark rm; + nmethod* nm = NULL; + +- assert(method->has_native_function(), "must have something valid to call!"); ++ assert(method->is_native(), "must be native"); ++ assert(method->is_method_handle_intrinsic() || ++ method->has_native_function(), "must have something valid to call!"); + + { + // perform the work while holding the lock, but perform any printing outside the lock +@@ -2651,9 +2551,11 @@ + assert( i==total_args_passed, "" ); + BasicType ret_type = ss.type(); + +- // Now get the compiled-Java layout as input arguments +- int comp_args_on_stack; +- comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false); ++ // Now get the compiled-Java 
layout as input (or output) arguments. ++ // NOTE: Stubs for compiled entry points of method handle intrinsics ++ // are just trampolines so the argument registers must be outgoing ones. ++ const bool is_outgoing = method->is_method_handle_intrinsic(); ++ int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, is_outgoing); + + // Generate the compiled-to-native wrapper code + nm = SharedRuntime::generate_native_wrapper(&_masm, +@@ -2939,18 +2841,22 @@ + AdapterHandlerTableIterator iter(_adapters); + while (iter.has_next()) { + AdapterHandlerEntry* a = iter.next(); +- if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) { ++ if (b == CodeCache::find_blob(a->get_i2c_entry())) { + st->print("Adapter for signature: "); +- st->print_cr("%s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT, +- a->fingerprint()->as_string(), +- a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry()); +- ++ a->print_adapter_on(tty); + return; + } + } + assert(false, "Should have found handler"); + } + ++void AdapterHandlerEntry::print_adapter_on(outputStream* st) const { ++ st->print_cr("AHE@" INTPTR_FORMAT ": %s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT, ++ (intptr_t) this, fingerprint()->as_string(), ++ get_i2c_entry(), get_c2i_entry(), get_c2i_unverified_entry()); ++ ++} ++ + #ifndef PRODUCT + + void AdapterHandlerLibrary::print_statistics() { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/sharedRuntime.hpp +--- openjdk/hotspot/src/share/vm/runtime/sharedRuntime.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/sharedRuntime.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -61,7 +61,6 @@ + static RuntimeStub* _resolve_static_call_blob; + + static DeoptimizationBlob* _deopt_blob; +- static RicochetBlob* _ricochet_blob; + + static SafepointBlob* _polling_page_safepoint_handler_blob; + static SafepointBlob* _polling_page_return_handler_blob; +@@ -187,7 +186,6 
@@ + static void throw_NullPointerException(JavaThread* thread); + static void throw_NullPointerException_at_call(JavaThread* thread); + static void throw_StackOverflowError(JavaThread* thread); +- static void throw_WrongMethodTypeException(JavaThread* thread, oopDesc* required, oopDesc* actual); + static address continuation_for_implicit_exception(JavaThread* thread, + address faulting_pc, + ImplicitExceptionKind exception_kind); +@@ -223,16 +221,6 @@ + return _resolve_static_call_blob->entry_point(); + } + +- static RicochetBlob* ricochet_blob() { +-#ifdef X86 +- // Currently only implemented on x86 +- assert(!EnableInvokeDynamic || _ricochet_blob != NULL, "oops"); +-#endif +- return _ricochet_blob; +- } +- +- static void generate_ricochet_blob(); +- + static SafepointBlob* polling_page_return_handler_blob() { return _polling_page_return_handler_blob; } + static SafepointBlob* polling_page_safepoint_handler_blob() { return _polling_page_safepoint_handler_blob; } + +@@ -291,27 +279,6 @@ + static char* generate_class_cast_message(JavaThread* thr, const char* name); + + /** +- * Fill in the message for a WrongMethodTypeException +- * +- * @param thr the current thread +- * @param mtype (optional) expected method type (or argument class) +- * @param mhandle (optional) actual method handle (or argument) +- * @return the dynamically allocated exception message +- * +- * BCP for the frame on top of the stack must refer to an +- * 'invokevirtual' op for a method handle, or an 'invokedyamic' op. +- * The caller (or one of its callers) must use a ResourceMark +- * in order to correctly free the result. +- */ +- static char* generate_wrong_method_type_message(JavaThread* thr, +- oopDesc* mtype = NULL, +- oopDesc* mhandle = NULL); +- +- /** Return non-null if the mtype is a klass or Class, not a MethodType. 
*/ +- static oop wrong_method_type_is_for_single_argument(JavaThread* thr, +- oopDesc* mtype); +- +- /** + * Fill in the "X cannot be cast to a Y" message for ClassCastException + * + * @param name the name of the class of the object attempted to be cast +@@ -453,6 +420,10 @@ + // convention (handlizes oops, etc), transitions to native, makes the call, + // returns to java state (possibly blocking), unhandlizes any result and + // returns. ++ // ++ // The wrapper may contain special-case code if the given method ++ // is a JNI critical method, or a compiled method handle adapter, ++ // such as _invokeBasic, _linkToVirtual, etc. + static nmethod *generate_native_wrapper(MacroAssembler* masm, + methodHandle method, + int compile_id, +@@ -647,13 +618,14 @@ + AdapterHandlerEntry(); + + public: +- address get_i2c_entry() { return _i2c_entry; } +- address get_c2i_entry() { return _c2i_entry; } +- address get_c2i_unverified_entry() { return _c2i_unverified_entry; } ++ address get_i2c_entry() const { return _i2c_entry; } ++ address get_c2i_entry() const { return _c2i_entry; } ++ address get_c2i_unverified_entry() const { return _c2i_unverified_entry; } + ++ address base_address(); + void relocate(address new_base); + +- AdapterFingerPrint* fingerprint() { return _fingerprint; } ++ AdapterFingerPrint* fingerprint() const { return _fingerprint; } + + AdapterHandlerEntry* next() { + return (AdapterHandlerEntry*)BasicHashtableEntry::next(); +@@ -665,7 +637,8 @@ + bool compare_code(unsigned char* code, int length, int total_args_passed, BasicType* sig_bt); + #endif + +- void print(); ++ //virtual void print_on(outputStream* st) const; DO NOT USE ++ void print_adapter_on(outputStream* st) const; + }; + + class AdapterHandlerLibrary: public AllStatic { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/signature.hpp +--- openjdk/hotspot/src/share/vm/runtime/signature.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/signature.hpp Thu Feb 06 
14:24:53 2014 +0000 +@@ -396,6 +396,8 @@ + enum FailureMode { ReturnNull, CNFException, NCDFError }; + klassOop as_klass(Handle class_loader, Handle protection_domain, FailureMode failure_mode, TRAPS); + oop as_java_mirror(Handle class_loader, Handle protection_domain, FailureMode failure_mode, TRAPS); ++ const jbyte* raw_bytes() { return _signature->bytes() + _begin; } ++ int raw_length() { return _end - _begin; } + + // return same as_symbol except allocation of new symbols is avoided. + Symbol* as_symbol_or_null(); +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/stubRoutines.cpp +--- openjdk/hotspot/src/share/vm/runtime/stubRoutines.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/stubRoutines.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -53,7 +53,6 @@ + address StubRoutines::_throw_IncompatibleClassChangeError_entry = NULL; + address StubRoutines::_throw_NullPointerException_at_call_entry = NULL; + address StubRoutines::_throw_StackOverflowError_entry = NULL; +-address StubRoutines::_throw_WrongMethodTypeException_entry = NULL; + address StubRoutines::_handler_for_unsafe_access_entry = NULL; + jint StubRoutines::_verify_oop_count = 0; + address StubRoutines::_verify_oop_subroutine_entry = NULL; +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/stubRoutines.hpp +--- openjdk/hotspot/src/share/vm/runtime/stubRoutines.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/stubRoutines.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -130,7 +130,6 @@ + static address _throw_IncompatibleClassChangeError_entry; + static address _throw_NullPointerException_at_call_entry; + static address _throw_StackOverflowError_entry; +- static address _throw_WrongMethodTypeException_entry; + static address _handler_for_unsafe_access_entry; + + static address _atomic_xchg_entry; +@@ -225,6 +224,9 @@ + (_code2 != NULL && _code2->blob_contains(addr)) ; + } + ++ static CodeBlob* code1() { return _code1; } ++ static CodeBlob* code2() 
{ return _code2; } ++ + // Debugging + static jint verify_oop_count() { return _verify_oop_count; } + static jint* verify_oop_count_addr() { return &_verify_oop_count; } +@@ -254,7 +256,6 @@ + static address throw_IncompatibleClassChangeError_entry(){ return _throw_IncompatibleClassChangeError_entry; } + static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; } + static address throw_StackOverflowError_entry() { return _throw_StackOverflowError_entry; } +- static address throw_WrongMethodTypeException_entry() { return _throw_WrongMethodTypeException_entry; } + + // Exceptions during unsafe access - should throw Java exception rather + // than crash. +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/vframe.cpp +--- openjdk/hotspot/src/share/vm/runtime/vframe.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/vframe.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -410,8 +410,9 @@ + Klass::cast(method()->method_holder()) + ->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass())) { + // This is an auxilary frame -- skip it +- } else if (method()->is_method_handle_adapter()) { +- // This is an internal adapter frame from the MethodHandleCompiler -- skip it ++ } else if (method()->is_method_handle_intrinsic() || ++ method()->is_compiled_lambda_form()) { ++ // This is an internal adapter frame for method handles -- skip it + } else { + // This is non-excluded frame, we need to count it against the depth + if (depth-- <= 0) { +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/vframeArray.cpp +--- openjdk/hotspot/src/share/vm/runtime/vframeArray.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/vframeArray.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -24,6 +24,7 @@ + + #include "precompiled.hpp" + #include "classfile/vmSymbols.hpp" ++#include "interpreter/bytecode.hpp" + #include "interpreter/interpreter.hpp" + #include "memory/allocation.inline.hpp" + 
#include "memory/resourceArea.hpp" +@@ -510,7 +511,8 @@ + // in the above picture. + + // Find the skeletal interpreter frames to unpack into +- RegisterMap map(JavaThread::current(), false); ++ JavaThread* THREAD = JavaThread::current(); ++ RegisterMap map(THREAD, false); + // Get the youngest frame we will unpack (last to be unpacked) + frame me = unpack_frame.sender(&map); + int index; +@@ -520,29 +522,37 @@ + me = me.sender(&map); + } + ++ // Do the unpacking of interpreter frames; the frame at index 0 represents the top activation, so it has no callee ++ // Unpack the frames from the oldest (frames() -1) to the youngest (0) + frame caller_frame = me; +- +- // Do the unpacking of interpreter frames; the frame at index 0 represents the top activation, so it has no callee +- +- // Unpack the frames from the oldest (frames() -1) to the youngest (0) +- + for (index = frames() - 1; index >= 0 ; index--) { +- int callee_parameters = index == 0 ? 0 : element(index-1)->method()->size_of_parameters(); +- int callee_locals = index == 0 ? 0 : element(index-1)->method()->max_locals(); +- element(index)->unpack_on_stack(caller_actual_parameters, +- callee_parameters, +- callee_locals, +- &caller_frame, +- index == 0, +- exec_mode); ++ vframeArrayElement* elem = element(index); // caller ++ int callee_parameters, callee_locals; ++ if (index == 0) { ++ callee_parameters = callee_locals = 0; ++ } else { ++ methodHandle caller = elem->method(); ++ methodHandle callee = element(index - 1)->method(); ++ Bytecode_invoke inv(caller, elem->bci()); ++ // invokedynamic instructions don't have a class but obviously don't have a MemberName appendix. ++ // NOTE: Use machinery here that avoids resolving of any kind. ++ const bool has_member_arg = ++ !inv.is_invokedynamic() && MethodHandles::has_member_arg(inv.klass(), inv.name()); ++ callee_parameters = callee->size_of_parameters() + (has_member_arg ? 
1 : 0); ++ callee_locals = callee->max_locals(); ++ } ++ elem->unpack_on_stack(caller_actual_parameters, ++ callee_parameters, ++ callee_locals, ++ &caller_frame, ++ index == 0, ++ exec_mode); + if (index == frames() - 1) { +- Deoptimization::unwind_callee_save_values(element(index)->iframe(), this); ++ Deoptimization::unwind_callee_save_values(elem->iframe(), this); + } +- caller_frame = *element(index)->iframe(); ++ caller_frame = *elem->iframe(); + caller_actual_parameters = callee_parameters; + } +- +- + deallocate_monitor_chunks(); + } + +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/vmStructs.cpp +--- openjdk/hotspot/src/share/vm/runtime/vmStructs.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/runtime/vmStructs.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -827,13 +827,6 @@ + /* CodeBlobs (NOTE: incomplete, but only a little) */ \ + /***************************************************/ \ + \ +- NOT_ZERO(X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _sender_pc, address))) \ +- NOT_ZERO(X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _exact_sender_sp, intptr_t*))) \ +- NOT_ZERO(X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _sender_link, intptr_t*))) \ +- NOT_ZERO(X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _saved_args_base, intptr_t*))) \ +- \ +- static_field(SharedRuntime, _ricochet_blob, RicochetBlob*) \ +- \ + nonstatic_field(CodeBlob, _name, const char*) \ + nonstatic_field(CodeBlob, _size, int) \ + nonstatic_field(CodeBlob, _header_size, int) \ +@@ -878,11 +871,8 @@ + nonstatic_field(nmethod, _compile_id, int) \ + nonstatic_field(nmethod, _exception_cache, ExceptionCache*) \ + nonstatic_field(nmethod, _marked_for_deoptimization, bool) \ +- \ +- nonstatic_field(RicochetBlob, _bounce_offset, int) \ +- nonstatic_field(RicochetBlob, _exception_offset, int) \ +- \ +- unchecked_c2_static_field(Deoptimization, _trap_reason_name, void*) \ ++ \ ++ unchecked_c2_static_field(Deoptimization, 
_trap_reason_name, void*) \ + \ + /********************************/ \ + /* JavaCalls (NOTE: incomplete) */ \ +@@ -1623,7 +1613,6 @@ + /*************************************************************/ \ + \ + declare_toplevel_type(SharedRuntime) \ +- X86_ONLY(declare_toplevel_type(MethodHandles::RicochetFrame)) \ + \ + declare_toplevel_type(CodeBlob) \ + declare_type(BufferBlob, CodeBlob) \ +@@ -1634,7 +1623,6 @@ + declare_type(SingletonBlob, CodeBlob) \ + declare_type(SafepointBlob, SingletonBlob) \ + declare_type(DeoptimizationBlob, SingletonBlob) \ +- declare_type(RicochetBlob, SingletonBlob) \ + declare_c2_type(ExceptionBlob, SingletonBlob) \ + declare_c2_type(UncommonTrapBlob, CodeBlob) \ + \ +@@ -2381,7 +2369,7 @@ + declare_constant(instanceKlass::initialization_error) \ + \ + /*********************************/ \ +- /* Symbol* - symbol max length */ \ ++ /* Symbol* - symbol max length */ \ + /*********************************/ \ + \ + declare_constant(Symbol::max_symbol_length) \ +@@ -2394,21 +2382,16 @@ + declare_constant(constantPoolOopDesc::_indy_argc_offset) \ + declare_constant(constantPoolOopDesc::_indy_argv_offset) \ + \ +- /*********************************************/ \ +- /* ConstantPoolCacheEntry FlagBitValues enum */ \ +- /*********************************************/ \ ++ /********************************/ \ ++ /* ConstantPoolCacheEntry enums */ \ ++ /********************************/ \ + \ +- declare_constant(ConstantPoolCacheEntry::hotSwapBit) \ +- declare_constant(ConstantPoolCacheEntry::methodInterface) \ +- declare_constant(ConstantPoolCacheEntry::volatileField) \ +- declare_constant(ConstantPoolCacheEntry::vfinalMethod) \ +- declare_constant(ConstantPoolCacheEntry::finalField) \ +- \ +- /******************************************/ \ +- /* ConstantPoolCacheEntry FlagValues enum */ \ +- /******************************************/ \ +- \ +- declare_constant(ConstantPoolCacheEntry::tosBits) \ ++ 
declare_constant(ConstantPoolCacheEntry::is_volatile_shift) \ ++ declare_constant(ConstantPoolCacheEntry::is_final_shift) \ ++ declare_constant(ConstantPoolCacheEntry::is_forced_virtual_shift) \ ++ declare_constant(ConstantPoolCacheEntry::is_vfinal_shift) \ ++ declare_constant(ConstantPoolCacheEntry::is_field_entry_shift) \ ++ declare_constant(ConstantPoolCacheEntry::tos_state_shift) \ + \ + /***************************************/ \ + /* java_lang_Thread::ThreadStatus enum */ \ +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/services/heapDumper.cpp +--- openjdk/hotspot/src/share/vm/services/heapDumper.cpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/services/heapDumper.cpp Thu Feb 06 14:24:53 2014 +0000 +@@ -1650,9 +1650,6 @@ + if (fr->is_entry_frame()) { + last_entry_frame = fr; + } +- if (fr->is_ricochet_frame()) { +- fr->oops_ricochet_do(&blk, vf->register_map()); +- } + } + vf = vf->sender(); + } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/utilities/accessFlags.hpp +--- openjdk/hotspot/src/share/vm/utilities/accessFlags.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/utilities/accessFlags.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -55,9 +55,6 @@ + JVM_ACC_IS_OBSOLETE = 0x00020000, // RedefineClasses() has made method obsolete + JVM_ACC_IS_PREFIXED_NATIVE = 0x00040000, // JVMTI has prefixed this native method + +- JVM_MH_INVOKE_BITS // = 0x10001100 // MethodHandle.invoke quasi-native +- = (JVM_ACC_NATIVE | JVM_ACC_SYNTHETIC | JVM_ACC_MONITOR_MATCH), +- + // klassOop flags + JVM_ACC_HAS_MIRANDA_METHODS = 0x10000000, // True if this class has miranda methods in it's vtable + JVM_ACC_HAS_VANILLA_CONSTRUCTOR = 0x20000000, // True if klass has a vanilla default constructor +@@ -131,15 +128,6 @@ + bool is_obsolete () const { return (_flags & JVM_ACC_IS_OBSOLETE ) != 0; } + bool is_prefixed_native () const { return (_flags & JVM_ACC_IS_PREFIXED_NATIVE ) != 0; } + +- // JSR 292: A method of the form 
MethodHandle.invoke(A...)R method is +- // neither bytecoded nor a JNI native, but rather a fast call through +- // a lightweight method handle object. Because it is not bytecoded, +- // it has the native bit set, but the monitor-match bit is also set +- // to distinguish it from a JNI native (which never has the match bit set). +- // The synthetic bit is also present, because such a method is never +- // explicitly defined in Java code. +- bool is_method_handle_invoke () const { return (_flags & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS; } +- + // klassOop flags + bool has_miranda_methods () const { return (_flags & JVM_ACC_HAS_MIRANDA_METHODS ) != 0; } + bool has_vanilla_constructor () const { return (_flags & JVM_ACC_HAS_VANILLA_CONSTRUCTOR) != 0; } +diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/utilities/exceptions.hpp +--- openjdk/hotspot/src/share/vm/utilities/exceptions.hpp Tue Jan 14 20:24:44 2014 -0500 ++++ openjdk/hotspot/src/share/vm/utilities/exceptions.hpp Thu Feb 06 14:24:53 2014 +0000 +@@ -220,6 +220,9 @@ + #define THROW_ARG(name, signature, args) \ + { Exceptions::_throw_args(THREAD_AND_LOCATION, name, signature, args); return; } + ++#define THROW_MSG_CAUSE(name, message, cause) \ ++ { Exceptions::_throw_msg_cause(THREAD_AND_LOCATION, name, message, cause); return; } ++ + #define THROW_OOP_(e, result) \ + { Exceptions::_throw_oop(THREAD_AND_LOCATION, e); return result; } + +@@ -238,6 +241,9 @@ + #define THROW_ARG_(name, signature, args, result) \ + { Exceptions::_throw_args(THREAD_AND_LOCATION, name, signature, args); return result; } + ++#define THROW_MSG_CAUSE(name, message, cause) \ ++ { Exceptions::_throw_msg_cause(THREAD_AND_LOCATION, name, message, cause); return; } ++ + #define THROW_MSG_CAUSE_(name, message, cause, result) \ + { Exceptions::_throw_msg_cause(THREAD_AND_LOCATION, name, message, cause); return result; } + diff -r 531847dfec6f -r ed2108ad126a patches/jsr292/7192406-exact_return_type_info.patch --- /dev/null Thu Jan 01 
00:00:00 1970 +0000 +++ b/patches/jsr292/7192406-exact_return_type_info.patch Thu Mar 27 04:19:17 2014 +0000 @@ -0,0 +1,660 @@ +# HG changeset patch +# User andrew +# Date 1391697224 0 +# Thu Feb 06 14:33:44 2014 +0000 +# Node ID 38ae397aa523096aa3f94d23e1a38aa75e55f8f5 +# Parent 98f6e8bc55e8dbac329e3d871de88b2a7529ff6d +7192406: JSR 292: C2 needs exact return type information for invokedynamic and invokehandle call sites +Reviewed-by: kvn + +diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/c1/c1_GraphBuilder.cpp +--- openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Mon Sep 10 16:37:22 2012 -0700 ++++ openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Thu Feb 06 14:33:44 2014 +0000 +@@ -1646,15 +1646,16 @@ + code == Bytecodes::_invokespecial || + code == Bytecodes::_invokevirtual || + code == Bytecodes::_invokeinterface; +- const bool is_invokedynamic = (code == Bytecodes::_invokedynamic); + + bool will_link; +- ciMethod* target = stream()->get_method(will_link); ++ ciSignature* declared_signature = NULL; ++ ciMethod* target = stream()->get_method(will_link, &declared_signature); + ciKlass* holder = stream()->get_declared_method_holder(); + const Bytecodes::Code bc_raw = stream()->cur_bc_raw(); ++ assert(declared_signature != NULL, "cannot be null"); + + // FIXME bail out for now +- if ((bc_raw == Bytecodes::_invokehandle || is_invokedynamic) && !will_link) { ++ if (Bytecodes::has_optional_appendix(bc_raw) && !will_link) { + BAILOUT("unlinked call site (FIXME needs patching or recompile support)"); + } + +@@ -1834,7 +1835,7 @@ + bool success = false; + if (target->is_method_handle_intrinsic()) { + // method handle invokes +- success = for_method_handle_inline(target); ++ success = try_method_handle_inline(target); + } else { + // static binding => check if callee is ok + success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver); +@@ -1871,7 +1872,7 @@ + + // inlining not successful => standard invoke 
+ bool is_loaded = target->is_loaded(); +- ValueType* result_type = as_ValueType(target->return_type()); ++ ValueType* result_type = as_ValueType(declared_signature->return_type()); + + // We require the debug info to be the "state before" because + // invokedynamics may deoptimize. +@@ -3794,7 +3795,7 @@ + } + + +-bool GraphBuilder::for_method_handle_inline(ciMethod* callee) { ++bool GraphBuilder::try_method_handle_inline(ciMethod* callee) { + ValueStack* state_before = state()->copy_for_parsing(); + vmIntrinsics::ID iid = callee->intrinsic_id(); + switch (iid) { +@@ -3829,7 +3830,7 @@ + // If the target is another method handle invoke try recursivly to get + // a better target. + if (target->is_method_handle_intrinsic()) { +- if (for_method_handle_inline(target)) { ++ if (try_method_handle_inline(target)) { + return true; + } + } else { +diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/c1/c1_GraphBuilder.hpp +--- openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Mon Sep 10 16:37:22 2012 -0700 ++++ openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Thu Feb 06 14:33:44 2014 +0000 +@@ -346,7 +346,7 @@ + const char* should_not_inline(ciMethod* callee) const; + + // JSR 292 support +- bool for_method_handle_inline(ciMethod* callee); ++ bool try_method_handle_inline(ciMethod* callee); + + // helpers + void inline_bailout(const char* msg); +diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/ci/bcEscapeAnalyzer.cpp +--- openjdk/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp Mon Sep 10 16:37:22 2012 -0700 ++++ openjdk/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp Thu Feb 06 14:33:44 2014 +0000 +@@ -236,12 +236,16 @@ + ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder); + ciInstanceKlass* actual_recv = callee_holder; + +- // some methods are obviously bindable without any type checks so +- // convert them directly to an invokespecial. 
++ // Some methods are obviously bindable without any type checks so ++ // convert them directly to an invokespecial or invokestatic. + if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) { + switch (code) { +- case Bytecodes::_invokevirtual: code = Bytecodes::_invokespecial; break; +- case Bytecodes::_invokehandle: code = Bytecodes::_invokestatic; break; ++ case Bytecodes::_invokevirtual: ++ code = Bytecodes::_invokespecial; ++ break; ++ case Bytecodes::_invokehandle: ++ code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial; ++ break; + } + } + +@@ -826,8 +830,8 @@ + break; + case Bytecodes::_getstatic: + case Bytecodes::_getfield: +- { bool will_link; +- ciField* field = s.get_field(will_link); ++ { bool ignored_will_link; ++ ciField* field = s.get_field(ignored_will_link); + BasicType field_type = field->type()->basic_type(); + if (s.cur_bc() != Bytecodes::_getstatic) { + set_method_escape(state.apop()); +@@ -865,16 +869,21 @@ + case Bytecodes::_invokestatic: + case Bytecodes::_invokedynamic: + case Bytecodes::_invokeinterface: +- { bool will_link; +- ciMethod* target = s.get_method(will_link); +- ciKlass* holder = s.get_declared_method_holder(); ++ { bool ignored_will_link; ++ ciSignature* declared_signature = NULL; ++ ciMethod* target = s.get_method(ignored_will_link, &declared_signature); ++ ciKlass* holder = s.get_declared_method_holder(); ++ assert(declared_signature != NULL, "cannot be null"); + // Push appendix argument, if one. + if (s.has_appendix()) { + state.apush(unknown_obj); + } + // Pass in raw bytecode because we need to see invokehandle instructions. + invoke(state, s.cur_bc_raw(), target, holder); +- ciType* return_type = target->return_type(); ++ // We are using the return type of the declared signature here because ++ // it might be a more concrete type than the one from the target (for ++ // e.g. invokedynamic and invokehandle). 
++ ciType* return_type = declared_signature->return_type(); + if (!return_type->is_primitive_type()) { + state.apush(unknown_obj); + } else if (return_type->is_one_word()) { +diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/ci/ciEnv.cpp +--- openjdk/hotspot/src/share/vm/ci/ciEnv.cpp Mon Sep 10 16:37:22 2012 -0700 ++++ openjdk/hotspot/src/share/vm/ci/ciEnv.cpp Thu Feb 06 14:33:44 2014 +0000 +@@ -738,91 +738,81 @@ + ciMethod* ciEnv::get_method_by_index_impl(constantPoolHandle cpool, + int index, Bytecodes::Code bc, + ciInstanceKlass* accessor) { +- int holder_index = cpool->klass_ref_index_at(index); +- bool holder_is_accessible; +- ciKlass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor); +- ciInstanceKlass* declared_holder = get_instance_klass_for_declared_method_holder(holder); ++ if (bc == Bytecodes::_invokedynamic) { ++ ConstantPoolCacheEntry* secondary_entry = cpool->cache()->secondary_entry_at(index); ++ const bool is_resolved = !secondary_entry->is_f1_null(); ++ // FIXME: code generation could allow for null (unlinked) call site ++ // The call site could be made patchable as follows: ++ // Load the appendix argument from the constant pool. ++ // Test the appendix argument and jump to a known deopt routine if it is null. ++ // Jump through a patchable call site, which is initially a deopt routine. ++ // Patch the call site to the nmethod entry point of the static compiled lambda form. ++ // As with other two-component call sites, both values must be independently verified. + +- // Get the method's name and signature. +- Symbol* name_sym = cpool->name_ref_at(index); +- Symbol* sig_sym = cpool->signature_ref_at(index); ++ if (is_resolved) { ++ // Get the invoker methodOop and the extra argument from the constant pool. 
++ methodOop adapter = secondary_entry->f2_as_vfinal_method(); ++ return get_object(adapter)->as_method(); ++ } + +- if (cpool->has_preresolution() +- || (holder == ciEnv::MethodHandle_klass() && +- MethodHandles::is_signature_polymorphic_name(holder->get_klassOop(), name_sym))) { +- // Short-circuit lookups for JSR 292-related call sites. +- // That is, do not rely only on name-based lookups, because they may fail +- // if the names are not resolvable in the boot class loader (7056328). +- switch (bc) { +- case Bytecodes::_invokevirtual: +- case Bytecodes::_invokeinterface: +- case Bytecodes::_invokespecial: +- case Bytecodes::_invokestatic: +- { +- oop appendix_oop = NULL; +- methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index); +- if (m != NULL) { +- return get_object(m)->as_method(); +- } +- } +- break; +- } +- } +- +- if (holder_is_accessible) { // Our declared holder is loaded. +- instanceKlass* lookup = declared_holder->get_instanceKlass(); +- methodOop m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc); +- if (m != NULL && +- (bc == Bytecodes::_invokestatic +- ? instanceKlass::cast(m->method_holder())->is_not_initialized() +- : !instanceKlass::cast(m->method_holder())->is_loaded())) { +- m = NULL; +- } +- if (m != NULL) { +- // We found the method. +- return get_object(m)->as_method(); +- } +- } +- +- // Either the declared holder was not loaded, or the method could +- // not be found. Create a dummy ciMethod to represent the failed +- // lookup. 
+- ciSymbol* name = get_symbol(name_sym); +- ciSymbol* signature = get_symbol(sig_sym); +- return get_unloaded_method(declared_holder, name, signature, accessor); +-} +- +- +-// ------------------------------------------------------------------ +-// ciEnv::get_fake_invokedynamic_method_impl +-ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool, +- int index, Bytecodes::Code bc, +- ciInstanceKlass* accessor) { +- // Compare the following logic with InterpreterRuntime::resolve_invokedynamic. +- assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic"); +- +- ConstantPoolCacheEntry* secondary_entry = cpool->cache()->secondary_entry_at(index); +- bool is_resolved = !secondary_entry->is_f1_null(); +- // FIXME: code generation could allow for null (unlinked) call site +- // The call site could be made patchable as follows: +- // Load the appendix argument from the constant pool. +- // Test the appendix argument and jump to a known deopt routine if it is null. +- // Jump through a patchable call site, which is initially a deopt routine. +- // Patch the call site to the nmethod entry point of the static compiled lambda form. +- // As with other two-component call sites, both values must be independently verified. +- +- // Call site might not be resolved yet. +- // Stop the code path here with an unlinked method. +- if (!is_resolved) { ++ // Fake a method that is equivalent to a declared method. 
+ ciInstanceKlass* holder = get_object(SystemDictionary::MethodHandle_klass())->as_instance_klass(); + ciSymbol* name = ciSymbol::invokeBasic_name(); + ciSymbol* signature = get_symbol(cpool->signature_ref_at(index)); + return get_unloaded_method(holder, name, signature, accessor); ++ } else { ++ const int holder_index = cpool->klass_ref_index_at(index); ++ bool holder_is_accessible; ++ ciKlass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor); ++ ciInstanceKlass* declared_holder = get_instance_klass_for_declared_method_holder(holder); ++ ++ // Get the method's name and signature. ++ Symbol* name_sym = cpool->name_ref_at(index); ++ Symbol* sig_sym = cpool->signature_ref_at(index); ++ ++ if (cpool->has_preresolution() ++ || (holder == ciEnv::MethodHandle_klass() && ++ MethodHandles::is_signature_polymorphic_name(holder->get_klassOop(), name_sym))) { ++ // Short-circuit lookups for JSR 292-related call sites. ++ // That is, do not rely only on name-based lookups, because they may fail ++ // if the names are not resolvable in the boot class loader (7056328). ++ switch (bc) { ++ case Bytecodes::_invokevirtual: ++ case Bytecodes::_invokeinterface: ++ case Bytecodes::_invokespecial: ++ case Bytecodes::_invokestatic: ++ { ++ methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index); ++ if (m != NULL) { ++ return get_object(m)->as_method(); ++ } ++ } ++ break; ++ } ++ } ++ ++ if (holder_is_accessible) { // Our declared holder is loaded. ++ instanceKlass* lookup = declared_holder->get_instanceKlass(); ++ methodOop m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc); ++ if (m != NULL && ++ (bc == Bytecodes::_invokestatic ++ ? instanceKlass::cast(m->method_holder())->is_not_initialized() ++ : !instanceKlass::cast(m->method_holder())->is_loaded())) { ++ m = NULL; ++ } ++ if (m != NULL) { ++ // We found the method. 
++ return get_object(m)->as_method(); ++ } ++ } ++ ++ // Either the declared holder was not loaded, or the method could ++ // not be found. Create a dummy ciMethod to represent the failed ++ // lookup. ++ ciSymbol* name = get_symbol(name_sym); ++ ciSymbol* signature = get_symbol(sig_sym); ++ return get_unloaded_method(declared_holder, name, signature, accessor); + } +- +- // Get the invoker methodOop and the extra argument from the constant pool. +- methodOop adapter = secondary_entry->f2_as_vfinal_method(); +- return get_object(adapter)->as_method(); + } + + +@@ -853,11 +843,7 @@ + ciMethod* ciEnv::get_method_by_index(constantPoolHandle cpool, + int index, Bytecodes::Code bc, + ciInstanceKlass* accessor) { +- if (bc == Bytecodes::_invokedynamic) { +- GUARDED_VM_ENTRY(return get_fake_invokedynamic_method_impl(cpool, index, bc, accessor);) +- } else { +- GUARDED_VM_ENTRY(return get_method_by_index_impl( cpool, index, bc, accessor);) +- } ++ GUARDED_VM_ENTRY(return get_method_by_index_impl(cpool, index, bc, accessor);) + } + + +diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/ci/ciEnv.hpp +--- openjdk/hotspot/src/share/vm/ci/ciEnv.hpp Mon Sep 10 16:37:22 2012 -0700 ++++ openjdk/hotspot/src/share/vm/ci/ciEnv.hpp Thu Feb 06 14:33:44 2014 +0000 +@@ -152,9 +152,6 @@ + ciMethod* get_method_by_index_impl(constantPoolHandle cpool, + int method_index, Bytecodes::Code bc, + ciInstanceKlass* loading_klass); +- ciMethod* get_fake_invokedynamic_method_impl(constantPoolHandle cpool, +- int index, Bytecodes::Code bc, +- ciInstanceKlass* accessor); + + // Helper methods + bool check_klass_accessibility(ciKlass* accessing_klass, +diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/ci/ciMethod.cpp +--- openjdk/hotspot/src/share/vm/ci/ciMethod.cpp Mon Sep 10 16:37:22 2012 -0700 ++++ openjdk/hotspot/src/share/vm/ci/ciMethod.cpp Thu Feb 06 14:33:44 2014 +0000 +@@ -1216,9 +1216,10 @@ + holder()->print_name_on(st); + st->print(" signature="); + 
signature()->as_symbol()->print_symbol_on(st); +- st->print(" arg_size=%d", arg_size()); + if (is_loaded()) { +- st->print(" loaded=true flags="); ++ st->print(" loaded=true"); ++ st->print(" arg_size=%d", arg_size()); ++ st->print(" flags="); + flags().print_member_flags(st); + } else { + st->print(" loaded=false"); +diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/ci/ciStreams.cpp +--- openjdk/hotspot/src/share/vm/ci/ciStreams.cpp Mon Sep 10 16:37:22 2012 -0700 ++++ openjdk/hotspot/src/share/vm/ci/ciStreams.cpp Thu Feb 06 14:33:44 2014 +0000 +@@ -355,11 +355,23 @@ + // ciBytecodeStream::get_method + // + // If this is a method invocation bytecode, get the invoked method. +-ciMethod* ciBytecodeStream::get_method(bool& will_link) { ++// Additionally return the declared signature to get more concrete ++// type information if required (Cf. invokedynamic and invokehandle). ++ciMethod* ciBytecodeStream::get_method(bool& will_link, ciSignature* *declared_signature_result) { + VM_ENTRY_MARK; ++ ciEnv* env = CURRENT_ENV; + constantPoolHandle cpool(_method->get_methodOop()->constants()); +- ciMethod* m = CURRENT_ENV->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder); ++ ciMethod* m = env->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder); + will_link = m->is_loaded(); ++ // Get declared method signature and return it. 
++ if (has_optional_appendix()) { ++ const int sig_index = get_method_signature_index(); ++ Symbol* sig_sym = cpool->symbol_at(sig_index); ++ ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass(); ++ (*declared_signature_result) = new (env->arena()) ciSignature(pool_holder, cpool, env->get_symbol(sig_sym)); ++ } else { ++ (*declared_signature_result) = m->signature(); ++ } + return m; + } + +@@ -419,35 +431,18 @@ + } + + // ------------------------------------------------------------------ +-// ciBytecodeStream::get_declared_method_signature +-// +-// Get the declared signature of the currently referenced method. +-// +-// This is always the same as the signature of the resolved method +-// itself, except for _invokehandle and _invokedynamic calls. +-// +-ciSignature* ciBytecodeStream::get_declared_method_signature() { +- int sig_index = get_method_signature_index(); +- VM_ENTRY_MARK; +- ciEnv* env = CURRENT_ENV; +- constantPoolHandle cpool(_method->get_methodOop()->constants()); +- Symbol* sig_sym = cpool->symbol_at(sig_index); +- ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass(); +- return new (env->arena()) ciSignature(pool_holder, cpool, env->get_symbol(sig_sym)); +-} +- +-// ------------------------------------------------------------------ + // ciBytecodeStream::get_method_signature_index + // + // Get the constant pool index of the signature of the method + // referenced by the current bytecode. Used for generating + // deoptimization information. 
+ int ciBytecodeStream::get_method_signature_index() { +- VM_ENTRY_MARK; +- constantPoolOop cpool = _holder->get_instanceKlass()->constants(); +- int method_index = get_method_index(); +- int name_and_type_index = cpool->name_and_type_ref_index_at(method_index); +- return cpool->signature_ref_index_at(name_and_type_index); ++ GUARDED_VM_ENTRY( ++ constantPoolOop cpool = _holder->get_instanceKlass()->constants(); ++ const int method_index = get_method_index(); ++ const int name_and_type_index = cpool->name_and_type_ref_index_at(method_index); ++ return cpool->signature_ref_index_at(name_and_type_index); ++ ) + } + + // ------------------------------------------------------------------ +diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/ci/ciStreams.hpp +--- openjdk/hotspot/src/share/vm/ci/ciStreams.hpp Mon Sep 10 16:37:22 2012 -0700 ++++ openjdk/hotspot/src/share/vm/ci/ciStreams.hpp Thu Feb 06 14:33:44 2014 +0000 +@@ -151,6 +151,8 @@ + // Does this instruction contain an index which refes into the CP cache? + bool has_cache_index() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); } + ++ bool has_optional_appendix() { return Bytecodes::has_optional_appendix(cur_bc_raw()); } ++ + int get_index_u1() const { + return bytecode().get_index_u1(cur_bc_raw()); + } +@@ -257,13 +259,11 @@ + int get_field_holder_index(); + int get_field_signature_index(); + +- // If this is a method invocation bytecode, get the invoked method. 
+- ciMethod* get_method(bool& will_link); ++ ciMethod* get_method(bool& will_link, ciSignature* *declared_signature_result); + bool has_appendix(); + ciObject* get_appendix(); + ciKlass* get_declared_method_holder(); + int get_method_holder_index(); +- ciSignature* get_declared_method_signature(); + int get_method_signature_index(); + + ciCPCache* get_cpcache() const; +diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/ci/ciTypeFlow.cpp +--- openjdk/hotspot/src/share/vm/ci/ciTypeFlow.cpp Mon Sep 10 16:37:22 2012 -0700 ++++ openjdk/hotspot/src/share/vm/ci/ciTypeFlow.cpp Thu Feb 06 14:33:44 2014 +0000 +@@ -643,9 +643,11 @@ + // ------------------------------------------------------------------ + // ciTypeFlow::StateVector::do_invoke + void ciTypeFlow::StateVector::do_invoke(ciBytecodeStream* str, +- bool has_receiver_foo) { ++ bool has_receiver) { + bool will_link; +- ciMethod* callee = str->get_method(will_link); ++ ciSignature* declared_signature = NULL; ++ ciMethod* callee = str->get_method(will_link, &declared_signature); ++ assert(declared_signature != NULL, "cannot be null"); + if (!will_link) { + // We weren't able to find the method. + if (str->cur_bc() == Bytecodes::_invokedynamic) { +@@ -658,22 +660,12 @@ + trap(str, unloaded_holder, str->get_method_holder_index()); + } + } else { +- // TODO Use Bytecode_invoke after metadata changes. +- //Bytecode_invoke inv(str->method(), str->cur_bci()); +- //const bool has_receiver = callee->is_loaded() ? !callee->is_static() : inv.has_receiver(); +- Bytecode inv(str); +- Bytecodes::Code code = inv.invoke_code(); +- const bool has_receiver = callee->is_loaded() ? !callee->is_static() : code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic; +- +- ciSignature* signature = callee->signature(); +- ciSignatureStream sigstr(signature); +- // Push appendix argument, if one. 
+- if (str->has_appendix()) { +- ciObject* appendix = str->get_appendix(); +- push_object(appendix->klass()); +- } +- int arg_size = signature->size(); +- int stack_base = stack_size() - arg_size; ++ // We are using the declared signature here because it might be ++ // different from the callee signature (Cf. invokedynamic and ++ // invokehandle). ++ ciSignatureStream sigstr(declared_signature); ++ const int arg_size = declared_signature->size(); ++ const int stack_base = stack_size() - arg_size; + int i = 0; + for( ; !sigstr.at_return_type(); sigstr.next()) { + ciType* type = sigstr.type(); +@@ -689,7 +681,6 @@ + for (int j = 0; j < arg_size; j++) { + pop(); + } +- assert(!callee->is_loaded() || has_receiver == !callee->is_static(), "mismatch"); + if (has_receiver) { + // Check this? + pop_object(); +diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/interpreter/bytecodes.hpp +--- openjdk/hotspot/src/share/vm/interpreter/bytecodes.hpp Mon Sep 10 16:37:22 2012 -0700 ++++ openjdk/hotspot/src/share/vm/interpreter/bytecodes.hpp Thu Feb 06 14:33:44 2014 +0000 +@@ -424,6 +424,8 @@ + || code == _fconst_0 || code == _dconst_0); } + static bool is_invoke (Code code) { return (_invokevirtual <= code && code <= _invokedynamic); } + ++ static bool has_optional_appendix(Code code) { return code == _invokedynamic || code == _invokehandle; } ++ + static int compute_flags (const char* format, int more_flags = 0); // compute the flags + static int flags (int code, bool is_wide) { + assert(code == (u_char)code, "must be a byte"); +diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/opto/doCall.cpp +--- openjdk/hotspot/src/share/vm/opto/doCall.cpp Mon Sep 10 16:37:22 2012 -0700 ++++ openjdk/hotspot/src/share/vm/opto/doCall.cpp Thu Feb 06 14:33:44 2014 +0000 +@@ -341,25 +341,26 @@ + kill_dead_locals(); + + // Set frequently used booleans +- bool is_virtual = bc() == Bytecodes::_invokevirtual; +- bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface; +- bool 
has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial; +- bool is_invokedynamic = bc() == Bytecodes::_invokedynamic; ++ const bool is_virtual = bc() == Bytecodes::_invokevirtual; ++ const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface; ++ const bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial; + + // Find target being called + bool will_link; +- ciMethod* bc_callee = iter().get_method(will_link); // actual callee from bytecode +- ciInstanceKlass* holder_klass = bc_callee->holder(); +- ciKlass* holder = iter().get_declared_method_holder(); ++ ciSignature* declared_signature = NULL; ++ ciMethod* orig_callee = iter().get_method(will_link, &declared_signature); // callee in the bytecode ++ ciInstanceKlass* holder_klass = orig_callee->holder(); ++ ciKlass* holder = iter().get_declared_method_holder(); + ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder); ++ assert(declared_signature != NULL, "cannot be null"); + + // uncommon-trap when callee is unloaded, uninitialized or will not link + // bailout when too many arguments for register representation +- if (!will_link || can_not_compile_call_site(bc_callee, klass)) { ++ if (!will_link || can_not_compile_call_site(orig_callee, klass)) { + #ifndef PRODUCT + if (PrintOpto && (Verbose || WizardMode)) { + method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci()); +- bc_callee->print_name(); tty->cr(); ++ orig_callee->print_name(); tty->cr(); + } + #endif + return; +@@ -372,7 +373,7 @@ + // Note: In the absence of miranda methods, an abstract class K can perform + // an invokevirtual directly on an interface method I.m if K implements I. + +- const int nargs = bc_callee->arg_size(); ++ const int nargs = orig_callee->arg_size(); + + // Push appendix argument (MethodType, CallSite, etc.), if one. + if (iter().has_appendix()) { +@@ -392,13 +393,13 @@ + // Choose call strategy. 
+ bool call_is_virtual = is_virtual_or_interface; + int vtable_index = methodOopDesc::invalid_vtable_index; +- ciMethod* callee = bc_callee; ++ ciMethod* callee = orig_callee; + + // Try to get the most accurate receiver type + if (is_virtual_or_interface) { + Node* receiver_node = stack(sp() - nargs); + const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr(); +- ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, bc_callee, receiver_type); ++ ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, orig_callee, receiver_type); + + // Have the call been sufficiently improved such that it is no longer a virtual? + if (optimized_virtual_method != NULL) { +@@ -425,7 +426,8 @@ + // It decides whether inlining is desirable or not. + CallGenerator* cg = C->call_generator(callee, vtable_index, call_is_virtual, jvms, try_inline, prof_factor()); + +- bc_callee = callee = NULL; // don't use bc_callee and callee after this point ++ // NOTE: Don't use orig_callee and callee after this point! Use cg->method() instead. ++ orig_callee = callee = NULL; + + // --------------------- + // Round double arguments before call +@@ -506,9 +508,9 @@ + round_double_result(cg->method()); + + ciType* rtype = cg->method()->return_type(); +- if (iter().cur_bc_raw() == Bytecodes::_invokehandle || is_invokedynamic) { ++ if (Bytecodes::has_optional_appendix(iter().cur_bc_raw())) { + // Be careful here with return types. 
+- ciType* ctype = iter().get_declared_method_signature()->return_type(); ++ ciType* ctype = declared_signature->return_type(); + if (ctype != rtype) { + BasicType rt = rtype->basic_type(); + BasicType ct = ctype->basic_type(); +@@ -537,15 +539,13 @@ + } else if (rt == T_OBJECT) { + assert(ct == T_OBJECT, err_msg("rt=T_OBJECT, ct=%d", ct)); + if (ctype->is_loaded()) { +- Node* if_fail = top(); +- retnode = gen_checkcast(retnode, makecon(TypeKlassPtr::make(ctype->as_klass())), &if_fail); +- if (if_fail != top()) { +- PreserveJVMState pjvms(this); +- set_control(if_fail); +- builtin_throw(Deoptimization::Reason_class_check); ++ const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass()); ++ const Type* sig_type = TypeOopPtr::make_from_klass(ctype->as_klass()); ++ if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { ++ Node* cast_obj = _gvn.transform(new (C, 2) CheckCastPPNode(control(), retnode, sig_type)); ++ pop(); ++ push(cast_obj); + } +- pop(); +- push(retnode); + } + } else { + assert(ct == rt, err_msg("unexpected mismatch rt=%d, ct=%d", rt, ct)); +diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/opto/graphKit.cpp +--- openjdk/hotspot/src/share/vm/opto/graphKit.cpp Mon Sep 10 16:37:22 2012 -0700 ++++ openjdk/hotspot/src/share/vm/opto/graphKit.cpp Thu Feb 06 14:33:44 2014 +0000 +@@ -1006,11 +1006,11 @@ + case Bytecodes::_putfield: + { + bool is_get = (depth >= 0), is_static = (depth & 1); +- bool ignore; + ciBytecodeStream iter(method()); + iter.reset_to_bci(bci()); + iter.next(); +- ciField* field = iter.get_field(ignore); ++ bool ignored_will_link; ++ ciField* field = iter.get_field(ignored_will_link); + int size = field->type()->size(); + inputs = (is_static ? 
0 : 1); + if (is_get) { +@@ -1028,11 +1028,13 @@ + case Bytecodes::_invokedynamic: + case Bytecodes::_invokeinterface: + { +- bool ignore; + ciBytecodeStream iter(method()); + iter.reset_to_bci(bci()); + iter.next(); +- ciMethod* callee = iter.get_method(ignore); ++ bool ignored_will_link; ++ ciSignature* declared_signature = NULL; ++ ciMethod* callee = iter.get_method(ignored_will_link, &declared_signature); ++ assert(declared_signature != NULL, "cannot be null"); + // (Do not use ciMethod::arg_size(), because + // it might be an unloaded method, which doesn't + // know whether it is static or not.) +@@ -1046,7 +1048,7 @@ + // remove any appendix arguments that were popped. + inputs = callee->invoke_arg_size(code) - (callee->has_member_arg() ? 1 : 0); + } +- int size = callee->return_type()->size(); ++ int size = declared_signature->return_type()->size(); + depth = size - inputs; + } + break; diff -r 531847dfec6f -r ed2108ad126a patches/jsr292/7196242-loopsandthreads_crashed.patch --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/patches/jsr292/7196242-loopsandthreads_crashed.patch Thu Mar 27 04:19:17 2014 +0000 @@ -0,0 +1,124 @@ +# HG changeset patch +# User twisti +# Date 1347320242 25200 +# Mon Sep 10 16:37:22 2012 -0700 +# Node ID 98f6e8bc55e8dbac329e3d871de88b2a7529ff6d +# Parent 19ac51ce4be77e6895816f9823bce63a72392e89 +7196242: vm/mlvm/indy/stress/java/loopsAndThreads crashed +Reviewed-by: jrose, coleenp, jmasa, kvn + +diff -r 19ac51ce4be7 -r 98f6e8bc55e8 src/share/vm/interpreter/interpreterRuntime.cpp +--- openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Thu Feb 06 14:24:53 2014 +0000 ++++ openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Mon Sep 10 16:37:22 2012 -0700 +@@ -762,6 +762,7 @@ + } // end JvmtiHideSingleStepping + + cache_entry(thread)->set_method_handle( ++ pool, + info.resolved_method(), + info.resolved_appendix()); + } +@@ -788,6 +789,7 @@ + } // end JvmtiHideSingleStepping + + 
pool->cache()->secondary_entry_at(index)->set_dynamic_call( ++ pool, + info.resolved_method(), + info.resolved_appendix()); + } +diff -r 19ac51ce4be7 -r 98f6e8bc55e8 src/share/vm/oops/cpCacheOop.cpp +--- openjdk/hotspot/src/share/vm/oops/cpCacheOop.cpp Thu Feb 06 14:24:53 2014 +0000 ++++ openjdk/hotspot/src/share/vm/oops/cpCacheOop.cpp Mon Sep 10 16:37:22 2012 -0700 +@@ -265,25 +265,36 @@ + } + + +-void ConstantPoolCacheEntry::set_method_handle(methodHandle adapter, Handle appendix) { ++void ConstantPoolCacheEntry::set_method_handle(constantPoolHandle cpool, ++ methodHandle adapter, Handle appendix) { + assert(!is_secondary_entry(), ""); +- set_method_handle_common(Bytecodes::_invokehandle, adapter, appendix); ++ set_method_handle_common(cpool, Bytecodes::_invokehandle, adapter, appendix); + } + +-void ConstantPoolCacheEntry::set_dynamic_call(methodHandle adapter, Handle appendix) { ++void ConstantPoolCacheEntry::set_dynamic_call(constantPoolHandle cpool, ++ methodHandle adapter, Handle appendix) { + assert(is_secondary_entry(), ""); +- set_method_handle_common(Bytecodes::_invokedynamic, adapter, appendix); ++ set_method_handle_common(cpool, Bytecodes::_invokedynamic, adapter, appendix); + } + +-void ConstantPoolCacheEntry::set_method_handle_common(Bytecodes::Code invoke_code, methodHandle adapter, Handle appendix) { ++void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool, ++ Bytecodes::Code invoke_code, ++ methodHandle adapter, ++ Handle appendix) { + // NOTE: This CPCE can be the subject of data races. + // There are three words to update: flags, f2, f1 (in that order). + // Writers must store all other values before f1. + // Readers must test f1 first for non-null before reading other fields. +- // Competing writers must acquire exclusive access on the first +- // write, to flags, using a compare/exchange. +- // A losing writer must spin until the winner writes f1, +- // so that when he returns, he can use the linked cache entry. 
++ // Competing writers must acquire exclusive access via a lock. ++ // A losing writer waits on the lock until the winner writes f1 and leaves ++ // the lock, so that when the losing writer returns, he can use the linked ++ // cache entry. ++ ++ Thread* THREAD = Thread::current(); ++ ObjectLocker ol(cpool, THREAD); ++ if (!is_f1_null()) { ++ return; ++ } + + bool has_appendix = appendix.not_null(); + if (!has_appendix) { +@@ -292,20 +303,11 @@ + appendix = Universe::void_mirror(); + } + +- bool owner = +- init_method_flags_atomic(as_TosState(adapter->result_type()), ++ set_method_flags(as_TosState(adapter->result_type()), + ((has_appendix ? 1 : 0) << has_appendix_shift) | + ( 1 << is_vfinal_shift) | + ( 1 << is_final_shift), + adapter->size_of_parameters()); +- if (!owner) { +- while (is_f1_null()) { +- // Pause momentarily on a low-level lock, to allow racing thread to win. +- MutexLockerEx mu(Patching_lock, Mutex::_no_safepoint_check_flag); +- os::yield(); +- } +- return; +- } + + if (TraceInvokeDynamic) { + tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method="PTR_FORMAT" ", +diff -r 19ac51ce4be7 -r 98f6e8bc55e8 src/share/vm/oops/cpCacheOop.hpp +--- openjdk/hotspot/src/share/vm/oops/cpCacheOop.hpp Thu Feb 06 14:24:53 2014 +0000 ++++ openjdk/hotspot/src/share/vm/oops/cpCacheOop.hpp Mon Sep 10 16:37:22 2012 -0700 +@@ -222,11 +222,13 @@ + ); + + void set_method_handle( ++ constantPoolHandle cpool, // holding constant pool (required for locking) + methodHandle method, // adapter for invokeExact, etc. + Handle appendix // stored in f1; could be a java.lang.invoke.MethodType + ); + + void set_dynamic_call( ++ constantPoolHandle cpool, // holding constant pool (required for locking) + methodHandle method, // adapter for this call site + Handle appendix // stored in f1; could be a java.lang.invoke.CallSite + ); +@@ -247,6 +249,7 @@ + // resolution logic needs to make slightly different assessments about the + // number and types of arguments. 
+ void set_method_handle_common( ++ constantPoolHandle cpool, // holding constant pool (required for locking) + Bytecodes::Code invoke_code, // _invokehandle or _invokedynamic + methodHandle adapter, // invoker method (f2) + Handle appendix // appendix such as CallSite, MethodType, etc. (f1) diff -r 531847dfec6f -r ed2108ad126a patches/jsr292/7200949-jruby_fail.patch --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/patches/jsr292/7200949-jruby_fail.patch Thu Mar 27 04:19:17 2014 +0000 @@ -0,0 +1,1045 @@ +# HG changeset patch +# User twisti +# Date 1391807860 0 +# Fri Feb 07 21:17:40 2014 +0000 +# Node ID a66016d23db17dbe4b8d54b1680f732a116e0a4c +# Parent 38ae397aa523096aa3f94d23e1a38aa75e55f8f5 +7200949: JSR 292: rubybench/bench/time/bench_base64.rb fails with jruby.jar not on boot class path +Reviewed-by: jrose, kvn + +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/ci/ciClassList.hpp +--- openjdk/hotspot/src/share/vm/ci/ciClassList.hpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/ci/ciClassList.hpp Fri Feb 07 21:17:40 2014 +0000 +@@ -49,6 +49,7 @@ + class ciCallSite; + class ciMemberName; + class ciMethodHandle; ++class ciMethodType; + class ciMethod; + class ciMethodData; + class ciReceiverTypeData; // part of ciMethodData +@@ -105,6 +106,7 @@ + friend class ciMethod; \ + friend class ciMethodData; \ + friend class ciMethodHandle; \ ++friend class ciMethodType; \ + friend class ciReceiverTypeData; \ + friend class ciSymbol; \ + friend class ciArray; \ +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/ci/ciMethodType.hpp +--- /dev/null Thu Jan 01 00:00:00 1970 +0000 ++++ openjdk/hotspot/src/share/vm/ci/ciMethodType.hpp Fri Feb 07 21:17:40 2014 +0000 +@@ -0,0 +1,76 @@ ++/* ++ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef SHARE_VM_CI_CIMETHODTYPE_HPP ++#define SHARE_VM_CI_CIMETHODTYPE_HPP ++ ++#include "ci/ciInstance.hpp" ++#include "ci/ciUtilities.hpp" ++#include "classfile/javaClasses.hpp" ++ ++// ciMethodType ++// ++// The class represents a java.lang.invoke.MethodType object. ++class ciMethodType : public ciInstance { ++private: ++ ciType* class_to_citype(oop klass_oop) const { ++ if (java_lang_Class::is_primitive(klass_oop)) { ++ BasicType bt = java_lang_Class::primitive_type(klass_oop); ++ return ciType::make(bt); ++ } else { ++ klassOop k = java_lang_Class::as_klassOop(klass_oop); ++ return CURRENT_ENV->get_object(k)->as_klass(); ++ } ++ } ++ ++public: ++ ciMethodType(instanceHandle h_i) : ciInstance(h_i) {} ++ ++ // What kind of ciObject is this? 
++ bool is_method_type() const { return true; } ++ ++ ciType* rtype() const { ++ GUARDED_VM_ENTRY( ++ oop rtype = java_lang_invoke_MethodType::rtype(get_oop()); ++ return class_to_citype(rtype); ++ ) ++ } ++ ++ int ptype_count() const { ++ GUARDED_VM_ENTRY(return java_lang_invoke_MethodType::ptype_count(get_oop());) ++ } ++ ++ int ptype_slot_count() const { ++ GUARDED_VM_ENTRY(return java_lang_invoke_MethodType::ptype_slot_count(get_oop());) ++ } ++ ++ ciType* ptype_at(int index) const { ++ GUARDED_VM_ENTRY( ++ oop ptype = java_lang_invoke_MethodType::ptype(get_oop(), index); ++ return class_to_citype(ptype); ++ ) ++ } ++}; ++ ++#endif // SHARE_VM_CI_CIMETHODTYPE_HPP +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/ci/ciObject.hpp +--- openjdk/hotspot/src/share/vm/ci/ciObject.hpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/ci/ciObject.hpp Fri Feb 07 21:17:40 2014 +0000 +@@ -146,6 +146,7 @@ + virtual bool is_method() { return false; } + virtual bool is_method_data() { return false; } + virtual bool is_method_handle() const { return false; } ++ virtual bool is_method_type() const { return false; } + virtual bool is_array() { return false; } + virtual bool is_obj_array() { return false; } + virtual bool is_type_array() { return false; } +@@ -193,103 +194,107 @@ + } + + // Subclass casting with assertions. 
+- ciNullObject* as_null_object() { ++ ciNullObject* as_null_object() { + assert(is_null_object(), "bad cast"); + return (ciNullObject*)this; + } +- ciCallSite* as_call_site() { ++ ciCallSite* as_call_site() { + assert(is_call_site(), "bad cast"); +- return (ciCallSite*) this; ++ return (ciCallSite*)this; + } +- ciCPCache* as_cpcache() { ++ ciCPCache* as_cpcache() { + assert(is_cpcache(), "bad cast"); +- return (ciCPCache*) this; ++ return (ciCPCache*)this; + } +- ciInstance* as_instance() { ++ ciInstance* as_instance() { + assert(is_instance(), "bad cast"); + return (ciInstance*)this; + } +- ciMemberName* as_member_name() { ++ ciMemberName* as_member_name() { + assert(is_member_name(), "bad cast"); + return (ciMemberName*)this; + } +- ciMethod* as_method() { ++ ciMethod* as_method() { + assert(is_method(), "bad cast"); + return (ciMethod*)this; + } +- ciMethodData* as_method_data() { ++ ciMethodData* as_method_data() { + assert(is_method_data(), "bad cast"); + return (ciMethodData*)this; + } +- ciMethodHandle* as_method_handle() { ++ ciMethodHandle* as_method_handle() { + assert(is_method_handle(), "bad cast"); +- return (ciMethodHandle*) this; ++ return (ciMethodHandle*)this; + } +- ciArray* as_array() { ++ ciMethodType* as_method_type() { ++ assert(is_method_type(), "bad cast"); ++ return (ciMethodType*)this; ++ } ++ ciArray* as_array() { + assert(is_array(), "bad cast"); + return (ciArray*)this; + } +- ciObjArray* as_obj_array() { ++ ciObjArray* as_obj_array() { + assert(is_obj_array(), "bad cast"); + return (ciObjArray*)this; + } +- ciTypeArray* as_type_array() { ++ ciTypeArray* as_type_array() { + assert(is_type_array(), "bad cast"); + return (ciTypeArray*)this; + } +- ciSymbol* as_symbol() { ++ ciSymbol* as_symbol() { + assert(is_symbol(), "bad cast"); + return (ciSymbol*)this; + } +- ciType* as_type() { ++ ciType* as_type() { + assert(is_type(), "bad cast"); + return (ciType*)this; + } +- ciReturnAddress* as_return_address() { ++ ciReturnAddress* 
as_return_address() { + assert(is_return_address(), "bad cast"); + return (ciReturnAddress*)this; + } +- ciKlass* as_klass() { ++ ciKlass* as_klass() { + assert(is_klass(), "bad cast"); + return (ciKlass*)this; + } +- ciInstanceKlass* as_instance_klass() { ++ ciInstanceKlass* as_instance_klass() { + assert(is_instance_klass(), "bad cast"); + return (ciInstanceKlass*)this; + } +- ciMethodKlass* as_method_klass() { ++ ciMethodKlass* as_method_klass() { + assert(is_method_klass(), "bad cast"); + return (ciMethodKlass*)this; + } +- ciArrayKlass* as_array_klass() { ++ ciArrayKlass* as_array_klass() { + assert(is_array_klass(), "bad cast"); + return (ciArrayKlass*)this; + } +- ciObjArrayKlass* as_obj_array_klass() { ++ ciObjArrayKlass* as_obj_array_klass() { + assert(is_obj_array_klass(), "bad cast"); + return (ciObjArrayKlass*)this; + } +- ciTypeArrayKlass* as_type_array_klass() { ++ ciTypeArrayKlass* as_type_array_klass() { + assert(is_type_array_klass(), "bad cast"); + return (ciTypeArrayKlass*)this; + } +- ciKlassKlass* as_klass_klass() { ++ ciKlassKlass* as_klass_klass() { + assert(is_klass_klass(), "bad cast"); + return (ciKlassKlass*)this; + } +- ciInstanceKlassKlass* as_instance_klass_klass() { ++ ciInstanceKlassKlass* as_instance_klass_klass() { + assert(is_instance_klass_klass(), "bad cast"); + return (ciInstanceKlassKlass*)this; + } +- ciArrayKlassKlass* as_array_klass_klass() { ++ ciArrayKlassKlass* as_array_klass_klass() { + assert(is_array_klass_klass(), "bad cast"); + return (ciArrayKlassKlass*)this; + } +- ciObjArrayKlassKlass* as_obj_array_klass_klass() { ++ ciObjArrayKlassKlass* as_obj_array_klass_klass() { + assert(is_obj_array_klass_klass(), "bad cast"); + return (ciObjArrayKlassKlass*)this; + } +- ciTypeArrayKlassKlass* as_type_array_klass_klass() { ++ ciTypeArrayKlassKlass* as_type_array_klass_klass() { + assert(is_type_array_klass_klass(), "bad cast"); + return (ciTypeArrayKlassKlass*)this; + } +diff -r 38ae397aa523 -r a66016d23db1 
src/share/vm/ci/ciObjectFactory.cpp +--- openjdk/hotspot/src/share/vm/ci/ciObjectFactory.cpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/ci/ciObjectFactory.cpp Fri Feb 07 21:17:40 2014 +0000 +@@ -32,6 +32,7 @@ + #include "ci/ciMethod.hpp" + #include "ci/ciMethodData.hpp" + #include "ci/ciMethodHandle.hpp" ++#include "ci/ciMethodType.hpp" + #include "ci/ciMethodKlass.hpp" + #include "ci/ciNullObject.hpp" + #include "ci/ciObjArray.hpp" +@@ -349,6 +350,8 @@ + return new (arena()) ciMemberName(h_i); + else if (java_lang_invoke_MethodHandle::is_instance(o)) + return new (arena()) ciMethodHandle(h_i); ++ else if (java_lang_invoke_MethodType::is_instance(o)) ++ return new (arena()) ciMethodType(h_i); + else + return new (arena()) ciInstance(h_i); + } else if (o->is_objArray()) { +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/ci/ciSignature.cpp +--- openjdk/hotspot/src/share/vm/ci/ciSignature.cpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/ci/ciSignature.cpp Fri Feb 07 21:17:40 2014 +0000 +@@ -23,6 +23,7 @@ + */ + + #include "precompiled.hpp" ++#include "ci/ciMethodType.hpp" + #include "ci/ciSignature.hpp" + #include "ci/ciUtilities.hpp" + #include "memory/allocation.inline.hpp" +@@ -80,6 +81,24 @@ + } + + // ------------------------------------------------------------------ ++// ciSignature::ciSignature ++ciSignature::ciSignature(ciKlass* accessing_klass, ciSymbol* symbol, ciMethodType* method_type) : ++ _symbol(symbol), ++ _accessing_klass(accessing_klass), ++ _size( method_type->ptype_slot_count()), ++ _count(method_type->ptype_count()) ++{ ++ ASSERT_IN_VM; ++ EXCEPTION_CONTEXT; ++ Arena* arena = CURRENT_ENV->arena(); ++ _types = new (arena) GrowableArray(arena, _count + 1, 0, NULL); ++ for (int i = 0; i < _count; i++) { ++ _types->append(method_type->ptype_at(i)); ++ } ++ _types->append(method_type->rtype()); ++} ++ ++// ------------------------------------------------------------------ + // ciSignature::return_type + // + 
// What is the return type of this signature? +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/ci/ciSignature.hpp +--- openjdk/hotspot/src/share/vm/ci/ciSignature.hpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/ci/ciSignature.hpp Fri Feb 07 21:17:40 2014 +0000 +@@ -47,6 +47,7 @@ + friend class ciObjectFactory; + + ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* signature); ++ ciSignature(ciKlass* accessing_klass, ciSymbol* signature, ciMethodType* method_type); + + void get_all_klasses(); + +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/ci/ciStreams.cpp +--- openjdk/hotspot/src/share/vm/ci/ciStreams.cpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/ci/ciStreams.cpp Fri Feb 07 21:17:40 2014 +0000 +@@ -363,12 +363,15 @@ + constantPoolHandle cpool(_method->get_methodOop()->constants()); + ciMethod* m = env->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder); + will_link = m->is_loaded(); +- // Get declared method signature and return it. +- if (has_optional_appendix()) { +- const int sig_index = get_method_signature_index(); +- Symbol* sig_sym = cpool->symbol_at(sig_index); +- ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass(); +- (*declared_signature_result) = new (env->arena()) ciSignature(pool_holder, cpool, env->get_symbol(sig_sym)); ++ ++ // Use the MethodType stored in the CP cache to create a signature ++ // with correct types (in respect to class loaders). 
++ if (has_method_type()) { ++ ciSymbol* sig_sym = env->get_symbol(cpool->symbol_at(get_method_signature_index())); ++ ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass(); ++ ciMethodType* method_type = get_method_type(); ++ ciSignature* declared_signature = new (env->arena()) ciSignature(pool_holder, sig_sym, method_type); ++ (*declared_signature_result) = declared_signature; + } else { + (*declared_signature_result) = m->signature(); + } +@@ -399,6 +402,31 @@ + } + + // ------------------------------------------------------------------ ++// ciBytecodeStream::has_method_type ++// ++// Returns true if there is a MethodType argument stored in the ++// constant pool cache at the current bci. ++bool ciBytecodeStream::has_method_type() { ++ GUARDED_VM_ENTRY( ++ constantPoolHandle cpool(_method->get_methodOop()->constants()); ++ return constantPoolOopDesc::has_method_type_at_if_loaded(cpool, get_method_index()); ++ ) ++} ++ ++// ------------------------------------------------------------------ ++// ciBytecodeStream::get_method_type ++// ++// Return the MethodType stored in the constant pool cache at ++// the current bci. ++ciMethodType* ciBytecodeStream::get_method_type() { ++ GUARDED_VM_ENTRY( ++ constantPoolHandle cpool(_method->get_methodOop()->constants()); ++ oop method_type_oop = constantPoolOopDesc::method_type_at_if_loaded(cpool, get_method_index()); ++ return CURRENT_ENV->get_object(method_type_oop)->as_method_type(); ++ ) ++} ++ ++// ------------------------------------------------------------------ + // ciBytecodeStream::get_declared_method_holder + // + // Get the declared holder of the currently referenced method. 
+diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/ci/ciStreams.hpp +--- openjdk/hotspot/src/share/vm/ci/ciStreams.hpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/ci/ciStreams.hpp Fri Feb 07 21:17:40 2014 +0000 +@@ -259,12 +259,14 @@ + int get_field_holder_index(); + int get_field_signature_index(); + +- ciMethod* get_method(bool& will_link, ciSignature* *declared_signature_result); +- bool has_appendix(); +- ciObject* get_appendix(); +- ciKlass* get_declared_method_holder(); +- int get_method_holder_index(); +- int get_method_signature_index(); ++ ciMethod* get_method(bool& will_link, ciSignature* *declared_signature_result); ++ bool has_appendix(); ++ ciObject* get_appendix(); ++ bool has_method_type(); ++ ciMethodType* get_method_type(); ++ ciKlass* get_declared_method_holder(); ++ int get_method_holder_index(); ++ int get_method_signature_index(); + + ciCPCache* get_cpcache() const; + ciCallSite* get_call_site(); +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/classfile/systemDictionary.cpp +--- openjdk/hotspot/src/share/vm/classfile/systemDictionary.cpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/classfile/systemDictionary.cpp Fri Feb 07 21:17:40 2014 +0000 +@@ -2432,7 +2432,8 @@ + methodHandle SystemDictionary::find_method_handle_invoker(Symbol* name, + Symbol* signature, + KlassHandle accessing_klass, +- Handle* appendix_result, ++ Handle *appendix_result, ++ Handle *method_type_result, + TRAPS) { + methodHandle empty; + assert(EnableInvokeDynamic, ""); +@@ -2464,6 +2465,7 @@ + vmSymbols::linkMethod_signature(), + &args, CHECK_(empty)); + Handle mname(THREAD, (oop) result.get_jobject()); ++ (*method_type_result) = method_type; + return unpack_method_and_appendix(mname, appendix_box, appendix_result, THREAD); + } + +@@ -2610,7 +2612,8 @@ + Handle bootstrap_specifier, + Symbol* name, + Symbol* type, +- Handle* appendix_result, ++ Handle *appendix_result, ++ Handle *method_type_result, + TRAPS) { + methodHandle 
empty; + Handle bsm, info; +@@ -2653,6 +2656,7 @@ + vmSymbols::linkCallSite_signature(), + &args, CHECK_(empty)); + Handle mname(THREAD, (oop) result.get_jobject()); ++ (*method_type_result) = method_type; + return unpack_method_and_appendix(mname, appendix_box, appendix_result, THREAD); + } + +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/classfile/systemDictionary.hpp +--- openjdk/hotspot/src/share/vm/classfile/systemDictionary.hpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/classfile/systemDictionary.hpp Fri Feb 07 21:17:40 2014 +0000 +@@ -488,6 +488,7 @@ + Symbol* signature, + KlassHandle accessing_klass, + Handle *appendix_result, ++ Handle *method_type_result, + TRAPS); + // for a given signature, find the internal MethodHandle method (linkTo* or invokeBasic) + // (does not ask Java, since this is a low-level intrinsic defined by the JVM) +@@ -514,6 +515,7 @@ + Symbol* name, + Symbol* type, + Handle *appendix_result, ++ Handle *method_type_result, + TRAPS); + + // Utility for printing loader "name" as part of tracing constraints +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/interpreter/bytecodeInterpreter.cpp +--- openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Fri Feb 07 21:17:40 2014 +0000 +@@ -2216,12 +2216,11 @@ + methodOop method = cache->f1_as_method(); + VERIFY_OOP(method); + +- /** Re-enabled in 7200949 + if (cache->has_appendix()) { + constantPoolOop constants = METHOD->constants(); + SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); + MORE_STACK(1); +- } **/ ++ } + + istate->set_msg(call_method); + istate->set_callee(method); +@@ -2249,12 +2248,11 @@ + + VERIFY_OOP(method); + +- /** Re-enabled in 7200949 + if (cache->has_appendix()) { + constantPoolOop constants = METHOD->constants(); + SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); + MORE_STACK(1); +- } **/ ++ } + + 
istate->set_msg(call_method); + istate->set_callee(method); +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/interpreter/interpreterRuntime.cpp +--- openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Fri Feb 07 21:17:40 2014 +0000 +@@ -764,7 +764,8 @@ + cache_entry(thread)->set_method_handle( + pool, + info.resolved_method(), +- info.resolved_appendix()); ++ info.resolved_appendix(), ++ info.resolved_method_type()); + } + IRT_END + +@@ -791,7 +792,8 @@ + pool->cache()->secondary_entry_at(index)->set_dynamic_call( + pool, + info.resolved_method(), +- info.resolved_appendix()); ++ info.resolved_appendix(), ++ info.resolved_method_type()); + } + IRT_END + +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/interpreter/linkResolver.cpp +--- openjdk/hotspot/src/share/vm/interpreter/linkResolver.cpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/interpreter/linkResolver.cpp Fri Feb 07 21:17:40 2014 +0000 +@@ -99,7 +99,7 @@ + assert(!resolved_method->is_compiled_lambda_form(), "these must be handled via an invokehandle call"); + } + +-void CallInfo::set_handle(methodHandle resolved_method, Handle resolved_appendix, TRAPS) { ++void CallInfo::set_handle(methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS) { + if (resolved_method.is_null()) { + THROW_MSG(vmSymbols::java_lang_InternalError(), "resolved method is null"); + } +@@ -110,7 +110,8 @@ + int vtable_index = methodOopDesc::nonvirtual_vtable_index; + assert(resolved_method->vtable_index() == vtable_index, ""); + set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK); +- _resolved_appendix = resolved_appendix; ++ _resolved_appendix = resolved_appendix; ++ _resolved_method_type = resolved_method_type; + } + + void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle 
resolved_method, methodHandle selected_method, int vtable_index, TRAPS) { +@@ -221,7 +222,8 @@ + void LinkResolver::lookup_polymorphic_method(methodHandle& result, + KlassHandle klass, Symbol* name, Symbol* full_signature, + KlassHandle current_klass, +- Handle* appendix_result_or_null, ++ Handle *appendix_result_or_null, ++ Handle *method_type_result, + TRAPS) { + vmIntrinsics::ID iid = MethodHandles::signature_polymorphic_name_id(name); + if (TraceMethodHandles) { +@@ -275,10 +277,12 @@ + } + + Handle appendix; ++ Handle method_type; + result = SystemDictionary::find_method_handle_invoker(name, + full_signature, + current_klass, + &appendix, ++ &method_type, + CHECK); + if (TraceMethodHandles) { + tty->print("lookup_polymorphic_method => (via Java) "); +@@ -307,6 +311,7 @@ + + assert(appendix_result_or_null != NULL, ""); + (*appendix_result_or_null) = appendix; ++ (*method_type_result) = method_type; + return; + } + } +@@ -419,7 +424,7 @@ + if (resolved_method.is_null()) { + // JSR 292: see if this is an implicitly generated method MethodHandle.linkToVirtual(*...), etc + lookup_polymorphic_method(resolved_method, resolved_klass, method_name, method_signature, +- current_klass, (Handle*)NULL, THREAD); ++ current_klass, (Handle*)NULL, (Handle*)NULL, THREAD); + if (HAS_PENDING_EXCEPTION) { + nested_exception = Handle(THREAD, PENDING_EXCEPTION); + CLEAR_PENDING_EXCEPTION; +@@ -1207,11 +1212,12 @@ + assert(resolved_klass() == SystemDictionary::MethodHandle_klass(), ""); + assert(MethodHandles::is_signature_polymorphic_name(method_name), ""); + methodHandle resolved_method; +- Handle resolved_appendix; ++ Handle resolved_appendix; ++ Handle resolved_method_type; + lookup_polymorphic_method(resolved_method, resolved_klass, + method_name, method_signature, +- current_klass, &resolved_appendix, CHECK); +- result.set_handle(resolved_method, resolved_appendix, CHECK); ++ current_klass, &resolved_appendix, &resolved_method_type, CHECK); ++ result.set_handle(resolved_method, 
resolved_appendix, resolved_method_type, CHECK); + } + + +@@ -1219,7 +1225,7 @@ + assert(EnableInvokeDynamic, ""); + pool->set_invokedynamic(); // mark header to flag active call sites + +- //resolve_pool(, method_name, method_signature, current_klass, pool, index, CHECK); ++ //resolve_pool(, method_name, method_signature, current_klass, pool, index, CHECK); + Symbol* method_name = pool->name_ref_at(index); + Symbol* method_signature = pool->signature_ref_at(index); + KlassHandle current_klass = KlassHandle(THREAD, pool->pool_holder()); +@@ -1236,9 +1242,10 @@ + bootstrap_specifier = Handle(THREAD, bsm_info); + } + if (!cpce->is_f1_null()) { +- methodHandle method(THREAD, cpce->f2_as_vfinal_method()); +- Handle appendix(THREAD, cpce->has_appendix() ? cpce->f1_appendix() : (oop)NULL); +- result.set_handle(method, appendix, CHECK); ++ methodHandle method( THREAD, cpce->f2_as_vfinal_method()); ++ Handle appendix( THREAD, cpce->appendix_if_resolved(pool)); ++ Handle method_type(THREAD, cpce->method_type_if_resolved(pool)); ++ result.set_handle(method, appendix, method_type, CHECK); + return; + } + +@@ -1260,11 +1267,13 @@ + // JSR 292: this must resolve to an implicitly generated method MH.linkToCallSite(*...) + // The appendix argument is likely to be a freshly-created CallSite. 
+ Handle resolved_appendix; ++ Handle resolved_method_type; + methodHandle resolved_method = + SystemDictionary::find_dynamic_call_site_invoker(current_klass, + bootstrap_specifier, + method_name, method_signature, + &resolved_appendix, ++ &resolved_method_type, + CHECK); + if (HAS_PENDING_EXCEPTION) { + if (TraceMethodHandles) { +@@ -1285,7 +1294,7 @@ + THROW_MSG_CAUSE(vmSymbols::java_lang_BootstrapMethodError(), + "BootstrapMethodError", nested_exception) + } +- result.set_handle(resolved_method, resolved_appendix, CHECK); ++ result.set_handle(resolved_method, resolved_appendix, resolved_method_type, CHECK); + } + + //------------------------------------------------------------------------------------------------------------------------ +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/interpreter/linkResolver.hpp +--- openjdk/hotspot/src/share/vm/interpreter/linkResolver.hpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/interpreter/linkResolver.hpp Fri Feb 07 21:17:40 2014 +0000 +@@ -76,12 +76,13 @@ + methodHandle _selected_method; // dynamic (actual) target method + int _vtable_index; // vtable index of selected method + Handle _resolved_appendix; // extra argument in constant pool (if CPCE::has_appendix) ++ Handle _resolved_method_type; // MethodType (for invokedynamic and invokehandle call sites) + +- void set_static( KlassHandle resolved_klass, methodHandle resolved_method , TRAPS); +- void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method , TRAPS); +- void set_virtual( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS); +- void set_handle( methodHandle resolved_method, Handle resolved_appendix, TRAPS); +- void set_common( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS); ++ void 
set_static( KlassHandle resolved_klass, methodHandle resolved_method , TRAPS); ++ void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method , TRAPS); ++ void set_virtual( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index , TRAPS); ++ void set_handle( methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS); ++ void set_common( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index , TRAPS); + + friend class LinkResolver; + +@@ -91,6 +92,7 @@ + methodHandle resolved_method() const { return _resolved_method; } + methodHandle selected_method() const { return _selected_method; } + Handle resolved_appendix() const { return _resolved_appendix; } ++ Handle resolved_method_type() const { return _resolved_method_type; } + + BasicType result_type() const { return selected_method()->result_type(); } + bool has_vtable_index() const { return _vtable_index >= 0; } +@@ -113,7 +115,7 @@ + static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); + static void lookup_method_in_interfaces (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); + static void lookup_polymorphic_method (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, +- KlassHandle current_klass, Handle* appendix_result_or_null, TRAPS); ++ KlassHandle current_klass, Handle *appendix_result_or_null, Handle *method_type_result, TRAPS); + + static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); + +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/interpreter/rewriter.cpp +--- openjdk/hotspot/src/share/vm/interpreter/rewriter.cpp Thu Feb 06 14:33:44 2014 +0000 ++++ 
openjdk/hotspot/src/share/vm/interpreter/rewriter.cpp Fri Feb 07 21:17:40 2014 +0000 +@@ -163,10 +163,14 @@ + if (status == 0) { + if (_pool->klass_ref_at_noresolve(cp_index) == vmSymbols::java_lang_invoke_MethodHandle() && + MethodHandles::is_signature_polymorphic_name(SystemDictionary::MethodHandle_klass(), +- _pool->name_ref_at(cp_index))) ++ _pool->name_ref_at(cp_index))) { ++ assert(has_cp_cache(cp_index), "should already have an entry"); ++ int cpc = maybe_add_cp_cache_entry(cp_index); // should already have an entry ++ int cpc2 = add_secondary_cp_cache_entry(cpc); + status = +1; +- else ++ } else { + status = -1; ++ } + _method_handle_invokers[cp_index] = status; + } + // We use a special internal bytecode for such methods (if non-static). +@@ -195,6 +199,10 @@ + int cp_index = Bytes::get_Java_u2(p); + int cpc = maybe_add_cp_cache_entry(cp_index); // add lazily + int cpc2 = add_secondary_cp_cache_entry(cpc); ++ // The second secondary entry is required to store the MethodType and ++ // must be the next entry. ++ int cpc3 = add_secondary_cp_cache_entry(cpc); ++ assert(cpc2 + 1 == cpc3, err_msg_res("must be consecutive: %d + 1 == %d", cpc2, cpc3)); + + // Replace the trailing four bytes with a CPC index for the dynamic + // call site. 
Unlike other CPC entries, there is one per bytecode, +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/oops/constantPoolOop.cpp +--- openjdk/hotspot/src/share/vm/oops/constantPoolOop.cpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/oops/constantPoolOop.cpp Fri Feb 07 21:17:40 2014 +0000 +@@ -270,13 +270,7 @@ + int which) { + assert(!constantPoolCacheOopDesc::is_secondary_index(which), "no indy instruction here"); + if (cpool->cache() == NULL) return NULL; // nothing to load yet +- int cache_index = get_cpcache_index(which); +- if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) { +- if (PrintMiscellaneous && (Verbose||WizardMode)) { +- tty->print_cr("bad operand %d in:", which); cpool->print(); +- } +- return NULL; +- } ++ int cache_index = decode_cpcache_index(which, true); + ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); + return e->method_if_resolved(cpool); + } +@@ -284,44 +278,33 @@ + + bool constantPoolOopDesc::has_appendix_at_if_loaded(constantPoolHandle cpool, int which) { + if (cpool->cache() == NULL) return false; // nothing to load yet +- // XXX Is there a simpler way to get to the secondary entry? 
+- ConstantPoolCacheEntry* e; +- if (constantPoolCacheOopDesc::is_secondary_index(which)) { +- e = cpool->cache()->secondary_entry_at(which); +- } else { +- int cache_index = get_cpcache_index(which); +- if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) { +- if (PrintMiscellaneous && (Verbose||WizardMode)) { +- tty->print_cr("bad operand %d in:", which); cpool->print(); +- } +- return false; +- } +- e = cpool->cache()->entry_at(cache_index); +- } ++ int cache_index = decode_cpcache_index(which, true); ++ ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); + return e->has_appendix(); + } + + + oop constantPoolOopDesc::appendix_at_if_loaded(constantPoolHandle cpool, int which) { + if (cpool->cache() == NULL) return NULL; // nothing to load yet +- // XXX Is there a simpler way to get to the secondary entry? +- ConstantPoolCacheEntry* e; +- if (constantPoolCacheOopDesc::is_secondary_index(which)) { +- e = cpool->cache()->secondary_entry_at(which); +- } else { +- int cache_index = get_cpcache_index(which); +- if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) { +- if (PrintMiscellaneous && (Verbose||WizardMode)) { +- tty->print_cr("bad operand %d in:", which); cpool->print(); +- } +- return NULL; +- } +- e = cpool->cache()->entry_at(cache_index); +- } +- if (!e->has_appendix()) { +- return NULL; +- } +- return e->f1_as_instance(); ++ int cache_index = decode_cpcache_index(which, true); ++ ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); ++ return e->appendix_if_resolved(cpool); ++} ++ ++ ++bool constantPoolOopDesc::has_method_type_at_if_loaded(constantPoolHandle cpool, int which) { ++ if (cpool->cache() == NULL) return false; // nothing to load yet ++ int cache_index = decode_cpcache_index(which, true); ++ ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); ++ return e->has_method_type(); ++} ++ ++oop constantPoolOopDesc::method_type_at_if_loaded(constantPoolHandle cpool, int which) { ++ if 
(cpool->cache() == NULL) return NULL; // nothing to load yet ++ int cache_index = decode_cpcache_index(which, true); ++ ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); // get next CPC entry ++ ConstantPoolCacheEntry* e2 = cpool->cache()->find_secondary_entry_for(e); ++ return e2->method_type_if_resolved(cpool); + } + + +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/oops/constantPoolOop.hpp +--- openjdk/hotspot/src/share/vm/oops/constantPoolOop.hpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/oops/constantPoolOop.hpp Fri Feb 07 21:17:40 2014 +0000 +@@ -674,6 +674,8 @@ + static methodOop method_at_if_loaded (constantPoolHandle this_oop, int which); + static bool has_appendix_at_if_loaded (constantPoolHandle this_oop, int which); + static oop appendix_at_if_loaded (constantPoolHandle this_oop, int which); ++ static bool has_method_type_at_if_loaded (constantPoolHandle this_oop, int which); ++ static oop method_type_at_if_loaded (constantPoolHandle this_oop, int which); + static klassOop klass_at_if_loaded (constantPoolHandle this_oop, int which); + static klassOop klass_ref_at_if_loaded (constantPoolHandle this_oop, int which); + // Same as above - but does LinkResolving. 
+@@ -704,6 +706,12 @@ + #endif //ASSERT + + static int get_cpcache_index(int index) { return index - CPCACHE_INDEX_TAG; } ++ static int decode_cpcache_index(int raw_index, bool invokedynamic_ok = false) { ++ if (invokedynamic_ok && constantPoolCacheOopDesc::is_secondary_index(raw_index)) ++ return constantPoolCacheOopDesc::decode_secondary_index(raw_index); ++ else ++ return get_cpcache_index(raw_index); ++ } + + private: + +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/oops/cpCacheOop.cpp +--- openjdk/hotspot/src/share/vm/oops/cpCacheOop.cpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/oops/cpCacheOop.cpp Fri Feb 07 21:17:40 2014 +0000 +@@ -266,21 +266,23 @@ + + + void ConstantPoolCacheEntry::set_method_handle(constantPoolHandle cpool, +- methodHandle adapter, Handle appendix) { ++ methodHandle adapter, ++ Handle appendix, Handle method_type) { + assert(!is_secondary_entry(), ""); +- set_method_handle_common(cpool, Bytecodes::_invokehandle, adapter, appendix); ++ set_method_handle_common(cpool, Bytecodes::_invokehandle, adapter, appendix, method_type); + } + + void ConstantPoolCacheEntry::set_dynamic_call(constantPoolHandle cpool, +- methodHandle adapter, Handle appendix) { ++ methodHandle adapter, ++ Handle appendix, Handle method_type) { + assert(is_secondary_entry(), ""); +- set_method_handle_common(cpool, Bytecodes::_invokedynamic, adapter, appendix); ++ set_method_handle_common(cpool, Bytecodes::_invokedynamic, adapter, appendix, method_type); + } + + void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool, + Bytecodes::Code invoke_code, + methodHandle adapter, +- Handle appendix) { ++ Handle appendix, Handle method_type) { + // NOTE: This CPCE can be the subject of data races. + // There are three words to update: flags, f2, f1 (in that order). + // Writers must store all other values before f1. 
+@@ -296,23 +298,28 @@ + return; + } + +- bool has_appendix = appendix.not_null(); ++ const bool has_appendix = appendix.not_null(); ++ const bool has_method_type = method_type.not_null(); ++ + if (!has_appendix) { + // The extra argument is not used, but we need a non-null value to signify linkage state. + // Set it to something benign that will never leak memory. + appendix = Universe::void_mirror(); + } + ++ // Write the flags. + set_method_flags(as_TosState(adapter->result_type()), +- ((has_appendix ? 1 : 0) << has_appendix_shift) | +- ( 1 << is_vfinal_shift) | +- ( 1 << is_final_shift), ++ ((has_appendix ? 1 : 0) << has_appendix_shift) | ++ ((has_method_type ? 1 : 0) << has_method_type_shift) | ++ ( 1 << is_vfinal_shift) | ++ ( 1 << is_final_shift), + adapter->size_of_parameters()); + + if (TraceInvokeDynamic) { +- tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method="PTR_FORMAT" ", ++ tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ", + invoke_code, +- (intptr_t)appendix(), (has_appendix ? "" : " (unused)"), ++ (intptr_t)appendix(), (has_appendix ? "" : " (unused)"), ++ (intptr_t)method_type(), (has_method_type ? "" : " (unused)"), + (intptr_t)adapter()); + adapter->print(); + if (has_appendix) appendix()->print(); +@@ -336,14 +343,31 @@ + // The fact that String and List are involved is encoded in the MethodType in f1. + // This allows us to create fewer method oops, while keeping type safety. + // ++ + set_f2_as_vfinal_method(adapter()); ++ ++ // Store MethodType, if any. ++ if (has_method_type) { ++ ConstantPoolCacheEntry* e2 = cpool->cache()->find_secondary_entry_for(this); ++ ++ // Write the flags. ++ e2->set_method_flags(as_TosState(adapter->result_type()), ++ ((has_method_type ? 
1 : 0) << has_method_type_shift) | ++ ( 1 << is_vfinal_shift) | ++ ( 1 << is_final_shift), ++ adapter->size_of_parameters()); ++ e2->release_set_f1(method_type()); ++ } ++ + assert(appendix.not_null(), "needed for linkage state"); + release_set_f1(appendix()); // This must be the last one to set (see NOTE above)! ++ + if (!is_secondary_entry()) { + // The interpreter assembly code does not check byte_2, + // but it is used by is_resolved, method_if_resolved, etc. + set_bytecode_2(invoke_code); + } ++ + NOT_PRODUCT(verify(tty)); + if (TraceInvokeDynamic) { + this->print(tty, 0); +@@ -401,6 +425,20 @@ + } + + ++oop ConstantPoolCacheEntry::appendix_if_resolved(constantPoolHandle cpool) { ++ if (is_f1_null() || !has_appendix()) ++ return NULL; ++ return f1_appendix(); ++} ++ ++ ++oop ConstantPoolCacheEntry::method_type_if_resolved(constantPoolHandle cpool) { ++ if (is_f1_null() || !has_method_type()) ++ return NULL; ++ return f1_as_instance(); ++} ++ ++ + class LocalOopClosure: public OopClosure { + private: + void (*_f)(oop*); +diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/oops/cpCacheOop.hpp +--- openjdk/hotspot/src/share/vm/oops/cpCacheOop.hpp Thu Feb 06 14:33:44 2014 +0000 ++++ openjdk/hotspot/src/share/vm/oops/cpCacheOop.hpp Fri Feb 07 21:17:40 2014 +0000 +@@ -167,10 +167,11 @@ + tos_state_mask = right_n_bits(tos_state_bits), + tos_state_shift = BitsPerInt - tos_state_bits, // see verify_tos_state_shift below + // misc. 
option bits; can be any bit position in [16..27] +- is_vfinal_shift = 21, +- is_volatile_shift = 22, +- is_final_shift = 23, +- has_appendix_shift = 24, ++ is_vfinal_shift = 20, ++ is_volatile_shift = 21, ++ is_final_shift = 22, ++ has_appendix_shift = 23, ++ has_method_type_shift = 24, + is_forced_virtual_shift = 25, + is_field_entry_shift = 26, + // low order bits give field index (for FieldInfo) or method parameter size: +@@ -224,13 +225,15 @@ + void set_method_handle( + constantPoolHandle cpool, // holding constant pool (required for locking) + methodHandle method, // adapter for invokeExact, etc. +- Handle appendix // stored in f1; could be a java.lang.invoke.MethodType ++ Handle appendix, // stored in f1; could be a java.lang.invoke.MethodType ++ Handle method_type // stored in f1 (of secondary entry); is a java.lang.invoke.MethodType + ); + + void set_dynamic_call( + constantPoolHandle cpool, // holding constant pool (required for locking) + methodHandle method, // adapter for this call site +- Handle appendix // stored in f1; could be a java.lang.invoke.CallSite ++ Handle appendix, // stored in f1; could be a java.lang.invoke.CallSite ++ Handle method_type // stored in f1 (of secondary entry); is a java.lang.invoke.MethodType + ); + + // Common code for invokedynamic and MH invocations. +@@ -252,10 +255,13 @@ + constantPoolHandle cpool, // holding constant pool (required for locking) + Bytecodes::Code invoke_code, // _invokehandle or _invokedynamic + methodHandle adapter, // invoker method (f2) +- Handle appendix // appendix such as CallSite, MethodType, etc. (f1) ++ Handle appendix, // appendix such as CallSite, MethodType, etc. 
(f1) ++ Handle method_type // MethodType (f1 of secondary entry) + ); + +- methodOop method_if_resolved(constantPoolHandle cpool); ++ methodOop method_if_resolved(constantPoolHandle cpool); ++ oop appendix_if_resolved(constantPoolHandle cpool); ++ oop method_type_if_resolved(constantPoolHandle cpool); + + void set_parameter_size(int value); + +@@ -267,11 +273,11 @@ + case Bytecodes::_getfield : // fall through + case Bytecodes::_invokespecial : // fall through + case Bytecodes::_invokestatic : // fall through ++ case Bytecodes::_invokehandle : // fall through ++ case Bytecodes::_invokedynamic : // fall through + case Bytecodes::_invokeinterface : return 1; + case Bytecodes::_putstatic : // fall through + case Bytecodes::_putfield : // fall through +- case Bytecodes::_invokehandle : // fall through +- case Bytecodes::_invokedynamic : // fall through + case Bytecodes::_invokevirtual : return 2; + default : break; + } +@@ -310,7 +316,8 @@ + int parameter_size() const { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); } + bool is_volatile() const { return (_flags & (1 << is_volatile_shift)) != 0; } + bool is_final() const { return (_flags & (1 << is_final_shift)) != 0; } +- bool has_appendix() const { return (_flags & (1 << has_appendix_shift)) != 0; } ++ bool has_appendix() const { return (_flags & (1 << has_appendix_shift)) != 0; } ++ bool has_method_type() const { return (_flags & (1 << has_method_type_shift)) != 0; } + bool is_forced_virtual() const { return (_flags & (1 << is_forced_virtual_shift)) != 0; } + bool is_vfinal() const { return (_flags & (1 << is_vfinal_shift)) != 0; } + bool is_method_entry() const { return (_flags & (1 << is_field_entry_shift)) == 0; } +@@ -446,6 +453,29 @@ + return entry_at(primary_index); + } + ++ int index_of(ConstantPoolCacheEntry* e) { ++ assert(base() <= e && e < base() + length(), "oob"); ++ int cpc_index = (e - base()); ++ assert(entry_at(cpc_index) == e, "sanity"); ++ return cpc_index; ++ } ++ 
ConstantPoolCacheEntry* find_secondary_entry_for(ConstantPoolCacheEntry* e) { ++ const int cpc_index = index_of(e); ++ if (e->is_secondary_entry()) { ++ ConstantPoolCacheEntry* e2 = entry_at(cpc_index + 1); ++ assert(e->main_entry_index() == e2->main_entry_index(), ""); ++ return e2; ++ } else { ++ for (int i = length() - 1; i >= 0; i--) { ++ ConstantPoolCacheEntry* e2 = entry_at(i); ++ if (cpc_index == e2->main_entry_index()) ++ return e2; ++ } ++ } ++ fatal("no secondary entry found"); ++ return NULL; ++ } ++ + // Code generation + static ByteSize base_offset() { return in_ByteSize(sizeof(constantPoolCacheOopDesc)); } + static ByteSize entry_offset(int raw_index) { diff -r 531847dfec6f -r ed2108ad126a patches/jsr292/8029507-jvm_method_processing.patch --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/patches/jsr292/8029507-jvm_method_processing.patch Thu Mar 27 04:19:17 2014 +0000 @@ -0,0 +1,215 @@ +diff -r a66016d23db1 src/share/vm/prims/methodHandles.cpp +--- openjdk/hotspot/src/share/vm/prims/methodHandles.cpp Fri Feb 07 21:17:40 2014 +0000 ++++ openjdk/hotspot/src/share/vm/prims/methodHandles.cpp Mon Feb 24 20:42:09 2014 +0000 +@@ -175,30 +175,32 @@ + } + + oop MethodHandles::init_method_MemberName(oop mname_oop, methodOop m, bool do_dispatch, +- klassOop receiver_limit) { ++ klassOop resolved_klass) { + AccessFlags mods = m->access_flags(); + int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS ); + int vmindex = methodOopDesc::nonvirtual_vtable_index; // implies never any dispatch +- klassOop mklass = m->method_holder(); +- if (receiver_limit == NULL) +- receiver_limit = mklass; ++ bool is_itable_call = false; ++ klassOop m_klass = m->method_holder(); ++ // resolved_klass is a copy of CallInfo::resolved_klass, if available ++ if (resolved_klass == NULL) ++ resolved_klass = m_klass; + if (m->is_initializer()) { + flags |= IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT); + } else if (mods.is_static()) { + flags |= 
IS_METHOD | (JVM_REF_invokeStatic << REFERENCE_KIND_SHIFT); +- } else if (receiver_limit != mklass && +- !Klass::cast(receiver_limit)->is_subtype_of(mklass)) { ++ } else if (resolved_klass != m_klass && ++ !Klass::cast(resolved_klass)->is_subtype_of(m_klass)) { + return NULL; // bad receiver limit +- } else if (Klass::cast(receiver_limit)->is_interface() && +- Klass::cast(mklass)->is_interface()) { ++ } else if (Klass::cast(resolved_klass)->is_interface() && ++ Klass::cast(m_klass)->is_interface()) { + flags |= IS_METHOD | (JVM_REF_invokeInterface << REFERENCE_KIND_SHIFT); +- receiver_limit = mklass; // ignore passed-in limit; interfaces are interconvertible + vmindex = klassItable::compute_itable_index(m); +- } else if (mklass != receiver_limit && Klass::cast(mklass)->is_interface()) { ++ is_itable_call = true; ++ } else if (m_klass != resolved_klass && Klass::cast(m_klass)->is_interface()) { + flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT); + // it is a miranda method, so m->vtable_index is not what we want + ResourceMark rm; +- klassVtable* vt = instanceKlass::cast(receiver_limit)->vtable(); ++ klassVtable* vt = instanceKlass::cast(resolved_klass)->vtable(); + vmindex = vt->index_of_miranda(m->name(), m->signature()); + } else if (!do_dispatch || m->can_be_statically_bound()) { + flags |= IS_METHOD | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT); +@@ -207,10 +209,36 @@ + vmindex = m->vtable_index(); + } + ++ if (vmindex >= 0 && !is_itable_call) { ++ if (Klass::cast(m_klass)->is_interface()) { ++ // This is a vtable call to an interface method (abstract "miranda method"). ++ // The vtable index is meaningless without a class (not interface) receiver type, so get one. ++ // (LinkResolver should help us figure this out.) 
++ KlassHandle m_klass_non_interface = resolved_klass; ++ if (m_klass_non_interface->is_interface()) { ++ m_klass_non_interface = SystemDictionary::Object_klass(); ++#ifdef ASSERT ++ { ResourceMark rm; ++ methodOop m2 = m_klass_non_interface->vtable()->method_at(vmindex); ++ assert(m->name() == m2->name() && m->signature() == m2->signature(), ++ err_msg("at %d, %s != %s", vmindex, ++ m->name_and_sig_as_C_string(), m2->name_and_sig_as_C_string())); ++ } ++#endif //ASSERT ++ } ++ if (!m->is_public()) { ++ assert(m->is_public(), "virtual call must be to public interface method"); ++ return NULL; // elicit an error later in product build ++ } ++ assert(Klass::cast(resolved_klass)->is_subtype_of(m_klass_non_interface()), "virtual call must be type-safe"); ++ m_klass = m_klass_non_interface(); ++ } ++ } ++ + java_lang_invoke_MemberName::set_flags(mname_oop, flags); + java_lang_invoke_MemberName::set_vmtarget(mname_oop, m); + java_lang_invoke_MemberName::set_vmindex(mname_oop, vmindex); // vtable/itable index +- java_lang_invoke_MemberName::set_clazz(mname_oop, Klass::cast(receiver_limit)->java_mirror()); ++ java_lang_invoke_MemberName::set_clazz(mname_oop, Klass::cast(m_klass)->java_mirror()); + // Note: name and type can be lazily computed by resolve_MemberName, + // if Java code needs them as resolved String and MethodType objects. + // The clazz must be eagerly stored, because it provides a GC +@@ -580,7 +608,7 @@ + // An unresolved member name is a mere symbolic reference. + // Resolving it plants a vmtarget/vmindex in it, + // which refers dirctly to JVM internals. 
+-Handle MethodHandles::resolve_MemberName(Handle mname, TRAPS) { ++Handle MethodHandles::resolve_MemberName(Handle mname, KlassHandle caller, TRAPS) { + Handle empty; + assert(java_lang_invoke_MemberName::is_instance(mname()), ""); + +@@ -659,21 +687,49 @@ + if (ref_kind == JVM_REF_invokeStatic) { + //do_dispatch = false; // no need, since statics are never dispatched + LinkResolver::resolve_static_call(result, +- defc, name, type, KlassHandle(), false, false, THREAD); ++ defc, name, type, caller, caller.not_null(), false, THREAD); + } else if (ref_kind == JVM_REF_invokeInterface) { + LinkResolver::resolve_interface_call(result, Handle(), defc, +- defc, name, type, KlassHandle(), false, false, THREAD); ++ defc, name, type, caller, caller.not_null(), false, THREAD); + } else if (mh_invoke_id != vmIntrinsics::_none) { + assert(!is_signature_polymorphic_static(mh_invoke_id), ""); + LinkResolver::resolve_handle_call(result, +- defc, name, type, KlassHandle(), THREAD); ++ defc, name, type, caller, THREAD); + } else if (ref_kind == JVM_REF_invokeSpecial) { + do_dispatch = false; // force non-virtual linkage + LinkResolver::resolve_special_call(result, +- defc, name, type, KlassHandle(), false, THREAD); ++ defc, name, type, caller, caller.not_null(), THREAD); ++ // CR 8029533: ++ // As a corner case, invokespecial can return a method *below* its resolved_klass. ++ // Since method search *starts* at the resolved_klass, the eventual ++ // method is almost always in a supertype *above* the resolved_klass. ++ // This pattern breaks when an invokespecial "over-reaches" beyond an ++ // immediate super to a method overridden by a super class. ++ // In that case, the selected method will be below the resolved_klass. ++ // (This is the behavior enabled by the famous ACC_SUPER classfile flag.) ++ // ++ // Downstream of this code, we make assumptions about resolved_klass being below m. ++ // (See init_method_MemberName, the comment "bad receiver limit".) 
++ // We basically want to patch result._resolved_klass to be m.method_holder(). ++ // The simplest way to get this happier outcome is to re-resolve. ++ if (!HAS_PENDING_EXCEPTION && ++ caller.not_null() && ++ result.resolved_method().not_null()) { ++ // this is the m_klass value that will be checked later: ++ klassOop m_klass = result.resolved_method()->method_holder(); ++ if (m_klass != result.resolved_klass()() && ++ Klass::cast(m_klass)->is_subtype_of(result.resolved_klass()())) { ++ KlassHandle adjusted_defc(THREAD, m_klass); ++ LinkResolver::resolve_special_call(result, ++ adjusted_defc, name, type, caller, caller.not_null(), THREAD); ++ assert(HAS_PENDING_EXCEPTION // if there is something like an OOM, pass it up to caller ++ || result.resolved_method()->method_holder() == adjusted_defc(), ++ "same method, different resolved_klass"); ++ } ++ } + } else if (ref_kind == JVM_REF_invokeVirtual) { + LinkResolver::resolve_virtual_call(result, Handle(), defc, +- defc, name, type, KlassHandle(), false, false, THREAD); ++ defc, name, type, caller, caller.not_null(), false, THREAD); + } else { + assert(false, err_msg("ref_kind=%d", ref_kind)); + } +@@ -700,7 +756,7 @@ + assert(!HAS_PENDING_EXCEPTION, ""); + if (name == vmSymbols::object_initializer_name()) { + LinkResolver::resolve_special_call(result, +- defc, name, type, KlassHandle(), false, THREAD); ++ defc, name, type, caller, caller.not_null(), THREAD); + } else { + break; // will throw after end of switch + } +@@ -1044,7 +1100,12 @@ + if (VerifyMethodHandles && caller_jh != NULL && + java_lang_invoke_MemberName::clazz(mname()) != NULL) { + klassOop reference_klass = java_lang_Class::as_klassOop(java_lang_invoke_MemberName::clazz(mname())); +- if (reference_klass != NULL) { ++ if (reference_klass != NULL && Klass::cast(reference_klass)->oop_is_objArray()) { ++ reference_klass = objArrayKlass::cast(reference_klass)->bottom_klass(); ++ } ++ ++ // Reflection::verify_class_access can only handle instance classes. 
++ if (reference_klass != NULL && Klass::cast(reference_klass)->oop_is_instance()) { + // Emulate LinkResolver::check_klass_accessability. + klassOop caller = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(caller_jh)); + if (!Reflection::verify_class_access(caller, +@@ -1055,7 +1116,11 @@ + } + } + +- Handle resolved = MethodHandles::resolve_MemberName(mname, CHECK_NULL); ++ KlassHandle caller(THREAD, ++ caller_jh == NULL ? (klassOop) NULL : ++ java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(caller_jh))); ++ Handle resolved = MethodHandles::resolve_MemberName(mname, caller, CHECK_NULL); ++ + if (resolved.is_null()) { + int flags = java_lang_invoke_MemberName::flags(mname()); + int ref_kind = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK; +diff -r a66016d23db1 src/share/vm/prims/methodHandles.hpp +--- openjdk/hotspot/src/share/vm/prims/methodHandles.hpp Fri Feb 07 21:17:40 2014 +0000 ++++ openjdk/hotspot/src/share/vm/prims/methodHandles.hpp Mon Feb 24 20:42:09 2014 +0000 +@@ -51,12 +51,12 @@ + + public: + // working with member names +- static Handle resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type ++ static Handle resolve_MemberName(Handle mname, KlassHandle caller, TRAPS); // compute vmtarget/vmindex from name/type + static void expand_MemberName(Handle mname, int suppress, TRAPS); // expand defc/name/type if missing + static Handle new_MemberName(TRAPS); // must be followed by init_MemberName + static oop init_MemberName(oop mname_oop, oop target_oop); // compute vmtarget/vmindex from target + static oop init_method_MemberName(oop mname_oop, methodOop m, bool do_dispatch, +- klassOop receiver_limit); ++ klassOop resolved_klass); + static oop init_field_MemberName(oop mname_oop, klassOop field_holder, + AccessFlags mods, oop type, oop name, + intptr_t offset, bool is_setter = false); +diff -r a66016d23db1 src/share/vm/runtime/reflection.cpp +--- openjdk/hotspot/src/share/vm/runtime/reflection.cpp Fri 
Feb 07 21:17:40 2014 +0000 ++++ openjdk/hotspot/src/share/vm/runtime/reflection.cpp Mon Feb 24 20:42:09 2014 +0000 +@@ -460,7 +460,7 @@ + // doesn't have a classloader. + if ((current_class == NULL) || + (current_class == new_class) || +- (instanceKlass::cast(new_class)->is_public()) || ++ (Klass::cast(new_class)->is_public()) || + is_same_class_package(current_class, new_class)) { + return true; + } diff -r 531847dfec6f -r ed2108ad126a patches/jsr292/arm-7023639.patch --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/patches/jsr292/arm-7023639.patch Thu Mar 27 04:19:17 2014 +0000 @@ -0,0 +1,361 @@ +diff -r ad4c46e70192 src/cpu/zero/vm/asm_helper.cpp +--- openjdk/hotspot/src/cpu/zero/vm/asm_helper.cpp Tue Mar 11 15:45:00 2014 +0000 ++++ openjdk/hotspot/src/cpu/zero/vm/asm_helper.cpp Wed Mar 12 13:34:40 2014 +0000 +@@ -23,6 +23,10 @@ + #define ARCH_VFP (1<<17) + #define ARCH_CLZ (1<<18) + ++/* A workaround for private and protected fields */ ++#define private public ++#define protected public ++ + #include "precompiled.hpp" + #include "asm/assembler.hpp" + #include "interp_masm_zero.hpp" +@@ -33,8 +37,10 @@ + #include "oops/methodDataOop.hpp" + #include "oops/methodOop.hpp" + #include "oops/oop.inline.hpp" ++#include "oops/klassOop.hpp" + #include "prims/jvmtiExport.hpp" + #include "prims/jvmtiThreadState.hpp" ++#include "runtime/frame.hpp" + #include "runtime/deoptimization.hpp" + #include "runtime/frame.inline.hpp" + #include "runtime/sharedRuntime.hpp" +@@ -68,11 +74,22 @@ + // particular method. 
+ #define NAME1 "sun.nio.ch.FileChannelImpl$Unmapper.run()V" + #define EQ(S1, S2) (S1 && (strncmp(S1, S2, strlen(S2)) == 0)) +-extern "C" void my_trace(void *jpc, void *istate) ++extern "C" void my_trace(void *jpc, interpreterState istate) + { +- char *name = meth((interpreterState)istate); +- if (EQ(name, NAME1)); +- asm volatile("nop"); // Somewhere to put a breakpoint ++ JavaThread *jt = istate->thread(); ++ if (jt->zero_stack()->sp() && jt->top_zero_frame()) { ++ bool has_last_Java_frame = jt->has_last_Java_frame(); ++ if (!has_last_Java_frame) ++ jt->set_last_Java_frame(); ++ ++ StackFrameStream sfs(jt); ++ for(int i = 0; !sfs.is_done(); sfs.next(), i++) { ++ } ++ ++ // Reset the frame anchor if necessary ++ if (!has_last_Java_frame) ++ jt->reset_last_Java_frame(); ++ } + } + + extern "C" unsigned hwcap(void) +@@ -603,7 +620,7 @@ + print_def("CONSTANTPOOL_CACHE", offset_of(constantPoolOopDesc, _cache)); + print_def("CONSTANTPOOL_POOL_HOLDER", offset_of(constantPoolOopDesc, _pool_holder)); + print_def("CONSTANTPOOL_BASE", sizeof(constantPoolOopDesc)); +- print_def("CP_CACHE_VOLATILE_FIELD_FLAG_BIT", ConstantPoolCacheEntry::volatileField); ++ print_def("CP_CACHE_VOLATILE_FIELD_FLAG_BIT", ConstantPoolCacheEntry::is_volatile_shift); + print_def("CP_CACHE_FLAGS", offset_of(ConstantPoolCacheEntry, _flags)); + nl(); + print_def("CP_OFFSET", in_bytes(constantPoolCacheOopDesc::base_offset())); +@@ -704,10 +721,10 @@ + print_def("class_fully_initialized", instanceKlass::fully_initialized); + print_def("class_init_error", instanceKlass::initialization_error); + nl(); +- print_def("flag_methodInterface", 1 << ConstantPoolCacheEntry::methodInterface); +- print_def("flag_volatileField", 1 << ConstantPoolCacheEntry::volatileField); +- print_def("flag_vfinalMethod", 1 << ConstantPoolCacheEntry::vfinalMethod); +- print_def("flag_finalField", 1 << ConstantPoolCacheEntry::finalField); ++ print_def("flag_methodInterface", 1 << ConstantPoolCacheEntry::has_method_type_shift); ++ 
print_def("flag_volatileField", 1 << ConstantPoolCacheEntry::is_volatile_shift); ++ print_def("flag_vfinalMethod", 1 << ConstantPoolCacheEntry::is_vfinal_shift); ++ print_def("flag_finalField", 1 << ConstantPoolCacheEntry::is_final_shift); + nl(); + print_def("INVOCATIONCOUNTER_COUNTINCREMENT", InvocationCounter::count_increment); + nl(); +diff -r ad4c46e70192 src/cpu/zero/vm/bytecodes_arm.def +--- openjdk/hotspot/src/cpu/zero/vm/bytecodes_arm.def Tue Mar 11 15:45:00 2014 +0000 ++++ openjdk/hotspot/src/cpu/zero/vm/bytecodes_arm.def Wed Mar 12 13:34:40 2014 +0000 +@@ -1950,7 +1950,7 @@ + str r3, [thread, #THREAD_LAST_JAVA_SP] + ldr r0, [istate, #ISTATE_METHOD] + ldr r3, [r2, #0] +- ldrh r0, [r0, #40] ++ ldrh r0, [r0, #METHOD_MAXLOCALS] + add r1, r2, #4 + str r3, [thread, #THREAD_TOP_ZERO_FRAME] + +@@ -1981,7 +1981,7 @@ + str r3, [thread, #THREAD_LAST_JAVA_SP] + ldr r0, [istate, #ISTATE_METHOD] + ldr r3, [stack, #0] +- ldrh r0, [r0, #40] ++ ldrh r0, [r0, #METHOD_MAXLOCALS] + + str r3, [thread, #THREAD_TOP_ZERO_FRAME] + str r1, [stack, r0, lsl #2]! +@@ -2008,7 +2008,7 @@ + str r3, [thread, #THREAD_LAST_JAVA_SP] + ldr r0, [istate, #ISTATE_METHOD] + ldr r3, [r2, #0] +- ldrh r0, [r0, #40] ++ ldrh r0, [r0, #METHOD_MAXLOCALS] + add r1, r2, #4 + str r3, [thread, #THREAD_TOP_ZERO_FRAME] + +@@ -3993,7 +3996,7 @@ + str r3, [thread, #THREAD_LAST_JAVA_SP] + ldr r0, [istate, #ISTATE_METHOD] + ldr r3, [stack, #0] +- ldrh r0, [r0, #40] ++ ldrh r0, [r0, #METHOD_MAXLOCALS] + str r3, [thread, #THREAD_TOP_ZERO_FRAME] + str r1, [stack, r0, lsl #2]! + +@@ -4022,7 +4025,7 @@ + str r3, [thread, #THREAD_LAST_JAVA_SP] + ldr r0, [istate, #ISTATE_METHOD] + ldr r3, [stack, #0] +- ldrh r0, [r0, #40] ++ ldrh r0, [r0, #METHOD_MAXLOCALS] + str r3, [thread, #THREAD_TOP_ZERO_FRAME] + str r1, [stack, r0, lsl #2]! 
+ +@@ -4429,7 +4432,7 @@ + str r3, [thread, #THREAD_LAST_JAVA_SP] + ldr r0, [istate, #ISTATE_METHOD] + ldr r3, [stack, #0] +- ldrh r0, [r0, #40] ++ ldrh r0, [r0, #METHOD_MAXLOCALS] + str r3, [thread, #THREAD_TOP_ZERO_FRAME] + str r1, [stack, r0, lsl #2]! + +diff -r ad4c46e70192 src/cpu/zero/vm/cppInterpreter_arm.S +--- openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_arm.S Tue Mar 11 15:45:00 2014 +0000 ++++ openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_arm.S Wed Mar 12 13:34:40 2014 +0000 +@@ -3431,7 +3444,7 @@ + str r3, [thread, #THREAD_LAST_JAVA_FP] + ldr r0, [istate, #ISTATE_METHOD] + ldr r3, [r2, #0] +- ldrh r0, [r0, #40] ++ ldrh r0, [r0, #METHOD_MAXLOCALS] + add r1, r2, #4 + str r3, [thread, #THREAD_TOP_ZERO_FRAME] + +@@ -3560,7 +3573,7 @@ + str r3, [thread, #THREAD_LAST_JAVA_SP] + ldr r0, [istate, #ISTATE_METHOD] + ldr r3, [r2, #0] +- ldrh r0, [r0, #40] ++ ldrh r0, [r0, #METHOD_MAXLOCALS] + add r1, r2, #4 + str r3, [thread, #THREAD_TOP_ZERO_FRAME] + +diff -r ad4c46e70192 src/cpu/zero/vm/cppInterpreter_zero.cpp +--- openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Tue Mar 11 15:45:00 2014 +0000 ++++ openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Wed Mar 12 13:34:40 2014 +0000 +@@ -1,6 +1,6 @@ + /* +- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. +- * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc. ++ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2013 Red Hat, Inc. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -66,9 +66,10 @@ + CALL_VM_NOCHECK_NOFIX(func) \ + fixup_after_potential_safepoint() + +- +-#ifdef z_CPPDEBUG ++//#define CPPIDEBUG 1 ++#ifdef CPPIDEBUG + #define CPPINT_DEBUG( Z_code_ ) Z_code_ ++CPPINT_DEBUG ( static const char *FFng_Zero_Flag = "CPPINT_DEBUG_ON\n"; ) + #else + #define CPPINT_DEBUG( Z_code_ ) + #endif +@@ -618,6 +619,25 @@ + return 0; + } + ++int CppInterpreter::method_handle_entry(methodOop method, ++ intptr_t UNUSED, TRAPS) { ++ JavaThread *thread = (JavaThread *) THREAD; ++ ZeroStack *stack = thread->zero_stack(); ++ CPPINT_DEBUG( tty->print_cr( "method_handle : 0x%x , thread: 0x%x , stack: %0x%x.", \ ++ method, thread, stack ); ) ++ ++ return MethodHandles::method_handle_entry_invokeBasic(method, UNUSED, THREAD); ++} ++ ++void CppInterpreter::process_method_handle(oop method_handle, TRAPS) { ++ JavaThread *thread = (JavaThread *) THREAD; ++ ZeroStack *stack = thread->zero_stack(); ++ CPPINT_DEBUG( tty->print_cr( "process_method_handle : 0x%x , thread: 0x%x , stack: %0x%x.", \ ++ method_handle, thread, stack ); ) ++ methodOop method = (methodOop) java_lang_invoke_MemberName::vmtarget(method_handle); ++ MethodHandles::invoke_target(method, THREAD); ++} ++ + // The new slots will be inserted before slot insert_before. + // Slots < insert_before will have the same slot number after the insert. + // Slots >= insert_before will become old_slot + num_slots. +diff -r ad4c46e70192 src/cpu/zero/vm/cppInterpreter_zero.hpp +--- openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_zero.hpp Tue Mar 11 15:45:00 2014 +0000 ++++ openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_zero.hpp Wed Mar 12 13:34:40 2014 +0000 +@@ -1,6 +1,6 @@ + /* +- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. +- * Copyright 2007, 2008, 2010, 2011 Red Hat, Inc. ++ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. ++ * Copyright 2013 Red Hat, Inc. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -37,6 +37,7 @@ + static int accessor_entry(methodOop method, intptr_t UNUSED, TRAPS); + static int empty_entry(methodOop method, intptr_t UNUSED, TRAPS); + static int method_handle_entry(methodOop method, intptr_t UNUSED, TRAPS); ++ static void process_method_handle(oop method_handle, TRAPS); + + public: + // Main loop of normal_entry +@@ -44,7 +45,6 @@ + + private: + // Helpers for method_handle_entry +- static void process_method_handle(oop method_handle, TRAPS); + static void insert_vmslots(int insert_before, int num_slots, TRAPS); + static void remove_vmslots(int first_slot, int num_slots, TRAPS); + static BasicType result_type_of_handle(oop method_handle); +diff -r ad4c46e70192 src/cpu/zero/vm/methodHandles_zero.hpp +--- openjdk/hotspot/src/cpu/zero/vm/methodHandles_zero.hpp Tue Mar 11 15:45:00 2014 +0000 ++++ openjdk/hotspot/src/cpu/zero/vm/methodHandles_zero.hpp Wed Mar 12 13:34:40 2014 +0000 +@@ -1,6 +1,6 @@ + /* +- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. +- * Copyright 2011, 2012 Red Hat, Inc. ++ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. ++ * Copyright 2013 Red Hat, Inc. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -29,10 +29,11 @@ + adapter_code_size = sizeof(ZeroEntry) * (Interpreter::method_handle_invoke_LAST - Interpreter::method_handle_invoke_FIRST + 1) + }; + ++public: ++ static void invoke_target(methodOop method, TRAPS); ++ static int method_handle_entry_invokeBasic(methodOop method, intptr_t UNUSED, TRAPS); + private: + static oop popFromStack(TRAPS); +- static void invoke_target(methodOop method, TRAPS); +- static int method_handle_entry_invokeBasic(methodOop method, intptr_t UNUSED, TRAPS); + static int method_handle_entry_linkToStaticOrSpecial(methodOop method, intptr_t UNUSED, TRAPS); + static int method_handle_entry_linkToVirtual(methodOop method, intptr_t UNUSED, TRAPS); + static int method_handle_entry_linkToInterface(methodOop method, intptr_t UNUSED, TRAPS); +diff -r ad4c46e70192 src/cpu/zero/vm/thumb2.cpp +--- openjdk/hotspot/src/cpu/zero/vm/thumb2.cpp Tue Mar 11 15:45:00 2014 +0000 ++++ openjdk/hotspot/src/cpu/zero/vm/thumb2.cpp Wed Mar 12 13:34:40 2014 +0000 +@@ -430,13 +430,15 @@ + + #ifdef PRODUCT + +-#define JASSERT(cond, msg) 0 ++#define JASSERT(cond, msg) + #define J_Unimplemented() longjmp(compiler_error_env, COMPILER_RESULT_FATAL) ++#define JDEBUG_( _j_ ) + + #else + + #define JASSERT(cond, msg) do { if (!(cond)) fatal(msg); } while (0) + #define J_Unimplemented() { report_unimplemented(__FILE__, __LINE__); BREAKPOINT; } ++#define JDEBUG_( _j_ ) _j_ + + #endif // PRODUCT + +@@ -4571,7 +4573,7 @@ + if (!cache->is_resolved((Bytecodes::Code)opc_getfield)) return 0; + + TosState tos_type = cache->flag_state(); +- int field_offset = cache->f2(); ++ int field_offset = cache->f2_as_index(); + + // Slow entry point - callee save + // R0 = method +@@ -5886,7 +5890,7 @@ + } + + TosState tos_type = cache->flag_state(); +- int field_offset = cache->f2(); ++ int field_offset = cache->f2_as_index(); + + if (tos_type == ltos || tos_type == dtos) { + Reg r_lo, r_hi; +@@ -5949,7 
+5953,8 @@ + } + + TosState tos_type = cache->flag_state(); +- int field_offset = cache->f2(); ++ int field_offset = cache->f2_as_index(); ++ JDEBUG_( tty->print("f2_as_index getstatic %d: %s: %s %d\n", index , name->as_C_string(), sig->as_C_string(), field_offset); ); + + if (tos_type == ltos || tos_type == dtos) { + Reg r_lo, r_hi, r_addr; +@@ -6018,7 +6023,7 @@ + storeBarrier(jinfo->codebuf); + + TosState tos_type = cache->flag_state(); +- int field_offset = cache->f2(); ++ int field_offset = cache->f2_as_index(); + + if (tos_type == ltos || tos_type == dtos) { + Reg r_lo, r_hi; +@@ -6083,7 +6088,7 @@ + storeBarrier(jinfo->codebuf); + + TosState tos_type = cache->flag_state(); +- int field_offset = cache->f2(); ++ int field_offset = cache->f2_as_index(); + Reg r_obj; + + if (tos_type == ltos || tos_type == dtos) { +@@ -6163,7 +6168,7 @@ + break; + } + +- callee = opcode == opc_invokevirtual ? (methodOop)cache->f2() : (methodOop)cache->f1(); ++ callee = opcode == opc_invokevirtual ? (methodOop)cache->f2_as_index() : (methodOop)cache->f1_as_instance(); + + if (opcode != opc_invokevirtual || cache->is_vfinal()) { + if (handle_special_method(callee, jinfo, stackdepth)) +@@ -6181,7 +6186,7 @@ + JASSERT(cache->parameter_size() == 1, "not 1 parameter to accessor"); + + TosState tos_type = entry->flag_state(); +- int field_offset = entry->f2(); ++ int field_offset = entry->f2_as_index(); + + JASSERT(tos_type == btos || tos_type == ctos || tos_type == stos || tos_type == atos || tos_type == itos, "not itos or atos"); + +@@ -6228,7 +6233,7 @@ + ldr_imm(jinfo->codebuf, ARM_R0, ARM_R0, + CP_OFFSET + (index << 4) + (opcode == opc_invokevirtual ? 
8 : 4), 1, 0); + else +- ldr_imm(jinfo->codebuf, ARM_R0, ARM_R3, INSTANCEKLASS_VTABLE_OFFSET + cache->f2() * 4, 1, 0); ++ ldr_imm(jinfo->codebuf, ARM_R0, ARM_R3, INSTANCEKLASS_VTABLE_OFFSET + cache->f2_as_index() * 4, 1, 0); + add_imm(jinfo->codebuf, ARM_R2, ARM_R2, bci+CONSTMETHOD_CODEOFFSET); + str_imm(jinfo->codebuf, ARM_R1, Rthread, THREAD_LAST_JAVA_SP, 1, 0); + str_imm(jinfo->codebuf, ARM_R1, Rthread, THREAD_LAST_JAVA_FP, 1, 0); +@@ -6298,7 +6303,7 @@ + int r = JSTACK_REG(jstack); + PUSH(jstack, r); + ldr_imm(jinfo->codebuf, r, Ristate, ISTATE_CONSTANTS, 1, 0); +- ldr_imm(jinfo->codebuf, r, r, CP_OFFSET + (index << 4) + 4, 1, 0); // offset to cache->f1() ++ ldr_imm(jinfo->codebuf, r, r, CP_OFFSET + (index << 4) + 4, 1, 0); // offset to cache->f1_as_instance() + } else { + Thumb2_Exit(jinfo, H_EXIT_TO_INTERPRETER, bci, stackdepth); + } diff -r 531847dfec6f -r ed2108ad126a patches/zero/7023639-8000780-jsr292_fast_path.patch --- a/patches/zero/7023639-8000780-jsr292_fast_path.patch Thu Mar 27 03:50:20 2014 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,28468 +0,0 @@ -# HG changeset patch -# User andrew -# Date 1391696693 0 -# Thu Feb 06 14:24:53 2014 +0000 -# Node ID 19ac51ce4be77e6895816f9823bce63a72392e89 -# Parent 3442eb7ef2d216d6bf655d537929a2d31a76a321 -7023639: JSR 292 method handle invocation needs a fast path for compiled code -6984705: JSR 292 method handle creation should not go through JNI -Summary: remove assembly code for JDK 7 chained method handles -Reviewed-by: jrose, twisti, kvn, mhaupt -Contributed-by: John Rose , Christian Thalinger , Michael Haupt - -8000780: [Backport from jdk8] Fix zero fail to build in icedtea7-head. -Summary: Update Zero in icedtea7 to use the hsx24 b25+ java level MLVM - hooks (MLVM Lazy) now in jdk8 and jdk7u-dev head. 
-Reviewed-by: rkennke ( Roman Kenbke ) - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java ---- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java Thu Feb 06 14:24:53 2014 +0000 -@@ -93,7 +93,6 @@ - public boolean isUncommonTrapStub() { return false; } - public boolean isExceptionStub() { return false; } - public boolean isSafepointStub() { return false; } -- public boolean isRicochetBlob() { return false; } - public boolean isAdapterBlob() { return false; } - - // Fine grain nmethod support: isNmethod() == isJavaMethod() || isNativeMethod() || isOSRMethod() -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java ---- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java Thu Feb 06 14:24:53 2014 +0000 -@@ -57,7 +57,6 @@ - virtualConstructor.addMapping("BufferBlob", BufferBlob.class); - virtualConstructor.addMapping("nmethod", NMethod.class); - virtualConstructor.addMapping("RuntimeStub", RuntimeStub.class); -- virtualConstructor.addMapping("RicochetBlob", RicochetBlob.class); - virtualConstructor.addMapping("AdapterBlob", AdapterBlob.class); - virtualConstructor.addMapping("MethodHandlesAdapterBlob", MethodHandlesAdapterBlob.class); - virtualConstructor.addMapping("SafepointBlob", SafepointBlob.class); -@@ -127,10 +126,6 @@ - Assert.that(result.blobContains(start) || result.blobContains(start.addOffsetTo(8)), - "found wrong CodeBlob"); - } -- if (result.isRicochetBlob()) { -- // This should probably be done for other SingletonBlobs -- return VM.getVM().ricochetBlob(); -- } - return result; - } - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/code/RicochetBlob.java ---- 
openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/RicochetBlob.java Tue Jan 14 20:24:44 2014 -0500 -+++ /dev/null Thu Jan 01 00:00:00 1970 +0000 -@@ -1,70 +0,0 @@ --/* -- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. -- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -- * -- * This code is free software; you can redistribute it and/or modify it -- * under the terms of the GNU General Public License version 2 only, as -- * published by the Free Software Foundation. -- * -- * This code is distributed in the hope that it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -- * version 2 for more details (a copy is included in the LICENSE file that -- * accompanied this code). -- * -- * You should have received a copy of the GNU General Public License version -- * 2 along with this work; if not, write to the Free Software Foundation, -- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -- * -- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -- * or visit www.oracle.com if you need additional information or have any -- * questions. 
-- * -- */ -- --package sun.jvm.hotspot.code; -- --import java.util.*; --import sun.jvm.hotspot.debugger.*; --import sun.jvm.hotspot.runtime.*; --import sun.jvm.hotspot.types.*; -- --/** RicochetBlob (currently only used by Compiler 2) */ -- --public class RicochetBlob extends SingletonBlob { -- static { -- VM.registerVMInitializedObserver(new Observer() { -- public void update(Observable o, Object data) { -- initialize(VM.getVM().getTypeDataBase()); -- } -- }); -- } -- -- private static void initialize(TypeDataBase db) { -- Type type = db.lookupType("RicochetBlob"); -- -- bounceOffsetField = type.getCIntegerField("_bounce_offset"); -- exceptionOffsetField = type.getCIntegerField("_exception_offset"); -- } -- -- private static CIntegerField bounceOffsetField; -- private static CIntegerField exceptionOffsetField; -- -- public RicochetBlob(Address addr) { -- super(addr); -- } -- -- public boolean isRicochetBlob() { -- return true; -- } -- -- public Address bounceAddr() { -- return codeBegin().addOffsetTo(bounceOffsetField.getValue(addr)); -- } -- -- public boolean returnsToBounceAddr(Address pc) { -- Address bouncePc = bounceAddr(); -- return (pc.equals(bouncePc) || pc.addOffsetTo(Frame.pcReturnOffset()).equals(bouncePc)); -- } -- --} -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java ---- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Frame.java Thu Feb 06 14:24:53 2014 +0000 -@@ -147,12 +147,6 @@ - } - } - -- public boolean isRicochetFrame() { -- CodeBlob cb = VM.getVM().getCodeCache().findBlob(getPC()); -- RicochetBlob rcb = VM.getVM().ricochetBlob(); -- return (cb == rcb && rcb != null && rcb.returnsToBounceAddr(getPC())); -- } -- - public boolean isCompiledFrame() { - if (Assert.ASSERTS_ENABLED) { - Assert.that(!VM.getVM().isCore(), "noncore builds only"); -@@ -216,8 +210,7 @@ - public Frame 
realSender(RegisterMap map) { - if (!VM.getVM().isCore()) { - Frame result = sender(map); -- while (result.isRuntimeFrame() || -- result.isRicochetFrame()) { -+ while (result.isRuntimeFrame()) { - result = result.sender(map); - } - return result; -@@ -631,9 +624,6 @@ - if (Assert.ASSERTS_ENABLED) { - Assert.that(cb != null, "sanity check"); - } -- if (cb == VM.getVM().ricochetBlob()) { -- oopsRicochetDo(oopVisitor, regMap); -- } - if (cb.getOopMaps() != null) { - OopMapSet.oopsDo(this, cb, regMap, oopVisitor, VM.getVM().isDebugging()); - -@@ -650,10 +640,6 @@ - // } - } - -- private void oopsRicochetDo (AddressVisitor oopVisitor, RegisterMap regMap) { -- // XXX Empty for now -- } -- - // FIXME: implement the above routines, plus add - // oops_interpreted_arguments_do and oops_compiled_arguments_do - } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java ---- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Thu Feb 06 14:24:53 2014 +0000 -@@ -87,8 +87,6 @@ - private StubRoutines stubRoutines; - private Bytes bytes; - -- private RicochetBlob ricochetBlob; -- - /** Flags indicating whether we are attached to a core, C1, or C2 build */ - private boolean usingClientCompiler; - private boolean usingServerCompiler; -@@ -628,18 +626,6 @@ - return stubRoutines; - } - -- public RicochetBlob ricochetBlob() { -- if (ricochetBlob == null) { -- Type ricochetType = db.lookupType("SharedRuntime"); -- AddressField ricochetBlobAddress = ricochetType.getAddressField("_ricochet_blob"); -- Address addr = ricochetBlobAddress.getValue(); -- if (addr != null) { -- ricochetBlob = new RicochetBlob(addr); -- } -- } -- return ricochetBlob; -- } -- - public VMRegImpl getVMRegImplInfo() { - if (vmregImpl == null) { - vmregImpl = new VMRegImpl(); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 
agent/src/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCFrame.java ---- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCFrame.java Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCFrame.java Thu Feb 06 14:24:53 2014 +0000 -@@ -571,8 +571,6 @@ - // registers callee-saved, then we will have to copy over - // the RegisterMap update logic from the Intel code. - -- if (isRicochetFrame()) return senderForRicochetFrame(map); -- - // The constructor of the sender must know whether this frame is interpreted so it can set the - // sender's _interpreter_sp_adjustment field. - if (VM.getVM().getInterpreter().contains(pc)) { -@@ -945,20 +943,6 @@ - } - - -- private Frame senderForRicochetFrame(SPARCRegisterMap map) { -- if (DEBUG) { -- System.out.println("senderForRicochetFrame"); -- } -- //RicochetFrame* f = RicochetFrame::from_frame(fr); -- // Cf. is_interpreted_frame path of frame::sender -- Address youngerSP = getSP(); -- Address sp = getSenderSP(); -- map.makeIntegerRegsUnsaved(); -- map.shiftWindow(sp, youngerSP); -- boolean thisFrameAdjustedStack = true; // I5_savedSP is live in this RF -- return new SPARCFrame(biasSP(sp), biasSP(youngerSP), thisFrameAdjustedStack); -- } -- - private Frame senderForEntryFrame(RegisterMap regMap) { - SPARCRegisterMap map = (SPARCRegisterMap) regMap; - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCRicochetFrame.java ---- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sparc/SPARCRicochetFrame.java Tue Jan 14 20:24:44 2014 -0500 -+++ /dev/null Thu Jan 01 00:00:00 1970 +0000 -@@ -1,77 +0,0 @@ --/* -- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. -- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
-- * -- * This code is free software; you can redistribute it and/or modify it -- * under the terms of the GNU General Public License version 2 only, as -- * published by the Free Software Foundation. -- * -- * This code is distributed in the hope that it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -- * version 2 for more details (a copy is included in the LICENSE file that -- * accompanied this code). -- * -- * You should have received a copy of the GNU General Public License version -- * 2 along with this work; if not, write to the Free Software Foundation, -- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -- * -- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -- * or visit www.oracle.com if you need additional information or have any -- * questions. -- * -- */ -- --package sun.jvm.hotspot.runtime.sparc; -- --import java.util.*; --import sun.jvm.hotspot.asm.sparc.SPARCRegister; --import sun.jvm.hotspot.asm.sparc.SPARCRegisters; --import sun.jvm.hotspot.debugger.*; --import sun.jvm.hotspot.runtime.*; --import sun.jvm.hotspot.types.*; -- --public class SPARCRicochetFrame { -- static { -- VM.registerVMInitializedObserver(new Observer() { -- public void update(Observable o, Object data) { -- initialize(VM.getVM().getTypeDataBase()); -- } -- }); -- } -- -- private SPARCFrame frame; -- -- private static void initialize(TypeDataBase db) { -- // Type type = db.lookupType("MethodHandles::RicochetFrame"); -- -- } -- -- static SPARCRicochetFrame fromFrame(SPARCFrame f) { -- return new SPARCRicochetFrame(f); -- } -- -- private SPARCRicochetFrame(SPARCFrame f) { -- frame = f; -- } -- -- private Address registerValue(SPARCRegister reg) { -- return frame.getSP().addOffsetTo(reg.spOffsetInSavedWindow()).getAddressAt(0); -- } -- -- public Address savedArgsBase() { -- return 
registerValue(SPARCRegisters.L4); -- } -- public Address exactSenderSP() { -- return registerValue(SPARCRegisters.I5); -- } -- public Address senderLink() { -- return frame.getSenderSP(); -- } -- public Address senderPC() { -- return frame.getSenderPC(); -- } -- public Address extendedSenderSP() { -- return savedArgsBase(); -- } --} -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java ---- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86Frame.java Thu Feb 06 14:24:53 2014 +0000 -@@ -269,7 +269,6 @@ - - if (isEntryFrame()) return senderForEntryFrame(map); - if (isInterpretedFrame()) return senderForInterpreterFrame(map); -- if (isRicochetFrame()) return senderForRicochetFrame(map); - - if(cb == null) { - cb = VM.getVM().getCodeCache().findBlob(getPC()); -@@ -288,16 +287,6 @@ - return new X86Frame(getSenderSP(), getLink(), getSenderPC()); - } - -- private Frame senderForRicochetFrame(X86RegisterMap map) { -- if (DEBUG) { -- System.out.println("senderForRicochetFrame"); -- } -- X86RicochetFrame f = X86RicochetFrame.fromFrame(this); -- if (map.getUpdateMap()) -- updateMapWithSavedLink(map, f.senderLinkAddress()); -- return new X86Frame(f.extendedSenderSP(), f.exactSenderSP(), f.senderLink(), f.senderPC()); -- } -- - private Frame senderForEntryFrame(X86RegisterMap map) { - if (DEBUG) { - System.out.println("senderForEntryFrame"); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86RicochetFrame.java ---- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/x86/X86RicochetFrame.java Tue Jan 14 20:24:44 2014 -0500 -+++ /dev/null Thu Jan 01 00:00:00 1970 +0000 -@@ -1,81 +0,0 @@ --/* -- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. -- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
-- * -- * This code is free software; you can redistribute it and/or modify it -- * under the terms of the GNU General Public License version 2 only, as -- * published by the Free Software Foundation. -- * -- * This code is distributed in the hope that it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -- * version 2 for more details (a copy is included in the LICENSE file that -- * accompanied this code). -- * -- * You should have received a copy of the GNU General Public License version -- * 2 along with this work; if not, write to the Free Software Foundation, -- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -- * -- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -- * or visit www.oracle.com if you need additional information or have any -- * questions. -- * -- */ -- --package sun.jvm.hotspot.runtime.x86; -- --import java.util.*; --import sun.jvm.hotspot.debugger.*; --import sun.jvm.hotspot.runtime.*; --import sun.jvm.hotspot.types.*; -- --public class X86RicochetFrame extends VMObject { -- static { -- VM.registerVMInitializedObserver(new Observer() { -- public void update(Observable o, Object data) { -- initialize(VM.getVM().getTypeDataBase()); -- } -- }); -- } -- -- private static void initialize(TypeDataBase db) { -- Type type = db.lookupType("MethodHandles::RicochetFrame"); -- -- senderLinkField = type.getAddressField("_sender_link"); -- savedArgsBaseField = type.getAddressField("_saved_args_base"); -- exactSenderSPField = type.getAddressField("_exact_sender_sp"); -- senderPCField = type.getAddressField("_sender_pc"); -- } -- -- private static AddressField senderLinkField; -- private static AddressField savedArgsBaseField; -- private static AddressField exactSenderSPField; -- private static AddressField senderPCField; -- -- static X86RicochetFrame fromFrame(X86Frame f) { -- return new 
X86RicochetFrame(f.getFP().addOffsetTo(- senderLinkField.getOffset())); -- } -- -- private X86RicochetFrame(Address addr) { -- super(addr); -- } -- -- public Address senderLink() { -- return senderLinkField.getValue(addr); -- } -- public Address senderLinkAddress() { -- return addr.addOffsetTo(senderLinkField.getOffset()); -- } -- public Address savedArgsBase() { -- return savedArgsBaseField.getValue(addr); -- } -- public Address extendedSenderSP() { -- return savedArgsBase(); -- } -- public Address exactSenderSP() { -- return exactSenderSPField.getValue(addr); -- } -- public Address senderPC() { -- return senderPCField.getValue(addr); -- } --} -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 make/solaris/makefiles/fastdebug.make ---- openjdk/hotspot/make/solaris/makefiles/fastdebug.make Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/make/solaris/makefiles/fastdebug.make Thu Feb 06 14:24:53 2014 +0000 -@@ -36,6 +36,14 @@ - ifeq ("${Platform_compiler}", "sparcWorks") - OPT_CFLAGS/SLOWER = -xO2 - -+ifeq ($(COMPILER_REV_NUMERIC), 510) -+# CC 5.10 has bug XXXXX with -xO4 -+OPT_CFLAGS/jvmtiClassFileReconstituter.o = $(OPT_CFLAGS/SLOWER) -+# jvm98 crashes on solaris-i586-fastdebug and solaris-sparc-fastdebug with stack overflow -+OPT_CFLAGS/escape.o = $(OPT_CFLAGS) -xspace -+OPT_CFLAGS/matcher.o = $(OPT_CFLAGS) -xspace -+endif # COMPILER_REV_NUMERIC == 510 -+ - ifeq ($(COMPILER_REV_NUMERIC), 509) - # To avoid jvm98 crash - OPT_CFLAGS/instanceKlass.o = $(OPT_CFLAGS/SLOWER) -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 make/solaris/makefiles/optimized.make ---- openjdk/hotspot/make/solaris/makefiles/optimized.make Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/make/solaris/makefiles/optimized.make Thu Feb 06 14:24:53 2014 +0000 -@@ -32,6 +32,11 @@ - # (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files) - ifeq ("${Platform_compiler}", "sparcWorks") - -+ifeq ($(COMPILER_REV_NUMERIC), 510) -+# CC 5.10 has bug XXXXX with -xO4 
-+OPT_CFLAGS/jvmtiClassFileReconstituter.o = $(OPT_CFLAGS/O2) -+endif # COMPILER_REV_NUMERIC == 510 -+ - ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1) - # dtrace cannot handle tail call optimization (6672627, 6693876) - OPT_CFLAGS/jni.o = $(OPT_CFLAGS/DEFAULT) $(OPT_CCFLAGS/NO_TAIL_CALL_OPT) -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 make/solaris/makefiles/product.make ---- openjdk/hotspot/make/solaris/makefiles/product.make Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/make/solaris/makefiles/product.make Thu Feb 06 14:24:53 2014 +0000 -@@ -40,6 +40,11 @@ - # (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files) - ifeq ("${Platform_compiler}", "sparcWorks") - -+ifeq ($(COMPILER_REV_NUMERIC), 510) -+# CC 5.10 has bug XXXXX with -xO4 -+OPT_CFLAGS/jvmtiClassFileReconstituter.o = $(OPT_CFLAGS/O2) -+endif # COMPILER_REV_NUMERIC == 510 -+ - ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1) - # dtrace cannot handle tail call optimization (6672627, 6693876) - OPT_CFLAGS/jni.o = $(OPT_CFLAGS/DEFAULT) $(OPT_CCFLAGS/NO_TAIL_CALL_OPT) -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/assembler_sparc.cpp ---- openjdk/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -44,8 +44,10 @@ - - #ifdef PRODUCT - #define BLOCK_COMMENT(str) /* nothing */ -+#define STOP(error) stop(error) - #else - #define BLOCK_COMMENT(str) block_comment(str) -+#define STOP(error) block_comment(error); stop(error) - #endif - - // Convert the raw encoding form into the form expected by the -@@ -992,7 +994,7 @@ - save_frame(0); // to avoid clobbering O0 - ld_ptr(pc_addr, L0); - br_null_short(L0, Assembler::pt, PcOk); -- stop("last_Java_pc not zeroed before leaving Java"); -+ STOP("last_Java_pc not zeroed before leaving Java"); - bind(PcOk); - - // Verify that flags was zeroed on return to Java -@@ -1001,7 +1003,7 @@ - tst(L0); - br(Assembler::zero, 
false, Assembler::pt, FlagsOk); - delayed() -> restore(); -- stop("flags not zeroed before leaving Java"); -+ STOP("flags not zeroed before leaving Java"); - bind(FlagsOk); - #endif /* ASSERT */ - // -@@ -1021,7 +1023,7 @@ - andcc(last_java_sp, 0x01, G0); - br(Assembler::notZero, false, Assembler::pt, StackOk); - delayed()->nop(); -- stop("Stack Not Biased in set_last_Java_frame"); -+ STOP("Stack Not Biased in set_last_Java_frame"); - bind(StackOk); - #endif // ASSERT - assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame"); -@@ -1650,23 +1652,28 @@ - - - void RegistersForDebugging::print(outputStream* s) { -+ FlagSetting fs(Debugging, true); - int j; -- for ( j = 0; j < 8; ++j ) -- if ( j != 6 ) s->print_cr("i%d = 0x%.16lx", j, i[j]); -- else s->print_cr( "fp = 0x%.16lx", i[j]); -+ for (j = 0; j < 8; ++j) { -+ if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); } -+ else { s->print( "fp = " ); os::print_location(s, i[j]); } -+ } - s->cr(); - -- for ( j = 0; j < 8; ++j ) -- s->print_cr("l%d = 0x%.16lx", j, l[j]); -+ for (j = 0; j < 8; ++j) { -+ s->print("l%d = ", j); os::print_location(s, l[j]); -+ } - s->cr(); - -- for ( j = 0; j < 8; ++j ) -- if ( j != 6 ) s->print_cr("o%d = 0x%.16lx", j, o[j]); -- else s->print_cr( "sp = 0x%.16lx", o[j]); -+ for (j = 0; j < 8; ++j) { -+ if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); } -+ else { s->print( "sp = " ); os::print_location(s, o[j]); } -+ } - s->cr(); - -- for ( j = 0; j < 8; ++j ) -- s->print_cr("g%d = 0x%.16lx", j, g[j]); -+ for (j = 0; j < 8; ++j) { -+ s->print("g%d = ", j); os::print_location(s, g[j]); -+ } - s->cr(); - - // print out floats with compression -@@ -2020,8 +2027,8 @@ - char* b = new char[1024]; - sprintf(b, "untested: %s", what); - -- if ( ShowMessageBoxOnError ) stop(b); -- else warn(b); -+ if (ShowMessageBoxOnError) { STOP(b); } -+ else { warn(b); } - } - - -@@ -2998,26 +3005,60 @@ - } - - -+// virtual method calling -+void 
MacroAssembler::lookup_virtual_method(Register recv_klass, -+ RegisterOrConstant vtable_index, -+ Register method_result) { -+ assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); -+ Register sethi_temp = method_result; -+ const int base = (instanceKlass::vtable_start_offset() * wordSize + -+ // method pointer offset within the vtable entry: -+ vtableEntry::method_offset_in_bytes()); -+ RegisterOrConstant vtable_offset = vtable_index; -+ // Each of the following three lines potentially generates an instruction. -+ // But the total number of address formation instructions will always be -+ // at most two, and will often be zero. In any case, it will be optimal. -+ // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x). -+ // If vtable_index is a constant, we will have at most (set B+X<is_global()) sub_2 = L0; - if (!sup_2->is_global()) sup_2 = L1; -- -- save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); -+ bool did_save = false; -+ if (temp_reg == noreg || temp2_reg == noreg) { -+ temp_reg = L2; -+ temp2_reg = L3; -+ save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); -+ sub_klass = sub_2; -+ super_klass = sup_2; -+ did_save = true; -+ } -+ Label L_failure, L_pop_to_failure, L_pop_to_success; -+ check_klass_subtype_fast_path(sub_klass, super_klass, -+ temp_reg, temp2_reg, -+ (did_save ? &L_pop_to_success : &L_success), -+ (did_save ? 
&L_pop_to_failure : &L_failure), NULL); -+ -+ if (!did_save) -+ save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); - check_klass_subtype_slow_path(sub_2, sup_2, - L2, L3, L4, L5, - NULL, &L_pop_to_failure); - - // on success: -+ bind(L_pop_to_success); - restore(); - ba_short(L_success); - -@@ -3234,54 +3275,6 @@ - } - - --void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg, -- Register temp_reg, -- Label& wrong_method_type) { -- assert_different_registers(mtype_reg, mh_reg, temp_reg); -- // compare method type against that of the receiver -- RegisterOrConstant mhtype_offset = delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg); -- load_heap_oop(mh_reg, mhtype_offset, temp_reg); -- cmp_and_brx_short(temp_reg, mtype_reg, Assembler::notEqual, Assembler::pn, wrong_method_type); --} -- -- --// A method handle has a "vmslots" field which gives the size of its --// argument list in JVM stack slots. This field is either located directly --// in every method handle, or else is indirectly accessed through the --// method handle's MethodType. This macro hides the distinction. 
--void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg, -- Register temp_reg) { -- assert_different_registers(vmslots_reg, mh_reg, temp_reg); -- // load mh.type.form.vmslots -- Register temp2_reg = vmslots_reg; -- load_heap_oop(Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg); -- load_heap_oop(Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg); -- ld( Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg); --} -- -- --void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) { -- assert(mh_reg == G3_method_handle, "caller must put MH object in G3"); -- assert_different_registers(mh_reg, temp_reg); -- -- // pick out the interpreted side of the handler -- // NOTE: vmentry is not an oop! -- ld_ptr(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg); -- -- // off we go... -- ld_ptr(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes(), temp_reg); -- jmp(temp_reg, 0); -- -- // for the various stubs which take control at this point, -- // see MethodHandles::generate_method_handle_stub -- -- // Some callers can fill the delay slot. 
-- if (emit_delayed_nop) { -- delayed()->nop(); -- } --} -- -- - RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, - Register temp_reg, - int extra_slot_offset) { -@@ -3914,7 +3907,7 @@ - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); - or3(t1, t2, t3); - cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); -- stop("assert(top >= start)"); -+ STOP("assert(top >= start)"); - should_not_reach_here(); - - bind(next); -@@ -3922,13 +3915,13 @@ - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); - or3(t3, t2, t3); - cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); -- stop("assert(top <= end)"); -+ STOP("assert(top <= end)"); - should_not_reach_here(); - - bind(next2); - and3(t3, MinObjAlignmentInBytesMask, t3); - cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); -- stop("assert(aligned)"); -+ STOP("assert(aligned)"); - should_not_reach_here(); - - bind(ok); -@@ -3976,7 +3969,7 @@ - btst(MinObjAlignmentInBytesMask, obj); - br(Assembler::zero, false, Assembler::pt, L); - delayed()->nop(); -- stop("eden top is not properly aligned"); -+ STOP("eden top is not properly aligned"); - bind(L); - } - #endif // ASSERT -@@ -4013,7 +4006,7 @@ - btst(MinObjAlignmentInBytesMask, top_addr); - br(Assembler::zero, false, Assembler::pt, L); - delayed()->nop(); -- stop("eden top is not properly aligned"); -+ STOP("eden top is not properly aligned"); - bind(L); - } - #endif // ASSERT -@@ -4066,7 +4059,7 @@ - btst(MinObjAlignmentInBytesMask, free); - br(Assembler::zero, false, Assembler::pt, L); - delayed()->nop(); -- stop("updated TLAB free is not properly aligned"); -+ STOP("updated TLAB free is not properly aligned"); - bind(L); - } - #endif // ASSERT -@@ -4164,7 +4157,7 @@ - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2); - sll_ptr(t2, LogHeapWordSize, t2); - cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok); -- stop("assert(t1 == 
tlab_size)"); -+ STOP("assert(t1 == tlab_size)"); - should_not_reach_here(); - - bind(ok); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/assembler_sparc.hpp ---- openjdk/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -2538,6 +2538,11 @@ - Register temp_reg, Register temp2_reg, - Label& no_such_interface); - -+ // virtual method calling -+ void lookup_virtual_method(Register recv_klass, -+ RegisterOrConstant vtable_index, -+ Register method_result); -+ - // Test sub_klass against super_klass, with fast and slow paths. - - // The fast path produces a tri-state answer: yes / no / maybe-slow. -@@ -2577,12 +2582,6 @@ - Label& L_success); - - // method handles (JSR 292) -- void check_method_handle_type(Register mtype_reg, Register mh_reg, -- Register temp_reg, -- Label& wrong_method_type); -- void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg, -- Register temp_reg); -- void jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop = true); - // offset relative to Gargs of argument at tos[arg_slot]. - // (arg_slot == 0 means the last argument, not the first). - RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, -@@ -2590,7 +2589,7 @@ - int extra_slot_offset = 0); - // Address of Gargs and argument_offset. 
- Address argument_address(RegisterOrConstant arg_slot, -- Register temp_reg, -+ Register temp_reg = noreg, - int extra_slot_offset = 0); - - // Stack overflow checking -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp ---- openjdk/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -2956,6 +2956,7 @@ - void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { - ciMethod* method = op->profiled_method(); - int bci = op->profiled_bci(); -+ ciMethod* callee = op->profiled_callee(); - - // Update counter for all call types - ciMethodData* md = method->method_data_or_null(); -@@ -2984,9 +2985,11 @@ - - Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); - Bytecodes::Code bc = method->java_code_at_bci(bci); -+ const bool callee_is_static = callee->is_loaded() && callee->is_static(); - // Perform additional virtual call profiling for invokevirtual and - // invokeinterface bytecodes - if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && -+ !callee_is_static && // required for optimized MH invokes - C1ProfileVirtualCalls) { - assert(op->recv()->is_single_cpu(), "recv must be allocated"); - Register recv = op->recv()->as_register(); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/cppInterpreter_sparc.cpp ---- openjdk/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -514,9 +514,9 @@ - // Need to differentiate between igetfield, agetfield, bgetfield etc. - // because they are different sizes. 
- // Get the type from the constant pool cache -- __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch); -- // Make sure we don't need to mask G1_scratch for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -+ __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch); -+ // Make sure we don't need to mask G1_scratch after the above shift -+ ConstantPoolCacheEntry::verify_tos_state_shift(); - __ cmp(G1_scratch, atos ); - __ br(Assembler::equal, true, Assembler::pt, xreturn_path); - __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/frame_sparc.cpp ---- openjdk/hotspot/src/cpu/sparc/vm/frame_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/frame_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -514,7 +514,6 @@ - // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be - // explicitly recognized. - -- if (is_ricochet_frame()) return sender_for_ricochet_frame(map); - - bool frame_is_interpreted = is_interpreted_frame(); - if (frame_is_interpreted) { -@@ -821,9 +820,7 @@ - values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1); - } - -- if (is_ricochet_frame()) { -- MethodHandles::RicochetFrame::describe(this, values, frame_no); -- } else if (is_interpreted_frame()) { -+ if (is_interpreted_frame()) { - DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp); - DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp); - DESCRIBE_FP_OFFSET(interpreter_frame_padding); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/interp_masm_sparc.cpp ---- openjdk/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -505,7 +505,7 @@ - void InterpreterMacroAssembler::load_receiver(Register param_count, - Register recv) { - sll(param_count, Interpreter::logStackElementSize, 
param_count); -- ld_ptr(Lesp, param_count, recv); // gets receiver Oop -+ ld_ptr(Lesp, param_count, recv); // gets receiver oop - } - - void InterpreterMacroAssembler::empty_expression_stack() { -@@ -767,8 +767,12 @@ - get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size); - ld_ptr(cache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode); - const int shift_count = (1 + byte_no) * BitsPerByte; -- srl( bytecode, shift_count, bytecode); -- and3(bytecode, 0xFF, bytecode); -+ assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) || -+ (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift), -+ "correct shift count"); -+ srl(bytecode, shift_count, bytecode); -+ assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask"); -+ and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode); - } - - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/interpreterGenerator_sparc.hpp ---- openjdk/hotspot/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -32,7 +32,6 @@ - address generate_normal_entry(bool synchronized); - address generate_native_entry(bool synchronized); - address generate_abstract_entry(void); -- address generate_method_handle_entry(void); - address generate_math_entry(AbstractInterpreter::MethodKind kind); - address generate_empty_entry(void); - address generate_accessor_entry(void); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/interpreter_sparc.cpp ---- openjdk/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -255,17 +255,6 @@ - } - - --// Method handle invoker --// Dispatch a method of the form 
java.lang.invoke.MethodHandles::invoke(...) --address InterpreterGenerator::generate_method_handle_entry(void) { -- if (!EnableInvokeDynamic) { -- return generate_abstract_entry(); -- } -- -- return MethodHandles::generate_method_handle_interpreter_entry(_masm); --} -- -- - //---------------------------------------------------------------------------------------------------- - // Entry points & stack frame layout - // -@@ -395,7 +384,7 @@ - case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break; - case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break; - case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break; -- case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break; -+ - case Interpreter::java_lang_math_sin : break; - case Interpreter::java_lang_math_cos : break; - case Interpreter::java_lang_math_tan : break; -@@ -405,7 +394,9 @@ - case Interpreter::java_lang_math_log10 : break; - case Interpreter::java_lang_ref_reference_get - : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; -- default : ShouldNotReachHere(); break; -+ default: -+ fatal(err_msg("unexpected method kind: %d", kind)); -+ break; - } - - if (entry_point) return entry_point; -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/methodHandles_sparc.cpp ---- openjdk/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -31,452 +31,37 @@ - - #ifdef PRODUCT - #define BLOCK_COMMENT(str) /* nothing */ -+#define STOP(error) stop(error) - #else - #define BLOCK_COMMENT(str) __ block_comment(str) -+#define STOP(error) block_comment(error); __ stop(error) - #endif - - #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") - --address 
MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm, -- address interpreted_entry) { -- // Just before the actual machine code entry point, allocate space -- // for a MethodHandleEntry::Data record, so that we can manage everything -- // from one base pointer. -- __ align(wordSize); -- address target = __ pc() + sizeof(Data); -- while (__ pc() < target) { -- __ nop(); -- __ align(wordSize); -- } -- -- MethodHandleEntry* me = (MethodHandleEntry*) __ pc(); -- me->set_end_address(__ pc()); // set a temporary end_address -- me->set_from_interpreted_entry(interpreted_entry); -- me->set_type_checking_entry(NULL); -- -- return (address) me; -+// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant. -+static RegisterOrConstant constant(int value) { -+ return RegisterOrConstant(value); - } - --MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm, -- address start_addr) { -- MethodHandleEntry* me = (MethodHandleEntry*) start_addr; -- assert(me->end_address() == start_addr, "valid ME"); -- -- // Fill in the real end_address: -- __ align(wordSize); -- me->set_end_address(__ pc()); -- -- return me; --} -- --// stack walking support -- --frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) { -- //RicochetFrame* f = RicochetFrame::from_frame(fr); -- // Cf. 
is_interpreted_frame path of frame::sender -- intptr_t* younger_sp = fr.sp(); -- intptr_t* sp = fr.sender_sp(); -- map->make_integer_regs_unsaved(); -- map->shift_window(sp, younger_sp); -- bool this_frame_adjusted_stack = true; // I5_savedSP is live in this RF -- return frame(sp, younger_sp, this_frame_adjusted_stack); --} -- --void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) { -- ResourceMark rm; -- RicochetFrame* f = RicochetFrame::from_frame(fr); -- -- // pick up the argument type descriptor: -- Thread* thread = Thread::current(); -- Handle cookie(thread, f->compute_saved_args_layout(true, true)); -- -- // process fixed part -- blk->do_oop((oop*)f->saved_target_addr()); -- blk->do_oop((oop*)f->saved_args_layout_addr()); -- -- // process variable arguments: -- if (cookie.is_null()) return; // no arguments to describe -- -- // the cookie is actually the invokeExact method for my target -- // his argument signature is what I'm interested in -- assert(cookie->is_method(), ""); -- methodHandle invoker(thread, methodOop(cookie())); -- assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method"); -- assert(!invoker->is_static(), "must have MH argument"); -- int slot_count = invoker->size_of_parameters(); -- assert(slot_count >= 1, "must include 'this'"); -- intptr_t* base = f->saved_args_base(); -- intptr_t* retval = NULL; -- if (f->has_return_value_slot()) -- retval = f->return_value_slot_addr(); -- int slot_num = slot_count - 1; -- intptr_t* loc = &base[slot_num]; -- //blk->do_oop((oop*) loc); // original target, which is irrelevant -- int arg_num = 0; -- for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) { -- if (ss.at_return_type()) continue; -- BasicType ptype = ss.type(); -- if (ptype == T_ARRAY) ptype = T_OBJECT; // fold all refs to T_OBJECT -- assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void"); -- slot_num -= type2size[ptype]; -- loc = 
&base[slot_num]; -- bool is_oop = (ptype == T_OBJECT && loc != retval); -- if (is_oop) blk->do_oop((oop*)loc); -- arg_num += 1; -- } -- assert(slot_num == 0, "must have processed all the arguments"); --} -- --// Ricochet Frames --const Register MethodHandles::RicochetFrame::L1_continuation = L1; --const Register MethodHandles::RicochetFrame::L2_saved_target = L2; --const Register MethodHandles::RicochetFrame::L3_saved_args_layout = L3; --const Register MethodHandles::RicochetFrame::L4_saved_args_base = L4; // cf. Gargs = G4 --const Register MethodHandles::RicochetFrame::L5_conversion = L5; --#ifdef ASSERT --const Register MethodHandles::RicochetFrame::L0_magic_number_1 = L0; --#endif //ASSERT -- --oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) { -- if (read_cache) { -- oop cookie = saved_args_layout(); -- if (cookie != NULL) return cookie; -- } -- oop target = saved_target(); -- oop mtype = java_lang_invoke_MethodHandle::type(target); -- oop mtform = java_lang_invoke_MethodType::form(mtype); -- oop cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform); -- if (write_cache) { -- (*saved_args_layout_addr()) = cookie; -- } -- return cookie; --} -- --void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm, -- // output params: -- int* bounce_offset, -- int* exception_offset, -- int* frame_size_in_words) { -- (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize; -- -- address start = __ pc(); -- --#ifdef ASSERT -- __ illtrap(0); __ illtrap(0); __ illtrap(0); -- // here's a hint of something special: -- __ set(MAGIC_NUMBER_1, G0); -- __ set(MAGIC_NUMBER_2, G0); --#endif //ASSERT -- __ illtrap(0); // not reached -- -- // Return values are in registers. -- // L1_continuation contains a cleanup continuation we must return -- // to. 
-- -- (*bounce_offset) = __ pc() - start; -- BLOCK_COMMENT("ricochet_blob.bounce"); -- -- if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); -- trace_method_handle(_masm, "return/ricochet_blob.bounce"); -- -- __ JMP(L1_continuation, 0); -- __ delayed()->nop(); -- __ illtrap(0); -- -- DEBUG_ONLY(__ set(MAGIC_NUMBER_2, G0)); -- -- (*exception_offset) = __ pc() - start; -- BLOCK_COMMENT("ricochet_blob.exception"); -- -- // compare this to Interpreter::rethrow_exception_entry, which is parallel code -- // for example, see TemplateInterpreterGenerator::generate_throw_exception -- // Live registers in: -- // Oexception (O0): exception -- // Oissuing_pc (O1): return address/pc that threw exception (ignored, always equal to bounce addr) -- __ verify_oop(Oexception); -- -- // Take down the frame. -- -- // Cf. InterpreterMacroAssembler::remove_activation. -- leave_ricochet_frame(_masm, /*recv_reg=*/ noreg, I5_savedSP, I7); -- -- // We are done with this activation frame; find out where to go next. -- // The continuation point will be an exception handler, which expects -- // the following registers set up: -- // -- // Oexception: exception -- // Oissuing_pc: the local call that threw exception -- // Other On: garbage -- // In/Ln: the contents of the caller's register window -- // -- // We do the required restore at the last possible moment, because we -- // need to preserve some state across a runtime call. -- // (Remember that the caller activation is unknown--it might not be -- // interpreted, so things like Lscratch are useless in the caller.) 
-- __ mov(Oexception, Oexception ->after_save()); // get exception in I0 so it will be on O0 after restore -- __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller -- __ call_VM_leaf(L7_thread_cache, -- CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), -- G2_thread, Oissuing_pc->after_save()); -- -- // The caller's SP was adjusted upon method entry to accomodate -- // the callee's non-argument locals. Undo that adjustment. -- __ JMP(O0, 0); // return exception handler in caller -- __ delayed()->restore(I5_savedSP, G0, SP); -- -- // (same old exception object is already in Oexception; see above) -- // Note that an "issuing PC" is actually the next PC after the call --} -- --void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm, -- Register recv_reg, -- Register argv_reg, -- address return_handler) { -- // does not include the __ save() -- assert(argv_reg == Gargs, ""); -- Address G3_mh_vmtarget( recv_reg, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes()); -- Address G3_amh_conversion(recv_reg, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes()); -- -- // Create the RicochetFrame. -- // Unlike on x86 we can store all required information in local -- // registers. 
-- BLOCK_COMMENT("push RicochetFrame {"); -- __ set(ExternalAddress(return_handler), L1_continuation); -- __ load_heap_oop(G3_mh_vmtarget, L2_saved_target); -- __ mov(G0, L3_saved_args_layout); -- __ mov(Gargs, L4_saved_args_base); -- __ lduw(G3_amh_conversion, L5_conversion); // 32-bit field -- // I5, I6, I7 are already set up -- DEBUG_ONLY(__ set((int32_t) MAGIC_NUMBER_1, L0_magic_number_1)); -- BLOCK_COMMENT("} RicochetFrame"); --} -- --void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm, -- Register recv_reg, -- Register new_sp_reg, -- Register sender_pc_reg) { -- assert(new_sp_reg == I5_savedSP, "exact_sender_sp already in place"); -- assert(sender_pc_reg == I7, "in a fixed place"); -- // does not include the __ ret() & __ restore() -- assert_different_registers(recv_reg, new_sp_reg, sender_pc_reg); -- // Take down the frame. -- // Cf. InterpreterMacroAssembler::remove_activation. -- BLOCK_COMMENT("end_ricochet_frame {"); -- if (recv_reg->is_valid()) -- __ mov(L2_saved_target, recv_reg); -- BLOCK_COMMENT("} end_ricochet_frame"); --} -- --// Emit code to verify that FP is pointing at a valid ricochet frame. --#ifndef PRODUCT --enum { -- ARG_LIMIT = 255, SLOP = 45, -- // use this parameter for checking for garbage stack movements: -- UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP) -- // the slop defends against false alarms due to fencepost errors --}; --#endif -- --#ifdef ASSERT --void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) { -- // The stack should look like this: -- // ... keep1 | dest=42 | keep2 | magic | handler | magic | recursive args | [RF] -- // Check various invariants. 
-- -- Register O7_temp = O7, O5_temp = O5; -- -- Label L_ok_1, L_ok_2, L_ok_3, L_ok_4; -- BLOCK_COMMENT("verify_clean {"); -- // Magic numbers must check out: -- __ set((int32_t) MAGIC_NUMBER_1, O7_temp); -- __ cmp_and_br_short(O7_temp, L0_magic_number_1, Assembler::equal, Assembler::pt, L_ok_1); -- __ stop("damaged ricochet frame: MAGIC_NUMBER_1 not found"); -- -- __ BIND(L_ok_1); -- -- // Arguments pointer must look reasonable: --#ifdef _LP64 -- Register FP_temp = O5_temp; -- __ add(FP, STACK_BIAS, FP_temp); --#else -- Register FP_temp = FP; --#endif -- __ cmp_and_brx_short(L4_saved_args_base, FP_temp, Assembler::greaterEqualUnsigned, Assembler::pt, L_ok_2); -- __ stop("damaged ricochet frame: L4 < FP"); -- -- __ BIND(L_ok_2); -- // Disable until we decide on it's fate -- // __ sub(L4_saved_args_base, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize, O7_temp); -- // __ cmp(O7_temp, FP_temp); -- // __ br(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok_3); -- // __ delayed()->nop(); -- // __ stop("damaged ricochet frame: (L4 - UNREASONABLE_STACK_MOVE) > FP"); -- -- __ BIND(L_ok_3); -- extract_conversion_dest_type(_masm, L5_conversion, O7_temp); -- __ cmp_and_br_short(O7_temp, T_VOID, Assembler::equal, Assembler::pt, L_ok_4); -- extract_conversion_vminfo(_masm, L5_conversion, O5_temp); -- __ ld_ptr(L4_saved_args_base, __ argument_offset(O5_temp, O5_temp), O7_temp); -- assert(Assembler::is_simm13(RETURN_VALUE_PLACEHOLDER), "must be simm13"); -- __ cmp_and_brx_short(O7_temp, (int32_t) RETURN_VALUE_PLACEHOLDER, Assembler::equal, Assembler::pt, L_ok_4); -- __ stop("damaged ricochet frame: RETURN_VALUE_PLACEHOLDER not found"); -- __ BIND(L_ok_4); -- BLOCK_COMMENT("} verify_clean"); --} --#endif //ASSERT -- - void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg) { - if (VerifyMethodHandles) - verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(), temp_reg, temp2_reg, -- 
"AMH argument is a Class"); -+ "MH argument is a Class"); - __ load_heap_oop(Address(klass_reg, java_lang_Class::klass_offset_in_bytes()), klass_reg); - } - --void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Address conversion_field_addr, Register reg) { -- assert(CONV_VMINFO_SHIFT == 0, "preshifted"); -- assert(CONV_VMINFO_MASK == right_n_bits(BitsPerByte), "else change type of following load"); -- __ ldub(conversion_field_addr.plus_disp(BytesPerInt - 1), reg); -+#ifdef ASSERT -+static int check_nonzero(const char* xname, int x) { -+ assert(x != 0, err_msg("%s should be nonzero", xname)); -+ return x; - } -- --void MethodHandles::extract_conversion_vminfo(MacroAssembler* _masm, Register conversion_field_reg, Register reg) { -- assert(CONV_VMINFO_SHIFT == 0, "preshifted"); -- __ and3(conversion_field_reg, CONV_VMINFO_MASK, reg); --} -- --void MethodHandles::extract_conversion_dest_type(MacroAssembler* _masm, Register conversion_field_reg, Register reg) { -- __ srl(conversion_field_reg, CONV_DEST_TYPE_SHIFT, reg); -- __ and3(reg, 0x0F, reg); --} -- --void MethodHandles::load_stack_move(MacroAssembler* _masm, -- Address G3_amh_conversion, -- Register stack_move_reg) { -- BLOCK_COMMENT("load_stack_move {"); -- __ ldsw(G3_amh_conversion, stack_move_reg); -- __ sra(stack_move_reg, CONV_STACK_MOVE_SHIFT, stack_move_reg); --#ifdef ASSERT -- if (VerifyMethodHandles) { -- Label L_ok, L_bad; -- int32_t stack_move_limit = 0x0800; // extra-large -- __ cmp_and_br_short(stack_move_reg, stack_move_limit, Assembler::greaterEqual, Assembler::pn, L_bad); -- __ cmp(stack_move_reg, -stack_move_limit); -- __ br(Assembler::greater, false, Assembler::pt, L_ok); -- __ delayed()->nop(); -- __ BIND(L_bad); -- __ stop("load_stack_move of garbage value"); -- __ BIND(L_ok); -- } --#endif -- BLOCK_COMMENT("} load_stack_move"); --} -+#define NONZERO(x) check_nonzero(#x, x) -+#else //ASSERT -+#define NONZERO(x) (x) -+#endif //ASSERT - - #ifdef ASSERT --void 
MethodHandles::RicochetFrame::verify() const { -- assert(magic_number_1() == MAGIC_NUMBER_1, ""); -- if (!Universe::heap()->is_gc_active()) { -- if (saved_args_layout() != NULL) { -- assert(saved_args_layout()->is_method(), "must be valid oop"); -- } -- if (saved_target() != NULL) { -- assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value"); -- } -- } -- int conv_op = adapter_conversion_op(conversion()); -- assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS || -- conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS || -- conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF, -- "must be a sane conversion"); -- if (has_return_value_slot()) { -- assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, ""); -- } --} -- --void MethodHandles::verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) { -- // Verify that argslot lies within (Gargs, FP]. -- Label L_ok, L_bad; -- BLOCK_COMMENT("verify_argslot {"); -- __ cmp_and_brx_short(Gargs, argslot_reg, Assembler::greaterUnsigned, Assembler::pn, L_bad); -- __ add(FP, STACK_BIAS, temp_reg); // STACK_BIAS is zero on !_LP64 -- __ cmp_and_brx_short(argslot_reg, temp_reg, Assembler::lessEqualUnsigned, Assembler::pt, L_ok); -- __ BIND(L_bad); -- __ stop(error_message); -- __ BIND(L_ok); -- BLOCK_COMMENT("} verify_argslot"); --} -- --void MethodHandles::verify_argslots(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, -- Register arg_slot_base_reg, -- Register temp_reg, -- Register temp2_reg, -- bool negate_argslots, -- const char* error_message) { -- // Verify that [argslot..argslot+size) lies within (Gargs, FP). 
-- Label L_ok, L_bad; -- BLOCK_COMMENT("verify_argslots {"); -- if (negate_argslots) { -- if (arg_slots.is_constant()) { -- arg_slots = -1 * arg_slots.as_constant(); -- } else { -- __ neg(arg_slots.as_register(), temp_reg); -- arg_slots = temp_reg; -- } -- } -- __ add(arg_slot_base_reg, __ argument_offset(arg_slots, temp_reg), temp_reg); -- __ add(FP, STACK_BIAS, temp2_reg); // STACK_BIAS is zero on !_LP64 -- __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::greaterUnsigned, Assembler::pn, L_bad); -- // Gargs points to the first word so adjust by BytesPerWord -- __ add(arg_slot_base_reg, BytesPerWord, temp_reg); -- __ cmp_and_brx_short(Gargs, temp_reg, Assembler::lessEqualUnsigned, Assembler::pt, L_ok); -- __ BIND(L_bad); -- __ stop(error_message); -- __ BIND(L_ok); -- BLOCK_COMMENT("} verify_argslots"); --} -- --// Make sure that arg_slots has the same sign as the given direction. --// If (and only if) arg_slots is a assembly-time constant, also allow it to be zero. --void MethodHandles::verify_stack_move(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, int direction) { -- enum { UNREASONABLE_STACK_MOVE = 256 * 4 }; // limit of 255 arguments -- bool allow_zero = arg_slots.is_constant(); -- if (direction == 0) { direction = +1; allow_zero = true; } -- assert(stack_move_unit() == -1, "else add extra checks here"); -- if (arg_slots.is_register()) { -- Label L_ok, L_bad; -- BLOCK_COMMENT("verify_stack_move {"); -- // __ btst(-stack_move_unit() - 1, arg_slots.as_register()); // no need -- // __ br(Assembler::notZero, false, Assembler::pn, L_bad); -- // __ delayed()->nop(); -- __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD); -- if (direction > 0) { -- __ br(allow_zero ? Assembler::less : Assembler::lessEqual, false, Assembler::pn, L_bad); -- __ delayed()->nop(); -- __ cmp(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE); -- __ br(Assembler::less, false, Assembler::pn, L_ok); -- __ delayed()->nop(); -- } else { -- __ br(allow_zero ? 
Assembler::greater : Assembler::greaterEqual, false, Assembler::pn, L_bad); -- __ delayed()->nop(); -- __ cmp(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE); -- __ br(Assembler::greater, false, Assembler::pn, L_ok); -- __ delayed()->nop(); -- } -- __ BIND(L_bad); -- if (direction > 0) -- __ stop("assert arg_slots > 0"); -- else -- __ stop("assert arg_slots < 0"); -- __ BIND(L_ok); -- BLOCK_COMMENT("} verify_stack_move"); -- } else { -- intptr_t size = arg_slots.as_constant(); -- if (direction < 0) size = -size; -- assert(size >= 0, "correct direction of constant move"); -- assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move"); -- } --} -- - void MethodHandles::verify_klass(MacroAssembler* _masm, - Register obj_reg, KlassHandle klass, - Register temp_reg, Register temp2_reg, -@@ -485,6 +70,14 @@ - assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() && - klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(), - "must be one of the SystemDictionaryHandles"); -+ bool did_save = false; -+ if (temp_reg == noreg || temp2_reg == noreg) { -+ temp_reg = L1; -+ temp2_reg = L2; -+ __ save_frame_and_mov(0, obj_reg, L0); -+ obj_reg = L0; -+ did_save = true; -+ } - Label L_ok, L_bad; - BLOCK_COMMENT("verify_klass {"); - __ verify_oop(obj_reg); -@@ -499,548 +92,412 @@ - __ ld_ptr(Address(temp2_reg, 0), temp2_reg); - __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::equal, Assembler::pt, L_ok); - __ BIND(L_bad); -- __ stop(error_message); -+ if (did_save) __ restore(); -+ __ STOP(error_message); - __ BIND(L_ok); -+ if (did_save) __ restore(); - BLOCK_COMMENT("} verify_klass"); - } -+ -+void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) { -+ Label L; -+ BLOCK_COMMENT("verify_ref_kind {"); -+ __ lduw(Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())), temp); -+ __ srl( temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT, 
temp); -+ __ and3(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK, temp); -+ __ cmp_and_br_short(temp, ref_kind, Assembler::equal, Assembler::pt, L); -+ { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal); -+ jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind); -+ if (ref_kind == JVM_REF_invokeVirtual || -+ ref_kind == JVM_REF_invokeSpecial) -+ // could do this for all ref_kinds, but would explode assembly code size -+ trace_method_handle(_masm, buf); -+ __ STOP(buf); -+ } -+ BLOCK_COMMENT("} verify_ref_kind"); -+ __ bind(L); -+} -+ - #endif // ASSERT - -- --void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp) { -+void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp, -+ bool for_compiler_entry) { - assert(method == G5_method, "interpreter calling convention"); - __ verify_oop(method); -- __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target); -- if (JvmtiExport::can_post_interpreter_events()) { -+ -+ if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) { -+ Label run_compiled_code; - // JVMTI events, such as single-stepping, are implemented partly by avoiding running - // compiled code in threads for which the event is enabled. Check here for - // interp_only_mode if these events CAN be enabled. 
- __ verify_thread(); -- Label skip_compiled_code; -- - const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); - __ ld(interp_only, temp); -- __ tst(temp); -- __ br(Assembler::notZero, true, Assembler::pn, skip_compiled_code); -- __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target); -- __ bind(skip_compiled_code); -+ __ cmp_and_br_short(temp, 0, Assembler::zero, Assembler::pt, run_compiled_code); -+ __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target); -+ __ jmp(target, 0); -+ __ delayed()->nop(); -+ __ BIND(run_compiled_code); -+ // Note: we could fill some delay slots here, but -+ // it doesn't matter, since this is interpreter code. - } - __ jmp(target, 0); - __ delayed()->nop(); - } - -+void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, -+ Register recv, Register method_temp, -+ Register temp2, Register temp3, -+ bool for_compiler_entry) { -+ BLOCK_COMMENT("jump_to_lambda_form {"); -+ // This is the initial entry point of a lazy method handle. -+ // After type checking, it picks up the invoker from the LambdaForm. 
-+ assert_different_registers(recv, method_temp, temp2, temp3); -+ assert(method_temp == G5_method, "required register for loading method"); -+ -+ //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); }); -+ -+ // Load the invoker, as MH -> MH.form -> LF.vmentry -+ __ verify_oop(recv); -+ __ load_heap_oop(Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())), method_temp); -+ __ verify_oop(method_temp); -+ __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())), method_temp); -+ __ verify_oop(method_temp); -+ // the following assumes that a methodOop is normally compressed in the vmtarget field: -+ __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())), method_temp); -+ __ verify_oop(method_temp); -+ -+ if (VerifyMethodHandles && !for_compiler_entry) { -+ // make sure recv is already on stack -+ __ load_sized_value(Address(method_temp, methodOopDesc::size_of_parameters_offset()), -+ temp2, -+ sizeof(u2), /*is_signed*/ false); -+ // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), ""); -+ Label L; -+ __ ld_ptr(__ argument_address(temp2, temp2, -1), temp2); -+ __ cmp_and_br_short(temp2, recv, Assembler::equal, Assembler::pt, L); -+ __ STOP("receiver not on stack"); -+ __ BIND(L); -+ } -+ -+ jump_from_method_handle(_masm, method_temp, temp2, temp3, for_compiler_entry); -+ BLOCK_COMMENT("} jump_to_lambda_form"); -+} -+ - - // Code generation --address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) { -- // I5_savedSP/O5_savedSP: sender SP (must preserve) -+address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm, -+ vmIntrinsics::ID iid) { -+ const bool not_for_compiler_entry = false; // this is the interpreter entry -+ assert(is_signature_polymorphic(iid), "expected invoke iid"); -+ if (iid == vmIntrinsics::_invokeGeneric || -+ iid == 
vmIntrinsics::_compiledLambdaForm) { -+ // Perhaps surprisingly, the symbolic references visible to Java are not directly used. -+ // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod. -+ // They all allow an appendix argument. -+ __ should_not_reach_here(); // empty stubs make SG sick -+ return NULL; -+ } -+ -+ // I5_savedSP/O5_savedSP: sender SP (must preserve; see prepare_to_jump_from_interpreted) -+ // G5_method: methodOop - // G4 (Gargs): incoming argument list (must preserve) -- // G5_method: invoke methodOop -- // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots]) -- // O0, O1, O2, O3, O4: garbage temps, blown away -- Register O0_mtype = O0; -- Register O1_scratch = O1; -- Register O2_scratch = O2; -- Register O3_scratch = O3; -- Register O4_argslot = O4; -- Register O4_argbase = O4; -+ // O0: used as temp to hold mh or receiver -+ // O1, O4: garbage temps, blown away -+ Register O1_scratch = O1; -+ Register O4_param_size = O4; // size of parameters - -- // emit WrongMethodType path first, to enable back-branch from main path -- Label wrong_method_type; -- __ bind(wrong_method_type); -- Label invoke_generic_slow_path; -- assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");; -- __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch); -- __ cmp(O1_scratch, (int) vmIntrinsics::_invokeExact); -- __ brx(Assembler::notEqual, false, Assembler::pt, invoke_generic_slow_path); -- __ delayed()->nop(); -- __ mov(O0_mtype, G5_method_type); // required by throw_WrongMethodType -- __ mov(G3_method_handle, G3_method_handle); // already in this register -- // O0 will be filled in with JavaThread in stub -- __ jump_to(AddressLiteral(StubRoutines::throw_WrongMethodTypeException_entry()), O3_scratch); -- __ delayed()->nop(); -+ address code_start = __ pc(); - - // here's where control starts out: - __ align(CodeEntryAlignment); - address entry_point = __ pc(); - -- // 
fetch the MethodType from the method handle -- // FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list. -- // This would simplify several touchy bits of code. -- // See 6984712: JSR 292 method handle calls need a clean argument base pointer -- { -- Register tem = G5_method; -- for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) { -- __ ld_ptr(Address(tem, *pchase), O0_mtype); -- tem = O0_mtype; // in case there is another indirection -+ if (VerifyMethodHandles) { -+ Label L; -+ BLOCK_COMMENT("verify_intrinsic_id {"); -+ __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch); -+ __ cmp_and_br_short(O1_scratch, (int) iid, Assembler::equal, Assembler::pt, L); -+ if (iid == vmIntrinsics::_linkToVirtual || -+ iid == vmIntrinsics::_linkToSpecial) { -+ // could do this for all kinds, but would explode assembly code size -+ trace_method_handle(_masm, "bad methodOop::intrinsic_id"); - } -+ __ STOP("bad methodOop::intrinsic_id"); -+ __ bind(L); -+ BLOCK_COMMENT("} verify_intrinsic_id"); - } - -- // given the MethodType, find out where the MH argument is buried -- __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O4_argslot); -- __ ldsw( Address(O4_argslot, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot); -- __ add(__ argument_address(O4_argslot, O4_argslot, 1), O4_argbase); -- // Note: argument_address uses its input as a scratch register! -- Address mh_receiver_slot_addr(O4_argbase, -Interpreter::stackElementSize); -- __ ld_ptr(mh_receiver_slot_addr, G3_method_handle); -+ // First task: Find out how big the argument list is. 
-+ Address O4_first_arg_addr; -+ int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid); -+ assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic"); -+ if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) { -+ __ load_sized_value(Address(G5_method, methodOopDesc::size_of_parameters_offset()), -+ O4_param_size, -+ sizeof(u2), /*is_signed*/ false); -+ // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), ""); -+ O4_first_arg_addr = __ argument_address(O4_param_size, O4_param_size, -1); -+ } else { -+ DEBUG_ONLY(O4_param_size = noreg); -+ } - -- trace_method_handle(_masm, "invokeExact"); -+ Register O0_mh = noreg; -+ if (!is_signature_polymorphic_static(iid)) { -+ __ ld_ptr(O4_first_arg_addr, O0_mh = O0); -+ DEBUG_ONLY(O4_param_size = noreg); -+ } - -- __ check_method_handle_type(O0_mtype, G3_method_handle, O1_scratch, wrong_method_type); -+ // O4_first_arg_addr is live! - -- // Nobody uses the MH receiver slot after this. Make sure. 
-- DEBUG_ONLY(__ set((int32_t) 0x999999, O1_scratch); __ st_ptr(O1_scratch, mh_receiver_slot_addr)); -+ if (TraceMethodHandles) { -+ const char* name = vmIntrinsics::name_at(iid); -+ if (*name == '_') name += 1; -+ const size_t len = strlen(name) + 50; -+ char* qname = NEW_C_HEAP_ARRAY(char, len, mtInternal); -+ const char* suffix = ""; -+ if (vmIntrinsics::method_for(iid) == NULL || -+ !vmIntrinsics::method_for(iid)->access_flags().is_public()) { -+ if (is_signature_polymorphic_static(iid)) -+ suffix = "/static"; -+ else -+ suffix = "/private"; -+ } -+ jio_snprintf(qname, len, "MethodHandle::interpreter_entry::%s%s", name, suffix); -+ if (O0_mh != noreg) -+ __ mov(O0_mh, G3_method_handle); // make stub happy -+ trace_method_handle(_masm, qname); -+ } - -- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); -+ if (iid == vmIntrinsics::_invokeBasic) { -+ generate_method_handle_dispatch(_masm, iid, O0_mh, noreg, not_for_compiler_entry); - -- // for invokeGeneric (only), apply argument and result conversions on the fly -- __ bind(invoke_generic_slow_path); --#ifdef ASSERT -- if (VerifyMethodHandles) { -- Label L; -- __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch); -- __ cmp(O1_scratch, (int) vmIntrinsics::_invokeGeneric); -- __ brx(Assembler::equal, false, Assembler::pt, L); -- __ delayed()->nop(); -- __ stop("bad methodOop::intrinsic_id"); -- __ bind(L); -+ } else { -+ // Adjust argument list by popping the trailing MemberName argument. -+ Register O0_recv = noreg; -+ if (MethodHandles::ref_kind_has_receiver(ref_kind)) { -+ // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack. 
-+ __ ld_ptr(O4_first_arg_addr, O0_recv = O0); -+ DEBUG_ONLY(O4_param_size = noreg); -+ } -+ Register G5_member = G5_method; // MemberName ptr; incoming method ptr is dead now -+ __ ld_ptr(__ argument_address(constant(0)), G5_member); -+ __ add(Gargs, Interpreter::stackElementSize, Gargs); -+ generate_method_handle_dispatch(_masm, iid, O0_recv, G5_member, not_for_compiler_entry); - } --#endif //ASSERT - -- // make room on the stack for another pointer: -- insert_arg_slots(_masm, 2 * stack_move_unit(), O4_argbase, O1_scratch, O2_scratch, O3_scratch); -- // load up an adapter from the calling type (Java weaves this) -- Register O2_form = O2_scratch; -- Register O3_adapter = O3_scratch; -- __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O2_form); -- __ load_heap_oop(Address(O2_form, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter); -- __ verify_oop(O3_adapter); -- __ st_ptr(O3_adapter, Address(O4_argbase, 1 * Interpreter::stackElementSize)); -- // As a trusted first argument, pass the type being called, so the adapter knows -- // the actual types of the arguments and return values. -- // (Generic invokers are shared among form-families of method-type.) -- __ st_ptr(O0_mtype, Address(O4_argbase, 0 * Interpreter::stackElementSize)); -- // FIXME: assert that O3_adapter is of the right method-type. -- __ mov(O3_adapter, G3_method_handle); -- trace_method_handle(_masm, "invokeGeneric"); -- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); -+ if (PrintMethodHandleStubs) { -+ address code_end = __ pc(); -+ tty->print_cr("--------"); -+ tty->print_cr("method handle interpreter entry for %s", vmIntrinsics::name_at(iid)); -+ Disassembler::decode(code_start, code_end); -+ tty->cr(); -+ } - - return entry_point; - } - --// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant. 
--static RegisterOrConstant constant(int value) { -- return RegisterOrConstant(value); --} -+void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, -+ vmIntrinsics::ID iid, -+ Register receiver_reg, -+ Register member_reg, -+ bool for_compiler_entry) { -+ assert(is_signature_polymorphic(iid), "expected invoke iid"); -+ // temps used in this code are not used in *either* compiled or interpreted calling sequences -+ Register temp1 = (for_compiler_entry ? G1_scratch : O1); -+ Register temp2 = (for_compiler_entry ? G4_scratch : O4); -+ Register temp3 = G3_scratch; -+ Register temp4 = (for_compiler_entry ? noreg : O2); -+ if (for_compiler_entry) { -+ assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : O0), "only valid assignment"); -+ assert_different_registers(temp1, O0, O1, O2, O3, O4, O5); -+ assert_different_registers(temp2, O0, O1, O2, O3, O4, O5); -+ assert_different_registers(temp3, O0, O1, O2, O3, O4, O5); -+ assert_different_registers(temp4, O0, O1, O2, O3, O4, O5); -+ } -+ if (receiver_reg != noreg) assert_different_registers(temp1, temp2, temp3, temp4, receiver_reg); -+ if (member_reg != noreg) assert_different_registers(temp1, temp2, temp3, temp4, member_reg); -+ if (!for_compiler_entry) assert_different_registers(temp1, temp2, temp3, temp4, O5_savedSP); // don't trash lastSP - --static void load_vmargslot(MacroAssembler* _masm, Address vmargslot_addr, Register result) { -- __ ldsw(vmargslot_addr, result); --} -+ if (iid == vmIntrinsics::_invokeBasic) { -+ // indirect through MH.form.vmentry.vmtarget -+ jump_to_lambda_form(_masm, receiver_reg, G5_method, temp2, temp3, for_compiler_entry); - --static RegisterOrConstant adjust_SP_and_Gargs_down_by_slots(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, -- Register temp_reg, Register temp2_reg) { -- // Keep the stack pointer 2*wordSize aligned. 
-- const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1); -- if (arg_slots.is_constant()) { -- const int offset = arg_slots.as_constant() << LogBytesPerWord; -- const int masked_offset = round_to(offset, 2 * BytesPerWord); -- const int masked_offset2 = (offset + 1*BytesPerWord) & ~TwoWordAlignmentMask; -- assert(masked_offset == masked_offset2, "must agree"); -- __ sub(Gargs, offset, Gargs); -- __ sub(SP, masked_offset, SP ); -- return offset; - } else { -+ // The method is a member invoker used by direct method handles. -+ if (VerifyMethodHandles) { -+ // make sure the trailing argument really is a MemberName (caller responsibility) -+ verify_klass(_masm, member_reg, SystemDictionaryHandles::MemberName_klass(), -+ temp1, temp2, -+ "MemberName required for invokeVirtual etc."); -+ } -+ -+ Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes())); -+ Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes())); -+ Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())); -+ -+ Register temp1_recv_klass = temp1; -+ if (iid != vmIntrinsics::_linkToStatic) { -+ __ verify_oop(receiver_reg); -+ if (iid == vmIntrinsics::_linkToSpecial) { -+ // Don't actually load the klass; just null-check the receiver. -+ __ null_check(receiver_reg); -+ } else { -+ // load receiver klass itself -+ __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes()); -+ __ load_klass(receiver_reg, temp1_recv_klass); -+ __ verify_oop(temp1_recv_klass); -+ } -+ BLOCK_COMMENT("check_receiver {"); -+ // The receiver for the MemberName must be in receiver_reg. -+ // Check the receiver against the MemberName.clazz -+ if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) { -+ // Did not load it above... 
-+ __ load_klass(receiver_reg, temp1_recv_klass); -+ __ verify_oop(temp1_recv_klass); -+ } -+ if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) { -+ Label L_ok; -+ Register temp2_defc = temp2; -+ __ load_heap_oop(member_clazz, temp2_defc); -+ load_klass_from_Class(_masm, temp2_defc, temp3, temp4); -+ __ verify_oop(temp2_defc); -+ __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok); -+ // If we get here, the type check failed! -+ __ STOP("receiver class disagrees with MemberName.clazz"); -+ __ bind(L_ok); -+ } -+ BLOCK_COMMENT("} check_receiver"); -+ } -+ if (iid == vmIntrinsics::_linkToSpecial || -+ iid == vmIntrinsics::_linkToStatic) { -+ DEBUG_ONLY(temp1_recv_klass = noreg); // these guys didn't load the recv_klass -+ } -+ -+ // Live registers at this point: -+ // member_reg - MemberName that was the trailing argument -+ // temp1_recv_klass - klass of stacked receiver, if needed -+ // O5_savedSP - interpreter linkage (if interpreted) -+ // O0..O7,G1,G4 - compiler arguments (if compiled) -+ -+ bool method_is_live = false; -+ switch (iid) { -+ case vmIntrinsics::_linkToSpecial: -+ if (VerifyMethodHandles) { -+ verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3); -+ } -+ __ load_heap_oop(member_vmtarget, G5_method); -+ method_is_live = true; -+ break; -+ -+ case vmIntrinsics::_linkToStatic: -+ if (VerifyMethodHandles) { -+ verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3); -+ } -+ __ load_heap_oop(member_vmtarget, G5_method); -+ method_is_live = true; -+ break; -+ -+ case vmIntrinsics::_linkToVirtual: - #ifdef ASSERT - { -- Label L_ok; -- __ cmp_and_br_short(arg_slots.as_register(), 0, Assembler::greaterEqual, Assembler::pt, L_ok); -- __ stop("negative arg_slots"); -- __ bind(L_ok); -+ // same as TemplateTable::invokevirtual, -+ // minus the CP setup and profiling: -+ -+ if (VerifyMethodHandles) { -+ verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3); -+ } -+ -+ // pick out the vtable 
index from the MemberName, and then we can discard it: -+ Register temp2_index = temp2; -+ __ ld_ptr(member_vmindex, temp2_index); -+ -+ if (VerifyMethodHandles) { -+ Label L_index_ok; -+ __ cmp_and_br_short(temp2_index, (int) 0, Assembler::greaterEqual, Assembler::pn, L_index_ok); -+ __ STOP("no virtual index"); -+ __ BIND(L_index_ok); -+ } -+ -+ // Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget -+ // at this point. And VerifyMethodHandles has already checked clazz, if needed. -+ -+ // get target methodOop & entry point -+ __ lookup_virtual_method(temp1_recv_klass, temp2_index, G5_method); -+ method_is_live = true; -+ break; - } --#endif -- __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg); -- __ add( temp_reg, 1*BytesPerWord, temp2_reg); -- __ andn(temp2_reg, TwoWordAlignmentMask, temp2_reg); -- __ sub(Gargs, temp_reg, Gargs); -- __ sub(SP, temp2_reg, SP ); -- return temp_reg; -+ -+ case vmIntrinsics::_linkToInterface: -+ { -+ // same as TemplateTable::invokeinterface -+ // (minus the CP setup and profiling, with different argument motion) -+ if (VerifyMethodHandles) { -+ verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3); -+ } -+ -+ Register temp3_intf = temp3; -+ __ load_heap_oop(member_clazz, temp3_intf); -+ load_klass_from_Class(_masm, temp3_intf, temp2, temp4); -+ __ verify_oop(temp3_intf); -+ -+ Register G5_index = G5_method; -+ __ ld_ptr(member_vmindex, G5_index); -+ if (VerifyMethodHandles) { -+ Label L; -+ __ cmp_and_br_short(G5_index, 0, Assembler::greaterEqual, Assembler::pt, L); -+ __ STOP("invalid vtable index for MH.invokeInterface"); -+ __ bind(L); -+ } -+ -+ // given intf, index, and recv klass, dispatch to the implementation method -+ Label L_no_such_interface; -+ Register no_sethi_temp = noreg; -+ __ lookup_interface_method(temp1_recv_klass, temp3_intf, -+ // note: next two args must be the same: -+ G5_index, G5_method, -+ temp2, no_sethi_temp, -+ L_no_such_interface); -+ -+ __ 
verify_oop(G5_method); -+ jump_from_method_handle(_masm, G5_method, temp2, temp3, for_compiler_entry); -+ -+ __ bind(L_no_such_interface); -+ AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry()); -+ __ jump_to(icce, temp3); -+ __ delayed()->nop(); -+ break; -+ } -+ -+ default: -+ fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid))); -+ break; -+ } -+ -+ if (method_is_live) { -+ // live at this point: G5_method, O5_savedSP (if interpreted) -+ -+ // After figuring out which concrete method to call, jump into it. -+ // Note that this works in the interpreter with no data motion. -+ // But the compiled version will require that rcx_recv be shifted out. -+ __ verify_oop(G5_method); -+ jump_from_method_handle(_masm, G5_method, temp1, temp3, for_compiler_entry); -+ } - } - } - --static RegisterOrConstant adjust_SP_and_Gargs_up_by_slots(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, -- Register temp_reg, Register temp2_reg) { -- // Keep the stack pointer 2*wordSize aligned. -- const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1); -- if (arg_slots.is_constant()) { -- const int offset = arg_slots.as_constant() << LogBytesPerWord; -- const int masked_offset = offset & ~TwoWordAlignmentMask; -- __ add(Gargs, offset, Gargs); -- __ add(SP, masked_offset, SP ); -- return offset; -- } else { -- __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg); -- __ andn(temp_reg, TwoWordAlignmentMask, temp2_reg); -- __ add(Gargs, temp_reg, Gargs); -- __ add(SP, temp2_reg, SP ); -- return temp_reg; -- } --} -- --// Helper to insert argument slots into the stack. --// arg_slots must be a multiple of stack_move_unit() and < 0 --// argslot_reg is decremented to point to the new (shifted) location of the argslot --// But, temp_reg ends up holding the original value of argslot_reg. 
--void MethodHandles::insert_arg_slots(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, -- Register argslot_reg, -- Register temp_reg, Register temp2_reg, Register temp3_reg) { -- // allow constant zero -- if (arg_slots.is_constant() && arg_slots.as_constant() == 0) -- return; -- -- // We have to insert at least one word, so bang the stack. -- if (UseStackBanging) { -- // Save G3_method_handle since bang_stack_with_offset uses it as a temp register -- __ mov(G3_method_handle, temp_reg); -- int frame_size = (arg_slots.is_constant() ? -1 * arg_slots.as_constant() * wordSize : 0); -- if (frame_size <= 0) -- frame_size = 256 * Interpreter::stackElementSize; // conservative -- __ generate_stack_overflow_check(frame_size); -- __ mov(temp_reg, G3_method_handle); -- } -- -- assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg, -- (!arg_slots.is_register() ? Gargs : arg_slots.as_register())); -- -- BLOCK_COMMENT("insert_arg_slots {"); -- if (VerifyMethodHandles) -- verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame"); -- if (VerifyMethodHandles) -- verify_stack_move(_masm, arg_slots, -1); -- -- // Make space on the stack for the inserted argument(s). -- // Then pull down everything shallower than argslot_reg. -- // The stacked return address gets pulled down with everything else. -- // That is, copy [sp, argslot) downward by -size words. In pseudo-code: -- // sp -= size; -- // for (temp = sp + size; temp < argslot; temp++) -- // temp[-size] = temp[0] -- // argslot -= size; -- -- // offset is temp3_reg in case of arg_slots being a register. 
-- RegisterOrConstant offset = adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg); -- __ sub(Gargs, offset, temp_reg); // source pointer for copy -- -- { -- Label loop; -- __ BIND(loop); -- // pull one word down each time through the loop -- __ ld_ptr( Address(temp_reg, 0 ), temp2_reg); -- __ st_ptr(temp2_reg, Address(temp_reg, offset) ); -- __ add(temp_reg, wordSize, temp_reg); -- __ cmp_and_brx_short(temp_reg, argslot_reg, Assembler::lessUnsigned, Assembler::pt, loop); -- } -- -- // Now move the argslot down, to point to the opened-up space. -- __ add(argslot_reg, offset, argslot_reg); -- BLOCK_COMMENT("} insert_arg_slots"); --} -- -- --// Helper to remove argument slots from the stack. --// arg_slots must be a multiple of stack_move_unit() and > 0 --void MethodHandles::remove_arg_slots(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, -- Register argslot_reg, -- Register temp_reg, Register temp2_reg, Register temp3_reg) { -- // allow constant zero -- if (arg_slots.is_constant() && arg_slots.as_constant() == 0) -- return; -- assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg, -- (!arg_slots.is_register() ? Gargs : arg_slots.as_register())); -- -- BLOCK_COMMENT("remove_arg_slots {"); -- if (VerifyMethodHandles) -- verify_argslots(_masm, arg_slots, argslot_reg, temp_reg, temp2_reg, false, -- "deleted argument(s) must fall within current frame"); -- if (VerifyMethodHandles) -- verify_stack_move(_masm, arg_slots, +1); -- -- // Pull up everything shallower than argslot. -- // Then remove the excess space on the stack. -- // The stacked return address gets pulled up with everything else. -- // That is, copy [sp, argslot) upward by size words. 
In pseudo-code: -- // for (temp = argslot-1; temp >= sp; --temp) -- // temp[size] = temp[0] -- // argslot += size; -- // sp += size; -- -- RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg); -- __ sub(argslot_reg, wordSize, temp_reg); // source pointer for copy -- -- { -- Label L_loop; -- __ BIND(L_loop); -- // pull one word up each time through the loop -- __ ld_ptr( Address(temp_reg, 0 ), temp2_reg); -- __ st_ptr(temp2_reg, Address(temp_reg, offset) ); -- __ sub(temp_reg, wordSize, temp_reg); -- __ cmp_and_brx_short(temp_reg, Gargs, Assembler::greaterEqualUnsigned, Assembler::pt, L_loop); -- } -- -- // And adjust the argslot address to point at the deletion point. -- __ add(argslot_reg, offset, argslot_reg); -- -- // We don't need the offset at this point anymore, just adjust SP and Gargs. -- (void) adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg); -- -- BLOCK_COMMENT("} remove_arg_slots"); --} -- --// Helper to copy argument slots to the top of the stack. --// The sequence starts with argslot_reg and is counted by slot_count --// slot_count must be a multiple of stack_move_unit() and >= 0 --// This function blows the temps but does not change argslot_reg. --void MethodHandles::push_arg_slots(MacroAssembler* _masm, -- Register argslot_reg, -- RegisterOrConstant slot_count, -- Register temp_reg, Register temp2_reg) { -- // allow constant zero -- if (slot_count.is_constant() && slot_count.as_constant() == 0) -- return; -- assert_different_registers(argslot_reg, temp_reg, temp2_reg, -- (!slot_count.is_register() ? 
Gargs : slot_count.as_register()), -- SP); -- assert(Interpreter::stackElementSize == wordSize, "else change this code"); -- -- BLOCK_COMMENT("push_arg_slots {"); -- if (VerifyMethodHandles) -- verify_stack_move(_masm, slot_count, 0); -- -- RegisterOrConstant offset = adjust_SP_and_Gargs_down_by_slots(_masm, slot_count, temp2_reg, temp_reg); -- -- if (slot_count.is_constant()) { -- for (int i = slot_count.as_constant() - 1; i >= 0; i--) { -- __ ld_ptr( Address(argslot_reg, i * wordSize), temp_reg); -- __ st_ptr(temp_reg, Address(Gargs, i * wordSize)); -- } -- } else { -- Label L_plural, L_loop, L_break; -- // Emit code to dynamically check for the common cases, zero and one slot. -- __ cmp(slot_count.as_register(), (int32_t) 1); -- __ br(Assembler::greater, false, Assembler::pn, L_plural); -- __ delayed()->nop(); -- __ br(Assembler::less, false, Assembler::pn, L_break); -- __ delayed()->nop(); -- __ ld_ptr( Address(argslot_reg, 0), temp_reg); -- __ st_ptr(temp_reg, Address(Gargs, 0)); -- __ ba_short(L_break); -- __ BIND(L_plural); -- -- // Loop for 2 or more: -- // top = &argslot[slot_count] -- // while (top > argslot) *(--Gargs) = *(--top) -- Register top_reg = temp_reg; -- __ add(argslot_reg, offset, top_reg); -- __ add(Gargs, offset, Gargs ); // move back up again so we can go down -- __ BIND(L_loop); -- __ sub(top_reg, wordSize, top_reg); -- __ sub(Gargs, wordSize, Gargs ); -- __ ld_ptr( Address(top_reg, 0), temp2_reg); -- __ st_ptr(temp2_reg, Address(Gargs, 0)); -- __ cmp_and_brx_short(top_reg, argslot_reg, Assembler::greaterUnsigned, Assembler::pt, L_loop); -- __ BIND(L_break); -- } -- BLOCK_COMMENT("} push_arg_slots"); --} -- --// in-place movement; no change to Gargs --// blows temp_reg, temp2_reg --void MethodHandles::move_arg_slots_up(MacroAssembler* _masm, -- Register bottom_reg, // invariant -- Address top_addr, // can use temp_reg -- RegisterOrConstant positive_distance_in_slots, // destroyed if register -- Register temp_reg, Register temp2_reg) { -- 
assert_different_registers(bottom_reg, -- temp_reg, temp2_reg, -- positive_distance_in_slots.register_or_noreg()); -- BLOCK_COMMENT("move_arg_slots_up {"); -- Label L_loop, L_break; -- Register top_reg = temp_reg; -- if (!top_addr.is_same_address(Address(top_reg, 0))) { -- __ add(top_addr, top_reg); -- } -- // Detect empty (or broken) loop: --#ifdef ASSERT -- if (VerifyMethodHandles) { -- // Verify that &bottom < &top (non-empty interval) -- Label L_ok, L_bad; -- if (positive_distance_in_slots.is_register()) { -- __ cmp(positive_distance_in_slots.as_register(), (int32_t) 0); -- __ br(Assembler::lessEqual, false, Assembler::pn, L_bad); -- __ delayed()->nop(); -- } -- __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_ok); -- __ BIND(L_bad); -- __ stop("valid bounds (copy up)"); -- __ BIND(L_ok); -- } --#endif -- __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::greaterEqualUnsigned, Assembler::pn, L_break); -- // work top down to bottom, copying contiguous data upwards -- // In pseudo-code: -- // while (--top >= bottom) *(top + distance) = *(top + 0); -- RegisterOrConstant offset = __ argument_offset(positive_distance_in_slots, positive_distance_in_slots.register_or_noreg()); -- __ BIND(L_loop); -- __ sub(top_reg, wordSize, top_reg); -- __ ld_ptr( Address(top_reg, 0 ), temp2_reg); -- __ st_ptr(temp2_reg, Address(top_reg, offset) ); -- __ cmp_and_brx_short(top_reg, bottom_reg, Assembler::greaterUnsigned, Assembler::pt, L_loop); -- assert(Interpreter::stackElementSize == wordSize, "else change loop"); -- __ BIND(L_break); -- BLOCK_COMMENT("} move_arg_slots_up"); --} -- --// in-place movement; no change to rsp --// blows temp_reg, temp2_reg --void MethodHandles::move_arg_slots_down(MacroAssembler* _masm, -- Address bottom_addr, // can use temp_reg -- Register top_reg, // invariant -- RegisterOrConstant negative_distance_in_slots, // destroyed if register -- Register temp_reg, Register temp2_reg) { -- 
assert_different_registers(top_reg, -- negative_distance_in_slots.register_or_noreg(), -- temp_reg, temp2_reg); -- BLOCK_COMMENT("move_arg_slots_down {"); -- Label L_loop, L_break; -- Register bottom_reg = temp_reg; -- if (!bottom_addr.is_same_address(Address(bottom_reg, 0))) { -- __ add(bottom_addr, bottom_reg); -- } -- // Detect empty (or broken) loop: --#ifdef ASSERT -- assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, ""); -- if (VerifyMethodHandles) { -- // Verify that &bottom < &top (non-empty interval) -- Label L_ok, L_bad; -- if (negative_distance_in_slots.is_register()) { -- __ cmp(negative_distance_in_slots.as_register(), (int32_t) 0); -- __ br(Assembler::greaterEqual, false, Assembler::pn, L_bad); -- __ delayed()->nop(); -- } -- __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_ok); -- __ BIND(L_bad); -- __ stop("valid bounds (copy down)"); -- __ BIND(L_ok); -- } --#endif -- __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::greaterEqualUnsigned, Assembler::pn, L_break); -- // work bottom up to top, copying contiguous data downwards -- // In pseudo-code: -- // while (bottom < top) *(bottom - distance) = *(bottom + 0), bottom++; -- RegisterOrConstant offset = __ argument_offset(negative_distance_in_slots, negative_distance_in_slots.register_or_noreg()); -- __ BIND(L_loop); -- __ ld_ptr( Address(bottom_reg, 0 ), temp2_reg); -- __ st_ptr(temp2_reg, Address(bottom_reg, offset) ); -- __ add(bottom_reg, wordSize, bottom_reg); -- __ cmp_and_brx_short(bottom_reg, top_reg, Assembler::lessUnsigned, Assembler::pt, L_loop); -- assert(Interpreter::stackElementSize == wordSize, "else change loop"); -- __ BIND(L_break); -- BLOCK_COMMENT("} move_arg_slots_down"); --} -- --// Copy from a field or array element to a stacked argument slot. --// is_element (ignored) says whether caller is loading an array element instead of an instance field. 
--void MethodHandles::move_typed_arg(MacroAssembler* _masm, -- BasicType type, bool is_element, -- Address value_src, Address slot_dest, -- Register temp_reg) { -- assert(!slot_dest.uses(temp_reg), "must be different register"); -- BLOCK_COMMENT(!is_element ? "move_typed_arg {" : "move_typed_arg { (array element)"); -- if (type == T_OBJECT || type == T_ARRAY) { -- __ load_heap_oop(value_src, temp_reg); -- __ verify_oop(temp_reg); -- __ st_ptr(temp_reg, slot_dest); -- } else if (type != T_VOID) { -- int arg_size = type2aelembytes(type); -- bool arg_is_signed = is_signed_subword_type(type); -- int slot_size = is_subword_type(type) ? type2aelembytes(T_INT) : arg_size; // store int sub-words as int -- __ load_sized_value( value_src, temp_reg, arg_size, arg_is_signed); -- __ store_sized_value(temp_reg, slot_dest, slot_size ); -- } -- BLOCK_COMMENT("} move_typed_arg"); --} -- --// Cf. TemplateInterpreterGenerator::generate_return_entry_for and --// InterpreterMacroAssembler::save_return_value --void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type, -- Address return_slot) { -- BLOCK_COMMENT("move_return_value {"); -- // Look at the type and pull the value out of the corresponding register. -- if (type == T_VOID) { -- // nothing to do -- } else if (type == T_OBJECT) { -- __ verify_oop(O0); -- __ st_ptr(O0, return_slot); -- } else if (type == T_INT || is_subword_type(type)) { -- int type_size = type2aelembytes(T_INT); -- __ store_sized_value(O0, return_slot, type_size); -- } else if (type == T_LONG) { -- // store the value by parts -- // Note: We assume longs are continguous (if misaligned) on the interpreter stack. 
--#if !defined(_LP64) && defined(COMPILER2) -- __ stx(G1, return_slot); --#else -- #ifdef _LP64 -- __ stx(O0, return_slot); -- #else -- if (return_slot.has_disp()) { -- // The displacement is a constant -- __ st(O0, return_slot); -- __ st(O1, return_slot.plus_disp(Interpreter::stackElementSize)); -- } else { -- __ std(O0, return_slot); -- } -- #endif --#endif -- } else if (type == T_FLOAT) { -- __ stf(FloatRegisterImpl::S, Ftos_f, return_slot); -- } else if (type == T_DOUBLE) { -- __ stf(FloatRegisterImpl::D, Ftos_f, return_slot); -- } else { -- ShouldNotReachHere(); -- } -- BLOCK_COMMENT("} move_return_value"); --} -- - #ifndef PRODUCT --void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no) { -- RicochetFrame* rf = new RicochetFrame(*fr); -- -- // ricochet slots (kept in registers for sparc) -- values.describe(frame_no, rf->register_addr(I5_savedSP), err_msg("exact_sender_sp reg for #%d", frame_no)); -- values.describe(frame_no, rf->register_addr(L5_conversion), err_msg("conversion reg for #%d", frame_no)); -- values.describe(frame_no, rf->register_addr(L4_saved_args_base), err_msg("saved_args_base reg for #%d", frame_no)); -- values.describe(frame_no, rf->register_addr(L3_saved_args_layout), err_msg("saved_args_layout reg for #%d", frame_no)); -- values.describe(frame_no, rf->register_addr(L2_saved_target), err_msg("saved_target reg for #%d", frame_no)); -- values.describe(frame_no, rf->register_addr(L1_continuation), err_msg("continuation reg for #%d", frame_no)); -- -- // relevant ricochet targets (in caller frame) -- values.describe(-1, rf->saved_args_base(), err_msg("*saved_args_base for #%d", frame_no)); -- values.describe(-1, (intptr_t *)(STACK_BIAS+(uintptr_t)rf->exact_sender_sp()), err_msg("*exact_sender_sp+STACK_BIAS for #%d", frame_no)); --} --#endif // ASSERT -- --#ifndef PRODUCT --extern "C" void print_method_handle(oop mh); - void trace_method_handle_stub(const char* adaptername, - oopDesc* mh, - intptr_t* 
saved_sp, - intptr_t* args, - intptr_t* tracing_fp) { -- bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have mh -- -- tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp, args); -+ bool has_mh = (strstr(adaptername, "/static") == NULL && -+ strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH -+ const char* mh_reg_name = has_mh ? "G3_mh" : "G3"; -+ tty->print_cr("MH %s %s="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT, -+ adaptername, mh_reg_name, -+ (intptr_t) mh, saved_sp, args); - - if (Verbose) { - // dumping last frame with frame::describe -@@ -1101,6 +558,7 @@ - - // mark saved_sp, if seems valid (may not be valid for some adapters) - intptr_t *unbiased_sp = (intptr_t *)(STACK_BIAS+(uintptr_t)saved_sp); -+ const int ARG_LIMIT = 255, SLOP = 45, UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP); - if ((unbiased_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (unbiased_sp < dump_fp)) { - values.describe(-1, unbiased_sp, "*saved_sp+STACK_BIAS"); - } -@@ -1108,10 +566,13 @@ - // Note: the unextended_sp may not be correct - tty->print_cr(" stack layout:"); - values.print(p); -- } -- -- if (has_mh) { -- print_method_handle(mh); -+ if (has_mh && mh->is_oop()) { -+ mh->print(); -+ if (java_lang_invoke_MethodHandle::is_instance(mh)) { -+ if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0) -+ java_lang_invoke_MethodHandle::form(mh)->print(); -+ } -+ } - } - } - -@@ -1154,1280 +615,3 @@ - BLOCK_COMMENT("} trace_method_handle"); - } - #endif // PRODUCT -- --// which conversion op types are implemented here? 
--int MethodHandles::adapter_conversion_ops_supported_mask() { -- return ((1<from_compiled_entry(), "method must be linked"); -- -- __ set(AddressLiteral((address) &_raise_exception_method), G5_method); -- __ ld_ptr(Address(G5_method, 0), G5_method); -- -- const int jobject_oop_offset = 0; -- __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method); -- -- adjust_SP_and_Gargs_down_by_slots(_masm, 3, noreg, noreg); -- -- __ st (O0_code, __ argument_address(constant(2), noreg, 0)); -- __ st_ptr(O1_actual, __ argument_address(constant(1), noreg, 0)); -- __ st_ptr(O2_required, __ argument_address(constant(0), noreg, 0)); -- jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch); -- } -- break; -- -- case _invokestatic_mh: -- case _invokespecial_mh: -- { -- __ load_heap_oop(G3_mh_vmtarget, G5_method); // target is a methodOop -- // Same as TemplateTable::invokestatic or invokespecial, -- // minus the CP setup and profiling: -- if (ek == _invokespecial_mh) { -- // Must load & check the first argument before entering the target method. -- __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch); -- __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle); -- __ null_check(G3_method_handle); -- __ verify_oop(G3_method_handle); -- } -- jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch); -- } -- break; -- -- case _invokevirtual_mh: -- { -- // Same as TemplateTable::invokevirtual, -- // minus the CP setup and profiling: -- -- // Pick out the vtable index and receiver offset from the MH, -- // and then we can discard it: -- Register O2_index = O2_scratch; -- __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch); -- __ ldsw(G3_dmh_vmindex, O2_index); -- // Note: The verifier allows us to ignore G3_mh_vmtarget. 
-- __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle); -- __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes()); -- -- // Get receiver klass: -- Register O0_klass = O0_argslot; -- __ load_klass(G3_method_handle, O0_klass); -- __ verify_oop(O0_klass); -- -- // Get target methodOop & entry point: -- const int base = instanceKlass::vtable_start_offset() * wordSize; -- assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); -- -- __ sll_ptr(O2_index, LogBytesPerWord, O2_index); -- __ add(O0_klass, O2_index, O0_klass); -- Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes()); -- __ ld_ptr(vtable_entry_addr, G5_method); -- -- jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch); -- } -- break; -- -- case _invokeinterface_mh: -- { -- // Same as TemplateTable::invokeinterface, -- // minus the CP setup and profiling: -- __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch); -- Register O1_intf = O1_scratch; -- Register G5_index = G5_scratch; -- __ load_heap_oop(G3_mh_vmtarget, O1_intf); -- __ ldsw(G3_dmh_vmindex, G5_index); -- __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle); -- __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes()); -- -- // Get receiver klass: -- Register O0_klass = O0_argslot; -- __ load_klass(G3_method_handle, O0_klass); -- __ verify_oop(O0_klass); -- -- // Get interface: -- Label no_such_interface; -- __ verify_oop(O1_intf); -- __ lookup_interface_method(O0_klass, O1_intf, -- // Note: next two args must be the same: -- G5_index, G5_method, -- O2_scratch, -- O3_scratch, -- no_such_interface); -- -- jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch); -- -- __ bind(no_such_interface); -- // Throw an exception. -- // For historical reasons, it will be IncompatibleClassChangeError. 
-- __ unimplemented("not tested yet"); -- __ ld_ptr(Address(O1_intf, java_mirror_offset), O2_required); // required interface -- __ mov( O0_klass, O1_actual); // bad receiver -- __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch); -- __ delayed()->mov(Bytecodes::_invokeinterface, O0_code); // who is complaining? -- } -- break; -- -- case _bound_ref_mh: -- case _bound_int_mh: -- case _bound_long_mh: -- case _bound_ref_direct_mh: -- case _bound_int_direct_mh: -- case _bound_long_direct_mh: -- { -- const bool direct_to_method = (ek >= _bound_ref_direct_mh); -- BasicType arg_type = ek_bound_mh_arg_type(ek); -- int arg_slots = type2size[arg_type]; -- -- // Make room for the new argument: -- load_vmargslot(_masm, G3_bmh_vmargslot, O0_argslot); -- __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); -- -- insert_arg_slots(_masm, arg_slots * stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); -- -- // Store bound argument into the new stack slot: -- __ load_heap_oop(G3_bmh_argument, O1_scratch); -- if (arg_type == T_OBJECT) { -- __ st_ptr(O1_scratch, Address(O0_argslot, 0)); -- } else { -- Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type)); -- move_typed_arg(_masm, arg_type, false, -- prim_value_addr, -- Address(O0_argslot, 0), -- O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3) -- } -- -- if (direct_to_method) { -- __ load_heap_oop(G3_mh_vmtarget, G5_method); // target is a methodOop -- jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch); -- } else { -- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); // target is a methodOop -- __ verify_oop(G3_method_handle); -- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); -- } -- } -- break; -- -- case _adapter_opt_profiling: -- if (java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes() != 0) { -- Address G3_mh_vmcount(G3_method_handle, 
java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes()); -- __ ld(G3_mh_vmcount, O1_scratch); -- __ add(O1_scratch, 1, O1_scratch); -- __ st(O1_scratch, G3_mh_vmcount); -- } -- // fall through -- -- case _adapter_retype_only: -- case _adapter_retype_raw: -- // Immediately jump to the next MH layer: -- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); -- __ verify_oop(G3_method_handle); -- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); -- // This is OK when all parameter types widen. -- // It is also OK when a return type narrows. -- break; -- -- case _adapter_check_cast: -- { -- // Check a reference argument before jumping to the next layer of MH: -- load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); -- Address vmarg = __ argument_address(O0_argslot, O0_argslot); -- -- // What class are we casting to? -- Register O1_klass = O1_scratch; // Interesting AMH data. -- __ load_heap_oop(G3_amh_argument, O1_klass); // This is a Class object! -- load_klass_from_Class(_masm, O1_klass, O2_scratch, O3_scratch); -- -- Label L_done; -- __ ld_ptr(vmarg, O2_scratch); -- __ br_null_short(O2_scratch, Assembler::pn, L_done); // No cast if null. -- __ load_klass(O2_scratch, O2_scratch); -- -- // Live at this point: -- // - O0_argslot : argslot index in vmarg; may be required in the failing path -- // - O1_klass : klass required by the target method -- // - O2_scratch : argument klass to test -- // - G3_method_handle: adapter method handle -- __ check_klass_subtype(O2_scratch, O1_klass, O3_scratch, O4_scratch, L_done); -- -- // If we get here, the type check failed! -- __ load_heap_oop(G3_amh_argument, O2_required); // required class -- __ ld_ptr( vmarg, O1_actual); // bad object -- __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch); -- __ delayed()->mov(Bytecodes::_checkcast, O0_code); // who is complaining? 
-- -- __ BIND(L_done); -- // Get the new MH: -- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); -- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); -- } -- break; -- -- case _adapter_prim_to_prim: -- case _adapter_ref_to_prim: -- // Handled completely by optimized cases. -- __ stop("init_AdapterMethodHandle should not issue this"); -- break; -- -- case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim --//case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim -- case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim -- case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim -- { -- // Perform an in-place conversion to int or an int subword. -- load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); -- Address value; -- Address vmarg; -- bool value_left_justified = false; -- -- switch (ek) { -- case _adapter_opt_i2i: -- value = vmarg = __ argument_address(O0_argslot, O0_argslot); -- break; -- case _adapter_opt_l2i: -- { -- // just delete the extra slot --#ifdef _LP64 -- // In V9, longs are given 2 64-bit slots in the interpreter, but the -- // data is passed in only 1 slot. -- // Keep the second slot. -- __ add(__ argument_address(O0_argslot, O0_argslot, -1), O0_argslot); -- remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); -- value = Address(O0_argslot, 4); // Get least-significant 32-bit of 64-bit value. -- vmarg = Address(O0_argslot, Interpreter::stackElementSize); --#else -- // Keep the first slot. -- __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); -- remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); -- value = Address(O0_argslot, 0); -- vmarg = value; --#endif -- } -- break; -- case _adapter_opt_unboxi: -- { -- vmarg = __ argument_address(O0_argslot, O0_argslot); -- // Load the value up from the heap. 
-- __ ld_ptr(vmarg, O1_scratch); -- int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT); --#ifdef ASSERT -- for (int bt = T_BOOLEAN; bt < T_INT; bt++) { -- if (is_subword_type(BasicType(bt))) -- assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), ""); -- } --#endif -- __ null_check(O1_scratch, value_offset); -- value = Address(O1_scratch, value_offset); --#ifdef _BIG_ENDIAN -- // Values stored in objects are packed. -- value_left_justified = true; --#endif -- } -- break; -- default: -- ShouldNotReachHere(); -- } -- -- // This check is required on _BIG_ENDIAN -- Register G5_vminfo = G5_scratch; -- __ ldsw(G3_amh_conversion, G5_vminfo); -- assert(CONV_VMINFO_SHIFT == 0, "preshifted"); -- -- // Original 32-bit vmdata word must be of this form: -- // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 | -- __ lduw(value, O1_scratch); -- if (!value_left_justified) -- __ sll(O1_scratch, G5_vminfo, O1_scratch); -- Label zero_extend, done; -- __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo); -- __ br(Assembler::zero, false, Assembler::pn, zero_extend); -- __ delayed()->nop(); -- -- // this path is taken for int->byte, int->short -- __ sra(O1_scratch, G5_vminfo, O1_scratch); -- __ ba_short(done); -- -- __ bind(zero_extend); -- // this is taken for int->char -- __ srl(O1_scratch, G5_vminfo, O1_scratch); -- -- __ bind(done); -- __ st(O1_scratch, vmarg); -- -- // Get the new MH: -- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); -- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); -- } -- break; -- -- case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim -- case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim -- { -- // Perform an in-place int-to-long or ref-to-long conversion. -- load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); -- -- // On big-endian machine we duplicate the slot and store the MSW -- // in the first slot. 
-- __ add(__ argument_address(O0_argslot, O0_argslot, 1), O0_argslot); -- -- insert_arg_slots(_masm, stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); -- -- Address arg_lsw(O0_argslot, 0); -- Address arg_msw(O0_argslot, -Interpreter::stackElementSize); -- -- switch (ek) { -- case _adapter_opt_i2l: -- { --#ifdef _LP64 -- __ ldsw(arg_lsw, O2_scratch); // Load LSW sign-extended --#else -- __ ldsw(arg_lsw, O3_scratch); // Load LSW sign-extended -- __ srlx(O3_scratch, BitsPerInt, O2_scratch); // Move MSW value to lower 32-bits for std --#endif -- __ st_long(O2_scratch, arg_msw); // Uses O2/O3 on !_LP64 -- } -- break; -- case _adapter_opt_unboxl: -- { -- // Load the value up from the heap. -- __ ld_ptr(arg_lsw, O1_scratch); -- int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG); -- assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), ""); -- __ null_check(O1_scratch, value_offset); -- __ ld_long(Address(O1_scratch, value_offset), O2_scratch); // Uses O2/O3 on !_LP64 -- __ st_long(O2_scratch, arg_msw); -- } -- break; -- default: -- ShouldNotReachHere(); -- } -- -- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); -- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); -- } -- break; -- -- case _adapter_opt_f2d: // optimized subcase of adapt_prim_to_prim -- case _adapter_opt_d2f: // optimized subcase of adapt_prim_to_prim -- { -- // perform an in-place floating primitive conversion -- __ unimplemented(entry_name(ek)); -- } -- break; -- -- case _adapter_prim_to_ref: -- __ unimplemented(entry_name(ek)); // %%% FIXME: NYI -- break; -- -- case _adapter_swap_args: -- case _adapter_rot_args: -- // handled completely by optimized cases -- __ stop("init_AdapterMethodHandle should not issue this"); -- break; -- -- case _adapter_opt_swap_1: -- case _adapter_opt_swap_2: -- case _adapter_opt_rot_1_up: -- case _adapter_opt_rot_1_down: -- case _adapter_opt_rot_2_up: -- case _adapter_opt_rot_2_down: -- { -- 
int swap_slots = ek_adapter_opt_swap_slots(ek); -- int rotate = ek_adapter_opt_swap_mode(ek); -- -- // 'argslot' is the position of the first argument to swap. -- load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); -- __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); -- if (VerifyMethodHandles) -- verify_argslot(_masm, O0_argslot, O2_scratch, "swap point must fall within current frame"); -- -- // 'vminfo' is the second. -- Register O1_destslot = O1_scratch; -- load_conversion_vminfo(_masm, G3_amh_conversion, O1_destslot); -- __ add(__ argument_address(O1_destslot, O1_destslot), O1_destslot); -- if (VerifyMethodHandles) -- verify_argslot(_masm, O1_destslot, O2_scratch, "swap point must fall within current frame"); -- -- assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here"); -- if (!rotate) { -- // simple swap -- for (int i = 0; i < swap_slots; i++) { -- __ ld_ptr( Address(O0_argslot, i * wordSize), O2_scratch); -- __ ld_ptr( Address(O1_destslot, i * wordSize), O3_scratch); -- __ st_ptr(O3_scratch, Address(O0_argslot, i * wordSize)); -- __ st_ptr(O2_scratch, Address(O1_destslot, i * wordSize)); -- } -- } else { -- // A rotate is actually pair of moves, with an "odd slot" (or pair) -- // changing place with a series of other slots. -- // First, push the "odd slot", which is going to get overwritten -- switch (swap_slots) { -- case 2 : __ ld_ptr(Address(O0_argslot, 1 * wordSize), O4_scratch); // fall-thru -- case 1 : __ ld_ptr(Address(O0_argslot, 0 * wordSize), O3_scratch); break; -- default: ShouldNotReachHere(); -- } -- if (rotate > 0) { -- // Here is rotate > 0: -- // (low mem) (high mem) -- // | dest: more_slots... | arg: odd_slot :arg+1 | -- // => -- // | dest: odd_slot | dest+1: more_slots... 
:arg+1 | -- // work argslot down to destslot, copying contiguous data upwards -- // pseudo-code: -- // argslot = src_addr - swap_bytes -- // destslot = dest_addr -- // while (argslot >= destslot) *(argslot + swap_bytes) = *(argslot + 0), argslot--; -- move_arg_slots_up(_masm, -- O1_destslot, -- Address(O0_argslot, 0), -- swap_slots, -- O0_argslot, O2_scratch); -- } else { -- // Here is the other direction, rotate < 0: -- // (low mem) (high mem) -- // | arg: odd_slot | arg+1: more_slots... :dest+1 | -- // => -- // | arg: more_slots... | dest: odd_slot :dest+1 | -- // work argslot up to destslot, copying contiguous data downwards -- // pseudo-code: -- // argslot = src_addr + swap_bytes -- // destslot = dest_addr -- // while (argslot <= destslot) *(argslot - swap_bytes) = *(argslot + 0), argslot++; -- // dest_slot denotes an exclusive upper limit -- int limit_bias = OP_ROT_ARGS_DOWN_LIMIT_BIAS; -- if (limit_bias != 0) -- __ add(O1_destslot, - limit_bias * wordSize, O1_destslot); -- move_arg_slots_down(_masm, -- Address(O0_argslot, swap_slots * wordSize), -- O1_destslot, -- -swap_slots, -- O0_argslot, O2_scratch); -- -- __ sub(O1_destslot, swap_slots * wordSize, O1_destslot); -- } -- // pop the original first chunk into the destination slot, now free -- switch (swap_slots) { -- case 2 : __ st_ptr(O4_scratch, Address(O1_destslot, 1 * wordSize)); // fall-thru -- case 1 : __ st_ptr(O3_scratch, Address(O1_destslot, 0 * wordSize)); break; -- default: ShouldNotReachHere(); -- } -- } -- -- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); -- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); -- } -- break; -- -- case _adapter_dup_args: -- { -- // 'argslot' is the position of the first argument to duplicate. -- load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); -- __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); -- -- // 'stack_move' is negative number of words to duplicate. 
-- Register O1_stack_move = O1_scratch; -- load_stack_move(_masm, G3_amh_conversion, O1_stack_move); -- -- if (VerifyMethodHandles) { -- verify_argslots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, true, -- "copied argument(s) must fall within current frame"); -- } -- -- if (UseStackBanging) { -- // Save G3_method_handle since bang_stack_with_offset uses it as a temp register -- __ mov(G3_method_handle, O3_scratch); -- // Bang the stack before pushing args. -- int frame_size = 256 * Interpreter::stackElementSize; // conservative -- __ generate_stack_overflow_check(frame_size + sizeof(RicochetFrame)); -- __ mov(O3_scratch, G3_method_handle); -- } -- // insert location is always the bottom of the argument list: -- __ neg(O1_stack_move); -- push_arg_slots(_masm, O0_argslot, O1_stack_move, O2_scratch, O3_scratch); -- -- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); -- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); -- } -- break; -- -- case _adapter_drop_args: -- { -- // 'argslot' is the position of the first argument to nuke. -- load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); -- __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); -- -- // 'stack_move' is number of words to drop. -- Register O1_stack_move = O1_scratch; -- load_stack_move(_masm, G3_amh_conversion, O1_stack_move); -- -- remove_arg_slots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, O4_scratch); -- -- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); -- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); -- } -- break; -- -- case _adapter_collect_args: -- case _adapter_fold_args: -- case _adapter_spread_args: -- // Handled completely by optimized cases. 
-- __ stop("init_AdapterMethodHandle should not issue this"); -- break; -- -- case _adapter_opt_collect_ref: -- case _adapter_opt_collect_int: -- case _adapter_opt_collect_long: -- case _adapter_opt_collect_float: -- case _adapter_opt_collect_double: -- case _adapter_opt_collect_void: -- case _adapter_opt_collect_0_ref: -- case _adapter_opt_collect_1_ref: -- case _adapter_opt_collect_2_ref: -- case _adapter_opt_collect_3_ref: -- case _adapter_opt_collect_4_ref: -- case _adapter_opt_collect_5_ref: -- case _adapter_opt_filter_S0_ref: -- case _adapter_opt_filter_S1_ref: -- case _adapter_opt_filter_S2_ref: -- case _adapter_opt_filter_S3_ref: -- case _adapter_opt_filter_S4_ref: -- case _adapter_opt_filter_S5_ref: -- case _adapter_opt_collect_2_S0_ref: -- case _adapter_opt_collect_2_S1_ref: -- case _adapter_opt_collect_2_S2_ref: -- case _adapter_opt_collect_2_S3_ref: -- case _adapter_opt_collect_2_S4_ref: -- case _adapter_opt_collect_2_S5_ref: -- case _adapter_opt_fold_ref: -- case _adapter_opt_fold_int: -- case _adapter_opt_fold_long: -- case _adapter_opt_fold_float: -- case _adapter_opt_fold_double: -- case _adapter_opt_fold_void: -- case _adapter_opt_fold_1_ref: -- case _adapter_opt_fold_2_ref: -- case _adapter_opt_fold_3_ref: -- case _adapter_opt_fold_4_ref: -- case _adapter_opt_fold_5_ref: -- { -- // Given a fresh incoming stack frame, build a new ricochet frame. -- // On entry, TOS points at a return PC, and FP is the callers frame ptr. -- // RSI/R13 has the caller's exact stack pointer, which we must also preserve. -- // RCX contains an AdapterMethodHandle of the indicated kind. -- -- // Relevant AMH fields: -- // amh.vmargslot: -- // points to the trailing edge of the arguments -- // to filter, collect, or fold. For a boxing operation, -- // it points just after the single primitive value. -- // amh.argument: -- // recursively called MH, on |collect| arguments -- // amh.vmtarget: -- // final destination MH, on return value, etc. 
-- // amh.conversion.dest: -- // tells what is the type of the return value -- // (not needed here, since dest is also derived from ek) -- // amh.conversion.vminfo: -- // points to the trailing edge of the return value -- // when the vmtarget is to be called; this is -- // equal to vmargslot + (retained ? |collect| : 0) -- -- // Pass 0 or more argument slots to the recursive target. -- int collect_count_constant = ek_adapter_opt_collect_count(ek); -- -- // The collected arguments are copied from the saved argument list: -- int collect_slot_constant = ek_adapter_opt_collect_slot(ek); -- -- assert(ek_orig == _adapter_collect_args || -- ek_orig == _adapter_fold_args, ""); -- bool retain_original_args = (ek_orig == _adapter_fold_args); -- -- // The return value is replaced (or inserted) at the 'vminfo' argslot. -- // Sometimes we can compute this statically. -- int dest_slot_constant = -1; -- if (!retain_original_args) -- dest_slot_constant = collect_slot_constant; -- else if (collect_slot_constant >= 0 && collect_count_constant >= 0) -- // We are preserving all the arguments, and the return value is prepended, -- // so the return slot is to the left (above) the |collect| sequence. -- dest_slot_constant = collect_slot_constant + collect_count_constant; -- -- // Replace all those slots by the result of the recursive call. -- // The result type can be one of ref, int, long, float, double, void. -- // In the case of void, nothing is pushed on the stack after return. -- BasicType dest = ek_adapter_opt_collect_type(ek); -- assert(dest == type2wfield[dest], "dest is a stack slot type"); -- int dest_count = type2size[dest]; -- assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size"); -- -- // Choose a return continuation. 
-- EntryKind ek_ret = _adapter_opt_return_any; -- if (dest != T_CONFLICT && OptimizeMethodHandles) { -- switch (dest) { -- case T_INT : ek_ret = _adapter_opt_return_int; break; -- case T_LONG : ek_ret = _adapter_opt_return_long; break; -- case T_FLOAT : ek_ret = _adapter_opt_return_float; break; -- case T_DOUBLE : ek_ret = _adapter_opt_return_double; break; -- case T_OBJECT : ek_ret = _adapter_opt_return_ref; break; -- case T_VOID : ek_ret = _adapter_opt_return_void; break; -- default : ShouldNotReachHere(); -- } -- if (dest == T_OBJECT && dest_slot_constant >= 0) { -- EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant); -- if (ek_try <= _adapter_opt_return_LAST && -- ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) { -- ek_ret = ek_try; -- } -- } -- assert(ek_adapter_opt_return_type(ek_ret) == dest, ""); -- } -- -- // Already pushed: ... keep1 | collect | keep2 | -- -- // Push a few extra argument words, if we need them to store the return value. -- { -- int extra_slots = 0; -- if (retain_original_args) { -- extra_slots = dest_count; -- } else if (collect_count_constant == -1) { -- extra_slots = dest_count; // collect_count might be zero; be generous -- } else if (dest_count > collect_count_constant) { -- extra_slots = (dest_count - collect_count_constant); -- } else { -- // else we know we have enough dead space in |collect| to repurpose for return values -- } -- if (extra_slots != 0) { -- __ sub(SP, round_to(extra_slots, 2) * Interpreter::stackElementSize, SP); -- } -- } -- -- // Set up Ricochet Frame. -- __ mov(SP, O5_savedSP); // record SP for the callee -- -- // One extra (empty) slot for outgoing target MH (see Gargs computation below). -- __ save_frame(2); // Note: we need to add 2 slots since frame::memory_parameter_word_sp_offset is 23. -- -- // Note: Gargs is live throughout the following, until we make our recursive call. -- // And the RF saves a copy in L4_saved_args_base. 
-- -- RicochetFrame::enter_ricochet_frame(_masm, G3_method_handle, Gargs, -- entry(ek_ret)->from_interpreted_entry()); -- -- // Compute argument base: -- // Set up Gargs for current frame, extra (empty) slot is for outgoing target MH (space reserved by save_frame above). -- __ add(FP, STACK_BIAS - (1 * Interpreter::stackElementSize), Gargs); -- -- // Now pushed: ... keep1 | collect | keep2 | extra | [RF] -- --#ifdef ASSERT -- if (VerifyMethodHandles && dest != T_CONFLICT) { -- BLOCK_COMMENT("verify AMH.conv.dest {"); -- extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O1_scratch); -- Label L_dest_ok; -- __ cmp(O1_scratch, (int) dest); -- __ br(Assembler::equal, false, Assembler::pt, L_dest_ok); -- __ delayed()->nop(); -- if (dest == T_INT) { -- for (int bt = T_BOOLEAN; bt < T_INT; bt++) { -- if (is_subword_type(BasicType(bt))) { -- __ cmp(O1_scratch, (int) bt); -- __ br(Assembler::equal, false, Assembler::pt, L_dest_ok); -- __ delayed()->nop(); -- } -- } -- } -- __ stop("bad dest in AMH.conv"); -- __ BIND(L_dest_ok); -- BLOCK_COMMENT("} verify AMH.conv.dest"); -- } --#endif //ASSERT -- -- // Find out where the original copy of the recursive argument sequence begins. -- Register O0_coll = O0_scratch; -- { -- RegisterOrConstant collect_slot = collect_slot_constant; -- if (collect_slot_constant == -1) { -- load_vmargslot(_masm, G3_amh_vmargslot, O1_scratch); -- collect_slot = O1_scratch; -- } -- // collect_slot might be 0, but we need the move anyway. -- __ add(RicochetFrame::L4_saved_args_base, __ argument_offset(collect_slot, collect_slot.register_or_noreg()), O0_coll); -- // O0_coll now points at the trailing edge of |collect| and leading edge of |keep2| -- } -- -- // Replace the old AMH with the recursive MH. (No going back now.) -- // In the case of a boxing call, the recursive call is to a 'boxer' method, -- // such as Integer.valueOf or Long.valueOf. 
In the case of a filter -- // or collect call, it will take one or more arguments, transform them, -- // and return some result, to store back into argument_base[vminfo]. -- __ load_heap_oop(G3_amh_argument, G3_method_handle); -- if (VerifyMethodHandles) verify_method_handle(_masm, G3_method_handle, O1_scratch, O2_scratch); -- -- // Calculate |collect|, the number of arguments we are collecting. -- Register O1_collect_count = O1_scratch; -- RegisterOrConstant collect_count; -- if (collect_count_constant < 0) { -- __ load_method_handle_vmslots(O1_collect_count, G3_method_handle, O2_scratch); -- collect_count = O1_collect_count; -- } else { -- collect_count = collect_count_constant; --#ifdef ASSERT -- if (VerifyMethodHandles) { -- BLOCK_COMMENT("verify collect_count_constant {"); -- __ load_method_handle_vmslots(O3_scratch, G3_method_handle, O2_scratch); -- Label L_count_ok; -- __ cmp_and_br_short(O3_scratch, collect_count_constant, Assembler::equal, Assembler::pt, L_count_ok); -- __ stop("bad vminfo in AMH.conv"); -- __ BIND(L_count_ok); -- BLOCK_COMMENT("} verify collect_count_constant"); -- } --#endif //ASSERT -- } -- -- // copy |collect| slots directly to TOS: -- push_arg_slots(_masm, O0_coll, collect_count, O2_scratch, O3_scratch); -- // Now pushed: ... keep1 | collect | keep2 | RF... | collect | -- // O0_coll still points at the trailing edge of |collect| and leading edge of |keep2| -- -- // If necessary, adjust the saved arguments to make room for the eventual return value. -- // Normal adjustment: ... keep1 | +dest+ | -collect- | keep2 | RF... | collect | -- // If retaining args: ... keep1 | +dest+ | collect | keep2 | RF... | collect | -- // In the non-retaining case, this might move keep2 either up or down. -- // We don't have to copy the whole | RF... collect | complex, -- // but we must adjust RF.saved_args_base. -- // Also, from now on, we will forget about the original copy of |collect|. -- // If we are retaining it, we will treat it as part of |keep2|. 
-- // For clarity we will define |keep3| = |collect|keep2| or |keep2|. -- -- BLOCK_COMMENT("adjust trailing arguments {"); -- // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements. -- int open_count = dest_count; -- RegisterOrConstant close_count = collect_count_constant; -- Register O1_close_count = O1_collect_count; -- if (retain_original_args) { -- close_count = constant(0); -- } else if (collect_count_constant == -1) { -- close_count = O1_collect_count; -- } -- -- // How many slots need moving? This is simply dest_slot (0 => no |keep3|). -- RegisterOrConstant keep3_count; -- Register O2_keep3_count = O2_scratch; -- if (dest_slot_constant < 0) { -- extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O2_keep3_count); -- keep3_count = O2_keep3_count; -- } else { -- keep3_count = dest_slot_constant; --#ifdef ASSERT -- if (VerifyMethodHandles && dest_slot_constant < 0) { -- BLOCK_COMMENT("verify dest_slot_constant {"); -- extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O3_scratch); -- Label L_vminfo_ok; -- __ cmp_and_br_short(O3_scratch, dest_slot_constant, Assembler::equal, Assembler::pt, L_vminfo_ok); -- __ stop("bad vminfo in AMH.conv"); -- __ BIND(L_vminfo_ok); -- BLOCK_COMMENT("} verify dest_slot_constant"); -- } --#endif //ASSERT -- } -- -- // tasks remaining: -- bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0); -- bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0)); -- bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant()); -- -- // Old and new argument locations (based at slot 0). -- // Net shift (&new_argv - &old_argv) is (close_count - open_count). 
-- bool zero_open_count = (open_count == 0); // remember this bit of info -- if (move_keep3 && fix_arg_base) { -- // It will be easier to have everything in one register: -- if (close_count.is_register()) { -- // Deduct open_count from close_count register to get a clean +/- value. -- __ sub(close_count.as_register(), open_count, close_count.as_register()); -- } else { -- close_count = close_count.as_constant() - open_count; -- } -- open_count = 0; -- } -- Register L4_old_argv = RicochetFrame::L4_saved_args_base; -- Register O3_new_argv = O3_scratch; -- if (fix_arg_base) { -- __ add(L4_old_argv, __ argument_offset(close_count, O4_scratch), O3_new_argv, -- -(open_count * Interpreter::stackElementSize)); -- } -- -- // First decide if any actual data are to be moved. -- // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change. -- // (As it happens, all movements involve an argument list size change.) -- -- // If there are variable parameters, use dynamic checks to skip around the whole mess. 
-- Label L_done; -- if (keep3_count.is_register()) { -- __ cmp_and_br_short(keep3_count.as_register(), 0, Assembler::equal, Assembler::pn, L_done); -- } -- if (close_count.is_register()) { -- __ cmp_and_br_short(close_count.as_register(), open_count, Assembler::equal, Assembler::pn, L_done); -- } -- -- if (move_keep3 && fix_arg_base) { -- bool emit_move_down = false, emit_move_up = false, emit_guard = false; -- if (!close_count.is_constant()) { -- emit_move_down = emit_guard = !zero_open_count; -- emit_move_up = true; -- } else if (open_count != close_count.as_constant()) { -- emit_move_down = (open_count > close_count.as_constant()); -- emit_move_up = !emit_move_down; -- } -- Label L_move_up; -- if (emit_guard) { -- __ cmp(close_count.as_register(), open_count); -- __ br(Assembler::greater, false, Assembler::pn, L_move_up); -- __ delayed()->nop(); -- } -- -- if (emit_move_down) { -- // Move arguments down if |+dest+| > |-collect-| -- // (This is rare, except when arguments are retained.) -- // This opens space for the return value. -- if (keep3_count.is_constant()) { -- for (int i = 0; i < keep3_count.as_constant(); i++) { -- __ ld_ptr( Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch); -- __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize) ); -- } -- } else { -- // Live: O1_close_count, O2_keep3_count, O3_new_argv -- Register argv_top = O0_scratch; -- __ add(L4_old_argv, __ argument_offset(keep3_count, O4_scratch), argv_top); -- move_arg_slots_down(_masm, -- Address(L4_old_argv, 0), // beginning of old argv -- argv_top, // end of old argv -- close_count, // distance to move down (must be negative) -- O4_scratch, G5_scratch); -- } -- } -- -- if (emit_guard) { -- __ ba_short(L_done); // assumes emit_move_up is true also -- __ BIND(L_move_up); -- } -- -- if (emit_move_up) { -- // Move arguments up if |+dest+| < |-collect-| -- // (This is usual, except when |keep3| is empty.) 
-- // This closes up the space occupied by the now-deleted collect values. -- if (keep3_count.is_constant()) { -- for (int i = keep3_count.as_constant() - 1; i >= 0; i--) { -- __ ld_ptr( Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch); -- __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize) ); -- } -- } else { -- Address argv_top(L4_old_argv, __ argument_offset(keep3_count, O4_scratch)); -- // Live: O1_close_count, O2_keep3_count, O3_new_argv -- move_arg_slots_up(_masm, -- L4_old_argv, // beginning of old argv -- argv_top, // end of old argv -- close_count, // distance to move up (must be positive) -- O4_scratch, G5_scratch); -- } -- } -- } -- __ BIND(L_done); -- -- if (fix_arg_base) { -- // adjust RF.saved_args_base -- __ mov(O3_new_argv, RicochetFrame::L4_saved_args_base); -- } -- -- if (stomp_dest) { -- // Stomp the return slot, so it doesn't hold garbage. -- // This isn't strictly necessary, but it may help detect bugs. -- __ set(RicochetFrame::RETURN_VALUE_PLACEHOLDER, O4_scratch); -- __ st_ptr(O4_scratch, Address(RicochetFrame::L4_saved_args_base, -- __ argument_offset(keep3_count, keep3_count.register_or_noreg()))); // uses O2_keep3_count -- } -- BLOCK_COMMENT("} adjust trailing arguments"); -- -- BLOCK_COMMENT("do_recursive_call"); -- __ mov(SP, O5_savedSP); // record SP for the callee -- __ set(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr() - frame::pc_return_offset), O7); -- // The globally unique bounce address has two purposes: -- // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame). -- // 2. When returned to, it cuts back the stack and redirects control flow -- // to the return handler. -- // The return handler will further cut back the stack when it takes -- // down the RF. Perhaps there is a way to streamline this further. 
-- -- if (UseStackBanging) { -- // Save G3_method_handle since bang_stack_with_offset uses it as a temp register -- __ mov(G3_method_handle, O4_scratch); -- // Bang the stack before recursive call. -- // Even if slots == 0, we are inside a RicochetFrame. -- int frame_size = collect_count.is_constant() ? collect_count.as_constant() * wordSize : -1; -- if (frame_size < 0) { -- frame_size = 256 * Interpreter::stackElementSize; // conservative -- } -- __ generate_stack_overflow_check(frame_size + sizeof(RicochetFrame)); -- __ mov(O4_scratch, G3_method_handle); -- } -- // State during recursive call: -- // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc | -- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); -- } -- break; -- -- case _adapter_opt_return_ref: -- case _adapter_opt_return_int: -- case _adapter_opt_return_long: -- case _adapter_opt_return_float: -- case _adapter_opt_return_double: -- case _adapter_opt_return_void: -- case _adapter_opt_return_S0_ref: -- case _adapter_opt_return_S1_ref: -- case _adapter_opt_return_S2_ref: -- case _adapter_opt_return_S3_ref: -- case _adapter_opt_return_S4_ref: -- case _adapter_opt_return_S5_ref: -- { -- BasicType dest_type_constant = ek_adapter_opt_return_type(ek); -- int dest_slot_constant = ek_adapter_opt_return_slot(ek); -- -- if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); -- -- if (dest_slot_constant == -1) { -- // The current stub is a general handler for this dest_type. -- // It can be called from _adapter_opt_return_any below. -- // Stash the address in a little table. 
-- assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob"); -- address return_handler = __ pc(); -- _adapter_return_handlers[dest_type_constant] = return_handler; -- if (dest_type_constant == T_INT) { -- // do the subword types too -- for (int bt = T_BOOLEAN; bt < T_INT; bt++) { -- if (is_subword_type(BasicType(bt)) && -- _adapter_return_handlers[bt] == NULL) { -- _adapter_return_handlers[bt] = return_handler; -- } -- } -- } -- } -- -- // On entry to this continuation handler, make Gargs live again. -- __ mov(RicochetFrame::L4_saved_args_base, Gargs); -- -- Register O7_temp = O7; -- Register O5_vminfo = O5; -- -- RegisterOrConstant dest_slot = dest_slot_constant; -- if (dest_slot_constant == -1) { -- extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O5_vminfo); -- dest_slot = O5_vminfo; -- } -- // Store the result back into the argslot. -- // This code uses the interpreter calling sequence, in which the return value -- // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop. -- // There are certain irregularities with floating point values, which can be seen -- // in TemplateInterpreterGenerator::generate_return_entry_for. -- move_return_value(_masm, dest_type_constant, __ argument_address(dest_slot, O7_temp)); -- -- RicochetFrame::leave_ricochet_frame(_masm, G3_method_handle, I5_savedSP, I7); -- -- // Load the final target and go. 
-- if (VerifyMethodHandles) verify_method_handle(_masm, G3_method_handle, O0_scratch, O1_scratch); -- __ restore(I5_savedSP, G0, SP); -- __ jump_to_method_handle_entry(G3_method_handle, O0_scratch); -- __ illtrap(0); -- } -- break; -- -- case _adapter_opt_return_any: -- { -- Register O7_temp = O7; -- Register O5_dest_type = O5; -- -- if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); -- extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O5_dest_type); -- __ set(ExternalAddress((address) &_adapter_return_handlers[0]), O7_temp); -- __ sll_ptr(O5_dest_type, LogBytesPerWord, O5_dest_type); -- __ ld_ptr(O7_temp, O5_dest_type, O7_temp); -- --#ifdef ASSERT -- { Label L_ok; -- __ br_notnull_short(O7_temp, Assembler::pt, L_ok); -- __ stop("bad method handle return"); -- __ BIND(L_ok); -- } --#endif //ASSERT -- __ JMP(O7_temp, 0); -- __ delayed()->nop(); -- } -- break; -- -- case _adapter_opt_spread_0: -- case _adapter_opt_spread_1_ref: -- case _adapter_opt_spread_2_ref: -- case _adapter_opt_spread_3_ref: -- case _adapter_opt_spread_4_ref: -- case _adapter_opt_spread_5_ref: -- case _adapter_opt_spread_ref: -- case _adapter_opt_spread_byte: -- case _adapter_opt_spread_char: -- case _adapter_opt_spread_short: -- case _adapter_opt_spread_int: -- case _adapter_opt_spread_long: -- case _adapter_opt_spread_float: -- case _adapter_opt_spread_double: -- { -- // spread an array out into a group of arguments -- int length_constant = ek_adapter_opt_spread_count(ek); -- bool length_can_be_zero = (length_constant == 0); -- if (length_constant < 0) { -- // some adapters with variable length must handle the zero case -- if (!OptimizeMethodHandles || -- ek_adapter_opt_spread_type(ek) != T_OBJECT) -- length_can_be_zero = true; -- } -- -- // find the address of the array argument -- load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot); -- __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot); -- -- // O0_argslot points both to the array and to the first 
output arg -- Address vmarg = Address(O0_argslot, 0); -- -- // Get the array value. -- Register O1_array = O1_scratch; -- Register O2_array_klass = O2_scratch; -- BasicType elem_type = ek_adapter_opt_spread_type(ek); -- int elem_slots = type2size[elem_type]; // 1 or 2 -- int array_slots = 1; // array is always a T_OBJECT -- int length_offset = arrayOopDesc::length_offset_in_bytes(); -- int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type); -- __ ld_ptr(vmarg, O1_array); -- -- Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done; -- if (length_can_be_zero) { -- // handle the null pointer case, if zero is allowed -- Label L_skip; -- if (length_constant < 0) { -- load_conversion_vminfo(_masm, G3_amh_conversion, O3_scratch); -- __ cmp_zero_and_br(Assembler::notZero, O3_scratch, L_skip); -- __ delayed()->nop(); // to avoid back-to-back cbcond instructions -- } -- __ br_null_short(O1_array, Assembler::pn, L_array_is_empty); -- __ BIND(L_skip); -- } -- __ null_check(O1_array, oopDesc::klass_offset_in_bytes()); -- __ load_klass(O1_array, O2_array_klass); -- -- // Check the array type. -- Register O3_klass = O3_scratch; -- __ load_heap_oop(G3_amh_argument, O3_klass); // this is a Class object! -- load_klass_from_Class(_masm, O3_klass, O4_scratch, G5_scratch); -- -- Label L_ok_array_klass, L_bad_array_klass, L_bad_array_length; -- __ check_klass_subtype(O2_array_klass, O3_klass, O4_scratch, G5_scratch, L_ok_array_klass); -- // If we get here, the type check failed! -- __ ba_short(L_bad_array_klass); -- __ BIND(L_ok_array_klass); -- -- // Check length. 
-- if (length_constant >= 0) { -- __ ldsw(Address(O1_array, length_offset), O4_scratch); -- __ cmp(O4_scratch, length_constant); -- } else { -- Register O3_vminfo = O3_scratch; -- load_conversion_vminfo(_masm, G3_amh_conversion, O3_vminfo); -- __ ldsw(Address(O1_array, length_offset), O4_scratch); -- __ cmp(O3_vminfo, O4_scratch); -- } -- __ br(Assembler::notEqual, false, Assembler::pn, L_bad_array_length); -- __ delayed()->nop(); -- -- Register O2_argslot_limit = O2_scratch; -- -- // Array length checks out. Now insert any required stack slots. -- if (length_constant == -1) { -- // Form a pointer to the end of the affected region. -- __ add(O0_argslot, Interpreter::stackElementSize, O2_argslot_limit); -- // 'stack_move' is negative number of words to insert -- // This number already accounts for elem_slots. -- Register O3_stack_move = O3_scratch; -- load_stack_move(_masm, G3_amh_conversion, O3_stack_move); -- __ cmp(O3_stack_move, 0); -- assert(stack_move_unit() < 0, "else change this comparison"); -- __ br(Assembler::less, false, Assembler::pn, L_insert_arg_space); -- __ delayed()->nop(); -- __ br(Assembler::equal, false, Assembler::pn, L_copy_args); -- __ delayed()->nop(); -- // single argument case, with no array movement -- __ BIND(L_array_is_empty); -- remove_arg_slots(_masm, -stack_move_unit() * array_slots, -- O0_argslot, O1_scratch, O2_scratch, O3_scratch); -- __ ba_short(L_args_done); // no spreading to do -- __ BIND(L_insert_arg_space); -- // come here in the usual case, stack_move < 0 (2 or more spread arguments) -- // Live: O1_array, O2_argslot_limit, O3_stack_move -- insert_arg_slots(_masm, O3_stack_move, -- O0_argslot, O4_scratch, G5_scratch, O1_scratch); -- // reload from rdx_argslot_limit since rax_argslot is now decremented -- __ ld_ptr(Address(O2_argslot_limit, -Interpreter::stackElementSize), O1_array); -- } else if (length_constant >= 1) { -- int new_slots = (length_constant * elem_slots) - array_slots; -- insert_arg_slots(_masm, new_slots * 
stack_move_unit(), -- O0_argslot, O2_scratch, O3_scratch, O4_scratch); -- } else if (length_constant == 0) { -- __ BIND(L_array_is_empty); -- remove_arg_slots(_masm, -stack_move_unit() * array_slots, -- O0_argslot, O1_scratch, O2_scratch, O3_scratch); -- } else { -- ShouldNotReachHere(); -- } -- -- // Copy from the array to the new slots. -- // Note: Stack change code preserves integrity of O0_argslot pointer. -- // So even after slot insertions, O0_argslot still points to first argument. -- // Beware: Arguments that are shallow on the stack are deep in the array, -- // and vice versa. So a downward-growing stack (the usual) has to be copied -- // elementwise in reverse order from the source array. -- __ BIND(L_copy_args); -- if (length_constant == -1) { -- // [O0_argslot, O2_argslot_limit) is the area we are inserting into. -- // Array element [0] goes at O0_argslot_limit[-wordSize]. -- Register O1_source = O1_array; -- __ add(Address(O1_array, elem0_offset), O1_source); -- Register O4_fill_ptr = O4_scratch; -- __ mov(O2_argslot_limit, O4_fill_ptr); -- Label L_loop; -- __ BIND(L_loop); -- __ add(O4_fill_ptr, -Interpreter::stackElementSize * elem_slots, O4_fill_ptr); -- move_typed_arg(_masm, elem_type, true, -- Address(O1_source, 0), Address(O4_fill_ptr, 0), -- O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3) -- __ add(O1_source, type2aelembytes(elem_type), O1_source); -- __ cmp_and_brx_short(O4_fill_ptr, O0_argslot, Assembler::greaterUnsigned, Assembler::pt, L_loop); -- } else if (length_constant == 0) { -- // nothing to copy -- } else { -- int elem_offset = elem0_offset; -- int slot_offset = length_constant * Interpreter::stackElementSize; -- for (int index = 0; index < length_constant; index++) { -- slot_offset -= Interpreter::stackElementSize * elem_slots; // fill backward -- move_typed_arg(_masm, elem_type, true, -- Address(O1_array, elem_offset), Address(O0_argslot, slot_offset), -- O2_scratch); // must be an even register for 
!_LP64 long moves (uses O2/O3) -- elem_offset += type2aelembytes(elem_type); -- } -- } -- __ BIND(L_args_done); -- -- // Arguments are spread. Move to next method handle. -- __ load_heap_oop(G3_mh_vmtarget, G3_method_handle); -- __ jump_to_method_handle_entry(G3_method_handle, O1_scratch); -- -- __ BIND(L_bad_array_klass); -- assert(!vmarg.uses(O2_required), "must be different registers"); -- __ load_heap_oop(Address(O2_array_klass, java_mirror_offset), O2_required); // required class -- __ ld_ptr( vmarg, O1_actual); // bad object -- __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch); -- __ delayed()->mov(Bytecodes::_aaload, O0_code); // who is complaining? -- -- __ bind(L_bad_array_length); -- assert(!vmarg.uses(O2_required), "must be different registers"); -- __ mov( G3_method_handle, O2_required); // required class -- __ ld_ptr(vmarg, O1_actual); // bad object -- __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch); -- __ delayed()->mov(Bytecodes::_arraylength, O0_code); // who is complaining? 
-- } -- break; -- -- default: -- DEBUG_ONLY(tty->print_cr("bad ek=%d (%s)", (int)ek, entry_name(ek))); -- ShouldNotReachHere(); -- } -- BLOCK_COMMENT(err_msg("} Entry %s", entry_name(ek))); -- -- address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry); -- __ unimplemented(entry_name(ek)); // %%% FIXME: NYI -- -- init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie)); --} -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/methodHandles_sparc.hpp ---- openjdk/hotspot/src/cpu/sparc/vm/methodHandles_sparc.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/methodHandles_sparc.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -30,186 +30,9 @@ - adapter_code_size = NOT_LP64(23000 DEBUG_ONLY(+ 40000)) LP64_ONLY(35000 DEBUG_ONLY(+ 50000)) - }; - --public: -- --class RicochetFrame : public ResourceObj { -- friend class MethodHandles; -- -- private: -- /* -- RF field x86 SPARC -- sender_pc *(rsp+0) I7-0x8 -- sender_link rbp I6+BIAS -- exact_sender_sp rsi/r13 I5_savedSP -- conversion *(rcx+&amh_conv) L5_conv -- saved_args_base rax L4_sab (cf. Gargs = G4) -- saved_args_layout #NULL L3_sal -- saved_target *(rcx+&mh_vmtgt) L2_stgt -- continuation #STUB_CON L1_cont -- */ -- static const Register L1_continuation ; // what to do when control gets back here -- static const Register L2_saved_target ; // target method handle to invoke on saved_args -- static const Register L3_saved_args_layout; // caching point for MethodTypeForm.vmlayout cookie -- static const Register L4_saved_args_base ; // base of pushed arguments (slot 0, arg N) (-3) -- static const Register L5_conversion ; // misc. 
information from original AdapterMethodHandle (-2) -- -- frame _fr; -- -- RicochetFrame(const frame& fr) : _fr(fr) { } -- -- intptr_t* register_addr(Register reg) const { -- assert((_fr.sp() + reg->sp_offset_in_saved_window()) == _fr.register_addr(reg), "must agree"); -- return _fr.register_addr(reg); -- } -- intptr_t register_value(Register reg) const { return *register_addr(reg); } -- -- public: -- intptr_t* continuation() const { return (intptr_t*) register_value(L1_continuation); } -- oop saved_target() const { return (oop) register_value(L2_saved_target); } -- oop saved_args_layout() const { return (oop) register_value(L3_saved_args_layout); } -- intptr_t* saved_args_base() const { return (intptr_t*) register_value(L4_saved_args_base); } -- intptr_t conversion() const { return register_value(L5_conversion); } -- intptr_t* exact_sender_sp() const { return (intptr_t*) register_value(I5_savedSP); } -- intptr_t* sender_link() const { return _fr.sender_sp(); } // XXX -- address sender_pc() const { return _fr.sender_pc(); } -- -- // This value is not used for much, but it apparently must be nonzero. 
-- static int frame_size_in_bytes() { return wordSize * 4; } -- -- intptr_t* extended_sender_sp() const { return saved_args_base(); } -- -- intptr_t return_value_slot_number() const { -- return adapter_conversion_vminfo(conversion()); -- } -- BasicType return_value_type() const { -- return adapter_conversion_dest_type(conversion()); -- } -- bool has_return_value_slot() const { -- return return_value_type() != T_VOID; -- } -- intptr_t* return_value_slot_addr() const { -- assert(has_return_value_slot(), ""); -- return saved_arg_slot_addr(return_value_slot_number()); -- } -- intptr_t* saved_target_slot_addr() const { -- return saved_arg_slot_addr(saved_args_length()); -- } -- intptr_t* saved_arg_slot_addr(int slot) const { -- assert(slot >= 0, ""); -- return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) ); -- } -- -- jint saved_args_length() const; -- jint saved_arg_offset(int arg) const; -- -- // GC interface -- oop* saved_target_addr() { return (oop*)register_addr(L2_saved_target); } -- oop* saved_args_layout_addr() { return (oop*)register_addr(L3_saved_args_layout); } -- -- oop compute_saved_args_layout(bool read_cache, bool write_cache); -- --#ifdef ASSERT -- // The magic number is supposed to help find ricochet frames within the bytes of stack dumps. -- enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E }; -- static const Register L0_magic_number_1 ; // cookie for debugging, at start of RSA -- static Address magic_number_2_addr() { return Address(L4_saved_args_base, -wordSize); } -- intptr_t magic_number_1() const { return register_value(L0_magic_number_1); } -- intptr_t magic_number_2() const { return saved_args_base()[-1]; } --#endif //ASSERT -- -- public: -- enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) }; -- -- void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc. 
-- -- static void generate_ricochet_blob(MacroAssembler* _masm, -- // output params: -- int* bounce_offset, -- int* exception_offset, -- int* frame_size_in_words); -- -- static void enter_ricochet_frame(MacroAssembler* _masm, -- Register recv_reg, -- Register argv_reg, -- address return_handler); -- -- static void leave_ricochet_frame(MacroAssembler* _masm, -- Register recv_reg, -- Register new_sp_reg, -- Register sender_pc_reg); -- -- static RicochetFrame* from_frame(const frame& fr) { -- RicochetFrame* rf = new RicochetFrame(fr); -- rf->verify(); -- return rf; -- } -- -- static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN; -- -- static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN; --}; -- - // Additional helper methods for MethodHandles code generation: - public: - static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg); -- static void load_conversion_vminfo(MacroAssembler* _masm, Address conversion_field_addr, Register reg); -- static void extract_conversion_vminfo(MacroAssembler* _masm, Register conversion_field_reg, Register reg); -- static void extract_conversion_dest_type(MacroAssembler* _masm, Register conversion_field_reg, Register reg); -- -- static void load_stack_move(MacroAssembler* _masm, -- Address G3_amh_conversion, -- Register G5_stack_move); -- -- static void insert_arg_slots(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, -- Register argslot_reg, -- Register temp_reg, Register temp2_reg, Register temp3_reg); -- -- static void remove_arg_slots(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, -- Register argslot_reg, -- Register temp_reg, Register temp2_reg, Register temp3_reg); -- -- static void push_arg_slots(MacroAssembler* _masm, -- Register argslot_reg, -- RegisterOrConstant slot_count, -- Register temp_reg, Register temp2_reg); -- -- static void move_arg_slots_up(MacroAssembler* _masm, -- Register bottom_reg, // 
invariant -- Address top_addr, // can use temp_reg -- RegisterOrConstant positive_distance_in_slots, -- Register temp_reg, Register temp2_reg); -- -- static void move_arg_slots_down(MacroAssembler* _masm, -- Address bottom_addr, // can use temp_reg -- Register top_reg, // invariant -- RegisterOrConstant negative_distance_in_slots, -- Register temp_reg, Register temp2_reg); -- -- static void move_typed_arg(MacroAssembler* _masm, -- BasicType type, bool is_element, -- Address value_src, Address slot_dest, -- Register temp_reg); -- -- static void move_return_value(MacroAssembler* _masm, BasicType type, -- Address return_slot); -- -- static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, -- Register temp_reg, -- const char* error_message) NOT_DEBUG_RETURN; -- -- static void verify_argslots(MacroAssembler* _masm, -- RegisterOrConstant argslot_count, -- Register argslot_reg, -- Register temp_reg, -- Register temp2_reg, -- bool negate_argslot, -- const char* error_message) NOT_DEBUG_RETURN; -- -- static void verify_stack_move(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, -- int direction) NOT_DEBUG_RETURN; - - static void verify_klass(MacroAssembler* _masm, - Register obj_reg, KlassHandle klass, -@@ -223,8 +46,17 @@ - "reference is a MH"); - } - -+ static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN; -+ - // Similar to InterpreterMacroAssembler::jump_from_interpreted. - // Takes care of special dispatch from single stepping too. 
-- static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, Register temp2); -+ static void jump_from_method_handle(MacroAssembler* _masm, Register method, -+ Register temp, Register temp2, -+ bool for_compiler_entry); -+ -+ static void jump_to_lambda_form(MacroAssembler* _masm, -+ Register recv, Register method_temp, -+ Register temp2, Register temp3, -+ bool for_compiler_entry); - - static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN; -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/sharedRuntime_sparc.cpp ---- openjdk/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -400,13 +400,13 @@ - case T_LONG: // LP64, longs compete with int args - assert(sig_bt[i+1] == T_VOID, ""); - #ifdef _LP64 -- if (int_reg_cnt < int_reg_max) int_reg_cnt++; -+ if (int_reg_cnt < int_reg_max) int_reg_cnt++; - #endif - break; - case T_OBJECT: - case T_ARRAY: - case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address -- if (int_reg_cnt < int_reg_max) int_reg_cnt++; -+ if (int_reg_cnt < int_reg_max) int_reg_cnt++; - #ifndef _LP64 - else stk_reg_pairs++; - #endif -@@ -416,11 +416,11 @@ - case T_CHAR: - case T_BYTE: - case T_BOOLEAN: -- if (int_reg_cnt < int_reg_max) int_reg_cnt++; -+ if (int_reg_cnt < int_reg_max) int_reg_cnt++; - else stk_reg_pairs++; - break; - case T_FLOAT: -- if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++; -+ if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++; - else stk_reg_pairs++; - break; - case T_DOUBLE: -@@ -436,7 +436,6 @@ - // This is where the longs/doubles start on the stack. 
- stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round - -- int int_reg_pairs = (int_reg_cnt+1) & ~1; // 32-bit 2-reg longs only - int flt_reg_pairs = (flt_reg_cnt+1) & ~1; - - // int stk_reg = frame::register_save_words*(wordSize>>2); -@@ -517,24 +516,15 @@ - stk_reg_pairs += 2; - } - #else // COMPILER2 -- if (int_reg_pairs + 1 < int_reg_max) { -- if (is_outgoing) { -- regs[i].set_pair(as_oRegister(int_reg_pairs + 1)->as_VMReg(), as_oRegister(int_reg_pairs)->as_VMReg()); -- } else { -- regs[i].set_pair(as_iRegister(int_reg_pairs + 1)->as_VMReg(), as_iRegister(int_reg_pairs)->as_VMReg()); -- } -- int_reg_pairs += 2; -- } else { - regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs)); - stk_reg_pairs += 2; -- } - #endif // COMPILER2 - #endif // _LP64 - break; - - case T_FLOAT: - if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg()); -- else regs[i].set1( VMRegImpl::stack2reg(stk_reg++)); -+ else regs[i].set1(VMRegImpl::stack2reg(stk_reg++)); - break; - case T_DOUBLE: - assert(sig_bt[i+1] == T_VOID, "expecting half"); -@@ -886,6 +876,20 @@ - __ delayed()->add(SP, G1, Gargs); - } - -+static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg, -+ address code_start, address code_end, -+ Label& L_ok) { -+ Label L_fail; -+ __ set(ExternalAddress(code_start), temp_reg); -+ __ set(pointer_delta(code_end, code_start, 1), temp2_reg); -+ __ cmp(pc_reg, temp_reg); -+ __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail); -+ __ delayed()->add(temp_reg, temp2_reg, temp_reg); -+ __ cmp(pc_reg, temp_reg); -+ __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok); -+ __ bind(L_fail); -+} -+ - void AdapterGenerator::gen_i2c_adapter( - int total_args_passed, - // VMReg max_arg, -@@ -907,6 +911,51 @@ - // This removes all sorts of headaches on the x86 side and also eliminates - // the possibility of having c2i -> i2c -> c2i -> ... endless transitions. 
- -+ // More detail: -+ // Adapters can be frameless because they do not require the caller -+ // to perform additional cleanup work, such as correcting the stack pointer. -+ // An i2c adapter is frameless because the *caller* frame, which is interpreted, -+ // routinely repairs its own stack pointer (from interpreter_frame_last_sp), -+ // even if a callee has modified the stack pointer. -+ // A c2i adapter is frameless because the *callee* frame, which is interpreted, -+ // routinely repairs its caller's stack pointer (from sender_sp, which is set -+ // up via the senderSP register). -+ // In other words, if *either* the caller or callee is interpreted, we can -+ // get the stack pointer repaired after a call. -+ // This is why c2i and i2c adapters cannot be indefinitely composed. -+ // In particular, if a c2i adapter were to somehow call an i2c adapter, -+ // both caller and callee would be compiled methods, and neither would -+ // clean up the stack pointer changes performed by the two adapters. -+ // If this happens, control eventually transfers back to the compiled -+ // caller, but with an uncorrected stack, causing delayed havoc. -+ -+ if (VerifyAdapterCalls && -+ (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) { -+ // So, let's test for cascading c2i/i2c adapters right now. 
-+ // assert(Interpreter::contains($return_addr) || -+ // StubRoutines::contains($return_addr), -+ // "i2c adapter must return to an interpreter frame"); -+ __ block_comment("verify_i2c { "); -+ Label L_ok; -+ if (Interpreter::code() != NULL) -+ range_check(masm, O7, O0, O1, -+ Interpreter::code()->code_start(), Interpreter::code()->code_end(), -+ L_ok); -+ if (StubRoutines::code1() != NULL) -+ range_check(masm, O7, O0, O1, -+ StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(), -+ L_ok); -+ if (StubRoutines::code2() != NULL) -+ range_check(masm, O7, O0, O1, -+ StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(), -+ L_ok); -+ const char* msg = "i2c adapter must return to an interpreter frame"; -+ __ block_comment(msg); -+ __ stop(msg); -+ __ bind(L_ok); -+ __ block_comment("} verify_i2ce "); -+ } -+ - // As you can see from the list of inputs & outputs there are not a lot - // of temp registers to work with: mostly G1, G3 & G4. - -@@ -1937,20 +1986,156 @@ - __ bind(done); - } - -+static void verify_oop_args(MacroAssembler* masm, -+ int total_args_passed, -+ const BasicType* sig_bt, -+ const VMRegPair* regs) { -+ Register temp_reg = G5_method; // not part of any compiled calling seq -+ if (VerifyOops) { -+ for (int i = 0; i < total_args_passed; i++) { -+ if (sig_bt[i] == T_OBJECT || -+ sig_bt[i] == T_ARRAY) { -+ VMReg r = regs[i].first(); -+ assert(r->is_valid(), "bad oop arg"); -+ if (r->is_stack()) { -+ RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS; -+ ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg); -+ __ ld_ptr(SP, ld_off, temp_reg); -+ __ verify_oop(temp_reg); -+ } else { -+ __ verify_oop(r->as_Register()); -+ } -+ } -+ } -+ } -+} -+ -+static void gen_special_dispatch(MacroAssembler* masm, -+ int total_args_passed, -+ int comp_args_on_stack, -+ vmIntrinsics::ID special_dispatch, -+ const BasicType* sig_bt, -+ const VMRegPair* regs) { -+ verify_oop_args(masm, total_args_passed, sig_bt, regs); -+ -+ // Now write 
the args into the outgoing interpreter space -+ bool has_receiver = false; -+ Register receiver_reg = noreg; -+ int member_arg_pos = -1; -+ Register member_reg = noreg; -+ int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch); -+ if (ref_kind != 0) { -+ member_arg_pos = total_args_passed - 1; // trailing MemberName argument -+ member_reg = G5_method; // known to be free at this point -+ has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); -+ } else if (special_dispatch == vmIntrinsics::_invokeBasic) { -+ has_receiver = true; -+ } else { -+ fatal(err_msg("special_dispatch=%d", special_dispatch)); -+ } -+ -+ if (member_reg != noreg) { -+ // Load the member_arg into register, if necessary. -+ assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob"); -+ assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object"); -+ VMReg r = regs[member_arg_pos].first(); -+ assert(r->is_valid(), "bad member arg"); -+ if (r->is_stack()) { -+ RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS; -+ ld_off = __ ensure_simm13_or_reg(ld_off, member_reg); -+ __ ld_ptr(SP, ld_off, member_reg); -+ } else { -+ // no data motion is needed -+ member_reg = r->as_Register(); -+ } -+ } -+ -+ if (has_receiver) { -+ // Make sure the receiver is loaded into a register. -+ assert(total_args_passed > 0, "oob"); -+ assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object"); -+ VMReg r = regs[0].first(); -+ assert(r->is_valid(), "bad receiver arg"); -+ if (r->is_stack()) { -+ // Porting note: This assumes that compiled calling conventions always -+ // pass the receiver oop in a register. If this is not true on some -+ // platform, pick a temp and load the receiver from stack. 
-+ assert(false, "receiver always in a register"); -+ receiver_reg = G3_scratch; // known to be free at this point -+ RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS; -+ ld_off = __ ensure_simm13_or_reg(ld_off, member_reg); -+ __ ld_ptr(SP, ld_off, receiver_reg); -+ } else { -+ // no data motion is needed -+ receiver_reg = r->as_Register(); -+ } -+ } -+ -+ // Figure out which address we are really jumping to: -+ MethodHandles::generate_method_handle_dispatch(masm, special_dispatch, -+ receiver_reg, member_reg, /*for_compiler_entry:*/ true); -+} -+ - // --------------------------------------------------------------------------- - // Generate a native wrapper for a given method. The method takes arguments - // in the Java compiled code convention, marshals them to the native - // convention (handlizes oops, etc), transitions to native, makes the call, - // returns to java state (possibly blocking), unhandlizes any result and - // returns. -+// -+// Critical native functions are a shorthand for the use of -+// GetPrimtiveArrayCritical and disallow the use of any other JNI -+// functions. The wrapper is expected to unpack the arguments before -+// passing them to the callee and perform checks before and after the -+// native call to ensure that they GC_locker -+// lock_critical/unlock_critical semantics are followed. Some other -+// parts of JNI setup are skipped like the tear down of the JNI handle -+// block and the check for pending exceptions it's impossible for them -+// to be thrown. -+// -+// They are roughly structured like this: -+// if (GC_locker::needs_gc()) -+// SharedRuntime::block_for_jni_critical(); -+// tranistion to thread_in_native -+// unpack arrray arguments and call native entry point -+// check for safepoint in progress -+// check if any thread suspend flags are set -+// call into JVM and possible unlock the JNI critical -+// if a GC was suppressed while in the critical native. 
-+// transition back to thread_in_Java -+// return to caller -+// - nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm, - methodHandle method, - int compile_id, - int total_in_args, - int comp_args_on_stack, // in VMRegStackSlots -- BasicType *in_sig_bt, -- VMRegPair *in_regs, -+ BasicType* in_sig_bt, -+ VMRegPair* in_regs, - BasicType ret_type) { -+ if (method->is_method_handle_intrinsic()) { -+ vmIntrinsics::ID iid = method->intrinsic_id(); -+ intptr_t start = (intptr_t)__ pc(); -+ int vep_offset = ((intptr_t)__ pc()) - start; -+ gen_special_dispatch(masm, -+ total_in_args, -+ comp_args_on_stack, -+ method->intrinsic_id(), -+ in_sig_bt, -+ in_regs); -+ int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period -+ __ flush(); -+ int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually -+ return nmethod::new_native_nmethod(method, -+ compile_id, -+ masm->code(), -+ vep_offset, -+ frame_complete, -+ stack_slots / VMRegImpl::slots_per_word, -+ in_ByteSize(-1), -+ in_ByteSize(-1), -+ (OopMapSet*)NULL); -+ } - bool is_critical_native = true; - address native_func = method->critical_native_function(); - if (native_func == NULL) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/stubGenerator_sparc.cpp ---- openjdk/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -3404,14 +3404,6 @@ - StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry; - #endif // COMPILER2 !=> _LP64 - -- // Build this early so it's available for the interpreter. The -- // stub expects the required and actual type to already be in O1 -- // and O2 respectively. 
-- StubRoutines::_throw_WrongMethodTypeException_entry = -- generate_throw_exception("WrongMethodTypeException throw_exception", -- CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException), -- G5_method_type, G3_method_handle); -- - // Build this early so it's available for the interpreter. - StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); - } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/templateInterpreter_sparc.cpp ---- openjdk/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -692,9 +692,9 @@ - // Need to differentiate between igetfield, agetfield, bgetfield etc. - // because they are different sizes. - // Get the type from the constant pool cache -- __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch); -- // Make sure we don't need to mask G1_scratch for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -+ __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch); -+ // Make sure we don't need to mask G1_scratch after the above shift -+ ConstantPoolCacheEntry::verify_tos_state_shift(); - __ cmp(G1_scratch, atos ); - __ br(Assembler::equal, true, Assembler::pt, xreturn_path); - __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i); -@@ -1659,7 +1659,7 @@ - int computed_sp_adjustment = (delta > 0) ? 
round_to(delta, WordsPerLong) : 0; - *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS; - } else { -- assert(caller->is_compiled_frame() || caller->is_entry_frame() || caller->is_ricochet_frame(), "only possible cases"); -+ assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases"); - // Don't have Lesp available; lay out locals block in the caller - // adjacent to the register window save area. - // -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/templateTable_sparc.cpp ---- openjdk/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -378,7 +378,7 @@ - Register Rcache = G3_scratch; - Register Rscratch = G4_scratch; - -- resolve_cache_and_index(f1_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1)); -+ resolve_cache_and_index(f12_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1)); - - __ verify_oop(Otos_i); - -@@ -2093,10 +2093,12 @@ - // Depends on cpCacheOop layout! - Label resolved; - -- if (byte_no == f1_oop) { -- // We are resolved if the f1 field contains a non-null object (CallSite, etc.) -- // This kind of CP cache entry does not need to match the flags byte, because -+ if (byte_no == f12_oop) { -+ // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.) -+ // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because - // there is a 1-1 relation between bytecode type and CP entry type. -+ // The caller will also load a methodOop from f2. 
-+ assert(result != noreg, ""); - assert_different_registers(result, Rcache); - __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); - __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + -@@ -2123,10 +2125,13 @@ - case Bytecodes::_invokespecial : // fall through - case Bytecodes::_invokestatic : // fall through - case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; -+ case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break; - case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; - case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; - case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; -- default : ShouldNotReachHere(); break; -+ default: -+ fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode()))); -+ break; - } - // first time invocation - must resolve first - __ call_VM(noreg, entry, O1); -@@ -2139,48 +2144,54 @@ - } - - void TemplateTable::load_invoke_cp_cache_entry(int byte_no, -- Register Rmethod, -- Register Ritable_index, -- Register Rflags, -+ Register method, -+ Register itable_index, -+ Register flags, - bool is_invokevirtual, - bool is_invokevfinal, - bool is_invokedynamic) { - // Uses both G3_scratch and G4_scratch -- Register Rcache = G3_scratch; -- Register Rscratch = G4_scratch; -- assert_different_registers(Rcache, Rmethod, Ritable_index); -- -- ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); -+ Register cache = G3_scratch; -+ Register index = G4_scratch; -+ assert_different_registers(cache, method, itable_index); - - // determine constant pool cache field offsets -+ assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); - const int method_offset = in_bytes( -- cp_base_offset + -- 
(is_invokevirtual -+ constantPoolCacheOopDesc::base_offset() + -+ ((byte_no == f2_byte) - ? ConstantPoolCacheEntry::f2_offset() - : ConstantPoolCacheEntry::f1_offset() - ) - ); -- const int flags_offset = in_bytes(cp_base_offset + -+ const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + - ConstantPoolCacheEntry::flags_offset()); - // access constant pool cache fields -- const int index_offset = in_bytes(cp_base_offset + -+ const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + - ConstantPoolCacheEntry::f2_offset()); - - if (is_invokevfinal) { -- __ get_cache_and_index_at_bcp(Rcache, Rscratch, 1); -- __ ld_ptr(Rcache, method_offset, Rmethod); -- } else if (byte_no == f1_oop) { -- // Resolved f1_oop goes directly into 'method' register. -- resolve_cache_and_index(byte_no, Rmethod, Rcache, Rscratch, sizeof(u4)); -+ __ get_cache_and_index_at_bcp(cache, index, 1); -+ __ ld_ptr(Address(cache, method_offset), method); -+ } else if (byte_no == f12_oop) { -+ // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'. -+ // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset). -+ // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle. -+ size_t index_size = (is_invokedynamic ? 
sizeof(u4) : sizeof(u2)); -+ resolve_cache_and_index(byte_no, itable_index, cache, index, index_size); -+ __ ld_ptr(Address(cache, index_offset), method); -+ itable_index = noreg; // hack to disable load below - } else { -- resolve_cache_and_index(byte_no, noreg, Rcache, Rscratch, sizeof(u2)); -- __ ld_ptr(Rcache, method_offset, Rmethod); -+ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2)); -+ __ ld_ptr(Address(cache, method_offset), method); - } - -- if (Ritable_index != noreg) { -- __ ld_ptr(Rcache, index_offset, Ritable_index); -+ if (itable_index != noreg) { -+ // pick up itable index from f2 also: -+ assert(byte_no == f1_byte, "already picked up f1"); -+ __ ld_ptr(Address(cache, index_offset), itable_index); - } -- __ ld_ptr(Rcache, flags_offset, Rflags); -+ __ ld_ptr(Address(cache, flags_offset), flags); - } - - // The Rcache register must be set before call -@@ -2272,7 +2283,7 @@ - - if (__ membar_has_effect(membar_bits)) { - // Get volatile flag -- __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); -+ __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); - __ and3(Rflags, Lscratch, Lscratch); - } - -@@ -2280,9 +2291,9 @@ - - // compute field type - Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj; -- __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags); -- // Make sure we don't need to mask Rflags for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -+ __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); -+ // Make sure we don't need to mask Rflags after the above shift -+ ConstantPoolCacheEntry::verify_tos_state_shift(); - - // Check atos before itos for getstatic, more likely (in Queens at least) - __ cmp(Rflags, atos); -@@ -2445,7 +2456,7 @@ - if (__ membar_has_effect(membar_bits)) { - // Get volatile flag - __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags); -- __ set((1 << ConstantPoolCacheEntry::volatileField), 
Lscratch); -+ __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); - } - - switch (bytecode()) { -@@ -2569,9 +2580,9 @@ - Label two_word, valsizeknown; - __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); - __ mov(Lesp, G4_scratch); -- __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags); -- // Make sure we don't need to mask Rflags for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -+ __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); -+ // Make sure we don't need to mask Rflags after the above shift -+ ConstantPoolCacheEntry::verify_tos_state_shift(); - __ cmp(Rflags, ltos); - __ br(Assembler::equal, false, Assembler::pt, two_word); - __ delayed()->cmp(Rflags, dtos); -@@ -2625,7 +2636,7 @@ - - Label notVolatile, checkVolatile, exit; - if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { -- __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); -+ __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); - __ and3(Rflags, Lscratch, Lscratch); - - if (__ membar_has_effect(read_bits)) { -@@ -2635,9 +2646,9 @@ - } - } - -- __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags); -- // Make sure we don't need to mask Rflags for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -+ __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags); -+ // Make sure we don't need to mask Rflags after the above shift -+ ConstantPoolCacheEntry::verify_tos_state_shift(); - - // compute field type - Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat; -@@ -2833,7 +2844,7 @@ - Label notVolatile, checkVolatile, exit; - if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { - __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); -- __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); -+ __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), 
Lscratch); - __ and3(Rflags, Lscratch, Lscratch); - if (__ membar_has_effect(read_bits)) { - __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile); -@@ -2916,7 +2927,7 @@ - - // Test volatile - Label notVolatile; -- __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); -+ __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch); - __ btst(Rflags, Lscratch); - __ br(Assembler::zero, false, Assembler::pt, notVolatile); - __ delayed()->nop(); -@@ -2936,27 +2947,82 @@ - ShouldNotReachHere(); - } - -+ -+void TemplateTable::prepare_invoke(int byte_no, -+ Register method, // linked method (or i-klass) -+ Register ra, // return address -+ Register index, // itable index, MethodType, etc. -+ Register recv, // if caller wants to see it -+ Register flags // if caller wants to test it -+ ) { -+ // determine flags -+ const Bytecodes::Code code = bytecode(); -+ const bool is_invokeinterface = code == Bytecodes::_invokeinterface; -+ const bool is_invokedynamic = code == Bytecodes::_invokedynamic; -+ const bool is_invokehandle = code == Bytecodes::_invokehandle; -+ const bool is_invokevirtual = code == Bytecodes::_invokevirtual; -+ const bool is_invokespecial = code == Bytecodes::_invokespecial; -+ const bool load_receiver = (recv != noreg); -+ assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); -+ assert(recv == noreg || recv == O0, ""); -+ assert(flags == noreg || flags == O1, ""); -+ -+ // setup registers & access constant pool cache -+ if (recv == noreg) recv = O0; -+ if (flags == noreg) flags = O1; -+ const Register temp = O2; -+ assert_different_registers(method, ra, index, recv, flags, temp); -+ -+ load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); -+ -+ __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore -+ -+ // maybe push appendix to arguments -+ if (is_invokedynamic || is_invokehandle) { -+ Label 
L_no_push; -+ __ verify_oop(index); -+ __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp); -+ __ btst(flags, temp); -+ __ br(Assembler::zero, false, Assembler::pt, L_no_push); -+ __ delayed()->nop(); -+ // Push the appendix as a trailing parameter. -+ // This must be done before we get the receiver, -+ // since the parameter_size includes it. -+ __ push_ptr(index); // push appendix (MethodType, CallSite, etc.) -+ __ bind(L_no_push); -+ } -+ -+ // load receiver if needed (after appendix is pushed so parameter size is correct) -+ if (load_receiver) { -+ __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp); // get parameter size -+ __ load_receiver(temp, recv); // __ argument_address uses Gargs but we need Lesp -+ __ verify_oop(recv); -+ } -+ -+ // compute return type -+ __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra); -+ // Make sure we don't need to mask flags after the above shift -+ ConstantPoolCacheEntry::verify_tos_state_shift(); -+ // load return address -+ { -+ const address table_addr = (is_invokeinterface || is_invokedynamic) ? 
-+ (address)Interpreter::return_5_addrs_by_index_table() : -+ (address)Interpreter::return_3_addrs_by_index_table(); -+ AddressLiteral table(table_addr); -+ __ set(table, temp); -+ __ sll(ra, LogBytesPerWord, ra); -+ __ ld_ptr(Address(temp, ra), ra); -+ } -+} -+ -+ - void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) { - Register Rtemp = G4_scratch; - Register Rcall = Rindex; - assert_different_registers(Rcall, G5_method, Gargs, Rret); - - // get target methodOop & entry point -- const int base = instanceKlass::vtable_start_offset() * wordSize; -- if (vtableEntry::size() % 3 == 0) { -- // scale the vtable index by 12: -- int one_third = vtableEntry::size() / 3; -- __ sll(Rindex, exact_log2(one_third * 1 * wordSize), Rtemp); -- __ sll(Rindex, exact_log2(one_third * 2 * wordSize), Rindex); -- __ add(Rindex, Rtemp, Rindex); -- } else { -- // scale the vtable index by 8: -- __ sll(Rindex, exact_log2(vtableEntry::size() * wordSize), Rindex); -- } -- -- __ add(Rrecv, Rindex, Rrecv); -- __ ld_ptr(Rrecv, base + vtableEntry::method_offset_in_bytes(), G5_method); -- -+ __ lookup_virtual_method(Rrecv, Rindex, G5_method); - __ call_from_interpreter(Rcall, Gargs, Rret); - } - -@@ -2965,16 +3031,16 @@ - assert(byte_no == f2_byte, "use this argument"); - - Register Rscratch = G3_scratch; -- Register Rtemp = G4_scratch; -- Register Rret = Lscratch; -- Register Rrecv = G5_method; -+ Register Rtemp = G4_scratch; -+ Register Rret = Lscratch; -+ Register O0_recv = O0; - Label notFinal; - - load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false); - __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore - - // Check for vfinal -- __ set((1 << ConstantPoolCacheEntry::vfinalMethod), G4_scratch); -+ __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch); - __ btst(Rret, G4_scratch); - __ br(Assembler::zero, false, Assembler::pt, notFinal); - __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets 
number of parameters -@@ -2986,27 +3052,27 @@ - __ bind(notFinal); - - __ mov(G5_method, Rscratch); // better scratch register -- __ load_receiver(G4_scratch, O0); // gets receiverOop -- // receiver is in O0 -- __ verify_oop(O0); -+ __ load_receiver(G4_scratch, O0_recv); // gets receiverOop -+ // receiver is in O0_recv -+ __ verify_oop(O0_recv); - - // get return address - AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); - __ set(table, Rtemp); -- __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type -- // Make sure we don't need to mask Rret for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -+ __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type -+ // Make sure we don't need to mask Rret after the above shift -+ ConstantPoolCacheEntry::verify_tos_state_shift(); - __ sll(Rret, LogBytesPerWord, Rret); - __ ld_ptr(Rtemp, Rret, Rret); // get return address - - // get receiver klass -- __ null_check(O0, oopDesc::klass_offset_in_bytes()); -- __ load_klass(O0, Rrecv); -- __ verify_oop(Rrecv); -- -- __ profile_virtual_call(Rrecv, O4); -- -- generate_vtable_call(Rrecv, Rscratch, Rret); -+ __ null_check(O0_recv, oopDesc::klass_offset_in_bytes()); -+ __ load_klass(O0_recv, O0_recv); -+ __ verify_oop(O0_recv); -+ -+ __ profile_virtual_call(O0_recv, O4); -+ -+ generate_vtable_call(O0_recv, Rscratch, Rret); - } - - void TemplateTable::fast_invokevfinal(int byte_no) { -@@ -3036,9 +3102,9 @@ - // get return address - AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); - __ set(table, Rtemp); -- __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type -- // Make sure we don't need to mask Rret for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -+ __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type -+ // Make sure we don't need to mask Rret after the above shift -+ 
ConstantPoolCacheEntry::verify_tos_state_shift(); - __ sll(Rret, LogBytesPerWord, Rret); - __ ld_ptr(Rtemp, Rret, Rret); // get return address - -@@ -3047,65 +3113,37 @@ - __ call_from_interpreter(Rscratch, Gargs, Rret); - } - -+ - void TemplateTable::invokespecial(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); - -- Register Rscratch = G3_scratch; -- Register Rtemp = G4_scratch; -- Register Rret = Lscratch; -- -- load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false); -- __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore -- -+ const Register Rret = Lscratch; -+ const Register O0_recv = O0; -+ const Register Rscratch = G3_scratch; -+ -+ prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv); // get receiver also for null check -+ __ null_check(O0_recv); -+ -+ // do the call - __ verify_oop(G5_method); -- -- __ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch); -- __ load_receiver(G4_scratch, O0); -- -- // receiver NULL check -- __ null_check(O0); -- - __ profile_call(O4); -- -- // get return address -- AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); -- __ set(table, Rtemp); -- __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type -- // Make sure we don't need to mask Rret for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -- __ sll(Rret, LogBytesPerWord, Rret); -- __ ld_ptr(Rtemp, Rret, Rret); // get return address -- -- // do the call - __ call_from_interpreter(Rscratch, Gargs, Rret); - } - -+ - void TemplateTable::invokestatic(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); - -- Register Rscratch = G3_scratch; -- Register Rtemp = G4_scratch; -- Register Rret = Lscratch; -- -- load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false); -- __ mov(SP, O5_savedSP); // record SP that we 
wanted the callee to restore -- -+ const Register Rret = Lscratch; -+ const Register Rscratch = G3_scratch; -+ -+ prepare_invoke(byte_no, G5_method, Rret); // get f1 methodOop -+ -+ // do the call - __ verify_oop(G5_method); -- - __ profile_call(O4); -- -- // get return address -- AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); -- __ set(table, Rtemp); -- __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type -- // Make sure we don't need to mask Rret for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -- __ sll(Rret, LogBytesPerWord, Rret); -- __ ld_ptr(Rtemp, Rret, Rret); // get return address -- -- // do the call - __ call_from_interpreter(Rscratch, Gargs, Rret); - } - -@@ -3122,7 +3160,7 @@ - Label notFinal; - - // Check for vfinal -- __ set((1 << ConstantPoolCacheEntry::vfinalMethod), Rscratch); -+ __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch); - __ btst(Rflags, Rscratch); - __ br(Assembler::zero, false, Assembler::pt, notFinal); - __ delayed()->nop(); -@@ -3144,53 +3182,37 @@ - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); - -- Register Rscratch = G4_scratch; -- Register Rret = G3_scratch; -- Register Rindex = Lscratch; -- Register Rinterface = G1_scratch; -- Register RklassOop = G5_method; -- Register Rflags = O1; -+ const Register Rinterface = G1_scratch; -+ const Register Rret = G3_scratch; -+ const Register Rindex = Lscratch; -+ const Register O0_recv = O0; -+ const Register O1_flags = O1; -+ const Register O2_klassOop = O2; -+ const Register Rscratch = G4_scratch; - assert_different_registers(Rscratch, G5_method); - -- load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, /*virtual*/ false, false, false); -- __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore -- -- // get receiver -- __ and3(Rflags, 0xFF, Rscratch); // gets number of parameters -- __ load_receiver(Rscratch, O0); -- __ verify_oop(O0); -- -- __ 
mov(Rflags, Rret); -- -- // get return address -- AddressLiteral table(Interpreter::return_5_addrs_by_index_table()); -- __ set(table, Rscratch); -- __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type -- // Make sure we don't need to mask Rret for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -- __ sll(Rret, LogBytesPerWord, Rret); -- __ ld_ptr(Rscratch, Rret, Rret); // get return address -+ prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags); - - // get receiver klass -- __ null_check(O0, oopDesc::klass_offset_in_bytes()); -- __ load_klass(O0, RklassOop); -- __ verify_oop(RklassOop); -+ __ null_check(O0_recv, oopDesc::klass_offset_in_bytes()); -+ __ load_klass(O0_recv, O2_klassOop); -+ __ verify_oop(O2_klassOop); - - // Special case of invokeinterface called for virtual method of - // java.lang.Object. See cpCacheOop.cpp for details. - // This code isn't produced by javac, but could be produced by - // another compliant java compiler. 
- Label notMethod; -- __ set((1 << ConstantPoolCacheEntry::methodInterface), Rscratch); -- __ btst(Rflags, Rscratch); -+ __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch); -+ __ btst(O1_flags, Rscratch); - __ br(Assembler::zero, false, Assembler::pt, notMethod); - __ delayed()->nop(); - -- invokeinterface_object_method(RklassOop, Rinterface, Rret, Rflags); -+ invokeinterface_object_method(O2_klassOop, Rinterface, Rret, O1_flags); - - __ bind(notMethod); - -- __ profile_virtual_call(RklassOop, O4); -+ __ profile_virtual_call(O2_klassOop, O4); - - // - // find entry point to call -@@ -3199,9 +3221,9 @@ - // compute start of first itableOffsetEntry (which is at end of vtable) - const int base = instanceKlass::vtable_start_offset() * wordSize; - Label search; -- Register Rtemp = Rflags; -- -- __ ld(RklassOop, instanceKlass::vtable_length_offset() * wordSize, Rtemp); -+ Register Rtemp = O1_flags; -+ -+ __ ld(O2_klassOop, instanceKlass::vtable_length_offset() * wordSize, Rtemp); - if (align_object_offset(1) > 1) { - __ round_to(Rtemp, align_object_offset(1)); - } -@@ -3212,7 +3234,7 @@ - __ set(base, Rscratch); - __ add(Rscratch, Rtemp, Rtemp); - } -- __ add(RklassOop, Rtemp, Rscratch); -+ __ add(O2_klassOop, Rtemp, Rscratch); - - __ bind(search); - -@@ -3244,7 +3266,7 @@ - assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below"); - __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex); // Rindex *= 8; - __ add(Rscratch, Rindex, Rscratch); -- __ ld_ptr(RklassOop, Rscratch, G5_method); -+ __ ld_ptr(O2_klassOop, Rscratch, G5_method); - - // Check for abstract method error. 
- { -@@ -3260,13 +3282,42 @@ - - __ verify_oop(G5_method); - __ call_from_interpreter(Rcall, Gargs, Rret); -- -+} -+ -+ -+void TemplateTable::invokehandle(int byte_no) { -+ transition(vtos, vtos); -+ assert(byte_no == f12_oop, "use this argument"); -+ -+ if (!EnableInvokeDynamic) { -+ // rewriter does not generate this bytecode -+ __ should_not_reach_here(); -+ return; -+ } -+ -+ const Register Rret = Lscratch; -+ const Register G4_mtype = G4_scratch; // f1 -+ const Register O0_recv = O0; -+ const Register Rscratch = G3_scratch; -+ -+ prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv); -+ __ null_check(O0_recv); -+ -+ // G4: MethodType object (from f1) -+ // G5: MH.linkToCallSite method (from f2) -+ -+ // Note: G4_mtype is already pushed (if necessary) by prepare_invoke -+ -+ // do the call -+ __ verify_oop(G5_method); -+ __ profile_final_call(O4); // FIXME: profile the LambdaForm also -+ __ call_from_interpreter(Rscratch, Gargs, Rret); - } - - - void TemplateTable::invokedynamic(int byte_no) { - transition(vtos, vtos); -- assert(byte_no == f1_oop, "use this argument"); -+ assert(byte_no == f12_oop, "use this argument"); - - if (!EnableInvokeDynamic) { - // We should not encounter this bytecode if !EnableInvokeDynamic. 
-@@ -3279,42 +3330,24 @@ - return; - } - -- // G5: CallSite object (f1) -- // XX: unused (f2) -- // XX: flags (unused) -- -- Register G5_callsite = G5_method; -- Register Rscratch = G3_scratch; -- Register Rtemp = G1_scratch; -- Register Rret = Lscratch; -- -- load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret, -- /*virtual*/ false, /*vfinal*/ false, /*indy*/ true); -- __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore -- -+ const Register Rret = Lscratch; -+ const Register G4_callsite = G4_scratch; -+ const Register Rscratch = G3_scratch; -+ -+ prepare_invoke(byte_no, G5_method, Rret, G4_callsite); -+ -+ // G4: CallSite object (from f1) -+ // G5: MH.linkToCallSite method (from f2) -+ -+ // Note: G4_callsite is already pushed by prepare_invoke -+ -+ // %%% should make a type profile for any invokedynamic that takes a ref argument - // profile this call - __ profile_call(O4); - -- // get return address -- AddressLiteral table(Interpreter::return_5_addrs_by_index_table()); -- __ set(table, Rtemp); -- __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type -- // Make sure we don't need to mask Rret for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -- __ sll(Rret, LogBytesPerWord, Rret); -- __ ld_ptr(Rtemp, Rret, Rret); // get return address -- -- __ verify_oop(G5_callsite); -- __ load_heap_oop(G5_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle); -- __ null_check(G3_method_handle); -- __ verify_oop(G3_method_handle); -- -- // Adjust Rret first so Llast_SP can be same as Rret -- __ add(Rret, -frame::pc_return_offset, O7); -- __ add(Lesp, BytesPerWord, Gargs); // setup parameter pointer -- __ jump_to_method_handle_entry(G3_method_handle, Rtemp, /* emit_delayed_nop */ false); -- // Record SP so we can remove any stack space allocated by adapter transition -- __ delayed()->mov(SP, Llast_SP); -+ // do the call -+ __ verify_oop(G5_method); 
-+ __ call_from_interpreter(Rscratch, Gargs, Rret); - } - - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/templateTable_sparc.hpp ---- openjdk/hotspot/src/cpu/sparc/vm/templateTable_sparc.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/templateTable_sparc.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -25,6 +25,13 @@ - #ifndef CPU_SPARC_VM_TEMPLATETABLE_SPARC_HPP - #define CPU_SPARC_VM_TEMPLATETABLE_SPARC_HPP - -+ static void prepare_invoke(int byte_no, -+ Register method, // linked method (or i-klass) -+ Register ra, // return address -+ Register index = noreg, // itable index, MethodType, etc. -+ Register recv = noreg, // if caller wants to see it -+ Register flags = noreg // if caller wants to test it -+ ); - // helper function - static void invokevfinal_helper(Register Rcache, Register Rret); - static void invokeinterface_object_method(Register RklassOop, Register Rcall, -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/sparc/vm/vtableStubs_sparc.cpp ---- openjdk/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -70,7 +70,6 @@ - __ load_klass(O0, G3_scratch); - - // set methodOop (in case of interpreted method), and destination address -- int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); - #ifndef PRODUCT - if (DebugVtables) { - Label L; -@@ -82,13 +81,8 @@ - __ bind(L); - } - #endif -- int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes(); -- if (Assembler::is_simm13(v_off)) { -- __ ld_ptr(G3, v_off, G5_method); -- } else { -- __ set(v_off,G5); -- __ ld_ptr(G3, G5, G5_method); -- } -+ -+ __ lookup_virtual_method(G3_scratch, vtable_index, G5_method); - - #ifndef PRODUCT - if (DebugVtables) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/assembler_x86.cpp ---- openjdk/hotspot/src/cpu/x86/vm/assembler_x86.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ 
openjdk/hotspot/src/cpu/x86/vm/assembler_x86.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -41,6 +41,15 @@ - #include "gc_implementation/g1/heapRegion.hpp" - #endif - -+#ifdef PRODUCT -+#define BLOCK_COMMENT(str) /* nothing */ -+#define STOP(error) stop(error) -+#else -+#define BLOCK_COMMENT(str) block_comment(str) -+#define STOP(error) block_comment(error); stop(error) -+#endif -+ -+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") - // Implementation of AddressLiteral - - AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) { -@@ -5393,23 +5402,7 @@ - // To see where a verify_oop failed, get $ebx+40/X for this frame. - // This is the value of eip which points to where verify_oop will return. - if (os::message_box(msg, "Execution stopped, print registers?")) { -- ttyLocker ttyl; -- tty->print_cr("eip = 0x%08x", eip); --#ifndef PRODUCT -- if ((WizardMode || Verbose) && PrintMiscellaneous) { -- tty->cr(); -- findpc(eip); -- tty->cr(); -- } --#endif -- tty->print_cr("rax = 0x%08x", rax); -- tty->print_cr("rbx = 0x%08x", rbx); -- tty->print_cr("rcx = 0x%08x", rcx); -- tty->print_cr("rdx = 0x%08x", rdx); -- tty->print_cr("rdi = 0x%08x", rdi); -- tty->print_cr("rsi = 0x%08x", rsi); -- tty->print_cr("rbp = 0x%08x", rbp); -- tty->print_cr("rsp = 0x%08x", rsp); -+ print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip); - BREAKPOINT; - assert(false, "start up GDB"); - } -@@ -5421,12 +5414,53 @@ - ThreadStateTransition::transition(thread, _thread_in_vm, saved_state); - } - -+void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) { -+ ttyLocker ttyl; -+ FlagSetting fs(Debugging, true); -+ tty->print_cr("eip = 0x%08x", eip); -+#ifndef PRODUCT -+ if ((WizardMode || Verbose) && PrintMiscellaneous) { -+ tty->cr(); -+ findpc(eip); -+ tty->cr(); -+ } -+#endif -+#define PRINT_REG(rax) \ -+ { tty->print("%s = ", #rax); os::print_location(tty, rax); } -+ PRINT_REG(rax); -+ PRINT_REG(rbx); -+ 
PRINT_REG(rcx); -+ PRINT_REG(rdx); -+ PRINT_REG(rdi); -+ PRINT_REG(rsi); -+ PRINT_REG(rbp); -+ PRINT_REG(rsp); -+#undef PRINT_REG -+ // Print some words near top of staack. -+ int* dump_sp = (int*) rsp; -+ for (int col1 = 0; col1 < 8; col1++) { -+ tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp); -+ os::print_location(tty, *dump_sp++); -+ } -+ for (int row = 0; row < 16; row++) { -+ tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp); -+ for (int col = 0; col < 8; col++) { -+ tty->print(" 0x%08x", *dump_sp++); -+ } -+ tty->cr(); -+ } -+ // Print some instructions around pc: -+ Disassembler::decode((address)eip-64, (address)eip); -+ tty->print_cr("--------"); -+ Disassembler::decode((address)eip, (address)eip+32); -+} -+ - void MacroAssembler::stop(const char* msg) { - ExternalAddress message((address)msg); - // push address of message - pushptr(message.addr()); - { Label L; call(L, relocInfo::none); bind(L); } // push eip -- pusha(); // push registers -+ pusha(); // push registers - call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32))); - hlt(); - } -@@ -5443,6 +5477,18 @@ - pop_CPU_state(); - } - -+void MacroAssembler::print_state() { -+ { Label L; call(L, relocInfo::none); bind(L); } // push eip -+ pusha(); // push registers -+ -+ push_CPU_state(); -+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32))); -+ pop_CPU_state(); -+ -+ popa(); -+ addl(rsp, wordSize); -+} -+ - #else // _LP64 - - // 64 bit versions -@@ -5908,14 +5954,33 @@ - } - - void MacroAssembler::warn(const char* msg) { -- push(rsp); -+ push(rbp); -+ movq(rbp, rsp); - andq(rsp, -16); // align stack as required by push_CPU_state and call -- - push_CPU_state(); // keeps alignment at 16 bytes - lea(c_rarg0, ExternalAddress((address) msg)); - call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0); - pop_CPU_state(); -- pop(rsp); -+ mov(rsp, rbp); -+ pop(rbp); -+} 
-+ -+void MacroAssembler::print_state() { -+ address rip = pc(); -+ pusha(); // get regs on stack -+ push(rbp); -+ movq(rbp, rsp); -+ andq(rsp, -16); // align stack as required by push_CPU_state and call -+ push_CPU_state(); // keeps alignment at 16 bytes -+ -+ lea(c_rarg0, InternalAddress(rip)); -+ lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array -+ call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1); -+ -+ pop_CPU_state(); -+ mov(rsp, rbp); -+ pop(rbp); -+ popa(); - } - - #ifndef PRODUCT -@@ -5924,7 +5989,7 @@ - - void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) { - // In order to get locks to work, we need to fake a in_VM state -- if (ShowMessageBoxOnError ) { -+ if (ShowMessageBoxOnError) { - JavaThread* thread = JavaThread::current(); - JavaThreadState saved_state = thread->thread_state(); - thread->set_thread_state(_thread_in_vm); -@@ -5938,30 +6003,9 @@ - // XXX correct this offset for amd64 - // This is the value of eip which points to where verify_oop will return. 
- if (os::message_box(msg, "Execution stopped, print registers?")) { -- ttyLocker ttyl; -- tty->print_cr("rip = 0x%016lx", pc); --#ifndef PRODUCT -- tty->cr(); -- findpc(pc); -- tty->cr(); --#endif -- tty->print_cr("rax = 0x%016lx", regs[15]); -- tty->print_cr("rbx = 0x%016lx", regs[12]); -- tty->print_cr("rcx = 0x%016lx", regs[14]); -- tty->print_cr("rdx = 0x%016lx", regs[13]); -- tty->print_cr("rdi = 0x%016lx", regs[8]); -- tty->print_cr("rsi = 0x%016lx", regs[9]); -- tty->print_cr("rbp = 0x%016lx", regs[10]); -- tty->print_cr("rsp = 0x%016lx", regs[11]); -- tty->print_cr("r8 = 0x%016lx", regs[7]); -- tty->print_cr("r9 = 0x%016lx", regs[6]); -- tty->print_cr("r10 = 0x%016lx", regs[5]); -- tty->print_cr("r11 = 0x%016lx", regs[4]); -- tty->print_cr("r12 = 0x%016lx", regs[3]); -- tty->print_cr("r13 = 0x%016lx", regs[2]); -- tty->print_cr("r14 = 0x%016lx", regs[1]); -- tty->print_cr("r15 = 0x%016lx", regs[0]); -+ print_state64(pc, regs); - BREAKPOINT; -+ assert(false, "start up GDB"); - } - ThreadStateTransition::transition(thread, _thread_in_vm, saved_state); - } else { -@@ -5972,6 +6016,54 @@ - } - } - -+void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) { -+ ttyLocker ttyl; -+ FlagSetting fs(Debugging, true); -+ tty->print_cr("rip = 0x%016lx", pc); -+#ifndef PRODUCT -+ tty->cr(); -+ findpc(pc); -+ tty->cr(); -+#endif -+#define PRINT_REG(rax, value) \ -+ { tty->print("%s = ", #rax); os::print_location(tty, value); } -+ PRINT_REG(rax, regs[15]); -+ PRINT_REG(rbx, regs[12]); -+ PRINT_REG(rcx, regs[14]); -+ PRINT_REG(rdx, regs[13]); -+ PRINT_REG(rdi, regs[8]); -+ PRINT_REG(rsi, regs[9]); -+ PRINT_REG(rbp, regs[10]); -+ PRINT_REG(rsp, regs[11]); -+ PRINT_REG(r8 , regs[7]); -+ PRINT_REG(r9 , regs[6]); -+ PRINT_REG(r10, regs[5]); -+ PRINT_REG(r11, regs[4]); -+ PRINT_REG(r12, regs[3]); -+ PRINT_REG(r13, regs[2]); -+ PRINT_REG(r14, regs[1]); -+ PRINT_REG(r15, regs[0]); -+#undef PRINT_REG -+ // Print some words near top of staack. 
-+ int64_t* rsp = (int64_t*) regs[11]; -+ int64_t* dump_sp = rsp; -+ for (int col1 = 0; col1 < 8; col1++) { -+ tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp); -+ os::print_location(tty, *dump_sp++); -+ } -+ for (int row = 0; row < 25; row++) { -+ tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp); -+ for (int col = 0; col < 4; col++) { -+ tty->print(" 0x%016lx", *dump_sp++); -+ } -+ tty->cr(); -+ } -+ // Print some instructions around pc: -+ Disassembler::decode((address)pc-64, (address)pc); -+ tty->print_cr("--------"); -+ Disassembler::decode((address)pc, (address)pc+32); -+} -+ - #endif // _LP64 - - // Now versions that are common to 32/64 bit -@@ -6341,7 +6433,7 @@ - get_thread(rax); - cmpptr(java_thread, rax); - jcc(Assembler::equal, L); -- stop("MacroAssembler::call_VM_base: rdi not callee saved?"); -+ STOP("MacroAssembler::call_VM_base: rdi not callee saved?"); - bind(L); - } - pop(rax); -@@ -7997,7 +8089,7 @@ - shlptr(tsize, LogHeapWordSize); - cmpptr(t1, tsize); - jcc(Assembler::equal, ok); -- stop("assert(t1 != tlab size)"); -+ STOP("assert(t1 != tlab size)"); - should_not_reach_here(); - - bind(ok); -@@ -8244,6 +8336,19 @@ - } - - -+// virtual method calling -+void MacroAssembler::lookup_virtual_method(Register recv_klass, -+ RegisterOrConstant vtable_index, -+ Register method_result) { -+ const int base = instanceKlass::vtable_start_offset() * wordSize; -+ assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below"); -+ Address vtable_entry_addr(recv_klass, -+ vtable_index, Address::times_ptr, -+ base + vtableEntry::method_offset_in_bytes()); -+ movptr(method_result, vtable_entry_addr); -+} -+ -+ - void MacroAssembler::check_klass_subtype(Register sub_klass, - Register super_klass, - Register temp_reg, -@@ -8493,6 +8598,7 @@ - // Pass register number to verify_oop_subroutine - char* b = new char[strlen(s) + 50]; - 
sprintf(b, "verify_oop: %s: %s", reg->name(), s); -+ BLOCK_COMMENT("verify_oop {"); - #ifdef _LP64 - push(rscratch1); // save r10, trashed by movptr() - #endif -@@ -8507,6 +8613,7 @@ - movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); - call(rax); - // Caller pops the arguments (oop, message) and restores rax, r10 -+ BLOCK_COMMENT("} verify_oop"); - } - - -@@ -8527,7 +8634,7 @@ - jcc(Assembler::notZero, L); - char* buf = new char[40]; - sprintf(buf, "DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]); -- stop(buf); -+ STOP(buf); - } else { - jccb(Assembler::notZero, L); - hlt(); -@@ -8543,60 +8650,6 @@ - } - - --// registers on entry: --// - rax ('check' register): required MethodType --// - rcx: method handle --// - rdx, rsi, or ?: killable temp --void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg, -- Register temp_reg, -- Label& wrong_method_type) { -- Address type_addr(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)); -- // compare method type against that of the receiver -- if (UseCompressedOops) { -- load_heap_oop(temp_reg, type_addr); -- cmpptr(mtype_reg, temp_reg); -- } else { -- cmpptr(mtype_reg, type_addr); -- } -- jcc(Assembler::notEqual, wrong_method_type); --} -- -- --// A method handle has a "vmslots" field which gives the size of its --// argument list in JVM stack slots. This field is either located directly --// in every method handle, or else is indirectly accessed through the --// method handle's MethodType. This macro hides the distinction. 
--void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg, -- Register temp_reg) { -- assert_different_registers(vmslots_reg, mh_reg, temp_reg); -- // load mh.type.form.vmslots -- Register temp2_reg = vmslots_reg; -- load_heap_oop(temp2_reg, Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg))); -- load_heap_oop(temp2_reg, Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg))); -- movl(vmslots_reg, Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg))); --} -- -- --// registers on entry: --// - rcx: method handle --// - rdx: killable temp (interpreted only) --// - rax: killable temp (compiled only) --void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg) { -- assert(mh_reg == rcx, "caller must put MH object in rcx"); -- assert_different_registers(mh_reg, temp_reg); -- -- // pick out the interpreted side of the handler -- // NOTE: vmentry is not an oop! -- movptr(temp_reg, Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes, temp_reg))); -- -- // off we go... -- jmp(Address(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes())); -- -- // for the various stubs which take control at this point, -- // see MethodHandles::generate_method_handle_stub --} -- -- - Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, - int extra_slot_offset) { - // cf. TemplateTable::prepare_invoke(), if (load_receiver). 
-@@ -8669,14 +8722,14 @@ - movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); - cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); - jcc(Assembler::aboveEqual, next); -- stop("assert(top >= start)"); -+ STOP("assert(top >= start)"); - should_not_reach_here(); - - bind(next); - movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); - cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); - jcc(Assembler::aboveEqual, ok); -- stop("assert(top <= end)"); -+ STOP("assert(top <= end)"); - should_not_reach_here(); - - bind(ok); -@@ -9109,6 +9162,25 @@ - movptr(dst, src); - } - -+void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) { -+ assert_different_registers(src1, tmp); -+#ifdef _LP64 -+ if (UseCompressedOops) { -+ bool did_push = false; -+ if (tmp == noreg) { -+ tmp = rax; -+ push(tmp); -+ did_push = true; -+ assert(!src2.uses(rsp), "can't push"); -+ } -+ load_heap_oop(tmp, src2); -+ cmpptr(src1, tmp); -+ if (did_push) pop(tmp); -+ } else -+#endif -+ cmpptr(src1, src2); -+} -+ - // Used for storing NULLs. 
- void MacroAssembler::store_heap_oop_null(Address dst) { - #ifdef _LP64 -@@ -9139,7 +9211,7 @@ - push(rscratch1); // cmpptr trashes rscratch1 - cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr())); - jcc(Assembler::equal, ok); -- stop(msg); -+ STOP(msg); - bind(ok); - pop(rscratch1); - } -@@ -9172,7 +9244,7 @@ - Label ok; - testq(r, r); - jcc(Assembler::notEqual, ok); -- stop("null oop passed to encode_heap_oop_not_null"); -+ STOP("null oop passed to encode_heap_oop_not_null"); - bind(ok); - } - #endif -@@ -9193,7 +9265,7 @@ - Label ok; - testq(src, src); - jcc(Assembler::notEqual, ok); -- stop("null oop passed to encode_heap_oop_not_null2"); -+ STOP("null oop passed to encode_heap_oop_not_null2"); - bind(ok); - } - #endif -@@ -9384,7 +9456,7 @@ - cmpptr(rax, StackAlignmentInBytes-wordSize); - pop(rax); - jcc(Assembler::equal, L); -- stop("Stack is not properly aligned!"); -+ STOP("Stack is not properly aligned!"); - bind(L); - } - #endif -@@ -10058,13 +10130,6 @@ - bind(DONE); - } - --#ifdef PRODUCT --#define BLOCK_COMMENT(str) /* nothing */ --#else --#define BLOCK_COMMENT(str) block_comment(str) --#endif -- --#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") - void MacroAssembler::generate_fill(BasicType t, bool aligned, - Register to, Register value, Register count, - Register rtmp, XMMRegister xtmp) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/assembler_x86.hpp ---- openjdk/hotspot/src/cpu/x86/vm/assembler_x86.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/assembler_x86.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -1908,6 +1908,7 @@ - void load_heap_oop(Register dst, Address src); - void load_heap_oop_not_null(Register dst, Address src); - void store_heap_oop(Address dst, Register src); -+ void cmp_heap_oop(Register src1, Address src2, Register tmp = noreg); - - // Used for storing NULL. All other oop constants should be - // stored using routines that take a jobject. 
-@@ -2085,6 +2086,11 @@ - Register scan_temp, - Label& no_such_interface); - -+ // virtual method calling -+ void lookup_virtual_method(Register recv_klass, -+ RegisterOrConstant vtable_index, -+ Register method_result); -+ - // Test sub_klass against super_klass, with fast and slow paths. - - // The fast path produces a tri-state answer: yes / no / maybe-slow. -@@ -2120,15 +2126,8 @@ - Label& L_success); - - // method handles (JSR 292) -- void check_method_handle_type(Register mtype_reg, Register mh_reg, -- Register temp_reg, -- Label& wrong_method_type); -- void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg, -- Register temp_reg); -- void jump_to_method_handle_entry(Register mh_reg, Register temp_reg); - Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0); - -- - //---- - void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0 - -@@ -2147,8 +2146,13 @@ - // prints msg and continues - void warn(const char* msg); - -+ // dumps registers and other state -+ void print_state(); -+ - static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg); - static void debug64(char* msg, int64_t pc, int64_t regs[]); -+ static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip); -+ static void print_state64(int64_t pc, int64_t regs[]); - - void os_breakpoint(); - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/c1_LIRAssembler_x86.cpp ---- openjdk/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -3502,6 +3502,7 @@ - void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { - ciMethod* method = op->profiled_method(); - int bci = op->profiled_bci(); -+ ciMethod* callee = op->profiled_callee(); - - // Update counter for all call types - ciMethodData* md = 
method->method_data_or_null(); -@@ -3513,9 +3514,11 @@ - __ movoop(mdo, md->constant_encoding()); - Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); - Bytecodes::Code bc = method->java_code_at_bci(bci); -+ const bool callee_is_static = callee->is_loaded() && callee->is_static(); - // Perform additional virtual call profiling for invokevirtual and - // invokeinterface bytecodes - if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && -+ !callee_is_static && // required for optimized MH invokes - C1ProfileVirtualCalls) { - assert(op->recv()->is_single_cpu(), "recv must be allocated"); - Register recv = op->recv()->as_register(); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/cppInterpreter_x86.cpp ---- openjdk/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -868,9 +868,9 @@ - // Need to differentiate between igetfield, agetfield, bgetfield etc. - // because they are different sizes. 
- // Use the type from the constant pool cache -- __ shrl(rdx, ConstantPoolCacheEntry::tosBits); -- // Make sure we don't need to mask rdx for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -+ __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift); -+ // Make sure we don't need to mask rdx after the above shift -+ ConstantPoolCacheEntry::verify_tos_state_shift(); - #ifdef _LP64 - Label notObj; - __ cmpl(rdx, atos); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/frame_x86.cpp ---- openjdk/hotspot/src/cpu/x86/vm/frame_x86.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/frame_x86.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -439,7 +439,6 @@ - // frame::sender_for_compiled_frame - frame frame::sender_for_compiled_frame(RegisterMap* map) const { - assert(map != NULL, "map must be set"); -- assert(!is_ricochet_frame(), "caller must handle this"); - - // frame owned by optimizing compiler - assert(_cb->frame_size() >= 0, "must have non-zero frame size"); -@@ -483,7 +482,6 @@ - if (is_entry_frame()) return sender_for_entry_frame(map); - if (is_interpreted_frame()) return sender_for_interpreter_frame(map); - assert(_cb == CodeCache::find_blob(pc()),"Must be the same"); -- if (is_ricochet_frame()) return sender_for_ricochet_frame(map); - - if (_cb != NULL) { - return sender_for_compiled_frame(map); -@@ -658,9 +656,7 @@ - values.describe(frame_no, fp() + frame::name##_offset, #name) - - void frame::describe_pd(FrameValues& values, int frame_no) { -- if (is_ricochet_frame()) { -- MethodHandles::RicochetFrame::describe(this, values, frame_no); -- } else if (is_interpreted_frame()) { -+ if (is_interpreted_frame()) { - DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp); - DESCRIBE_FP_OFFSET(interpreter_frame_last_sp); - DESCRIBE_FP_OFFSET(interpreter_frame_method); -@@ -682,12 +678,7 @@ - if (_cb != NULL) { - // use the frame size if valid - int size = _cb->frame_size(); -- if ((size > 0) && -- (! 
is_ricochet_frame())) { -- // Work-around: ricochet explicitly excluded because frame size is not -- // constant for the ricochet blob but its frame_size could not, for -- // some reasons, be declared as <= 0. This potentially confusing -- // size declaration should be fixed as another CR. -+ if (size > 0) { - return unextended_sp() + size; - } - } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/interp_masm_x86_32.cpp ---- openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -253,8 +253,12 @@ - get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size); - movptr(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); - const int shift_count = (1 + byte_no) * BitsPerByte; -+ assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) || -+ (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift), -+ "correct shift count"); - shrptr(bytecode, shift_count); -- andptr(bytecode, 0xFF); -+ assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask"); -+ andptr(bytecode, ConstantPoolCacheEntry::bytecode_1_mask); - } - - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/interp_masm_x86_64.cpp ---- openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -256,8 +256,12 @@ - // little-endian machines allow us that. 
- movl(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); - const int shift_count = (1 + byte_no) * BitsPerByte; -+ assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) || -+ (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift), -+ "correct shift count"); - shrl(bytecode, shift_count); -- andl(bytecode, 0xFF); -+ assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask"); -+ andl(bytecode, ConstantPoolCacheEntry::bytecode_1_mask); - } - - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/interpreterGenerator_x86.hpp ---- openjdk/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -35,7 +35,6 @@ - address generate_normal_entry(bool synchronized); - address generate_native_entry(bool synchronized); - address generate_abstract_entry(void); -- address generate_method_handle_entry(void); - address generate_math_entry(AbstractInterpreter::MethodKind kind); - address generate_empty_entry(void); - address generate_accessor_entry(void); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/interpreter_x86_32.cpp ---- openjdk/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -230,18 +230,6 @@ - } - - --// Method handle invoker --// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...) 
--address InterpreterGenerator::generate_method_handle_entry(void) { -- if (!EnableInvokeDynamic) { -- return generate_abstract_entry(); -- } -- -- address entry_point = MethodHandles::generate_method_handle_interpreter_entry(_masm); -- -- return entry_point; --} -- - void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) { - - // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/interpreter_x86_64.cpp ---- openjdk/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -317,19 +317,6 @@ - } - - --// Method handle invoker --// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...) --address InterpreterGenerator::generate_method_handle_entry(void) { -- if (!EnableInvokeDynamic) { -- return generate_abstract_entry(); -- } -- -- address entry_point = MethodHandles::generate_method_handle_interpreter_entry(_masm); -- -- return entry_point; --} -- -- - // Empty method, generate a very fast return. 
- - address InterpreterGenerator::generate_empty_entry(void) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/methodHandles_x86.cpp ---- openjdk/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -32,8 +32,10 @@ - - #ifdef PRODUCT - #define BLOCK_COMMENT(str) /* nothing */ -+#define STOP(error) stop(error) - #else - #define BLOCK_COMMENT(str) __ block_comment(str) -+#define STOP(error) block_comment(error); __ stop(error) - #endif - - #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") -@@ -43,483 +45,24 @@ - return RegisterOrConstant(value); - } - --address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm, -- address interpreted_entry) { -- // Just before the actual machine code entry point, allocate space -- // for a MethodHandleEntry::Data record, so that we can manage everything -- // from one base pointer. -- __ align(wordSize); -- address target = __ pc() + sizeof(Data); -- while (__ pc() < target) { -- __ nop(); -- __ align(wordSize); -- } -- -- MethodHandleEntry* me = (MethodHandleEntry*) __ pc(); -- me->set_end_address(__ pc()); // set a temporary end_address -- me->set_from_interpreted_entry(interpreted_entry); -- me->set_type_checking_entry(NULL); -- -- return (address) me; --} -- --MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm, -- address start_addr) { -- MethodHandleEntry* me = (MethodHandleEntry*) start_addr; -- assert(me->end_address() == start_addr, "valid ME"); -- -- // Fill in the real end_address: -- __ align(wordSize); -- me->set_end_address(__ pc()); -- -- return me; --} -- --// stack walking support -- --frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) { -- RicochetFrame* f = RicochetFrame::from_frame(fr); -- if (map->update_map()) -- frame::update_map_with_saved_link(map, &f->_sender_link); -- return frame(f->extended_sender_sp(), 
f->exact_sender_sp(), f->sender_link(), f->sender_pc()); --} -- --void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) { -- RicochetFrame* f = RicochetFrame::from_frame(fr); -- -- // pick up the argument type descriptor: -- Thread* thread = Thread::current(); -- Handle cookie(thread, f->compute_saved_args_layout(true, true)); -- -- // process fixed part -- blk->do_oop((oop*)f->saved_target_addr()); -- blk->do_oop((oop*)f->saved_args_layout_addr()); -- -- // process variable arguments: -- if (cookie.is_null()) return; // no arguments to describe -- -- // the cookie is actually the invokeExact method for my target -- // his argument signature is what I'm interested in -- assert(cookie->is_method(), ""); -- methodHandle invoker(thread, methodOop(cookie())); -- assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method"); -- assert(!invoker->is_static(), "must have MH argument"); -- int slot_count = invoker->size_of_parameters(); -- assert(slot_count >= 1, "must include 'this'"); -- intptr_t* base = f->saved_args_base(); -- intptr_t* retval = NULL; -- if (f->has_return_value_slot()) -- retval = f->return_value_slot_addr(); -- int slot_num = slot_count; -- intptr_t* loc = &base[slot_num -= 1]; -- //blk->do_oop((oop*) loc); // original target, which is irrelevant -- int arg_num = 0; -- for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) { -- if (ss.at_return_type()) continue; -- BasicType ptype = ss.type(); -- if (ptype == T_ARRAY) ptype = T_OBJECT; // fold all refs to T_OBJECT -- assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void"); -- loc = &base[slot_num -= type2size[ptype]]; -- bool is_oop = (ptype == T_OBJECT && loc != retval); -- if (is_oop) blk->do_oop((oop*)loc); -- arg_num += 1; -- } -- assert(slot_num == 0, "must have processed all the arguments"); --} -- --oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool 
write_cache) { -- oop cookie = NULL; -- if (read_cache) { -- cookie = saved_args_layout(); -- if (cookie != NULL) return cookie; -- } -- oop target = saved_target(); -- oop mtype = java_lang_invoke_MethodHandle::type(target); -- oop mtform = java_lang_invoke_MethodType::form(mtype); -- cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform); -- if (write_cache) { -- (*saved_args_layout_addr()) = cookie; -- } -- return cookie; --} -- --void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm, -- // output params: -- int* bounce_offset, -- int* exception_offset, -- int* frame_size_in_words) { -- (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize; -- -- address start = __ pc(); -- --#ifdef ASSERT -- __ hlt(); __ hlt(); __ hlt(); -- // here's a hint of something special: -- __ push(MAGIC_NUMBER_1); -- __ push(MAGIC_NUMBER_2); --#endif //ASSERT -- __ hlt(); // not reached -- -- // A return PC has just been popped from the stack. -- // Return values are in registers. -- // The ebp points into the RicochetFrame, which contains -- // a cleanup continuation we must return to. 
-- -- (*bounce_offset) = __ pc() - start; -- BLOCK_COMMENT("ricochet_blob.bounce"); -- -- if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); -- trace_method_handle(_masm, "return/ricochet_blob.bounce"); -- -- __ jmp(frame_address(continuation_offset_in_bytes())); -- __ hlt(); -- DEBUG_ONLY(__ push(MAGIC_NUMBER_2)); -- -- (*exception_offset) = __ pc() - start; -- BLOCK_COMMENT("ricochet_blob.exception"); -- -- // compare this to Interpreter::rethrow_exception_entry, which is parallel code -- // for example, see TemplateInterpreterGenerator::generate_throw_exception -- // Live registers in: -- // rax: exception -- // rdx: return address/pc that threw exception (ignored, always equal to bounce addr) -- __ verify_oop(rax); -- -- // no need to empty_FPU_stack or reinit_heapbase, since caller frame will do the same if needed -- -- // Take down the frame. -- -- // Cf. InterpreterMacroAssembler::remove_activation. -- leave_ricochet_frame(_masm, /*rcx_recv=*/ noreg, -- saved_last_sp_register(), -- /*sender_pc_reg=*/ rdx); -- -- // In between activations - previous activation type unknown yet -- // compute continuation point - the continuation point expects the -- // following registers set up: -- // -- // rax: exception -- // rdx: return address/pc that threw exception -- // rsp: expression stack of caller -- // rbp: ebp of caller -- __ push(rax); // save exception -- __ push(rdx); // save return address -- Register thread_reg = LP64_ONLY(r15_thread) NOT_LP64(rdi); -- NOT_LP64(__ get_thread(thread_reg)); -- __ call_VM_leaf(CAST_FROM_FN_PTR(address, -- SharedRuntime::exception_handler_for_return_address), -- thread_reg, rdx); -- __ mov(rbx, rax); // save exception handler -- __ pop(rdx); // restore return address -- __ pop(rax); // restore exception -- __ jmp(rbx); // jump to exception -- // handler of caller --} -- --void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm, -- Register rcx_recv, -- Register rax_argv, -- address return_handler, 
-- Register rbx_temp) { -- const Register saved_last_sp = saved_last_sp_register(); -- Address rcx_mh_vmtarget( rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() ); -- Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() ); -- -- // Push the RicochetFrame a word at a time. -- // This creates something similar to an interpreter frame. -- // Cf. TemplateInterpreterGenerator::generate_fixed_frame. -- BLOCK_COMMENT("push RicochetFrame {"); -- DEBUG_ONLY(int rfo = (int) sizeof(RicochetFrame)); -- assert((rfo -= wordSize) == RicochetFrame::sender_pc_offset_in_bytes(), ""); --#define RF_FIELD(push_value, name) \ -- { push_value; \ -- assert((rfo -= wordSize) == RicochetFrame::name##_offset_in_bytes(), ""); } -- RF_FIELD(__ push(rbp), sender_link); -- RF_FIELD(__ push(saved_last_sp), exact_sender_sp); // rsi/r13 -- RF_FIELD(__ pushptr(rcx_amh_conversion), conversion); -- RF_FIELD(__ push(rax_argv), saved_args_base); // can be updated if args are shifted -- RF_FIELD(__ push((int32_t) NULL_WORD), saved_args_layout); // cache for GC layout cookie -- if (UseCompressedOops) { -- __ load_heap_oop(rbx_temp, rcx_mh_vmtarget); -- RF_FIELD(__ push(rbx_temp), saved_target); -- } else { -- RF_FIELD(__ pushptr(rcx_mh_vmtarget), saved_target); -- } -- __ lea(rbx_temp, ExternalAddress(return_handler)); -- RF_FIELD(__ push(rbx_temp), continuation); --#undef RF_FIELD -- assert(rfo == 0, "fully initialized the RicochetFrame"); -- // compute new frame pointer: -- __ lea(rbp, Address(rsp, RicochetFrame::sender_link_offset_in_bytes())); -- // Push guard word #1 in debug mode. -- DEBUG_ONLY(__ push((int32_t) RicochetFrame::MAGIC_NUMBER_1)); -- // For debugging, leave behind an indication of which stub built this frame. 
-- DEBUG_ONLY({ Label L; __ call(L, relocInfo::none); __ bind(L); }); -- BLOCK_COMMENT("} RicochetFrame"); --} -- --void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm, -- Register rcx_recv, -- Register new_sp_reg, -- Register sender_pc_reg) { -- assert_different_registers(rcx_recv, new_sp_reg, sender_pc_reg); -- const Register saved_last_sp = saved_last_sp_register(); -- // Take down the frame. -- // Cf. InterpreterMacroAssembler::remove_activation. -- BLOCK_COMMENT("end_ricochet_frame {"); -- // TO DO: If (exact_sender_sp - extended_sender_sp) > THRESH, compact the frame down. -- // This will keep stack in bounds even with unlimited tailcalls, each with an adapter. -- if (rcx_recv->is_valid()) -- __ movptr(rcx_recv, RicochetFrame::frame_address(RicochetFrame::saved_target_offset_in_bytes())); -- __ movptr(sender_pc_reg, RicochetFrame::frame_address(RicochetFrame::sender_pc_offset_in_bytes())); -- __ movptr(saved_last_sp, RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes())); -- __ movptr(rbp, RicochetFrame::frame_address(RicochetFrame::sender_link_offset_in_bytes())); -- __ mov(rsp, new_sp_reg); -- BLOCK_COMMENT("} end_ricochet_frame"); --} -- --// Emit code to verify that RBP is pointing at a valid ricochet frame. --#ifndef PRODUCT --enum { -- ARG_LIMIT = 255, SLOP = 4, -- // use this parameter for checking for garbage stack movements: -- UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP) -- // the slop defends against false alarms due to fencepost errors --}; --#endif -- --#ifdef ASSERT --void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) { -- // The stack should look like this: -- // ... keep1 | dest=42 | keep2 | RF | magic | handler | magic | recursive args | -- // Check various invariants. 
-- verify_offsets(); -- -- Register rdi_temp = rdi; -- Register rcx_temp = rcx; -- { __ push(rdi_temp); __ push(rcx_temp); } --#define UNPUSH_TEMPS \ -- { __ pop(rcx_temp); __ pop(rdi_temp); } -- -- Address magic_number_1_addr = RicochetFrame::frame_address(RicochetFrame::magic_number_1_offset_in_bytes()); -- Address magic_number_2_addr = RicochetFrame::frame_address(RicochetFrame::magic_number_2_offset_in_bytes()); -- Address continuation_addr = RicochetFrame::frame_address(RicochetFrame::continuation_offset_in_bytes()); -- Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); -- Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes()); -- -- Label L_bad, L_ok; -- BLOCK_COMMENT("verify_clean {"); -- // Magic numbers must check out: -- __ cmpptr(magic_number_1_addr, (int32_t) MAGIC_NUMBER_1); -- __ jcc(Assembler::notEqual, L_bad); -- __ cmpptr(magic_number_2_addr, (int32_t) MAGIC_NUMBER_2); -- __ jcc(Assembler::notEqual, L_bad); -- -- // Arguments pointer must look reasonable: -- __ movptr(rcx_temp, saved_args_base_addr); -- __ cmpptr(rcx_temp, rbp); -- __ jcc(Assembler::below, L_bad); -- __ subptr(rcx_temp, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize); -- __ cmpptr(rcx_temp, rbp); -- __ jcc(Assembler::above, L_bad); -- -- load_conversion_dest_type(_masm, rdi_temp, conversion_addr); -- __ cmpl(rdi_temp, T_VOID); -- __ jcc(Assembler::equal, L_ok); -- __ movptr(rcx_temp, saved_args_base_addr); -- load_conversion_vminfo(_masm, rdi_temp, conversion_addr); -- __ cmpptr(Address(rcx_temp, rdi_temp, Interpreter::stackElementScale()), -- (int32_t) RETURN_VALUE_PLACEHOLDER); -- __ jcc(Assembler::equal, L_ok); -- __ BIND(L_bad); -- UNPUSH_TEMPS; -- __ stop("damaged ricochet frame"); -- __ BIND(L_ok); -- UNPUSH_TEMPS; -- BLOCK_COMMENT("} verify_clean"); -- --#undef UNPUSH_TEMPS -- --} --#endif //ASSERT -- - void MethodHandles::load_klass_from_Class(MacroAssembler* 
_masm, Register klass_reg) { - if (VerifyMethodHandles) - verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(), -- "AMH argument is a Class"); -+ "MH argument is a Class"); - __ load_heap_oop(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes())); - } - --void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr) { -- int bits = BitsPerByte; -- int offset = (CONV_VMINFO_SHIFT / bits); -- int shift = (CONV_VMINFO_SHIFT % bits); -- __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset)); -- assert(CONV_VMINFO_MASK == right_n_bits(bits - shift), "else change type of previous load"); -- assert(shift == 0, "no shift needed"); -+#ifdef ASSERT -+static int check_nonzero(const char* xname, int x) { -+ assert(x != 0, err_msg("%s should be nonzero", xname)); -+ return x; - } -- --void MethodHandles::load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr) { -- int bits = BitsPerByte; -- int offset = (CONV_DEST_TYPE_SHIFT / bits); -- int shift = (CONV_DEST_TYPE_SHIFT % bits); -- __ load_unsigned_byte(reg, conversion_field_addr.plus_disp(offset)); -- assert(CONV_TYPE_MASK == right_n_bits(bits - shift), "else change type of previous load"); -- __ shrl(reg, shift); -- DEBUG_ONLY(int conv_type_bits = (int) exact_log2(CONV_TYPE_MASK+1)); -- assert((shift + conv_type_bits) == bits, "left justified in byte"); --} -- --void MethodHandles::load_stack_move(MacroAssembler* _masm, -- Register rdi_stack_move, -- Register rcx_amh, -- bool might_be_negative) { -- BLOCK_COMMENT("load_stack_move {"); -- Address rcx_amh_conversion(rcx_amh, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes()); -- __ movl(rdi_stack_move, rcx_amh_conversion); -- __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT); --#ifdef _LP64 -- if (might_be_negative) { -- // clean high bits of stack motion register (was loaded as an int) -- __ movslq(rdi_stack_move, rdi_stack_move); 
-- } --#endif //_LP64 --#ifdef ASSERT -- if (VerifyMethodHandles) { -- Label L_ok, L_bad; -- int32_t stack_move_limit = 0x4000; // extra-large -- __ cmpptr(rdi_stack_move, stack_move_limit); -- __ jcc(Assembler::greaterEqual, L_bad); -- __ cmpptr(rdi_stack_move, -stack_move_limit); -- __ jcc(Assembler::greater, L_ok); -- __ bind(L_bad); -- __ stop("load_stack_move of garbage value"); -- __ BIND(L_ok); -- } --#endif -- BLOCK_COMMENT("} load_stack_move"); --} -+#define NONZERO(x) check_nonzero(#x, x) -+#else //ASSERT -+#define NONZERO(x) (x) -+#endif //ASSERT - - #ifdef ASSERT --void MethodHandles::RicochetFrame::verify_offsets() { -- // Check compatibility of this struct with the more generally used offsets of class frame: -- int ebp_off = sender_link_offset_in_bytes(); // offset from struct base to local rbp value -- assert(ebp_off + wordSize*frame::interpreter_frame_method_offset == saved_args_base_offset_in_bytes(), ""); -- assert(ebp_off + wordSize*frame::interpreter_frame_last_sp_offset == conversion_offset_in_bytes(), ""); -- assert(ebp_off + wordSize*frame::interpreter_frame_sender_sp_offset == exact_sender_sp_offset_in_bytes(), ""); -- // These last two have to be exact: -- assert(ebp_off + wordSize*frame::link_offset == sender_link_offset_in_bytes(), ""); -- assert(ebp_off + wordSize*frame::return_addr_offset == sender_pc_offset_in_bytes(), ""); --} -- --void MethodHandles::RicochetFrame::verify() const { -- verify_offsets(); -- assert(magic_number_1() == MAGIC_NUMBER_1, err_msg(PTR_FORMAT " == " PTR_FORMAT, magic_number_1(), MAGIC_NUMBER_1)); -- assert(magic_number_2() == MAGIC_NUMBER_2, err_msg(PTR_FORMAT " == " PTR_FORMAT, magic_number_2(), MAGIC_NUMBER_2)); -- if (!Universe::heap()->is_gc_active()) { -- if (saved_args_layout() != NULL) { -- assert(saved_args_layout()->is_method(), "must be valid oop"); -- } -- if (saved_target() != NULL) { -- assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value"); -- } -- } -- int 
conv_op = adapter_conversion_op(conversion()); -- assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS || -- conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS || -- conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF, -- "must be a sane conversion"); -- if (has_return_value_slot()) { -- assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, ""); -- } --} --#endif //PRODUCT -- --#ifdef ASSERT --void MethodHandles::verify_argslot(MacroAssembler* _masm, -- Register argslot_reg, -- const char* error_message) { -- // Verify that argslot lies within (rsp, rbp]. -- Label L_ok, L_bad; -- BLOCK_COMMENT("verify_argslot {"); -- __ cmpptr(argslot_reg, rbp); -- __ jccb(Assembler::above, L_bad); -- __ cmpptr(rsp, argslot_reg); -- __ jccb(Assembler::below, L_ok); -- __ bind(L_bad); -- __ stop(error_message); -- __ BIND(L_ok); -- BLOCK_COMMENT("} verify_argslot"); --} -- --void MethodHandles::verify_argslots(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, -- Register arg_slot_base_reg, -- bool negate_argslots, -- const char* error_message) { -- // Verify that [argslot..argslot+size) lies within (rsp, rbp). -- Label L_ok, L_bad; -- Register rdi_temp = rdi; -- BLOCK_COMMENT("verify_argslots {"); -- __ push(rdi_temp); -- if (negate_argslots) { -- if (arg_slots.is_constant()) { -- arg_slots = -1 * arg_slots.as_constant(); -- } else { -- __ movptr(rdi_temp, arg_slots); -- __ negptr(rdi_temp); -- arg_slots = rdi_temp; -- } -- } -- __ lea(rdi_temp, Address(arg_slot_base_reg, arg_slots, Interpreter::stackElementScale())); -- __ cmpptr(rdi_temp, rbp); -- __ pop(rdi_temp); -- __ jcc(Assembler::above, L_bad); -- __ cmpptr(rsp, arg_slot_base_reg); -- __ jcc(Assembler::below, L_ok); -- __ bind(L_bad); -- __ stop(error_message); -- __ BIND(L_ok); -- BLOCK_COMMENT("} verify_argslots"); --} -- --// Make sure that arg_slots has the same sign as the given direction. 
--// If (and only if) arg_slots is a assembly-time constant, also allow it to be zero. --void MethodHandles::verify_stack_move(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, int direction) { -- bool allow_zero = arg_slots.is_constant(); -- if (direction == 0) { direction = +1; allow_zero = true; } -- assert(stack_move_unit() == -1, "else add extra checks here"); -- if (arg_slots.is_register()) { -- Label L_ok, L_bad; -- BLOCK_COMMENT("verify_stack_move {"); -- // testl(arg_slots.as_register(), -stack_move_unit() - 1); // no need -- // jcc(Assembler::notZero, L_bad); -- __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD); -- if (direction > 0) { -- __ jcc(allow_zero ? Assembler::less : Assembler::lessEqual, L_bad); -- __ cmpptr(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE); -- __ jcc(Assembler::less, L_ok); -- } else { -- __ jcc(allow_zero ? Assembler::greater : Assembler::greaterEqual, L_bad); -- __ cmpptr(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE); -- __ jcc(Assembler::greater, L_ok); -- } -- __ bind(L_bad); -- if (direction > 0) -- __ stop("assert arg_slots > 0"); -- else -- __ stop("assert arg_slots < 0"); -- __ BIND(L_ok); -- BLOCK_COMMENT("} verify_stack_move"); -- } else { -- intptr_t size = arg_slots.as_constant(); -- if (direction < 0) size = -size; -- assert(size >= 0, "correct direction of constant move"); -- assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move"); -- } --} -- - void MethodHandles::verify_klass(MacroAssembler* _masm, - Register obj, KlassHandle klass, - const char* error_message) { -@@ -528,12 +71,15 @@ - klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(), - "must be one of the SystemDictionaryHandles"); - Register temp = rdi; -+ Register temp2 = noreg; -+ LP64_ONLY(temp2 = rscratch1); // used by MacroAssembler::cmpptr - Label L_ok, L_bad; - BLOCK_COMMENT("verify_klass {"); - __ verify_oop(obj); - __ testptr(obj, obj); - __ jcc(Assembler::zero, L_bad); -- __ 
push(temp); -+ __ push(temp); if (temp2 != noreg) __ push(temp2); -+#define UNPUSH { if (temp2 != noreg) __ pop(temp2); __ pop(temp); } - __ load_klass(temp, obj); - __ cmpptr(temp, ExternalAddress((address) klass_addr)); - __ jcc(Assembler::equal, L_ok); -@@ -541,17 +87,42 @@ - __ movptr(temp, Address(temp, super_check_offset)); - __ cmpptr(temp, ExternalAddress((address) klass_addr)); - __ jcc(Assembler::equal, L_ok); -- __ pop(temp); -+ UNPUSH; - __ bind(L_bad); -- __ stop(error_message); -+ __ STOP(error_message); - __ BIND(L_ok); -- __ pop(temp); -+ UNPUSH; - BLOCK_COMMENT("} verify_klass"); - } -+ -+void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) { -+ Label L; -+ BLOCK_COMMENT("verify_ref_kind {"); -+ __ movl(temp, Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes()))); -+ __ shrl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT); -+ __ andl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK); -+ __ cmpl(temp, ref_kind); -+ __ jcc(Assembler::equal, L); -+ { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal); -+ jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind); -+ if (ref_kind == JVM_REF_invokeVirtual || -+ ref_kind == JVM_REF_invokeSpecial) -+ // could do this for all ref_kinds, but would explode assembly code size -+ trace_method_handle(_masm, buf); -+ __ STOP(buf); -+ } -+ BLOCK_COMMENT("} verify_ref_kind"); -+ __ bind(L); -+} -+ - #endif //ASSERT - --void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp) { -- if (JvmtiExport::can_post_interpreter_events()) { -+void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, -+ bool for_compiler_entry) { -+ assert(method == rbx, "interpreter calling convention"); -+ __ verify_oop(method); -+ -+ if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) { - Label run_compiled_code; - // JVMTI 
events, such as single-stepping, are implemented partly by avoiding running - // compiled code in threads for which the event is enabled. Check here for -@@ -567,470 +138,383 @@ - __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0); - __ jccb(Assembler::zero, run_compiled_code); - __ jmp(Address(method, methodOopDesc::interpreter_entry_offset())); -- __ bind(run_compiled_code); -+ __ BIND(run_compiled_code); - } -- __ jmp(Address(method, methodOopDesc::from_interpreted_offset())); -+ -+ const ByteSize entry_offset = for_compiler_entry ? methodOopDesc::from_compiled_offset() : -+ methodOopDesc::from_interpreted_offset(); -+ __ jmp(Address(method, entry_offset)); - } - -+void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, -+ Register recv, Register method_temp, -+ Register temp2, -+ bool for_compiler_entry) { -+ BLOCK_COMMENT("jump_to_lambda_form {"); -+ // This is the initial entry point of a lazy method handle. -+ // After type checking, it picks up the invoker from the LambdaForm. 
-+ assert_different_registers(recv, method_temp, temp2); -+ assert(recv != noreg, "required register"); -+ assert(method_temp == rbx, "required register for loading method"); -+ -+ //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); }); -+ -+ // Load the invoker, as MH -> MH.form -> LF.vmentry -+ __ verify_oop(recv); -+ __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()))); -+ __ verify_oop(method_temp); -+ __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()))); -+ __ verify_oop(method_temp); -+ // the following assumes that a methodOop is normally compressed in the vmtarget field: -+ __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()))); -+ __ verify_oop(method_temp); -+ -+ if (VerifyMethodHandles && !for_compiler_entry) { -+ // make sure recv is already on stack -+ __ load_sized_value(temp2, -+ Address(method_temp, methodOopDesc::size_of_parameters_offset()), -+ sizeof(u2), /*is_signed*/ false); -+ // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), ""); -+ Label L; -+ __ cmpptr(recv, __ argument_address(temp2, -1)); -+ __ jcc(Assembler::equal, L); -+ __ movptr(rax, __ argument_address(temp2, -1)); -+ __ STOP("receiver not on stack"); -+ __ BIND(L); -+ } -+ -+ jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry); -+ BLOCK_COMMENT("} jump_to_lambda_form"); -+} -+ -+ - // Code generation --address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) { -+address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm, -+ vmIntrinsics::ID iid) { -+ const bool not_for_compiler_entry = false; // this is the interpreter entry -+ assert(is_signature_polymorphic(iid), "expected invoke iid"); -+ if (iid == vmIntrinsics::_invokeGeneric || -+ iid == 
vmIntrinsics::_compiledLambdaForm) { -+ // Perhaps surprisingly, the symbolic references visible to Java are not directly used. -+ // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod. -+ // They all allow an appendix argument. -+ __ hlt(); // empty stubs make SG sick -+ return NULL; -+ } -+ -+ // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted) - // rbx: methodOop -- // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots]) -- // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted) -- // rdx, rdi: garbage temp, blown away -+ // rdx: argument locator (parameter slot count, added to rsp) -+ // rcx: used as temp to hold mh or receiver -+ // rax, rdi: garbage temps, blown away -+ Register rdx_argp = rdx; // argument list ptr, live on error paths -+ Register rax_temp = rax; -+ Register rcx_mh = rcx; // MH receiver; dies quickly and is recycled -+ Register rbx_method = rbx; // eventual target of this invocation - -- Register rbx_method = rbx; -- Register rcx_recv = rcx; -- Register rax_mtype = rax; -- Register rdx_temp = rdx; -- Register rdi_temp = rdi; -- -- // emit WrongMethodType path first, to enable jccb back-branch from main path -- Label wrong_method_type; -- __ bind(wrong_method_type); -- Label invoke_generic_slow_path, invoke_exact_error_path; -- assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");; -- __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeExact); -- __ jcc(Assembler::notEqual, invoke_generic_slow_path); -- __ jmp(invoke_exact_error_path); -+ address code_start = __ pc(); - - // here's where control starts out: - __ align(CodeEntryAlignment); - address entry_point = __ pc(); - -- // fetch the MethodType from the method handle into rax (the 'check' register) -- // FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list. 
-- // This would simplify several touchy bits of code. -- // See 6984712: JSR 292 method handle calls need a clean argument base pointer -- { -- Register tem = rbx_method; -- for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) { -- __ movptr(rax_mtype, Address(tem, *pchase)); -- tem = rax_mtype; // in case there is another indirection -+ if (VerifyMethodHandles) { -+ Label L; -+ BLOCK_COMMENT("verify_intrinsic_id {"); -+ __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) iid); -+ __ jcc(Assembler::equal, L); -+ if (iid == vmIntrinsics::_linkToVirtual || -+ iid == vmIntrinsics::_linkToSpecial) { -+ // could do this for all kinds, but would explode assembly code size -+ trace_method_handle(_masm, "bad methodOop::intrinsic_id"); -+ } -+ __ STOP("bad methodOop::intrinsic_id"); -+ __ bind(L); -+ BLOCK_COMMENT("} verify_intrinsic_id"); - } - } - -- // given the MethodType, find out where the MH argument is buried -- __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp))); -- Register rdx_vmslots = rdx_temp; -- __ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp))); -- Address mh_receiver_slot_addr = __ argument_address(rdx_vmslots); -- __ movptr(rcx_recv, mh_receiver_slot_addr); -+ // First task: Find out how big the argument list is. 
-+ Address rdx_first_arg_addr; -+ int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid); -+ assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic"); -+ if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) { -+ __ load_sized_value(rdx_argp, -+ Address(rbx_method, methodOopDesc::size_of_parameters_offset()), -+ sizeof(u2), /*is_signed*/ false); -+ // assert(sizeof(u2) == sizeof(methodOopDesc::_size_of_parameters), ""); -+ rdx_first_arg_addr = __ argument_address(rdx_argp, -1); -+ } else { -+ DEBUG_ONLY(rdx_argp = noreg); -+ } - - trace_method_handle(_masm, "invokeExact"); -+ if (!is_signature_polymorphic_static(iid)) { -+ __ movptr(rcx_mh, rdx_first_arg_addr); -+ DEBUG_ONLY(rdx_argp = noreg); -+ } - -- __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type); -+ // rdx_first_arg_addr is live! - -- // Nobody uses the MH receiver slot after this. Make sure. -- DEBUG_ONLY(__ movptr(mh_receiver_slot_addr, (int32_t)0x999999)); -+ if (TraceMethodHandles) { -+ const char* name = vmIntrinsics::name_at(iid); -+ if (*name == '_') name += 1; -+ const size_t len = strlen(name) + 50; -+ char* qname = NEW_C_HEAP_ARRAY(char, len, mtInternal); -+ const char* suffix = ""; -+ if (vmIntrinsics::method_for(iid) == NULL || -+ !vmIntrinsics::method_for(iid)->access_flags().is_public()) { -+ if (is_signature_polymorphic_static(iid)) -+ suffix = "/static"; -+ else -+ suffix = "/private"; -+ } -+ jio_snprintf(qname, len, "MethodHandle::interpreter_entry::%s%s", name, suffix); -+ // note: stub look for mh in rcx -+ trace_method_handle(_masm, qname); -+ } - -- __ jump_to_method_handle_entry(rcx_recv, rdi_temp); -+ if (iid == vmIntrinsics::_invokeBasic) { -+ generate_method_handle_dispatch(_masm, iid, rcx_mh, noreg, not_for_compiler_entry); - -- // error path for invokeExact (only) -- __ bind(invoke_exact_error_path); -- // ensure that the top of stack is properly aligned. 
-- __ mov(rdi, rsp); -- __ andptr(rsp, -StackAlignmentInBytes); // Align the stack for the ABI -- __ pushptr(Address(rdi, 0)); // Pick up the return address -+ } else { -+ // Adjust argument list by popping the trailing MemberName argument. -+ Register rcx_recv = noreg; -+ if (MethodHandles::ref_kind_has_receiver(ref_kind)) { -+ // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack. -+ __ movptr(rcx_recv = rcx, rdx_first_arg_addr); -+ } -+ DEBUG_ONLY(rdx_argp = noreg); -+ Register rbx_member = rbx_method; // MemberName ptr; incoming method ptr is dead now -+ __ pop(rax_temp); // return address -+ __ pop(rbx_member); // extract last argument -+ __ push(rax_temp); // re-push return address -+ generate_method_handle_dispatch(_masm, iid, rcx_recv, rbx_member, not_for_compiler_entry); -+ } - -- // Stub wants expected type in rax and the actual type in rcx -- __ jump(ExternalAddress(StubRoutines::throw_WrongMethodTypeException_entry())); -- -- // for invokeGeneric (only), apply argument and result conversions on the fly -- __ bind(invoke_generic_slow_path); --#ifdef ASSERT -- if (VerifyMethodHandles) { -- Label L; -- __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric); -- __ jcc(Assembler::equal, L); -- __ stop("bad methodOop::intrinsic_id"); -- __ bind(L); -+ if (PrintMethodHandleStubs) { -+ address code_end = __ pc(); -+ tty->print_cr("--------"); -+ tty->print_cr("method handle interpreter entry for %s", vmIntrinsics::name_at(iid)); -+ Disassembler::decode(code_start, code_end); -+ tty->cr(); - } --#endif //ASSERT -- Register rbx_temp = rbx_method; // don't need it now -- -- // make room on the stack for another pointer: -- Register rcx_argslot = rcx_recv; -- __ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1)); -- insert_arg_slots(_masm, 2 * stack_move_unit(), -- rcx_argslot, rbx_temp, rdx_temp); -- -- // load up an adapter from the calling type (Java weaves 
this) -- Register rdx_adapter = rdx_temp; -- __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp))); -- __ load_heap_oop(rdx_adapter, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp))); -- __ verify_oop(rdx_adapter); -- __ movptr(Address(rcx_argslot, 1 * Interpreter::stackElementSize), rdx_adapter); -- // As a trusted first argument, pass the type being called, so the adapter knows -- // the actual types of the arguments and return values. -- // (Generic invokers are shared among form-families of method-type.) -- __ movptr(Address(rcx_argslot, 0 * Interpreter::stackElementSize), rax_mtype); -- // FIXME: assert that rdx_adapter is of the right method-type. -- __ mov(rcx, rdx_adapter); -- trace_method_handle(_masm, "invokeGeneric"); -- __ jump_to_method_handle_entry(rcx, rdi_temp); - - return entry_point; - } - --// Helper to insert argument slots into the stack. --// arg_slots must be a multiple of stack_move_unit() and < 0 --// rax_argslot is decremented to point to the new (shifted) location of the argslot --// But, rdx_temp ends up holding the original value of rax_argslot. --void MethodHandles::insert_arg_slots(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, -- Register rax_argslot, -- Register rbx_temp, Register rdx_temp) { -- // allow constant zero -- if (arg_slots.is_constant() && arg_slots.as_constant() == 0) -- return; -- assert_different_registers(rax_argslot, rbx_temp, rdx_temp, -- (!arg_slots.is_register() ? rsp : arg_slots.as_register())); -- if (VerifyMethodHandles) -- verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame"); -- if (VerifyMethodHandles) -- verify_stack_move(_masm, arg_slots, -1); - -- // We have to insert at least one word, so bang the stack. -- if (UseStackBanging) { -- int frame_size = (arg_slots.is_constant() ? 
-1 * arg_slots.as_constant() * wordSize : 0); -- if (frame_size <= 0) -- frame_size = 256 * Interpreter::stackElementSize; // conservative -- __ generate_stack_overflow_check(frame_size); -+void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, -+ vmIntrinsics::ID iid, -+ Register receiver_reg, -+ Register member_reg, -+ bool for_compiler_entry) { -+ assert(is_signature_polymorphic(iid), "expected invoke iid"); -+ Register rbx_method = rbx; // eventual target of this invocation -+ // temps used in this code are not used in *either* compiled or interpreted calling sequences -+#ifdef _LP64 -+ Register temp1 = rscratch1; -+ Register temp2 = rscratch2; -+ Register temp3 = rax; -+ if (for_compiler_entry) { -+ assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment"); -+ assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5); -+ assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5); -+ assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5); - } -- -- // Make space on the stack for the inserted argument(s). -- // Then pull down everything shallower than rax_argslot. -- // The stacked return address gets pulled down with everything else. -- // That is, copy [rsp, argslot) downward by -size words. 
In pseudo-code: -- // rsp -= size; -- // for (rdx = rsp + size; rdx < argslot; rdx++) -- // rdx[-size] = rdx[0] -- // argslot -= size; -- BLOCK_COMMENT("insert_arg_slots {"); -- __ mov(rdx_temp, rsp); // source pointer for copy -- __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale())); -- { -- Label loop; -- __ BIND(loop); -- // pull one word down each time through the loop -- __ movptr(rbx_temp, Address(rdx_temp, 0)); -- __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp); -- __ addptr(rdx_temp, wordSize); -- __ cmpptr(rdx_temp, rax_argslot); -- __ jcc(Assembler::below, loop); -- } -- -- // Now move the argslot down, to point to the opened-up space. -- __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale())); -- BLOCK_COMMENT("} insert_arg_slots"); --} -- --// Helper to remove argument slots from the stack. --// arg_slots must be a multiple of stack_move_unit() and > 0 --void MethodHandles::remove_arg_slots(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, -- Register rax_argslot, -- Register rbx_temp, Register rdx_temp) { -- // allow constant zero -- if (arg_slots.is_constant() && arg_slots.as_constant() == 0) -- return; -- assert_different_registers(rax_argslot, rbx_temp, rdx_temp, -- (!arg_slots.is_register() ? rsp : arg_slots.as_register())); -- if (VerifyMethodHandles) -- verify_argslots(_masm, arg_slots, rax_argslot, false, -- "deleted argument(s) must fall within current frame"); -- if (VerifyMethodHandles) -- verify_stack_move(_masm, arg_slots, +1); -- -- BLOCK_COMMENT("remove_arg_slots {"); -- // Pull up everything shallower than rax_argslot. -- // Then remove the excess space on the stack. -- // The stacked return address gets pulled up with everything else. -- // That is, copy [rsp, argslot) upward by size words. 
In pseudo-code: -- // for (rdx = argslot-1; rdx >= rsp; --rdx) -- // rdx[size] = rdx[0] -- // argslot += size; -- // rsp += size; -- __ lea(rdx_temp, Address(rax_argslot, -wordSize)); // source pointer for copy -- { -- Label loop; -- __ BIND(loop); -- // pull one word up each time through the loop -- __ movptr(rbx_temp, Address(rdx_temp, 0)); -- __ movptr(Address(rdx_temp, arg_slots, Interpreter::stackElementScale()), rbx_temp); -- __ addptr(rdx_temp, -wordSize); -- __ cmpptr(rdx_temp, rsp); -- __ jcc(Assembler::aboveEqual, loop); -- } -- -- // Now move the argslot up, to point to the just-copied block. -- __ lea(rsp, Address(rsp, arg_slots, Interpreter::stackElementScale())); -- // And adjust the argslot address to point at the deletion point. -- __ lea(rax_argslot, Address(rax_argslot, arg_slots, Interpreter::stackElementScale())); -- BLOCK_COMMENT("} remove_arg_slots"); --} -- --// Helper to copy argument slots to the top of the stack. --// The sequence starts with rax_argslot and is counted by slot_count --// slot_count must be a multiple of stack_move_unit() and >= 0 --// This function blows the temps but does not change rax_argslot. --void MethodHandles::push_arg_slots(MacroAssembler* _masm, -- Register rax_argslot, -- RegisterOrConstant slot_count, -- int skip_words_count, -- Register rbx_temp, Register rdx_temp) { -- assert_different_registers(rax_argslot, rbx_temp, rdx_temp, -- (!slot_count.is_register() ? rbp : slot_count.as_register()), -- rsp); -- assert(Interpreter::stackElementSize == wordSize, "else change this code"); -- -- if (VerifyMethodHandles) -- verify_stack_move(_masm, slot_count, 0); -- -- // allow constant zero -- if (slot_count.is_constant() && slot_count.as_constant() == 0) -- return; -- -- BLOCK_COMMENT("push_arg_slots {"); -- -- Register rbx_top = rbx_temp; -- -- // There is at most 1 word to carry down with the TOS. 
-- switch (skip_words_count) { -- case 1: __ pop(rdx_temp); break; -- case 0: break; -- default: ShouldNotReachHere(); -- } -- -- if (slot_count.is_constant()) { -- for (int i = slot_count.as_constant() - 1; i >= 0; i--) { -- __ pushptr(Address(rax_argslot, i * wordSize)); -- } -- } else { -- Label L_plural, L_loop, L_break; -- // Emit code to dynamically check for the common cases, zero and one slot. -- __ cmpl(slot_count.as_register(), (int32_t) 1); -- __ jccb(Assembler::greater, L_plural); -- __ jccb(Assembler::less, L_break); -- __ pushptr(Address(rax_argslot, 0)); -- __ jmpb(L_break); -- __ BIND(L_plural); -- -- // Loop for 2 or more: -- // rbx = &rax[slot_count] -- // while (rbx > rax) *(--rsp) = *(--rbx) -- __ lea(rbx_top, Address(rax_argslot, slot_count, Address::times_ptr)); -- __ BIND(L_loop); -- __ subptr(rbx_top, wordSize); -- __ pushptr(Address(rbx_top, 0)); -- __ cmpptr(rbx_top, rax_argslot); -- __ jcc(Assembler::above, L_loop); -- __ bind(L_break); -- } -- switch (skip_words_count) { -- case 1: __ push(rdx_temp); break; -- case 0: break; -- default: ShouldNotReachHere(); -- } -- BLOCK_COMMENT("} push_arg_slots"); --} -- --// in-place movement; no change to rsp --// blows rax_temp, rdx_temp --void MethodHandles::move_arg_slots_up(MacroAssembler* _masm, -- Register rbx_bottom, // invariant -- Address top_addr, // can use rax_temp -- RegisterOrConstant positive_distance_in_slots, -- Register rax_temp, Register rdx_temp) { -- BLOCK_COMMENT("move_arg_slots_up {"); -- assert_different_registers(rbx_bottom, -- rax_temp, rdx_temp, -- positive_distance_in_slots.register_or_noreg()); -- Label L_loop, L_break; -- Register rax_top = rax_temp; -- if (!top_addr.is_same_address(Address(rax_top, 0))) -- __ lea(rax_top, top_addr); -- // Detect empty (or broken) loop: --#ifdef ASSERT -- if (VerifyMethodHandles) { -- // Verify that &bottom < &top (non-empty interval) -- Label L_ok, L_bad; -- if (positive_distance_in_slots.is_register()) { -- __ 
cmpptr(positive_distance_in_slots.as_register(), (int32_t) 0); -- __ jcc(Assembler::lessEqual, L_bad); -- } -- __ cmpptr(rbx_bottom, rax_top); -- __ jcc(Assembler::below, L_ok); -- __ bind(L_bad); -- __ stop("valid bounds (copy up)"); -- __ BIND(L_ok); -+#else -+ Register temp1 = (for_compiler_entry ? rsi : rdx); -+ Register temp2 = rdi; -+ Register temp3 = rax; -+ if (for_compiler_entry) { -+ assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : rcx), "only valid assignment"); -+ assert_different_registers(temp1, rcx, rdx); -+ assert_different_registers(temp2, rcx, rdx); -+ assert_different_registers(temp3, rcx, rdx); - } - #endif -- __ cmpptr(rbx_bottom, rax_top); -- __ jccb(Assembler::aboveEqual, L_break); -- // work rax down to rbx, copying contiguous data upwards -- // In pseudo-code: -- // [rbx, rax) = &[bottom, top) -- // while (--rax >= rbx) *(rax + distance) = *(rax + 0), rax--; -- __ BIND(L_loop); -- __ subptr(rax_top, wordSize); -- __ movptr(rdx_temp, Address(rax_top, 0)); -- __ movptr( Address(rax_top, positive_distance_in_slots, Address::times_ptr), rdx_temp); -- __ cmpptr(rax_top, rbx_bottom); -- __ jcc(Assembler::above, L_loop); -- assert(Interpreter::stackElementSize == wordSize, "else change loop"); -- __ bind(L_break); -- BLOCK_COMMENT("} move_arg_slots_up"); --} -+ assert_different_registers(temp1, temp2, temp3, receiver_reg); -+ assert_different_registers(temp1, temp2, temp3, member_reg); -+ if (!for_compiler_entry) -+ assert_different_registers(temp1, temp2, temp3, saved_last_sp_register()); // don't trash lastSP - --// in-place movement; no change to rsp --// blows rax_temp, rdx_temp --void MethodHandles::move_arg_slots_down(MacroAssembler* _masm, -- Address bottom_addr, // can use rax_temp -- Register rbx_top, // invariant -- RegisterOrConstant negative_distance_in_slots, -- Register rax_temp, Register rdx_temp) { -- BLOCK_COMMENT("move_arg_slots_down {"); -- assert_different_registers(rbx_top, -- 
negative_distance_in_slots.register_or_noreg(), -- rax_temp, rdx_temp); -- Label L_loop, L_break; -- Register rax_bottom = rax_temp; -- if (!bottom_addr.is_same_address(Address(rax_bottom, 0))) -- __ lea(rax_bottom, bottom_addr); -- // Detect empty (or broken) loop: --#ifdef ASSERT -- assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, ""); -- if (VerifyMethodHandles) { -- // Verify that &bottom < &top (non-empty interval) -- Label L_ok, L_bad; -- if (negative_distance_in_slots.is_register()) { -- __ cmpptr(negative_distance_in_slots.as_register(), (int32_t) 0); -- __ jcc(Assembler::greaterEqual, L_bad); -+ if (iid == vmIntrinsics::_invokeBasic) { -+ // indirect through MH.form.vmentry.vmtarget -+ jump_to_lambda_form(_masm, receiver_reg, rbx_method, temp1, for_compiler_entry); -+ -+ } else { -+ // The method is a member invoker used by direct method handles. -+ if (VerifyMethodHandles) { -+ // make sure the trailing argument really is a MemberName (caller responsibility) -+ verify_klass(_masm, member_reg, SystemDictionaryHandles::MemberName_klass(), -+ "MemberName required for invokeVirtual etc."); - } -- __ cmpptr(rax_bottom, rbx_top); -- __ jcc(Assembler::below, L_ok); -- __ bind(L_bad); -- __ stop("valid bounds (copy down)"); -- __ BIND(L_ok); -+ -+ Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes())); -+ Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes())); -+ Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())); -+ -+ Register temp1_recv_klass = temp1; -+ if (iid != vmIntrinsics::_linkToStatic) { -+ __ verify_oop(receiver_reg); -+ if (iid == vmIntrinsics::_linkToSpecial) { -+ // Don't actually load the klass; just null-check the receiver. 
-+ __ null_check(receiver_reg); -+ } else { -+ // load receiver klass itself -+ __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes()); -+ __ load_klass(temp1_recv_klass, receiver_reg); -+ __ verify_oop(temp1_recv_klass); -+ } -+ BLOCK_COMMENT("check_receiver {"); -+ // The receiver for the MemberName must be in receiver_reg. -+ // Check the receiver against the MemberName.clazz -+ if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) { -+ // Did not load it above... -+ __ load_klass(temp1_recv_klass, receiver_reg); -+ __ verify_oop(temp1_recv_klass); -+ } -+ if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) { -+ Label L_ok; -+ Register temp2_defc = temp2; -+ __ load_heap_oop(temp2_defc, member_clazz); -+ load_klass_from_Class(_masm, temp2_defc); -+ __ verify_oop(temp2_defc); -+ __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok); -+ // If we get here, the type check failed! -+ __ STOP("receiver class disagrees with MemberName.clazz"); -+ __ bind(L_ok); -+ } -+ BLOCK_COMMENT("} check_receiver"); -+ } -+ if (iid == vmIntrinsics::_linkToSpecial || -+ iid == vmIntrinsics::_linkToStatic) { -+ DEBUG_ONLY(temp1_recv_klass = noreg); // these guys didn't load the recv_klass -+ } -+ -+ // Live registers at this point: -+ // member_reg - MemberName that was the trailing argument -+ // temp1_recv_klass - klass of stacked receiver, if needed -+ // rsi/r13 - interpreter linkage (if interpreted) -+ // rcx, rdx, rsi, rdi, r8, r8 - compiler arguments (if compiled) -+ -+ bool method_is_live = false; -+ switch (iid) { -+ case vmIntrinsics::_linkToSpecial: -+ if (VerifyMethodHandles) { -+ verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3); -+ } -+ __ load_heap_oop(rbx_method, member_vmtarget); -+ method_is_live = true; -+ break; -+ -+ case vmIntrinsics::_linkToStatic: -+ if (VerifyMethodHandles) { -+ verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3); -+ } -+ __ load_heap_oop(rbx_method, member_vmtarget); -+ 
method_is_live = true; -+ break; -+ -+ case vmIntrinsics::_linkToVirtual: -+ { -+ // same as TemplateTable::invokevirtual, -+ // minus the CP setup and profiling: -+ -+ if (VerifyMethodHandles) { -+ verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3); -+ } -+ -+ // pick out the vtable index from the MemberName, and then we can discard it: -+ Register temp2_index = temp2; -+ __ movptr(temp2_index, member_vmindex); -+ -+ if (VerifyMethodHandles) { -+ Label L_index_ok; -+ __ cmpl(temp2_index, 0); -+ __ jcc(Assembler::greaterEqual, L_index_ok); -+ __ STOP("no virtual index"); -+ __ BIND(L_index_ok); -+ } -+ -+ // Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget -+ // at this point. And VerifyMethodHandles has already checked clazz, if needed. -+ -+ // get target methodOop & entry point -+ __ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method); -+ method_is_live = true; -+ break; -+ } -+ -+ case vmIntrinsics::_linkToInterface: -+ { -+ // same as TemplateTable::invokeinterface -+ // (minus the CP setup and profiling, with different argument motion) -+ if (VerifyMethodHandles) { -+ verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3); -+ } -+ -+ Register temp3_intf = temp3; -+ __ load_heap_oop(temp3_intf, member_clazz); -+ load_klass_from_Class(_masm, temp3_intf); -+ __ verify_oop(temp3_intf); -+ -+ Register rbx_index = rbx_method; -+ __ movptr(rbx_index, member_vmindex); -+ if (VerifyMethodHandles) { -+ Label L; -+ __ cmpl(rbx_index, 0); -+ __ jcc(Assembler::greaterEqual, L); -+ __ STOP("invalid vtable index for MH.invokeInterface"); -+ __ bind(L); -+ } -+ -+ // given intf, index, and recv klass, dispatch to the implementation method -+ Label L_no_such_interface; -+ __ lookup_interface_method(temp1_recv_klass, temp3_intf, -+ // note: next two args must be the same: -+ rbx_index, rbx_method, -+ temp2, -+ L_no_such_interface); -+ -+ __ verify_oop(rbx_method); -+ jump_from_method_handle(_masm, rbx_method, 
temp2, for_compiler_entry); -+ __ hlt(); -+ -+ __ bind(L_no_such_interface); -+ __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry())); -+ break; -+ } -+ -+ default: -+ fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid))); -+ break; -+ } -+ -+ if (method_is_live) { -+ // live at this point: rbx_method, rsi/r13 (if interpreted) -+ -+ // After figuring out which concrete method to call, jump into it. -+ // Note that this works in the interpreter with no data motion. -+ // But the compiled version will require that rcx_recv be shifted out. -+ __ verify_oop(rbx_method); -+ jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry); -+ } - } --#endif -- __ cmpptr(rax_bottom, rbx_top); -- __ jccb(Assembler::aboveEqual, L_break); -- // work rax up to rbx, copying contiguous data downwards -- // In pseudo-code: -- // [rax, rbx) = &[bottom, top) -- // while (rax < rbx) *(rax - distance) = *(rax + 0), rax++; -- __ BIND(L_loop); -- __ movptr(rdx_temp, Address(rax_bottom, 0)); -- __ movptr( Address(rax_bottom, negative_distance_in_slots, Address::times_ptr), rdx_temp); -- __ addptr(rax_bottom, wordSize); -- __ cmpptr(rax_bottom, rbx_top); -- __ jcc(Assembler::below, L_loop); -- assert(Interpreter::stackElementSize == wordSize, "else change loop"); -- __ bind(L_break); -- BLOCK_COMMENT("} move_arg_slots_down"); --} -- --// Copy from a field or array element to a stacked argument slot. --// is_element (ignored) says whether caller is loading an array element instead of an instance field. --void MethodHandles::move_typed_arg(MacroAssembler* _masm, -- BasicType type, bool is_element, -- Address slot_dest, Address value_src, -- Register rbx_temp, Register rdx_temp) { -- BLOCK_COMMENT(!is_element ? 
"move_typed_arg {" : "move_typed_arg { (array element)"); -- if (type == T_OBJECT || type == T_ARRAY) { -- __ load_heap_oop(rbx_temp, value_src); -- __ movptr(slot_dest, rbx_temp); -- } else if (type != T_VOID) { -- int arg_size = type2aelembytes(type); -- bool arg_is_signed = is_signed_subword_type(type); -- int slot_size = (arg_size > wordSize) ? arg_size : wordSize; -- __ load_sized_value( rdx_temp, value_src, arg_size, arg_is_signed, rbx_temp); -- __ store_sized_value( slot_dest, rdx_temp, slot_size, rbx_temp); -- } -- BLOCK_COMMENT("} move_typed_arg"); --} -- --void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type, -- Address return_slot) { -- BLOCK_COMMENT("move_return_value {"); -- // Old versions of the JVM must clean the FPU stack after every return. --#ifndef _LP64 --#ifdef COMPILER2 -- // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases -- if ((type == T_FLOAT && UseSSE < 1) || (type == T_DOUBLE && UseSSE < 2)) { -- for (int i = 1; i < 8; i++) { -- __ ffree(i); -- } -- } else if (UseSSE < 2) { -- __ empty_FPU_stack(); -- } --#endif //COMPILER2 --#endif //!_LP64 -- -- // Look at the type and pull the value out of the corresponding register. -- if (type == T_VOID) { -- // nothing to do -- } else if (type == T_OBJECT) { -- __ movptr(return_slot, rax); -- } else if (type == T_INT || is_subword_type(type)) { -- // write the whole word, even if only 32 bits is significant -- __ movptr(return_slot, rax); -- } else if (type == T_LONG) { -- // store the value by parts -- // Note: We assume longs are continguous (if misaligned) on the interpreter stack. 
-- __ store_sized_value(return_slot, rax, BytesPerLong, rdx); -- } else if (NOT_LP64((type == T_FLOAT && UseSSE < 1) || -- (type == T_DOUBLE && UseSSE < 2) ||) -- false) { -- // Use old x86 FPU registers: -- if (type == T_FLOAT) -- __ fstp_s(return_slot); -- else -- __ fstp_d(return_slot); -- } else if (type == T_FLOAT) { -- __ movflt(return_slot, xmm0); -- } else if (type == T_DOUBLE) { -- __ movdbl(return_slot, xmm0); -- } else { -- ShouldNotReachHere(); -- } -- BLOCK_COMMENT("} move_return_value"); - } - - #ifndef PRODUCT --#define DESCRIBE_RICOCHET_OFFSET(rf, name) \ -- values.describe(frame_no, (intptr_t *) (((uintptr_t)rf) + MethodHandles::RicochetFrame::name##_offset_in_bytes()), #name) -- --void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no) { -- address bp = (address) fr->fp(); -- RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes()); -- -- // ricochet slots -- DESCRIBE_RICOCHET_OFFSET(rf, exact_sender_sp); -- DESCRIBE_RICOCHET_OFFSET(rf, conversion); -- DESCRIBE_RICOCHET_OFFSET(rf, saved_args_base); -- DESCRIBE_RICOCHET_OFFSET(rf, saved_args_layout); -- DESCRIBE_RICOCHET_OFFSET(rf, saved_target); -- DESCRIBE_RICOCHET_OFFSET(rf, continuation); -- -- // relevant ricochet targets (in caller frame) -- values.describe(-1, rf->saved_args_base(), err_msg("*saved_args_base for #%d", frame_no)); --} --#endif // ASSERT -- --#ifndef PRODUCT --extern "C" void print_method_handle(oop mh); - void trace_method_handle_stub(const char* adaptername, - oop mh, - intptr_t* saved_regs, - intptr_t* entry_sp) { - // called as a leaf from native code: do not block the JVM! -- bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have rcx_mh -+ bool has_mh = (strstr(adaptername, "/static") == NULL && -+ strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH - const char* mh_reg_name = has_mh ? 
"rcx_mh" : "rcx"; -- tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT, adaptername, mh_reg_name, mh, entry_sp); -+ tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT, -+ adaptername, mh_reg_name, -+ mh, entry_sp); - - if (Verbose) { - tty->print_cr("Registers:"); -@@ -1094,12 +578,18 @@ - values.describe(-1, dump_fp, "fp for #1 "); - values.describe(-1, dump_sp, "sp for #1"); - } -+ values.describe(-1, entry_sp, "raw top of stack"); - - tty->print_cr("Stack layout:"); - values.print(p); - } -- if (has_mh) -- print_method_handle(mh); -+ if (has_mh && mh->is_oop()) { -+ mh->print(); -+ if (java_lang_invoke_MethodHandle::is_instance(mh)) { -+ if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0) -+ java_lang_invoke_MethodHandle::form(mh)->print(); -+ } -+ } - } - } - -@@ -1167,1387 +657,3 @@ - } - #endif //PRODUCT - --// which conversion op types are implemented here? --int MethodHandles::adapter_conversion_ops_supported_mask() { -- return ((1<from_compiled_entry(), "method must be linked"); -- -- const Register rax_pc = rax; -- __ pop(rax_pc); // caller PC -- __ mov(rsp, saved_last_sp); // cut the stack back to where the caller started -- -- Register rbx_method = rbx_temp; -- __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method)); -- -- const int jobject_oop_offset = 0; -- __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject -- -- __ movptr(saved_last_sp, rsp); -- __ subptr(rsp, 3 * wordSize); -- __ push(rax_pc); // restore caller PC -- -- __ movl (__ argument_address(constant(2)), rarg0_code); -- __ movptr(__ argument_address(constant(1)), rarg1_actual); -- __ movptr(__ argument_address(constant(0)), rarg2_required); -- jump_from_method_handle(_masm, rbx_method, rax); -- } -- break; -- -- case _invokestatic_mh: -- case _invokespecial_mh: -- { -- Register rbx_method = rbx_temp; -- __ load_heap_oop(rbx_method, rcx_mh_vmtarget); // target is a methodOop -- __ verify_oop(rbx_method); -- // same as 
TemplateTable::invokestatic or invokespecial, -- // minus the CP setup and profiling: -- if (ek == _invokespecial_mh) { -- // Must load & check the first argument before entering the target method. -- __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp); -- __ movptr(rcx_recv, __ argument_address(rax_argslot, -1)); -- __ null_check(rcx_recv); -- __ verify_oop(rcx_recv); -- } -- jump_from_method_handle(_masm, rbx_method, rax); -- } -- break; -- -- case _invokevirtual_mh: -- { -- // same as TemplateTable::invokevirtual, -- // minus the CP setup and profiling: -- -- // pick out the vtable index and receiver offset from the MH, -- // and then we can discard it: -- __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp); -- Register rbx_index = rbx_temp; -- __ movl(rbx_index, rcx_dmh_vmindex); -- // Note: The verifier allows us to ignore rcx_mh_vmtarget. -- __ movptr(rcx_recv, __ argument_address(rax_argslot, -1)); -- __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes()); -- -- // get receiver klass -- Register rax_klass = rax_argslot; -- __ load_klass(rax_klass, rcx_recv); -- __ verify_oop(rax_klass); -- -- // get target methodOop & entry point -- const int base = instanceKlass::vtable_start_offset() * wordSize; -- assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); -- Address vtable_entry_addr(rax_klass, -- rbx_index, Address::times_ptr, -- base + vtableEntry::method_offset_in_bytes()); -- Register rbx_method = rbx_temp; -- __ movptr(rbx_method, vtable_entry_addr); -- -- __ verify_oop(rbx_method); -- jump_from_method_handle(_masm, rbx_method, rax); -- } -- break; -- -- case _invokeinterface_mh: -- { -- // same as TemplateTable::invokeinterface, -- // minus the CP setup and profiling: -- -- // pick out the interface and itable index from the MH. 
-- __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp); -- Register rdx_intf = rdx_temp; -- Register rbx_index = rbx_temp; -- __ load_heap_oop(rdx_intf, rcx_mh_vmtarget); -- __ movl(rbx_index, rcx_dmh_vmindex); -- __ movptr(rcx_recv, __ argument_address(rax_argslot, -1)); -- __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes()); -- -- // get receiver klass -- Register rax_klass = rax_argslot; -- __ load_klass(rax_klass, rcx_recv); -- __ verify_oop(rax_klass); -- -- Register rbx_method = rbx_index; -- -- // get interface klass -- Label no_such_interface; -- __ verify_oop(rdx_intf); -- __ lookup_interface_method(rax_klass, rdx_intf, -- // note: next two args must be the same: -- rbx_index, rbx_method, -- rdi_temp, -- no_such_interface); -- -- __ verify_oop(rbx_method); -- jump_from_method_handle(_masm, rbx_method, rax); -- __ hlt(); -- -- __ bind(no_such_interface); -- // Throw an exception. -- // For historical reasons, it will be IncompatibleClassChangeError. -- __ mov(rbx_temp, rcx_recv); // rarg2_required might be RCX -- assert_different_registers(rarg2_required, rbx_temp); -- __ movptr(rarg2_required, Address(rdx_intf, java_mirror_offset)); // required interface -- __ mov( rarg1_actual, rbx_temp); // bad receiver -- __ movl( rarg0_code, (int) Bytecodes::_invokeinterface); // who is complaining? 
-- __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); -- } -- break; -- -- case _bound_ref_mh: -- case _bound_int_mh: -- case _bound_long_mh: -- case _bound_ref_direct_mh: -- case _bound_int_direct_mh: -- case _bound_long_direct_mh: -- { -- const bool direct_to_method = (ek >= _bound_ref_direct_mh); -- BasicType arg_type = ek_bound_mh_arg_type(ek); -- int arg_slots = type2size[arg_type]; -- -- // make room for the new argument: -- __ movl(rax_argslot, rcx_bmh_vmargslot); -- __ lea(rax_argslot, __ argument_address(rax_argslot)); -- -- insert_arg_slots(_masm, arg_slots * stack_move_unit(), rax_argslot, rbx_temp, rdx_temp); -- -- // store bound argument into the new stack slot: -- __ load_heap_oop(rbx_temp, rcx_bmh_argument); -- if (arg_type == T_OBJECT) { -- __ movptr(Address(rax_argslot, 0), rbx_temp); -- } else { -- Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type)); -- move_typed_arg(_masm, arg_type, false, -- Address(rax_argslot, 0), -- prim_value_addr, -- rbx_temp, rdx_temp); -- } -- -- if (direct_to_method) { -- Register rbx_method = rbx_temp; -- __ load_heap_oop(rbx_method, rcx_mh_vmtarget); -- __ verify_oop(rbx_method); -- jump_from_method_handle(_masm, rbx_method, rax); -- } else { -- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); -- __ verify_oop(rcx_recv); -- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); -- } -- } -- break; -- -- case _adapter_opt_profiling: -- if (java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes() != 0) { -- Address rcx_mh_vmcount(rcx_recv, java_lang_invoke_CountingMethodHandle::vmcount_offset_in_bytes()); -- __ incrementl(rcx_mh_vmcount); -- } -- // fall through -- -- case _adapter_retype_only: -- case _adapter_retype_raw: -- // immediately jump to the next MH layer: -- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); -- __ verify_oop(rcx_recv); -- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); -- // This is OK when all parameter types widen. 
-- // It is also OK when a return type narrows. -- break; -- -- case _adapter_check_cast: -- { -- // temps: -- Register rbx_klass = rbx_temp; // interesting AMH data -- -- // check a reference argument before jumping to the next layer of MH: -- __ movl(rax_argslot, rcx_amh_vmargslot); -- vmarg = __ argument_address(rax_argslot); -- -- // What class are we casting to? -- __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object! -- load_klass_from_Class(_masm, rbx_klass); -- -- Label done; -- __ movptr(rdx_temp, vmarg); -- __ testptr(rdx_temp, rdx_temp); -- __ jcc(Assembler::zero, done); // no cast if null -- __ load_klass(rdx_temp, rdx_temp); -- -- // live at this point: -- // - rbx_klass: klass required by the target method -- // - rdx_temp: argument klass to test -- // - rcx_recv: adapter method handle -- __ check_klass_subtype(rdx_temp, rbx_klass, rax_argslot, done); -- -- // If we get here, the type check failed! -- // Call the wrong_method_type stub, passing the failing argument type in rax. -- Register rax_mtype = rax_argslot; -- __ movl(rax_argslot, rcx_amh_vmargslot); // reload argslot field -- __ movptr(rdx_temp, vmarg); -- -- assert_different_registers(rarg2_required, rdx_temp); -- __ load_heap_oop(rarg2_required, rcx_amh_argument); // required class -- __ mov( rarg1_actual, rdx_temp); // bad object -- __ movl( rarg0_code, (int) Bytecodes::_checkcast); // who is complaining? 
-- __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); -- -- __ bind(done); -- // get the new MH: -- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); -- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); -- } -- break; -- -- case _adapter_prim_to_prim: -- case _adapter_ref_to_prim: -- case _adapter_prim_to_ref: -- // handled completely by optimized cases -- __ stop("init_AdapterMethodHandle should not issue this"); -- break; -- -- case _adapter_opt_i2i: // optimized subcase of adapt_prim_to_prim --//case _adapter_opt_f2i: // optimized subcase of adapt_prim_to_prim -- case _adapter_opt_l2i: // optimized subcase of adapt_prim_to_prim -- case _adapter_opt_unboxi: // optimized subcase of adapt_ref_to_prim -- { -- // perform an in-place conversion to int or an int subword -- __ movl(rax_argslot, rcx_amh_vmargslot); -- vmarg = __ argument_address(rax_argslot); -- -- switch (ek) { -- case _adapter_opt_i2i: -- __ movl(rdx_temp, vmarg); -- break; -- case _adapter_opt_l2i: -- { -- // just delete the extra slot; on a little-endian machine we keep the first -- __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); -- remove_arg_slots(_masm, -stack_move_unit(), -- rax_argslot, rbx_temp, rdx_temp); -- vmarg = Address(rax_argslot, -Interpreter::stackElementSize); -- __ movl(rdx_temp, vmarg); -- } -- break; -- case _adapter_opt_unboxi: -- { -- // Load the value up from the heap. -- __ movptr(rdx_temp, vmarg); -- int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT); --#ifdef ASSERT -- for (int bt = T_BOOLEAN; bt < T_INT; bt++) { -- if (is_subword_type(BasicType(bt))) -- assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), ""); -- } --#endif -- __ null_check(rdx_temp, value_offset); -- __ movl(rdx_temp, Address(rdx_temp, value_offset)); -- // We load this as a word. Because we are little-endian, -- // the low bits will be correct, but the high bits may need cleaning. 
-- // The vminfo will guide us to clean those bits. -- } -- break; -- default: -- ShouldNotReachHere(); -- } -- -- // Do the requested conversion and store the value. -- Register rbx_vminfo = rbx_temp; -- load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion); -- -- // get the new MH: -- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); -- // (now we are done with the old MH) -- -- // original 32-bit vmdata word must be of this form: -- // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 | -- __ xchgptr(rcx, rbx_vminfo); // free rcx for shifts -- __ shll(rdx_temp /*, rcx*/); -- Label zero_extend, done; -- __ testl(rcx, CONV_VMINFO_SIGN_FLAG); -- __ jccb(Assembler::zero, zero_extend); -- -- // this path is taken for int->byte, int->short -- __ sarl(rdx_temp /*, rcx*/); -- __ jmpb(done); -- -- __ bind(zero_extend); -- // this is taken for int->char -- __ shrl(rdx_temp /*, rcx*/); -- -- __ bind(done); -- __ movl(vmarg, rdx_temp); // Store the value. -- __ xchgptr(rcx, rbx_vminfo); // restore rcx_recv -- -- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); -- } -- break; -- -- case _adapter_opt_i2l: // optimized subcase of adapt_prim_to_prim -- case _adapter_opt_unboxl: // optimized subcase of adapt_ref_to_prim -- { -- // perform an in-place int-to-long or ref-to-long conversion -- __ movl(rax_argslot, rcx_amh_vmargslot); -- -- // on a little-endian machine we keep the first slot and add another after -- __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); -- insert_arg_slots(_masm, stack_move_unit(), -- rax_argslot, rbx_temp, rdx_temp); -- Address vmarg1(rax_argslot, -Interpreter::stackElementSize); -- Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize); -- -- switch (ek) { -- case _adapter_opt_i2l: -- { --#ifdef _LP64 -- __ movslq(rdx_temp, vmarg1); // Load sign-extended -- __ movq(vmarg1, rdx_temp); // Store into first slot --#else -- __ movl(rdx_temp, vmarg1); -- __ sarl(rdx_temp, BitsPerInt - 1); // __ extend_sign() -- __ 
movl(vmarg2, rdx_temp); // store second word --#endif -- } -- break; -- case _adapter_opt_unboxl: -- { -- // Load the value up from the heap. -- __ movptr(rdx_temp, vmarg1); -- int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG); -- assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), ""); -- __ null_check(rdx_temp, value_offset); --#ifdef _LP64 -- __ movq(rbx_temp, Address(rdx_temp, value_offset)); -- __ movq(vmarg1, rbx_temp); --#else -- __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt)); -- __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt)); -- __ movl(vmarg1, rbx_temp); -- __ movl(vmarg2, rdx_temp); --#endif -- } -- break; -- default: -- ShouldNotReachHere(); -- } -- -- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); -- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); -- } -- break; -- -- case _adapter_opt_f2d: // optimized subcase of adapt_prim_to_prim -- case _adapter_opt_d2f: // optimized subcase of adapt_prim_to_prim -- { -- // perform an in-place floating primitive conversion -- __ movl(rax_argslot, rcx_amh_vmargslot); -- __ lea(rax_argslot, __ argument_address(rax_argslot, 1)); -- if (ek == _adapter_opt_f2d) { -- insert_arg_slots(_masm, stack_move_unit(), -- rax_argslot, rbx_temp, rdx_temp); -- } -- Address vmarg(rax_argslot, -Interpreter::stackElementSize); -- --#ifdef _LP64 -- if (ek == _adapter_opt_f2d) { -- __ movflt(xmm0, vmarg); -- __ cvtss2sd(xmm0, xmm0); -- __ movdbl(vmarg, xmm0); -- } else { -- __ movdbl(xmm0, vmarg); -- __ cvtsd2ss(xmm0, xmm0); -- __ movflt(vmarg, xmm0); -- } --#else //_LP64 -- if (ek == _adapter_opt_f2d) { -- __ fld_s(vmarg); // load float to ST0 -- __ fstp_d(vmarg); // store double -- } else { -- __ fld_d(vmarg); // load double to ST0 -- __ fstp_s(vmarg); // store single -- } --#endif //_LP64 -- -- if (ek == _adapter_opt_d2f) { -- remove_arg_slots(_masm, -stack_move_unit(), -- rax_argslot, rbx_temp, rdx_temp); -- } -- -- __ 
load_heap_oop(rcx_recv, rcx_mh_vmtarget); -- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); -- } -- break; -- -- case _adapter_swap_args: -- case _adapter_rot_args: -- // handled completely by optimized cases -- __ stop("init_AdapterMethodHandle should not issue this"); -- break; -- -- case _adapter_opt_swap_1: -- case _adapter_opt_swap_2: -- case _adapter_opt_rot_1_up: -- case _adapter_opt_rot_1_down: -- case _adapter_opt_rot_2_up: -- case _adapter_opt_rot_2_down: -- { -- int swap_slots = ek_adapter_opt_swap_slots(ek); -- int rotate = ek_adapter_opt_swap_mode(ek); -- -- // 'argslot' is the position of the first argument to swap -- __ movl(rax_argslot, rcx_amh_vmargslot); -- __ lea(rax_argslot, __ argument_address(rax_argslot)); -- -- // 'vminfo' is the second -- Register rbx_destslot = rbx_temp; -- load_conversion_vminfo(_masm, rbx_destslot, rcx_amh_conversion); -- __ lea(rbx_destslot, __ argument_address(rbx_destslot)); -- if (VerifyMethodHandles) -- verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame"); -- -- assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here"); -- if (!rotate) { -- // simple swap -- for (int i = 0; i < swap_slots; i++) { -- __ movptr(rdi_temp, Address(rax_argslot, i * wordSize)); -- __ movptr(rdx_temp, Address(rbx_destslot, i * wordSize)); -- __ movptr(Address(rax_argslot, i * wordSize), rdx_temp); -- __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp); -- } -- } else { -- // A rotate is actually pair of moves, with an "odd slot" (or pair) -- // changing place with a series of other slots. 
-- // First, push the "odd slot", which is going to get overwritten -- for (int i = swap_slots - 1; i >= 0; i--) { -- // handle one with rdi_temp instead of a push: -- if (i == 0) __ movptr(rdi_temp, Address(rax_argslot, i * wordSize)); -- else __ pushptr( Address(rax_argslot, i * wordSize)); -- } -- if (rotate > 0) { -- // Here is rotate > 0: -- // (low mem) (high mem) -- // | dest: more_slots... | arg: odd_slot :arg+1 | -- // => -- // | dest: odd_slot | dest+1: more_slots... :arg+1 | -- // work argslot down to destslot, copying contiguous data upwards -- // pseudo-code: -- // rax = src_addr - swap_bytes -- // rbx = dest_addr -- // while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--; -- move_arg_slots_up(_masm, -- rbx_destslot, -- Address(rax_argslot, 0), -- swap_slots, -- rax_argslot, rdx_temp); -- } else { -- // Here is the other direction, rotate < 0: -- // (low mem) (high mem) -- // | arg: odd_slot | arg+1: more_slots... :dest+1 | -- // => -- // | arg: more_slots... | dest: odd_slot :dest+1 | -- // work argslot up to destslot, copying contiguous data downwards -- // pseudo-code: -- // rax = src_addr + swap_bytes -- // rbx = dest_addr -- // while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++; -- // dest_slot denotes an exclusive upper limit -- int limit_bias = OP_ROT_ARGS_DOWN_LIMIT_BIAS; -- if (limit_bias != 0) -- __ addptr(rbx_destslot, - limit_bias * wordSize); -- move_arg_slots_down(_masm, -- Address(rax_argslot, swap_slots * wordSize), -- rbx_destslot, -- -swap_slots, -- rax_argslot, rdx_temp); -- __ subptr(rbx_destslot, swap_slots * wordSize); -- } -- // pop the original first chunk into the destination slot, now free -- for (int i = 0; i < swap_slots; i++) { -- if (i == 0) __ movptr(Address(rbx_destslot, i * wordSize), rdi_temp); -- else __ popptr(Address(rbx_destslot, i * wordSize)); -- } -- } -- -- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); -- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); -- } -- break; -- -- case 
_adapter_dup_args: -- { -- // 'argslot' is the position of the first argument to duplicate -- __ movl(rax_argslot, rcx_amh_vmargslot); -- __ lea(rax_argslot, __ argument_address(rax_argslot)); -- -- // 'stack_move' is negative number of words to duplicate -- Register rdi_stack_move = rdi_temp; -- load_stack_move(_masm, rdi_stack_move, rcx_recv, true); -- -- if (VerifyMethodHandles) { -- verify_argslots(_masm, rdi_stack_move, rax_argslot, true, -- "copied argument(s) must fall within current frame"); -- } -- -- if (UseStackBanging) { -- // Bang the stack before pushing args. -- int frame_size = 256 * Interpreter::stackElementSize; // conservative -- __ generate_stack_overflow_check(frame_size + sizeof(RicochetFrame)); -- } -- // insert location is always the bottom of the argument list: -- Address insert_location = __ argument_address(constant(0)); -- int pre_arg_words = insert_location.disp() / wordSize; // return PC is pushed -- assert(insert_location.base() == rsp, ""); -- -- __ negl(rdi_stack_move); -- push_arg_slots(_masm, rax_argslot, rdi_stack_move, -- pre_arg_words, rbx_temp, rdx_temp); -- -- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); -- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); -- } -- break; -- -- case _adapter_drop_args: -- { -- // 'argslot' is the position of the first argument to nuke -- __ movl(rax_argslot, rcx_amh_vmargslot); -- __ lea(rax_argslot, __ argument_address(rax_argslot)); -- -- // (must do previous push after argslot address is taken) -- -- // 'stack_move' is number of words to drop -- Register rdi_stack_move = rdi_temp; -- load_stack_move(_masm, rdi_stack_move, rcx_recv, false); -- remove_arg_slots(_masm, rdi_stack_move, -- rax_argslot, rbx_temp, rdx_temp); -- -- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); -- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); -- } -- break; -- -- case _adapter_collect_args: -- case _adapter_fold_args: -- case _adapter_spread_args: -- // handled completely by optimized cases -- __ 
stop("init_AdapterMethodHandle should not issue this"); -- break; -- -- case _adapter_opt_collect_ref: -- case _adapter_opt_collect_int: -- case _adapter_opt_collect_long: -- case _adapter_opt_collect_float: -- case _adapter_opt_collect_double: -- case _adapter_opt_collect_void: -- case _adapter_opt_collect_0_ref: -- case _adapter_opt_collect_1_ref: -- case _adapter_opt_collect_2_ref: -- case _adapter_opt_collect_3_ref: -- case _adapter_opt_collect_4_ref: -- case _adapter_opt_collect_5_ref: -- case _adapter_opt_filter_S0_ref: -- case _adapter_opt_filter_S1_ref: -- case _adapter_opt_filter_S2_ref: -- case _adapter_opt_filter_S3_ref: -- case _adapter_opt_filter_S4_ref: -- case _adapter_opt_filter_S5_ref: -- case _adapter_opt_collect_2_S0_ref: -- case _adapter_opt_collect_2_S1_ref: -- case _adapter_opt_collect_2_S2_ref: -- case _adapter_opt_collect_2_S3_ref: -- case _adapter_opt_collect_2_S4_ref: -- case _adapter_opt_collect_2_S5_ref: -- case _adapter_opt_fold_ref: -- case _adapter_opt_fold_int: -- case _adapter_opt_fold_long: -- case _adapter_opt_fold_float: -- case _adapter_opt_fold_double: -- case _adapter_opt_fold_void: -- case _adapter_opt_fold_1_ref: -- case _adapter_opt_fold_2_ref: -- case _adapter_opt_fold_3_ref: -- case _adapter_opt_fold_4_ref: -- case _adapter_opt_fold_5_ref: -- { -- // Given a fresh incoming stack frame, build a new ricochet frame. -- // On entry, TOS points at a return PC, and RBP is the callers frame ptr. -- // RSI/R13 has the caller's exact stack pointer, which we must also preserve. -- // RCX contains an AdapterMethodHandle of the indicated kind. -- -- // Relevant AMH fields: -- // amh.vmargslot: -- // points to the trailing edge of the arguments -- // to filter, collect, or fold. For a boxing operation, -- // it points just after the single primitive value. -- // amh.argument: -- // recursively called MH, on |collect| arguments -- // amh.vmtarget: -- // final destination MH, on return value, etc. 
-- // amh.conversion.dest: -- // tells what is the type of the return value -- // (not needed here, since dest is also derived from ek) -- // amh.conversion.vminfo: -- // points to the trailing edge of the return value -- // when the vmtarget is to be called; this is -- // equal to vmargslot + (retained ? |collect| : 0) -- -- // Pass 0 or more argument slots to the recursive target. -- int collect_count_constant = ek_adapter_opt_collect_count(ek); -- -- // The collected arguments are copied from the saved argument list: -- int collect_slot_constant = ek_adapter_opt_collect_slot(ek); -- -- assert(ek_orig == _adapter_collect_args || -- ek_orig == _adapter_fold_args, ""); -- bool retain_original_args = (ek_orig == _adapter_fold_args); -- -- // The return value is replaced (or inserted) at the 'vminfo' argslot. -- // Sometimes we can compute this statically. -- int dest_slot_constant = -1; -- if (!retain_original_args) -- dest_slot_constant = collect_slot_constant; -- else if (collect_slot_constant >= 0 && collect_count_constant >= 0) -- // We are preserving all the arguments, and the return value is prepended, -- // so the return slot is to the left (above) the |collect| sequence. -- dest_slot_constant = collect_slot_constant + collect_count_constant; -- -- // Replace all those slots by the result of the recursive call. -- // The result type can be one of ref, int, long, float, double, void. -- // In the case of void, nothing is pushed on the stack after return. -- BasicType dest = ek_adapter_opt_collect_type(ek); -- assert(dest == type2wfield[dest], "dest is a stack slot type"); -- int dest_count = type2size[dest]; -- assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size"); -- -- // Choose a return continuation. 
-- EntryKind ek_ret = _adapter_opt_return_any; -- if (dest != T_CONFLICT && OptimizeMethodHandles) { -- switch (dest) { -- case T_INT : ek_ret = _adapter_opt_return_int; break; -- case T_LONG : ek_ret = _adapter_opt_return_long; break; -- case T_FLOAT : ek_ret = _adapter_opt_return_float; break; -- case T_DOUBLE : ek_ret = _adapter_opt_return_double; break; -- case T_OBJECT : ek_ret = _adapter_opt_return_ref; break; -- case T_VOID : ek_ret = _adapter_opt_return_void; break; -- default : ShouldNotReachHere(); -- } -- if (dest == T_OBJECT && dest_slot_constant >= 0) { -- EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant); -- if (ek_try <= _adapter_opt_return_LAST && -- ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) { -- ek_ret = ek_try; -- } -- } -- assert(ek_adapter_opt_return_type(ek_ret) == dest, ""); -- } -- -- // Already pushed: ... keep1 | collect | keep2 | sender_pc | -- // push(sender_pc); -- -- // Compute argument base: -- Register rax_argv = rax_argslot; -- __ lea(rax_argv, __ argument_address(constant(0))); -- -- // Push a few extra argument words, if we need them to store the return value. -- { -- int extra_slots = 0; -- if (retain_original_args) { -- extra_slots = dest_count; -- } else if (collect_count_constant == -1) { -- extra_slots = dest_count; // collect_count might be zero; be generous -- } else if (dest_count > collect_count_constant) { -- extra_slots = (dest_count - collect_count_constant); -- } else { -- // else we know we have enough dead space in |collect| to repurpose for return values -- } -- DEBUG_ONLY(extra_slots += 1); -- if (extra_slots > 0) { -- __ pop(rbx_temp); // return value -- __ subptr(rsp, (extra_slots * Interpreter::stackElementSize)); -- // Push guard word #2 in debug mode. 
-- DEBUG_ONLY(__ movptr(Address(rsp, 0), (int32_t) RicochetFrame::MAGIC_NUMBER_2)); -- __ push(rbx_temp); -- } -- } -- -- RicochetFrame::enter_ricochet_frame(_masm, rcx_recv, rax_argv, -- entry(ek_ret)->from_interpreted_entry(), rbx_temp); -- -- // Now pushed: ... keep1 | collect | keep2 | RF | -- // some handy frame slots: -- Address exact_sender_sp_addr = RicochetFrame::frame_address(RicochetFrame::exact_sender_sp_offset_in_bytes()); -- Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); -- Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes()); -- --#ifdef ASSERT -- if (VerifyMethodHandles && dest != T_CONFLICT) { -- BLOCK_COMMENT("verify AMH.conv.dest"); -- load_conversion_dest_type(_masm, rbx_temp, conversion_addr); -- Label L_dest_ok; -- __ cmpl(rbx_temp, (int) dest); -- __ jcc(Assembler::equal, L_dest_ok); -- if (dest == T_INT) { -- for (int bt = T_BOOLEAN; bt < T_INT; bt++) { -- if (is_subword_type(BasicType(bt))) { -- __ cmpl(rbx_temp, (int) bt); -- __ jcc(Assembler::equal, L_dest_ok); -- } -- } -- } -- __ stop("bad dest in AMH.conv"); -- __ BIND(L_dest_ok); -- } --#endif //ASSERT -- -- // Find out where the original copy of the recursive argument sequence begins. -- Register rax_coll = rax_argv; -- { -- RegisterOrConstant collect_slot = collect_slot_constant; -- if (collect_slot_constant == -1) { -- __ movl(rdi_temp, rcx_amh_vmargslot); -- collect_slot = rdi_temp; -- } -- if (collect_slot_constant != 0) -- __ lea(rax_coll, Address(rax_argv, collect_slot, Interpreter::stackElementScale())); -- // rax_coll now points at the trailing edge of |collect| and leading edge of |keep2| -- } -- -- // Replace the old AMH with the recursive MH. (No going back now.) -- // In the case of a boxing call, the recursive call is to a 'boxer' method, -- // such as Integer.valueOf or Long.valueOf. 
In the case of a filter -- // or collect call, it will take one or more arguments, transform them, -- // and return some result, to store back into argument_base[vminfo]. -- __ load_heap_oop(rcx_recv, rcx_amh_argument); -- if (VerifyMethodHandles) verify_method_handle(_masm, rcx_recv); -- -- // Push a space for the recursively called MH first: -- __ push((int32_t)NULL_WORD); -- -- // Calculate |collect|, the number of arguments we are collecting. -- Register rdi_collect_count = rdi_temp; -- RegisterOrConstant collect_count; -- if (collect_count_constant >= 0) { -- collect_count = collect_count_constant; -- } else { -- __ load_method_handle_vmslots(rdi_collect_count, rcx_recv, rdx_temp); -- collect_count = rdi_collect_count; -- } --#ifdef ASSERT -- if (VerifyMethodHandles && collect_count_constant >= 0) { -- __ load_method_handle_vmslots(rbx_temp, rcx_recv, rdx_temp); -- Label L_count_ok; -- __ cmpl(rbx_temp, collect_count_constant); -- __ jcc(Assembler::equal, L_count_ok); -- __ stop("bad vminfo in AMH.conv"); -- __ BIND(L_count_ok); -- } --#endif //ASSERT -- -- // copy |collect| slots directly to TOS: -- push_arg_slots(_masm, rax_coll, collect_count, 0, rbx_temp, rdx_temp); -- // Now pushed: ... keep1 | collect | keep2 | RF... | collect | -- // rax_coll still points at the trailing edge of |collect| and leading edge of |keep2| -- -- // If necessary, adjust the saved arguments to make room for the eventual return value. -- // Normal adjustment: ... keep1 | +dest+ | -collect- | keep2 | RF... | collect | -- // If retaining args: ... keep1 | +dest+ | collect | keep2 | RF... | collect | -- // In the non-retaining case, this might move keep2 either up or down. -- // We don't have to copy the whole | RF... collect | complex, -- // but we must adjust RF.saved_args_base. -- // Also, from now on, we will forget about the original copy of |collect|. -- // If we are retaining it, we will treat it as part of |keep2|. 
-- // For clarity we will define |keep3| = |collect|keep2| or |keep2|. -- -- BLOCK_COMMENT("adjust trailing arguments {"); -- // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements. -- int open_count = dest_count; -- RegisterOrConstant close_count = collect_count_constant; -- Register rdi_close_count = rdi_collect_count; -- if (retain_original_args) { -- close_count = constant(0); -- } else if (collect_count_constant == -1) { -- close_count = rdi_collect_count; -- } -- -- // How many slots need moving? This is simply dest_slot (0 => no |keep3|). -- RegisterOrConstant keep3_count; -- Register rsi_keep3_count = rsi; // can repair from RF.exact_sender_sp -- if (dest_slot_constant >= 0) { -- keep3_count = dest_slot_constant; -- } else { -- load_conversion_vminfo(_masm, rsi_keep3_count, conversion_addr); -- keep3_count = rsi_keep3_count; -- } --#ifdef ASSERT -- if (VerifyMethodHandles && dest_slot_constant >= 0) { -- load_conversion_vminfo(_masm, rbx_temp, conversion_addr); -- Label L_vminfo_ok; -- __ cmpl(rbx_temp, dest_slot_constant); -- __ jcc(Assembler::equal, L_vminfo_ok); -- __ stop("bad vminfo in AMH.conv"); -- __ BIND(L_vminfo_ok); -- } --#endif //ASSERT -- -- // tasks remaining: -- bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0); -- bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0)); -- bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant()); -- -- if (stomp_dest | fix_arg_base) { -- // we will probably need an updated rax_argv value -- if (collect_slot_constant >= 0) { -- // rax_coll already holds the leading edge of |keep2|, so tweak it -- assert(rax_coll == rax_argv, "elided a move"); -- if (collect_slot_constant != 0) -- __ subptr(rax_argv, collect_slot_constant * Interpreter::stackElementSize); -- } else { -- // Just reload from RF.saved_args_base. 
-- __ movptr(rax_argv, saved_args_base_addr); -- } -- } -- -- // Old and new argument locations (based at slot 0). -- // Net shift (&new_argv - &old_argv) is (close_count - open_count). -- bool zero_open_count = (open_count == 0); // remember this bit of info -- if (move_keep3 && fix_arg_base) { -- // It will be easier to have everything in one register: -- if (close_count.is_register()) { -- // Deduct open_count from close_count register to get a clean +/- value. -- __ subptr(close_count.as_register(), open_count); -- } else { -- close_count = close_count.as_constant() - open_count; -- } -- open_count = 0; -- } -- Address old_argv(rax_argv, 0); -- Address new_argv(rax_argv, close_count, Interpreter::stackElementScale(), -- - open_count * Interpreter::stackElementSize); -- -- // First decide if any actual data are to be moved. -- // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change. -- // (As it happens, all movements involve an argument list size change.) -- -- // If there are variable parameters, use dynamic checks to skip around the whole mess. 
-- Label L_done; -- if (!keep3_count.is_constant()) { -- __ testl(keep3_count.as_register(), keep3_count.as_register()); -- __ jcc(Assembler::zero, L_done); -- } -- if (!close_count.is_constant()) { -- __ cmpl(close_count.as_register(), open_count); -- __ jcc(Assembler::equal, L_done); -- } -- -- if (move_keep3 && fix_arg_base) { -- bool emit_move_down = false, emit_move_up = false, emit_guard = false; -- if (!close_count.is_constant()) { -- emit_move_down = emit_guard = !zero_open_count; -- emit_move_up = true; -- } else if (open_count != close_count.as_constant()) { -- emit_move_down = (open_count > close_count.as_constant()); -- emit_move_up = !emit_move_down; -- } -- Label L_move_up; -- if (emit_guard) { -- __ cmpl(close_count.as_register(), open_count); -- __ jcc(Assembler::greater, L_move_up); -- } -- -- if (emit_move_down) { -- // Move arguments down if |+dest+| > |-collect-| -- // (This is rare, except when arguments are retained.) -- // This opens space for the return value. -- if (keep3_count.is_constant()) { -- for (int i = 0; i < keep3_count.as_constant(); i++) { -- __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize)); -- __ movptr( new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp); -- } -- } else { -- Register rbx_argv_top = rbx_temp; -- __ lea(rbx_argv_top, old_argv.plus_disp(keep3_count, Interpreter::stackElementScale())); -- move_arg_slots_down(_masm, -- old_argv, // beginning of old argv -- rbx_argv_top, // end of old argv -- close_count, // distance to move down (must be negative) -- rax_argv, rdx_temp); -- // Used argv as an iteration variable; reload from RF.saved_args_base. -- __ movptr(rax_argv, saved_args_base_addr); -- } -- } -- -- if (emit_guard) { -- __ jmp(L_done); // assumes emit_move_up is true also -- __ BIND(L_move_up); -- } -- -- if (emit_move_up) { -- -- // Move arguments up if |+dest+| < |-collect-| -- // (This is usual, except when |keep3| is empty.) 
-- // This closes up the space occupied by the now-deleted collect values. -- if (keep3_count.is_constant()) { -- for (int i = keep3_count.as_constant() - 1; i >= 0; i--) { -- __ movptr(rdx_temp, old_argv.plus_disp(i * Interpreter::stackElementSize)); -- __ movptr( new_argv.plus_disp(i * Interpreter::stackElementSize), rdx_temp); -- } -- } else { -- Address argv_top = old_argv.plus_disp(keep3_count, Interpreter::stackElementScale()); -- move_arg_slots_up(_masm, -- rax_argv, // beginning of old argv -- argv_top, // end of old argv -- close_count, // distance to move up (must be positive) -- rbx_temp, rdx_temp); -- } -- } -- } -- __ BIND(L_done); -- -- if (fix_arg_base) { -- // adjust RF.saved_args_base by adding (close_count - open_count) -- if (!new_argv.is_same_address(Address(rax_argv, 0))) -- __ lea(rax_argv, new_argv); -- __ movptr(saved_args_base_addr, rax_argv); -- } -- -- if (stomp_dest) { -- // Stomp the return slot, so it doesn't hold garbage. -- // This isn't strictly necessary, but it may help detect bugs. -- int forty_two = RicochetFrame::RETURN_VALUE_PLACEHOLDER; -- __ movptr(Address(rax_argv, keep3_count, Address::times_ptr), -- (int32_t) forty_two); -- // uses rsi_keep3_count -- } -- BLOCK_COMMENT("} adjust trailing arguments"); -- -- BLOCK_COMMENT("do_recursive_call"); -- __ mov(saved_last_sp, rsp); // set rsi/r13 for callee -- __ pushptr(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr()).addr()); -- // The globally unique bounce address has two purposes: -- // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame). -- // 2. When returned to, it cuts back the stack and redirects control flow -- // to the return handler. -- // The return handler will further cut back the stack when it takes -- // down the RF. Perhaps there is a way to streamline this further. -- -- if (UseStackBanging) { -- // Bang the stack before recursive call. -- // Even if slots == 0, we are inside a RicochetFrame. 
-- int frame_size = collect_count.is_constant() ? collect_count.as_constant() * wordSize : -1; -- if (frame_size < 0) { -- frame_size = 256 * Interpreter::stackElementSize; // conservative -- } -- __ generate_stack_overflow_check(frame_size + sizeof(RicochetFrame)); -- } -- // State during recursive call: -- // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc | -- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); -- -- break; -- } -- -- case _adapter_opt_return_ref: -- case _adapter_opt_return_int: -- case _adapter_opt_return_long: -- case _adapter_opt_return_float: -- case _adapter_opt_return_double: -- case _adapter_opt_return_void: -- case _adapter_opt_return_S0_ref: -- case _adapter_opt_return_S1_ref: -- case _adapter_opt_return_S2_ref: -- case _adapter_opt_return_S3_ref: -- case _adapter_opt_return_S4_ref: -- case _adapter_opt_return_S5_ref: -- { -- BasicType dest_type_constant = ek_adapter_opt_return_type(ek); -- int dest_slot_constant = ek_adapter_opt_return_slot(ek); -- -- if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); -- -- if (dest_slot_constant == -1) { -- // The current stub is a general handler for this dest_type. -- // It can be called from _adapter_opt_return_any below. -- // Stash the address in a little table. 
-- assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob"); -- address return_handler = __ pc(); -- _adapter_return_handlers[dest_type_constant] = return_handler; -- if (dest_type_constant == T_INT) { -- // do the subword types too -- for (int bt = T_BOOLEAN; bt < T_INT; bt++) { -- if (is_subword_type(BasicType(bt)) && -- _adapter_return_handlers[bt] == NULL) { -- _adapter_return_handlers[bt] = return_handler; -- } -- } -- } -- } -- -- Register rbx_arg_base = rbx_temp; -- assert_different_registers(rax, rdx, // possibly live return value registers -- rdi_temp, rbx_arg_base); -- -- Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); -- Address saved_args_base_addr = RicochetFrame::frame_address(RicochetFrame::saved_args_base_offset_in_bytes()); -- -- __ movptr(rbx_arg_base, saved_args_base_addr); -- RegisterOrConstant dest_slot = dest_slot_constant; -- if (dest_slot_constant == -1) { -- load_conversion_vminfo(_masm, rdi_temp, conversion_addr); -- dest_slot = rdi_temp; -- } -- // Store the result back into the argslot. -- // This code uses the interpreter calling sequence, in which the return value -- // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop. -- // There are certain irregularities with floating point values, which can be seen -- // in TemplateInterpreterGenerator::generate_return_entry_for. -- move_return_value(_masm, dest_type_constant, Address(rbx_arg_base, dest_slot, Interpreter::stackElementScale())); -- -- RicochetFrame::leave_ricochet_frame(_masm, rcx_recv, rbx_arg_base, rdx_temp); -- __ push(rdx_temp); // repush the return PC -- -- // Load the final target and go. 
-- if (VerifyMethodHandles) verify_method_handle(_masm, rcx_recv); -- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); -- __ hlt(); // -------------------- -- break; -- } -- -- case _adapter_opt_return_any: -- { -- if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm); -- Register rdi_conv = rdi_temp; -- assert_different_registers(rax, rdx, // possibly live return value registers -- rdi_conv, rbx_temp); -- -- Address conversion_addr = RicochetFrame::frame_address(RicochetFrame::conversion_offset_in_bytes()); -- load_conversion_dest_type(_masm, rdi_conv, conversion_addr); -- __ lea(rbx_temp, ExternalAddress((address) &_adapter_return_handlers[0])); -- __ movptr(rbx_temp, Address(rbx_temp, rdi_conv, Address::times_ptr)); -- --#ifdef ASSERT -- { Label L_badconv; -- __ testptr(rbx_temp, rbx_temp); -- __ jccb(Assembler::zero, L_badconv); -- __ jmp(rbx_temp); -- __ bind(L_badconv); -- __ stop("bad method handle return"); -- } --#else //ASSERT -- __ jmp(rbx_temp); --#endif //ASSERT -- break; -- } -- -- case _adapter_opt_spread_0: -- case _adapter_opt_spread_1_ref: -- case _adapter_opt_spread_2_ref: -- case _adapter_opt_spread_3_ref: -- case _adapter_opt_spread_4_ref: -- case _adapter_opt_spread_5_ref: -- case _adapter_opt_spread_ref: -- case _adapter_opt_spread_byte: -- case _adapter_opt_spread_char: -- case _adapter_opt_spread_short: -- case _adapter_opt_spread_int: -- case _adapter_opt_spread_long: -- case _adapter_opt_spread_float: -- case _adapter_opt_spread_double: -- { -- // spread an array out into a group of arguments -- int length_constant = ek_adapter_opt_spread_count(ek); -- bool length_can_be_zero = (length_constant == 0); -- if (length_constant < 0) { -- // some adapters with variable length must handle the zero case -- if (!OptimizeMethodHandles || -- ek_adapter_opt_spread_type(ek) != T_OBJECT) -- length_can_be_zero = true; -- } -- -- // find the address of the array argument -- __ movl(rax_argslot, rcx_amh_vmargslot); -- __ lea(rax_argslot, __ 
argument_address(rax_argslot)); -- -- // grab another temp -- Register rsi_temp = rsi; -- -- // arx_argslot points both to the array and to the first output arg -- vmarg = Address(rax_argslot, 0); -- -- // Get the array value. -- Register rdi_array = rdi_temp; -- Register rdx_array_klass = rdx_temp; -- BasicType elem_type = ek_adapter_opt_spread_type(ek); -- int elem_slots = type2size[elem_type]; // 1 or 2 -- int array_slots = 1; // array is always a T_OBJECT -- int length_offset = arrayOopDesc::length_offset_in_bytes(); -- int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type); -- __ movptr(rdi_array, vmarg); -- -- Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done; -- if (length_can_be_zero) { -- // handle the null pointer case, if zero is allowed -- Label L_skip; -- if (length_constant < 0) { -- load_conversion_vminfo(_masm, rbx_temp, rcx_amh_conversion); -- __ testl(rbx_temp, rbx_temp); -- __ jcc(Assembler::notZero, L_skip); -- } -- __ testptr(rdi_array, rdi_array); -- __ jcc(Assembler::notZero, L_skip); -- -- // If 'rsi' contains the 'saved_last_sp' (this is only the -- // case in a 32-bit version of the VM) we have to save 'rsi' -- // on the stack because later on (at 'L_array_is_empty') 'rsi' -- // will be overwritten. -- if (rsi_temp == saved_last_sp) { -- __ push(saved_last_sp); -- // Need to re-push return PC to keep it on stack top. -- __ lea(saved_last_sp, ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr()).addr()); -- __ push(saved_last_sp); -- } -- // Also prepare a handy macro which restores 'rsi' if required. --#define UNPUSH_RSI \ -- { if (rsi_temp == saved_last_sp) { __ pop(saved_last_sp); __ pop(saved_last_sp); } } -- -- __ jmp(L_array_is_empty); -- __ bind(L_skip); -- } -- __ null_check(rdi_array, oopDesc::klass_offset_in_bytes()); -- __ load_klass(rdx_array_klass, rdi_array); -- -- // Save 'rsi' if required (see comment above). 
Do this only -- // after the null check such that the exception handler which is -- // called in the case of a null pointer exception will not be -- // confused by the extra value on the stack (it expects the -- // return pointer on top of the stack) -- if (rsi_temp == saved_last_sp) { -- __ push(saved_last_sp); -- // Need to re-push return PC to keep it on stack top. -- __ lea(saved_last_sp, ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr()).addr()); -- __ push(saved_last_sp); -- } -- -- // Check the array type. -- Register rbx_klass = rbx_temp; -- __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object! -- load_klass_from_Class(_masm, rbx_klass); -- -- Label ok_array_klass, bad_array_klass, bad_array_length; -- __ check_klass_subtype(rdx_array_klass, rbx_klass, rsi_temp, ok_array_klass); -- // If we get here, the type check failed! -- __ jmp(bad_array_klass); -- __ BIND(ok_array_klass); -- -- // Check length. -- if (length_constant >= 0) { -- __ cmpl(Address(rdi_array, length_offset), length_constant); -- } else { -- Register rbx_vminfo = rbx_temp; -- load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion); -- __ cmpl(rbx_vminfo, Address(rdi_array, length_offset)); -- } -- __ jcc(Assembler::notEqual, bad_array_length); -- -- Register rdx_argslot_limit = rdx_temp; -- -- // Array length checks out. Now insert any required stack slots. -- if (length_constant == -1) { -- // Form a pointer to the end of the affected region. -- __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize)); -- // 'stack_move' is negative number of words to insert -- // This number already accounts for elem_slots. 
-- Register rsi_stack_move = rsi_temp; -- load_stack_move(_masm, rsi_stack_move, rcx_recv, true); -- __ cmpptr(rsi_stack_move, 0); -- assert(stack_move_unit() < 0, "else change this comparison"); -- __ jcc(Assembler::less, L_insert_arg_space); -- __ jcc(Assembler::equal, L_copy_args); -- // single argument case, with no array movement -- __ BIND(L_array_is_empty); -- remove_arg_slots(_masm, -stack_move_unit() * array_slots, -- rax_argslot, rbx_temp, rdx_temp); -- __ jmp(L_args_done); // no spreading to do -- __ BIND(L_insert_arg_space); -- // come here in the usual case, stack_move < 0 (2 or more spread arguments) -- Register rdi_temp = rdi_array; // spill this -- insert_arg_slots(_masm, rsi_stack_move, -- rax_argslot, rbx_temp, rdi_temp); -- // reload the array since rsi was killed -- // reload from rdx_argslot_limit since rax_argslot is now decremented -- __ movptr(rdi_array, Address(rdx_argslot_limit, -Interpreter::stackElementSize)); -- } else if (length_constant >= 1) { -- int new_slots = (length_constant * elem_slots) - array_slots; -- insert_arg_slots(_masm, new_slots * stack_move_unit(), -- rax_argslot, rbx_temp, rdx_temp); -- } else if (length_constant == 0) { -- __ BIND(L_array_is_empty); -- remove_arg_slots(_masm, -stack_move_unit() * array_slots, -- rax_argslot, rbx_temp, rdx_temp); -- } else { -- ShouldNotReachHere(); -- } -- -- // Copy from the array to the new slots. -- // Note: Stack change code preserves integrity of rax_argslot pointer. -- // So even after slot insertions, rax_argslot still points to first argument. -- // Beware: Arguments that are shallow on the stack are deep in the array, -- // and vice versa. So a downward-growing stack (the usual) has to be copied -- // elementwise in reverse order from the source array. -- __ BIND(L_copy_args); -- if (length_constant == -1) { -- // [rax_argslot, rdx_argslot_limit) is the area we are inserting into. -- // Array element [0] goes at rdx_argslot_limit[-wordSize]. 
-- Register rdi_source = rdi_array; -- __ lea(rdi_source, Address(rdi_array, elem0_offset)); -- Register rdx_fill_ptr = rdx_argslot_limit; -- Label loop; -- __ BIND(loop); -- __ addptr(rdx_fill_ptr, -Interpreter::stackElementSize * elem_slots); -- move_typed_arg(_masm, elem_type, true, -- Address(rdx_fill_ptr, 0), Address(rdi_source, 0), -- rbx_temp, rsi_temp); -- __ addptr(rdi_source, type2aelembytes(elem_type)); -- __ cmpptr(rdx_fill_ptr, rax_argslot); -- __ jcc(Assembler::above, loop); -- } else if (length_constant == 0) { -- // nothing to copy -- } else { -- int elem_offset = elem0_offset; -- int slot_offset = length_constant * Interpreter::stackElementSize; -- for (int index = 0; index < length_constant; index++) { -- slot_offset -= Interpreter::stackElementSize * elem_slots; // fill backward -- move_typed_arg(_masm, elem_type, true, -- Address(rax_argslot, slot_offset), Address(rdi_array, elem_offset), -- rbx_temp, rsi_temp); -- elem_offset += type2aelembytes(elem_type); -- } -- } -- __ BIND(L_args_done); -- -- // Arguments are spread. Move to next method handle. -- UNPUSH_RSI; -- __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); -- __ jump_to_method_handle_entry(rcx_recv, rdx_temp); -- -- __ bind(bad_array_klass); -- UNPUSH_RSI; -- assert(!vmarg.uses(rarg2_required), "must be different registers"); -- __ load_heap_oop( rarg2_required, Address(rdx_array_klass, java_mirror_offset)); // required type -- __ movptr( rarg1_actual, vmarg); // bad array -- __ movl( rarg0_code, (int) Bytecodes::_aaload); // who is complaining? -- __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); -- -- __ bind(bad_array_length); -- UNPUSH_RSI; -- assert(!vmarg.uses(rarg2_required), "must be different registers"); -- __ mov( rarg2_required, rcx_recv); // AMH requiring a certain length -- __ movptr( rarg1_actual, vmarg); // bad array -- __ movl( rarg0_code, (int) Bytecodes::_arraylength); // who is complaining? 
-- __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); --#undef UNPUSH_RSI -- -- break; -- } -- -- default: -- // do not require all platforms to recognize all adapter types -- __ nop(); -- return; -- } -- BLOCK_COMMENT(err_msg("} Entry %s", entry_name(ek))); -- __ hlt(); -- -- address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry); -- __ unimplemented(entry_name(ek)); // %%% FIXME: NYI -- -- init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie)); --} -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/methodHandles_x86.hpp ---- openjdk/hotspot/src/cpu/x86/vm/methodHandles_x86.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/methodHandles_x86.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -27,266 +27,12 @@ - - // Adapters - enum /* platform_dependent_constants */ { -- adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 15000)) LP64_ONLY(32000 DEBUG_ONLY(+ 120000)) --}; -- --public: -- --// The stack just after the recursive call from a ricochet frame --// looks something like this. Offsets are marked in words, not bytes. --// rsi (r13 on LP64) is part of the interpreter calling sequence --// which tells the callee where is my real rsp (for frame walking). --// (...lower memory addresses) --// rsp: [ return pc ] always the global RicochetBlob::bounce_addr --// rsp+1: [ recursive arg N ] --// rsp+2: [ recursive arg N-1 ] --// ... --// rsp+N: [ recursive arg 1 ] --// rsp+N+1: [ recursive method handle ] --// ... 
--// rbp-6: [ cleanup continuation pc ] <-- (struct RicochetFrame) --// rbp-5: [ saved target MH ] the MH we will call on the saved args --// rbp-4: [ saved args layout oop ] an int[] array which describes argument layout --// rbp-3: [ saved args pointer ] address of transformed adapter arg M (slot 0) --// rbp-2: [ conversion ] information about how the return value is used --// rbp-1: [ exact sender sp ] exact TOS (rsi/r13) of original sender frame --// rbp+0: [ saved sender fp ] (for original sender of AMH) --// rbp+1: [ saved sender pc ] (back to original sender of AMH) --// rbp+2: [ transformed adapter arg M ] <-- (extended TOS of original sender) --// rbp+3: [ transformed adapter arg M-1] --// ... --// rbp+M+1: [ transformed adapter arg 1 ] --// rbp+M+2: [ padding ] <-- (rbp + saved args base offset) --// ... [ optional padding] --// (higher memory addresses...) --// --// The arguments originally passed by the original sender --// are lost, and arbitrary amounts of stack motion might have --// happened due to argument transformation. --// (This is done by C2I/I2C adapters and non-direct method handles.) --// This is why there is an unpredictable amount of memory between --// the extended and exact TOS of the sender. --// The ricochet adapter itself will also (in general) perform --// transformations before the recursive call. --// --// The transformed and saved arguments, immediately above the saved --// return PC, are a well-formed method handle invocation ready to execute. --// When the GC needs to walk the stack, these arguments are described --// via the saved arg types oop, an int[] array with a private format. --// This array is derived from the type of the transformed adapter --// method handle, which also sits at the base of the saved argument --// bundle. Since the GC may not be able to fish out the int[] --// array, so it is pushed explicitly on the stack. This may be --// an unnecessary expense. 
--// --// The following register conventions are significant at this point: --// rsp the thread stack, as always; preserved by caller --// rsi/r13 exact TOS of recursive frame (contents of [rbp-2]) --// rcx recursive method handle (contents of [rsp+N+1]) --// rbp preserved by caller (not used by caller) --// Unless otherwise specified, all registers can be blown by the call. --// --// If this frame must be walked, the transformed adapter arguments --// will be found with the help of the saved arguments descriptor. --// --// Therefore, the descriptor must match the referenced arguments. --// The arguments must be followed by at least one word of padding, --// which will be necessary to complete the final method handle call. --// That word is not treated as holding an oop. Neither is the word --// --// The word pointed to by the return argument pointer is not --// treated as an oop, even if points to a saved argument. --// This allows the saved argument list to have a "hole" in it --// to receive an oop from the recursive call. --// (The hole might temporarily contain RETURN_VALUE_PLACEHOLDER.) --// --// When the recursive callee returns, RicochetBlob::bounce_addr will --// immediately jump to the continuation stored in the RF. --// This continuation will merge the recursive return value --// into the saved argument list. At that point, the original --// rsi, rbp, and rsp will be reloaded, the ricochet frame will --// disappear, and the final target of the adapter method handle --// will be invoked on the transformed argument list. -- --class RicochetFrame { -- friend class MethodHandles; -- friend class VMStructs; -- -- private: -- intptr_t* _continuation; // what to do when control gets back here -- oopDesc* _saved_target; // target method handle to invoke on saved_args -- oopDesc* _saved_args_layout; // caching point for MethodTypeForm.vmlayout cookie -- intptr_t* _saved_args_base; // base of pushed arguments (slot 0, arg N) (-3) -- intptr_t _conversion; // misc. 
information from original AdapterMethodHandle (-2) -- intptr_t* _exact_sender_sp; // parallel to interpreter_frame_sender_sp (-1) -- intptr_t* _sender_link; // *must* coincide with frame::link_offset (0) -- address _sender_pc; // *must* coincide with frame::return_addr_offset (1) -- -- public: -- intptr_t* continuation() const { return _continuation; } -- oop saved_target() const { return _saved_target; } -- oop saved_args_layout() const { return _saved_args_layout; } -- intptr_t* saved_args_base() const { return _saved_args_base; } -- intptr_t conversion() const { return _conversion; } -- intptr_t* exact_sender_sp() const { return _exact_sender_sp; } -- intptr_t* sender_link() const { return _sender_link; } -- address sender_pc() const { return _sender_pc; } -- -- intptr_t* extended_sender_sp() const { -- // The extended sender SP is above the current RicochetFrame. -- return (intptr_t*) (((address) this) + sizeof(RicochetFrame)); -- } -- -- intptr_t return_value_slot_number() const { -- return adapter_conversion_vminfo(conversion()); -- } -- BasicType return_value_type() const { -- return adapter_conversion_dest_type(conversion()); -- } -- bool has_return_value_slot() const { -- return return_value_type() != T_VOID; -- } -- intptr_t* return_value_slot_addr() const { -- assert(has_return_value_slot(), ""); -- return saved_arg_slot_addr(return_value_slot_number()); -- } -- intptr_t* saved_target_slot_addr() const { -- return saved_arg_slot_addr(saved_args_length()); -- } -- intptr_t* saved_arg_slot_addr(int slot) const { -- assert(slot >= 0, ""); -- return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) ); -- } -- -- jint saved_args_length() const; -- jint saved_arg_offset(int arg) const; -- -- // GC interface -- oop* saved_target_addr() { return (oop*)&_saved_target; } -- oop* saved_args_layout_addr() { return (oop*)&_saved_args_layout; } -- -- oop compute_saved_args_layout(bool read_cache, bool write_cache); -- -- // 
Compiler/assembler interface. -- static int continuation_offset_in_bytes() { return offset_of(RicochetFrame, _continuation); } -- static int saved_target_offset_in_bytes() { return offset_of(RicochetFrame, _saved_target); } -- static int saved_args_layout_offset_in_bytes(){ return offset_of(RicochetFrame, _saved_args_layout); } -- static int saved_args_base_offset_in_bytes() { return offset_of(RicochetFrame, _saved_args_base); } -- static int conversion_offset_in_bytes() { return offset_of(RicochetFrame, _conversion); } -- static int exact_sender_sp_offset_in_bytes() { return offset_of(RicochetFrame, _exact_sender_sp); } -- static int sender_link_offset_in_bytes() { return offset_of(RicochetFrame, _sender_link); } -- static int sender_pc_offset_in_bytes() { return offset_of(RicochetFrame, _sender_pc); } -- -- // This value is not used for much, but it apparently must be nonzero. -- static int frame_size_in_bytes() { return sender_link_offset_in_bytes(); } -- --#ifdef ASSERT -- // The magic number is supposed to help find ricochet frames within the bytes of stack dumps. -- enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E }; -- static int magic_number_1_offset_in_bytes() { return -wordSize; } -- static int magic_number_2_offset_in_bytes() { return sizeof(RicochetFrame); } -- intptr_t magic_number_1() const { return *(intptr_t*)((address)this + magic_number_1_offset_in_bytes()); }; -- intptr_t magic_number_2() const { return *(intptr_t*)((address)this + magic_number_2_offset_in_bytes()); }; --#endif //ASSERT -- -- enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) }; -- -- static void verify_offsets() NOT_DEBUG_RETURN; -- void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc. 
-- void zap_arguments() NOT_DEBUG_RETURN; -- -- static void generate_ricochet_blob(MacroAssembler* _masm, -- // output params: -- int* bounce_offset, -- int* exception_offset, -- int* frame_size_in_words); -- -- static void enter_ricochet_frame(MacroAssembler* _masm, -- Register rcx_recv, -- Register rax_argv, -- address return_handler, -- Register rbx_temp); -- static void leave_ricochet_frame(MacroAssembler* _masm, -- Register rcx_recv, -- Register new_sp_reg, -- Register sender_pc_reg); -- -- static Address frame_address(int offset = 0) { -- // The RicochetFrame is found by subtracting a constant offset from rbp. -- return Address(rbp, - sender_link_offset_in_bytes() + offset); -- } -- -- static RicochetFrame* from_frame(const frame& fr) { -- address bp = (address) fr.fp(); -- RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes()); -- rf->verify(); -- return rf; -- } -- -- static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN; -- -- static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN; -+ adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 25000)) LP64_ONLY(32000 DEBUG_ONLY(+ 150000)) - }; - - // Additional helper methods for MethodHandles code generation: - public: - static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg); -- static void load_conversion_vminfo(MacroAssembler* _masm, Register reg, Address conversion_field_addr); -- static void load_conversion_dest_type(MacroAssembler* _masm, Register reg, Address conversion_field_addr); -- -- static void load_stack_move(MacroAssembler* _masm, -- Register rdi_stack_move, -- Register rcx_amh, -- bool might_be_negative); -- -- static void insert_arg_slots(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, -- Register rax_argslot, -- Register rbx_temp, Register rdx_temp); -- -- static void remove_arg_slots(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, -- Register rax_argslot, -- Register rbx_temp, Register rdx_temp); -- 
-- static void push_arg_slots(MacroAssembler* _masm, -- Register rax_argslot, -- RegisterOrConstant slot_count, -- int skip_words_count, -- Register rbx_temp, Register rdx_temp); -- -- static void move_arg_slots_up(MacroAssembler* _masm, -- Register rbx_bottom, // invariant -- Address top_addr, // can use rax_temp -- RegisterOrConstant positive_distance_in_slots, -- Register rax_temp, Register rdx_temp); -- -- static void move_arg_slots_down(MacroAssembler* _masm, -- Address bottom_addr, // can use rax_temp -- Register rbx_top, // invariant -- RegisterOrConstant negative_distance_in_slots, -- Register rax_temp, Register rdx_temp); -- -- static void move_typed_arg(MacroAssembler* _masm, -- BasicType type, bool is_element, -- Address slot_dest, Address value_src, -- Register rbx_temp, Register rdx_temp); -- -- static void move_return_value(MacroAssembler* _masm, BasicType type, -- Address return_slot); -- -- static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, -- const char* error_message) NOT_DEBUG_RETURN; -- -- static void verify_argslots(MacroAssembler* _masm, -- RegisterOrConstant argslot_count, -- Register argslot_reg, -- bool negate_argslot, -- const char* error_message) NOT_DEBUG_RETURN; -- -- static void verify_stack_move(MacroAssembler* _masm, -- RegisterOrConstant arg_slots, -- int direction) NOT_DEBUG_RETURN; - - static void verify_klass(MacroAssembler* _masm, - Register obj, KlassHandle klass, -@@ -297,9 +43,17 @@ - "reference is a MH"); - } - -+ static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN; -+ - // Similar to InterpreterMacroAssembler::jump_from_interpreted. - // Takes care of special dispatch from single stepping too. 
-- static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp); -+ static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, -+ bool for_compiler_entry); -+ -+ static void jump_to_lambda_form(MacroAssembler* _masm, -+ Register recv, Register method_temp, -+ Register temp2, -+ bool for_compiler_entry); - - static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN; - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/sharedRuntime_x86_32.cpp ---- openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -643,6 +643,19 @@ - __ movdbl(r, Address(saved_sp, next_val_off)); - } - -+static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, -+ address code_start, address code_end, -+ Label& L_ok) { -+ Label L_fail; -+ __ lea(temp_reg, ExternalAddress(code_start)); -+ __ cmpptr(pc_reg, temp_reg); -+ __ jcc(Assembler::belowEqual, L_fail); -+ __ lea(temp_reg, ExternalAddress(code_end)); -+ __ cmpptr(pc_reg, temp_reg); -+ __ jcc(Assembler::below, L_ok); -+ __ bind(L_fail); -+} -+ - static void gen_i2c_adapter(MacroAssembler *masm, - int total_args_passed, - int comp_args_on_stack, -@@ -653,9 +666,53 @@ - // we may do a i2c -> c2i transition if we lose a race where compiled - // code goes non-entrant while we get args ready. - -+ // Adapters can be frameless because they do not require the caller -+ // to perform additional cleanup work, such as correcting the stack pointer. -+ // An i2c adapter is frameless because the *caller* frame, which is interpreted, -+ // routinely repairs its own stack pointer (from interpreter_frame_last_sp), -+ // even if a callee has modified the stack pointer. 
-+ // A c2i adapter is frameless because the *callee* frame, which is interpreted, -+ // routinely repairs its caller's stack pointer (from sender_sp, which is set -+ // up via the senderSP register). -+ // In other words, if *either* the caller or callee is interpreted, we can -+ // get the stack pointer repaired after a call. -+ // This is why c2i and i2c adapters cannot be indefinitely composed. -+ // In particular, if a c2i adapter were to somehow call an i2c adapter, -+ // both caller and callee would be compiled methods, and neither would -+ // clean up the stack pointer changes performed by the two adapters. -+ // If this happens, control eventually transfers back to the compiled -+ // caller, but with an uncorrected stack, causing delayed havoc. -+ - // Pick up the return address - __ movptr(rax, Address(rsp, 0)); - -+ if (VerifyAdapterCalls && -+ (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) { -+ // So, let's test for cascading c2i/i2c adapters right now. -+ // assert(Interpreter::contains($return_addr) || -+ // StubRoutines::contains($return_addr), -+ // "i2c adapter must return to an interpreter frame"); -+ __ block_comment("verify_i2c { "); -+ Label L_ok; -+ if (Interpreter::code() != NULL) -+ range_check(masm, rax, rdi, -+ Interpreter::code()->code_start(), Interpreter::code()->code_end(), -+ L_ok); -+ if (StubRoutines::code1() != NULL) -+ range_check(masm, rax, rdi, -+ StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(), -+ L_ok); -+ if (StubRoutines::code2() != NULL) -+ range_check(masm, rax, rdi, -+ StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(), -+ L_ok); -+ const char* msg = "i2c adapter must return to an interpreter frame"; -+ __ block_comment(msg); -+ __ stop(msg); -+ __ bind(L_ok); -+ __ block_comment("} verify_i2ce "); -+ } -+ - // Must preserve original SP for loading incoming arguments because - // we need to align the outgoing SP for compiled code. 
- __ movptr(rdi, rsp); -@@ -1293,6 +1350,89 @@ - __ bind(done); - } - -+static void verify_oop_args(MacroAssembler* masm, -+ int total_args_passed, -+ const BasicType* sig_bt, -+ const VMRegPair* regs) { -+ Register temp_reg = rbx; // not part of any compiled calling seq -+ if (VerifyOops) { -+ for (int i = 0; i < total_args_passed; i++) { -+ if (sig_bt[i] == T_OBJECT || -+ sig_bt[i] == T_ARRAY) { -+ VMReg r = regs[i].first(); -+ assert(r->is_valid(), "bad oop arg"); -+ if (r->is_stack()) { -+ __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); -+ __ verify_oop(temp_reg); -+ } else { -+ __ verify_oop(r->as_Register()); -+ } -+ } -+ } -+ } -+} -+ -+static void gen_special_dispatch(MacroAssembler* masm, -+ int total_args_passed, -+ int comp_args_on_stack, -+ vmIntrinsics::ID special_dispatch, -+ const BasicType* sig_bt, -+ const VMRegPair* regs) { -+ verify_oop_args(masm, total_args_passed, sig_bt, regs); -+ -+ // Now write the args into the outgoing interpreter space -+ bool has_receiver = false; -+ Register receiver_reg = noreg; -+ int member_arg_pos = -1; -+ Register member_reg = noreg; -+ int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch); -+ if (ref_kind != 0) { -+ member_arg_pos = total_args_passed - 1; // trailing MemberName argument -+ member_reg = rbx; // known to be free at this point -+ has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); -+ } else if (special_dispatch == vmIntrinsics::_invokeBasic) { -+ has_receiver = true; -+ } else { -+ guarantee(false, err_msg("special_dispatch=%d", special_dispatch)); -+ } -+ -+ if (member_reg != noreg) { -+ // Load the member_arg into register, if necessary. 
-+ assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob"); -+ assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object"); -+ VMReg r = regs[member_arg_pos].first(); -+ assert(r->is_valid(), "bad member arg"); -+ if (r->is_stack()) { -+ __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); -+ } else { -+ // no data motion is needed -+ member_reg = r->as_Register(); -+ } -+ } -+ -+ if (has_receiver) { -+ // Make sure the receiver is loaded into a register. -+ assert(total_args_passed > 0, "oob"); -+ assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object"); -+ VMReg r = regs[0].first(); -+ assert(r->is_valid(), "bad receiver arg"); -+ if (r->is_stack()) { -+ // Porting note: This assumes that compiled calling conventions always -+ // pass the receiver oop in a register. If this is not true on some -+ // platform, pick a temp and load the receiver from stack. -+ assert(false, "receiver always in a register"); -+ receiver_reg = rcx; // known to be free at this point -+ __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); -+ } else { -+ // no data motion is needed -+ receiver_reg = r->as_Register(); -+ } -+ } -+ -+ // Figure out which address we are really jumping to: -+ MethodHandles::generate_method_handle_dispatch(masm, special_dispatch, -+ receiver_reg, member_reg, /*for_compiler_entry:*/ true); -+} - - // --------------------------------------------------------------------------- - // Generate a native wrapper for a given method. 
The method takes arguments -@@ -1323,14 +1463,37 @@ - // transition back to thread_in_Java - // return to caller - // --nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, -+nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, - methodHandle method, - int compile_id, - int total_in_args, - int comp_args_on_stack, -- BasicType *in_sig_bt, -- VMRegPair *in_regs, -+ BasicType* in_sig_bt, -+ VMRegPair* in_regs, - BasicType ret_type) { -+ if (method->is_method_handle_intrinsic()) { -+ vmIntrinsics::ID iid = method->intrinsic_id(); -+ intptr_t start = (intptr_t)__ pc(); -+ int vep_offset = ((intptr_t)__ pc()) - start; -+ gen_special_dispatch(masm, -+ total_in_args, -+ comp_args_on_stack, -+ method->intrinsic_id(), -+ in_sig_bt, -+ in_regs); -+ int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period -+ __ flush(); -+ int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually -+ return nmethod::new_native_nmethod(method, -+ compile_id, -+ masm->code(), -+ vep_offset, -+ frame_complete, -+ stack_slots / VMRegImpl::slots_per_word, -+ in_ByteSize(-1), -+ in_ByteSize(-1), -+ (OopMapSet*)NULL); -+ } - bool is_critical_native = true; - address native_func = method->critical_native_function(); - if (native_func == NULL) { -@@ -1436,7 +1599,7 @@ - if (in_regs[i].first()->is_Register()) { - const Register reg = in_regs[i].first()->as_Register(); - switch (in_sig_bt[i]) { -- case T_ARRAY: -+ case T_ARRAY: // critical array (uses 2 slots on LP64) - case T_BOOLEAN: - case T_BYTE: - case T_SHORT: -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/sharedRuntime_x86_64.cpp ---- openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -590,6 +590,19 @@ - __ jmp(rcx); - } - -+static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, -+ address 
code_start, address code_end, -+ Label& L_ok) { -+ Label L_fail; -+ __ lea(temp_reg, ExternalAddress(code_start)); -+ __ cmpptr(pc_reg, temp_reg); -+ __ jcc(Assembler::belowEqual, L_fail); -+ __ lea(temp_reg, ExternalAddress(code_end)); -+ __ cmpptr(pc_reg, temp_reg); -+ __ jcc(Assembler::below, L_ok); -+ __ bind(L_fail); -+} -+ - static void gen_i2c_adapter(MacroAssembler *masm, - int total_args_passed, - int comp_args_on_stack, -@@ -605,9 +618,53 @@ - // save code can segv when fxsave instructions find improperly - // aligned stack pointer. - -+ // Adapters can be frameless because they do not require the caller -+ // to perform additional cleanup work, such as correcting the stack pointer. -+ // An i2c adapter is frameless because the *caller* frame, which is interpreted, -+ // routinely repairs its own stack pointer (from interpreter_frame_last_sp), -+ // even if a callee has modified the stack pointer. -+ // A c2i adapter is frameless because the *callee* frame, which is interpreted, -+ // routinely repairs its caller's stack pointer (from sender_sp, which is set -+ // up via the senderSP register). -+ // In other words, if *either* the caller or callee is interpreted, we can -+ // get the stack pointer repaired after a call. -+ // This is why c2i and i2c adapters cannot be indefinitely composed. -+ // In particular, if a c2i adapter were to somehow call an i2c adapter, -+ // both caller and callee would be compiled methods, and neither would -+ // clean up the stack pointer changes performed by the two adapters. -+ // If this happens, control eventually transfers back to the compiled -+ // caller, but with an uncorrected stack, causing delayed havoc. -+ - // Pick up the return address - __ movptr(rax, Address(rsp, 0)); - -+ if (VerifyAdapterCalls && -+ (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) { -+ // So, let's test for cascading c2i/i2c adapters right now. 
-+ // assert(Interpreter::contains($return_addr) || -+ // StubRoutines::contains($return_addr), -+ // "i2c adapter must return to an interpreter frame"); -+ __ block_comment("verify_i2c { "); -+ Label L_ok; -+ if (Interpreter::code() != NULL) -+ range_check(masm, rax, r11, -+ Interpreter::code()->code_start(), Interpreter::code()->code_end(), -+ L_ok); -+ if (StubRoutines::code1() != NULL) -+ range_check(masm, rax, r11, -+ StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(), -+ L_ok); -+ if (StubRoutines::code2() != NULL) -+ range_check(masm, rax, r11, -+ StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(), -+ L_ok); -+ const char* msg = "i2c adapter must return to an interpreter frame"; -+ __ block_comment(msg); -+ __ stop(msg); -+ __ bind(L_ok); -+ __ block_comment("} verify_i2ce "); -+ } -+ - // Must preserve original SP for loading incoming arguments because - // we need to align the outgoing SP for compiled code. - __ movptr(r11, rsp); -@@ -1366,6 +1423,14 @@ - } - - -+// Different signatures may require very different orders for the move -+// to avoid clobbering other arguments. There's no simple way to -+// order them safely. Compute a safe order for issuing stores and -+// break any cycles in those stores. This code is fairly general but -+// it's not necessary on the other platforms so we keep it in the -+// platform dependent code instead of moving it into a shared file. -+// (See bugs 7013347 & 7145024.) -+// Note that this code is specific to LP64. 
- class ComputeMoveOrder: public StackObj { - class MoveOperation: public ResourceObj { - friend class ComputeMoveOrder; -@@ -1532,6 +1597,89 @@ - } - }; - -+static void verify_oop_args(MacroAssembler* masm, -+ int total_args_passed, -+ const BasicType* sig_bt, -+ const VMRegPair* regs) { -+ Register temp_reg = rbx; // not part of any compiled calling seq -+ if (VerifyOops) { -+ for (int i = 0; i < total_args_passed; i++) { -+ if (sig_bt[i] == T_OBJECT || -+ sig_bt[i] == T_ARRAY) { -+ VMReg r = regs[i].first(); -+ assert(r->is_valid(), "bad oop arg"); -+ if (r->is_stack()) { -+ __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); -+ __ verify_oop(temp_reg); -+ } else { -+ __ verify_oop(r->as_Register()); -+ } -+ } -+ } -+ } -+} -+ -+static void gen_special_dispatch(MacroAssembler* masm, -+ int total_args_passed, -+ int comp_args_on_stack, -+ vmIntrinsics::ID special_dispatch, -+ const BasicType* sig_bt, -+ const VMRegPair* regs) { -+ verify_oop_args(masm, total_args_passed, sig_bt, regs); -+ -+ // Now write the args into the outgoing interpreter space -+ bool has_receiver = false; -+ Register receiver_reg = noreg; -+ int member_arg_pos = -1; -+ Register member_reg = noreg; -+ int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch); -+ if (ref_kind != 0) { -+ member_arg_pos = total_args_passed - 1; // trailing MemberName argument -+ member_reg = rbx; // known to be free at this point -+ has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); -+ } else if (special_dispatch == vmIntrinsics::_invokeBasic) { -+ has_receiver = true; -+ } else { -+ guarantee(false, err_msg("special_dispatch=%d", special_dispatch)); -+ } -+ -+ if (member_reg != noreg) { -+ // Load the member_arg into register, if necessary. 
-+ assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob"); -+ assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object"); -+ VMReg r = regs[member_arg_pos].first(); -+ assert(r->is_valid(), "bad member arg"); -+ if (r->is_stack()) { -+ __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); -+ } else { -+ // no data motion is needed -+ member_reg = r->as_Register(); -+ } -+ } -+ -+ if (has_receiver) { -+ // Make sure the receiver is loaded into a register. -+ assert(total_args_passed > 0, "oob"); -+ assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object"); -+ VMReg r = regs[0].first(); -+ assert(r->is_valid(), "bad receiver arg"); -+ if (r->is_stack()) { -+ // Porting note: This assumes that compiled calling conventions always -+ // pass the receiver oop in a register. If this is not true on some -+ // platform, pick a temp and load the receiver from stack. -+ assert(false, "receiver always in a register"); -+ receiver_reg = j_rarg0; // known to be free at this point -+ __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); -+ } else { -+ // no data motion is needed -+ receiver_reg = r->as_Register(); -+ } -+ } -+ -+ // Figure out which address we are really jumping to: -+ MethodHandles::generate_method_handle_dispatch(masm, special_dispatch, -+ receiver_reg, member_reg, /*for_compiler_entry:*/ true); -+} - - // --------------------------------------------------------------------------- - // Generate a native wrapper for a given method. The method takes arguments -@@ -1539,14 +1687,60 @@ - // convention (handlizes oops, etc), transitions to native, makes the call, - // returns to java state (possibly blocking), unhandlizes any result and - // returns. 
--nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, -+// -+// Critical native functions are a shorthand for the use of -+// GetPrimtiveArrayCritical and disallow the use of any other JNI -+// functions. The wrapper is expected to unpack the arguments before -+// passing them to the callee and perform checks before and after the -+// native call to ensure that they GC_locker -+// lock_critical/unlock_critical semantics are followed. Some other -+// parts of JNI setup are skipped like the tear down of the JNI handle -+// block and the check for pending exceptions it's impossible for them -+// to be thrown. -+// -+// They are roughly structured like this: -+// if (GC_locker::needs_gc()) -+// SharedRuntime::block_for_jni_critical(); -+// tranistion to thread_in_native -+// unpack arrray arguments and call native entry point -+// check for safepoint in progress -+// check if any thread suspend flags are set -+// call into JVM and possible unlock the JNI critical -+// if a GC was suppressed while in the critical native. 
-+// transition back to thread_in_Java -+// return to caller -+// -+nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, - methodHandle method, - int compile_id, - int total_in_args, - int comp_args_on_stack, -- BasicType *in_sig_bt, -- VMRegPair *in_regs, -+ BasicType* in_sig_bt, -+ VMRegPair* in_regs, - BasicType ret_type) { -+ if (method->is_method_handle_intrinsic()) { -+ vmIntrinsics::ID iid = method->intrinsic_id(); -+ intptr_t start = (intptr_t)__ pc(); -+ int vep_offset = ((intptr_t)__ pc()) - start; -+ gen_special_dispatch(masm, -+ total_in_args, -+ comp_args_on_stack, -+ method->intrinsic_id(), -+ in_sig_bt, -+ in_regs); -+ int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period -+ __ flush(); -+ int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually -+ return nmethod::new_native_nmethod(method, -+ compile_id, -+ masm->code(), -+ vep_offset, -+ frame_complete, -+ stack_slots / VMRegImpl::slots_per_word, -+ in_ByteSize(-1), -+ in_ByteSize(-1), -+ (OopMapSet*)NULL); -+ } - bool is_critical_native = true; - address native_func = method->critical_native_function(); - if (native_func == NULL) { -@@ -1658,7 +1852,7 @@ - case T_SHORT: - case T_CHAR: - case T_INT: single_slots++; break; -- case T_ARRAY: -+ case T_ARRAY: // specific to LP64 (7145024) - case T_LONG: double_slots++; break; - default: ShouldNotReachHere(); - } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/stubGenerator_x86_32.cpp ---- openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -2315,12 +2315,6 @@ - CAST_FROM_FN_PTR(address, SharedRuntime::d2l)); - - // Build this early so it's available for the interpreter -- StubRoutines::_throw_WrongMethodTypeException_entry = -- generate_throw_exception("WrongMethodTypeException throw_exception", -- CAST_FROM_FN_PTR(address, 
SharedRuntime::throw_WrongMethodTypeException), -- rax, rcx); -- -- // Build this early so it's available for the interpreter - StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); - } - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/stubGenerator_x86_64.cpp ---- openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -3063,14 +3063,6 @@ - - StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); - -- // Build this early so it's available for the interpreter. Stub -- // expects the required and actual types as register arguments in -- // j_rarg0 and j_rarg1 respectively. -- StubRoutines::_throw_WrongMethodTypeException_entry = -- generate_throw_exception("WrongMethodTypeException throw_exception", -- CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException), -- rax, rcx); -- - // Build this early so it's available for the interpreter. - StubRoutines::_throw_StackOverflowError_entry = - generate_throw_exception("StackOverflowError throw_exception", -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/templateInterpreter_x86_32.cpp ---- openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -708,9 +708,9 @@ - // Need to differentiate between igetfield, agetfield, bgetfield etc. - // because they are different sizes. 
- // Use the type from the constant pool cache -- __ shrl(rdx, ConstantPoolCacheEntry::tosBits); -- // Make sure we don't need to mask rdx for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -+ __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift); -+ // Make sure we don't need to mask rdx after the above shift -+ ConstantPoolCacheEntry::verify_tos_state_shift(); - __ cmpl(rdx, btos); - __ jcc(Assembler::notEqual, notByte); - __ load_signed_byte(rax, field_address); -@@ -1510,7 +1510,6 @@ - case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break; - case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break; - case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break; -- case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break; - - case Interpreter::java_lang_math_sin : // fall thru - case Interpreter::java_lang_math_cos : // fall thru -@@ -1521,7 +1520,10 @@ - case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break; - case Interpreter::java_lang_ref_reference_get - : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; -- default : ShouldNotReachHere(); break; -+ default: -+ fatal(err_msg("unexpected method kind: %d", kind)); -+ break; -+k; - } - - if (entry_point) return entry_point; -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/templateInterpreter_x86_64.cpp ---- openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -681,9 +681,9 @@ - // Need to differentiate between igetfield, agetfield, bgetfield etc. - // because they are different sizes. 
- // Use the type from the constant pool cache -- __ shrl(rdx, ConstantPoolCacheEntry::tosBits); -- // Make sure we don't need to mask edx for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -+ __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift); -+ // Make sure we don't need to mask edx after the above shift -+ ConstantPoolCacheEntry::verify_tos_state_shift(); - - __ cmpl(rdx, atos); - __ jcc(Assembler::notEqual, notObj); -@@ -1521,12 +1521,11 @@ - switch (kind) { - case Interpreter::zerolocals : break; - case Interpreter::zerolocals_synchronized: synchronized = true; break; -- case Interpreter::native : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break; -- case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true); break; -- case Interpreter::empty : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry(); break; -- case Interpreter::accessor : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry(); break; -- case Interpreter::abstract : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry(); break; -- case Interpreter::method_handle : entry_point = ((InterpreterGenerator*) this)->generate_method_handle_entry();break; -+ case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break; -+ case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break; -+ case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break; -+ case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break; -+ case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break; - - case Interpreter::java_lang_math_sin : // fall thru - case Interpreter::java_lang_math_cos : // fall thru -@@ -1537,7 +1536,9 @@ - 
case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break; - case Interpreter::java_lang_ref_reference_get - : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; -- default : ShouldNotReachHere(); break; -+ default: -+ fatal(err_msg("unexpected method kind: %d", kind)); -+ break; - } - - if (entry_point) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/templateTable_x86_32.cpp ---- openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -446,13 +446,13 @@ - const Register cache = rcx; - const Register index = rdx; - -- resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1)); -+ resolve_cache_and_index(f12_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1)); - if (VerifyOops) { - __ verify_oop(rax); - } - - Label L_done, L_throw_exception; -- const Register con_klass_temp = rcx; // same as Rcache -+ const Register con_klass_temp = rcx; // same as cache - __ load_klass(con_klass_temp, rax); - __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr())); - __ jcc(Assembler::notEqual, L_done); -@@ -2084,15 +2084,15 @@ - Register Rcache, - Register index, - size_t index_size) { -- Register temp = rbx; -- -+ const Register temp = rbx; - assert_different_registers(result, Rcache, index, temp); - - Label resolved; -- if (byte_no == f1_oop) { -- // We are resolved if the f1 field contains a non-null object (CallSite, etc.) -- // This kind of CP cache entry does not need to match the flags byte, because -+ if (byte_no == f12_oop) { -+ // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.) -+ // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because - // there is a 1-1 relation between bytecode type and CP entry type. 
-+ // The caller will also load a methodOop from f2. - assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD) - __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); - __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset())); -@@ -2112,15 +2112,18 @@ - case Bytecodes::_getstatic : // fall through - case Bytecodes::_putstatic : // fall through - case Bytecodes::_getfield : // fall through -- case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break; -+ case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break; - case Bytecodes::_invokevirtual : // fall through - case Bytecodes::_invokespecial : // fall through - case Bytecodes::_invokestatic : // fall through -- case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; -- case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; -- case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; -- case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; -- default : ShouldNotReachHere(); break; -+ case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; -+ case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break; -+ case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; -+ case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; -+ case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; -+ default: -+ fatal(err_msg("unexpected bytecode: 
%s", Bytecodes::name(bytecode()))); -+ break; - } - __ movl(temp, (int)bytecode()); - __ call_VM(noreg, entry, temp); -@@ -2149,7 +2152,7 @@ - __ movl(flags, Address(cache, index, Address::times_ptr, - in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()))); - -- // klass overwrite register -+ // klass overwrite register - if (is_static) { - __ movptr(obj, Address(cache, index, Address::times_ptr, - in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset()))); -@@ -2161,7 +2164,7 @@ - Register itable_index, - Register flags, - bool is_invokevirtual, -- bool is_invokevfinal /*unused*/, -+ bool is_invokevfinal, /*unused*/ - bool is_invokedynamic) { - // setup registers - const Register cache = rcx; -@@ -2171,28 +2174,33 @@ - assert_different_registers(itable_index, flags); - assert_different_registers(itable_index, cache, index); - // determine constant pool cache field offsets -+ assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); - const int method_offset = in_bytes( - constantPoolCacheOopDesc::base_offset() + -- (is_invokevirtual -+ ((byte_no == f2_byte) - ? ConstantPoolCacheEntry::f2_offset() -- : ConstantPoolCacheEntry::f1_offset() -- ) -- ); -+ : ConstantPoolCacheEntry::f1_offset())); - const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + - ConstantPoolCacheEntry::flags_offset()); - // access constant pool cache fields - const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + - ConstantPoolCacheEntry::f2_offset()); - -- if (byte_no == f1_oop) { -- // Resolved f1_oop goes directly into 'method' register. -- assert(is_invokedynamic, ""); -- resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4)); -+ if (byte_no == f12_oop) { -+ // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'. -+ // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset). -+ // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle. 
-+ size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2)); -+ resolve_cache_and_index(byte_no, itable_index, cache, index, index_size); -+ __ movptr(method, Address(cache, index, Address::times_ptr, index_offset)); -+ itable_index = noreg; // hack to disable load below - } else { - resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2)); - __ movptr(method, Address(cache, index, Address::times_ptr, method_offset)); - } - if (itable_index != noreg) { -+ // pick up itable index from f2 also: -+ assert(byte_no == f1_byte, "already picked up f1"); - __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset)); - } - __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset)); -@@ -2260,10 +2268,10 @@ - - Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble; - -- __ shrl(flags, ConstantPoolCacheEntry::tosBits); -+ __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); - assert(btos == 0, "change code, btos != 0"); - // btos -- __ andptr(flags, 0x0f); -+ __ andptr(flags, ConstantPoolCacheEntry::tos_state_mask); - __ jcc(Assembler::notZero, notByte); - - __ load_signed_byte(rax, lo ); -@@ -2415,9 +2423,9 @@ - __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset + - ConstantPoolCacheEntry::flags_offset()))); - __ mov(rbx, rsp); -- __ shrl(rcx, ConstantPoolCacheEntry::tosBits); -- // Make sure we don't need to mask rcx for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -+ __ shrl(rcx, ConstantPoolCacheEntry::tos_state_shift); -+ // Make sure we don't need to mask rcx after the above shift -+ ConstantPoolCacheEntry::verify_tos_state_shift(); - __ cmpl(rcx, ltos); - __ jccb(Assembler::equal, two_word); - __ cmpl(rcx, dtos); -@@ -2467,7 +2475,7 @@ - - Label notVolatile, Done; - __ movl(rdx, flags); -- __ shrl(rdx, ConstantPoolCacheEntry::volatileField); -+ __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); - __ andl(rdx, 0x1); - - // 
field addresses -@@ -2476,9 +2484,9 @@ - - Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble; - -- __ shrl(flags, ConstantPoolCacheEntry::tosBits); -+ __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); - assert(btos == 0, "change code, btos != 0"); -- __ andl(flags, 0x0f); -+ __ andl(flags, ConstantPoolCacheEntry::tos_state_mask); - __ jcc(Assembler::notZero, notByte); - - // btos -@@ -2726,7 +2734,7 @@ - // volatile_barrier( ); - - Label notVolatile, Done; -- __ shrl(rdx, ConstantPoolCacheEntry::volatileField); -+ __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); - __ andl(rdx, 0x1); - // Check for volatile store - __ testl(rdx, rdx); -@@ -2892,19 +2900,29 @@ - } - - --void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) { -+void TemplateTable::prepare_invoke(int byte_no, -+ Register method, // linked method (or i-klass) -+ Register index, // itable index, MethodType, etc. -+ Register recv, // if caller wants to see it -+ Register flags // if caller wants to test it -+ ) { - // determine flags -- Bytecodes::Code code = bytecode(); -+ const Bytecodes::Code code = bytecode(); - const bool is_invokeinterface = code == Bytecodes::_invokeinterface; - const bool is_invokedynamic = code == Bytecodes::_invokedynamic; -+ const bool is_invokehandle = code == Bytecodes::_invokehandle; - const bool is_invokevirtual = code == Bytecodes::_invokevirtual; - const bool is_invokespecial = code == Bytecodes::_invokespecial; -- const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic); -- const bool receiver_null_check = is_invokespecial; -- const bool save_flags = is_invokeinterface || is_invokevirtual; -+ const bool load_receiver = (recv != noreg); -+ const bool save_flags = (flags != noreg); -+ assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); -+ assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags 
for vfinal"); -+ assert(flags == noreg || flags == rdx, ""); -+ assert(recv == noreg || recv == rcx, ""); -+ - // setup registers & access constant pool cache -- const Register recv = rcx; -- const Register flags = rdx; -+ if (recv == noreg) recv = rcx; -+ if (flags == noreg) flags = rdx; - assert_different_registers(method, index, recv, flags); - - // save 'interpreter return address' -@@ -2912,37 +2930,43 @@ - - load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); - -+ // maybe push appendix to arguments (just before return address) -+ if (is_invokedynamic || is_invokehandle) { -+ Label L_no_push; -+ __ verify_oop(index); -+ __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift)); -+ __ jccb(Assembler::zero, L_no_push); -+ // Push the appendix as a trailing parameter. -+ // This must be done before we get the receiver, -+ // since the parameter_size includes it. -+ __ push(index); // push appendix (MethodType, CallSite, etc.) -+ __ bind(L_no_push); -+ } -+ - // load receiver if needed (note: no return address pushed yet) - if (load_receiver) { -- assert(!is_invokedynamic, ""); - __ movl(recv, flags); -- __ andl(recv, 0xFF); -- // recv count is 0 based? 
-- Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1)); -+ __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask); -+ const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address -+ const int receiver_is_at_end = -1; // back off one slot to get receiver -+ Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end); - __ movptr(recv, recv_addr); - __ verify_oop(recv); - } - -- // do null check if needed -- if (receiver_null_check) { -- __ null_check(recv); -- } -- - if (save_flags) { - __ mov(rsi, flags); - } - - // compute return type -- __ shrl(flags, ConstantPoolCacheEntry::tosBits); -- // Make sure we don't need to mask flags for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -+ __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); -+ // Make sure we don't need to mask flags after the above shift -+ ConstantPoolCacheEntry::verify_tos_state_shift(); - // load return address - { -- address table_addr; -- if (is_invokeinterface || is_invokedynamic) -- table_addr = (address)Interpreter::return_5_addrs_by_index_table(); -- else -- table_addr = (address)Interpreter::return_3_addrs_by_index_table(); -+ const address table_addr = (is_invokeinterface || is_invokedynamic) ? -+ (address)Interpreter::return_5_addrs_by_index_table() : -+ (address)Interpreter::return_3_addrs_by_index_table(); - ExternalAddress table(table_addr); - __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))); - } -@@ -2950,7 +2974,7 @@ - // push return address - __ push(flags); - -- // Restore flag value from the constant pool cache, and restore rsi -+ // Restore flags value from the constant pool cache, and restore rsi - // for later null checks. 
rsi is the bytecode pointer - if (save_flags) { - __ mov(flags, rsi); -@@ -2959,22 +2983,26 @@ - } - - --void TemplateTable::invokevirtual_helper(Register index, Register recv, -- Register flags) { -- -+void TemplateTable::invokevirtual_helper(Register index, -+ Register recv, -+ Register flags) { - // Uses temporary registers rax, rdx - assert_different_registers(index, recv, rax, rdx); -+ assert(index == rbx, ""); -+ assert(recv == rcx, ""); - - // Test for an invoke of a final method - Label notFinal; - __ movl(rax, flags); -- __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod)); -+ __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift)); - __ jcc(Assembler::zero, notFinal); - -- Register method = index; // method must be rbx, -- assert(method == rbx, "methodOop must be rbx, for interpreter calling convention"); -+ const Register method = index; // method must be rbx -+ assert(method == rbx, -+ "methodOop must be rbx for interpreter calling convention"); - - // do the call - the index is actually the method to call -+ // that is, f2 is a vtable index if !is_vfinal, else f2 is a methodOop - __ verify_oop(method); - - // It's final, need a null check here! 
-@@ -2989,7 +3017,6 @@ - - // get receiver klass - __ null_check(recv, oopDesc::klass_offset_in_bytes()); -- // Keep recv in rcx for callee expects it there - __ load_klass(rax, recv); - __ verify_oop(rax); - -@@ -2997,9 +3024,7 @@ - __ profile_virtual_call(rax, rdi, rdx); - - // get target methodOop & entry point -- const int base = instanceKlass::vtable_start_offset() * wordSize; -- assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below"); -- __ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes())); -+ __ lookup_virtual_method(rax, index, method); - __ jump_from_interpreted(method, rdx); - } - -@@ -3007,9 +3032,12 @@ - void TemplateTable::invokevirtual(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f2_byte, "use this argument"); -- prepare_invoke(rbx, noreg, byte_no); -- -- // rbx,: index -+ prepare_invoke(byte_no, -+ rbx, // method or vtable index -+ noreg, // unused itable index -+ rcx, rdx); // recv, flags -+ -+ // rbx: index - // rcx: receiver - // rdx: flags - -@@ -3020,7 +3048,10 @@ - void TemplateTable::invokespecial(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); -- prepare_invoke(rbx, noreg, byte_no); -+ prepare_invoke(byte_no, rbx, noreg, // get f1 methodOop -+ rcx); // get receiver also for null check -+ __ verify_oop(rcx); -+ __ null_check(rcx); - // do the call - __ verify_oop(rbx); - __ profile_call(rax); -@@ -3031,7 +3062,7 @@ - void TemplateTable::invokestatic(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); -- prepare_invoke(rbx, noreg, byte_no); -+ prepare_invoke(byte_no, rbx); // get f1 methodOop - // do the call - __ verify_oop(rbx); - __ profile_call(rax); -@@ -3049,10 +3080,11 @@ - void TemplateTable::invokeinterface(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); -- prepare_invoke(rax, rbx, byte_no); -- -- // rax,: Interface 
-- // rbx,: index -+ prepare_invoke(byte_no, rax, rbx, // get f1 klassOop, f2 itable index -+ rcx, rdx); // recv, flags -+ -+ // rax: interface klass (from f1) -+ // rbx: itable index (from f2) - // rcx: receiver - // rdx: flags - -@@ -3062,7 +3094,7 @@ - // another compliant java compiler. - Label notMethod; - __ movl(rdi, rdx); -- __ andl(rdi, (1 << ConstantPoolCacheEntry::methodInterface)); -+ __ andl(rdi, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift)); - __ jcc(Assembler::zero, notMethod); - - invokevirtual_helper(rbx, rcx, rdx); -@@ -3070,6 +3102,7 @@ - - // Get receiver klass into rdx - also a null check - __ restore_locals(); // restore rdi -+ __ null_check(rcx, oopDesc::klass_offset_in_bytes()); - __ load_klass(rdx, rcx); - __ verify_oop(rdx); - -@@ -3084,7 +3117,7 @@ - rbx, rsi, - no_such_interface); - -- // rbx,: methodOop to call -+ // rbx: methodOop to call - // rcx: receiver - // Check for abstract method error - // Note: This should be done more efficiently via a throw_abstract_method_error -@@ -3123,9 +3156,39 @@ - __ should_not_reach_here(); - } - -+void TemplateTable::invokehandle(int byte_no) { -+ transition(vtos, vtos); -+ assert(byte_no == f12_oop, "use this argument"); -+ const Register rbx_method = rbx; // (from f2) -+ const Register rax_mtype = rax; // (from f1) -+ const Register rcx_recv = rcx; -+ const Register rdx_flags = rdx; -+ -+ if (!EnableInvokeDynamic) { -+ // rewriter does not generate this bytecode -+ __ should_not_reach_here(); -+ return; -+ } -+ -+ prepare_invoke(byte_no, -+ rbx_method, rax_mtype, // get f2 methodOop, f1 MethodType -+ rcx_recv); -+ __ verify_oop(rbx_method); -+ __ verify_oop(rcx_recv); -+ __ null_check(rcx_recv); -+ -+ // Note: rax_mtype is already pushed (if necessary) by prepare_invoke -+ -+ // FIXME: profile the LambdaForm also -+ __ profile_final_call(rax); -+ -+ __ jump_from_interpreted(rbx_method, rdx); -+} -+ -+ - void TemplateTable::invokedynamic(int byte_no) { - transition(vtos, vtos); -- 
assert(byte_no == f1_oop, "use this argument"); -+ assert(byte_no == f12_oop, "use this argument"); - - if (!EnableInvokeDynamic) { - // We should not encounter this bytecode if !EnableInvokeDynamic. -@@ -3138,26 +3201,23 @@ - return; - } - -- prepare_invoke(rax, rbx, byte_no); -- -- // rax: CallSite object (f1) -- // rbx: unused (f2) -- // rcx: receiver address -- // rdx: flags (unused) -- -- Register rax_callsite = rax; -- Register rcx_method_handle = rcx; -+ const Register rbx_method = rbx; -+ const Register rax_callsite = rax; -+ -+ prepare_invoke(byte_no, rbx_method, rax_callsite); -+ -+ // rax: CallSite object (from f1) -+ // rbx: MH.linkToCallSite method (from f2) -+ -+ // Note: rax_callsite is already pushed by prepare_invoke - - // %%% should make a type profile for any invokedynamic that takes a ref argument - // profile this call - __ profile_call(rsi); - - __ verify_oop(rax_callsite); -- __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx))); -- __ null_check(rcx_method_handle); -- __ verify_oop(rcx_method_handle); -- __ prepare_to_jump_from_interpreted(); -- __ jump_to_method_handle_entry(rcx_method_handle, rdx); -+ -+ __ jump_from_interpreted(rbx_method, rdx); - } - - //---------------------------------------------------------------------------------------------------- -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/templateTable_x86_32.hpp ---- openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_32.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_32.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -25,10 +25,15 @@ - #ifndef CPU_X86_VM_TEMPLATETABLE_X86_32_HPP - #define CPU_X86_VM_TEMPLATETABLE_X86_32_HPP - -- static void prepare_invoke(Register method, Register index, int byte_no); -+ static void prepare_invoke(int byte_no, -+ Register method, // linked method (or i-klass) -+ Register index = noreg, // itable index, MethodType, etc. 
-+ Register recv = noreg, // if caller wants to see it -+ Register flags = noreg // if caller wants to test it -+ ); - static void invokevirtual_helper(Register index, Register recv, - Register flags); -- static void volatile_barrier(Assembler::Membar_mask_bits order_constraint ); -+ static void volatile_barrier(Assembler::Membar_mask_bits order_constraint); - - // Helpers - static void index_check(Register array, Register index); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/templateTable_x86_64.cpp ---- openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -458,7 +458,7 @@ - const Register cache = rcx; - const Register index = rdx; - -- resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1)); -+ resolve_cache_and_index(f12_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1)); - if (VerifyOops) { - __ verify_oop(rax); - } -@@ -2125,10 +2125,11 @@ - assert_different_registers(result, Rcache, index, temp); - - Label resolved; -- if (byte_no == f1_oop) { -- // We are resolved if the f1 field contains a non-null object (CallSite, etc.) -- // This kind of CP cache entry does not need to match the flags byte, because -+ if (byte_no == f12_oop) { -+ // We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.) -+ // This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because - // there is a 1-1 relation between bytecode type and CP entry type. -+ // The caller will also load a methodOop from f2. 
- assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD) - __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); - __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset())); -@@ -2157,6 +2158,9 @@ - case Bytecodes::_invokeinterface: - entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); - break; -+ case Bytecodes::_invokehandle: -+ entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); -+ break; - case Bytecodes::_invokedynamic: - entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); - break; -@@ -2167,7 +2171,7 @@ - entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); - break; - default: -- ShouldNotReachHere(); -+ fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode()))); - break; - } - __ movl(temp, (int) bytecode()); -@@ -2180,7 +2184,7 @@ - __ bind(resolved); - } - --// The Rcache and index registers must be set before call -+// The cache and index registers must be set before call - void TemplateTable::load_field_cp_cache_entry(Register obj, - Register cache, - Register index, -@@ -2191,17 +2195,17 @@ - - ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); - // Field offset -- __ movptr(off, Address(cache, index, Address::times_8, -+ __ movptr(off, Address(cache, index, Address::times_ptr, - in_bytes(cp_base_offset + - ConstantPoolCacheEntry::f2_offset()))); - // Flags -- __ movl(flags, Address(cache, index, Address::times_8, -+ __ movl(flags, Address(cache, index, Address::times_ptr, - in_bytes(cp_base_offset + - ConstantPoolCacheEntry::flags_offset()))); - - // klass overwrite register - if (is_static) { -- __ movptr(obj, Address(cache, index, Address::times_8, -+ __ movptr(obj, Address(cache, index, Address::times_ptr, - in_bytes(cp_base_offset + - ConstantPoolCacheEntry::f1_offset()))); - } -@@ -2222,9 +2226,10 @@ - 
assert_different_registers(itable_index, flags); - assert_different_registers(itable_index, cache, index); - // determine constant pool cache field offsets -+ assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); - const int method_offset = in_bytes( - constantPoolCacheOopDesc::base_offset() + -- (is_invokevirtual -+ ((byte_no == f2_byte) - ? ConstantPoolCacheEntry::f2_offset() - : ConstantPoolCacheEntry::f1_offset())); - const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + -@@ -2233,15 +2238,21 @@ - const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + - ConstantPoolCacheEntry::f2_offset()); - -- if (byte_no == f1_oop) { -- // Resolved f1_oop goes directly into 'method' register. -- assert(is_invokedynamic, ""); -- resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4)); -+ if (byte_no == f12_oop) { -+ // Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'. -+ // Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset). -+ // See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle. -+ size_t index_size = (is_invokedynamic ? 
sizeof(u4) : sizeof(u2)); -+ resolve_cache_and_index(byte_no, itable_index, cache, index, index_size); -+ __ movptr(method, Address(cache, index, Address::times_ptr, index_offset)); -+ itable_index = noreg; // hack to disable load below - } else { - resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2)); - __ movptr(method, Address(cache, index, Address::times_ptr, method_offset)); - } - if (itable_index != noreg) { -+ // pick up itable index from f2 also: -+ assert(byte_no == f1_byte, "already picked up f1"); - __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset)); - } - __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset)); -@@ -2317,10 +2328,11 @@ - Label Done, notByte, notInt, notShort, notChar, - notLong, notFloat, notObj, notDouble; - -- __ shrl(flags, ConstantPoolCacheEntry::tosBits); -+ __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); -+ // Make sure we don't need to mask edx after the above shift - assert(btos == 0, "change code, btos != 0"); - -- __ andl(flags, 0x0F); -+ __ andl(flags, ConstantPoolCacheEntry::tos_state_mask); - __ jcc(Assembler::notZero, notByte); - // btos - __ load_signed_byte(rax, field); -@@ -2466,10 +2478,9 @@ - Address::times_8, - in_bytes(cp_base_offset + - ConstantPoolCacheEntry::flags_offset()))); -- __ shrl(c_rarg3, ConstantPoolCacheEntry::tosBits); -- // Make sure we don't need to mask rcx for tosBits after the -- // above shift -- ConstantPoolCacheEntry::verify_tosBits(); -+ __ shrl(c_rarg3, ConstantPoolCacheEntry::tos_state_shift); -+ // Make sure we don't need to mask rcx after the above shift -+ ConstantPoolCacheEntry::verify_tos_state_shift(); - __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue - __ cmpl(c_rarg3, ltos); - __ cmovptr(Assembler::equal, -@@ -2516,7 +2527,7 @@ - - Label notVolatile, Done; - __ movl(rdx, flags); -- __ shrl(rdx, ConstantPoolCacheEntry::volatileField); -+ __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); - 
__ andl(rdx, 0x1); - - // field address -@@ -2525,10 +2536,10 @@ - Label notByte, notInt, notShort, notChar, - notLong, notFloat, notObj, notDouble; - -- __ shrl(flags, ConstantPoolCacheEntry::tosBits); -+ __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); - - assert(btos == 0, "change code, btos != 0"); -- __ andl(flags, 0x0f); -+ __ andl(flags, ConstantPoolCacheEntry::tos_state_mask); - __ jcc(Assembler::notZero, notByte); - - // btos -@@ -2745,7 +2756,7 @@ - // Assembler::StoreStore)); - - Label notVolatile; -- __ shrl(rdx, ConstantPoolCacheEntry::volatileField); -+ __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); - __ andl(rdx, 0x1); - - // Get object from stack -@@ -2826,7 +2837,7 @@ - // __ movl(rdx, Address(rcx, rbx, Address::times_8, - // in_bytes(constantPoolCacheOopDesc::base_offset() + - // ConstantPoolCacheEntry::flags_offset()))); -- // __ shrl(rdx, ConstantPoolCacheEntry::volatileField); -+ // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); - // __ andl(rdx, 0x1); - // } - __ movptr(rbx, Address(rcx, rbx, Address::times_8, -@@ -2914,7 +2925,7 @@ - // __ movl(rdx, Address(rcx, rdx, Address::times_8, - // in_bytes(constantPoolCacheOopDesc::base_offset() + - // ConstantPoolCacheEntry::flags_offset()))); -- // __ shrl(rdx, ConstantPoolCacheEntry::volatileField); -+ // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); - // __ testl(rdx, 0x1); - // __ jcc(Assembler::zero, notVolatile); - // __ membar(Assembler::LoadLoad); -@@ -2934,19 +2945,29 @@ - ShouldNotReachHere(); - } - --void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) { -+void TemplateTable::prepare_invoke(int byte_no, -+ Register method, // linked method (or i-klass) -+ Register index, // itable index, MethodType, etc. 
-+ Register recv, // if caller wants to see it -+ Register flags // if caller wants to test it -+ ) { - // determine flags -- Bytecodes::Code code = bytecode(); -+ const Bytecodes::Code code = bytecode(); - const bool is_invokeinterface = code == Bytecodes::_invokeinterface; - const bool is_invokedynamic = code == Bytecodes::_invokedynamic; -+ const bool is_invokehandle = code == Bytecodes::_invokehandle; - const bool is_invokevirtual = code == Bytecodes::_invokevirtual; - const bool is_invokespecial = code == Bytecodes::_invokespecial; -- const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic); -- const bool receiver_null_check = is_invokespecial; -- const bool save_flags = is_invokeinterface || is_invokevirtual; -+ const bool load_receiver = (recv != noreg); -+ const bool save_flags = (flags != noreg); -+ assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); -+ assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal"); -+ assert(flags == noreg || flags == rdx, ""); -+ assert(recv == noreg || recv == rcx, ""); -+ - // setup registers & access constant pool cache -- const Register recv = rcx; -- const Register flags = rdx; -+ if (recv == noreg) recv = rcx; -+ if (flags == noreg) flags = rdx; - assert_different_registers(method, index, recv, flags); - - // save 'interpreter return address' -@@ -2954,36 +2975,44 @@ - - load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); - -- // load receiver if needed (note: no return address pushed yet) -+ // maybe push appendix to arguments (just before return address) -+ if (is_invokedynamic || is_invokehandle) { -+ Label L_no_push; -+ __ verify_oop(index); -+ __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift)); -+ __ jccb(Assembler::zero, L_no_push); -+ // Push the appendix as a trailing parameter. 
-+ // This must be done before we get the receiver, -+ // since the parameter_size includes it. -+ __ push(index); // push appendix (MethodType, CallSite, etc.) -+ __ bind(L_no_push); -+ } -+ -+ // load receiver if needed (after appendix is pushed so parameter size is correct) -+ // Note: no return address pushed yet - if (load_receiver) { -- assert(!is_invokedynamic, ""); - __ movl(recv, flags); -- __ andl(recv, 0xFF); -- Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1)); -+ __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask); -+ const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address -+ const int receiver_is_at_end = -1; // back off one slot to get receiver -+ Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end); - __ movptr(recv, recv_addr); - __ verify_oop(recv); - } - -- // do null check if needed -- if (receiver_null_check) { -- __ null_check(recv); -- } -- - if (save_flags) { - __ movl(r13, flags); - } - - // compute return type -- __ shrl(flags, ConstantPoolCacheEntry::tosBits); -- // Make sure we don't need to mask flags for tosBits after the above shift -- ConstantPoolCacheEntry::verify_tosBits(); -+ __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); -+ // Make sure we don't need to mask flags after the above shift -+ ConstantPoolCacheEntry::verify_tos_state_shift(); - // load return address - { -- address table_addr; -- if (is_invokeinterface || is_invokedynamic) -- table_addr = (address)Interpreter::return_5_addrs_by_index_table(); -- else -- table_addr = (address)Interpreter::return_3_addrs_by_index_table(); -+ const address table_addr = (is_invokeinterface || is_invokedynamic) ? 
-+ (address)Interpreter::return_5_addrs_by_index_table() : -+ (address)Interpreter::return_3_addrs_by_index_table(); - ExternalAddress table(table_addr); - __ lea(rscratch1, table); - __ movptr(flags, Address(rscratch1, flags, Address::times_ptr)); -@@ -2992,7 +3021,7 @@ - // push return address - __ push(flags); - -- // Restore flag field from the constant pool cache, and restore esi -+ // Restore flags value from the constant pool cache, and restore rsi - // for later null checks. r13 is the bytecode pointer - if (save_flags) { - __ movl(flags, r13); -@@ -3006,11 +3035,13 @@ - Register flags) { - // Uses temporary registers rax, rdx - assert_different_registers(index, recv, rax, rdx); -+ assert(index == rbx, ""); -+ assert(recv == rcx, ""); - - // Test for an invoke of a final method - Label notFinal; - __ movl(rax, flags); -- __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod)); -+ __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift)); - __ jcc(Assembler::zero, notFinal); - - const Register method = index; // method must be rbx -@@ -3018,6 +3049,7 @@ - "methodOop must be rbx for interpreter calling convention"); - - // do the call - the index is actually the method to call -+ // that is, f2 is a vtable index if !is_vfinal, else f2 is a methodOop - __ verify_oop(method); - - // It's final, need a null check here! 
-@@ -3033,20 +3065,13 @@ - // get receiver klass - __ null_check(recv, oopDesc::klass_offset_in_bytes()); - __ load_klass(rax, recv); -- - __ verify_oop(rax); - - // profile this call - __ profile_virtual_call(rax, r14, rdx); - - // get target methodOop & entry point -- const int base = instanceKlass::vtable_start_offset() * wordSize; -- assert(vtableEntry::size() * wordSize == 8, -- "adjust the scaling in the code below"); -- __ movptr(method, Address(rax, index, -- Address::times_8, -- base + vtableEntry::method_offset_in_bytes())); -- __ movptr(rdx, Address(method, methodOopDesc::interpreter_entry_offset())); -+ __ lookup_virtual_method(rax, index, method); - __ jump_from_interpreted(method, rdx); - } - -@@ -3054,7 +3079,10 @@ - void TemplateTable::invokevirtual(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f2_byte, "use this argument"); -- prepare_invoke(rbx, noreg, byte_no); -+ prepare_invoke(byte_no, -+ rbx, // method or vtable index -+ noreg, // unused itable index -+ rcx, rdx); // recv, flags - - // rbx: index - // rcx: receiver -@@ -3067,7 +3095,10 @@ - void TemplateTable::invokespecial(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); -- prepare_invoke(rbx, noreg, byte_no); -+ prepare_invoke(byte_no, rbx, noreg, // get f1 methodOop -+ rcx); // get receiver also for null check -+ __ verify_oop(rcx); -+ __ null_check(rcx); - // do the call - __ verify_oop(rbx); - __ profile_call(rax); -@@ -3078,7 +3109,7 @@ - void TemplateTable::invokestatic(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); -- prepare_invoke(rbx, noreg, byte_no); -+ prepare_invoke(byte_no, rbx); // get f1 methodOop - // do the call - __ verify_oop(rbx); - __ profile_call(rax); -@@ -3094,10 +3125,11 @@ - void TemplateTable::invokeinterface(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); -- prepare_invoke(rax, rbx, byte_no); -- -- // rax: Interface -- 
// rbx: index -+ prepare_invoke(byte_no, rax, rbx, // get f1 klassOop, f2 itable index -+ rcx, rdx); // recv, flags -+ -+ // rax: interface klass (from f1) -+ // rbx: itable index (from f2) - // rcx: receiver - // rdx: flags - -@@ -3107,14 +3139,15 @@ - // another compliant java compiler. - Label notMethod; - __ movl(r14, rdx); -- __ andl(r14, (1 << ConstantPoolCacheEntry::methodInterface)); -+ __ andl(r14, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift)); - __ jcc(Assembler::zero, notMethod); - - invokevirtual_helper(rbx, rcx, rdx); - __ bind(notMethod); - - // Get receiver klass into rdx - also a null check -- __ restore_locals(); // restore r14 -+ __ restore_locals(); // restore r14 -+ __ null_check(rcx, oopDesc::klass_offset_in_bytes()); - __ load_klass(rdx, rcx); - __ verify_oop(rdx); - -@@ -3129,7 +3162,7 @@ - rbx, r13, - no_such_interface); - -- // rbx,: methodOop to call -+ // rbx: methodOop to call - // rcx: receiver - // Check for abstract method error - // Note: This should be done more efficiently via a throw_abstract_method_error -@@ -3166,12 +3199,42 @@ - InterpreterRuntime::throw_IncompatibleClassChangeError)); - // the call_VM checks for exception, so we should never return here. 
- __ should_not_reach_here(); -- return; - } - -+ -+void TemplateTable::invokehandle(int byte_no) { -+ transition(vtos, vtos); -+ assert(byte_no == f12_oop, "use this argument"); -+ const Register rbx_method = rbx; // f2 -+ const Register rax_mtype = rax; // f1 -+ const Register rcx_recv = rcx; -+ const Register rdx_flags = rdx; -+ -+ if (!EnableInvokeDynamic) { -+ // rewriter does not generate this bytecode -+ __ should_not_reach_here(); -+ return; -+ } -+ -+ prepare_invoke(byte_no, -+ rbx_method, rax_mtype, // get f2 methodOop, f1 MethodType -+ rcx_recv); -+ __ verify_oop(rbx_method); -+ __ verify_oop(rcx_recv); -+ __ null_check(rcx_recv); -+ -+ // Note: rax_mtype is already pushed (if necessary) by prepare_invoke -+ -+ // FIXME: profile the LambdaForm also -+ __ profile_final_call(rax); -+ -+ __ jump_from_interpreted(rbx_method, rdx); -+} -+ -+ - void TemplateTable::invokedynamic(int byte_no) { - transition(vtos, vtos); -- assert(byte_no == f1_oop, "use this argument"); -+ assert(byte_no == f12_oop, "use this argument"); - - if (!EnableInvokeDynamic) { - // We should not encounter this bytecode if !EnableInvokeDynamic. 
-@@ -3184,26 +3247,23 @@ - return; - } - -- prepare_invoke(rax, rbx, byte_no); -- -- // rax: CallSite object (f1) -- // rbx: unused (f2) -- // rcx: receiver address -- // rdx: flags (unused) -- -- Register rax_callsite = rax; -- Register rcx_method_handle = rcx; -+ const Register rbx_method = rbx; -+ const Register rax_callsite = rax; -+ -+ prepare_invoke(byte_no, rbx_method, rax_callsite); -+ -+ // rax: CallSite object (from f1) -+ // rbx: MH.linkToCallSite method (from f2) -+ -+ // Note: rax_callsite is already pushed by prepare_invoke - - // %%% should make a type profile for any invokedynamic that takes a ref argument - // profile this call - __ profile_call(r13); - - __ verify_oop(rax_callsite); -- __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx))); -- __ null_check(rcx_method_handle); -- __ verify_oop(rcx_method_handle); -- __ prepare_to_jump_from_interpreted(); -- __ jump_to_method_handle_entry(rcx_method_handle, rdx); -+ -+ __ jump_from_interpreted(rbx_method, rdx); - } - - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/templateTable_x86_64.hpp ---- openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_64.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_64.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -25,7 +25,12 @@ - #ifndef CPU_X86_VM_TEMPLATETABLE_X86_64_HPP - #define CPU_X86_VM_TEMPLATETABLE_X86_64_HPP - -- static void prepare_invoke(Register method, Register index, int byte_no); -+ static void prepare_invoke(int byte_no, -+ Register method, // linked method (or i-klass) -+ Register index = noreg, // itable index, MethodType, etc. 
-+ Register recv = noreg, // if caller wants to see it -+ Register flags = noreg // if caller wants to test it -+ ); - static void invokevirtual_helper(Register index, Register recv, - Register flags); - static void volatile_barrier(Assembler::Membar_mask_bits order_constraint); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/vtableStubs_x86_32.cpp ---- openjdk/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -76,8 +76,7 @@ - // get receiver klass - address npe_addr = __ pc(); - __ movptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes())); -- // compute entry offset (in words) -- int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); -+ - #ifndef PRODUCT - if (DebugVtables) { - Label L; -@@ -93,7 +92,8 @@ - const Register method = rbx; - - // load methodOop and target address -- __ movptr(method, Address(rax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes())); -+ __ lookup_virtual_method(rax, vtable_index, method); -+ - if (DebugVtables) { - Label L; - __ cmpptr(method, (int32_t)NULL_WORD); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/x86/vm/vtableStubs_x86_64.cpp ---- openjdk/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -69,10 +69,6 @@ - address npe_addr = __ pc(); - __ load_klass(rax, j_rarg0); - -- // compute entry offset (in words) -- int entry_offset = -- instanceKlass::vtable_start_offset() + vtable_index * vtableEntry::size(); -- - #ifndef PRODUCT - if (DebugVtables) { - Label L; -@@ -90,9 +86,8 @@ - // load methodOop and target address - const Register method = rbx; - -- __ movptr(method, Address(rax, -- entry_offset * wordSize + -- vtableEntry::method_offset_in_bytes())); -+ __ lookup_virtual_method(rax, vtable_index, method); -+ - if (DebugVtables) { - 
Label L; - __ cmpptr(method, (int32_t)NULL_WORD); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp ---- openjdk/hotspot/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -31,12 +31,17 @@ - return _masm; - } - -- protected: -- address generate_entry(address entry_point) { -- ZeroEntry *entry = (ZeroEntry *) assembler()->pc(); -- assembler()->advance(sizeof(ZeroEntry)); -+ public: -+ static address generate_entry_impl(MacroAssembler* masm, address entry_point) { -+ ZeroEntry *entry = (ZeroEntry *) masm->pc(); -+ masm->advance(sizeof(ZeroEntry)); - entry->set_entry_point(entry_point); - return (address) entry; - } - -+ protected: -+ address generate_entry(address entry_point) { -+ return generate_entry_impl(assembler(), entry_point); -+ } -+ - #endif // CPU_ZERO_VM_CPPINTERPRETERGENERATOR_ZERO_HPP -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/zero/vm/cppInterpreter_zero.cpp ---- openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -188,25 +188,6 @@ - method, istate->osr_entry(), istate->osr_buf(), THREAD); - return; - } -- else if (istate->msg() == BytecodeInterpreter::call_method_handle) { -- oop method_handle = istate->callee(); -- -- // Trim back the stack to put the parameters at the top -- stack->set_sp(istate->stack() + 1); -- -- // Make the call -- process_method_handle(method_handle, THREAD); -- fixup_after_potential_safepoint(); -- -- // Convert the result -- istate->set_stack(stack->sp() - 1); -- -- // Restore the stack -- stack->set_sp(istate->stack_limit() + 1); -- -- // Resume the interpreter -- istate->set_msg(BytecodeInterpreter::method_resume); -- } - else { - ShouldNotReachHere(); - } -@@ -543,35 +524,35 @@ - if (entry->is_volatile()) { - switch 
(entry->flag_state()) { - case ctos: -- SET_LOCALS_INT(object->char_field_acquire(entry->f2()), 0); -+ SET_LOCALS_INT(object->char_field_acquire(entry->f2_as_index()), 0); - break; - - case btos: -- SET_LOCALS_INT(object->byte_field_acquire(entry->f2()), 0); -+ SET_LOCALS_INT(object->byte_field_acquire(entry->f2_as_index()), 0); - break; - - case stos: -- SET_LOCALS_INT(object->short_field_acquire(entry->f2()), 0); -+ SET_LOCALS_INT(object->short_field_acquire(entry->f2_as_index()), 0); - break; - - case itos: -- SET_LOCALS_INT(object->int_field_acquire(entry->f2()), 0); -+ SET_LOCALS_INT(object->int_field_acquire(entry->f2_as_index()), 0); - break; - - case ltos: -- SET_LOCALS_LONG(object->long_field_acquire(entry->f2()), 0); -+ SET_LOCALS_LONG(object->long_field_acquire(entry->f2_as_index()), 0); - break; - - case ftos: -- SET_LOCALS_FLOAT(object->float_field_acquire(entry->f2()), 0); -+ SET_LOCALS_FLOAT(object->float_field_acquire(entry->f2_as_index()), 0); - break; - - case dtos: -- SET_LOCALS_DOUBLE(object->double_field_acquire(entry->f2()), 0); -+ SET_LOCALS_DOUBLE(object->double_field_acquire(entry->f2_as_index()), 0); - break; - - case atos: -- SET_LOCALS_OBJECT(object->obj_field_acquire(entry->f2()), 0); -+ SET_LOCALS_OBJECT(object->obj_field_acquire(entry->f2_as_index()), 0); - break; - - default: -@@ -581,35 +562,35 @@ - else { - switch (entry->flag_state()) { - case ctos: -- SET_LOCALS_INT(object->char_field(entry->f2()), 0); -+ SET_LOCALS_INT(object->char_field(entry->f2_as_index()), 0); - break; - - case btos: -- SET_LOCALS_INT(object->byte_field(entry->f2()), 0); -+ SET_LOCALS_INT(object->byte_field(entry->f2_as_index()), 0); - break; - - case stos: -- SET_LOCALS_INT(object->short_field(entry->f2()), 0); -+ SET_LOCALS_INT(object->short_field(entry->f2_as_index()), 0); - break; - - case itos: -- SET_LOCALS_INT(object->int_field(entry->f2()), 0); -+ SET_LOCALS_INT(object->int_field(entry->f2_as_index()), 0); - break; - - case ltos: -- 
SET_LOCALS_LONG(object->long_field(entry->f2()), 0); -+ SET_LOCALS_LONG(object->long_field(entry->f2_as_index()), 0); - break; - - case ftos: -- SET_LOCALS_FLOAT(object->float_field(entry->f2()), 0); -+ SET_LOCALS_FLOAT(object->float_field(entry->f2_as_index()), 0); - break; - - case dtos: -- SET_LOCALS_DOUBLE(object->double_field(entry->f2()), 0); -+ SET_LOCALS_DOUBLE(object->double_field(entry->f2_as_index()), 0); - break; - - case atos: -- SET_LOCALS_OBJECT(object->obj_field(entry->f2()), 0); -+ SET_LOCALS_OBJECT(object->obj_field(entry->f2_as_index()), 0); - break; - - default: -@@ -637,829 +618,6 @@ - return 0; - } - --int CppInterpreter::method_handle_entry(methodOop method, -- intptr_t UNUSED, TRAPS) { -- JavaThread *thread = (JavaThread *) THREAD; -- ZeroStack *stack = thread->zero_stack(); -- int argument_slots = method->size_of_parameters(); -- int result_slots = type2size[result_type_of(method)]; -- intptr_t *vmslots = stack->sp(); -- intptr_t *unwind_sp = vmslots + argument_slots; -- -- // Find the MethodType -- address p = (address) method; -- for (jint* pc = method->method_type_offsets_chain(); (*pc) != -1; pc++) { -- p = *(address*)(p + (*pc)); -- } -- oop method_type = (oop) p; -- -- // The MethodHandle is in the slot after the arguments -- oop form = java_lang_invoke_MethodType::form(method_type); -- int num_vmslots = java_lang_invoke_MethodTypeForm::vmslots(form); -- assert(argument_slots == num_vmslots + 1, "should be"); -- oop method_handle = VMSLOTS_OBJECT(num_vmslots); -- -- // InvokeGeneric requires some extra shuffling -- oop mhtype = java_lang_invoke_MethodHandle::type(method_handle); -- bool is_exact = mhtype == method_type; -- if (!is_exact) { -- if (method->intrinsic_id() == vmIntrinsics::_invokeExact) { -- CALL_VM_NOCHECK_NOFIX( -- SharedRuntime::throw_WrongMethodTypeException( -- thread, method_type, mhtype)); -- // NB all oops trashed! 
-- assert(HAS_PENDING_EXCEPTION, "should do"); -- stack->set_sp(unwind_sp); -- return 0; -- } -- assert(method->intrinsic_id() == vmIntrinsics::_invokeGeneric, "should be"); -- -- // Load up an adapter from the calling type -- // NB the x86 code for this (in methodHandles_x86.cpp, search for -- // "genericInvoker") is really really odd. I'm hoping it's trying -- // to accomodate odd VM/class library combinations I can ignore. -- oop adapter = java_lang_invoke_MethodTypeForm::genericInvoker(form); -- if (adapter == NULL) { -- CALL_VM_NOCHECK_NOFIX( -- SharedRuntime::throw_WrongMethodTypeException( -- thread, method_type, mhtype)); -- // NB all oops trashed! -- assert(HAS_PENDING_EXCEPTION, "should do"); -- stack->set_sp(unwind_sp); -- return 0; -- } -- -- // Adapters are shared among form-families of method-type. The -- // type being called is passed as a trusted first argument so that -- // the adapter knows the actual types of its arguments and return -- // values. -- insert_vmslots(num_vmslots + 1, 1, THREAD); -- if (HAS_PENDING_EXCEPTION) { -- // NB all oops trashed! -- stack->set_sp(unwind_sp); -- return 0; -- } -- -- vmslots = stack->sp(); -- num_vmslots++; -- SET_VMSLOTS_OBJECT(method_type, num_vmslots); -- -- method_handle = adapter; -- } -- -- CPPINT_DEBUG( tty->print_cr( "Process method_handle sp: 0x%x unwind_sp: 0x%x result_slots: %d.", \ -- stack->sp(), unwind_sp, result_slots ); ) -- -- // Start processing -- process_method_handle(method_handle, THREAD); -- if (HAS_PENDING_EXCEPTION) -- result_slots = 0; -- -- // If this is an invokeExact then the eventual callee will not -- // have unwound the method handle argument so we have to do it. -- // If a result is being returned the it will be above the method -- // handle argument we're unwinding. 
-- if (is_exact) { -- intptr_t result[2]; -- for (int i = 0; i < result_slots; i++) -- result[i] = stack->pop(); -- stack->pop(); -- for (int i = result_slots - 1; i >= 0; i--) -- stack->push(result[i]); -- } -- -- // Check -- CPPINT_DEBUG( tty->print_cr( "Exiting method_handle_entry, sp: 0x%x unwind_sp: 0x%x result_slots: %d.", \ -- stack->sp(), unwind_sp, result_slots ); ) -- assert(stack->sp() == unwind_sp - result_slots, "should be"); -- -- // No deoptimized frames on the stack -- return 0; --} -- --void CppInterpreter::process_method_handle(oop method_handle, TRAPS) { -- -- JavaThread *thread = (JavaThread *) THREAD; -- ZeroStack *stack = thread->zero_stack(); -- intptr_t *vmslots = stack->sp(); -- -- bool direct_to_method = false; -- BasicType src_rtype = T_ILLEGAL; -- BasicType dst_rtype = T_ILLEGAL; -- -- MethodHandleEntry *entry = -- java_lang_invoke_MethodHandle::vmentry(method_handle); -- MethodHandles::EntryKind entry_kind = -- (MethodHandles::EntryKind) (((intptr_t) entry) & 0xffffffff); -- -- methodOop method = NULL; -- CPPINT_DEBUG( tty->print_cr( "\nEntering %s 0x%x.",MethodHandles::entry_name(entry_kind), (char *)vmslots ); ) -- switch (entry_kind) { -- case MethodHandles::_invokestatic_mh: -- direct_to_method = true; -- break; -- -- case MethodHandles::_invokespecial_mh: -- case MethodHandles::_invokevirtual_mh: -- case MethodHandles::_invokeinterface_mh: -- { -- oop receiver = -- VMSLOTS_OBJECT( -- java_lang_invoke_MethodHandle::vmslots(method_handle) - 1); -- if (receiver == NULL) { -- stack->set_sp(calculate_unwind_sp(stack, method_handle)); -- CALL_VM_NOCHECK_NOFIX( -- throw_exception( -- thread, vmSymbols::java_lang_NullPointerException())); -- // NB all oops trashed! 
-- assert(HAS_PENDING_EXCEPTION, "should do"); -- return; -- } -- if (entry_kind != MethodHandles::_invokespecial_mh) { -- int index = java_lang_invoke_DirectMethodHandle::vmindex(method_handle); -- instanceKlass* rcvrKlass = -- (instanceKlass *) receiver->klass()->klass_part(); -- if (entry_kind == MethodHandles::_invokevirtual_mh) { -- method = (methodOop) rcvrKlass->start_of_vtable()[index]; -- } -- else { -- oop iclass = java_lang_invoke_MethodHandle::vmtarget(method_handle); -- itableOffsetEntry* ki = -- (itableOffsetEntry *) rcvrKlass->start_of_itable(); -- int i, length = rcvrKlass->itable_length(); -- for (i = 0; i < length; i++, ki++ ) { -- if (ki->interface_klass() == iclass) -- break; -- } -- if (i == length) { -- stack->set_sp(calculate_unwind_sp(stack, method_handle)); -- CALL_VM_NOCHECK_NOFIX( -- throw_exception( -- thread, vmSymbols::java_lang_IncompatibleClassChangeError())); -- // NB all oops trashed! -- assert(HAS_PENDING_EXCEPTION, "should do"); -- return; -- } -- itableMethodEntry* im = ki->first_method_entry(receiver->klass()); -- method = im[index].method(); -- if (method == NULL) { -- stack->set_sp(calculate_unwind_sp(stack, method_handle)); -- CALL_VM_NOCHECK_NOFIX( -- throw_exception( -- thread, vmSymbols::java_lang_AbstractMethodError())); -- // NB all oops trashed! 
-- assert(HAS_PENDING_EXCEPTION, "should do"); -- return; -- } -- } -- } -- } -- direct_to_method = true; -- break; -- -- case MethodHandles::_bound_ref_direct_mh: -- case MethodHandles::_bound_int_direct_mh: -- case MethodHandles::_bound_long_direct_mh: -- direct_to_method = true; -- // fall through -- case MethodHandles::_bound_ref_mh: -- case MethodHandles::_bound_int_mh: -- case MethodHandles::_bound_long_mh: -- { -- // BasicType arg_type = T_ILLEGAL; -- // int arg_mask = -1; -- // int arg_slots = -1; -- // MethodHandles::get_ek_bound_mh_info( -- // entry_kind, arg_type, arg_mask, arg_slots); -- BasicType arg_type = MethodHandles::ek_bound_mh_arg_type(entry_kind); -- int arg_mask = 0; -- int arg_slots = type2size[arg_type];; -- -- int arg_slot = -- java_lang_invoke_BoundMethodHandle::vmargslot(method_handle); -- -- // Create the new slot(s) -- intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); -- insert_vmslots(arg_slot, arg_slots, THREAD); -- if (HAS_PENDING_EXCEPTION) { -- // all oops trashed -- stack->set_sp(unwind_sp); -- return; -- } -- vmslots = stack->sp(); -- -- // Store bound argument into new stack slot -- oop arg = java_lang_invoke_BoundMethodHandle::argument(method_handle); -- if (arg_type == T_OBJECT) { -- assert(arg_slots == 1, "should be"); -- SET_VMSLOTS_OBJECT(arg, arg_slot); -- } -- else { -- jvalue arg_value; -- arg_type = java_lang_boxing_object::get_value(arg, &arg_value); -- switch (arg_type) { -- case T_BOOLEAN: -- SET_VMSLOTS_INT(arg_value.z, arg_slot); -- break; -- case T_CHAR: -- SET_VMSLOTS_INT(arg_value.c, arg_slot); -- break; -- case T_BYTE: -- SET_VMSLOTS_INT(arg_value.b, arg_slot); -- break; -- case T_SHORT: -- SET_VMSLOTS_INT(arg_value.s, arg_slot); -- break; -- case T_INT: -- SET_VMSLOTS_INT(arg_value.i, arg_slot); -- break; -- case T_FLOAT: -- SET_VMSLOTS_FLOAT(arg_value.f, arg_slot); -- break; -- case T_LONG: -- SET_VMSLOTS_LONG(arg_value.j, arg_slot + 1); -- break; -- case T_DOUBLE: -- 
SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot + 1); -- break; -- default: -- tty->print_cr("unhandled type %s", type2name(arg_type)); -- ShouldNotReachHere(); -- } -- } -- } -- break; -- -- case MethodHandles::_adapter_retype_only: -- case MethodHandles::_adapter_retype_raw: -- src_rtype = result_type_of_handle( -- java_lang_invoke_MethodHandle::vmtarget(method_handle)); -- dst_rtype = result_type_of_handle(method_handle); -- break; -- -- case MethodHandles::_adapter_check_cast: -- { -- int arg_slot = -- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); -- oop arg = VMSLOTS_OBJECT(arg_slot); -- if (arg != NULL) { -- klassOop objKlassOop = arg->klass(); -- klassOop klassOf = java_lang_Class::as_klassOop( -- java_lang_invoke_AdapterMethodHandle::argument(method_handle)); -- -- if (objKlassOop != klassOf && -- !objKlassOop->klass_part()->is_subtype_of(klassOf)) { -- ResourceMark rm(THREAD); -- const char* objName = Klass::cast(objKlassOop)->external_name(); -- const char* klassName = Klass::cast(klassOf)->external_name(); -- char* message = SharedRuntime::generate_class_cast_message( -- objName, klassName); -- -- stack->set_sp(calculate_unwind_sp(stack, method_handle)); -- CALL_VM_NOCHECK_NOFIX( -- throw_exception( -- thread, vmSymbols::java_lang_ClassCastException(), message)); -- // NB all oops trashed! 
-- assert(HAS_PENDING_EXCEPTION, "should do"); -- return; -- } -- } -- } -- break; -- -- case MethodHandles::_adapter_dup_args: -- { -- int arg_slot = -- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); -- int conv = -- java_lang_invoke_AdapterMethodHandle::conversion(method_handle); -- int num_slots = -MethodHandles::adapter_conversion_stack_move(conv); -- assert(num_slots > 0, "should be"); -- -- // Create the new slot(s) -- intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); -- stack->overflow_check(num_slots, THREAD); -- if (HAS_PENDING_EXCEPTION) { -- // all oops trashed -- stack->set_sp(unwind_sp); -- return; -- } -- -- // Duplicate the arguments -- for (int i = num_slots - 1; i >= 0; i--) -- stack->push(*VMSLOTS_SLOT(arg_slot + i)); -- -- vmslots = stack->sp(); // unused, but let the compiler figure that out -- } -- break; -- -- case MethodHandles::_adapter_drop_args: -- { -- int arg_slot = -- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); -- int conv = -- java_lang_invoke_AdapterMethodHandle::conversion(method_handle); -- int num_slots = MethodHandles::adapter_conversion_stack_move(conv); -- assert(num_slots > 0, "should be"); -- -- remove_vmslots(arg_slot, num_slots, THREAD); // doesn't trap -- vmslots = stack->sp(); // unused, but let the compiler figure that out -- } -- break; -- -- case MethodHandles::_adapter_opt_swap_1: -- case MethodHandles::_adapter_opt_swap_2: -- case MethodHandles::_adapter_opt_rot_1_up: -- case MethodHandles::_adapter_opt_rot_1_down: -- case MethodHandles::_adapter_opt_rot_2_up: -- case MethodHandles::_adapter_opt_rot_2_down: -- { -- int arg1 = -- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); -- int conv = -- java_lang_invoke_AdapterMethodHandle::conversion(method_handle); -- int arg2 = MethodHandles::adapter_conversion_vminfo(conv); -- -- // int swap_bytes = 0, rotate = 0; -- // MethodHandles::get_ek_adapter_opt_swap_rot_info( -- // entry_kind, swap_bytes, rotate); 
-- int swap_slots = MethodHandles::ek_adapter_opt_swap_slots(entry_kind); -- int rotate = MethodHandles::ek_adapter_opt_swap_mode(entry_kind); -- int swap_bytes = swap_slots * Interpreter::stackElementSize; -- swap_slots = swap_bytes >> LogBytesPerWord; -- -- intptr_t tmp; -- switch (rotate) { -- case 0: // swap -- for (int i = 0; i < swap_slots; i++) { -- tmp = *VMSLOTS_SLOT(arg1 + i); -- SET_VMSLOTS_SLOT(VMSLOTS_SLOT(arg2 + i), arg1 + i); -- SET_VMSLOTS_SLOT(&tmp, arg2 + i); -- } -- break; -- -- case 1: // up -- assert(arg1 - swap_slots > arg2, "should be"); -- -- tmp = *VMSLOTS_SLOT(arg1); -- for (int i = arg1 - swap_slots; i >= arg2; i--) -- SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i + swap_slots); -- SET_VMSLOTS_SLOT(&tmp, arg2); -- -- break; -- -- case -1: // down -- assert(arg2 - swap_slots > arg1, "should be"); -- -- tmp = *VMSLOTS_SLOT(arg1); -- for (int i = arg1 + swap_slots; i <= arg2; i++) -- SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i - swap_slots); -- SET_VMSLOTS_SLOT(&tmp, arg2); -- break; -- -- default: -- ShouldNotReachHere(); -- } -- } -- break; -- -- case MethodHandles::_adapter_opt_i2l: -- { -- int arg_slot = -- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); -- int arg = VMSLOTS_INT(arg_slot); -- intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); -- insert_vmslots(arg_slot, 1, THREAD); -- if (HAS_PENDING_EXCEPTION) { -- // all oops trashed -- stack->set_sp(unwind_sp); -- return; -- } -- vmslots = stack->sp(); -- arg_slot++; -- SET_VMSLOTS_LONG(arg, arg_slot); -- } -- break; -- -- case MethodHandles::_adapter_opt_unboxi: -- case MethodHandles::_adapter_opt_unboxl: -- { -- int arg_slot = -- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); -- oop arg = VMSLOTS_OBJECT(arg_slot); -- jvalue arg_value; -- if (arg == NULL) { -- // queue a nullpointer exception for the caller -- stack->set_sp(calculate_unwind_sp(stack, method_handle)); -- CALL_VM_NOCHECK_NOFIX( -- throw_exception( -- thread, 
vmSymbols::java_lang_NullPointerException())); -- // NB all oops trashed! -- assert(HAS_PENDING_EXCEPTION, "should do"); -- return; -- } -- BasicType arg_type = java_lang_boxing_object::get_value(arg, &arg_value); -- if (arg_type == T_LONG || arg_type == T_DOUBLE) { -- intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); -- insert_vmslots(arg_slot, 1, THREAD); -- if (HAS_PENDING_EXCEPTION) { -- // all oops trashed -- stack->set_sp(unwind_sp); -- return; -- } -- vmslots = stack->sp(); -- arg_slot++; -- } -- switch (arg_type) { -- case T_BOOLEAN: -- SET_VMSLOTS_INT(arg_value.z, arg_slot); -- break; -- case T_CHAR: -- SET_VMSLOTS_INT(arg_value.c, arg_slot); -- break; -- case T_BYTE: -- SET_VMSLOTS_INT(arg_value.b, arg_slot); -- break; -- case T_SHORT: -- SET_VMSLOTS_INT(arg_value.s, arg_slot); -- break; -- case T_INT: -- SET_VMSLOTS_INT(arg_value.i, arg_slot); -- break; -- case T_FLOAT: -- SET_VMSLOTS_FLOAT(arg_value.f, arg_slot); -- break; -- case T_LONG: -- SET_VMSLOTS_LONG(arg_value.j, arg_slot); -- break; -- case T_DOUBLE: -- SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot); -- break; -- default: -- tty->print_cr("unhandled type %s", type2name(arg_type)); -- ShouldNotReachHere(); -- } -- } -- break; -- -- case MethodHandles::_adapter_opt_spread_0: -- case MethodHandles::_adapter_opt_spread_1_ref: -- case MethodHandles::_adapter_opt_spread_2_ref: -- case MethodHandles::_adapter_opt_spread_3_ref: -- case MethodHandles::_adapter_opt_spread_4_ref: -- case MethodHandles::_adapter_opt_spread_5_ref: -- case MethodHandles::_adapter_opt_spread_ref: -- case MethodHandles::_adapter_opt_spread_byte: -- case MethodHandles::_adapter_opt_spread_char: -- case MethodHandles::_adapter_opt_spread_short: -- case MethodHandles::_adapter_opt_spread_int: -- case MethodHandles::_adapter_opt_spread_long: -- case MethodHandles::_adapter_opt_spread_float: -- case MethodHandles::_adapter_opt_spread_double: -- { -- -- // spread an array out into a group of arguments -- -- int arg_slot = 
-- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); -- // Fetch the argument, which we will cast to the required array type. -- oop arg = VMSLOTS_OBJECT(arg_slot); -- -- BasicType elem_type = -- MethodHandles::ek_adapter_opt_spread_type(entry_kind); -- int elem_slots = -- type2size[elem_type]; // 1 or 2 -- int array_slots = -- 1; // array is always a T_OBJECT -- int length_offset = -- arrayOopDesc::length_offset_in_bytes(); -- int elem0_offset = -- arrayOopDesc::base_offset_in_bytes(elem_type); -- int length_constant = -- MethodHandles::ek_adapter_opt_spread_count(entry_kind); -- int array_length = 0; -- void *array_elem0 = NULL; -- -- CPPINT_DEBUG( tty->print_cr( \ -- "ENTERING _adapter_opt_spread: %s %d %d 0x%x 0x%x", \ -- type2name(elem_type), arg_slot, length_constant, (char *)arg, stack->sp() ); ) -- -- // If the spread count is -1, the length is "variable" ie controlled -- // by the array length. -- // See ek_adapter_opt_spread_count in methodHandles.hpp -- // If array lenth is 0 or spread count is 0 , we will remove the argslot. -- -- bool length_can_be_zero = (length_constant == 0); -- if (length_constant < 0) { -- // some adapters with variable length must handle the zero case -- if (!OptimizeMethodHandles || -- elem_type != T_OBJECT) -- length_can_be_zero = true; -- } -- -- if (arg == NULL) { -- CPPINT_DEBUG( tty->print_cr( \ -- "arg NULL implies Array_length == 0, remove slot." ); ) -- // remove arg slot -- remove_vmslots(arg_slot, 1, THREAD); // doesn't trap -- vmslots = stack->sp(); // unused, but let the compiler figure that out -- CPPINT_DEBUG( tty->print_cr( \ -- " >> Would LEAVE _adapter_opt_spread with NPE." ); ) --#ifdef _NOT_DEF_ -- // queue a nullpointer exception for the caller -- stack->set_sp(calculate_unwind_sp(stack, method_handle)); -- CALL_VM_NOCHECK_NOFIX( -- throw_exception( -- thread, -- vmSymbols::java_lang_NullPointerException())); -- // NB all oops trashed! 
-- assert(HAS_PENDING_EXCEPTION, "should do"); -- return; --#endif -- } else { // (arg != NULL) -- klassOop objKlassOop = arg->klass(); -- klassOop klassOf = java_lang_Class::as_klassOop( -- java_lang_invoke_AdapterMethodHandle::argument(method_handle)); -- -- if (objKlassOop != klassOf && -- !objKlassOop->klass_part()->is_subtype_of(klassOf)) { -- CPPINT_DEBUG( tty->print_cr( \ -- "CLASS CAST ERROR #1 in _adapter_opt_spread." ); ) -- ResourceMark rm(THREAD); -- const char* objName = Klass::cast(objKlassOop)->external_name(); -- const char* klassName = Klass::cast(klassOf)->external_name(); -- char* message = SharedRuntime::generate_class_cast_message( -- objName, klassName); -- -- stack->set_sp(calculate_unwind_sp(stack, method_handle)); -- CALL_VM_NOCHECK_NOFIX( -- throw_exception( -- thread, -- vmSymbols::java_lang_ClassCastException(), message)); -- // NB all oops trashed! -- assert(HAS_PENDING_EXCEPTION, "should do"); -- return; -- } -- -- // Check the array type. -- -- klassOop array_klass_oop = NULL; -- BasicType array_type = java_lang_Class::as_BasicType( -- java_lang_invoke_AdapterMethodHandle::argument(method_handle), -- &array_klass_oop); -- arrayKlassHandle array_klass(THREAD, array_klass_oop); -- -- assert(array_type == T_OBJECT, ""); -- assert(Klass::cast(array_klass_oop)->oop_is_array(), ""); -- if (!(array_type == T_OBJECT) || -- !(Klass::cast(array_klass_oop)->oop_is_array())) { -- CPPINT_DEBUG( tty->print_cr( \ -- "CLASS CAST ERROR #2 not an array in _adapter_opt_spread." ); ) -- ResourceMark rm(THREAD); -- const char* objName = Klass::cast(objKlassOop)->external_name(); -- const char* klassName = Klass::cast(klassOf)->external_name(); -- char* message = SharedRuntime::generate_class_cast_message( -- objName, klassName); -- stack->set_sp(calculate_unwind_sp(stack, method_handle)); -- CALL_VM_NOCHECK_NOFIX( -- throw_exception( -- thread, -- vmSymbols::java_lang_ClassCastException(), message)); -- // NB all oops trashed! 
-- assert(HAS_PENDING_EXCEPTION, "should do"); -- return; -- } -- -- klassOop element_klass_oop = NULL; -- BasicType element_type = -- java_lang_Class::as_BasicType(array_klass->component_mirror(), -- &element_klass_oop); -- KlassHandle element_klass(THREAD, element_klass_oop); -- if ((elem_type != T_OBJECT) && (elem_type != element_type)) { -- CPPINT_DEBUG( tty->print_cr( \ -- "CLASS CAST ERROR #3 invalid type %s != %s in _adapter_opt_spread.", \ -- type2name(elem_type), type2name(element_type) ); ) -- ResourceMark rm(THREAD); -- const char* objName = Klass::cast(objKlassOop)->external_name(); -- const char* klassName = Klass::cast(klassOf)->external_name(); -- char* message = SharedRuntime::generate_class_cast_message( -- objName, klassName); -- stack->set_sp(calculate_unwind_sp(stack, method_handle)); -- CALL_VM_NOCHECK_NOFIX( -- throw_exception( -- thread, -- vmSymbols::java_lang_ClassCastException(), message)); -- // NB all oops trashed! -- assert(HAS_PENDING_EXCEPTION, "should do"); -- return; -- } -- -- array_length = arrayOop(arg)->length(); -- -- // Check the required length. -- if (length_constant > 0) { // must match ? -- if ( array_length != length_constant ) { -- CPPINT_DEBUG( tty->print_cr( \ -- "ARRY INDEX ERROR #4 invalid array length in _adapter_opt_spread." ); ) -- //fixme ArrayIndexOutOfBoundsException ? -- ResourceMark rm(THREAD); -- const char* objName = Klass::cast(objKlassOop)->external_name(); -- const char* klassName = Klass::cast(klassOf)->external_name(); -- char* message = SharedRuntime::generate_class_cast_message( -- objName, klassName); -- -- stack->set_sp(calculate_unwind_sp(stack, method_handle)); -- CALL_VM_NOCHECK_NOFIX( -- throw_exception( -- thread, -- vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message)); -- // NB all oops trashed! -- assert(HAS_PENDING_EXCEPTION, "should do"); -- return; -- } -- // use array_length ? 
-- } else { // length_constant == [ -1 or 0 ] -- if ( (array_length > 0) || length_can_be_zero ) { -- // use array_length. -- } else { // array_length 0 and not length_can_be_zero -- CPPINT_DEBUG( tty->print_cr( \ -- "ARRY INDEX ERROR #5 arry length 0 in _adapter_opt_spread." ); ) -- //fixme ArrayIndexOutOfBoundsException ? -- ResourceMark rm(THREAD); -- const char* objName = Klass::cast(objKlassOop)->external_name(); -- const char* klassName = Klass::cast(klassOf)->external_name(); -- char* message = SharedRuntime::generate_class_cast_message( -- objName, klassName); -- -- stack->set_sp(calculate_unwind_sp(stack, method_handle)); -- CALL_VM_NOCHECK_NOFIX( -- throw_exception( -- thread, -- vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message)); -- // NB all oops trashed! -- assert(HAS_PENDING_EXCEPTION, "should do"); -- return; -- } -- } -- -- // Array length checked out. Now insert any required arg slots. -- // array_length - 1 more slots if array_length > 0 -- // otherwise if array_length == 0 remove arg_slot. -- -- if ( array_length > 0 ) { -- int slots = (array_length * elem_slots) - 1; -- CPPINT_DEBUG( tty->print_cr( \ -- "array_length %d %d slots needed in _adapter_opt_spread.",\ -- array_length, slots); ) -- debug_only(if (elem_slots == 2) \ -- assert ((slots % 2 == 1)," bad slots calc")); -- if ( slots > 0 ) { -- intptr_t *unwind_sp = -- calculate_unwind_sp(stack, method_handle); -- insert_vmslots(arg_slot, slots, THREAD); -- if (HAS_PENDING_EXCEPTION) { -- // all oops trashed -- stack->set_sp(unwind_sp); -- return; -- } -- } -- vmslots = stack->sp(); -- arg_slot += slots; -- -- array_elem0 = arrayOop(arg)->base(elem_type); -- -- // Copy from the array to the new arg slots. -- // [from native : Beware: Arguments that are shallow -- // on the stack are deep in the array, -- // and vice versa. So a downward-growing stack (the usual) -- // has to be copied elementwise in reverse order -- // from the source array.] 
-- -- void * array_elem = array_elem0; -- int top_slot = arg_slot; -- -- debug_only(if (elem_slots == 2) \ -- assert ((((ulong)(char *)&vmslots[top_slot]) % \ -- (u_int)type2aelembytes(elem_type) == 0), \ -- " bad arg alignment")); -- -- CPPINT_DEBUG( tty->print_cr( \ -- "BEGIN ARRY LOOP %d %d 0x%x 0x%x _adapter_opt_spread.",\ -- array_length, top_slot, &vmslots[top_slot], array_elem ); ) -- -- for (int index = 0; index < array_length; index++) { -- switch (elem_type) { -- case T_BYTE: -- SET_VMSLOTS_INT(*(jint*)array_elem, top_slot); -- break; -- case T_CHAR: -- SET_VMSLOTS_INT(*(jint*)array_elem, top_slot); -- break; -- case T_SHORT: -- SET_VMSLOTS_INT(*(jint*)array_elem, top_slot); -- break; -- case T_INT: -- SET_VMSLOTS_INT(*(jint*)array_elem, top_slot); -- break; -- case T_FLOAT: -- SET_VMSLOTS_FLOAT(*(jfloat*)array_elem,top_slot); -- break; -- case T_LONG: -- SET_VMSLOTS_LONG(*(jlong*)array_elem, top_slot); -- break; -- case T_DOUBLE: -- SET_VMSLOTS_DOUBLE(*(jdouble*)array_elem, top_slot); -- break; -- case T_OBJECT: -- SET_VMSLOTS_OBJECT(*(oopDesc**)array_elem, top_slot); -- break; -- default: -- tty->print_cr("unhandled type %s", type2name(elem_type)); -- ShouldNotReachHere(); -- } -- array_elem = (void*)((char *)array_elem + -- type2aelembytes(element_type)); -- top_slot -= elem_slots; -- } -- arg_slot++; -- } -- } -- if ((array_length == 0) && (arg != NULL)) { -- CPPINT_DEBUG( tty->print_cr( \ -- "Array_length == 0, will remove slot." 
); ) -- // remove arg slot -- remove_vmslots(arg_slot, 1, THREAD); // doesn't trap -- // unused, but let the compiler figure that out -- vmslots = stack->sp(); -- // -- } -- CPPINT_DEBUG( tty->print_cr( \ -- "LEAVING _adapter_opt_spread: %s 0x%x 0x%x \n", \ -- type2name(elem_type), (char *)arg, (char *)stack->sp() ); ) -- } -- break; -- default: -- tty->print_cr("unhandled entry_kind %s", -- MethodHandles::entry_name(entry_kind)); -- ShouldNotReachHere(); -- } -- -- -- // Continue along the chain -- if (direct_to_method) { -- if (method == NULL) { -- method = -- (methodOop) java_lang_invoke_MethodHandle::vmtarget(method_handle); -- } -- address entry_point = method->from_interpreted_entry(); -- Interpreter::invoke_method(method, entry_point, THREAD); -- } -- else { -- process_method_handle( -- java_lang_invoke_MethodHandle::vmtarget(method_handle), THREAD); -- } -- // NB all oops now trashed -- -- // Adapt the result type, if necessary -- if (src_rtype != dst_rtype && !HAS_PENDING_EXCEPTION) { -- switch (dst_rtype) { -- case T_VOID: -- for (int i = 0; i < type2size[src_rtype]; i++) -- stack->pop(); -- return; -- -- case T_INT: -- switch (src_rtype) { -- case T_VOID: -- stack->overflow_check(1, CHECK); -- stack->push(0); -- return; -- -- case T_BOOLEAN: -- case T_CHAR: -- case T_BYTE: -- case T_SHORT: -- return; -- } -- // INT results sometimes need narrowing -- case T_BOOLEAN: -- case T_CHAR: -- case T_BYTE: -- case T_SHORT: -- switch (src_rtype) { -- case T_INT: -- return; -- } -- } -- -- tty->print_cr("unhandled conversion:"); -- tty->print_cr("src_rtype = %s", type2name(src_rtype)); -- tty->print_cr("dst_rtype = %s", type2name(dst_rtype)); -- ShouldNotReachHere(); -- } -- CPPINT_DEBUG( tty->print_cr( "LEAVING %s\n",MethodHandles::entry_name(entry_kind) ); ) --} -- - // The new slots will be inserted before slot insert_before. - // Slots < insert_before will have the same slot number after the insert. - // Slots >= insert_before will become old_slot + num_slots. 
-@@ -1499,8 +657,7 @@ - intptr_t* CppInterpreter::calculate_unwind_sp(ZeroStack* stack, - oop method_handle) { - oop method_type = java_lang_invoke_MethodHandle::type(method_handle); -- oop form = java_lang_invoke_MethodType::form(method_type); -- int argument_slots = java_lang_invoke_MethodTypeForm::vmslots(form); -+ int argument_slots = java_lang_invoke_MethodType::ptype_slot_count(method_type); - - return stack->sp() + argument_slots; - } -@@ -1713,10 +870,6 @@ - entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry(); - break; - -- case Interpreter::method_handle: -- entry_point = ((InterpreterGenerator*) this)->generate_method_handle_entry(); -- break; -- - case Interpreter::java_lang_math_sin: - case Interpreter::java_lang_math_cos: - case Interpreter::java_lang_math_tan: -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/zero/vm/interpreterGenerator_zero.hpp ---- openjdk/hotspot/src/cpu/zero/vm/interpreterGenerator_zero.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/zero/vm/interpreterGenerator_zero.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -38,6 +38,5 @@ - address generate_empty_entry(); - address generate_accessor_entry(); - address generate_Reference_get_entry(); -- address generate_method_handle_entry(); - - #endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/zero/vm/interpreter_zero.cpp ---- openjdk/hotspot/src/cpu/zero/vm/interpreter_zero.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/zero/vm/interpreter_zero.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -70,14 +70,6 @@ - return generate_entry((address) ShouldNotCallThisEntry()); - } - --address InterpreterGenerator::generate_method_handle_entry() { --#ifdef CC_INTERP -- return generate_entry((address) CppInterpreter::method_handle_entry); --#else -- return generate_entry((address) ShouldNotCallThisEntry()); --#endif // CC_INTERP --} -- - bool AbstractInterpreter::can_be_compiled(methodHandle m) { - return true; - } -diff 
-r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/zero/vm/methodHandles_zero.cpp ---- openjdk/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -24,89 +24,161 @@ - */ - - #include "precompiled.hpp" -+#include "interpreter/interpreterGenerator.hpp" - #include "interpreter/interpreter.hpp" - #include "memory/allocation.inline.hpp" - #include "prims/methodHandles.hpp" - - #define __ _masm-> - --int MethodHandles::adapter_conversion_ops_supported_mask() { -- return ((1<zero_stack(); -+ InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame(); -+ interpreterState istate = frame->interpreter_state(); -+ -+ // Trim back the stack to put the parameters at the top -+ stack->set_sp(istate->stack() + 1); -+ -+ Interpreter::invoke_method(method, method->from_interpreted_entry(), THREAD); -+ -+ // Convert the result -+ istate->set_stack(stack->sp() - 1); -+ -+ } -+ -+oop MethodHandles::popFromStack(TRAPS) { -+ -+ JavaThread *thread = (JavaThread *) THREAD; -+ InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame(); -+ interpreterState istate = frame->interpreter_state(); -+ intptr_t* topOfStack = istate->stack(); -+ -+ oop top = STACK_OBJECT(-1); -+ MORE_STACK(-1); -+ istate->set_stack(topOfStack); -+ -+ return top; -+ - } - --void MethodHandles::generate_method_handle_stub(MacroAssembler* masm, -- MethodHandles::EntryKind ek) { -- init_entry(ek, (MethodHandleEntry *) ek); --} --void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm, -- // output params: -- int* bounce_offset, -- int* exception_offset, -- int* frame_size_in_words) { -- (*frame_size_in_words) = 0; -- address start = __ pc(); -- (*bounce_offset) = __ pc() - start; -- (*exception_offset) = __ pc() - start; -+int MethodHandles::method_handle_entry_invokeBasic(methodOop method, intptr_t UNUSED, TRAPS) { -+ -+ JavaThread *thread = (JavaThread *) 
THREAD; -+ InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame(); -+ interpreterState istate = frame->interpreter_state(); -+ intptr_t* topOfStack = istate->stack(); -+ -+ // 'this' is a MethodHandle. We resolve the target method by accessing this.form.vmentry.vmtarget. -+ int numArgs = method->size_of_parameters(); -+ oop lform1 = java_lang_invoke_MethodHandle::form(STACK_OBJECT(-numArgs)); // this.form -+ oop vmEntry1 = java_lang_invoke_LambdaForm::vmentry(lform1); -+ methodOop vmtarget = (methodOop) java_lang_invoke_MemberName::vmtarget(vmEntry1); -+ -+ invoke_target(vmtarget, THREAD); -+ -+ // No deoptimized frames on the stack -+ return 0; - } - --frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) { -- //RicochetFrame* f = RicochetFrame::from_frame(fr); -- // Cf. is_interpreted_frame path of frame::sender -- // intptr_t* younger_sp = fr.sp(); -- // intptr_t* sp = fr.sender_sp(); -- // return frame(sp, younger_sp, this_frame_adjusted_stack); -- ShouldNotCallThis(); -+int MethodHandles::method_handle_entry_linkToStaticOrSpecial(methodOop method, intptr_t UNUSED, TRAPS) { -+ -+ // Pop appendix argument from stack. This is a MemberName which we resolve to the -+ // target method. 
-+ oop vmentry = popFromStack(THREAD); -+ -+ methodOop vmtarget = (methodOop) java_lang_invoke_MemberName::vmtarget(vmentry); -+ -+ invoke_target(vmtarget, THREAD); -+ -+ return 0; - } - --void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) { -- // ResourceMark rm; -- // RicochetFrame* f = RicochetFrame::from_frame(fr); -+int MethodHandles::method_handle_entry_linkToInterface(methodOop method, intptr_t UNUSED, TRAPS) { -+ JavaThread *thread = (JavaThread *) THREAD; -+ InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame(); -+ interpreterState istate = frame->interpreter_state(); - -- // pick up the argument type descriptor: -- // Thread* thread = Thread::current(); -- // process fixed part -- // blk->do_oop((oop*)f->saved_target_addr()); -- // blk->do_oop((oop*)f->saved_args_layout_addr()); -+ // Pop appendix argument from stack. This is a MemberName which we resolve to the -+ // target method. -+ oop vmentry = popFromStack(THREAD); -+ intptr_t* topOfStack = istate->stack(); - -- // process variable arguments: -- // if (cookie.is_null()) return; // no arguments to describe -+ // Resolve target method by looking up in the receiver object's itable. 
-+ klassOop clazz = java_lang_Class::as_klassOop(java_lang_invoke_MemberName::clazz(vmentry)); -+ intptr_t vmindex = java_lang_invoke_MemberName::vmindex(vmentry); -+ methodOop target = (methodOop) java_lang_invoke_MemberName::vmtarget(vmentry); - -- // the cookie is actually the invokeExact method for my target -- // his argument signature is what I'm interested in -- // assert(cookie->is_method(), ""); -- // methodHandle invoker(thread, methodOop(cookie())); -- // assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method"); -- // assert(!invoker->is_static(), "must have MH argument"); -- // int slot_count = invoker->size_of_parameters(); -- // assert(slot_count >= 1, "must include 'this'"); -- // intptr_t* base = f->saved_args_base(); -- // intptr_t* retval = NULL; -- // if (f->has_return_value_slot()) -- // retval = f->return_value_slot_addr(); -- // int slot_num = slot_count - 1; -- // intptr_t* loc = &base[slot_num]; -- //blk->do_oop((oop*) loc); // original target, which is irrelevant -- // int arg_num = 0; -- // for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) { -- // if (ss.at_return_type()) continue; -- // BasicType ptype = ss.type(); -- // if (ptype == T_ARRAY) ptype = T_OBJECT; // fold all refs to T_OBJECT -- // assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void"); -- // slot_num -= type2size[ptype]; -- // loc = &base[slot_num]; -- // bool is_oop = (ptype == T_OBJECT && loc != retval); -- // if (is_oop) blk->do_oop((oop*)loc); -- // arg_num += 1; -- // } -- // assert(slot_num == 0, "must have processed all the arguments"); -+ int numArgs = target->size_of_parameters(); -+ oop recv = STACK_OBJECT(-numArgs); -+ -+ instanceKlass* recvKlass = (instanceKlass *) recv->klass()->klass_part(); -+ itableOffsetEntry* ki = (itableOffsetEntry*) recvKlass->start_of_itable(); -+ int i; -+ for ( i = 0 ; i < recvKlass->itable_length() ; i++, ki++ ) { -+ if (ki->interface_klass() == clazz) break; -+ } -+ 
-+ itableMethodEntry* im = ki->first_method_entry(recv->klass()); -+ methodOop vmtarget = im[vmindex].method(); -+ -+ invoke_target(vmtarget, THREAD); -+ -+ return 0; - } -+ -+int MethodHandles::method_handle_entry_linkToVirtual(methodOop method, intptr_t UNUSED, TRAPS) { -+ JavaThread *thread = (JavaThread *) THREAD; -+ -+ InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame(); -+ interpreterState istate = frame->interpreter_state(); -+ -+ // Pop appendix argument from stack. This is a MemberName which we resolve to the -+ // target method. -+ oop vmentry = popFromStack(THREAD); -+ intptr_t* topOfStack = istate->stack(); -+ -+ // Resolve target method by looking up in the receiver object's vtable. -+ intptr_t vmindex = java_lang_invoke_MemberName::vmindex(vmentry); -+ methodOop target = (methodOop) java_lang_invoke_MemberName::vmtarget(vmentry); -+ int numArgs = target->size_of_parameters(); -+ oop recv = STACK_OBJECT(-numArgs); -+ instanceKlass* recvKlass_part = (instanceKlass *) recv->klass()->klass_part(); -+ -+ klassVtable* vtable = recvKlass_part->vtable(); -+ methodOop vmtarget = vtable->method_at(vmindex); -+ -+ invoke_target(vmtarget, THREAD); -+ -+ return 0; -+} -+ -+int MethodHandles::method_handle_entry_invalid(methodOop method, intptr_t UNUSED, TRAPS) { -+ ShouldNotReachHere(); -+ return 0; -+} -+ -+address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* masm, -+ vmIntrinsics::ID iid) { -+ switch (iid) { -+ case vmIntrinsics::_invokeGeneric: -+ case vmIntrinsics::_compiledLambdaForm: -+ // Perhaps surprisingly, the symbolic references visible to Java are not directly used. -+ // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod. -+ // They all allow an appendix argument. 
-+ return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invalid); -+ case vmIntrinsics::_invokeBasic: -+ return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invokeBasic); -+ case vmIntrinsics::_linkToStatic: -+ case vmIntrinsics::_linkToSpecial: -+ return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToStaticOrSpecial); -+ case vmIntrinsics::_linkToInterface: -+ return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToInterface); -+ case vmIntrinsics::_linkToVirtual: -+ return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToVirtual); -+ default: -+ ShouldNotReachHere(); -+ return NULL; -+ } -+} -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/zero/vm/methodHandles_zero.hpp ---- openjdk/hotspot/src/cpu/zero/vm/methodHandles_zero.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/zero/vm/methodHandles_zero.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -26,29 +26,16 @@ - - // Adapters - enum /* platform_dependent_constants */ { -- adapter_code_size = 0 -+ adapter_code_size = sizeof(ZeroEntry) * (Interpreter::method_handle_invoke_LAST - Interpreter::method_handle_invoke_FIRST + 1) - }; - --class RicochetFrame : public ResourceObj { -- friend class MethodHandles; -- private: -- /* -- RF field x86 SPARC -- sender_pc *(rsp+0) I7-0x8 -- sender_link rbp I6+BIAS -- exact_sender_sp rsi/r13 I5_savedSP -- conversion *(rcx+&amh_conv) L5_conv -- saved_args_base rax L4_sab (cf. 
Gargs = G4) -- saved_args_layout #NULL L3_sal -- saved_target *(rcx+&mh_vmtgt) L2_stgt -- continuation #STUB_CON L1_cont -- */ -- public: -- --static void generate_ricochet_blob(MacroAssembler* _masm, -- // output params: -- int* bounce_offset, -- int* exception_offset, -- int* frame_size_in_words); -+private: -+ static oop popFromStack(TRAPS); -+ static void invoke_target(methodOop method, TRAPS); -+ static int method_handle_entry_invokeBasic(methodOop method, intptr_t UNUSED, TRAPS); -+ static int method_handle_entry_linkToStaticOrSpecial(methodOop method, intptr_t UNUSED, TRAPS); -+ static int method_handle_entry_linkToVirtual(methodOop method, intptr_t UNUSED, TRAPS); -+ static int method_handle_entry_linkToInterface(methodOop method, intptr_t UNUSED, TRAPS); -+ static int method_handle_entry_invalid(methodOop method, intptr_t UNUSED, TRAPS); - - }; -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/cpu/zero/vm/register_zero.hpp ---- openjdk/hotspot/src/cpu/zero/vm/register_zero.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/cpu/zero/vm/register_zero.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -114,5 +114,8 @@ - }; - - CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1)); -+#ifndef DONT_USE_REGISTER_DEFINES -+#define noreg ((Register)(noreg_RegisterEnumValue)) -+#endif - - #endif // CPU_ZERO_VM_REGISTER_ZERO_HPP -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/adlc/output_h.cpp ---- openjdk/hotspot/src/share/vm/adlc/output_h.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/adlc/output_h.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -674,16 +674,19 @@ - else if( inst.is_ideal_mem() ) { - // Print out the field name if available to improve readability - fprintf(fp, " if (ra->C->alias_type(adr_type())->field() != NULL) {\n"); -- fprintf(fp, " st->print(\" ! 
Field \");\n"); -- fprintf(fp, " if( ra->C->alias_type(adr_type())->is_volatile() )\n"); -- fprintf(fp, " st->print(\" Volatile\");\n"); -- fprintf(fp, " ra->C->alias_type(adr_type())->field()->holder()->name()->print_symbol_on(st);\n"); -+ fprintf(fp, " ciField* f = ra->C->alias_type(adr_type())->field();\n"); -+ fprintf(fp, " st->print(\" ! Field: \");\n"); -+ fprintf(fp, " if (f->is_volatile())\n"); -+ fprintf(fp, " st->print(\"volatile \");\n"); -+ fprintf(fp, " f->holder()->name()->print_symbol_on(st);\n"); - fprintf(fp, " st->print(\".\");\n"); -- fprintf(fp, " ra->C->alias_type(adr_type())->field()->name()->print_symbol_on(st);\n"); -+ fprintf(fp, " f->name()->print_symbol_on(st);\n"); -+ fprintf(fp, " if (f->is_constant())\n"); -+ fprintf(fp, " st->print(\" (constant)\");\n"); - fprintf(fp, " } else\n"); - // Make sure 'Volatile' gets printed out -- fprintf(fp, " if( ra->C->alias_type(adr_type())->is_volatile() )\n"); -- fprintf(fp, " st->print(\" Volatile!\");\n"); -+ fprintf(fp, " if (ra->C->alias_type(adr_type())->is_volatile())\n"); -+ fprintf(fp, " st->print(\" volatile!\");\n"); - } - - // Complete the definition of the format function -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/asm/assembler.cpp ---- openjdk/hotspot/src/share/vm/asm/assembler.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/asm/assembler.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -318,6 +318,16 @@ - } - } - -+RegisterOrConstant AbstractAssembler::delayed_value(int(*value_fn)(), Register tmp, int offset) { -+ intptr_t val = (intptr_t) (*value_fn)(); -+ if (val != 0) return val + offset; -+ return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset); -+} -+RegisterOrConstant AbstractAssembler::delayed_value(address(*value_fn)(), Register tmp, int offset) { -+ intptr_t val = (intptr_t) (*value_fn)(); -+ if (val != 0) return val + offset; -+ return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset); -+} - intptr_t* 
AbstractAssembler::delayed_value_addr(int(*value_fn)()) { - DelayedConstant* dcon = DelayedConstant::add(T_INT, (DelayedConstant::value_fn_t) value_fn); - return &dcon->value; -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/asm/assembler.hpp ---- openjdk/hotspot/src/share/vm/asm/assembler.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/asm/assembler.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -406,12 +406,8 @@ - // offsets in code which must be generated before the object class is loaded. - // Field offsets are never zero, since an object's header (mark word) - // is located at offset zero. -- RegisterOrConstant delayed_value(int(*value_fn)(), Register tmp, int offset = 0) { -- return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset); -- } -- RegisterOrConstant delayed_value(address(*value_fn)(), Register tmp, int offset = 0) { -- return delayed_value_impl(delayed_value_addr(value_fn), tmp, offset); -- } -+ RegisterOrConstant delayed_value(int(*value_fn)(), Register tmp, int offset = 0); -+ RegisterOrConstant delayed_value(address(*value_fn)(), Register tmp, int offset = 0); - virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset) = 0; - // Last overloading is platform-dependent; look in assembler_.cpp. 
- static intptr_t* delayed_value_addr(int(*constant_fn)()); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/asm/register.hpp ---- openjdk/hotspot/src/share/vm/asm/register.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/asm/register.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -103,7 +103,8 @@ - ) { - assert( - a != b, -- "registers must be different" -+ err_msg("registers must be different: a=%d, b=%d", -+ a, b) - ); - } - -@@ -116,7 +117,8 @@ - assert( - a != b && a != c - && b != c, -- "registers must be different" -+ err_msg("registers must be different: a=%d, b=%d, c=%d", -+ a, b, c) - ); - } - -@@ -131,7 +133,8 @@ - a != b && a != c && a != d - && b != c && b != d - && c != d, -- "registers must be different" -+ err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d", -+ a, b, c, d) - ); - } - -@@ -148,7 +151,8 @@ - && b != c && b != d && b != e - && c != d && c != e - && d != e, -- "registers must be different" -+ err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d", -+ a, b, c, d, e) - ); - } - -@@ -167,7 +171,8 @@ - && c != d && c != e && c != f - && d != e && d != f - && e != f, -- "registers must be different" -+ err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d", -+ a, b, c, d, e, f) - ); - } - -@@ -188,7 +193,8 @@ - && d != e && d != f && d != g - && e != f && e != g - && f != g, -- "registers must be different" -+ err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d", -+ a, b, c, d, e, f, g) - ); - } - -@@ -211,7 +217,34 @@ - && e != f && e != g && e != h - && f != g && f != h - && g != h, -- "registers must be different" -+ err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d", -+ a, b, c, d, e, f, g, h) -+ ); -+} -+ -+ -+inline void assert_different_registers( -+ AbstractRegister a, -+ AbstractRegister b, -+ AbstractRegister c, -+ AbstractRegister d, -+ AbstractRegister e, -+ AbstractRegister f, -+ AbstractRegister g, -+ 
AbstractRegister h, -+ AbstractRegister i -+) { -+ assert( -+ a != b && a != c && a != d && a != e && a != f && a != g && a != h && a != i -+ && b != c && b != d && b != e && b != f && b != g && b != h && b != i -+ && c != d && c != e && c != f && c != g && c != h && c != i -+ && d != e && d != f && d != g && d != h && d != i -+ && e != f && e != g && e != h && e != i -+ && f != g && f != h && f != i -+ && g != h && g != i -+ && h != i, -+ err_msg("registers must be different: a=%d, b=%d, c=%d, d=%d, e=%d, f=%d, g=%d, h=%d, i=%d", -+ a, b, c, d, e, f, g, h, i) - ); - } - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_Canonicalizer.cpp ---- openjdk/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -540,6 +540,7 @@ - } - } - -+void Canonicalizer::do_TypeCast (TypeCast* x) {} - void Canonicalizer::do_Invoke (Invoke* x) {} - void Canonicalizer::do_NewInstance (NewInstance* x) {} - void Canonicalizer::do_NewTypeArray (NewTypeArray* x) {} -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_Canonicalizer.hpp ---- openjdk/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -74,6 +74,7 @@ - virtual void do_IfInstanceOf (IfInstanceOf* x); - virtual void do_Convert (Convert* x); - virtual void do_NullCheck (NullCheck* x); -+ virtual void do_TypeCast (TypeCast* x); - virtual void do_Invoke (Invoke* x); - virtual void do_NewInstance (NewInstance* x); - virtual void do_NewTypeArray (NewTypeArray* x); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_Compilation.cpp ---- openjdk/hotspot/src/share/vm/c1/c1_Compilation.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_Compilation.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -523,7 +523,7 @@ - assert(msg != NULL, "bailout message must exist"); - if 
(!bailed_out()) { - // keep first bailout message -- if (PrintBailouts) tty->print_cr("compilation bailout: %s", msg); -+ if (PrintCompilation || PrintBailouts) tty->print_cr("compilation bailout: %s", msg); - _bailout_msg = msg; - } - } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_FrameMap.cpp ---- openjdk/hotspot/src/share/vm/c1/c1_FrameMap.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_FrameMap.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -92,7 +92,6 @@ - for (i = 0; i < sizeargs;) { - BasicType t = sig_bt[i]; - assert(t != T_VOID, "should be skipping these"); -- - LIR_Opr opr = map_to_opr(t, regs + i, outgoing); - args->append(opr); - if (opr->is_address()) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_FrameMap.hpp ---- openjdk/hotspot/src/share/vm/c1/c1_FrameMap.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_FrameMap.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -181,8 +181,8 @@ - - // for outgoing calls, these also update the reserved area to - // include space for arguments and any ABI area. 
-- CallingConvention* c_calling_convention (const BasicTypeArray* signature); -- CallingConvention* java_calling_convention (const BasicTypeArray* signature, bool outgoing); -+ CallingConvention* c_calling_convention(const BasicTypeArray* signature); -+ CallingConvention* java_calling_convention(const BasicTypeArray* signature, bool outgoing); - - // deopt support - ByteSize sp_offset_for_orig_pc() { return sp_offset_for_monitor_base(_num_monitors); } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_GraphBuilder.cpp ---- openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -31,7 +31,7 @@ - #include "ci/ciCallSite.hpp" - #include "ci/ciField.hpp" - #include "ci/ciKlass.hpp" --#include "ci/ciMethodHandle.hpp" -+#include "ci/ciMemberName.hpp" - #include "compiler/compileBroker.hpp" - #include "interpreter/bytecode.hpp" - #include "runtime/sharedRuntime.hpp" -@@ -914,11 +914,11 @@ - - void GraphBuilder::store_local(ValueType* type, int index) { - Value x = pop(type); -- store_local(state(), x, type, index); -+ store_local(state(), x, index); - } - - --void GraphBuilder::store_local(ValueStack* state, Value x, ValueType* type, int index) { -+void GraphBuilder::store_local(ValueStack* state, Value x, int index) { - if (parsing_jsr()) { - // We need to do additional tracking of the location of the return - // address for jsrs since we don't handle arbitrary jsr/ret -@@ -1533,7 +1533,7 @@ - case T_ARRAY: - case T_OBJECT: - if (field_val.as_object()->should_be_constant()) { -- constant = new Constant(as_ValueType(field_val)); -+ constant = new Constant(as_ValueType(field_val)); - } - break; - -@@ -1560,12 +1560,51 @@ - append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching)); - } - break; -- case Bytecodes::_getfield : -- { -+ case Bytecodes::_getfield: { -+ // Check for compile-time constants, i.e., trusted 
final non-static fields. -+ Instruction* constant = NULL; -+ obj = apop(); -+ ObjectType* obj_type = obj->type()->as_ObjectType(); -+ if (obj_type->is_constant() && !PatchALot) { -+ ciObject* const_oop = obj_type->constant_value(); -+ if (field->is_constant()) { -+ ciConstant field_val = field->constant_value_of(const_oop); -+ BasicType field_type = field_val.basic_type(); -+ switch (field_type) { -+ case T_ARRAY: -+ case T_OBJECT: -+ if (field_val.as_object()->should_be_constant()) { -+ constant = new Constant(as_ValueType(field_val)); -+ } -+ break; -+ default: -+ constant = new Constant(as_ValueType(field_val)); -+ } -+ } else { -+ // For constant CallSites treat the target field as a compile time constant. -+ if (const_oop->is_call_site()) { -+ ciCallSite* call_site = const_oop->as_call_site(); -+ if (field->is_call_site_target()) { -+ ciMethodHandle* target = call_site->get_target(); -+ if (target != NULL) { // just in case -+ ciConstant field_val(T_OBJECT, target); -+ constant = new Constant(as_ValueType(field_val)); -+ // Add a dependence for invalidation of the optimization. -+ if (!call_site->is_constant_call_site()) { -+ dependency_recorder()->assert_call_site_target_value(call_site, target); -+ } -+ } -+ } -+ } -+ } -+ } -+ if (constant != NULL) { -+ push(type, append(constant)); -+ } else { - if (state_before == NULL) { - state_before = copy_state_for_exception(); - } -- LoadField* load = new LoadField(apop(), offset, field, false, state_before, needs_patching); -+ LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching); - Value replacement = !needs_patching ? 
_memory->load(load) : load; - if (replacement != load) { - assert(replacement->is_linked() || !replacement->can_be_linked(), "should already by linked"); -@@ -1573,22 +1612,23 @@ - } else { - push(type, append(load)); - } -- break; -- } -- -- case Bytecodes::_putfield : -- { Value val = pop(type); -- if (state_before == NULL) { -- state_before = copy_state_for_exception(); -- } -- StoreField* store = new StoreField(apop(), offset, field, val, false, state_before, needs_patching); -- if (!needs_patching) store = _memory->store(store); -- if (store != NULL) { -- append(store); -- } - } - break; -- default : -+ } -+ case Bytecodes::_putfield: { -+ Value val = pop(type); -+ obj = apop(); -+ if (state_before == NULL) { -+ state_before = copy_state_for_exception(); -+ } -+ StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching); -+ if (!needs_patching) store = _memory->store(store); -+ if (store != NULL) { -+ append(store); -+ } -+ break; -+ } -+ default: - ShouldNotReachHere(); - break; - } -@@ -1602,38 +1642,73 @@ - - - void GraphBuilder::invoke(Bytecodes::Code code) { -+ const bool has_receiver = -+ code == Bytecodes::_invokespecial || -+ code == Bytecodes::_invokevirtual || -+ code == Bytecodes::_invokeinterface; -+ const bool is_invokedynamic = (code == Bytecodes::_invokedynamic); -+ - bool will_link; -- ciMethod* target = stream()->get_method(will_link); -+ ciMethod* target = stream()->get_method(will_link); -+ ciKlass* holder = stream()->get_declared_method_holder(); -+ const Bytecodes::Code bc_raw = stream()->cur_bc_raw(); -+ -+ // FIXME bail out for now -+ if ((bc_raw == Bytecodes::_invokehandle || is_invokedynamic) && !will_link) { -+ BAILOUT("unlinked call site (FIXME needs patching or recompile support)"); -+ } -+ - // we have to make sure the argument size (incl. 
the receiver) - // is correct for compilation (the call would fail later during - // linkage anyway) - was bug (gri 7/28/99) -- if (target->is_loaded() && target->is_static() != (code == Bytecodes::_invokestatic)) BAILOUT("will cause link error"); -+ { -+ // Use raw to get rewritten bytecode. -+ const bool is_invokestatic = bc_raw == Bytecodes::_invokestatic; -+ const bool allow_static = -+ is_invokestatic || -+ bc_raw == Bytecodes::_invokehandle || -+ bc_raw == Bytecodes::_invokedynamic; -+ if (target->is_loaded()) { -+ if (( target->is_static() && !allow_static) || -+ (!target->is_static() && is_invokestatic)) { -+ BAILOUT("will cause link error"); -+ } -+ } -+ } - ciInstanceKlass* klass = target->holder(); - - // check if CHA possible: if so, change the code to invoke_special - ciInstanceKlass* calling_klass = method()->holder(); -- ciKlass* holder = stream()->get_declared_method_holder(); - ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder); - ciInstanceKlass* actual_recv = callee_holder; - -- // some methods are obviously bindable without any type checks so -- // convert them directly to an invokespecial. -- if (target->is_loaded() && !target->is_abstract() && -- target->can_be_statically_bound() && code == Bytecodes::_invokevirtual) { -- code = Bytecodes::_invokespecial; -+ // Some methods are obviously bindable without any type checks so -+ // convert them directly to an invokespecial or invokestatic. -+ if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) { -+ switch (bc_raw) { -+ case Bytecodes::_invokevirtual: code = Bytecodes::_invokespecial; break; -+ case Bytecodes::_invokehandle: code = Bytecodes::_invokestatic; break; -+ } - } - -- bool is_invokedynamic = code == Bytecodes::_invokedynamic; -+ // Push appendix argument (MethodType, CallSite, etc.), if one. 
-+ if (stream()->has_appendix()) { -+ ciObject* appendix = stream()->get_appendix(); -+ Value arg = append(new Constant(new ObjectConstant(appendix))); -+ apush(arg); -+ } - - // NEEDS_CLEANUP -- // I've added the target-is_loaded() test below but I don't really understand -+ // I've added the target->is_loaded() test below but I don't really understand - // how klass->is_loaded() can be true and yet target->is_loaded() is false. - // this happened while running the JCK invokevirtual tests under doit. TKR - ciMethod* cha_monomorphic_target = NULL; - ciMethod* exact_target = NULL; - Value better_receiver = NULL; - if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() && -- !target->is_method_handle_invoke()) { -+ !(// %%% FIXME: Are both of these relevant? -+ target->is_method_handle_intrinsic() || -+ target->is_compiled_lambda_form())) { - Value receiver = NULL; - ciInstanceKlass* receiver_klass = NULL; - bool type_is_exact = false; -@@ -1757,23 +1832,15 @@ - code == Bytecodes::_invokedynamic) { - ciMethod* inline_target = (cha_monomorphic_target != NULL) ? cha_monomorphic_target : target; - bool success = false; -- if (target->is_method_handle_invoke()) { -+ if (target->is_method_handle_intrinsic()) { - // method handle invokes -- success = !is_invokedynamic ? for_method_handle_inline(target) : for_invokedynamic_inline(target); -- } -- if (!success) { -+ success = for_method_handle_inline(target); -+ } else { - // static binding => check if callee is ok -- success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), better_receiver); -+ success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver); - } - CHECK_BAILOUT(); - --#ifndef PRODUCT -- // printing -- if (PrintInlining && !success) { -- // if it was successfully inlined, then it was already printed. 
-- print_inline_result(inline_target, success); -- } --#endif - clear_inline_bailout(); - if (success) { - // Register dependence if JVMTI has either breakpoint -@@ -1784,8 +1851,13 @@ - } - return; - } -+ } else { -+ print_inlining(target, "no static binding", /*success*/ false); - } -+ } else { -+ print_inlining(target, "not inlineable", /*success*/ false); - } -+ - // If we attempted an inline which did not succeed because of a - // bailout during construction of the callee graph, the entire - // compilation has to be aborted. This is fairly rare and currently -@@ -1799,10 +1871,6 @@ - - // inlining not successful => standard invoke - bool is_loaded = target->is_loaded(); -- bool has_receiver = -- code == Bytecodes::_invokespecial || -- code == Bytecodes::_invokevirtual || -- code == Bytecodes::_invokeinterface; - ValueType* result_type = as_ValueType(target->return_type()); - - // We require the debug info to be the "state before" because -@@ -1851,7 +1919,7 @@ - } else if (exact_target != NULL) { - target_klass = exact_target->holder(); - } -- profile_call(recv, target_klass); -+ profile_call(target, recv, target_klass); - } - } - -@@ -3088,30 +3156,61 @@ - } - - --bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Value receiver) { -- // Clear out any existing inline bailout condition -+bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Code bc, Value receiver) { -+ const char* msg = NULL; -+ -+ // clear out any existing inline bailout condition - clear_inline_bailout(); - -- if (callee->should_exclude()) { -- // callee is excluded -- INLINE_BAILOUT("excluded by CompilerOracle") -- } else if (callee->should_not_inline()) { -- // callee is excluded -- INLINE_BAILOUT("disallowed by CompilerOracle") -- } else if (!callee->can_be_compiled()) { -- // callee is not compilable (prob. 
has breakpoints) -- INLINE_BAILOUT("not compilable (disabled)") -- } else if (callee->intrinsic_id() != vmIntrinsics::_none && try_inline_intrinsics(callee)) { -- // intrinsics can be native or not -+ // exclude methods we don't want to inline -+ msg = should_not_inline(callee); -+ if (msg != NULL) { -+ print_inlining(callee, msg, /*success*/ false); -+ return false; -+ } -+ -+ // handle intrinsics -+ if (callee->intrinsic_id() != vmIntrinsics::_none) { -+ if (try_inline_intrinsics(callee)) { -+ print_inlining(callee, "intrinsic"); -+ return true; -+ } -+ // try normal inlining -+ } -+ -+ // certain methods cannot be parsed at all -+ msg = check_can_parse(callee); -+ if (msg != NULL) { -+ print_inlining(callee, msg, /*success*/ false); -+ return false; -+ } -+ -+ // If bytecode not set use the current one. -+ if (bc == Bytecodes::_illegal) { -+ bc = code(); -+ } -+ if (try_inline_full(callee, holder_known, bc, receiver)) - return true; -- } else if (callee->is_native()) { -- // non-intrinsic natives cannot be inlined -- INLINE_BAILOUT("non-intrinsic native") -- } else if (callee->is_abstract()) { -- INLINE_BAILOUT("abstract") -- } else { -- return try_inline_full(callee, holder_known, NULL, receiver); -- } -+ print_inlining(callee, _inline_bailout_msg, /*success*/ false); -+ return false; -+} -+ -+ -+const char* GraphBuilder::check_can_parse(ciMethod* callee) const { -+ // Certain methods cannot be parsed at all: -+ if ( callee->is_native()) return "native method"; -+ if ( callee->is_abstract()) return "abstract method"; -+ if (!callee->can_be_compiled()) return "not compilable (disabled)"; -+ return NULL; -+} -+ -+ -+// negative filter: should callee NOT be inlined? 
returns NULL, ok to inline, or rejection msg -+const char* GraphBuilder::should_not_inline(ciMethod* callee) const { -+ if ( callee->should_exclude()) return "excluded by CompilerOracle"; -+ if ( callee->should_not_inline()) return "disallowed by CompilerOracle"; -+ if ( callee->dont_inline()) return "don't inline by annotation"; -+ return NULL; - } - - -@@ -3286,7 +3385,7 @@ - recv = args->at(0); - null_check(recv); - } -- profile_call(recv, NULL); -+ profile_call(callee, recv, NULL); - } - } - } -@@ -3297,13 +3396,6 @@ - Value value = append_split(result); - if (result_type != voidType) push(result_type, value); - --#ifndef PRODUCT -- // printing -- if (PrintInlining) { -- print_inline_result(callee, true); -- } --#endif -- - // done - return true; - } -@@ -3459,7 +3551,7 @@ - } - - --bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBegin* cont_block, Value receiver) { -+bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecodes::Code bc, Value receiver) { - assert(!callee->is_native(), "callee must not be native"); - if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) { - INLINE_BAILOUT("inlining prohibited by policy"); -@@ -3490,8 +3582,8 @@ - if (callee->should_inline()) { - // ignore heuristic controls on inlining - } else { -- if (inline_level() > MaxInlineLevel ) INLINE_BAILOUT("too-deep inlining"); -- if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining"); -+ if (inline_level() > MaxInlineLevel ) INLINE_BAILOUT("inlining too deep"); -+ if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep"); - if (callee->code_size_for_inlining() > max_inline_size() ) INLINE_BAILOUT("callee is too large"); - - // don't inline throwable methods unless the inlining tree is rooted in a throwable class -@@ -3510,28 +3602,25 @@ - if (compilation()->env()->num_inlined_bytecodes() > 
DesiredMethodLimit) { - INLINE_BAILOUT("total inlining greater than DesiredMethodLimit"); - } -+ // printing -+ print_inlining(callee, ""); - } - --#ifndef PRODUCT -- // printing -- if (PrintInlining) { -- print_inline_result(callee, true); -- } --#endif -- - // NOTE: Bailouts from this point on, which occur at the - // GraphBuilder level, do not cause bailout just of the inlining but - // in fact of the entire compilation. - - BlockBegin* orig_block = block(); - -+ const bool is_invokedynamic = bc == Bytecodes::_invokedynamic; -+ const bool has_receiver = (bc != Bytecodes::_invokestatic && !is_invokedynamic); -+ - const int args_base = state()->stack_size() - callee->arg_size(); - assert(args_base >= 0, "stack underflow during inlining"); - - // Insert null check if necessary - Value recv = NULL; -- if (code() != Bytecodes::_invokestatic && -- code() != Bytecodes::_invokedynamic) { -+ if (has_receiver) { - // note: null check must happen even if first instruction of callee does - // an implicit null check since the callee is in a different scope - // and we must make sure exception handling does the right thing -@@ -3547,7 +3636,7 @@ - compilation()->set_would_profile(true); - - if (profile_calls()) { -- profile_call(recv, holder_known ? callee->holder() : NULL); -+ profile_call(callee, recv, holder_known ? callee->holder() : NULL); - } - } - -@@ -3556,7 +3645,7 @@ - // fall-through of control flow, all return instructions of the - // callee will need to be replaced by Goto's pointing to this - // continuation point. -- BlockBegin* cont = cont_block != NULL ? 
cont_block : block_at(next_bci()); -+ BlockBegin* cont = block_at(next_bci()); - bool continuation_existed = true; - if (cont == NULL) { - cont = new BlockBegin(next_bci()); -@@ -3589,17 +3678,10 @@ - // note: this will also ensure that all arguments are computed before being passed - ValueStack* callee_state = state(); - ValueStack* caller_state = state()->caller_state(); -- { int i = args_base; -- while (i < caller_state->stack_size()) { -- const int par_no = i - args_base; -- Value arg = caller_state->stack_at_inc(i); -- // NOTE: take base() of arg->type() to avoid problems storing -- // constants -- if (receiver != NULL && par_no == 0) { -- arg = receiver; -- } -- store_local(callee_state, arg, arg->type()->base(), par_no); -- } -+ for (int i = args_base; i < caller_state->stack_size(); ) { -+ const int arg_no = i - args_base; -+ Value arg = caller_state->stack_at_inc(i); -+ store_local(callee_state, arg, arg_no); - } - - // Remove args from stack. -@@ -3675,29 +3757,27 @@ - // block merging. This allows load elimination and CSE to take place - // across multiple callee scopes if they are relatively simple, and - // is currently essential to making inlining profitable. -- if (cont_block == NULL) { -- if (num_returns() == 1 -- && block() == orig_block -- && block() == inline_cleanup_block()) { -- _last = inline_cleanup_return_prev(); -- _state = inline_cleanup_state(); -- } else if (continuation_preds == cont->number_of_preds()) { -- // Inlining caused that the instructions after the invoke in the -- // caller are not reachable any more. So skip filling this block -- // with instructions! 
-- assert(cont == continuation(), ""); -+ if (num_returns() == 1 -+ && block() == orig_block -+ && block() == inline_cleanup_block()) { -+ _last = inline_cleanup_return_prev(); -+ _state = inline_cleanup_state(); -+ } else if (continuation_preds == cont->number_of_preds()) { -+ // Inlining caused that the instructions after the invoke in the -+ // caller are not reachable any more. So skip filling this block -+ // with instructions! -+ assert(cont == continuation(), ""); -+ assert(_last && _last->as_BlockEnd(), ""); -+ _skip_block = true; -+ } else { -+ // Resume parsing in continuation block unless it was already parsed. -+ // Note that if we don't change _last here, iteration in -+ // iterate_bytecodes_for_block will stop when we return. -+ if (!continuation()->is_set(BlockBegin::was_visited_flag)) { -+ // add continuation to work list instead of parsing it immediately - assert(_last && _last->as_BlockEnd(), ""); -+ scope_data()->parent()->add_to_work_list(continuation()); - _skip_block = true; -- } else { -- // Resume parsing in continuation block unless it was already parsed. -- // Note that if we don't change _last here, iteration in -- // iterate_bytecodes_for_block will stop when we return. 
-- if (!continuation()->is_set(BlockBegin::was_visited_flag)) { -- // add continuation to work list instead of parsing it immediately -- assert(_last && _last->as_BlockEnd(), ""); -- scope_data()->parent()->add_to_work_list(continuation()); -- _skip_block = true; -- } - } - } - -@@ -3715,114 +3795,88 @@ - - - bool GraphBuilder::for_method_handle_inline(ciMethod* callee) { -- assert(!callee->is_static(), "change next line"); -- int index = state()->stack_size() - (callee->arg_size_no_receiver() + 1); -- Value receiver = state()->stack_at(index); -- -- if (receiver->type()->is_constant()) { -- ciMethodHandle* method_handle = receiver->type()->as_ObjectType()->constant_value()->as_method_handle(); -- -- // Set the callee to have access to the class and signature in -- // the MethodHandleCompiler. -- method_handle->set_callee(callee); -- method_handle->set_caller(method()); -- -- // Get an adapter for the MethodHandle. -- ciMethod* method_handle_adapter = method_handle->get_method_handle_adapter(); -- if (method_handle_adapter != NULL) { -- return try_inline(method_handle_adapter, /*holder_known=*/ true); -- } -- } else if (receiver->as_CheckCast()) { -- // Match MethodHandle.selectAlternative idiom -- Phi* phi = receiver->as_CheckCast()->obj()->as_Phi(); -- -- if (phi != NULL && phi->operand_count() == 2) { -- // Get the two MethodHandle inputs from the Phi. -- Value op1 = phi->operand_at(0); -- Value op2 = phi->operand_at(1); -- ObjectType* op1type = op1->type()->as_ObjectType(); -- ObjectType* op2type = op2->type()->as_ObjectType(); -- -- if (op1type->is_constant() && op2type->is_constant()) { -- ciMethodHandle* mh1 = op1type->constant_value()->as_method_handle(); -- ciMethodHandle* mh2 = op2type->constant_value()->as_method_handle(); -- -- // Set the callee to have access to the class and signature in -- // the MethodHandleCompiler. 
-- mh1->set_callee(callee); -- mh1->set_caller(method()); -- mh2->set_callee(callee); -- mh2->set_caller(method()); -- -- // Get adapters for the MethodHandles. -- ciMethod* mh1_adapter = mh1->get_method_handle_adapter(); -- ciMethod* mh2_adapter = mh2->get_method_handle_adapter(); -- -- if (mh1_adapter != NULL && mh2_adapter != NULL) { -- set_inline_cleanup_info(); -- -- // Build the If guard -- BlockBegin* one = new BlockBegin(next_bci()); -- BlockBegin* two = new BlockBegin(next_bci()); -- BlockBegin* end = new BlockBegin(next_bci()); -- Instruction* iff = append(new If(phi, If::eql, false, op1, one, two, NULL, false)); -- block()->set_end(iff->as_BlockEnd()); -- -- // Connect up the states -- one->merge(block()->end()->state()); -- two->merge(block()->end()->state()); -- -- // Save the state for the second inlinee -- ValueStack* state_before = copy_state_before(); -- -- // Parse first adapter -- _last = _block = one; -- if (!try_inline_full(mh1_adapter, /*holder_known=*/ true, end, NULL)) { -- restore_inline_cleanup_info(); -- block()->clear_end(); // remove appended iff -- return false; -- } -- -- // Parse second adapter -- _last = _block = two; -- _state = state_before; -- if (!try_inline_full(mh2_adapter, /*holder_known=*/ true, end, NULL)) { -- restore_inline_cleanup_info(); -- block()->clear_end(); // remove appended iff -- return false; -- } -- -- connect_to_end(end); -+ ValueStack* state_before = state()->copy_for_parsing(); -+ vmIntrinsics::ID iid = callee->intrinsic_id(); -+ switch (iid) { -+ case vmIntrinsics::_invokeBasic: -+ { -+ // get MethodHandle receiver -+ const int args_base = state()->stack_size() - callee->arg_size(); -+ ValueType* type = state()->stack_at(args_base)->type(); -+ if (type->is_constant()) { -+ ciMethod* target = type->as_ObjectType()->constant_value()->as_method_handle()->get_vmtarget(); -+ guarantee(!target->is_method_handle_intrinsic(), "should not happen"); // XXX remove -+ Bytecodes::Code bc = target->is_static() ? 
Bytecodes::_invokestatic : Bytecodes::_invokevirtual; -+ if (try_inline(target, /*holder_known*/ true, bc)) { - return true; - } -+ } else { -+ print_inlining(callee, "receiver not constant", /*success*/ false); - } - } -+ break; -+ -+ case vmIntrinsics::_linkToVirtual: -+ case vmIntrinsics::_linkToStatic: -+ case vmIntrinsics::_linkToSpecial: -+ case vmIntrinsics::_linkToInterface: -+ { -+ // pop MemberName argument -+ const int args_base = state()->stack_size() - callee->arg_size(); -+ ValueType* type = apop()->type(); -+ if (type->is_constant()) { -+ ciMethod* target = type->as_ObjectType()->constant_value()->as_member_name()->get_vmtarget(); -+ // If the target is another method handle invoke try recursivly to get -+ // a better target. -+ if (target->is_method_handle_intrinsic()) { -+ if (for_method_handle_inline(target)) { -+ return true; -+ } -+ } else { -+ ciSignature* signature = target->signature(); -+ const int receiver_skip = target->is_static() ? 0 : 1; -+ // Cast receiver to its type. -+ if (!target->is_static()) { -+ ciKlass* tk = signature->accessing_klass(); -+ Value obj = state()->stack_at(args_base); -+ if (obj->exact_type() == NULL && -+ obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) { -+ TypeCast* c = new TypeCast(tk, obj, state_before); -+ append(c); -+ state()->stack_at_put(args_base, c); -+ } -+ } -+ // Cast reference arguments to its type. -+ for (int i = 0, j = 0; i < signature->count(); i++) { -+ ciType* t = signature->type_at(i); -+ if (t->is_klass()) { -+ ciKlass* tk = t->as_klass(); -+ Value obj = state()->stack_at(args_base + receiver_skip + j); -+ if (obj->exact_type() == NULL && -+ obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) { -+ TypeCast* c = new TypeCast(t, obj, state_before); -+ append(c); -+ state()->stack_at_put(args_base + receiver_skip + j, c); -+ } -+ } -+ j += t->size(); // long and double take two slots -+ } -+ Bytecodes::Code bc = target->is_static() ? 
Bytecodes::_invokestatic : Bytecodes::_invokevirtual; -+ if (try_inline(target, /*holder_known*/ true, bc)) { -+ return true; -+ } -+ } -+ } else { -+ print_inlining(callee, "MemberName not constant", /*success*/ false); -+ } -+ } -+ break; -+ -+ default: -+ fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid))); -+ break; - } -- return false; --} -- -- --bool GraphBuilder::for_invokedynamic_inline(ciMethod* callee) { -- // Get the MethodHandle from the CallSite. -- ciCallSite* call_site = stream()->get_call_site(); -- ciMethodHandle* method_handle = call_site->get_target(); -- -- // Set the callee to have access to the class and signature in the -- // MethodHandleCompiler. -- method_handle->set_callee(callee); -- method_handle->set_caller(method()); -- -- // Get an adapter for the MethodHandle. -- ciMethod* method_handle_adapter = method_handle->get_invokedynamic_adapter(); -- if (method_handle_adapter != NULL) { -- if (try_inline(method_handle_adapter, /*holder_known=*/ true)) { -- // Add a dependence for invalidation of the optimization. 
-- if (!call_site->is_constant_call_site()) { -- dependency_recorder()->assert_call_site_target_value(call_site, method_handle); -- } -- return true; -- } -- } -+ set_state(state_before); - return false; - } - -@@ -4014,22 +4068,24 @@ - } - - --#ifndef PRODUCT --void GraphBuilder::print_inline_result(ciMethod* callee, bool res) { -- CompileTask::print_inlining(callee, scope()->level(), bci(), _inline_bailout_msg); -- if (res && CIPrintMethodCodes) { -+void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) { -+ if (!PrintInlining) return; -+ assert(msg != NULL, "must be"); -+ CompileTask::print_inlining(callee, scope()->level(), bci(), msg); -+ if (success && CIPrintMethodCodes) { - callee->print_codes(); - } - } - - -+#ifndef PRODUCT - void GraphBuilder::print_stats() { - vmap()->print(); - } - #endif // PRODUCT - --void GraphBuilder::profile_call(Value recv, ciKlass* known_holder) { -- append(new ProfileCall(method(), bci(), recv, known_holder)); -+void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder) { -+ append(new ProfileCall(method(), bci(), callee, recv, known_holder)); - } - - void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_GraphBuilder.hpp ---- openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -225,7 +225,7 @@ - void load_constant(); - void load_local(ValueType* type, int index); - void store_local(ValueType* type, int index); -- void store_local(ValueStack* state, Value value, ValueType* type, int index); -+ void store_local(ValueStack* state, Value value, int index); - void load_indexed (BasicType type); - void store_indexed(BasicType type); - void stack_op(Bytecodes::Code code); -@@ -337,14 +337,16 @@ - void fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler = 
false); - - // inliners -- bool try_inline( ciMethod* callee, bool holder_known, Value receiver = NULL); -+ bool try_inline( ciMethod* callee, bool holder_known, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = NULL); - bool try_inline_intrinsics(ciMethod* callee); -- bool try_inline_full( ciMethod* callee, bool holder_known, BlockBegin* cont_block, Value receiver); -+ bool try_inline_full( ciMethod* callee, bool holder_known, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = NULL); - bool try_inline_jsr(int jsr_dest_bci); - -+ const char* check_can_parse(ciMethod* callee) const; -+ const char* should_not_inline(ciMethod* callee) const; -+ - // JSR 292 support - bool for_method_handle_inline(ciMethod* callee); -- bool for_invokedynamic_inline(ciMethod* callee); - - // helpers - void inline_bailout(const char* msg); -@@ -366,9 +368,9 @@ - bool append_unsafe_prefetch(ciMethod* callee, bool is_store, bool is_static); - void append_unsafe_CAS(ciMethod* callee); - -- NOT_PRODUCT(void print_inline_result(ciMethod* callee, bool res);) -+ void print_inlining(ciMethod* callee, const char* msg, bool success = true); - -- void profile_call(Value recv, ciKlass* predicted_holder); -+ void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder); - void profile_invocation(ciMethod* inlinee, ValueStack* state); - - // Shortcuts to profiling control. 
-diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_Instruction.cpp ---- openjdk/hotspot/src/share/vm/c1/c1_Instruction.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_Instruction.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -161,6 +161,12 @@ - return NULL; - } - -+ciType* Constant::exact_type() const { -+ if (type()->is_object()) { -+ return type()->as_ObjectType()->exact_type(); -+ } -+ return NULL; -+} - - ciType* LoadIndexed::exact_type() const { - ciType* array_type = array()->exact_type(); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_Instruction.hpp ---- openjdk/hotspot/src/share/vm/c1/c1_Instruction.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_Instruction.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -66,6 +66,7 @@ - class IfOp; - class Convert; - class NullCheck; -+class TypeCast; - class OsrEntry; - class ExceptionObject; - class StateSplit; -@@ -174,6 +175,7 @@ - virtual void do_IfOp (IfOp* x) = 0; - virtual void do_Convert (Convert* x) = 0; - virtual void do_NullCheck (NullCheck* x) = 0; -+ virtual void do_TypeCast (TypeCast* x) = 0; - virtual void do_Invoke (Invoke* x) = 0; - virtual void do_NewInstance (NewInstance* x) = 0; - virtual void do_NewTypeArray (NewTypeArray* x) = 0; -@@ -304,7 +306,8 @@ - - bool has_printable_bci() const { return NOT_PRODUCT(_printable_bci != -99) PRODUCT_ONLY(false); } - -- protected: -+ //protected: -+ public: - void set_type(ValueType* type) { - assert(type != NULL, "type must exist"); - _type = type; -@@ -486,6 +489,7 @@ - virtual TypeCheck* as_TypeCheck() { return NULL; } - virtual CheckCast* as_CheckCast() { return NULL; } - virtual InstanceOf* as_InstanceOf() { return NULL; } -+ virtual TypeCast* as_TypeCast() { return NULL; } - virtual AccessMonitor* as_AccessMonitor() { return NULL; } - virtual MonitorEnter* as_MonitorEnter() { return NULL; } - virtual MonitorExit* as_MonitorExit() { return NULL; } -@@ -636,8 +640,8 @@ - // accessors - int java_index() 
const { return _java_index; } - -- ciType* declared_type() const { return _declared_type; } -- ciType* exact_type() const; -+ virtual ciType* declared_type() const { return _declared_type; } -+ virtual ciType* exact_type() const; - - // generic - virtual void input_values_do(ValueVisitor* f) { /* no values */ } -@@ -648,13 +652,13 @@ - public: - // creation - Constant(ValueType* type): -- Instruction(type, NULL, true) -+ Instruction(type, NULL, /*type_is_constant*/ true) - { - assert(type->is_constant(), "must be a constant"); - } - - Constant(ValueType* type, ValueStack* state_before): -- Instruction(type, state_before, true) -+ Instruction(type, state_before, /*type_is_constant*/ true) - { - assert(state_before != NULL, "only used for constants which need patching"); - assert(type->is_constant(), "must be a constant"); -@@ -668,6 +672,7 @@ - virtual intx hash() const; - virtual bool is_equal(Value v) const; - -+ virtual ciType* exact_type() const; - - enum CompareResult { not_comparable = -1, cond_false, cond_true }; - -@@ -1101,6 +1106,29 @@ - }; - - -+// This node is supposed to cast the type of another node to a more precise -+// declared type. -+LEAF(TypeCast, Instruction) -+ private: -+ ciType* _declared_type; -+ Value _obj; -+ -+ public: -+ // The type of this node is the same type as the object type (and it might be constant). 
-+ TypeCast(ciType* type, Value obj, ValueStack* state_before) -+ : Instruction(obj->type(), state_before, obj->type()->is_constant()), -+ _declared_type(type), -+ _obj(obj) {} -+ -+ // accessors -+ ciType* declared_type() const { return _declared_type; } -+ Value obj() const { return _obj; } -+ -+ // generic -+ virtual void input_values_do(ValueVisitor* f) { f->visit(&_obj); } -+}; -+ -+ - BASE(StateSplit, Instruction) - private: - ValueStack* _state; -@@ -1164,6 +1192,7 @@ - - // JSR 292 support - bool is_invokedynamic() const { return code() == Bytecodes::_invokedynamic; } -+ bool is_method_handle_intrinsic() const { return target()->is_method_handle_intrinsic(); } - - virtual bool needs_exception_state() const { return false; } - -@@ -2275,14 +2304,16 @@ - private: - ciMethod* _method; - int _bci_of_invoke; -+ ciMethod* _callee; // the method that is called at the given bci - Value _recv; - ciKlass* _known_holder; - - public: -- ProfileCall(ciMethod* method, int bci, Value recv, ciKlass* known_holder) -+ ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder) - : Instruction(voidType) - , _method(method) - , _bci_of_invoke(bci) -+ , _callee(callee) - , _recv(recv) - , _known_holder(known_holder) - { -@@ -2292,6 +2323,7 @@ - - ciMethod* method() { return _method; } - int bci_of_invoke() { return _bci_of_invoke; } -+ ciMethod* callee() { return _callee; } - Value recv() { return _recv; } - ciKlass* known_holder() { return _known_holder; } - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_InstructionPrinter.cpp ---- openjdk/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -137,12 +137,16 @@ - ciMethod* m = (ciMethod*)value; - output()->print("", m->holder()->name()->as_utf8(), m->name()->as_utf8()); - } else { -- output()->print("", value->constant_encoding()); -+ output()->print(""); - } - } 
else if (type->as_InstanceConstant() != NULL) { - ciInstance* value = type->as_InstanceConstant()->value(); - if (value->is_loaded()) { -- output()->print("", value->constant_encoding()); -+ output()->print(""); - } else { - output()->print("", value); - } -@@ -453,6 +457,14 @@ - } - - -+void InstructionPrinter::do_TypeCast(TypeCast* x) { -+ output()->print("type_cast("); -+ print_value(x->obj()); -+ output()->print(") "); -+ print_klass(x->declared_type()->klass()); -+} -+ -+ - void InstructionPrinter::do_Invoke(Invoke* x) { - if (x->receiver() != NULL) { - print_value(x->receiver()); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_InstructionPrinter.hpp ---- openjdk/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -101,6 +101,7 @@ - virtual void do_IfOp (IfOp* x); - virtual void do_Convert (Convert* x); - virtual void do_NullCheck (NullCheck* x); -+ virtual void do_TypeCast (TypeCast* x); - virtual void do_Invoke (Invoke* x); - virtual void do_NewInstance (NewInstance* x); - virtual void do_NewTypeArray (NewTypeArray* x); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_LIR.hpp ---- openjdk/hotspot/src/share/vm/c1/c1_LIR.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_LIR.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -1,5 +1,5 @@ - /* -- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it -@@ -26,6 +26,7 @@ - #define SHARE_VM_C1_C1_LIR_HPP - - #include "c1/c1_ValueType.hpp" -+#include "oops/methodOop.hpp" - - class BlockBegin; - class BlockList; -@@ -1160,8 +1161,9 @@ - return - is_invokedynamic() // An invokedynamic is always a MethodHandle call site. 
- || -- (method()->holder()->name() == ciSymbol::java_lang_invoke_MethodHandle() && -- methodOopDesc::is_method_handle_invoke_name(method()->name()->sid())); -+ method()->is_compiled_lambda_form() // Java-generated adapter -+ || -+ method()->is_method_handle_intrinsic(); // JVM-generated MH intrinsic - } - - intptr_t vtable_offset() const { -@@ -1796,18 +1798,20 @@ - - private: - ciMethod* _profiled_method; -- int _profiled_bci; -- LIR_Opr _mdo; -- LIR_Opr _recv; -- LIR_Opr _tmp1; -- ciKlass* _known_holder; -+ int _profiled_bci; -+ ciMethod* _profiled_callee; -+ LIR_Opr _mdo; -+ LIR_Opr _recv; -+ LIR_Opr _tmp1; -+ ciKlass* _known_holder; - - public: - // Destroys recv -- LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder) -+ LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder) - : LIR_Op(code, LIR_OprFact::illegalOpr, NULL) // no result, no info - , _profiled_method(profiled_method) - , _profiled_bci(profiled_bci) -+ , _profiled_callee(profiled_callee) - , _mdo(mdo) - , _recv(recv) - , _tmp1(t1) -@@ -1815,6 +1819,7 @@ - - ciMethod* profiled_method() const { return _profiled_method; } - int profiled_bci() const { return _profiled_bci; } -+ ciMethod* profiled_callee() const { return _profiled_callee; } - LIR_Opr mdo() const { return _mdo; } - LIR_Opr recv() const { return _recv; } - LIR_Opr tmp1() const { return _tmp1; } -@@ -2116,8 +2121,8 @@ - CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, - ciMethod* profiled_method, int profiled_bci); - // methodDataOop profiling -- void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { -- append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass)); -+ void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr 
mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { -+ append(new LIR_OpProfileCall(lir_profile_call, method, bci, callee, mdo, recv, t1, cha_klass)); - } - }; - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_LIRGenerator.cpp ---- openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -1910,6 +1910,14 @@ - } - - -+void LIRGenerator::do_TypeCast(TypeCast* x) { -+ LIRItem value(x->obj(), this); -+ value.load_item(); -+ // the result is the same as from the node we are casting -+ set_result(x, value.result()); -+} -+ -+ - void LIRGenerator::do_Throw(Throw* x) { - LIRItem exception(x->exception(), this); - exception.load_item(); -@@ -2737,7 +2745,10 @@ - // JSR 292 - // Preserve the SP over MethodHandle call sites. - ciMethod* target = x->target(); -- if (target->is_method_handle_invoke()) { -+ bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant? -+ target->is_method_handle_intrinsic() || -+ target->is_compiled_lambda_form()); -+ if (is_method_handle_invoke) { - info->set_is_method_handle_invoke(true); - __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr()); - } -@@ -2813,7 +2824,7 @@ - - // JSR 292 - // Restore the SP after MethodHandle call sites. 
-- if (target->is_method_handle_invoke()) { -+ if (is_method_handle_invoke) { - __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer()); - } - -@@ -2959,7 +2970,7 @@ - recv = new_register(T_OBJECT); - __ move(value.result(), recv); - } -- __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder()); -+ __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder()); - } - - void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_LIRGenerator.hpp ---- openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -494,6 +494,7 @@ - virtual void do_IfOp (IfOp* x); - virtual void do_Convert (Convert* x); - virtual void do_NullCheck (NullCheck* x); -+ virtual void do_TypeCast (TypeCast* x); - virtual void do_Invoke (Invoke* x); - virtual void do_NewInstance (NewInstance* x); - virtual void do_NewTypeArray (NewTypeArray* x); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_Optimizer.cpp ---- openjdk/hotspot/src/share/vm/c1/c1_Optimizer.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_Optimizer.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -478,6 +478,7 @@ - void do_IfOp (IfOp* x); - void do_Convert (Convert* x); - void do_NullCheck (NullCheck* x); -+ void do_TypeCast (TypeCast* x); - void do_Invoke (Invoke* x); - void do_NewInstance (NewInstance* x); - void do_NewTypeArray (NewTypeArray* x); -@@ -648,6 +649,7 @@ - void NullCheckVisitor::do_IfOp (IfOp* x) {} - void NullCheckVisitor::do_Convert (Convert* x) {} - void NullCheckVisitor::do_NullCheck (NullCheck* x) { nce()->handle_NullCheck(x); } -+void NullCheckVisitor::do_TypeCast (TypeCast* x) {} - void NullCheckVisitor::do_Invoke (Invoke* x) { nce()->handle_Invoke(x); } - void NullCheckVisitor::do_NewInstance (NewInstance* x) { 
nce()->handle_NewInstance(x); } - void NullCheckVisitor::do_NewTypeArray (NewTypeArray* x) { nce()->handle_NewArray(x); } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_ValueMap.hpp ---- openjdk/hotspot/src/share/vm/c1/c1_ValueMap.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_ValueMap.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -178,6 +178,7 @@ - void do_IfOp (IfOp* x) { /* nothing to do */ } - void do_Convert (Convert* x) { /* nothing to do */ } - void do_NullCheck (NullCheck* x) { /* nothing to do */ } -+ void do_TypeCast (TypeCast* x) { /* nothing to do */ } - void do_NewInstance (NewInstance* x) { /* nothing to do */ } - void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ } - void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_ValueStack.cpp ---- openjdk/hotspot/src/share/vm/c1/c1_ValueStack.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_ValueStack.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -195,6 +195,7 @@ - - void ValueStack::print() { - scope()->method()->print_name(); -+ tty->cr(); - if (stack_is_empty()) { - tty->print_cr("empty stack"); - } else { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_ValueStack.hpp ---- openjdk/hotspot/src/share/vm/c1/c1_ValueStack.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_ValueStack.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -142,6 +142,10 @@ - return x; - } - -+ void stack_at_put(int i, Value x) { -+ _stack.at_put(i, x); -+ } -+ - // pinning support - void pin_stack_for_linear_scan(); - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_ValueType.cpp ---- openjdk/hotspot/src/share/vm/c1/c1_ValueType.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_ValueType.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -101,6 +101,23 @@ - ciObject* InstanceConstant::constant_value() const { return _value; } - ciObject* 
ClassConstant::constant_value() const { return _value; } - -+ciType* ObjectConstant::exact_type() const { -+ ciObject* c = constant_value(); -+ return (c != NULL && !c->is_null_object()) ? c->klass() : NULL; -+} -+ciType* ArrayConstant::exact_type() const { -+ ciObject* c = constant_value(); -+ return (c != NULL && !c->is_null_object()) ? c->klass() : NULL; -+} -+ciType* InstanceConstant::exact_type() const { -+ ciObject* c = constant_value(); -+ return (c != NULL && !c->is_null_object()) ? c->klass() : NULL; -+} -+ciType* ClassConstant::exact_type() const { -+ ciObject* c = constant_value(); -+ return (c != NULL && !c->is_null_object()) ? c->klass() : NULL; -+} -+ - - ValueType* as_ValueType(BasicType type) { - switch (type) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/c1/c1_ValueType.hpp ---- openjdk/hotspot/src/share/vm/c1/c1_ValueType.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/c1/c1_ValueType.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -297,7 +297,8 @@ - virtual const char tchar() const { return 'a'; } - virtual const char* name() const { return "object"; } - virtual ObjectType* as_ObjectType() { return this; } -- virtual ciObject* constant_value() const { ShouldNotReachHere(); return NULL; } -+ virtual ciObject* constant_value() const { ShouldNotReachHere(); return NULL; } -+ virtual ciType* exact_type() const { return NULL; } - bool is_loaded() const; - jobject encoding() const; - }; -@@ -315,6 +316,7 @@ - virtual bool is_constant() const { return true; } - virtual ObjectConstant* as_ObjectConstant() { return this; } - virtual ciObject* constant_value() const; -+ virtual ciType* exact_type() const; - }; - - -@@ -334,9 +336,9 @@ - ciArray* value() const { return _value; } - - virtual bool is_constant() const { return true; } -- - virtual ArrayConstant* as_ArrayConstant() { return this; } - virtual ciObject* constant_value() const; -+ virtual ciType* exact_type() const; - }; - - -@@ -356,9 +358,9 @@ - ciInstance* value() const { 
return _value; } - - virtual bool is_constant() const { return true; } -- - virtual InstanceConstant* as_InstanceConstant(){ return this; } - virtual ciObject* constant_value() const; -+ virtual ciType* exact_type() const; - }; - - -@@ -378,9 +380,9 @@ - ciInstanceKlass* value() const { return _value; } - - virtual bool is_constant() const { return true; } -- - virtual ClassConstant* as_ClassConstant() { return this; } - virtual ciObject* constant_value() const; -+ virtual ciType* exact_type() const; - }; - - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/bcEscapeAnalyzer.cpp ---- openjdk/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -238,9 +238,11 @@ - - // some methods are obviously bindable without any type checks so - // convert them directly to an invokespecial. -- if (target->is_loaded() && !target->is_abstract() && -- target->can_be_statically_bound() && code == Bytecodes::_invokevirtual) { -- code = Bytecodes::_invokespecial; -+ if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) { -+ switch (code) { -+ case Bytecodes::_invokevirtual: code = Bytecodes::_invokespecial; break; -+ case Bytecodes::_invokehandle: code = Bytecodes::_invokestatic; break; -+ } - } - - // compute size of arguments -@@ -866,7 +868,12 @@ - { bool will_link; - ciMethod* target = s.get_method(will_link); - ciKlass* holder = s.get_declared_method_holder(); -- invoke(state, s.cur_bc(), target, holder); -+ // Push appendix argument, if one. -+ if (s.has_appendix()) { -+ state.apush(unknown_obj); -+ } -+ // Pass in raw bytecode because we need to see invokehandle instructions. 
-+ invoke(state, s.cur_bc_raw(), target, holder); - ciType* return_type = target->return_type(); - if (!return_type->is_primitive_type()) { - state.apush(unknown_obj); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciClassList.hpp ---- openjdk/hotspot/src/share/vm/ci/ciClassList.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/ci/ciClassList.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -47,6 +47,7 @@ - class ciNullObject; - class ciInstance; - class ciCallSite; -+class ciMemberName; - class ciMethodHandle; - class ciMethod; - class ciMethodData; -@@ -100,6 +101,7 @@ - friend class ciObject; \ - friend class ciNullObject; \ - friend class ciInstance; \ -+friend class ciMemberName; \ - friend class ciMethod; \ - friend class ciMethodData; \ - friend class ciMethodHandle; \ -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciEnv.cpp ---- openjdk/hotspot/src/share/vm/ci/ciEnv.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/ci/ciEnv.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -50,7 +50,6 @@ - #include "oops/oop.inline.hpp" - #include "oops/oop.inline2.hpp" - #include "prims/jvmtiExport.hpp" --#include "prims/methodHandleWalk.hpp" - #include "runtime/init.hpp" - #include "runtime/reflection.hpp" - #include "runtime/sharedRuntime.hpp" -@@ -582,7 +581,7 @@ - assert(index < 0, "only one kind of index at a time"); - ConstantPoolCacheEntry* cpc_entry = cpool->cache()->entry_at(cache_index); - index = cpc_entry->constant_pool_index(); -- oop obj = cpc_entry->f1(); -+ oop obj = cpc_entry->f1_as_instance(); - if (obj != NULL) { - assert(obj->is_instance() || obj->is_array(), "must be a Java reference"); - ciObject* ciobj = get_object(obj); -@@ -750,7 +749,7 @@ - - if (cpool->has_preresolution() - || (holder == ciEnv::MethodHandle_klass() && -- methodOopDesc::is_method_handle_invoke_name(name_sym))) { -+ MethodHandles::is_signature_polymorphic_name(holder->get_klassOop(), name_sym))) { - // Short-circuit lookups for JSR 292-related call 
sites. - // That is, do not rely only on name-based lookups, because they may fail - // if the names are not resolvable in the boot class loader (7056328). -@@ -760,11 +759,13 @@ - case Bytecodes::_invokespecial: - case Bytecodes::_invokestatic: - { -- methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index, bc); -+ oop appendix_oop = NULL; -+ methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index); - if (m != NULL) { - return get_object(m)->as_method(); - } - } -+ break; - } - } - -@@ -800,27 +801,28 @@ - // Compare the following logic with InterpreterRuntime::resolve_invokedynamic. - assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic"); - -- bool is_resolved = cpool->cache()->main_entry_at(index)->is_resolved(bc); -- if (is_resolved && cpool->cache()->secondary_entry_at(index)->is_f1_null()) -- // FIXME: code generation could allow for null (unlinked) call site -- is_resolved = false; -+ ConstantPoolCacheEntry* secondary_entry = cpool->cache()->secondary_entry_at(index); -+ bool is_resolved = !secondary_entry->is_f1_null(); -+ // FIXME: code generation could allow for null (unlinked) call site -+ // The call site could be made patchable as follows: -+ // Load the appendix argument from the constant pool. -+ // Test the appendix argument and jump to a known deopt routine if it is null. -+ // Jump through a patchable call site, which is initially a deopt routine. -+ // Patch the call site to the nmethod entry point of the static compiled lambda form. -+ // As with other two-component call sites, both values must be independently verified. - -- // Call site might not be resolved yet. We could create a real invoker method from the -- // compiler, but it is simpler to stop the code path here with an unlinked method. -+ // Call site might not be resolved yet. -+ // Stop the code path here with an unlinked method. 
- if (!is_resolved) { - ciInstanceKlass* holder = get_object(SystemDictionary::MethodHandle_klass())->as_instance_klass(); -- ciSymbol* name = ciSymbol::invokeExact_name(); -+ ciSymbol* name = ciSymbol::invokeBasic_name(); - ciSymbol* signature = get_symbol(cpool->signature_ref_at(index)); - return get_unloaded_method(holder, name, signature, accessor); - } - -- // Get the invoker methodOop from the constant pool. -- oop f1_value = cpool->cache()->main_entry_at(index)->f1(); -- methodOop signature_invoker = (methodOop) f1_value; -- assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(), -- "correct result from LinkResolver::resolve_invokedynamic"); -- -- return get_object(signature_invoker)->as_method(); -+ // Get the invoker methodOop and the extra argument from the constant pool. -+ methodOop adapter = secondary_entry->f2_as_vfinal_method(); -+ return get_object(adapter)->as_method(); - } - - -@@ -1131,7 +1133,7 @@ - // ------------------------------------------------------------------ - // ciEnv::notice_inlined_method() - void ciEnv::notice_inlined_method(ciMethod* method) { -- _num_inlined_bytecodes += method->code_size(); -+ _num_inlined_bytecodes += method->code_size_for_inlining(); - } - - // ------------------------------------------------------------------ -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciMemberName.cpp ---- /dev/null Thu Jan 01 00:00:00 1970 +0000 -+++ openjdk/hotspot/src/share/vm/ci/ciMemberName.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -0,0 +1,39 @@ -+/* -+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. -+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -+ * -+ * This code is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 only, as -+ * published by the Free Software Foundation. 
-+ * -+ * This code is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -+ * version 2 for more details (a copy is included in the LICENSE file that -+ * accompanied this code). -+ * -+ * You should have received a copy of the GNU General Public License version -+ * 2 along with this work; if not, write to the Free Software Foundation, -+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -+ * or visit www.oracle.com if you need additional information or have any -+ * questions. -+ * -+ */ -+ -+#include "precompiled.hpp" -+#include "ci/ciClassList.hpp" -+#include "ci/ciMemberName.hpp" -+#include "ci/ciUtilities.hpp" -+#include "classfile/javaClasses.hpp" -+ -+// ------------------------------------------------------------------ -+// ciMemberName::get_vmtarget -+// -+// Return: MN.vmtarget -+ciMethod* ciMemberName::get_vmtarget() const { -+ VM_ENTRY_MARK; -+ oop vmtarget_oop = java_lang_invoke_MemberName::vmtarget(get_oop()); -+ return CURRENT_ENV->get_object(vmtarget_oop)->as_method(); -+} -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciMemberName.hpp ---- /dev/null Thu Jan 01 00:00:00 1970 +0000 -+++ openjdk/hotspot/src/share/vm/ci/ciMemberName.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -0,0 +1,44 @@ -+/* -+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. -+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -+ * -+ * This code is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 only, as -+ * published by the Free Software Foundation. 
-+ * -+ * This code is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -+ * version 2 for more details (a copy is included in the LICENSE file that -+ * accompanied this code). -+ * -+ * You should have received a copy of the GNU General Public License version -+ * 2 along with this work; if not, write to the Free Software Foundation, -+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -+ * or visit www.oracle.com if you need additional information or have any -+ * questions. -+ * -+ */ -+ -+#ifndef SHARE_VM_CI_CIMEMBERNAME_HPP -+#define SHARE_VM_CI_CIMEMBERNAME_HPP -+ -+#include "ci/ciCallProfile.hpp" -+#include "ci/ciInstance.hpp" -+ -+// ciMemberName -+// -+// The class represents a java.lang.invoke.MemberName object. -+class ciMemberName : public ciInstance { -+public: -+ ciMemberName(instanceHandle h_i) : ciInstance(h_i) {} -+ -+ // What kind of ciObject is this? -+ bool is_member_name() const { return true; } -+ -+ ciMethod* get_vmtarget() const; -+}; -+ -+#endif // SHARE_VM_CI_CIMEMBERNAME_HPP -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciMethod.cpp ---- openjdk/hotspot/src/share/vm/ci/ciMethod.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/ci/ciMethod.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -770,39 +770,37 @@ - // invokedynamic support - - // ------------------------------------------------------------------ --// ciMethod::is_method_handle_invoke -+// ciMethod::is_method_handle_intrinsic - // --// Return true if the method is an instance of one of the two --// signature-polymorphic MethodHandle methods, invokeExact or invokeGeneric. 
--bool ciMethod::is_method_handle_invoke() const { -- if (!is_loaded()) { -- bool flag = (holder()->name() == ciSymbol::java_lang_invoke_MethodHandle() && -- methodOopDesc::is_method_handle_invoke_name(name()->sid())); -- return flag; -- } -- VM_ENTRY_MARK; -- return get_methodOop()->is_method_handle_invoke(); -+// Return true if the method is an instance of the JVM-generated -+// signature-polymorphic MethodHandle methods, _invokeBasic, _linkToVirtual, etc. -+bool ciMethod::is_method_handle_intrinsic() const { -+ vmIntrinsics::ID iid = _intrinsic_id; // do not check if loaded -+ return (MethodHandles::is_signature_polymorphic(iid) && -+ MethodHandles::is_signature_polymorphic_intrinsic(iid)); - } - - // ------------------------------------------------------------------ --// ciMethod::is_method_handle_adapter -+// ciMethod::is_compiled_lambda_form - // - // Return true if the method is a generated MethodHandle adapter. --// These are built by MethodHandleCompiler. --bool ciMethod::is_method_handle_adapter() const { -- if (!is_loaded()) return false; -- VM_ENTRY_MARK; -- return get_methodOop()->is_method_handle_adapter(); -+// These are built by Java code. -+bool ciMethod::is_compiled_lambda_form() const { -+ vmIntrinsics::ID iid = _intrinsic_id; // do not check if loaded -+ return iid == vmIntrinsics::_compiledLambdaForm; - } - --ciInstance* ciMethod::method_handle_type() { -- check_is_loaded(); -- VM_ENTRY_MARK; -- oop mtype = get_methodOop()->method_handle_type(); -- return CURRENT_THREAD_ENV->get_object(mtype)->as_instance(); -+// ------------------------------------------------------------------ -+// ciMethod::has_member_arg -+// -+// Return true if the method is a linker intrinsic like _linkToVirtual. -+// These are built by the JVM. 
-+bool ciMethod::has_member_arg() const { -+ vmIntrinsics::ID iid = _intrinsic_id; // do not check if loaded -+ return (MethodHandles::is_signature_polymorphic(iid) && -+ MethodHandles::has_member_arg(iid)); - } - -- - // ------------------------------------------------------------------ - // ciMethod::ensure_method_data - // -@@ -1025,28 +1023,13 @@ - // ------------------------------------------------------------------ - // ciMethod::code_size_for_inlining - // --// Code size for inlining decisions. --// --// Don't fully count method handle adapters against inlining budgets: --// the metric we use here is the number of call sites in the adapter --// as they are probably the instructions which generate some code. -+// Code size for inlining decisions. This method returns a code -+// size of 1 for methods which has the ForceInline annotation. - int ciMethod::code_size_for_inlining() { - check_is_loaded(); -- -- // Method handle adapters -- if (is_method_handle_adapter()) { -- // Count call sites -- int call_site_count = 0; -- ciBytecodeStream iter(this); -- while (iter.next() != ciBytecodeStream::EOBC()) { -- if (Bytecodes::is_invoke(iter.cur_bc())) { -- call_site_count++; -- } -- } -- return call_site_count; -+ if (get_methodOop()->force_inline()) { -+ return 1; - } -- -- // Normal method - return code_size(); - } - -@@ -1128,7 +1111,8 @@ - constantPoolHandle pool (THREAD, get_methodOop()->constants()); - methodHandle spec_method; - KlassHandle spec_klass; -- LinkResolver::resolve_method(spec_method, spec_klass, pool, refinfo_index, THREAD); -+ Bytecodes::Code code = (is_static ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual); -+ LinkResolver::resolve_method_statically(spec_method, spec_klass, code, pool, refinfo_index, THREAD); - if (HAS_PENDING_EXCEPTION) { - CLEAR_PENDING_EXCEPTION; - return false; -@@ -1208,8 +1192,16 @@ - // - // Print the name of this method, without signature. 
- void ciMethod::print_short_name(outputStream* st) { -- check_is_loaded(); -- GUARDED_VM_ENTRY(get_methodOop()->print_short_name(st);) -+ if (is_loaded()) { -+ GUARDED_VM_ENTRY(get_methodOop()->print_short_name(st);); -+ } else { -+ // Fall back if method is not loaded. -+ holder()->print_name_on(st); -+ st->print("::"); -+ name()->print_symbol_on(st); -+ if (WizardMode) -+ signature()->as_symbol()->print_symbol_on(st); -+ } - } - - // ------------------------------------------------------------------ -@@ -1224,6 +1216,7 @@ - holder()->print_name_on(st); - st->print(" signature="); - signature()->as_symbol()->print_symbol_on(st); -+ st->print(" arg_size=%d", arg_size()); - if (is_loaded()) { - st->print(" loaded=true flags="); - flags().print_member_flags(st); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciMethod.hpp ---- openjdk/hotspot/src/share/vm/ci/ciMethod.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/ci/ciMethod.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -133,16 +133,20 @@ - return _signature->size() + (_flags.is_static() ? 0 : 1); - } - // Report the number of elements on stack when invoking this method. -- // This is different than the regular arg_size because invokdynamic -+ // This is different than the regular arg_size because invokedynamic - // has an implicit receiver. - int invoke_arg_size(Bytecodes::Code code) const { -- int arg_size = _signature->size(); -- // Add a receiver argument, maybe: -- if (code != Bytecodes::_invokestatic && -- code != Bytecodes::_invokedynamic) { -- arg_size++; -+ if (is_loaded()) { -+ return arg_size(); -+ } else { -+ int arg_size = _signature->size(); -+ // Add a receiver argument, maybe: -+ if (code != Bytecodes::_invokestatic && -+ code != Bytecodes::_invokedynamic) { -+ arg_size++; -+ } -+ return arg_size; - } -- return arg_size; - } - - -@@ -160,6 +164,9 @@ - // Code size for inlining decisions. 
- int code_size_for_inlining(); - -+ bool force_inline() { return get_methodOop()->force_inline(); } -+ bool dont_inline() { return get_methodOop()->dont_inline(); } -+ - int comp_level(); - int highest_osr_comp_level(); - -@@ -256,9 +263,9 @@ - int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC - - // JSR 292 support -- bool is_method_handle_invoke() const; -- bool is_method_handle_adapter() const; -- ciInstance* method_handle_type(); -+ bool is_method_handle_intrinsic() const; -+ bool is_compiled_lambda_form() const; -+ bool has_member_arg() const; - - // What kind of ciObject is this? - bool is_method() { return true; } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciMethodHandle.cpp ---- openjdk/hotspot/src/share/vm/ci/ciMethodHandle.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/ci/ciMethodHandle.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -24,84 +24,18 @@ - - #include "precompiled.hpp" - #include "ci/ciClassList.hpp" --#include "ci/ciInstance.hpp" --#include "ci/ciMethodData.hpp" - #include "ci/ciMethodHandle.hpp" - #include "ci/ciUtilities.hpp" --#include "prims/methodHandleWalk.hpp" --#include "prims/methodHandles.hpp" -- --// ciMethodHandle -+#include "classfile/javaClasses.hpp" - - // ------------------------------------------------------------------ --// ciMethodHandle::get_adapter -+// ciMethodHandle::get_vmtarget - // --// Return an adapter for this MethodHandle. 
--ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) { -+// Return: MH.form -> LF.vmentry -> MN.vmtarget -+ciMethod* ciMethodHandle::get_vmtarget() const { - VM_ENTRY_MARK; -- Handle h(get_oop()); -- methodHandle callee(_callee->get_methodOop()); -- assert(callee->is_method_handle_invoke(), ""); -- oop mt1 = callee->method_handle_type(); -- oop mt2 = java_lang_invoke_MethodHandle::type(h()); -- if (!java_lang_invoke_MethodType::equals(mt1, mt2)) { -- if (PrintMiscellaneous && (Verbose || WizardMode)) { -- tty->print_cr("ciMethodHandle::get_adapter: types not equal"); -- mt1->print(); mt2->print(); -- } -- return NULL; -- } -- // We catch all exceptions here that could happen in the method -- // handle compiler and stop the VM. -- MethodHandleCompiler mhc(h, callee->name(), callee->signature(), _profile.count(), is_invokedynamic, THREAD); -- if (!HAS_PENDING_EXCEPTION) { -- methodHandle m = mhc.compile(THREAD); -- if (!HAS_PENDING_EXCEPTION) { -- return CURRENT_ENV->get_object(m())->as_method(); -- } -- } -- if (PrintMiscellaneous && (Verbose || WizardMode)) { -- tty->print("*** ciMethodHandle::get_adapter => "); -- PENDING_EXCEPTION->print(); -- tty->print("*** get_adapter (%s): ", is_invokedynamic ? "indy" : "mh"); ((ciObject*)this)->print(); -- } -- CLEAR_PENDING_EXCEPTION; -- return NULL; -+ oop form_oop = java_lang_invoke_MethodHandle::form(get_oop()); -+ oop vmentry_oop = java_lang_invoke_LambdaForm::vmentry(form_oop); -+ oop vmtarget_oop = java_lang_invoke_MemberName::vmtarget(vmentry_oop); -+ return CURRENT_ENV->get_object(vmtarget_oop)->as_method(); - } -- --// ------------------------------------------------------------------ --// ciMethodHandle::get_adapter --// --// Return an adapter for this MethodHandle. --ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) { -- ciMethod* result = get_adapter_impl(is_invokedynamic); -- if (result) { -- // Fake up the MDO maturity. 
-- ciMethodData* mdo = result->method_data(); -- if (mdo != NULL && _caller->method_data() != NULL && _caller->method_data()->is_mature()) { -- mdo->set_mature(); -- } -- } -- return result; --} -- -- --#ifdef ASSERT --// ------------------------------------------------------------------ --// ciMethodHandle::print_chain_impl --// --// Implementation of the print method. --void ciMethodHandle::print_chain_impl() { -- ASSERT_IN_VM; -- MethodHandleChain::print(get_oop()); --} -- -- --// ------------------------------------------------------------------ --// ciMethodHandle::print_chain --// --// Implementation of the print_chain method. --void ciMethodHandle::print_chain() { -- GUARDED_VM_ENTRY(print_chain_impl();); --} --#endif -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciMethodHandle.hpp ---- openjdk/hotspot/src/share/vm/ci/ciMethodHandle.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/ci/ciMethodHandle.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -25,61 +25,20 @@ - #ifndef SHARE_VM_CI_CIMETHODHANDLE_HPP - #define SHARE_VM_CI_CIMETHODHANDLE_HPP - --#include "ci/ciCallProfile.hpp" -+#include "ci/ciClassList.hpp" - #include "ci/ciInstance.hpp" --#include "prims/methodHandles.hpp" - - // ciMethodHandle - // - // The class represents a java.lang.invoke.MethodHandle object. - class ciMethodHandle : public ciInstance { --private: -- ciMethod* _callee; -- ciMethod* _caller; -- ciCallProfile _profile; -- ciMethod* _method_handle_adapter; -- ciMethod* _invokedynamic_adapter; -- -- // Return an adapter for this MethodHandle. 
-- ciMethod* get_adapter_impl(bool is_invokedynamic); -- ciMethod* get_adapter( bool is_invokedynamic); -- --protected: -- void print_chain_impl() NOT_DEBUG_RETURN; -- - public: -- ciMethodHandle(instanceHandle h_i) : -- ciInstance(h_i), -- _callee(NULL), -- _caller(NULL), -- _method_handle_adapter(NULL), -- _invokedynamic_adapter(NULL) -- {} -+ ciMethodHandle(instanceHandle h_i) : ciInstance(h_i) {} - - // What kind of ciObject is this? - bool is_method_handle() const { return true; } - -- void set_callee(ciMethod* m) { _callee = m; } -- void set_caller(ciMethod* m) { _caller = m; } -- void set_call_profile(ciCallProfile profile) { _profile = profile; } -- -- // Return an adapter for a MethodHandle call. -- ciMethod* get_method_handle_adapter() { -- if (_method_handle_adapter == NULL) { -- _method_handle_adapter = get_adapter(false); -- } -- return _method_handle_adapter; -- } -- -- // Return an adapter for an invokedynamic call. -- ciMethod* get_invokedynamic_adapter() { -- if (_invokedynamic_adapter == NULL) { -- _invokedynamic_adapter = get_adapter(true); -- } -- return _invokedynamic_adapter; -- } -- -- void print_chain() NOT_DEBUG_RETURN; -+ ciMethod* get_vmtarget() const; - }; - - #endif // SHARE_VM_CI_CIMETHODHANDLE_HPP -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciObject.hpp ---- openjdk/hotspot/src/share/vm/ci/ciObject.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/ci/ciObject.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -138,13 +138,14 @@ - jobject constant_encoding(); - - // What kind of ciObject is this? 
-- virtual bool is_null_object() const { return false; } -- virtual bool is_call_site() const { return false; } -- virtual bool is_cpcache() const { return false; } -+ virtual bool is_null_object() const { return false; } -+ virtual bool is_call_site() const { return false; } -+ virtual bool is_cpcache() const { return false; } - virtual bool is_instance() { return false; } -+ virtual bool is_member_name() const { return false; } - virtual bool is_method() { return false; } - virtual bool is_method_data() { return false; } -- virtual bool is_method_handle() const { return false; } -+ virtual bool is_method_handle() const { return false; } - virtual bool is_array() { return false; } - virtual bool is_obj_array() { return false; } - virtual bool is_type_array() { return false; } -@@ -208,6 +209,10 @@ - assert(is_instance(), "bad cast"); - return (ciInstance*)this; - } -+ ciMemberName* as_member_name() { -+ assert(is_member_name(), "bad cast"); -+ return (ciMemberName*)this; -+ } - ciMethod* as_method() { - assert(is_method(), "bad cast"); - return (ciMethod*)this; -@@ -290,7 +295,8 @@ - } - - // Print debugging output about this ciObject. -- void print(outputStream* st = tty); -+ void print(outputStream* st); -+ void print() { print(tty); } // GDB cannot handle default arguments - - // Print debugging output about the oop this ciObject represents. 
- void print_oop(outputStream* st = tty); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciObjectFactory.cpp ---- openjdk/hotspot/src/share/vm/ci/ciObjectFactory.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/ci/ciObjectFactory.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -28,6 +28,7 @@ - #include "ci/ciInstance.hpp" - #include "ci/ciInstanceKlass.hpp" - #include "ci/ciInstanceKlassKlass.hpp" -+#include "ci/ciMemberName.hpp" - #include "ci/ciMethod.hpp" - #include "ci/ciMethodData.hpp" - #include "ci/ciMethodHandle.hpp" -@@ -344,6 +345,8 @@ - instanceHandle h_i(THREAD, (instanceOop)o); - if (java_lang_invoke_CallSite::is_instance(o)) - return new (arena()) ciCallSite(h_i); -+ else if (java_lang_invoke_MemberName::is_instance(o)) -+ return new (arena()) ciMemberName(h_i); - else if (java_lang_invoke_MethodHandle::is_instance(o)) - return new (arena()) ciMethodHandle(h_i); - else -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciSignature.hpp ---- openjdk/hotspot/src/share/vm/ci/ciSignature.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/ci/ciSignature.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -39,10 +39,11 @@ - ciKlass* _accessing_klass; - - GrowableArray* _types; -- int _size; -- int _count; -+ int _size; // number of stack slots required for arguments -+ int _count; // number of parameter types in the signature - - friend class ciMethod; -+ friend class ciBytecodeStream; - friend class ciObjectFactory; - - ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* signature); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciStreams.cpp ---- openjdk/hotspot/src/share/vm/ci/ciStreams.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/ci/ciStreams.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -364,6 +364,29 @@ - } - - // ------------------------------------------------------------------ -+// ciBytecodeStream::has_appendix -+// -+// Returns true if there is an appendix argument stored in 
the -+// constant pool cache at the current bci. -+bool ciBytecodeStream::has_appendix() { -+ VM_ENTRY_MARK; -+ constantPoolHandle cpool(_method->get_methodOop()->constants()); -+ return constantPoolOopDesc::has_appendix_at_if_loaded(cpool, get_method_index()); -+} -+ -+// ------------------------------------------------------------------ -+// ciBytecodeStream::get_appendix -+// -+// Return the appendix argument stored in the constant pool cache at -+// the current bci. -+ciObject* ciBytecodeStream::get_appendix() { -+ VM_ENTRY_MARK; -+ constantPoolHandle cpool(_method->get_methodOop()->constants()); -+ oop appendix_oop = constantPoolOopDesc::appendix_at_if_loaded(cpool, get_method_index()); -+ return CURRENT_ENV->get_object(appendix_oop); -+} -+ -+// ------------------------------------------------------------------ - // ciBytecodeStream::get_declared_method_holder - // - // Get the declared holder of the currently referenced method. -@@ -378,9 +401,9 @@ - VM_ENTRY_MARK; - constantPoolHandle cpool(_method->get_methodOop()->constants()); - bool ignore; -- // report as InvokeDynamic for invokedynamic, which is syntactically classless -+ // report as MethodHandle for invokedynamic, which is syntactically classless - if (cur_bc() == Bytecodes::_invokedynamic) -- return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_lang_invoke_InvokeDynamic(), false); -+ return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_lang_invoke_MethodHandle(), false); - return CURRENT_ENV->get_klass_by_index(cpool, get_method_holder_index(), ignore, _holder); - } - -@@ -396,6 +419,24 @@ - } - - // ------------------------------------------------------------------ -+// ciBytecodeStream::get_declared_method_signature -+// -+// Get the declared signature of the currently referenced method. -+// -+// This is always the same as the signature of the resolved method -+// itself, except for _invokehandle and _invokedynamic calls. 
-+// -+ciSignature* ciBytecodeStream::get_declared_method_signature() { -+ int sig_index = get_method_signature_index(); -+ VM_ENTRY_MARK; -+ ciEnv* env = CURRENT_ENV; -+ constantPoolHandle cpool(_method->get_methodOop()->constants()); -+ Symbol* sig_sym = cpool->symbol_at(sig_index); -+ ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass(); -+ return new (env->arena()) ciSignature(pool_holder, cpool, env->get_symbol(sig_sym)); -+} -+ -+// ------------------------------------------------------------------ - // ciBytecodeStream::get_method_signature_index - // - // Get the constant pool index of the signature of the method -@@ -434,7 +475,7 @@ - // Get the CallSite from the constant pool cache. - int method_index = get_method_index(); - ConstantPoolCacheEntry* cpcache_entry = cpcache->secondary_entry_at(method_index); -- oop call_site_oop = cpcache_entry->f1(); -+ oop call_site_oop = cpcache_entry->f1_as_instance(); - - // Create a CallSite object and return it. - return CURRENT_ENV->get_object(call_site_oop)->as_call_site(); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciStreams.hpp ---- openjdk/hotspot/src/share/vm/ci/ciStreams.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/ci/ciStreams.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -259,8 +259,11 @@ - - // If this is a method invocation bytecode, get the invoked method. 
- ciMethod* get_method(bool& will_link); -+ bool has_appendix(); -+ ciObject* get_appendix(); - ciKlass* get_declared_method_holder(); - int get_method_holder_index(); -+ ciSignature* get_declared_method_signature(); - int get_method_signature_index(); - - ciCPCache* get_cpcache() const; -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciSymbol.cpp ---- openjdk/hotspot/src/share/vm/ci/ciSymbol.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/ci/ciSymbol.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -83,6 +83,10 @@ - GUARDED_VM_ENTRY(return get_symbol()->starts_with(prefix, len);) - } - -+bool ciSymbol::is_signature_polymorphic_name() const { -+ GUARDED_VM_ENTRY(return MethodHandles::is_signature_polymorphic_name(get_symbol());) -+} -+ - // ------------------------------------------------------------------ - // ciSymbol::index_of - // -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciSymbol.hpp ---- openjdk/hotspot/src/share/vm/ci/ciSymbol.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/ci/ciSymbol.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -107,6 +107,8 @@ - - // Are two ciSymbols equal? - bool equals(ciSymbol* obj) { return this->_symbol == obj->get_symbol(); } -+ -+ bool is_signature_polymorphic_name() const; - }; - - #endif // SHARE_VM_CI_CISYMBOL_HPP -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/ci/ciTypeFlow.cpp ---- openjdk/hotspot/src/share/vm/ci/ciTypeFlow.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/ci/ciTypeFlow.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -643,9 +643,9 @@ - // ------------------------------------------------------------------ - // ciTypeFlow::StateVector::do_invoke - void ciTypeFlow::StateVector::do_invoke(ciBytecodeStream* str, -- bool has_receiver) { -+ bool has_receiver_foo) { - bool will_link; -- ciMethod* method = str->get_method(will_link); -+ ciMethod* callee = str->get_method(will_link); - if (!will_link) { - // We weren't able to find the method. 
- if (str->cur_bc() == Bytecodes::_invokedynamic) { -@@ -654,12 +654,24 @@ - (Deoptimization::Reason_uninitialized, - Deoptimization::Action_reinterpret)); - } else { -- ciKlass* unloaded_holder = method->holder(); -+ ciKlass* unloaded_holder = callee->holder(); - trap(str, unloaded_holder, str->get_method_holder_index()); - } - } else { -- ciSignature* signature = method->signature(); -+ // TODO Use Bytecode_invoke after metadata changes. -+ //Bytecode_invoke inv(str->method(), str->cur_bci()); -+ //const bool has_receiver = callee->is_loaded() ? !callee->is_static() : inv.has_receiver(); -+ Bytecode inv(str); -+ Bytecodes::Code code = inv.invoke_code(); -+ const bool has_receiver = callee->is_loaded() ? !callee->is_static() : code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic; -+ -+ ciSignature* signature = callee->signature(); - ciSignatureStream sigstr(signature); -+ // Push appendix argument, if one. -+ if (str->has_appendix()) { -+ ciObject* appendix = str->get_appendix(); -+ push_object(appendix->klass()); -+ } - int arg_size = signature->size(); - int stack_base = stack_size() - arg_size; - int i = 0; -@@ -677,6 +689,7 @@ - for (int j = 0; j < arg_size; j++) { - pop(); - } -+ assert(!callee->is_loaded() || has_receiver == !callee->is_static(), "mismatch"); - if (has_receiver) { - // Check this? 
- pop_object(); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/classfile/classFileParser.cpp ---- openjdk/hotspot/src/share/vm/classfile/classFileParser.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/classfile/classFileParser.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -2128,12 +2128,6 @@ - _has_vanilla_constructor = true; - } - -- if (EnableInvokeDynamic && (m->is_method_handle_invoke() || -- m->is_method_handle_adapter())) { -- THROW_MSG_(vmSymbols::java_lang_VirtualMachineError(), -- "Method handle invokers must be defined internally to the VM", nullHandle); -- } -- - return m; - } - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/classfile/javaClasses.cpp ---- openjdk/hotspot/src/share/vm/classfile/javaClasses.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/classfile/javaClasses.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -126,6 +126,13 @@ - if (!find_field(ik, name_symbol, signature_symbol, &fd, allow_super)) { - ResourceMark rm; - tty->print_cr("Invalid layout of %s at %s", ik->external_name(), name_symbol->as_C_string()); -+#ifndef PRODUCT -+ klass_oop->print(); -+ tty->print_cr("all fields:"); -+ for (AllFieldStream fs(instanceKlass::cast(klass_oop)); !fs.done(); fs.next()) { -+ tty->print_cr(" name: %s, sig: %s, flags: %08x", fs.name()->as_C_string(), fs.signature()->as_C_string(), fs.access_flags().as_int()); -+ } -+#endif //PRODUCT - fatal("Invalid layout of preloaded class"); - } - dest_offset = fd.offset(); -@@ -1455,6 +1462,7 @@ - nmethod* nm = NULL; - bool skip_fillInStackTrace_check = false; - bool skip_throwableInit_check = false; -+ bool skip_hidden = false; - - for (frame fr = thread->last_frame(); max_depth != total_count;) { - methodOop method = NULL; -@@ -1534,6 +1542,12 @@ - skip_throwableInit_check = true; - } - } -+ if (method->is_hidden()) { -+ if (skip_hidden) continue; -+ } else { -+ // start skipping hidden frames after first non-hidden frame -+ skip_hidden = !ShowHiddenFrames; -+ } - 
bt.push(method, bci, CHECK); - total_count++; - } -@@ -1724,6 +1738,8 @@ - java_lang_StackTraceElement::set_methodName(element(), methodname); - // Fill in source file name - Symbol* source = instanceKlass::cast(method->method_holder())->source_file_name(); -+ if (ShowHiddenFrames && source == NULL) -+ source = vmSymbols::unknown_class_name(); - oop filename = StringTable::intern(source, CHECK_0); - java_lang_StackTraceElement::set_fileName(element(), filename); - // File in source line number -@@ -1736,6 +1752,9 @@ - } else { - // Returns -1 if no LineNumberTable, and otherwise actual line number - line_number = method->line_number_from_bci(bci); -+ if (line_number == -1 && ShowHiddenFrames) { -+ line_number = bci + 1000000; -+ } - } - java_lang_StackTraceElement::set_lineNumber(element(), line_number); - -@@ -2377,8 +2396,7 @@ - // Support for java_lang_invoke_MethodHandle - - int java_lang_invoke_MethodHandle::_type_offset; --int java_lang_invoke_MethodHandle::_vmtarget_offset; --int java_lang_invoke_MethodHandle::_vmentry_offset; -+int java_lang_invoke_MethodHandle::_form_offset; - - int java_lang_invoke_MemberName::_clazz_offset; - int java_lang_invoke_MemberName::_name_offset; -@@ -2387,21 +2405,16 @@ - int java_lang_invoke_MemberName::_vmtarget_offset; - int java_lang_invoke_MemberName::_vmindex_offset; - --int java_lang_invoke_DirectMethodHandle::_vmindex_offset; -- --int java_lang_invoke_BoundMethodHandle::_argument_offset; --int java_lang_invoke_BoundMethodHandle::_vmargslot_offset; -- --int java_lang_invoke_AdapterMethodHandle::_conversion_offset; -- --int java_lang_invoke_CountingMethodHandle::_vmcount_offset; -+int java_lang_invoke_LambdaForm::_vmentry_offset; - - void java_lang_invoke_MethodHandle::compute_offsets() { - klassOop klass_oop = SystemDictionary::MethodHandle_klass(); - if (klass_oop != NULL && EnableInvokeDynamic) { -- bool allow_super = false; -- compute_offset(_type_offset, klass_oop, vmSymbols::type_name(), 
vmSymbols::java_lang_invoke_MethodType_signature(), allow_super); -- METHODHANDLE_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET); -+ compute_offset(_type_offset, klass_oop, vmSymbols::type_name(), vmSymbols::java_lang_invoke_MethodType_signature()); -+ compute_optional_offset(_form_offset, klass_oop, vmSymbols::form_name(), vmSymbols::java_lang_invoke_LambdaForm_signature()); -+ if (_form_offset == 0) { -+ EnableInvokeDynamic = false; -+ } - } - } - -@@ -2412,50 +2425,17 @@ - compute_offset(_name_offset, klass_oop, vmSymbols::name_name(), vmSymbols::string_signature()); - compute_offset(_type_offset, klass_oop, vmSymbols::type_name(), vmSymbols::object_signature()); - compute_offset(_flags_offset, klass_oop, vmSymbols::flags_name(), vmSymbols::int_signature()); -- compute_offset(_vmindex_offset, klass_oop, vmSymbols::vmindex_name(), vmSymbols::int_signature()); - MEMBERNAME_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET); - } - } - --void java_lang_invoke_DirectMethodHandle::compute_offsets() { -- klassOop k = SystemDictionary::DirectMethodHandle_klass(); -- if (k != NULL && EnableInvokeDynamic) { -- DIRECTMETHODHANDLE_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET); -+void java_lang_invoke_LambdaForm::compute_offsets() { -+ klassOop klass_oop = SystemDictionary::LambdaForm_klass(); -+ if (klass_oop != NULL && EnableInvokeDynamic) { -+ compute_offset(_vmentry_offset, klass_oop, vmSymbols::vmentry_name(), vmSymbols::java_lang_invoke_MemberName_signature()); - } - } - --void java_lang_invoke_BoundMethodHandle::compute_offsets() { -- klassOop k = SystemDictionary::BoundMethodHandle_klass(); -- if (k != NULL && EnableInvokeDynamic) { -- compute_offset(_vmargslot_offset, k, vmSymbols::vmargslot_name(), vmSymbols::int_signature(), true); -- compute_offset(_argument_offset, k, vmSymbols::argument_name(), vmSymbols::object_signature(), true); -- } --} -- --void java_lang_invoke_AdapterMethodHandle::compute_offsets() { -- klassOop k = 
SystemDictionary::AdapterMethodHandle_klass(); -- if (k != NULL && EnableInvokeDynamic) { -- compute_offset(_conversion_offset, k, vmSymbols::conversion_name(), vmSymbols::int_signature(), true); -- } --} -- --void java_lang_invoke_CountingMethodHandle::compute_offsets() { -- klassOop k = SystemDictionary::CountingMethodHandle_klass(); -- if (k != NULL && EnableInvokeDynamic) { -- compute_offset(_vmcount_offset, k, vmSymbols::vmcount_name(), vmSymbols::int_signature(), true); -- } --} -- --int java_lang_invoke_CountingMethodHandle::vmcount(oop mh) { -- assert(is_instance(mh), "CMH only"); -- return mh->int_field(_vmcount_offset); --} -- --void java_lang_invoke_CountingMethodHandle::set_vmcount(oop mh, int count) { -- assert(is_instance(mh), "CMH only"); -- mh->int_field_put(_vmcount_offset, count); --} -- - oop java_lang_invoke_MethodHandle::type(oop mh) { - return mh->obj_field(_type_offset); - } -@@ -2464,31 +2444,14 @@ - mh->obj_field_put(_type_offset, mtype); - } - --// fetch type.form.vmslots, which is the number of JVM stack slots --// required to carry the arguments of this MH --int java_lang_invoke_MethodHandle::vmslots(oop mh) { -- oop mtype = type(mh); -- if (mtype == NULL) return 0; // Java code would get NPE -- oop form = java_lang_invoke_MethodType::form(mtype); -- if (form == NULL) return 0; // Java code would get NPE -- return java_lang_invoke_MethodTypeForm::vmslots(form); -+oop java_lang_invoke_MethodHandle::form(oop mh) { -+ assert(_form_offset != 0, ""); -+ return mh->obj_field(_form_offset); - } - --// fetch the low-level entry point for this mh --MethodHandleEntry* java_lang_invoke_MethodHandle::vmentry(oop mh) { -- return (MethodHandleEntry*) mh->address_field(_vmentry_offset); --} -- --void java_lang_invoke_MethodHandle::set_vmentry(oop mh, MethodHandleEntry* me) { -- assert(_vmentry_offset != 0, "must be present"); -- -- // This is always the final step that initializes a valid method handle: -- mh->release_address_field_put(_vmentry_offset, 
(address) me); -- -- // There should be enough memory barriers on exit from native methods -- // to ensure that the MH is fully initialized to all threads before -- // Java code can publish it in global data structures. -- // But just in case, we use release_address_field_put. -+void java_lang_invoke_MethodHandle::set_form(oop mh, oop lform) { -+ assert(_form_offset != 0, ""); -+ mh->obj_field_put(_form_offset, lform); - } - - /// MemberName accessors -@@ -2540,57 +2503,40 @@ - - void java_lang_invoke_MemberName::set_vmtarget(oop mname, oop ref) { - assert(is_instance(mname), "wrong type"); -+#ifdef ASSERT -+ // check the type of the vmtarget -+ if (ref != NULL) { -+ switch (flags(mname) & (MN_IS_METHOD | -+ MN_IS_CONSTRUCTOR | -+ MN_IS_FIELD)) { -+ case MN_IS_METHOD: -+ case MN_IS_CONSTRUCTOR: -+ assert(ref->is_method(), "should be a method"); -+ break; -+ case MN_IS_FIELD: -+ assert(ref->is_klass(), "should be a class"); -+ break; -+ default: -+ ShouldNotReachHere(); -+ } -+ } -+#endif //ASSERT - mname->obj_field_put(_vmtarget_offset, ref); - } - --int java_lang_invoke_MemberName::vmindex(oop mname) { -+intptr_t java_lang_invoke_MemberName::vmindex(oop mname) { - assert(is_instance(mname), "wrong type"); -- return mname->int_field(_vmindex_offset); -+ return (intptr_t) mname->address_field(_vmindex_offset); - } - --void java_lang_invoke_MemberName::set_vmindex(oop mname, int index) { -+void java_lang_invoke_MemberName::set_vmindex(oop mname, intptr_t index) { - assert(is_instance(mname), "wrong type"); -- mname->int_field_put(_vmindex_offset, index); -+ mname->address_field_put(_vmindex_offset, (address) index); - } - --oop java_lang_invoke_MethodHandle::vmtarget(oop mh) { -- assert(is_instance(mh), "MH only"); -- return mh->obj_field(_vmtarget_offset); --} -- --void java_lang_invoke_MethodHandle::set_vmtarget(oop mh, oop ref) { -- assert(is_instance(mh), "MH only"); -- mh->obj_field_put(_vmtarget_offset, ref); --} -- --int 
java_lang_invoke_DirectMethodHandle::vmindex(oop mh) { -- assert(is_instance(mh), "DMH only"); -- return mh->int_field(_vmindex_offset); --} -- --void java_lang_invoke_DirectMethodHandle::set_vmindex(oop mh, int index) { -- assert(is_instance(mh), "DMH only"); -- mh->int_field_put(_vmindex_offset, index); --} -- --int java_lang_invoke_BoundMethodHandle::vmargslot(oop mh) { -- assert(is_instance(mh), "BMH only"); -- return mh->int_field(_vmargslot_offset); --} -- --oop java_lang_invoke_BoundMethodHandle::argument(oop mh) { -- assert(is_instance(mh), "BMH only"); -- return mh->obj_field(_argument_offset); --} -- --int java_lang_invoke_AdapterMethodHandle::conversion(oop mh) { -- assert(is_instance(mh), "AMH only"); -- return mh->int_field(_conversion_offset); --} -- --void java_lang_invoke_AdapterMethodHandle::set_conversion(oop mh, int conv) { -- assert(is_instance(mh), "AMH only"); -- mh->int_field_put(_conversion_offset, conv); -+oop java_lang_invoke_LambdaForm::vmentry(oop lform) { -+ assert(is_instance(lform), "wrong type"); -+ return lform->obj_field(_vmentry_offset); - } - - -@@ -2598,14 +2544,12 @@ - - int java_lang_invoke_MethodType::_rtype_offset; - int java_lang_invoke_MethodType::_ptypes_offset; --int java_lang_invoke_MethodType::_form_offset; - - void java_lang_invoke_MethodType::compute_offsets() { - klassOop k = SystemDictionary::MethodType_klass(); - if (k != NULL) { - compute_offset(_rtype_offset, k, vmSymbols::rtype_name(), vmSymbols::class_signature()); - compute_offset(_ptypes_offset, k, vmSymbols::ptypes_name(), vmSymbols::class_array_signature()); -- compute_offset(_form_offset, k, vmSymbols::form_name(), vmSymbols::java_lang_invoke_MethodTypeForm_signature()); - } - } - -@@ -2635,6 +2579,8 @@ - } - - bool java_lang_invoke_MethodType::equals(oop mt1, oop mt2) { -+ if (mt1 == mt2) -+ return true; - if (rtype(mt1) != rtype(mt2)) - return false; - if (ptype_count(mt1) != ptype_count(mt2)) -@@ -2656,11 +2602,6 @@ - return (objArrayOop) 
mt->obj_field(_ptypes_offset); - } - --oop java_lang_invoke_MethodType::form(oop mt) { -- assert(is_instance(mt), "must be a MethodType"); -- return mt->obj_field(_form_offset); --} -- - oop java_lang_invoke_MethodType::ptype(oop mt, int idx) { - return ptypes(mt)->obj_at(idx); - } -@@ -2669,62 +2610,20 @@ - return ptypes(mt)->length(); - } - -- -- --// Support for java_lang_invoke_MethodTypeForm -- --int java_lang_invoke_MethodTypeForm::_vmslots_offset; --int java_lang_invoke_MethodTypeForm::_vmlayout_offset; --int java_lang_invoke_MethodTypeForm::_erasedType_offset; --int java_lang_invoke_MethodTypeForm::_genericInvoker_offset; -- --void java_lang_invoke_MethodTypeForm::compute_offsets() { -- klassOop k = SystemDictionary::MethodTypeForm_klass(); -- if (k != NULL) { -- compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), true); -- compute_optional_offset(_vmlayout_offset, k, vmSymbols::vmlayout_name(), vmSymbols::object_signature()); -- compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_lang_invoke_MethodType_signature(), true); -- compute_optional_offset(_genericInvoker_offset, k, vmSymbols::genericInvoker_name(), vmSymbols::java_lang_invoke_MethodHandle_signature(), true); -- if (_genericInvoker_offset == 0) _genericInvoker_offset = -1; // set to explicit "empty" value -- METHODTYPEFORM_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET); -+int java_lang_invoke_MethodType::ptype_slot_count(oop mt) { -+ objArrayOop pts = ptypes(mt); -+ int count = pts->length(); -+ int slots = 0; -+ for (int i = 0; i < count; i++) { -+ BasicType bt = java_lang_Class::as_BasicType(pts->obj_at(i)); -+ slots += type2size[bt]; - } -+ return slots; - } - --int java_lang_invoke_MethodTypeForm::vmslots(oop mtform) { -- assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); -- assert(_vmslots_offset > 0, ""); -- return mtform->int_field(_vmslots_offset); --} -- --oop 
java_lang_invoke_MethodTypeForm::vmlayout(oop mtform) { -- assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); -- assert(_vmlayout_offset > 0, ""); -- return mtform->obj_field(_vmlayout_offset); --} -- --oop java_lang_invoke_MethodTypeForm::init_vmlayout(oop mtform, oop cookie) { -- assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); -- oop previous = vmlayout(mtform); -- if (previous != NULL) { -- return previous; // someone else beat us to it -- } -- HeapWord* cookie_addr = (HeapWord*) mtform->obj_field_addr(_vmlayout_offset); -- OrderAccess::storestore(); // make sure our copy is fully committed -- previous = oopDesc::atomic_compare_exchange_oop(cookie, cookie_addr, previous); -- if (previous != NULL) { -- return previous; // someone else beat us to it -- } -- return cookie; --} -- --oop java_lang_invoke_MethodTypeForm::erasedType(oop mtform) { -- assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); -- return mtform->obj_field(_erasedType_offset); --} -- --oop java_lang_invoke_MethodTypeForm::genericInvoker(oop mtform) { -- assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); -- return mtform->obj_field(_genericInvoker_offset); -+int java_lang_invoke_MethodType::rtype_slot_count(oop mt) { -+ BasicType bt = java_lang_Class::as_BasicType(rtype(mt)); -+ return type2size[bt]; - } - - -@@ -2825,10 +2724,26 @@ - } - - oop java_lang_ClassLoader::parent(oop loader) { -- assert(loader->is_oop(), "loader must be oop"); -+ assert(is_instance(loader), "loader must be oop"); - return loader->obj_field(parent_offset); - } - -+bool java_lang_ClassLoader::isAncestor(oop loader, oop cl) { -+ assert(is_instance(loader), "loader must be oop"); -+ assert(cl == NULL || is_instance(cl), "cl argument must be oop"); -+ oop acl = loader; -+ debug_only(jint loop_count = 0); -+ // This loop taken verbatim from ClassLoader.java: -+ do { -+ acl = parent(acl); -+ if 
(cl == acl) { -+ return true; -+ } -+ assert(++loop_count > 0, "loop_count overflow"); -+ } while (acl != NULL); -+ return false; -+} -+ - - // For class loader classes, parallelCapable defined - // based on non-null field -@@ -3115,13 +3030,9 @@ - if (EnableInvokeDynamic) { - java_lang_invoke_MethodHandle::compute_offsets(); - java_lang_invoke_MemberName::compute_offsets(); -- java_lang_invoke_DirectMethodHandle::compute_offsets(); -- java_lang_invoke_BoundMethodHandle::compute_offsets(); -- java_lang_invoke_AdapterMethodHandle::compute_offsets(); -+ java_lang_invoke_LambdaForm::compute_offsets(); - java_lang_invoke_MethodType::compute_offsets(); -- java_lang_invoke_MethodTypeForm::compute_offsets(); - java_lang_invoke_CallSite::compute_offsets(); -- java_lang_invoke_CountingMethodHandle::compute_offsets(); - } - java_security_AccessControlContext::compute_offsets(); - // Initialize reflection classes. The layouts of these classes -@@ -3351,7 +3262,14 @@ - } - } - ResourceMark rm; -- tty->print_cr("Invalid layout of %s at %s", instanceKlass::cast(klass_oop)->external_name(), name()->as_C_string()); -+ tty->print_cr("Invalid layout of %s at %s/%s%s", instanceKlass::cast(klass_oop)->external_name(), name()->as_C_string(), signature()->as_C_string(), may_be_java ? 
" (may_be_java)" : ""); -+#ifndef PRODUCT -+ klass_oop->print(); -+ tty->print_cr("all fields:"); -+ for (AllFieldStream fs(instanceKlass::cast(klass_oop)); !fs.done(); fs.next()) { -+ tty->print_cr(" name: %s, sig: %s, flags: %08x", fs.name()->as_C_string(), fs.signature()->as_C_string(), fs.access_flags().as_int()); -+ } -+#endif //PRODUCT - fatal("Invalid layout of preloaded class"); - return -1; - } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/classfile/javaClasses.hpp ---- openjdk/hotspot/src/share/vm/classfile/javaClasses.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/classfile/javaClasses.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -883,19 +883,14 @@ - - // Interface to java.lang.invoke.MethodHandle objects - --#define METHODHANDLE_INJECTED_FIELDS(macro) \ -- macro(java_lang_invoke_MethodHandle, vmentry, intptr_signature, false) \ -- macro(java_lang_invoke_MethodHandle, vmtarget, object_signature, true) -- - class MethodHandleEntry; - - class java_lang_invoke_MethodHandle: AllStatic { - friend class JavaClasses; - - private: -- static int _vmentry_offset; // assembly code trampoline for MH -- static int _vmtarget_offset; // class-specific target reference -- static int _type_offset; // the MethodType of this MH -+ static int _type_offset; // the MethodType of this MH -+ static int _form_offset; // the LambdaForm of this MH - - static void compute_offsets(); - -@@ -904,13 +899,8 @@ - static oop type(oop mh); - static void set_type(oop mh, oop mtype); - -- static oop vmtarget(oop mh); -- static void set_vmtarget(oop mh, oop target); -- -- static MethodHandleEntry* vmentry(oop mh); -- static void set_vmentry(oop mh, MethodHandleEntry* data); -- -- static int vmslots(oop mh); -+ static oop form(oop mh); -+ static void set_form(oop mh, oop lform); - - // Testers - static bool is_subclass(klassOop klass) { -@@ -922,149 +912,45 @@ - - // Accessors for code generation: - static int type_offset_in_bytes() { return _type_offset; } -- static int 
vmtarget_offset_in_bytes() { return _vmtarget_offset; } -- static int vmentry_offset_in_bytes() { return _vmentry_offset; } -+ static int form_offset_in_bytes() { return _form_offset; } - }; - --#define DIRECTMETHODHANDLE_INJECTED_FIELDS(macro) \ -- macro(java_lang_invoke_DirectMethodHandle, vmindex, int_signature, true) -+// Interface to java.lang.invoke.LambdaForm objects -+// (These are a private interface for managing adapter code generation.) - --class java_lang_invoke_DirectMethodHandle: public java_lang_invoke_MethodHandle { -+class java_lang_invoke_LambdaForm: AllStatic { - friend class JavaClasses; - - private: -- static int _vmindex_offset; // negative or vtable idx or itable idx -+ static int _vmentry_offset; // type is MemberName -+ - static void compute_offsets(); - - public: - // Accessors -- static int vmindex(oop mh); -- static void set_vmindex(oop mh, int index); -+ static oop vmentry(oop lform); -+ static void set_vmentry(oop lform, oop invoker); - - // Testers - static bool is_subclass(klassOop klass) { -- return Klass::cast(klass)->is_subclass_of(SystemDictionary::DirectMethodHandle_klass()); -+ return SystemDictionary::LambdaForm_klass() != NULL && -+ Klass::cast(klass)->is_subclass_of(SystemDictionary::LambdaForm_klass()); - } - static bool is_instance(oop obj) { - return obj != NULL && is_subclass(obj->klass()); - } - - // Accessors for code generation: -- static int vmindex_offset_in_bytes() { return _vmindex_offset; } -+ static int vmentry_offset_in_bytes() { return _vmentry_offset; } - }; - --class java_lang_invoke_BoundMethodHandle: public java_lang_invoke_MethodHandle { -- friend class JavaClasses; -- -- private: -- static int _argument_offset; // argument value bound into this MH -- static int _vmargslot_offset; // relevant argument slot (<= vmslots) -- static void compute_offsets(); -- --public: -- static oop argument(oop mh); -- static void set_argument(oop mh, oop ref); -- -- static jint vmargslot(oop mh); -- static void 
set_vmargslot(oop mh, jint slot); -- -- // Testers -- static bool is_subclass(klassOop klass) { -- return Klass::cast(klass)->is_subclass_of(SystemDictionary::BoundMethodHandle_klass()); -- } -- static bool is_instance(oop obj) { -- return obj != NULL && is_subclass(obj->klass()); -- } -- -- static int argument_offset_in_bytes() { return _argument_offset; } -- static int vmargslot_offset_in_bytes() { return _vmargslot_offset; } --}; -- --class java_lang_invoke_AdapterMethodHandle: public java_lang_invoke_BoundMethodHandle { -- friend class JavaClasses; -- -- private: -- static int _conversion_offset; // type of conversion to apply -- static void compute_offsets(); -- -- public: -- static int conversion(oop mh); -- static void set_conversion(oop mh, int conv); -- -- // Testers -- static bool is_subclass(klassOop klass) { -- return Klass::cast(klass)->is_subclass_of(SystemDictionary::AdapterMethodHandle_klass()); -- } -- static bool is_instance(oop obj) { -- return obj != NULL && is_subclass(obj->klass()); -- } -- -- // Relevant integer codes (keep these in synch. 
with MethodHandleNatives.Constants): -- enum { -- OP_RETYPE_ONLY = 0x0, // no argument changes; straight retype -- OP_RETYPE_RAW = 0x1, // straight retype, trusted (void->int, Object->T) -- OP_CHECK_CAST = 0x2, // ref-to-ref conversion; requires a Class argument -- OP_PRIM_TO_PRIM = 0x3, // converts from one primitive to another -- OP_REF_TO_PRIM = 0x4, // unboxes a wrapper to produce a primitive -- OP_PRIM_TO_REF = 0x5, // boxes a primitive into a wrapper -- OP_SWAP_ARGS = 0x6, // swap arguments (vminfo is 2nd arg) -- OP_ROT_ARGS = 0x7, // rotate arguments (vminfo is displaced arg) -- OP_DUP_ARGS = 0x8, // duplicates one or more arguments (at TOS) -- OP_DROP_ARGS = 0x9, // remove one or more argument slots -- OP_COLLECT_ARGS = 0xA, // combine arguments using an auxiliary function -- OP_SPREAD_ARGS = 0xB, // expand in place a varargs array (of known size) -- OP_FOLD_ARGS = 0xC, // combine but do not remove arguments; prepend result -- //OP_UNUSED_13 = 0xD, // unused code, perhaps for reified argument lists -- CONV_OP_LIMIT = 0xE, // limit of CONV_OP enumeration -- -- CONV_OP_MASK = 0xF00, // this nybble contains the conversion op field -- CONV_TYPE_MASK = 0x0F, // fits T_ADDRESS and below -- CONV_VMINFO_MASK = 0x0FF, // LSB is reserved for JVM use -- CONV_VMINFO_SHIFT = 0, // position of bits in CONV_VMINFO_MASK -- CONV_OP_SHIFT = 8, // position of bits in CONV_OP_MASK -- CONV_DEST_TYPE_SHIFT = 12, // byte 2 has the adapter BasicType (if needed) -- CONV_SRC_TYPE_SHIFT = 16, // byte 2 has the source BasicType (if needed) -- CONV_STACK_MOVE_SHIFT = 20, // high 12 bits give signed SP change -- CONV_STACK_MOVE_MASK = (1 << (32 - CONV_STACK_MOVE_SHIFT)) - 1 -- }; -- -- static int conversion_offset_in_bytes() { return _conversion_offset; } --}; -- -- --// A simple class that maintains an invocation count --class java_lang_invoke_CountingMethodHandle: public java_lang_invoke_MethodHandle { -- friend class JavaClasses; -- -- private: -- static int _vmcount_offset; -- 
static void compute_offsets(); -- -- public: -- // Accessors -- static int vmcount(oop mh); -- static void set_vmcount(oop mh, int count); -- -- // Testers -- static bool is_subclass(klassOop klass) { -- return SystemDictionary::CountingMethodHandle_klass() != NULL && -- Klass::cast(klass)->is_subclass_of(SystemDictionary::CountingMethodHandle_klass()); -- } -- static bool is_instance(oop obj) { -- return obj != NULL && is_subclass(obj->klass()); -- } -- -- // Accessors for code generation: -- static int vmcount_offset_in_bytes() { return _vmcount_offset; } --}; -- -- - - // Interface to java.lang.invoke.MemberName objects - // (These are a private interface for Java code to query the class hierarchy.) - --#define MEMBERNAME_INJECTED_FIELDS(macro) \ -- macro(java_lang_invoke_MemberName, vmtarget, object_signature, true) -+#define MEMBERNAME_INJECTED_FIELDS(macro) \ -+ macro(java_lang_invoke_MemberName, vmindex, intptr_signature, false) \ -+ macro(java_lang_invoke_MemberName, vmtarget, object_signature, false) - - class java_lang_invoke_MemberName: AllStatic { - friend class JavaClasses; -@@ -1076,7 +962,7 @@ - // private Object type; // may be null if not yet materialized - // private int flags; // modifier bits; see reflect.Modifier - // private Object vmtarget; // VM-specific target value -- // private int vmindex; // method index within class or interface -+ // private intptr_t vmindex; // member index within class or interface - static int _clazz_offset; - static int _name_offset; - static int _type_offset; -@@ -1100,15 +986,11 @@ - static int flags(oop mname); - static void set_flags(oop mname, int flags); - -- static int modifiers(oop mname) { return (u2) flags(mname); } -- static void set_modifiers(oop mname, int mods) -- { set_flags(mname, (flags(mname) &~ (u2)-1) | (u2)mods); } -- - static oop vmtarget(oop mname); - static void set_vmtarget(oop mname, oop target); - -- static int vmindex(oop mname); -- static void set_vmindex(oop mname, int index); -+ 
static intptr_t vmindex(oop mname); -+ static void set_vmindex(oop mname, intptr_t index); - - // Testers - static bool is_subclass(klassOop klass) { -@@ -1124,9 +1006,11 @@ - MN_IS_CONSTRUCTOR = 0x00020000, // constructor - MN_IS_FIELD = 0x00040000, // field - MN_IS_TYPE = 0x00080000, // nested type -- MN_SEARCH_SUPERCLASSES = 0x00100000, // for MHN.getMembers -- MN_SEARCH_INTERFACES = 0x00200000, // for MHN.getMembers -- VM_INDEX_UNINITIALIZED = -99 -+ MN_REFERENCE_KIND_SHIFT = 24, // refKind -+ MN_REFERENCE_KIND_MASK = 0x0F000000 >> MN_REFERENCE_KIND_SHIFT, -+ // The SEARCH_* bits are not for MN.flags but for the matchFlags argument of MHN.getMembers: -+ MN_SEARCH_SUPERCLASSES = 0x00100000, // walk super classes -+ MN_SEARCH_INTERFACES = 0x00200000 // walk implemented interfaces - }; - - // Accessors for code generation: -@@ -1147,7 +1031,6 @@ - private: - static int _rtype_offset; - static int _ptypes_offset; -- static int _form_offset; - - static void compute_offsets(); - -@@ -1155,11 +1038,13 @@ - // Accessors - static oop rtype(oop mt); - static objArrayOop ptypes(oop mt); -- static oop form(oop mt); - - static oop ptype(oop mt, int index); - static int ptype_count(oop mt); - -+ static int ptype_slot_count(oop mt); // extra counts for long/double -+ static int rtype_slot_count(oop mt); // extra counts for long/double -+ - static Symbol* as_signature(oop mt, bool intern_if_not_found, TRAPS); - static void print_signature(oop mt, outputStream* st); - -@@ -1172,40 +1057,6 @@ - // Accessors for code generation: - static int rtype_offset_in_bytes() { return _rtype_offset; } - static int ptypes_offset_in_bytes() { return _ptypes_offset; } -- static int form_offset_in_bytes() { return _form_offset; } --}; -- --#define METHODTYPEFORM_INJECTED_FIELDS(macro) \ -- macro(java_lang_invoke_MethodTypeForm, vmslots, int_signature, true) \ -- macro(java_lang_invoke_MethodTypeForm, vmlayout, object_signature, true) -- --class java_lang_invoke_MethodTypeForm: AllStatic { -- 
friend class JavaClasses; -- -- private: -- static int _vmslots_offset; // number of argument slots needed -- static int _vmlayout_offset; // object describing internal calling sequence -- static int _erasedType_offset; // erasedType = canonical MethodType -- static int _genericInvoker_offset; // genericInvoker = adapter for invokeGeneric -- -- static void compute_offsets(); -- -- public: -- // Accessors -- static int vmslots(oop mtform); -- static void set_vmslots(oop mtform, int vmslots); -- -- static oop erasedType(oop mtform); -- static oop genericInvoker(oop mtform); -- -- static oop vmlayout(oop mtform); -- static oop init_vmlayout(oop mtform, oop cookie); -- -- // Accessors for code generation: -- static int vmslots_offset_in_bytes() { return _vmslots_offset; } -- static int vmlayout_offset_in_bytes() { return _vmlayout_offset; } -- static int erasedType_offset_in_bytes() { return _erasedType_offset; } -- static int genericInvoker_offset_in_bytes() { return _genericInvoker_offset; } - }; - - -@@ -1278,6 +1129,7 @@ - - public: - static oop parent(oop loader); -+ static bool isAncestor(oop loader, oop cl); - - // Support for parallelCapable field - static bool parallelCapable(oop the_class_mirror); -@@ -1287,6 +1139,14 @@ - // Fix for 4474172 - static oop non_reflection_class_loader(oop loader); - -+ // Testers -+ static bool is_subclass(klassOop klass) { -+ return Klass::cast(klass)->is_subclass_of(SystemDictionary::ClassLoader_klass()); -+ } -+ static bool is_instance(oop obj) { -+ return obj != NULL && is_subclass(obj->klass()); -+ } -+ - // Debugging - friend class JavaClasses; - }; -@@ -1441,10 +1301,7 @@ - - #define ALL_INJECTED_FIELDS(macro) \ - CLASS_INJECTED_FIELDS(macro) \ -- METHODHANDLE_INJECTED_FIELDS(macro) \ -- DIRECTMETHODHANDLE_INJECTED_FIELDS(macro) \ -- MEMBERNAME_INJECTED_FIELDS(macro) \ -- METHODTYPEFORM_INJECTED_FIELDS(macro) -+ MEMBERNAME_INJECTED_FIELDS(macro) - - // Interface to hard-coded offset checking - -diff -r 3442eb7ef2d2 -r 
19ac51ce4be7 src/share/vm/classfile/symbolTable.hpp ---- openjdk/hotspot/src/share/vm/classfile/symbolTable.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/classfile/symbolTable.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -57,12 +57,15 @@ - - // Operator= increments reference count. - void operator=(const TempNewSymbol &s) { -+ //clear(); //FIXME - _temp = s._temp; - if (_temp !=NULL) _temp->increment_refcount(); - } - - // Decrement reference counter so it can go away if it's unique -- ~TempNewSymbol() { if (_temp != NULL) _temp->decrement_refcount(); } -+ void clear() { if (_temp != NULL) _temp->decrement_refcount(); _temp = NULL; } -+ -+ ~TempNewSymbol() { clear(); } - - // Operators so they can be used like Symbols - Symbol* operator -> () const { return _temp; } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/classfile/systemDictionary.cpp ---- openjdk/hotspot/src/share/vm/classfile/systemDictionary.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/classfile/systemDictionary.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -30,6 +30,7 @@ - #include "classfile/resolutionErrors.hpp" - #include "classfile/systemDictionary.hpp" - #include "classfile/vmSymbols.hpp" -+#include "compiler/compileBroker.hpp" - #include "interpreter/bytecodeStream.hpp" - #include "interpreter/interpreter.hpp" - #include "memory/gcLocker.hpp" -@@ -193,7 +194,10 @@ - // Forwards to resolve_instance_class_or_null - - klassOop SystemDictionary::resolve_or_null(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS) { -- assert(!THREAD->is_Compiler_thread(), "Can not load classes with the Compiler thread"); -+ assert(!THREAD->is_Compiler_thread(), -+ err_msg("can not load classes with compiler thread: class=%s, classloader=%s", -+ class_name->as_C_string(), -+ class_loader.is_null() ? 
"null" : class_loader->klass()->klass_part()->name()->as_C_string())); - if (FieldType::is_array(class_name)) { - return resolve_array_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL); - } else if (FieldType::is_obj(class_name)) { -@@ -2358,72 +2362,134 @@ - } - - --methodOop SystemDictionary::find_method_handle_invoke(Symbol* name, -- Symbol* signature, -- KlassHandle accessing_klass, -- TRAPS) { -- if (!EnableInvokeDynamic) return NULL; -- vmSymbols::SID name_id = vmSymbols::find_sid(name); -- assert(name_id != vmSymbols::NO_SID, "must be a known name"); -- unsigned int hash = invoke_method_table()->compute_hash(signature, name_id); -+methodHandle SystemDictionary::find_method_handle_intrinsic(vmIntrinsics::ID iid, -+ Symbol* signature, -+ TRAPS) { -+ methodHandle empty; -+ assert(EnableInvokeDynamic, ""); -+ assert(MethodHandles::is_signature_polymorphic(iid) && -+ MethodHandles::is_signature_polymorphic_intrinsic(iid) && -+ iid != vmIntrinsics::_invokeGeneric, -+ err_msg("must be a known MH intrinsic iid=%d: %s", iid, vmIntrinsics::name_at(iid))); -+ -+ unsigned int hash = invoke_method_table()->compute_hash(signature, iid); - int index = invoke_method_table()->hash_to_index(hash); -- SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature, name_id); -- methodHandle non_cached_result; -+ SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature, iid); -+ methodHandle m; - if (spe == NULL || spe->property_oop() == NULL) { - spe = NULL; - // Must create lots of stuff here, but outside of the SystemDictionary lock. 
-- if (THREAD->is_Compiler_thread()) -- return NULL; // do not attempt from within compiler -- bool for_invokeGeneric = (name_id != vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name)); -- bool found_on_bcp = false; -- Handle mt = find_method_handle_type(signature, accessing_klass, -- for_invokeGeneric, -- found_on_bcp, CHECK_NULL); -- KlassHandle mh_klass = SystemDictionaryHandles::MethodHandle_klass(); -- methodHandle m = methodOopDesc::make_invoke_method(mh_klass, name, signature, -- mt, CHECK_NULL); -+ m = methodOopDesc::make_method_handle_intrinsic(iid, signature, CHECK_(empty)); -+ CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_highest_tier, -+ methodHandle(), CompileThreshold, "MH", CHECK_(empty)); -+ - // Now grab the lock. We might have to throw away the new method, - // if a racing thread has managed to install one at the same time. -- if (found_on_bcp) { -- MutexLocker ml(SystemDictionary_lock, Thread::current()); -- spe = invoke_method_table()->find_entry(index, hash, signature, name_id); -+ { -+ MutexLocker ml(SystemDictionary_lock, THREAD); -+ spe = invoke_method_table()->find_entry(index, hash, signature, iid); - if (spe == NULL) -- spe = invoke_method_table()->add_entry(index, hash, signature, name_id); -- if (spe->property_oop() == NULL) { -+ spe = invoke_method_table()->add_entry(index, hash, signature, iid); -+ if (spe->property_oop() == NULL) - spe->set_property_oop(m()); -- // Link m to his method type, if it is suitably generic. 
-- oop mtform = java_lang_invoke_MethodType::form(mt()); -- if (mtform != NULL && mt() == java_lang_invoke_MethodTypeForm::erasedType(mtform) -- // vmlayout must be an invokeExact: -- && name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name) -- && java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() > 0) { -- java_lang_invoke_MethodTypeForm::init_vmlayout(mtform, m()); -- } -- } -- } else { -- non_cached_result = m; - } - } -- if (spe != NULL && spe->property_oop() != NULL) { -- assert(spe->property_oop()->is_method(), ""); -- return (methodOop) spe->property_oop(); -- } else { -- return non_cached_result(); -+ -+ assert(spe != NULL && spe->property_oop() != NULL, ""); -+ m = methodOop(spe->property_oop()); -+ assert(m->is_method(), ""); -+ -+ return m; -+} -+ -+// Helper for unpacking the return value from linkMethod and linkCallSite. -+static methodHandle unpack_method_and_appendix(Handle mname, -+ objArrayHandle appendix_box, -+ Handle* appendix_result, -+ TRAPS) { -+ methodHandle empty; -+ if (mname.not_null()) { -+ oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname()); -+ if (vmtarget != NULL && vmtarget->is_method()) { -+ methodOop m = methodOop(vmtarget); -+ oop appendix = appendix_box->obj_at(0); -+ if (TraceMethodHandles) { -+ #ifndef PRODUCT -+ tty->print("Linked method="INTPTR_FORMAT": ", m); -+ m->print(); -+ if (appendix != NULL) { tty->print("appendix = "); appendix->print(); } -+ tty->cr(); -+ #endif //PRODUCT -+ } -+ (*appendix_result) = Handle(THREAD, appendix); -+ return methodHandle(THREAD, m); -+ } - } -+ THROW_MSG_(vmSymbols::java_lang_LinkageError(), "bad value from MethodHandleNatives", empty); -+ return empty; - } - -+methodHandle SystemDictionary::find_method_handle_invoker(Symbol* name, -+ Symbol* signature, -+ KlassHandle accessing_klass, -+ Handle* appendix_result, -+ TRAPS) { -+ methodHandle empty; -+ assert(EnableInvokeDynamic, ""); -+ assert(!THREAD->is_Compiler_thread(), ""); -+ Handle method_type = -+ 
SystemDictionary::find_method_handle_type(signature, accessing_klass, CHECK_(empty)); -+ if (false) { // FIXME: Decide if the Java upcall should resolve signatures. -+ method_type = java_lang_String::create_from_symbol(signature, CHECK_(empty)); -+ } -+ -+ KlassHandle mh_klass = SystemDictionaryHandles::MethodHandle_klass(); -+ int ref_kind = JVM_REF_invokeVirtual; -+ Handle name_str = StringTable::intern(name, CHECK_(empty)); -+ objArrayHandle appendix_box = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1, CHECK_(empty)); -+ assert(appendix_box->obj_at(0) == NULL, ""); -+ -+ // call java.lang.invoke.MethodHandleNatives::linkMethod(... String, MethodType) -> MemberName -+ JavaCallArguments args; -+ args.push_oop(accessing_klass()->java_mirror()); -+ args.push_int(ref_kind); -+ args.push_oop(mh_klass()->java_mirror()); -+ args.push_oop(name_str()); -+ args.push_oop(method_type()); -+ args.push_oop(appendix_box()); -+ JavaValue result(T_OBJECT); -+ JavaCalls::call_static(&result, -+ SystemDictionary::MethodHandleNatives_klass(), -+ vmSymbols::linkMethod_name(), -+ vmSymbols::linkMethod_signature(), -+ &args, CHECK_(empty)); -+ Handle mname(THREAD, (oop) result.get_jobject()); -+ return unpack_method_and_appendix(mname, appendix_box, appendix_result, THREAD); -+} -+ -+ - // Ask Java code to find or construct a java.lang.invoke.MethodType for the given - // signature, as interpreted relative to the given class loader. - // Because of class loader constraints, all method handle usage must be - // consistent with this loader. 
- Handle SystemDictionary::find_method_handle_type(Symbol* signature, - KlassHandle accessing_klass, -- bool for_invokeGeneric, -- bool& return_bcp_flag, - TRAPS) { -+ Handle empty; -+ vmIntrinsics::ID null_iid = vmIntrinsics::_none; // distinct from all method handle invoker intrinsics -+ unsigned int hash = invoke_method_table()->compute_hash(signature, null_iid); -+ int index = invoke_method_table()->hash_to_index(hash); -+ SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature, null_iid); -+ if (spe != NULL && spe->property_oop() != NULL) { -+ assert(java_lang_invoke_MethodType::is_instance(spe->property_oop()), ""); -+ return Handle(THREAD, spe->property_oop()); -+ } else if (THREAD->is_Compiler_thread()) { -+ warning("SystemDictionary::find_method_handle_type called from compiler thread"); // FIXME -+ return Handle(); // do not attempt from within compiler, unless it was cached -+ } -+ - Handle class_loader, protection_domain; - bool is_on_bcp = true; // keep this true as long as we can materialize from the boot classloader -- Handle empty; - int npts = ArgumentCount(signature).size(); - objArrayHandle pts = oopFactory::new_objArray(SystemDictionary::Class_klass(), npts, CHECK_(empty)); - int arg = 0; -@@ -2432,6 +2498,7 @@ - for (SignatureStream ss(signature); !ss.is_done(); ss.next()) { - oop mirror = NULL; - if (is_on_bcp) { -+ // Note: class_loader & protection_domain are both null at this point. - mirror = ss.as_java_mirror(class_loader, protection_domain, - SignatureStream::ReturnNull, CHECK_(empty)); - if (mirror == NULL) { -@@ -2452,9 +2519,11 @@ - rt = Handle(THREAD, mirror); - else - pts->obj_at_put(arg++, mirror); -+ - // Check accessibility. - if (ss.is_object() && accessing_klass.not_null()) { - klassOop sel_klass = java_lang_Class::as_klassOop(mirror); -+ mirror = NULL; // safety - // Emulate constantPoolOopDesc::verify_constant_pool_resolve. 
- if (Klass::cast(sel_klass)->oop_is_objArray()) - sel_klass = objArrayKlass::cast(sel_klass)->bottom_klass(); -@@ -2477,23 +2546,18 @@ - &args, CHECK_(empty)); - Handle method_type(THREAD, (oop) result.get_jobject()); - -- if (for_invokeGeneric) { -- // call java.lang.invoke.MethodHandleNatives::notifyGenericMethodType(MethodType) -> void -- JavaCallArguments args(Handle(THREAD, method_type())); -- JavaValue no_result(T_VOID); -- JavaCalls::call_static(&no_result, -- SystemDictionary::MethodHandleNatives_klass(), -- vmSymbols::notifyGenericMethodType_name(), -- vmSymbols::notifyGenericMethodType_signature(), -- &args, THREAD); -- if (HAS_PENDING_EXCEPTION) { -- // If the notification fails, just kill it. -- CLEAR_PENDING_EXCEPTION; -+ if (is_on_bcp) { -+ // We can cache this MethodType inside the JVM. -+ MutexLocker ml(SystemDictionary_lock, THREAD); -+ spe = invoke_method_table()->find_entry(index, hash, signature, null_iid); -+ if (spe == NULL) -+ spe = invoke_method_table()->add_entry(index, hash, signature, null_iid); -+ if (spe->property_oop() == NULL) { -+ spe->set_property_oop(method_type()); - } - } - -- // report back to the caller with the MethodType and the "on_bcp" flag -- return_bcp_flag = is_on_bcp; -+ // report back to the caller with the MethodType - return method_type; - } - -@@ -2508,8 +2572,7 @@ - Handle name = java_lang_String::create_from_symbol(name_sym, CHECK_(empty)); - Handle type; - if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') { -- bool ignore_is_on_bcp = false; -- type = find_method_handle_type(signature, caller, false, ignore_is_on_bcp, CHECK_(empty)); -+ type = find_method_handle_type(signature, caller, CHECK_(empty)); - } else { - ResourceMark rm(THREAD); - SignatureStream ss(signature, false); -@@ -2543,119 +2606,54 @@ - - // Ask Java code to find or construct a java.lang.invoke.CallSite for the given - // name and signature, as interpreted relative to the given class loader. 
--Handle SystemDictionary::make_dynamic_call_site(Handle bootstrap_method, -- Symbol* name, -- methodHandle signature_invoker, -- Handle info, -- methodHandle caller_method, -- int caller_bci, -- TRAPS) { -- Handle empty; -- guarantee(bootstrap_method.not_null() && -- java_lang_invoke_MethodHandle::is_instance(bootstrap_method()), -+methodHandle SystemDictionary::find_dynamic_call_site_invoker(KlassHandle caller, -+ Handle bootstrap_specifier, -+ Symbol* name, -+ Symbol* type, -+ Handle* appendix_result, -+ TRAPS) { -+ methodHandle empty; -+ Handle bsm, info; -+ if (java_lang_invoke_MethodHandle::is_instance(bootstrap_specifier())) { -+ bsm = bootstrap_specifier; -+ } else { -+ assert(bootstrap_specifier->is_objArray(), ""); -+ objArrayHandle args(THREAD, (objArrayOop) bootstrap_specifier()); -+ int len = args->length(); -+ assert(len >= 1, ""); -+ bsm = Handle(THREAD, args->obj_at(0)); -+ if (len > 1) { -+ objArrayOop args1 = oopFactory::new_objArray(SystemDictionary::Object_klass(), len-1, CHECK_(empty)); -+ for (int i = 1; i < len; i++) -+ args1->obj_at_put(i-1, args->obj_at(i)); -+ info = Handle(THREAD, args1); -+ } -+ } -+ guarantee(java_lang_invoke_MethodHandle::is_instance(bsm()), - "caller must supply a valid BSM"); - -- Handle caller_mname = MethodHandles::new_MemberName(CHECK_(empty)); -- MethodHandles::init_MemberName(caller_mname(), caller_method()); -- -- // call java.lang.invoke.MethodHandleNatives::makeDynamicCallSite(bootm, name, mtype, info, caller_mname, caller_pos) -- oop name_str_oop = StringTable::intern(name, CHECK_(empty)); // not a handle! 
-- JavaCallArguments args(Handle(THREAD, bootstrap_method())); -- args.push_oop(name_str_oop); -- args.push_oop(signature_invoker->method_handle_type()); -+ Handle method_name = java_lang_String::create_from_symbol(name, CHECK_(empty)); -+ Handle method_type = find_method_handle_type(type, caller, CHECK_(empty)); -+ -+ objArrayHandle appendix_box = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1, CHECK_(empty)); -+ assert(appendix_box->obj_at(0) == NULL, ""); -+ -+ // call java.lang.invoke.MethodHandleNatives::linkCallSite(caller, bsm, name, mtype, info, &appendix) -+ JavaCallArguments args; -+ args.push_oop(caller->java_mirror()); -+ args.push_oop(bsm()); -+ args.push_oop(method_name()); -+ args.push_oop(method_type()); - args.push_oop(info()); -- args.push_oop(caller_mname()); -- args.push_int(caller_bci); -+ args.push_oop(appendix_box); - JavaValue result(T_OBJECT); - JavaCalls::call_static(&result, - SystemDictionary::MethodHandleNatives_klass(), -- vmSymbols::makeDynamicCallSite_name(), -- vmSymbols::makeDynamicCallSite_signature(), -+ vmSymbols::linkCallSite_name(), -+ vmSymbols::linkCallSite_signature(), - &args, CHECK_(empty)); -- oop call_site_oop = (oop) result.get_jobject(); -- assert(call_site_oop->is_oop() -- /*&& java_lang_invoke_CallSite::is_instance(call_site_oop)*/, "must be sane"); -- if (TraceMethodHandles) { --#ifndef PRODUCT -- tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop); -- call_site_oop->print(); -- tty->cr(); --#endif //PRODUCT -- } -- return call_site_oop; --} -- --Handle SystemDictionary::find_bootstrap_method(methodHandle caller_method, int caller_bci, -- int cache_index, -- Handle& argument_info_result, -- TRAPS) { -- Handle empty; -- -- constantPoolHandle pool; -- { -- klassOop caller = caller_method->method_holder(); -- if (!Klass::cast(caller)->oop_is_instance()) return empty; -- pool = constantPoolHandle(THREAD, instanceKlass::cast(caller)->constants()); -- } -- -- int 
constant_pool_index = pool->cache()->entry_at(cache_index)->constant_pool_index(); -- constantTag tag = pool->tag_at(constant_pool_index); -- -- if (tag.is_invoke_dynamic()) { -- // JVM_CONSTANT_InvokeDynamic is an ordered pair of [bootm, name&type], plus optional arguments -- // The bootm, being a JVM_CONSTANT_MethodHandle, has its own cache entry. -- int bsm_index = pool->invoke_dynamic_bootstrap_method_ref_index_at(constant_pool_index); -- if (bsm_index != 0) { -- int bsm_index_in_cache = pool->cache()->entry_at(cache_index)->bootstrap_method_index_in_cache(); -- DEBUG_ONLY(int bsm_index_2 = pool->cache()->entry_at(bsm_index_in_cache)->constant_pool_index()); -- assert(bsm_index == bsm_index_2, "BSM constant lifted to cache"); -- if (TraceMethodHandles) { -- tty->print_cr("resolving bootstrap method for "PTR_FORMAT" at %d at cache[%d]CP[%d]...", -- (intptr_t) caller_method(), caller_bci, cache_index, constant_pool_index); -- } -- oop bsm_oop = pool->resolve_cached_constant_at(bsm_index_in_cache, CHECK_(empty)); -- if (TraceMethodHandles) { -- tty->print_cr("bootstrap method for "PTR_FORMAT" at %d retrieved as "PTR_FORMAT":", -- (intptr_t) caller_method(), caller_bci, (intptr_t) bsm_oop); -- } -- assert(bsm_oop->is_oop(), "must be sane"); -- // caller must verify that it is of type MethodHandle -- Handle bsm(THREAD, bsm_oop); -- bsm_oop = NULL; // safety -- -- // Extract the optional static arguments. 
-- Handle argument_info; // either null, or one arg, or Object[]{arg...} -- int argc = pool->invoke_dynamic_argument_count_at(constant_pool_index); -- if (TraceInvokeDynamic) { -- tty->print_cr("find_bootstrap_method: [%d/%d] CONSTANT_InvokeDynamic: %d[%d]", -- constant_pool_index, cache_index, bsm_index, argc); -- } -- if (argc > 0) { -- objArrayHandle arg_array; -- if (argc > 1) { -- objArrayOop arg_array_oop = oopFactory::new_objArray(SystemDictionary::Object_klass(), argc, CHECK_(empty)); -- arg_array = objArrayHandle(THREAD, arg_array_oop); -- argument_info = arg_array; -- } -- for (int arg_i = 0; arg_i < argc; arg_i++) { -- int arg_index = pool->invoke_dynamic_argument_index_at(constant_pool_index, arg_i); -- oop arg_oop = pool->resolve_possibly_cached_constant_at(arg_index, CHECK_(empty)); -- if (arg_array.is_null()) { -- argument_info = Handle(THREAD, arg_oop); -- } else { -- arg_array->obj_at_put(arg_i, arg_oop); -- } -- } -- } -- -- argument_info_result = argument_info; // return argument_info to caller -- return bsm; -- } -- } else { -- ShouldNotReachHere(); // verifier does not allow this -- } -- -- return empty; -+ Handle mname(THREAD, (oop) result.get_jobject()); -+ return unpack_method_and_appendix(mname, appendix_box, appendix_result, THREAD); - } - - // Since the identity hash code for symbols changes when the symbols are -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/classfile/systemDictionary.hpp ---- openjdk/hotspot/src/share/vm/classfile/systemDictionary.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/classfile/systemDictionary.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -147,15 +147,10 @@ - template(MethodHandle_klass, java_lang_invoke_MethodHandle, Pre_JSR292) \ - template(MemberName_klass, java_lang_invoke_MemberName, Pre_JSR292) \ - template(MethodHandleNatives_klass, java_lang_invoke_MethodHandleNatives, Pre_JSR292) \ -- template(AdapterMethodHandle_klass, java_lang_invoke_AdapterMethodHandle, Pre_JSR292) \ -- 
template(BoundMethodHandle_klass, java_lang_invoke_BoundMethodHandle, Pre_JSR292) \ -- template(DirectMethodHandle_klass, java_lang_invoke_DirectMethodHandle, Pre_JSR292) \ -+ template(LambdaForm_klass, java_lang_invoke_LambdaForm, Opt) \ - template(MethodType_klass, java_lang_invoke_MethodType, Pre_JSR292) \ -- template(MethodTypeForm_klass, java_lang_invoke_MethodTypeForm, Pre_JSR292) \ - template(BootstrapMethodError_klass, java_lang_BootstrapMethodError, Pre_JSR292) \ -- template(WrongMethodTypeException_klass, java_lang_invoke_WrongMethodTypeException, Pre_JSR292) \ - template(CallSite_klass, java_lang_invoke_CallSite, Pre_JSR292) \ -- template(CountingMethodHandle_klass, java_lang_invoke_CountingMethodHandle, Opt) \ - template(ConstantCallSite_klass, java_lang_invoke_ConstantCallSite, Pre_JSR292) \ - template(MutableCallSite_klass, java_lang_invoke_MutableCallSite, Pre_JSR292) \ - template(VolatileCallSite_klass, java_lang_invoke_VolatileCallSite, Pre_JSR292) \ -@@ -487,17 +482,24 @@ - Handle loader2, bool is_method, TRAPS); - - // JSR 292 -- // find the java.lang.invoke.MethodHandles::invoke method for a given signature -- static methodOop find_method_handle_invoke(Symbol* name, -- Symbol* signature, -- KlassHandle accessing_klass, -- TRAPS); -- // ask Java to compute a java.lang.invoke.MethodType object for a given signature -+ // find a java.lang.invoke.MethodHandle.invoke* method for a given signature -+ // (asks Java to compute it if necessary, except in a compiler thread) -+ static methodHandle find_method_handle_invoker(Symbol* name, -+ Symbol* signature, -+ KlassHandle accessing_klass, -+ Handle *appendix_result, -+ TRAPS); -+ // for a given signature, find the internal MethodHandle method (linkTo* or invokeBasic) -+ // (does not ask Java, since this is a low-level intrinsic defined by the JVM) -+ static methodHandle find_method_handle_intrinsic(vmIntrinsics::ID iid, -+ Symbol* signature, -+ TRAPS); -+ // find a java.lang.invoke.MethodType object for 
a given signature -+ // (asks Java to compute it if necessary, except in a compiler thread) - static Handle find_method_handle_type(Symbol* signature, - KlassHandle accessing_klass, -- bool for_invokeGeneric, -- bool& return_bcp_flag, - TRAPS); -+ - // ask Java to compute a java.lang.invoke.MethodHandle object for a given CP entry - static Handle link_method_handle_constant(KlassHandle caller, - int ref_kind, //e.g., JVM_REF_invokeVirtual -@@ -505,23 +507,14 @@ - Symbol* name, - Symbol* signature, - TRAPS); -+ - // ask Java to create a dynamic call site, while linking an invokedynamic op -- static Handle make_dynamic_call_site(Handle bootstrap_method, -- // Callee information: -- Symbol* name, -- methodHandle signature_invoker, -- Handle info, -- // Caller information: -- methodHandle caller_method, -- int caller_bci, -- TRAPS); -- -- // coordinate with Java about bootstrap methods -- static Handle find_bootstrap_method(methodHandle caller_method, -- int caller_bci, // N.B. must be an invokedynamic -- int cache_index, // must be corresponding main_entry -- Handle &argument_info_result, // static BSM arguments, if any -- TRAPS); -+ static methodHandle find_dynamic_call_site_invoker(KlassHandle caller, -+ Handle bootstrap_method, -+ Symbol* name, -+ Symbol* type, -+ Handle *appendix_result, -+ TRAPS); - - // Utility for printing loader "name" as part of tracing constraints - static const char* loader_name(oop loader) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/classfile/vmSymbols.cpp ---- openjdk/hotspot/src/share/vm/classfile/vmSymbols.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/classfile/vmSymbols.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -332,7 +332,14 @@ - if (cname == NULL || mname == NULL || msig == NULL) return NULL; - klassOop k = SystemDictionary::find_well_known_klass(cname); - if (k == NULL) return NULL; -- return instanceKlass::cast(k)->find_method(mname, msig); -+ methodOop m = instanceKlass::cast(k)->find_method(mname, 
msig); -+ if (m == NULL && -+ cname == vmSymbols::java_lang_invoke_MethodHandle() && -+ msig == vmSymbols::star_name()) { -+ // Any signature polymorphic method is represented by a fixed concrete signature: -+ m = instanceKlass::cast(k)->find_method(mname, vmSymbols::object_array_object_signature()); -+ } -+ return m; - } - - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/classfile/vmSymbols.hpp ---- openjdk/hotspot/src/share/vm/classfile/vmSymbols.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/classfile/vmSymbols.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -208,10 +208,12 @@ - template(newField_signature, "(Lsun/reflect/FieldInfo;)Ljava/lang/reflect/Field;") \ - template(newMethod_name, "newMethod") \ - template(newMethod_signature, "(Lsun/reflect/MethodInfo;)Ljava/lang/reflect/Method;") \ -- /* the following two names must be in order: */ \ -- template(invokeExact_name, "invokeExact") \ -- template(invokeGeneric_name, "invokeGeneric") \ -- template(invokeVarargs_name, "invokeVarargs") \ -+ template(invokeBasic_name, "invokeBasic") \ -+ template(linkToVirtual_name, "linkToVirtual") \ -+ template(linkToStatic_name, "linkToStatic") \ -+ template(linkToSpecial_name, "linkToSpecial") \ -+ template(linkToInterface_name, "linkToInterface") \ -+ template(compiledLambdaForm_name, "") /*fake name*/ \ - template(star_name, "*") /*not really a name*/ \ - template(invoke_name, "invoke") \ - template(override_name, "override") \ -@@ -232,36 +234,33 @@ - template(base_name, "base") \ - \ - /* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */ \ -- template(java_lang_invoke_InvokeDynamic, "java/lang/invoke/InvokeDynamic") \ -- template(java_lang_invoke_Linkage, "java/lang/invoke/Linkage") \ - template(java_lang_invoke_CallSite, "java/lang/invoke/CallSite") \ - template(java_lang_invoke_ConstantCallSite, "java/lang/invoke/ConstantCallSite") \ - template(java_lang_invoke_MutableCallSite, "java/lang/invoke/MutableCallSite") \ - 
template(java_lang_invoke_VolatileCallSite, "java/lang/invoke/VolatileCallSite") \ - template(java_lang_invoke_MethodHandle, "java/lang/invoke/MethodHandle") \ - template(java_lang_invoke_MethodType, "java/lang/invoke/MethodType") \ -- template(java_lang_invoke_WrongMethodTypeException, "java/lang/invoke/WrongMethodTypeException") \ - template(java_lang_invoke_MethodType_signature, "Ljava/lang/invoke/MethodType;") \ -+ template(java_lang_invoke_MemberName_signature, "Ljava/lang/invoke/MemberName;") \ -+ template(java_lang_invoke_LambdaForm_signature, "Ljava/lang/invoke/LambdaForm;") \ - template(java_lang_invoke_MethodHandle_signature, "Ljava/lang/invoke/MethodHandle;") \ - /* internal classes known only to the JVM: */ \ -- template(java_lang_invoke_MethodTypeForm, "java/lang/invoke/MethodTypeForm") \ -- template(java_lang_invoke_MethodTypeForm_signature, "Ljava/lang/invoke/MethodTypeForm;") \ - template(java_lang_invoke_MemberName, "java/lang/invoke/MemberName") \ - template(java_lang_invoke_MethodHandleNatives, "java/lang/invoke/MethodHandleNatives") \ -- template(java_lang_invoke_MethodHandleImpl, "java/lang/invoke/MethodHandleImpl") \ -- template(java_lang_invoke_AdapterMethodHandle, "java/lang/invoke/AdapterMethodHandle") \ -- template(java_lang_invoke_BoundMethodHandle, "java/lang/invoke/BoundMethodHandle") \ -- template(java_lang_invoke_DirectMethodHandle, "java/lang/invoke/DirectMethodHandle") \ -- template(java_lang_invoke_CountingMethodHandle, "java/lang/invoke/CountingMethodHandle") \ -+ template(java_lang_invoke_LambdaForm, "java/lang/invoke/LambdaForm") \ -+ template(java_lang_invoke_ForceInline_signature, "Ljava/lang/invoke/ForceInline;") \ -+ template(java_lang_invoke_DontInline_signature, "Ljava/lang/invoke/DontInline;") \ -+ template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \ -+ template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;") \ - /* internal up-calls 
made only by the JVM, via class sun.invoke.MethodHandleNatives: */ \ - template(findMethodHandleType_name, "findMethodHandleType") \ - template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \ -- template(notifyGenericMethodType_name, "notifyGenericMethodType") \ -- template(notifyGenericMethodType_signature, "(Ljava/lang/invoke/MethodType;)V") \ - template(linkMethodHandleConstant_name, "linkMethodHandleConstant") \ - template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/lang/invoke/MethodHandle;") \ -- template(makeDynamicCallSite_name, "makeDynamicCallSite") \ -- template(makeDynamicCallSite_signature, "(Ljava/lang/invoke/MethodHandle;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/Object;Ljava/lang/invoke/MemberName;I)Ljava/lang/invoke/CallSite;") \ -+ template(linkMethod_name, "linkMethod") \ -+ template(linkMethod_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/invoke/MemberName;") \ -+ template(linkCallSite_name, "linkCallSite") \ -+ template(linkCallSite_signature, "(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/invoke/MemberName;") \ - template(setTargetNormal_name, "setTargetNormal") \ - template(setTargetVolatile_name, "setTargetVolatile") \ - template(setTarget_signature, "(Ljava/lang/invoke/MethodHandle;)V") \ -@@ -355,22 +354,15 @@ - template(toString_name, "toString") \ - template(values_name, "values") \ - template(receiver_name, "receiver") \ -- template(vmmethod_name, "vmmethod") \ - template(vmtarget_name, "vmtarget") \ -+ template(vmindex_name, "vmindex") \ -+ template(vmcount_name, "vmcount") \ - template(vmentry_name, "vmentry") \ -- template(vmcount_name, "vmcount") \ -- template(vmslots_name, "vmslots") \ -- template(vmlayout_name, "vmlayout") \ -- 
template(vmindex_name, "vmindex") \ -- template(vmargslot_name, "vmargslot") \ - template(flags_name, "flags") \ -- template(argument_name, "argument") \ -- template(conversion_name, "conversion") \ - template(rtype_name, "rtype") \ - template(ptypes_name, "ptypes") \ - template(form_name, "form") \ -- template(erasedType_name, "erasedType") \ -- template(genericInvoker_name, "genericInvoker") \ -+ template(basicType_name, "basicType") \ - template(append_name, "append") \ - template(klass_name, "klass") \ - template(resolved_constructor_name, "resolved_constructor") \ -@@ -922,15 +914,15 @@ - \ - do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_object_array_object_signature, F_R) \ - /* (symbols invoke_name and invoke_signature defined above) */ \ -- do_intrinsic(_checkSpreadArgument, java_lang_invoke_MethodHandleNatives, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) \ -- do_name( checkSpreadArgument_name, "checkSpreadArgument") \ -- do_name( checkSpreadArgument_signature, "(Ljava/lang/Object;I)V") \ -- do_intrinsic(_invokeExact, java_lang_invoke_MethodHandle, invokeExact_name, object_array_object_signature, F_RN) \ -- do_intrinsic(_invokeGeneric, java_lang_invoke_MethodHandle, invokeGeneric_name, object_array_object_signature, F_RN) \ -- do_intrinsic(_invokeVarargs, java_lang_invoke_MethodHandle, invokeVarargs_name, object_array_object_signature, F_R) \ -- do_intrinsic(_invokeDynamic, java_lang_invoke_InvokeDynamic, star_name, object_array_object_signature, F_SN) \ -- \ -- do_intrinsic(_selectAlternative, java_lang_invoke_MethodHandleImpl, selectAlternative_name, selectAlternative_signature, F_S) \ -+ /* the polymorphic MH intrinsics must be in compact order, with _invokeGeneric first and _linkToInterface last */ \ -+ do_intrinsic(_invokeGeneric, java_lang_invoke_MethodHandle, invoke_name, star_name, F_RN) \ -+ do_intrinsic(_invokeBasic, java_lang_invoke_MethodHandle, invokeBasic_name, star_name, F_RN) \ -+ 
do_intrinsic(_linkToVirtual, java_lang_invoke_MethodHandle, linkToVirtual_name, star_name, F_SN) \ -+ do_intrinsic(_linkToStatic, java_lang_invoke_MethodHandle, linkToStatic_name, star_name, F_SN) \ -+ do_intrinsic(_linkToSpecial, java_lang_invoke_MethodHandle, linkToSpecial_name, star_name, F_SN) \ -+ do_intrinsic(_linkToInterface, java_lang_invoke_MethodHandle, linkToInterface_name, star_name, F_SN) \ -+ /* special marker for bytecode generated for the JVM from a LambdaForm: */ \ -+ do_intrinsic(_compiledLambdaForm, java_lang_invoke_MethodHandle, compiledLambdaForm_name, star_name, F_RN) \ - \ - /* unboxing methods: */ \ - do_intrinsic(_booleanValue, java_lang_Boolean, booleanValue_name, void_boolean_signature, F_R) \ -@@ -1063,6 +1055,10 @@ - - ID_LIMIT, - LAST_COMPILER_INLINE = _prefetchWriteStatic, -+ FIRST_MH_SIG_POLY = _invokeGeneric, -+ FIRST_MH_STATIC = _linkToVirtual, -+ LAST_MH_SIG_POLY = _linkToInterface, -+ - FIRST_ID = _none + 1 - }; - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/code/codeBlob.cpp ---- openjdk/hotspot/src/share/vm/code/codeBlob.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/code/codeBlob.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -359,43 +359,6 @@ - - - //---------------------------------------------------------------------------------------------------- --// Implementation of RicochetBlob -- --RicochetBlob::RicochetBlob( -- CodeBuffer* cb, -- int size, -- int bounce_offset, -- int exception_offset, -- int frame_size --) --: SingletonBlob("RicochetBlob", cb, sizeof(RicochetBlob), size, frame_size, (OopMapSet*) NULL) --{ -- _bounce_offset = bounce_offset; -- _exception_offset = exception_offset; --} -- -- --RicochetBlob* RicochetBlob::create( -- CodeBuffer* cb, -- int bounce_offset, -- int exception_offset, -- int frame_size) --{ -- RicochetBlob* blob = NULL; -- ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock -- { -- MutexLockerEx mu(CodeCache_lock, 
Mutex::_no_safepoint_check_flag); -- unsigned int size = allocation_size(cb, sizeof(RicochetBlob)); -- blob = new (size) RicochetBlob(cb, size, bounce_offset, exception_offset, frame_size); -- } -- -- trace_new_stub(blob, "RicochetBlob"); -- -- return blob; --} -- -- --//---------------------------------------------------------------------------------------------------- - // Implementation of DeoptimizationBlob - - DeoptimizationBlob::DeoptimizationBlob( -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/code/codeBlob.hpp ---- openjdk/hotspot/src/share/vm/code/codeBlob.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/code/codeBlob.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -35,7 +35,6 @@ - // Suptypes are: - // nmethod : Compiled Java methods (include method that calls to native code) - // RuntimeStub : Call to VM runtime methods --// RicochetBlob : Used for blocking MethodHandle adapters - // DeoptimizationBlob : Used for deoptimizatation - // ExceptionBlob : Used for stack unrolling - // SafepointBlob : Used to handle illegal instruction exceptions -@@ -99,7 +98,6 @@ - virtual bool is_buffer_blob() const { return false; } - virtual bool is_nmethod() const { return false; } - virtual bool is_runtime_stub() const { return false; } -- virtual bool is_ricochet_stub() const { return false; } - virtual bool is_deoptimization_stub() const { return false; } - virtual bool is_uncommon_trap_stub() const { return false; } - virtual bool is_exception_stub() const { return false; } -@@ -350,50 +348,6 @@ - - - //---------------------------------------------------------------------------------------------------- --// RicochetBlob --// Holds an arbitrary argument list indefinitely while Java code executes recursively. 
-- --class RicochetBlob: public SingletonBlob { -- friend class VMStructs; -- private: -- -- int _bounce_offset; -- int _exception_offset; -- -- // Creation support -- RicochetBlob( -- CodeBuffer* cb, -- int size, -- int bounce_offset, -- int exception_offset, -- int frame_size -- ); -- -- public: -- // Creation -- static RicochetBlob* create( -- CodeBuffer* cb, -- int bounce_offset, -- int exception_offset, -- int frame_size -- ); -- -- // Typing -- bool is_ricochet_stub() const { return true; } -- -- // GC for args -- void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ } -- -- address bounce_addr() const { return code_begin() + _bounce_offset; } -- address exception_addr() const { return code_begin() + _exception_offset; } -- bool returns_to_bounce_addr(address pc) const { -- address bounce_pc = bounce_addr(); -- return (pc == bounce_pc || (pc + frame::pc_return_offset) == bounce_pc); -- } --}; -- -- --//---------------------------------------------------------------------------------------------------- - // DeoptimizationBlob - - class DeoptimizationBlob: public SingletonBlob { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/code/codeCache.cpp ---- openjdk/hotspot/src/share/vm/code/codeCache.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/code/codeCache.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -796,7 +796,6 @@ - int nmethodCount = 0; - int runtimeStubCount = 0; - int adapterCount = 0; -- int ricochetStubCount = 0; - int deoptimizationStubCount = 0; - int uncommonTrapStubCount = 0; - int bufferBlobCount = 0; -@@ -841,8 +840,6 @@ - } - } else if (cb->is_runtime_stub()) { - runtimeStubCount++; -- } else if (cb->is_ricochet_stub()) { -- ricochetStubCount++; - } else if (cb->is_deoptimization_stub()) { - deoptimizationStubCount++; - } else if (cb->is_uncommon_trap_stub()) { -@@ -879,7 +876,6 @@ - tty->print_cr("runtime_stubs: %d",runtimeStubCount); - tty->print_cr("adapters: 
%d",adapterCount); - tty->print_cr("buffer blobs: %d",bufferBlobCount); -- tty->print_cr("ricochet_stubs: %d",ricochetStubCount); - tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount); - tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount); - tty->print_cr("\nnmethod size distribution (non-zombie java)"); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/code/debugInfoRec.cpp ---- openjdk/hotspot/src/share/vm/code/debugInfoRec.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/code/debugInfoRec.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -311,6 +311,7 @@ - assert(method == NULL || - (method->is_native() && bci == 0) || - (!method->is_native() && 0 <= bci && bci < method->code_size()) || -+ (method->is_compiled_lambda_form() && bci == -99) || // this might happen in C1 - bci == -1, "illegal bci"); - - // serialize the locals/expressions/monitors -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/code/nmethod.cpp ---- openjdk/hotspot/src/share/vm/code/nmethod.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/code/nmethod.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -945,8 +945,12 @@ - void nmethod::print_on(outputStream* st, const char* msg) const { - if (st != NULL) { - ttyLocker ttyl; -- CompileTask::print_compilation(st, this, msg); -- if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this); -+ if (WizardMode) { -+ CompileTask::print_compilation(st, this, msg, /*short_form:*/ true); -+ st->print_cr(" (" INTPTR_FORMAT ")", this); -+ } else { -+ CompileTask::print_compilation(st, this, msg, /*short_form:*/ false); -+ } - } - } - -@@ -964,7 +968,9 @@ - if (printmethod) { - print_code(); - print_pcs(); -- oop_maps()->print(); -+ if (oop_maps()) { -+ oop_maps()->print(); -+ } - } - if (PrintDebugInfo) { - print_scopes(); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/code/vtableStubs.hpp ---- openjdk/hotspot/src/share/vm/code/vtableStubs.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ 
openjdk/hotspot/src/share/vm/code/vtableStubs.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -55,6 +55,8 @@ - int index() const { return _index; } - static VMReg receiver_location() { return _receiver_location; } - void set_next(VtableStub* n) { _next = n; } -+ -+ public: - address code_begin() const { return (address)(this + 1); } - address code_end() const { return code_begin() + pd_code_size_limit(_is_vtable_stub); } - address entry_point() const { return code_begin(); } -@@ -65,6 +67,7 @@ - } - bool contains(address pc) const { return code_begin() <= pc && pc < code_end(); } - -+ private: - void set_exception_points(address npe_addr, address ame_addr) { - _npe_offset = npe_addr - code_begin(); - _ame_offset = ame_addr - code_begin(); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/compiler/compileBroker.cpp ---- openjdk/hotspot/src/share/vm/compiler/compileBroker.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/compiler/compileBroker.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -407,7 +407,10 @@ - if (is_osr_method) { - st->print(" @ %d", osr_bci); - } -- st->print(" (%d bytes)", method->code_size()); -+ if (method->is_native()) -+ st->print(" (native)"); -+ else -+ st->print(" (%d bytes)", method->code_size()); - } - - if (msg != NULL) { -@@ -427,12 +430,17 @@ - st->print(" "); // print compilation number - - // method attributes -- const char sync_char = method->is_synchronized() ? 's' : ' '; -- const char exception_char = method->has_exception_handlers() ? '!' : ' '; -- const char monitors_char = method->has_monitor_bytecodes() ? 'm' : ' '; -+ if (method->is_loaded()) { -+ const char sync_char = method->is_synchronized() ? 's' : ' '; -+ const char exception_char = method->has_exception_handlers() ? '!' : ' '; -+ const char monitors_char = method->has_monitor_bytecodes() ? 
'm' : ' '; - -- // print method attributes -- st->print(" %c%c%c ", sync_char, exception_char, monitors_char); -+ // print method attributes -+ st->print(" %c%c%c ", sync_char, exception_char, monitors_char); -+ } else { -+ // %s!bn -+ st->print(" "); // print method attributes -+ } - - if (TieredCompilation) { - st->print(" "); -@@ -444,7 +452,10 @@ - - st->print("@ %d ", bci); // print bci - method->print_short_name(st); -- st->print(" (%d bytes)", method->code_size()); -+ if (method->is_loaded()) -+ st->print(" (%d bytes)", method->code_size()); -+ else -+ st->print(" (not loaded)"); - - if (msg != NULL) { - st->print(" %s", msg); -@@ -1018,6 +1029,7 @@ - "sanity check"); - assert(!instanceKlass::cast(method->method_holder())->is_not_initialized(), - "method holder must be initialized"); -+ assert(!method->is_method_handle_intrinsic(), "do not enqueue these guys"); - - if (CIPrintRequests) { - tty->print("request: "); -@@ -1231,7 +1243,7 @@ - // - // Note: A native method implies non-osr compilation which is - // checked with an assertion at the entry of this method. -- if (method->is_native()) { -+ if (method->is_native() && !method->is_method_handle_intrinsic()) { - bool in_base_library; - address adr = NativeLookup::lookup(method, in_base_library, THREAD); - if (HAS_PENDING_EXCEPTION) { -@@ -1264,7 +1276,7 @@ - - // do the compilation - if (method->is_native()) { -- if (!PreferInterpreterNativeStubs) { -+ if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) { - // Acquire our lock. 
- int compile_id; - { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/compiler/compileBroker.hpp ---- openjdk/hotspot/src/share/vm/compiler/compileBroker.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/compiler/compileBroker.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -104,10 +104,10 @@ - - public: - void print_compilation(outputStream* st = tty, bool short_form = false); -- static void print_compilation(outputStream* st, const nmethod* nm, const char* msg = NULL) { -+ static void print_compilation(outputStream* st, const nmethod* nm, const char* msg = NULL, bool short_form = false) { - print_compilation_impl(st, nm->method(), nm->compile_id(), nm->comp_level(), - nm->is_osr_method(), nm->is_osr_method() ? nm->osr_entry_bci() : -1, /*is_blocking*/ false, -- msg); -+ msg, short_form); - } - - static void print_inlining(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg = NULL); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/abstractInterpreter.hpp ---- openjdk/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -99,7 +99,10 @@ - empty, // empty method (code: _return) - accessor, // accessor method (code: _aload_0, _getfield, _(a|i)return) - abstract, // abstract method (throws an AbstractMethodException) -- method_handle, // java.lang.invoke.MethodHandles::invoke -+ method_handle_invoke_FIRST, // java.lang.invoke.MethodHandles::invokeExact, etc. 
-+ method_handle_invoke_LAST = (method_handle_invoke_FIRST -+ + (vmIntrinsics::LAST_MH_SIG_POLY -+ - vmIntrinsics::FIRST_MH_SIG_POLY)), - java_lang_math_sin, // implementation of java.lang.Math.sin (x) - java_lang_math_cos, // implementation of java.lang.Math.cos (x) - java_lang_math_tan, // implementation of java.lang.Math.tan (x) -@@ -112,6 +115,14 @@ - invalid = -1 - }; - -+ // Conversion from the part of the above enum to vmIntrinsics::_invokeExact, etc. -+ static vmIntrinsics::ID method_handle_intrinsic(MethodKind kind) { -+ if (kind >= method_handle_invoke_FIRST && kind <= method_handle_invoke_LAST) -+ return (vmIntrinsics::ID)( vmIntrinsics::FIRST_MH_SIG_POLY + (kind - method_handle_invoke_FIRST) ); -+ else -+ return vmIntrinsics::_none; -+ } -+ - enum SomeConstants { - number_of_result_handlers = 10 // number of result handlers for native calls - }; -@@ -146,6 +157,9 @@ - static address entry_for_kind(MethodKind k) { assert(0 <= k && k < number_of_method_entries, "illegal kind"); return _entry_table[k]; } - static address entry_for_method(methodHandle m) { return entry_for_kind(method_kind(m)); } - -+ // used for bootstrapping method handles: -+ static void set_entry_for_kind(MethodKind k, address e); -+ - static void print_method_kind(MethodKind kind) PRODUCT_RETURN; - - static bool can_be_compiled(methodHandle m); -@@ -304,6 +318,7 @@ - void bang_stack_shadow_pages(bool native_call); - - void generate_all(); -+ void initialize_method_handle_entries(); - - public: - AbstractInterpreterGenerator(StubQueue* _code); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/bytecode.cpp ---- openjdk/hotspot/src/share/vm/interpreter/bytecode.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/bytecode.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -120,19 +120,22 @@ - - void Bytecode_invoke::verify() const { - assert(is_valid(), "check invoke"); -- assert(method()->constants()->cache() != NULL, "do not call this from verifier or 
rewriter"); -+ assert(cpcache() != NULL, "do not call this from verifier or rewriter"); - } - - --Symbol* Bytecode_member_ref::signature() const { -- constantPoolOop constants = method()->constants(); -- return constants->signature_ref_at(index()); -+Symbol* Bytecode_member_ref::klass() const { -+ return constants()->klass_ref_at_noresolve(index()); - } - - - Symbol* Bytecode_member_ref::name() const { -- constantPoolOop constants = method()->constants(); -- return constants->name_ref_at(index()); -+ return constants()->name_ref_at(index()); -+} -+ -+ -+Symbol* Bytecode_member_ref::signature() const { -+ return constants()->signature_ref_at(index()); - } - - -@@ -146,18 +149,19 @@ - methodHandle Bytecode_invoke::static_target(TRAPS) { - methodHandle m; - KlassHandle resolved_klass; -- constantPoolHandle constants(THREAD, _method->constants()); -+ constantPoolHandle constants(THREAD, this->constants()); - -- if (java_code() == Bytecodes::_invokedynamic) { -- LinkResolver::resolve_dynamic_method(m, resolved_klass, constants, index(), CHECK_(methodHandle())); -- } else if (java_code() != Bytecodes::_invokeinterface) { -- LinkResolver::resolve_method(m, resolved_klass, constants, index(), CHECK_(methodHandle())); -- } else { -- LinkResolver::resolve_interface_method(m, resolved_klass, constants, index(), CHECK_(methodHandle())); -- } -+ Bytecodes::Code bc = invoke_code(); -+ LinkResolver::resolve_method_statically(m, resolved_klass, bc, constants, index(), CHECK_(methodHandle())); - return m; - } - -+Handle Bytecode_invoke::appendix(TRAPS) { -+ ConstantPoolCacheEntry* cpce = cpcache_entry(); -+ if (cpce->has_appendix()) -+ return Handle(THREAD, cpce->f1_appendix()); -+ return Handle(); // usual case -+} - - int Bytecode_member_ref::index() const { - // Note: Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4, -@@ -170,12 +174,16 @@ - } - - int Bytecode_member_ref::pool_index() const { -+ return cpcache_entry()->constant_pool_index(); -+} -+ 
-+ConstantPoolCacheEntry* Bytecode_member_ref::cpcache_entry() const { - int index = this->index(); - DEBUG_ONLY({ - if (!has_index_u4(code())) -- index -= constantPoolOopDesc::CPCACHE_INDEX_TAG; -+ index = constantPoolOopDesc::get_cpcache_index(index); - }); -- return _method->constants()->cache()->entry_at(index)->constant_pool_index(); -+ return cpcache()->entry_at(index); - } - - // Implementation of Bytecode_field -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/bytecode.hpp ---- openjdk/hotspot/src/share/vm/interpreter/bytecode.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/bytecode.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -80,6 +80,7 @@ - - Bytecodes::Code code() const { return _code; } - Bytecodes::Code java_code() const { return Bytecodes::java_code(code()); } -+ Bytecodes::Code invoke_code() const { return (code() == Bytecodes::_invokehandle) ? code() : java_code(); } - - // Static functions for parsing bytecodes in place. - int get_index_u1(Bytecodes::Code bc) const { -@@ -195,10 +196,14 @@ - Bytecode_member_ref(methodHandle method, int bci) : Bytecode(method(), method()->bcp_from(bci)), _method(method) {} - - methodHandle method() const { return _method; } -+ constantPoolOop constants() const { return _method->constants(); } -+ constantPoolCacheOop cpcache() const { return _method->constants()->cache(); } -+ ConstantPoolCacheEntry* cpcache_entry() const; - - public: - int index() const; // cache index (loaded from instruction) - int pool_index() const; // constant pool index -+ Symbol* klass() const; // returns the klass of the method or field - Symbol* name() const; // returns the name of the method or field - Symbol* signature() const; // returns the signature of the method or field - -@@ -218,13 +223,15 @@ - - // Attributes - methodHandle static_target(TRAPS); // "specified" method (from constant pool) -+ Handle appendix(TRAPS); // if CPCE::has_appendix (from constant pool) - - // Testers -- bool 
is_invokeinterface() const { return java_code() == Bytecodes::_invokeinterface; } -- bool is_invokevirtual() const { return java_code() == Bytecodes::_invokevirtual; } -- bool is_invokestatic() const { return java_code() == Bytecodes::_invokestatic; } -- bool is_invokespecial() const { return java_code() == Bytecodes::_invokespecial; } -- bool is_invokedynamic() const { return java_code() == Bytecodes::_invokedynamic; } -+ bool is_invokeinterface() const { return invoke_code() == Bytecodes::_invokeinterface; } -+ bool is_invokevirtual() const { return invoke_code() == Bytecodes::_invokevirtual; } -+ bool is_invokestatic() const { return invoke_code() == Bytecodes::_invokestatic; } -+ bool is_invokespecial() const { return invoke_code() == Bytecodes::_invokespecial; } -+ bool is_invokedynamic() const { return invoke_code() == Bytecodes::_invokedynamic; } -+ bool is_invokehandle() const { return invoke_code() == Bytecodes::_invokehandle; } - - bool has_receiver() const { return !is_invokestatic() && !is_invokedynamic(); } - -@@ -232,15 +239,12 @@ - is_invokevirtual() || - is_invokestatic() || - is_invokespecial() || -- is_invokedynamic(); } -+ is_invokedynamic() || -+ is_invokehandle(); } - -- bool is_method_handle_invoke() const { -- return (is_invokedynamic() || -- (is_invokevirtual() && -- method()->constants()->klass_ref_at_noresolve(index()) == vmSymbols::java_lang_invoke_MethodHandle() && -- methodOopDesc::is_method_handle_invoke_name(name()))); -- } -+ bool has_appendix() { return cpcache_entry()->has_appendix(); } - -+ private: - // Helper to skip verification. 
Used is_valid() to check if the result is really an invoke - inline friend Bytecode_invoke Bytecode_invoke_check(methodHandle method, int bci); - }; -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/bytecodeInterpreter.cpp ---- openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -235,10 +235,6 @@ - #endif - #endif - --// JavaStack Implementation --#define MORE_STACK(count) \ -- (topOfStack -= ((count) * Interpreter::stackElementWords)) -- - - #define UPDATE_PC(opsize) {pc += opsize; } - /* -@@ -575,7 +571,7 @@ - - /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, - /* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer, --/* 0xE8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, -+/* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default, - /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, - - /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, -@@ -1774,7 +1770,7 @@ - - oop obj; - if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { -- obj = (oop) cache->f1(); -+ obj = (oop) cache->f1_as_instance(); - MORE_STACK(1); // Assume single slot push - } else { - obj = (oop) STACK_OBJECT(-1); -@@ -1785,7 +1781,7 @@ - // Now store the result on the stack - // - TosState tos_type = cache->flag_state(); -- int field_offset = cache->f2(); -+ int field_offset = cache->f2_as_index(); - if (cache->is_volatile()) { - if (tos_type == atos) { - VERIFY_OOP(obj->obj_field_acquire(field_offset)); -@@ -1885,7 +1881,7 @@ - --count; - } - if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { -- obj = (oop) cache->f1(); -+ obj = (oop) cache->f1_as_instance(); - } else { - --count; - obj = (oop) STACK_OBJECT(count); -@@ -1895,7 +1891,7 @@ - // - // Now store the result - // -- int field_offset = 
cache->f2(); -+ int field_offset = cache->f2_as_index(); - if (cache->is_volatile()) { - if (tos_type == itos) { - obj->release_int_field_put(field_offset, STACK_INT(-1)); -@@ -2177,13 +2173,15 @@ - // This kind of CP cache entry does not need to match the flags byte, because - // there is a 1-1 relation between bytecode type and CP entry type. - ConstantPoolCacheEntry* cache = cp->entry_at(index); -- if (cache->is_f1_null()) { -+ oop result = cache->f1_as_instance(); -+ if (result == NULL) { - CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), - handle_exception); -+ result = cache->f1_as_instance(); - } - -- VERIFY_OOP(cache->f1()); -- SET_STACK_OBJECT(cache->f1(), 0); -+ VERIFY_OOP(result); -+ SET_STACK_OBJECT(result, 0); - UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); - } - -@@ -2197,28 +2195,74 @@ - ShouldNotReachHere(); - } - -- int index = Bytes::get_native_u4(pc+1); -+ u4 index = Bytes::get_native_u4(pc+1); -+ ConstantPoolCacheEntry* cache = cp->secondary_entry_at(index); -+ oop result = cache->f1_as_instance(); - - // We are resolved if the f1 field contains a non-null object (CallSite, etc.) - // This kind of CP cache entry does not need to match the flags byte, because - // there is a 1-1 relation between bytecode type and CP entry type. - assert(constantPoolCacheOopDesc::is_secondary_index(index), "incorrect format"); -- ConstantPoolCacheEntry* cache = cp->secondary_entry_at(index); -- if (cache->is_f1_null()) { -+ if (! 
cache->is_resolved((Bytecodes::Code) opcode)) { - CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD), - handle_exception); -+ result = cache->f1_as_instance(); - } - -- VERIFY_OOP(cache->f1()); -- oop method_handle = java_lang_invoke_CallSite::target(cache->f1()); -+ VERIFY_OOP(result); -+ oop method_handle = java_lang_invoke_CallSite::target(result); - CHECK_NULL(method_handle); - -- istate->set_msg(call_method_handle); -- istate->set_callee((methodOop) method_handle); -+ methodOop method = cache->f1_as_method(); -+ VERIFY_OOP(method); -+ -+ /** Re-enabled in 7200949 -+ if (cache->has_appendix()) { -+ constantPoolOop constants = METHOD->constants(); -+ SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); -+ MORE_STACK(1); -+ } **/ -+ -+ istate->set_msg(call_method); -+ istate->set_callee(method); -+ istate->set_callee_entry_point(method->from_interpreted_entry()); - istate->set_bcp_advance(5); - - UPDATE_PC_AND_RETURN(0); // I'll be back... - } -+ CASE(_invokehandle): { -+ -+ if (!EnableInvokeDynamic) { -+ ShouldNotReachHere(); -+ } -+ -+ u2 index = Bytes::get_native_u2(pc+1); -+ ConstantPoolCacheEntry* cache = cp->entry_at(index); -+ -+ if (! cache->is_resolved((Bytecodes::Code) opcode)) { -+ CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD), -+ handle_exception); -+ cache = cp->entry_at(index); -+ } -+ -+ methodOop method = cache->f1_as_method(); -+ -+ VERIFY_OOP(method); -+ -+ /** Re-enabled in 7200949 -+ if (cache->has_appendix()) { -+ constantPoolOop constants = METHOD->constants(); -+ SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); -+ MORE_STACK(1); -+ } **/ -+ -+ istate->set_msg(call_method); -+ istate->set_callee(method); -+ istate->set_callee_entry_point(method->from_interpreted_entry()); -+ istate->set_bcp_advance(3); -+ -+ UPDATE_PC_AND_RETURN(0); // I'll be back... -+ } - - CASE(_invokeinterface): { - u2 index = Bytes::get_native_u2(pc+1); -@@ -2239,11 +2283,11 @@ - // java.lang.Object. See cpCacheOop.cpp for details. 
- // This code isn't produced by javac, but could be produced by - // another compliant java compiler. -- if (cache->is_methodInterface()) { -+ if (cache->is_forced_virtual()) { - methodOop callee; - CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); - if (cache->is_vfinal()) { -- callee = (methodOop) cache->f2(); -+ callee = cache->f2_as_vfinal_method(); - } else { - // get receiver - int parms = cache->parameter_size(); -@@ -2251,7 +2295,7 @@ - VERIFY_OOP(STACK_OBJECT(-parms)); - instanceKlass* rcvrKlass = (instanceKlass*) - STACK_OBJECT(-parms)->klass()->klass_part(); -- callee = (methodOop) rcvrKlass->start_of_vtable()[ cache->f2()]; -+ callee = (methodOop) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; - } - istate->set_callee(callee); - istate->set_callee_entry_point(callee->from_interpreted_entry()); -@@ -2266,7 +2310,7 @@ - - // this could definitely be cleaned up QQQ - methodOop callee; -- klassOop iclass = (klassOop)cache->f1(); -+ klassOop iclass = cache->f1_as_klass(); - // instanceKlass* interface = (instanceKlass*) iclass->klass_part(); - // get receiver - int parms = cache->parameter_size(); -@@ -2284,7 +2328,7 @@ - if (i == int2->itable_length()) { - VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), ""); - } -- int mindex = cache->f2(); -+ int mindex = cache->f2_as_index(); - itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); - callee = im[mindex].method(); - if (callee == NULL) { -@@ -2322,12 +2366,12 @@ - methodOop callee; - if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { - CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); -- if (cache->is_vfinal()) callee = (methodOop) cache->f2(); -+ if (cache->is_vfinal()) callee = cache->f2_as_vfinal_method(); - else { - // get receiver - int parms = cache->parameter_size(); - // this works but needs a resourcemark and seems to create a vtable on every call: -- // methodOop callee = rcvr->klass()->klass_part()->vtable()->method_at(cache->f2()); -+ // 
methodOop callee = rcvr->klass()->klass_part()->vtable()->method_at(cache->f2_as_index()); - // - // this fails with an assert - // instanceKlass* rcvrKlass = instanceKlass::cast(STACK_OBJECT(-parms)->klass()); -@@ -2350,13 +2394,13 @@ - However it seems to have a vtable in the right location. Huh? - - */ -- callee = (methodOop) rcvrKlass->start_of_vtable()[ cache->f2()]; -+ callee = (methodOop) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; - } - } else { - if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { - CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); - } -- callee = (methodOop) cache->f1(); -+ callee = cache->f1_as_method(); - } - - istate->set_callee(callee); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/bytecodeInterpreter.hpp ---- openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -50,6 +50,10 @@ - - #ifdef CC_INTERP - -+// JavaStack Implementation -+#define MORE_STACK(count) \ -+ (topOfStack -= ((count) * Interpreter::stackElementWords)) -+ - // CVM definitions find hotspot equivalents... 
- - union VMJavaVal64 { -@@ -107,7 +111,6 @@ - rethrow_exception, // unwinding and throwing exception - // requests to frame manager from C++ interpreter - call_method, // request for new frame from interpreter, manager responds with method_entry -- call_method_handle, // like the above, except the callee is a method handle - return_from_method, // request from interpreter to unwind, manager responds with method_continue - more_monitors, // need a new monitor - throwing_exception, // unwind stack and rethrow -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/bytecodes.cpp ---- openjdk/hotspot/src/share/vm/interpreter/bytecodes.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/bytecodes.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -534,6 +534,8 @@ - - def(_return_register_finalizer , "return_register_finalizer" , "b" , NULL , T_VOID , 0, true, _return); - -+ def(_invokehandle , "invokehandle" , "bJJ" , NULL , T_ILLEGAL, -1, true, _invokevirtual ); -+ - def(_fast_aldc , "fast_aldc" , "bj" , NULL , T_OBJECT, 1, true, _ldc ); - def(_fast_aldc_w , "fast_aldc_w" , "bJJ" , NULL , T_OBJECT, 1, true, _ldc_w ); - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/bytecodes.hpp ---- openjdk/hotspot/src/share/vm/interpreter/bytecodes.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/bytecodes.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -282,6 +282,9 @@ - - _return_register_finalizer , - -+ // special handling of signature-polymorphic methods: -+ _invokehandle , -+ - _shouldnotreachhere, // For debugging - - // Platform specific JVM bytecodes -@@ -356,8 +359,8 @@ - - public: - // Conversion -- static void check (Code code) { assert(is_defined(code), "illegal code"); } -- static void wide_check (Code code) { assert(wide_is_defined(code), "illegal code"); } -+ static void check (Code code) { assert(is_defined(code), err_msg("illegal code: %d", (int)code)); } -+ static void wide_check (Code code) { 
assert(wide_is_defined(code), err_msg("illegal code: %d", (int)code)); } - static Code cast (int code) { return (Code)code; } - - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/cppInterpreter.cpp ---- openjdk/hotspot/src/share/vm/interpreter/cppInterpreter.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/cppInterpreter.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -117,7 +117,6 @@ - method_entry(empty); - method_entry(accessor); - method_entry(abstract); -- method_entry(method_handle); - method_entry(java_lang_math_sin ); - method_entry(java_lang_math_cos ); - method_entry(java_lang_math_tan ); -@@ -126,6 +125,9 @@ - method_entry(java_lang_math_log ); - method_entry(java_lang_math_log10 ); - method_entry(java_lang_ref_reference_get); -+ -+ initialize_method_handle_entries(); -+ - Interpreter::_native_entry_begin = Interpreter::code()->code_end(); - method_entry(native); - method_entry(native_synchronized); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/interpreter.cpp ---- openjdk/hotspot/src/share/vm/interpreter/interpreter.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/interpreter.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -37,6 +37,7 @@ - #include "oops/oop.inline.hpp" - #include "prims/forte.hpp" - #include "prims/jvmtiExport.hpp" -+#include "prims/methodHandles.hpp" - #include "runtime/handles.inline.hpp" - #include "runtime/sharedRuntime.hpp" - #include "runtime/stubRoutines.hpp" -@@ -180,14 +181,21 @@ - // Abstract method? - if (m->is_abstract()) return abstract; - -- // Invoker for method handles? -- if (m->is_method_handle_invoke()) return method_handle; -+ // Method handle primitive? 
-+ if (m->is_method_handle_intrinsic()) { -+ vmIntrinsics::ID id = m->intrinsic_id(); -+ assert(MethodHandles::is_signature_polymorphic(id), "must match an intrinsic"); -+ MethodKind kind = (MethodKind)( method_handle_invoke_FIRST + -+ ((int)id - vmIntrinsics::FIRST_MH_SIG_POLY) ); -+ assert(kind <= method_handle_invoke_LAST, "parallel enum ranges"); -+ return kind; -+ } - - // Native method? - // Note: This test must come _before_ the test for intrinsic - // methods. See also comments below. - if (m->is_native()) { -- assert(!m->is_method_handle_invoke(), "overlapping bits here, watch out"); -+ assert(!m->is_method_handle_intrinsic(), "overlapping bits here, watch out"); - return m->is_synchronized() ? native_synchronized : native; - } - -@@ -237,6 +245,14 @@ - } - - -+void AbstractInterpreter::set_entry_for_kind(AbstractInterpreter::MethodKind kind, address entry) { -+ assert(kind >= method_handle_invoke_FIRST && -+ kind <= method_handle_invoke_LAST, "late initialization only for MH entry points"); -+ assert(_entry_table[kind] == _entry_table[abstract], "previous value must be AME entry"); -+ _entry_table[kind] = entry; -+} -+ -+ - // Return true if the interpreter can prove that the given bytecode has - // not yet been executed (in Java semantics, not in actual operation). 
- bool AbstractInterpreter::is_not_reached(methodHandle method, int bci) { -@@ -268,7 +284,6 @@ - case empty : tty->print("empty" ); break; - case accessor : tty->print("accessor" ); break; - case abstract : tty->print("abstract" ); break; -- case method_handle : tty->print("method_handle" ); break; - case java_lang_math_sin : tty->print("java_lang_math_sin" ); break; - case java_lang_math_cos : tty->print("java_lang_math_cos" ); break; - case java_lang_math_tan : tty->print("java_lang_math_tan" ); break; -@@ -276,7 +291,16 @@ - case java_lang_math_sqrt : tty->print("java_lang_math_sqrt" ); break; - case java_lang_math_log : tty->print("java_lang_math_log" ); break; - case java_lang_math_log10 : tty->print("java_lang_math_log10" ); break; -- default : ShouldNotReachHere(); -+ default: -+ if (kind >= method_handle_invoke_FIRST && -+ kind <= method_handle_invoke_LAST) { -+ const char* kind_name = vmIntrinsics::name_at(method_handle_intrinsic(kind)); -+ if (kind_name[0] == '_') kind_name = &kind_name[1]; // '_invokeExact' => 'invokeExact' -+ tty->print("method_handle_%s", kind_name); -+ break; -+ } -+ ShouldNotReachHere(); -+ break; - } - } - #endif // PRODUCT -@@ -436,3 +460,11 @@ - } - } - } -+ -+void AbstractInterpreterGenerator::initialize_method_handle_entries() { -+ // method handle entry kinds are generated later in MethodHandlesAdapterGenerator::generate: -+ for (int i = Interpreter::method_handle_invoke_FIRST; i <= Interpreter::method_handle_invoke_LAST; i++) { -+ Interpreter::MethodKind kind = (Interpreter::MethodKind) i; -+ Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract]; -+ } -+} -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/interpreterRuntime.cpp ---- openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -145,7 +145,7 @@ - // The bytecode wrappers aren't GC-safe 
so construct a new one - Bytecode_loadconstant ldc2(m, bci(thread)); - ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc2.cache_index()); -- assert(result == cpce->f1(), "expected result for assembly code"); -+ assert(result == cpce->f1_as_instance(), "expected result for assembly code"); - } - #endif - } -@@ -674,7 +674,7 @@ - JvmtiExport::post_raw_breakpoint(thread, method, bcp); - IRT_END - --IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode)) -+IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode)) { - // extract receiver from the outgoing argument list if necessary - Handle receiver(thread, NULL); - if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface) { -@@ -742,86 +742,54 @@ - info.resolved_method(), - info.vtable_index()); - } -+} -+IRT_END -+ -+ -+// First time execution: Resolve symbols, create a permanent MethodType object. -+IRT_ENTRY(void, InterpreterRuntime::resolve_invokehandle(JavaThread* thread)) { -+ assert(EnableInvokeDynamic, ""); -+ const Bytecodes::Code bytecode = Bytecodes::_invokehandle; -+ -+ // resolve method -+ CallInfo info; -+ constantPoolHandle pool(thread, method(thread)->constants()); -+ -+ { -+ JvmtiHideSingleStepping jhss(thread); -+ LinkResolver::resolve_invoke(info, Handle(), pool, -+ get_index_u2_cpcache(thread, bytecode), bytecode, CHECK); -+ } // end JvmtiHideSingleStepping -+ -+ cache_entry(thread)->set_method_handle( -+ info.resolved_method(), -+ info.resolved_appendix()); -+} - IRT_END - - - // First time execution: Resolve symbols, create a permanent CallSite object. - IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) { -- ResourceMark rm(thread); -- - assert(EnableInvokeDynamic, ""); -- - const Bytecodes::Code bytecode = Bytecodes::_invokedynamic; - -- methodHandle caller_method(thread, method(thread)); -+ //TO DO: consider passing BCI to Java. 
-+ // int caller_bci = method(thread)->bci_from(bcp(thread)); - -- constantPoolHandle pool(thread, caller_method->constants()); -- pool->set_invokedynamic(); // mark header to flag active call sites -+ // resolve method -+ CallInfo info; -+ constantPoolHandle pool(thread, method(thread)->constants()); -+ int index = get_index_u4(thread, bytecode); - -- int caller_bci = 0; -- int site_index = 0; -- { address caller_bcp = bcp(thread); -- caller_bci = caller_method->bci_from(caller_bcp); -- site_index = Bytes::get_native_u4(caller_bcp+1); -- } -- assert(site_index == InterpreterRuntime::bytecode(thread).get_index_u4(bytecode), ""); -- assert(constantPoolCacheOopDesc::is_secondary_index(site_index), "proper format"); -- // there is a second CPC entries that is of interest; it caches signature info: -- int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index(); -- int pool_index = pool->cache()->entry_at(main_index)->constant_pool_index(); -+ { -+ JvmtiHideSingleStepping jhss(thread); -+ LinkResolver::resolve_invoke(info, Handle(), pool, -+ index, bytecode, CHECK); -+ } // end JvmtiHideSingleStepping - -- // first resolve the signature to a MH.invoke methodOop -- if (!pool->cache()->entry_at(main_index)->is_resolved(bytecode)) { -- JvmtiHideSingleStepping jhss(thread); -- CallInfo callinfo; -- LinkResolver::resolve_invoke(callinfo, Handle(), pool, -- site_index, bytecode, CHECK); -- // The main entry corresponds to a JVM_CONSTANT_InvokeDynamic, and serves -- // as a common reference point for all invokedynamic call sites with -- // that exact call descriptor. We will link it in the CP cache exactly -- // as if it were an invokevirtual of MethodHandle.invoke. -- pool->cache()->entry_at(main_index)->set_method( -- bytecode, -- callinfo.resolved_method(), -- callinfo.vtable_index()); -- } -- -- // The method (f2 entry) of the main entry is the MH.invoke for the -- // invokedynamic target call signature. 
-- oop f1_value = pool->cache()->entry_at(main_index)->f1(); -- methodHandle signature_invoker(THREAD, (methodOop) f1_value); -- assert(signature_invoker.not_null() && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(), -- "correct result from LinkResolver::resolve_invokedynamic"); -- -- Handle info; // optional argument(s) in JVM_CONSTANT_InvokeDynamic -- Handle bootm = SystemDictionary::find_bootstrap_method(caller_method, caller_bci, -- main_index, info, CHECK); -- if (!java_lang_invoke_MethodHandle::is_instance(bootm())) { -- THROW_MSG(vmSymbols::java_lang_IllegalStateException(), -- "no bootstrap method found for invokedynamic"); -- } -- -- // Short circuit if CallSite has been bound already: -- if (!pool->cache()->secondary_entry_at(site_index)->is_f1_null()) -- return; -- -- Symbol* call_site_name = pool->name_ref_at(site_index); -- -- Handle call_site -- = SystemDictionary::make_dynamic_call_site(bootm, -- // Callee information: -- call_site_name, -- signature_invoker, -- info, -- // Caller information: -- caller_method, -- caller_bci, -- CHECK); -- -- // In the secondary entry, the f1 field is the call site, and the f2 (index) -- // field is some data about the invoke site. Currently, it is just the BCI. -- // Later, it might be changed to help manage inlining dependencies. 
-- pool->cache()->secondary_entry_at(site_index)->set_dynamic_call(call_site, signature_invoker); -+ pool->cache()->secondary_entry_at(index)->set_dynamic_call( -+ info.resolved_method(), -+ info.resolved_appendix()); - } - IRT_END - -@@ -993,7 +961,7 @@ - - // check the access_flags for the field in the klass - -- instanceKlass* ik = instanceKlass::cast(java_lang_Class::as_klassOop(cp_entry->f1())); -+ instanceKlass* ik = instanceKlass::cast(java_lang_Class::as_klassOop(cp_entry->f1_as_klass_mirror())); - int index = cp_entry->field_index(); - if ((ik->field_access_flags(index) & JVM_ACC_FIELD_ACCESS_WATCHED) == 0) return; - -@@ -1016,15 +984,15 @@ - // non-static field accessors have an object, but we need a handle - h_obj = Handle(thread, obj); - } -- instanceKlassHandle h_cp_entry_f1(thread, java_lang_Class::as_klassOop(cp_entry->f1())); -- jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_cp_entry_f1, cp_entry->f2(), is_static); -+ instanceKlassHandle h_cp_entry_f1(thread, java_lang_Class::as_klassOop(cp_entry->f1_as_klass_mirror())); -+ jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_cp_entry_f1, cp_entry->f2_as_index(), is_static); - JvmtiExport::post_field_access(thread, method(thread), bcp(thread), h_cp_entry_f1, h_obj, fid); - IRT_END - - IRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread *thread, - oopDesc* obj, ConstantPoolCacheEntry *cp_entry, jvalue *value)) - -- klassOop k = java_lang_Class::as_klassOop(cp_entry->f1()); -+ klassOop k = java_lang_Class::as_klassOop(cp_entry->f1_as_klass_mirror()); - - // check the access_flags for the field in the klass - instanceKlass* ik = instanceKlass::cast(k); -@@ -1049,7 +1017,7 @@ - - HandleMark hm(thread); - instanceKlassHandle h_klass(thread, k); -- jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_klass, cp_entry->f2(), is_static); -+ jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_klass, cp_entry->f2_as_index(), is_static); - jvalue fvalue; - #ifdef _LP64 - fvalue = *value; -diff -r 
3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/interpreterRuntime.hpp ---- openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -71,6 +71,8 @@ - { return bytecode(thread).get_index_u2(bc); } - static int get_index_u2_cpcache(JavaThread *thread, Bytecodes::Code bc) - { return bytecode(thread).get_index_u2_cpcache(bc); } -+ static int get_index_u4(JavaThread *thread, Bytecodes::Code bc) -+ { return bytecode(thread).get_index_u4(bc); } - static int number_of_dimensions(JavaThread *thread) { return bcp(thread)[3]; } - - static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i) { return method(thread)->constants()->cache()->entry_at(i); } -@@ -118,6 +120,7 @@ - - // Calls - static void resolve_invoke (JavaThread* thread, Bytecodes::Code bytecode); -+ static void resolve_invokehandle (JavaThread* thread); - static void resolve_invokedynamic(JavaThread* thread); - - // Breakpoints -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/linkResolver.cpp ---- openjdk/hotspot/src/share/vm/interpreter/linkResolver.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/linkResolver.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -96,15 +96,21 @@ - void CallInfo::set_virtual(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) { - assert(vtable_index >= 0 || vtable_index == methodOopDesc::nonvirtual_vtable_index, "valid index"); - set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK); -+ assert(!resolved_method->is_compiled_lambda_form(), "these must be handled via an invokehandle call"); - } - --void CallInfo::set_dynamic(methodHandle resolved_method, TRAPS) { -- assert(resolved_method->is_method_handle_invoke(), ""); -+void CallInfo::set_handle(methodHandle 
resolved_method, Handle resolved_appendix, TRAPS) { -+ if (resolved_method.is_null()) { -+ THROW_MSG(vmSymbols::java_lang_InternalError(), "resolved method is null"); -+ } - KlassHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass(); -- assert(resolved_klass == resolved_method->method_holder(), ""); -+ assert(resolved_method->intrinsic_id() == vmIntrinsics::_invokeBasic || -+ resolved_method->is_compiled_lambda_form(), -+ "linkMethod must return one of these"); - int vtable_index = methodOopDesc::nonvirtual_vtable_index; - assert(resolved_method->vtable_index() == vtable_index, ""); -- set_common(resolved_klass, KlassHandle(), resolved_method, resolved_method, vtable_index, CHECK); -+ set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK); -+ _resolved_appendix = resolved_appendix; - } - - void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) { -@@ -114,6 +120,7 @@ - _resolved_method = resolved_method; - _selected_method = selected_method; - _vtable_index = vtable_index; -+ _resolved_appendix = Handle(); - if (CompilationPolicy::must_be_compiled(selected_method)) { - // This path is unusual, mostly used by the '-Xcomp' stress test mode. - -@@ -180,11 +187,9 @@ - void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) { - methodOop result_oop = klass->uncached_lookup_method(name, signature); - if (EnableInvokeDynamic && result_oop != NULL) { -- switch (result_oop->intrinsic_id()) { -- case vmIntrinsics::_invokeExact: -- case vmIntrinsics::_invokeGeneric: -- case vmIntrinsics::_invokeDynamic: -- // Do not link directly to these. The VM must produce a synthetic one using lookup_implicit_method. 
-+ vmIntrinsics::ID iid = result_oop->intrinsic_id(); -+ if (MethodHandles::is_signature_polymorphic(iid)) { -+ // Do not link directly to these. The VM must produce a synthetic one using lookup_polymorphic_method. - return; - } - } -@@ -213,31 +218,97 @@ - result = methodHandle(THREAD, ik->lookup_method_in_all_interfaces(name, signature)); - } - --void LinkResolver::lookup_implicit_method(methodHandle& result, -- KlassHandle klass, Symbol* name, Symbol* signature, -- KlassHandle current_klass, -- TRAPS) { -+void LinkResolver::lookup_polymorphic_method(methodHandle& result, -+ KlassHandle klass, Symbol* name, Symbol* full_signature, -+ KlassHandle current_klass, -+ Handle* appendix_result_or_null, -+ TRAPS) { -+ vmIntrinsics::ID iid = MethodHandles::signature_polymorphic_name_id(name); -+ if (TraceMethodHandles) { -+ tty->print_cr("lookup_polymorphic_method iid=%s %s.%s%s", -+ vmIntrinsics::name_at(iid), klass->external_name(), -+ name->as_C_string(), full_signature->as_C_string()); -+ } - if (EnableInvokeDynamic && - klass() == SystemDictionary::MethodHandle_klass() && -- methodOopDesc::is_method_handle_invoke_name(name)) { -- if (!THREAD->is_Compiler_thread() && !MethodHandles::enabled()) { -- // Make sure the Java part of the runtime has been booted up. -- klassOop natives = SystemDictionary::MethodHandleNatives_klass(); -- if (natives == NULL || instanceKlass::cast(natives)->is_not_initialized()) { -- SystemDictionary::resolve_or_fail(vmSymbols::java_lang_invoke_MethodHandleNatives(), -- Handle(), -- Handle(), -- true, -- CHECK); -+ iid != vmIntrinsics::_none) { -+ if (MethodHandles::is_signature_polymorphic_intrinsic(iid)) { -+ // Most of these do not need an up-call to Java to resolve, so can be done anywhere. -+ // Do not erase last argument type (MemberName) if it is a static linkTo method. 
-+ bool keep_last_arg = MethodHandles::is_signature_polymorphic_static(iid); -+ TempNewSymbol basic_signature = -+ MethodHandles::lookup_basic_type_signature(full_signature, keep_last_arg, CHECK); -+ if (TraceMethodHandles) { -+ tty->print_cr("lookup_polymorphic_method %s %s => basic %s", -+ name->as_C_string(), -+ full_signature->as_C_string(), -+ basic_signature->as_C_string()); - } -- } -- methodOop result_oop = SystemDictionary::find_method_handle_invoke(name, -- signature, -- current_klass, -- CHECK); -- if (result_oop != NULL) { -- assert(result_oop->is_method_handle_invoke() && result_oop->signature() == signature, "consistent"); -- result = methodHandle(THREAD, result_oop); -+ result = SystemDictionary::find_method_handle_intrinsic(iid, -+ basic_signature, -+ CHECK); -+ if (result.not_null()) { -+ assert(result->is_method_handle_intrinsic(), "MH.invokeBasic or MH.linkTo* intrinsic"); -+ assert(result->intrinsic_id() != vmIntrinsics::_invokeGeneric, "wrong place to find this"); -+ assert(basic_signature == result->signature(), "predict the result signature"); -+ if (TraceMethodHandles) { -+ tty->print("lookup_polymorphic_method => intrinsic "); -+ result->print_on(tty); -+ } -+ return; -+ } -+ } else if (iid == vmIntrinsics::_invokeGeneric -+ && !THREAD->is_Compiler_thread() -+ && appendix_result_or_null != NULL) { -+ // This is a method with type-checking semantics. -+ // We will ask Java code to spin an adapter method for it. -+ if (!MethodHandles::enabled()) { -+ // Make sure the Java part of the runtime has been booted up. 
-+ klassOop natives = SystemDictionary::MethodHandleNatives_klass(); -+ if (natives == NULL || instanceKlass::cast(natives)->is_not_initialized()) { -+ SystemDictionary::resolve_or_fail(vmSymbols::java_lang_invoke_MethodHandleNatives(), -+ Handle(), -+ Handle(), -+ true, -+ CHECK); -+ } -+ } -+ -+ Handle appendix; -+ result = SystemDictionary::find_method_handle_invoker(name, -+ full_signature, -+ current_klass, -+ &appendix, -+ CHECK); -+ if (TraceMethodHandles) { -+ tty->print("lookup_polymorphic_method => (via Java) "); -+ result->print_on(tty); -+ tty->print(" lookup_polymorphic_method => appendix = "); -+ if (appendix.is_null()) tty->print_cr("(none)"); -+ else appendix->print_on(tty); -+ } -+ if (result.not_null()) { -+#ifdef ASSERT -+ TempNewSymbol basic_signature = -+ MethodHandles::lookup_basic_type_signature(full_signature, CHECK); -+ int actual_size_of_params = result->size_of_parameters(); -+ int expected_size_of_params = ArgumentSizeComputer(basic_signature).size(); -+ // +1 for MethodHandle.this, +1 for trailing MethodType -+ if (!MethodHandles::is_signature_polymorphic_static(iid)) expected_size_of_params += 1; -+ if (appendix.not_null()) expected_size_of_params += 1; -+ if (actual_size_of_params != expected_size_of_params) { -+ tty->print_cr("*** basic_signature=%s", basic_signature->as_C_string()); -+ tty->print_cr("*** result for %s: ", vmIntrinsics::name_at(iid)); -+ result->print(); -+ } -+ assert(actual_size_of_params == expected_size_of_params, -+ err_msg("%d != %d", actual_size_of_params, expected_size_of_params)); -+#endif //ASSERT -+ -+ assert(appendix_result_or_null != NULL, ""); -+ (*appendix_result_or_null) = appendix; -+ return; -+ } - } - } - } -@@ -267,6 +338,7 @@ - new_flags = new_flags | JVM_ACC_PUBLIC; - flags.set_flags(new_flags); - } -+// assert(extra_arg_result_or_null != NULL, "must be able to return extra argument"); - - if (!Reflection::verify_field_access(ref_klass->as_klassOop(), - resolved_klass->as_klassOop(), -@@ -287,10 
+359,19 @@ - } - } - --void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle& resolved_klass, -- constantPoolHandle pool, int index, TRAPS) { -+void LinkResolver::resolve_method_statically(methodHandle& resolved_method, KlassHandle& resolved_klass, -+ Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS) { - - // resolve klass -+ if (code == Bytecodes::_invokedynamic) { -+ resolved_klass = SystemDictionaryHandles::MethodHandle_klass(); -+ Symbol* method_name = vmSymbols::invoke_name(); -+ Symbol* method_signature = pool->signature_ref_at(index); -+ KlassHandle current_klass(THREAD, pool->pool_holder()); -+ resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK); -+ return; -+ } -+ - resolve_klass(resolved_klass, pool, index, CHECK); - - Symbol* method_name = pool->name_ref_at(index); -@@ -299,7 +380,7 @@ - - if (pool->has_preresolution() - || (resolved_klass() == SystemDictionary::MethodHandle_klass() && -- methodOopDesc::is_method_handle_invoke_name(method_name))) { -+ MethodHandles::is_signature_polymorphic_name(resolved_klass(), method_name))) { - methodOop result_oop = constantPoolOopDesc::method_at_if_loaded(pool, index); - if (result_oop != NULL) { - resolved_method = methodHandle(THREAD, result_oop); -@@ -307,33 +388,13 @@ - } - } - -- resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK); -+ if (code == Bytecodes::_invokeinterface) { -+ resolve_interface_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK); -+ } else { -+ resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK); -+ } - } - --void LinkResolver::resolve_dynamic_method(methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS) { -- // The class is java.lang.invoke.MethodHandle -- resolved_klass = 
SystemDictionaryHandles::MethodHandle_klass(); -- -- Symbol* method_name = vmSymbols::invokeExact_name(); -- -- Symbol* method_signature = pool->signature_ref_at(index); -- KlassHandle current_klass (THREAD, pool->pool_holder()); -- -- resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK); --} -- --void LinkResolver::resolve_interface_method(methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS) { -- -- // resolve klass -- resolve_klass(resolved_klass, pool, index, CHECK); -- Symbol* method_name = pool->name_ref_at(index); -- Symbol* method_signature = pool->signature_ref_at(index); -- KlassHandle current_klass(THREAD, pool->pool_holder()); -- -- resolve_interface_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK); --} -- -- - void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle resolved_klass, - Symbol* method_name, Symbol* method_signature, - KlassHandle current_klass, bool check_access, TRAPS) { -@@ -346,6 +407,8 @@ - THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); - } - -+ Handle nested_exception; -+ - // 2. lookup method in resolved klass and its super klasses - lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK); - -@@ -354,17 +417,23 @@ - lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK); - - if (resolved_method.is_null()) { -- // JSR 292: see if this is an implicitly generated method MethodHandle.invoke(*...) 
-- lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, CHECK); -+ // JSR 292: see if this is an implicitly generated method MethodHandle.linkToVirtual(*...), etc -+ lookup_polymorphic_method(resolved_method, resolved_klass, method_name, method_signature, -+ current_klass, (Handle*)NULL, THREAD); -+ if (HAS_PENDING_EXCEPTION) { -+ nested_exception = Handle(THREAD, PENDING_EXCEPTION); -+ CLEAR_PENDING_EXCEPTION; -+ } - } - - if (resolved_method.is_null()) { - // 4. method lookup failed - ResourceMark rm(THREAD); -- THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(), -- methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), -- method_name, -- method_signature)); -+ THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(), -+ methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), -+ method_name, -+ method_signature), -+ nested_exception); - } - } - -@@ -1053,6 +1122,7 @@ - case Bytecodes::_invokestatic : resolve_invokestatic (result, pool, index, CHECK); break; - case Bytecodes::_invokespecial : resolve_invokespecial (result, pool, index, CHECK); break; - case Bytecodes::_invokevirtual : resolve_invokevirtual (result, recv, pool, index, CHECK); break; -+ case Bytecodes::_invokehandle : resolve_invokehandle (result, pool, index, CHECK); break; - case Bytecodes::_invokedynamic : resolve_invokedynamic (result, pool, index, CHECK); break; - case Bytecodes::_invokeinterface: resolve_invokeinterface(result, recv, pool, index, CHECK); break; - } -@@ -1116,22 +1186,91 @@ - } - - --void LinkResolver::resolve_invokedynamic(CallInfo& result, constantPoolHandle pool, int raw_index, TRAPS) { -+void LinkResolver::resolve_invokehandle(CallInfo& result, constantPoolHandle pool, int index, TRAPS) { - assert(EnableInvokeDynamic, ""); -+ // This guy is reached from InterpreterRuntime::resolve_invokehandle. 
-+ KlassHandle resolved_klass; -+ Symbol* method_name = NULL; -+ Symbol* method_signature = NULL; -+ KlassHandle current_klass; -+ resolve_pool(resolved_klass, method_name, method_signature, current_klass, pool, index, CHECK); -+ if (TraceMethodHandles) -+ tty->print_cr("resolve_invokehandle %s %s", method_name->as_C_string(), method_signature->as_C_string()); -+ resolve_handle_call(result, resolved_klass, method_name, method_signature, current_klass, CHECK); -+} - -- // This guy is reached from InterpreterRuntime::resolve_invokedynamic. -+void LinkResolver::resolve_handle_call(CallInfo& result, KlassHandle resolved_klass, -+ Symbol* method_name, Symbol* method_signature, -+ KlassHandle current_klass, -+ TRAPS) { -+ // JSR 292: this must be an implicitly generated method MethodHandle.invokeExact(*...) or similar -+ assert(resolved_klass() == SystemDictionary::MethodHandle_klass(), ""); -+ assert(MethodHandles::is_signature_polymorphic_name(method_name), ""); -+ methodHandle resolved_method; -+ Handle resolved_appendix; -+ lookup_polymorphic_method(resolved_method, resolved_klass, -+ method_name, method_signature, -+ current_klass, &resolved_appendix, CHECK); -+ result.set_handle(resolved_method, resolved_appendix, CHECK); -+} - -- // At this point, we only need the signature, and can ignore the name. -- Symbol* method_signature = pool->signature_ref_at(raw_index); // raw_index works directly -- Symbol* method_name = vmSymbols::invokeExact_name(); -- KlassHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass(); - -- // JSR 292: this must be an implicitly generated method MethodHandle.invokeExact(*...) -- // The extra MH receiver will be inserted into the stack on every call. 
-- methodHandle resolved_method; -- KlassHandle current_klass(THREAD, pool->pool_holder()); -- lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, THREAD); -+void LinkResolver::resolve_invokedynamic(CallInfo& result, constantPoolHandle pool, int index, TRAPS) { -+ assert(EnableInvokeDynamic, ""); -+ pool->set_invokedynamic(); // mark header to flag active call sites -+ -+ //resolve_pool(, method_name, method_signature, current_klass, pool, index, CHECK); -+ Symbol* method_name = pool->name_ref_at(index); -+ Symbol* method_signature = pool->signature_ref_at(index); -+ KlassHandle current_klass = KlassHandle(THREAD, pool->pool_holder()); -+ -+ // Resolve the bootstrap specifier (BSM + optional arguments). -+ Handle bootstrap_specifier; -+ // Check if CallSite has been bound already: -+ ConstantPoolCacheEntry* cpce = pool->cache()->secondary_entry_at(index); -+ if (cpce->is_f1_null()) { -+ int pool_index = pool->cache()->main_entry_at(index)->constant_pool_index(); -+ oop bsm_info = pool->resolve_bootstrap_specifier_at(pool_index, CHECK); -+ assert(bsm_info != NULL, ""); -+ // FIXME: Cache this once per BootstrapMethods entry, not once per CONSTANT_InvokeDynamic. -+ bootstrap_specifier = Handle(THREAD, bsm_info); -+ } -+ if (!cpce->is_f1_null()) { -+ methodHandle method(THREAD, cpce->f2_as_vfinal_method()); -+ Handle appendix(THREAD, cpce->has_appendix() ? 
cpce->f1_appendix() : (oop)NULL); -+ result.set_handle(method, appendix, CHECK); -+ return; -+ } -+ -+ if (TraceMethodHandles) { -+ tty->print_cr("resolve_invokedynamic #%d %s %s", -+ constantPoolCacheOopDesc::decode_secondary_index(index), -+ method_name->as_C_string(), method_signature->as_C_string()); -+ tty->print(" BSM info: "); bootstrap_specifier->print(); -+ } -+ -+ resolve_dynamic_call(result, bootstrap_specifier, method_name, method_signature, current_klass, CHECK); -+} -+ -+void LinkResolver::resolve_dynamic_call(CallInfo& result, -+ Handle bootstrap_specifier, -+ Symbol* method_name, Symbol* method_signature, -+ KlassHandle current_klass, -+ TRAPS) { -+ // JSR 292: this must resolve to an implicitly generated method MH.linkToCallSite(*...) -+ // The appendix argument is likely to be a freshly-created CallSite. -+ Handle resolved_appendix; -+ methodHandle resolved_method = -+ SystemDictionary::find_dynamic_call_site_invoker(current_klass, -+ bootstrap_specifier, -+ method_name, method_signature, -+ &resolved_appendix, -+ CHECK); - if (HAS_PENDING_EXCEPTION) { -+ if (TraceMethodHandles) { -+ tty->print_cr("invokedynamic throws BSME for "INTPTR_FORMAT, PENDING_EXCEPTION); -+ PENDING_EXCEPTION->print(); -+ } - if (PENDING_EXCEPTION->is_a(SystemDictionary::BootstrapMethodError_klass())) { - // throw these guys, since they are already wrapped - return; -@@ -1141,17 +1280,12 @@ - return; - } - // See the "Linking Exceptions" section for the invokedynamic instruction in the JVMS. 
-- Handle ex(THREAD, PENDING_EXCEPTION); -+ Handle nested_exception(THREAD, PENDING_EXCEPTION); - CLEAR_PENDING_EXCEPTION; -- oop bsme = Klass::cast(SystemDictionary::BootstrapMethodError_klass())->java_mirror(); -- MethodHandles::raise_exception(Bytecodes::_athrow, ex(), bsme, CHECK); -- // java code should not return, but if it does throw out anyway -- THROW(vmSymbols::java_lang_InternalError()); -+ THROW_MSG_CAUSE(vmSymbols::java_lang_BootstrapMethodError(), -+ "BootstrapMethodError", nested_exception) - } -- if (resolved_method.is_null()) { -- THROW(vmSymbols::java_lang_InternalError()); -- } -- result.set_dynamic(resolved_method, CHECK); -+ result.set_handle(resolved_method, resolved_appendix, CHECK); - } - - //------------------------------------------------------------------------------------------------------------------------ -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/linkResolver.hpp ---- openjdk/hotspot/src/share/vm/interpreter/linkResolver.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/linkResolver.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -75,11 +75,12 @@ - methodHandle _resolved_method; // static target method - methodHandle _selected_method; // dynamic (actual) target method - int _vtable_index; // vtable index of selected method -+ Handle _resolved_appendix; // extra argument in constant pool (if CPCE::has_appendix) - - void set_static( KlassHandle resolved_klass, methodHandle resolved_method , TRAPS); - void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method , TRAPS); - void set_virtual( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS); -- void set_dynamic( methodHandle resolved_method, TRAPS); -+ void set_handle( methodHandle resolved_method, Handle resolved_appendix, TRAPS); - void set_common( KlassHandle resolved_klass, 
KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS); - - friend class LinkResolver; -@@ -89,6 +90,7 @@ - KlassHandle selected_klass() const { return _selected_klass; } - methodHandle resolved_method() const { return _resolved_method; } - methodHandle selected_method() const { return _selected_method; } -+ Handle resolved_appendix() const { return _resolved_appendix; } - - BasicType result_type() const { return selected_method()->result_type(); } - bool has_vtable_index() const { return _vtable_index >= 0; } -@@ -110,8 +112,8 @@ - static void lookup_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); - static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); - static void lookup_method_in_interfaces (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); -- static void lookup_implicit_method (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, -- KlassHandle current_klass, TRAPS); -+ static void lookup_polymorphic_method (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, -+ KlassHandle current_klass, Handle* appendix_result_or_null, TRAPS); - - static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); - -@@ -139,10 +141,9 @@ - // constant pool resolving - static void check_klass_accessability(KlassHandle ref_klass, KlassHandle sel_klass, TRAPS); - -- // static resolving for all calls except interface calls -- static void resolve_method (methodHandle& method_result, KlassHandle& klass_result, constantPoolHandle pool, int index, TRAPS); -- static void resolve_dynamic_method (methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS); -- static void resolve_interface_method(methodHandle& method_result, KlassHandle& klass_result, 
constantPoolHandle pool, int index, TRAPS); -+ // static resolving calls (will not run any Java code); used only from Bytecode_invoke::static_target -+ static void resolve_method_statically(methodHandle& method_result, KlassHandle& klass_result, -+ Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS); - - // runtime/static resolving for fields - static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS); -@@ -156,6 +157,8 @@ - static void resolve_special_call (CallInfo& result, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS); - static void resolve_virtual_call (CallInfo& result, Handle recv, KlassHandle recv_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, bool check_null_and_abstract, TRAPS); - static void resolve_interface_call(CallInfo& result, Handle recv, KlassHandle recv_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, bool check_null_and_abstract, TRAPS); -+ static void resolve_handle_call (CallInfo& result, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, TRAPS); -+ static void resolve_dynamic_call (CallInfo& result, Handle bootstrap_specifier, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, TRAPS); - - // same as above for compile-time resolution; but returns null handle instead of throwing an exception on error - // also, does not initialize klass (i.e., no side effects) -@@ -177,6 +180,7 @@ - static void resolve_invokevirtual (CallInfo& result, Handle recv, constantPoolHandle pool, int index, TRAPS); - static void resolve_invokeinterface(CallInfo& result, Handle recv, constantPoolHandle pool, int index, TRAPS); - static void resolve_invokedynamic (CallInfo& 
result, constantPoolHandle pool, int index, TRAPS); -+ static void resolve_invokehandle (CallInfo& result, constantPoolHandle pool, int index, TRAPS); - - static void resolve_invoke (CallInfo& result, Handle recv, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS); - }; -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/rewriter.cpp ---- openjdk/hotspot/src/share/vm/interpreter/rewriter.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/rewriter.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -33,6 +33,7 @@ - #include "oops/objArrayOop.hpp" - #include "oops/oop.inline.hpp" - #include "prims/methodComparator.hpp" -+#include "prims/methodHandles.hpp" - - // Computes a CPC map (new_index -> original_index) for constant pool entries - // that are referred to by the interpreter at runtime via the constant pool cache. -@@ -41,10 +42,9 @@ - void Rewriter::compute_index_maps() { - const int length = _pool->length(); - init_cp_map(length); -- jint tag_mask = 0; -+ bool saw_mh_symbol = false; - for (int i = 0; i < length; i++) { - int tag = _pool->tag_at(i).value(); -- tag_mask |= (1 << tag); - switch (tag) { - case JVM_CONSTANT_InterfaceMethodref: - case JVM_CONSTANT_Fieldref : // fall through -@@ -54,13 +54,18 @@ - case JVM_CONSTANT_InvokeDynamic : // fall through - add_cp_cache_entry(i); - break; -+ case JVM_CONSTANT_Utf8: -+ if (_pool->symbol_at(i) == vmSymbols::java_lang_invoke_MethodHandle()) -+ saw_mh_symbol = true; -+ break; - } - } - - guarantee((int)_cp_cache_map.length()-1 <= (int)((u2)-1), - "all cp cache indexes fit in a u2"); - -- _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0); -+ if (saw_mh_symbol) -+ _method_handle_invokers.initialize(length, (int)0); - } - - // Unrewrite the bytecodes if an error occurs. 
-@@ -80,22 +85,6 @@ - oopFactory::new_constantPoolCache(length, CHECK); - No_Safepoint_Verifier nsv; - cache->initialize(_cp_cache_map); -- -- // Don't bother with the next pass if there is no JVM_CONSTANT_InvokeDynamic. -- if (_have_invoke_dynamic) { -- for (int i = 0; i < length; i++) { -- int pool_index = cp_cache_entry_pool_index(i); -- if (pool_index >= 0 && -- _pool->tag_at(pool_index).is_invoke_dynamic()) { -- int bsm_index = _pool->invoke_dynamic_bootstrap_method_ref_index_at(pool_index); -- assert(_pool->tag_at(bsm_index).is_method_handle(), "must be a MH constant"); -- // There is a CP cache entry holding the BSM for these calls. -- int bsm_cache_index = cp_entry_to_cp_cache(bsm_index); -- cache->entry_at(i)->initialize_bootstrap_method_index_in_cache(bsm_cache_index); -- } -- } -- } -- - _pool->set_cache(cache); - cache->set_constant_pool(_pool()); - } -@@ -148,10 +137,53 @@ - int cp_index = Bytes::get_Java_u2(p); - int cache_index = cp_entry_to_cp_cache(cp_index); - Bytes::put_native_u2(p, cache_index); -+ if (!_method_handle_invokers.is_empty()) -+ maybe_rewrite_invokehandle(p - 1, cp_index, reverse); - } else { - int cache_index = Bytes::get_native_u2(p); - int pool_index = cp_cache_entry_pool_index(cache_index); - Bytes::put_Java_u2(p, pool_index); -+ if (!_method_handle_invokers.is_empty()) -+ maybe_rewrite_invokehandle(p - 1, pool_index, reverse); -+ } -+} -+ -+ -+// Adjust the invocation bytecode for a signature-polymorphic method (MethodHandle.invoke, etc.) -+void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, bool reverse) { -+ if (!reverse) { -+ if ((*opc) == (u1)Bytecodes::_invokevirtual || -+ // allow invokespecial as an alias, although it would be very odd: -+ (*opc) == (u1)Bytecodes::_invokespecial) { -+ assert(_pool->tag_at(cp_index).is_method(), "wrong index"); -+ // Determine whether this is a signature-polymorphic method. 
-+ if (cp_index >= _method_handle_invokers.length()) return; -+ int status = _method_handle_invokers[cp_index]; -+ assert(status >= -1 && status <= 1, "oob tri-state"); -+ if (status == 0) { -+ if (_pool->klass_ref_at_noresolve(cp_index) == vmSymbols::java_lang_invoke_MethodHandle() && -+ MethodHandles::is_signature_polymorphic_name(SystemDictionary::MethodHandle_klass(), -+ _pool->name_ref_at(cp_index))) -+ status = +1; -+ else -+ status = -1; -+ _method_handle_invokers[cp_index] = status; -+ } -+ // We use a special internal bytecode for such methods (if non-static). -+ // The basic reason for this is that such methods need an extra "appendix" argument -+ // to transmit the call site's intended call type. -+ if (status > 0) { -+ (*opc) = (u1)Bytecodes::_invokehandle; -+ } -+ } -+ } else { -+ // Do not need to look at cp_index. -+ if ((*opc) == (u1)Bytecodes::_invokehandle) { -+ (*opc) = (u1)Bytecodes::_invokevirtual; -+ // Ignore corner case of original _invokespecial instruction. -+ // This is safe because (a) the signature polymorphic method was final, and -+ // (b) the implementation of MethodHandle will not call invokespecial on it. 
-+ } - } - } - -@@ -297,17 +329,18 @@ - case Bytecodes::_invokespecial : // fall through - case Bytecodes::_invokestatic : - case Bytecodes::_invokeinterface: -+ case Bytecodes::_invokehandle : // if reverse=true - rewrite_member_reference(bcp, prefix_length+1, reverse); - break; - case Bytecodes::_invokedynamic: - rewrite_invokedynamic(bcp, prefix_length+1, reverse); - break; - case Bytecodes::_ldc: -- case Bytecodes::_fast_aldc: -+ case Bytecodes::_fast_aldc: // if reverse=true - maybe_rewrite_ldc(bcp, prefix_length+1, false, reverse); - break; - case Bytecodes::_ldc_w: -- case Bytecodes::_fast_aldc_w: -+ case Bytecodes::_fast_aldc_w: // if reverse=true - maybe_rewrite_ldc(bcp, prefix_length+1, true, reverse); - break; - case Bytecodes::_jsr : // fall through -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/rewriter.hpp ---- openjdk/hotspot/src/share/vm/interpreter/rewriter.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/rewriter.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -39,7 +39,7 @@ - objArrayHandle _methods; - intArray _cp_map; - intStack _cp_cache_map; -- bool _have_invoke_dynamic; -+ intArray _method_handle_invokers; - - void init_cp_map(int length) { - _cp_map.initialize(length, -1); -@@ -88,6 +88,7 @@ - void scan_method(methodOop m, bool reverse = false); - void rewrite_Object_init(methodHandle m, TRAPS); - void rewrite_member_reference(address bcp, int offset, bool reverse = false); -+ void maybe_rewrite_invokehandle(address opc, int cp_index, bool reverse = false); - void rewrite_invokedynamic(address bcp, int offset, bool reverse = false); - void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false); - // Revert bytecodes in case of an exception. 
-diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/templateInterpreter.cpp ---- openjdk/hotspot/src/share/vm/interpreter/templateInterpreter.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/templateInterpreter.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -362,7 +362,6 @@ - method_entry(empty) - method_entry(accessor) - method_entry(abstract) -- method_entry(method_handle) - method_entry(java_lang_math_sin ) - method_entry(java_lang_math_cos ) - method_entry(java_lang_math_tan ) -@@ -372,6 +371,8 @@ - method_entry(java_lang_math_log10) - method_entry(java_lang_ref_reference_get) - -+ initialize_method_handle_entries(); -+ - // all native method kinds (must be one contiguous block) - Interpreter::_native_entry_begin = Interpreter::code()->code_end(); - method_entry(native) -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/templateTable.cpp ---- openjdk/hotspot/src/share/vm/interpreter/templateTable.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/templateTable.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -444,7 +444,7 @@ - def(Bytecodes::_invokespecial , ubcp|disp|clvm|____, vtos, vtos, invokespecial , f1_byte ); - def(Bytecodes::_invokestatic , ubcp|disp|clvm|____, vtos, vtos, invokestatic , f1_byte ); - def(Bytecodes::_invokeinterface , ubcp|disp|clvm|____, vtos, vtos, invokeinterface , f1_byte ); -- def(Bytecodes::_invokedynamic , ubcp|disp|clvm|____, vtos, vtos, invokedynamic , f1_oop ); -+ def(Bytecodes::_invokedynamic , ubcp|disp|clvm|____, vtos, vtos, invokedynamic , f12_oop ); - def(Bytecodes::_new , ubcp|____|clvm|____, vtos, atos, _new , _ ); - def(Bytecodes::_newarray , ubcp|____|clvm|____, itos, atos, newarray , _ ); - def(Bytecodes::_anewarray , ubcp|____|clvm|____, itos, atos, anewarray , _ ); -@@ -514,6 +514,8 @@ - - def(Bytecodes::_return_register_finalizer , ____|disp|clvm|____, vtos, vtos, _return , vtos ); - -+ def(Bytecodes::_invokehandle , ubcp|disp|clvm|____, vtos, 
vtos, invokehandle , f12_oop ); -+ - def(Bytecodes::_shouldnotreachhere , ____|____|____|____, vtos, vtos, shouldnotreachhere , _ ); - // platform specific bytecodes - pd_initialize(); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/interpreter/templateTable.hpp ---- openjdk/hotspot/src/share/vm/interpreter/templateTable.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/interpreter/templateTable.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -98,7 +98,7 @@ - public: - enum Operation { add, sub, mul, div, rem, _and, _or, _xor, shl, shr, ushr }; - enum Condition { equal, not_equal, less, less_equal, greater, greater_equal }; -- enum CacheByte { f1_byte = 1, f2_byte = 2, f1_oop = 0x11 }; // byte_no codes -+ enum CacheByte { f1_byte = 1, f2_byte = 2, f12_oop = 0x12 }; // byte_no codes - - private: - static bool _is_initialized; // true if TemplateTable has been initialized -@@ -294,6 +294,7 @@ - static void invokestatic(int byte_no); - static void invokeinterface(int byte_no); - static void invokedynamic(int byte_no); -+ static void invokehandle(int byte_no); - static void fast_invokevfinal(int byte_no); - - static void getfield_or_static(int byte_no, bool is_static); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/constantPoolOop.cpp ---- openjdk/hotspot/src/share/vm/oops/constantPoolOop.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/oops/constantPoolOop.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -267,25 +267,61 @@ - - - methodOop constantPoolOopDesc::method_at_if_loaded(constantPoolHandle cpool, -- int which, Bytecodes::Code invoke_code) { -+ int which) { - assert(!constantPoolCacheOopDesc::is_secondary_index(which), "no indy instruction here"); - if (cpool->cache() == NULL) return NULL; // nothing to load yet -- int cache_index = which - CPCACHE_INDEX_TAG; -+ int cache_index = get_cpcache_index(which); - if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) { - if (PrintMiscellaneous && (Verbose||WizardMode)) { -- 
tty->print_cr("bad operand %d for %d in:", which, invoke_code); cpool->print(); -+ tty->print_cr("bad operand %d in:", which); cpool->print(); - } - return NULL; - } - ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); -- if (invoke_code != Bytecodes::_illegal) -- return e->get_method_if_resolved(invoke_code, cpool); -- Bytecodes::Code bc; -- if ((bc = e->bytecode_1()) != (Bytecodes::Code)0) -- return e->get_method_if_resolved(bc, cpool); -- if ((bc = e->bytecode_2()) != (Bytecodes::Code)0) -- return e->get_method_if_resolved(bc, cpool); -- return NULL; -+ return e->method_if_resolved(cpool); -+} -+ -+ -+bool constantPoolOopDesc::has_appendix_at_if_loaded(constantPoolHandle cpool, int which) { -+ if (cpool->cache() == NULL) return false; // nothing to load yet -+ // XXX Is there a simpler way to get to the secondary entry? -+ ConstantPoolCacheEntry* e; -+ if (constantPoolCacheOopDesc::is_secondary_index(which)) { -+ e = cpool->cache()->secondary_entry_at(which); -+ } else { -+ int cache_index = get_cpcache_index(which); -+ if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) { -+ if (PrintMiscellaneous && (Verbose||WizardMode)) { -+ tty->print_cr("bad operand %d in:", which); cpool->print(); -+ } -+ return false; -+ } -+ e = cpool->cache()->entry_at(cache_index); -+ } -+ return e->has_appendix(); -+} -+ -+ -+oop constantPoolOopDesc::appendix_at_if_loaded(constantPoolHandle cpool, int which) { -+ if (cpool->cache() == NULL) return NULL; // nothing to load yet -+ // XXX Is there a simpler way to get to the secondary entry? 
-+ ConstantPoolCacheEntry* e; -+ if (constantPoolCacheOopDesc::is_secondary_index(which)) { -+ e = cpool->cache()->secondary_entry_at(which); -+ } else { -+ int cache_index = get_cpcache_index(which); -+ if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) { -+ if (PrintMiscellaneous && (Verbose||WizardMode)) { -+ tty->print_cr("bad operand %d in:", which); cpool->print(); -+ } -+ return NULL; -+ } -+ e = cpool->cache()->entry_at(cache_index); -+ } -+ if (!e->has_appendix()) { -+ return NULL; -+ } -+ return e->f1_as_instance(); - } - - -@@ -481,7 +517,7 @@ - if (cache_index >= 0) { - assert(index == _no_index_sentinel, "only one kind of index at a time"); - ConstantPoolCacheEntry* cpc_entry = this_oop->cache()->entry_at(cache_index); -- result_oop = cpc_entry->f1(); -+ result_oop = cpc_entry->f1_as_instance(); - if (result_oop != NULL) { - return decode_exception_from_f1(result_oop, THREAD); - // That was easy... -@@ -553,12 +589,7 @@ - index, this_oop->method_type_index_at(index), - signature->as_C_string()); - KlassHandle klass(THREAD, this_oop->pool_holder()); -- bool ignore_is_on_bcp = false; -- Handle value = SystemDictionary::find_method_handle_type(signature, -- klass, -- false, -- ignore_is_on_bcp, -- THREAD); -+ Handle value = SystemDictionary::find_method_handle_type(signature, klass, THREAD); - if (HAS_PENDING_EXCEPTION) { - throw_exception = Handle(THREAD, PENDING_EXCEPTION); - CLEAR_PENDING_EXCEPTION; -@@ -608,7 +639,7 @@ - result_oop = NULL; // safety - ObjectLocker ol(this_oop, THREAD); - ConstantPoolCacheEntry* cpc_entry = this_oop->cache()->entry_at(cache_index); -- result_oop = cpc_entry->f1(); -+ result_oop = cpc_entry->f1_as_instance(); - // Benign race condition: f1 may already be filled in while we were trying to lock. - // The important thing here is that all threads pick up the same result. 
- // It doesn't matter which racing thread wins, as long as only one -@@ -627,6 +658,45 @@ - } - } - -+ -+oop constantPoolOopDesc::resolve_bootstrap_specifier_at_impl(constantPoolHandle this_oop, int index, TRAPS) { -+ assert(this_oop->tag_at(index).is_invoke_dynamic(), "Corrupted constant pool"); -+ -+ Handle bsm; -+ int argc; -+ { -+ // JVM_CONSTANT_InvokeDynamic is an ordered pair of [bootm, name&type], plus optional arguments -+ // The bootm, being a JVM_CONSTANT_MethodHandle, has its own cache entry. -+ // It is accompanied by the optional arguments. -+ int bsm_index = this_oop->invoke_dynamic_bootstrap_method_ref_index_at(index); -+ oop bsm_oop = this_oop->resolve_possibly_cached_constant_at(bsm_index, CHECK_NULL); -+ if (!java_lang_invoke_MethodHandle::is_instance(bsm_oop)) { -+ THROW_MSG_NULL(vmSymbols::java_lang_LinkageError(), "BSM not an MethodHandle"); -+ } -+ -+ // Extract the optional static arguments. -+ argc = this_oop->invoke_dynamic_argument_count_at(index); -+ if (argc == 0) return bsm_oop; -+ -+ bsm = Handle(THREAD, bsm_oop); -+ } -+ -+ objArrayHandle info; -+ { -+ objArrayOop info_oop = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1+argc, CHECK_NULL); -+ info = objArrayHandle(THREAD, info_oop); -+ } -+ -+ info->obj_at_put(0, bsm()); -+ for (int i = 0; i < argc; i++) { -+ int arg_index = this_oop->invoke_dynamic_argument_index_at(index, i); -+ oop arg_oop = this_oop->resolve_possibly_cached_constant_at(arg_index, CHECK_NULL); -+ info->obj_at_put(1+i, arg_oop); -+ } -+ -+ return info(); -+} -+ - oop constantPoolOopDesc::string_at_impl(constantPoolHandle this_oop, int which, TRAPS) { - oop str = NULL; - CPSlot entry = this_oop->slot_at(which); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/constantPoolOop.hpp ---- openjdk/hotspot/src/share/vm/oops/constantPoolOop.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/oops/constantPoolOop.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -642,6 +642,11 @@ - return 
resolve_constant_at_impl(h_this, pool_index, _possible_index_sentinel, THREAD); - } - -+ oop resolve_bootstrap_specifier_at(int index, TRAPS) { -+ constantPoolHandle h_this(THREAD, this); -+ return resolve_bootstrap_specifier_at_impl(h_this, index, THREAD); -+ } -+ - // Klass name matches name at offset - bool klass_name_at_matches(instanceKlassHandle k, int which); - -@@ -666,12 +671,13 @@ - friend class SystemDictionary; - - // Used by compiler to prevent classloading. -- static methodOop method_at_if_loaded (constantPoolHandle this_oop, int which, -- Bytecodes::Code bc = Bytecodes::_illegal); -- static klassOop klass_at_if_loaded (constantPoolHandle this_oop, int which); -- static klassOop klass_ref_at_if_loaded (constantPoolHandle this_oop, int which); -+ static methodOop method_at_if_loaded (constantPoolHandle this_oop, int which); -+ static bool has_appendix_at_if_loaded (constantPoolHandle this_oop, int which); -+ static oop appendix_at_if_loaded (constantPoolHandle this_oop, int which); -+ static klassOop klass_at_if_loaded (constantPoolHandle this_oop, int which); -+ static klassOop klass_ref_at_if_loaded (constantPoolHandle this_oop, int which); - // Same as above - but does LinkResolving. -- static klassOop klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS); -+ static klassOop klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS); - - // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the - // future by other Java code. 
These take constant pool indices rather than -@@ -697,6 +703,8 @@ - enum { CPCACHE_INDEX_TAG = 0 }; // in product mode, this zero value is a no-op - #endif //ASSERT - -+ static int get_cpcache_index(int index) { return index - CPCACHE_INDEX_TAG; } -+ - private: - - Symbol* impl_name_ref_at(int which, bool uncached); -@@ -729,6 +737,7 @@ - static void resolve_string_constants_impl(constantPoolHandle this_oop, TRAPS); - - static oop resolve_constant_at_impl(constantPoolHandle this_oop, int index, int cache_index, TRAPS); -+ static oop resolve_bootstrap_specifier_at_impl(constantPoolHandle this_oop, int index, TRAPS); - - public: - // Merging constantPoolOop support: -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/cpCacheOop.cpp ---- openjdk/hotspot/src/share/vm/oops/cpCacheOop.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/oops/cpCacheOop.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -31,6 +31,7 @@ - #include "oops/objArrayOop.hpp" - #include "oops/oop.inline.hpp" - #include "prims/jvmtiRedefineClassesTrace.hpp" -+#include "prims/methodHandles.hpp" - #include "runtime/handles.inline.hpp" - - -@@ -44,68 +45,61 @@ - - void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) { - assert(0 <= main_index && main_index < 0x10000, "sanity check"); -- _indices = (main_index << 16); -+ _indices = (main_index << main_cp_index_bits); - assert(main_entry_index() == main_index, ""); - } - --int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final, -- bool is_vfinal, bool is_volatile, -- bool is_method_interface, bool is_method) { -- int f = state; -- -- assert( state < number_of_states, "Invalid state in as_flags"); -- -- f <<= 1; -- if (is_final) f |= 1; -- f <<= 1; -- if (is_vfinal) f |= 1; -- f <<= 1; -- if (is_volatile) f |= 1; -- f <<= 1; -- if (is_method_interface) f |= 1; -- f <<= 1; -- if (is_method) f |= 1; -- f <<= ConstantPoolCacheEntry::hotSwapBit; -+int ConstantPoolCacheEntry::make_flags(TosState state, -+ int 
option_bits, -+ int field_index_or_method_params) { -+ assert(state < number_of_states, "Invalid state in make_flags"); -+ int f = ((int)state << tos_state_shift) | option_bits | field_index_or_method_params; - // Preserve existing flag bit values -+ // The low bits are a field offset, or else the method parameter size. - #ifdef ASSERT -- int old_state = ((_flags >> tosBits) & 0x0F); -- assert(old_state == 0 || old_state == state, -+ TosState old_state = flag_state(); -+ assert(old_state == (TosState)0 || old_state == state, - "inconsistent cpCache flags state"); - #endif - return (_flags | f) ; - } - - void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) { -+ assert(!is_secondary_entry(), "must not overwrite main_entry_index"); - #ifdef ASSERT - // Read once. - volatile Bytecodes::Code c = bytecode_1(); - assert(c == 0 || c == code || code == 0, "update must be consistent"); - #endif - // Need to flush pending stores here before bytecode is written. -- OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16)); -+ OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_1_shift)); - } - - void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) { -+ assert(!is_secondary_entry(), "must not overwrite main_entry_index"); - #ifdef ASSERT - // Read once. - volatile Bytecodes::Code c = bytecode_2(); - assert(c == 0 || c == code || code == 0, "update must be consistent"); - #endif - // Need to flush pending stores here before bytecode is written. -- OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24)); -+ OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_2_shift)); - } - --// Atomically sets f1 if it is still NULL, otherwise it keeps the --// current value. --void ConstantPoolCacheEntry::set_f1_if_null_atomic(oop f1) { -+// Sets f1, ordering with previous writes. 
-+void ConstantPoolCacheEntry::release_set_f1(oop f1) { - // Use barriers as in oop_store -+ assert(f1 != NULL, ""); - oop* f1_addr = (oop*) &_f1; - update_barrier_set_pre(f1_addr, f1); -- void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL); -- bool success = (result == NULL); -- if (success) { -- update_barrier_set((void*) f1_addr, f1); -- } -+ OrderAccess::release_store_ptr((intptr_t*)f1_addr, f1); -+ update_barrier_set((void*) f1_addr, f1); -+} -+ -+// Sets flags, but only if the value was previously zero. -+bool ConstantPoolCacheEntry::init_flags_atomic(intptr_t flags) { -+ intptr_t result = Atomic::cmpxchg_ptr(flags, &_flags, 0); -+ return (result == 0); - } - - #ifdef ASSERT -@@ -135,17 +129,32 @@ - bool is_volatile) { - set_f1(field_holder()->java_mirror()); - set_f2(field_offset); -- assert(field_index <= field_index_mask, -+ assert((field_index & field_index_mask) == field_index, - "field index does not fit in low flag bits"); -- set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) | -- (field_index & field_index_mask)); -+ set_field_flags(field_type, -+ ((is_volatile ? 1 : 0) << is_volatile_shift) | -+ ((is_final ? 1 : 0) << is_final_shift), -+ field_index); - set_bytecode_1(get_code); - set_bytecode_2(put_code); - NOT_PRODUCT(verify(tty)); - } - --int ConstantPoolCacheEntry::field_index() const { -- return (_flags & field_index_mask); -+void ConstantPoolCacheEntry::set_parameter_size(int value) { -+ // This routine is called only in corner cases where the CPCE is not yet initialized. -+ // See AbstractInterpreter::deopt_continue_after_entry. -+ assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value, -+ err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value)); -+ // Setting the parameter size by itself is only safe if the -+ // current value of _flags is 0, otherwise another thread may have -+ // updated it and we don't want to overwrite that value. 
Don't -+ // bother trying to update it once it's nonzero but always make -+ // sure that the final parameter size agrees with what was passed. -+ if (_flags == 0) { -+ Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0); -+ } -+ guarantee(parameter_size() == value, -+ err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value)); - } - - void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code, -@@ -154,51 +163,51 @@ - assert(!is_secondary_entry(), ""); - assert(method->interpreter_entry() != NULL, "should have been set at this point"); - assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache"); -- bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface); - - int byte_no = -1; -- bool needs_vfinal_flag = false; -+ bool change_to_virtual = false; -+ - switch (invoke_code) { -+ case Bytecodes::_invokeinterface: -+ // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface -+ // instruction somehow links to a non-interface method (in Object). -+ // In that case, the method has no itable index and must be invoked as a virtual. -+ // Set a flag to keep track of this corner case. -+ change_to_virtual = true; -+ -+ // ...and fall through as if we were handling invokevirtual: - case Bytecodes::_invokevirtual: -- case Bytecodes::_invokeinterface: { -+ { - if (method->can_be_statically_bound()) { -- set_f2((intptr_t)method()); -- needs_vfinal_flag = true; -+ // set_f2_as_vfinal_method checks if is_vfinal flag is true. -+ set_method_flags(as_TosState(method->result_type()), -+ ( 1 << is_vfinal_shift) | -+ ((method->is_final_method() ? 1 : 0) << is_final_shift) | -+ ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift), -+ method()->size_of_parameters()); -+ set_f2_as_vfinal_method(method()); - } else { - assert(vtable_index >= 0, "valid index"); -+ assert(!method->is_final_method(), "sanity"); -+ set_method_flags(as_TosState(method->result_type()), -+ ((change_to_virtual ? 
1 : 0) << is_forced_virtual_shift), -+ method()->size_of_parameters()); - set_f2(vtable_index); - } - byte_no = 2; - break; -- } -- -- case Bytecodes::_invokedynamic: // similar to _invokevirtual -- if (TraceInvokeDynamic) { -- tty->print_cr("InvokeDynamic set_method%s method="PTR_FORMAT" index=%d", -- (is_secondary_entry() ? " secondary" : ""), -- (intptr_t)method(), vtable_index); -- method->print(); -- this->print(tty, 0); - } -- assert(method->can_be_statically_bound(), "must be a MH invoker method"); -- assert(_f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized"); -- // SystemDictionary::find_method_handle_invoke only caches -- // methods which signature classes are on the boot classpath, -- // otherwise the newly created method is returned. To avoid -- // races in that case we store the first one coming in into the -- // cp-cache atomically if it's still unset. -- set_f1_if_null_atomic(method()); -- needs_vfinal_flag = false; // _f2 is not an oop -- assert(!is_vfinal(), "f2 not an oop"); -- byte_no = 1; // coordinate this with bytecode_number & is_resolved -- break; - - case Bytecodes::_invokespecial: -- // Preserve the value of the vfinal flag on invokevirtual bytecode -- // which may be shared with this constant pool cache entry. -- needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal(); -- // fall through - case Bytecodes::_invokestatic: -+ // Note: Read and preserve the value of the is_vfinal flag on any -+ // invokevirtual bytecode shared with this constant pool cache entry. -+ // It is cheap and safe to consult is_vfinal() at all times. -+ // Once is_vfinal is set, it must stay that way, lest we get a dangling oop. -+ set_method_flags(as_TosState(method->result_type()), -+ ((is_vfinal() ? 1 : 0) << is_vfinal_shift) | -+ ((method->is_final_method() ? 
1 : 0) << is_final_shift), -+ method()->size_of_parameters()); - set_f1(method()); - byte_no = 1; - break; -@@ -207,19 +216,14 @@ - break; - } - -- set_flags(as_flags(as_TosState(method->result_type()), -- method->is_final_method(), -- needs_vfinal_flag, -- false, -- change_to_virtual, -- true)| -- method()->size_of_parameters()); -- - // Note: byte_no also appears in TemplateTable::resolve. - if (byte_no == 1) { -+ assert(invoke_code != Bytecodes::_invokevirtual && -+ invoke_code != Bytecodes::_invokeinterface, ""); - set_bytecode_1(invoke_code); - } else if (byte_no == 2) { - if (change_to_virtual) { -+ assert(invoke_code == Bytecodes::_invokeinterface, ""); - // NOTE: THIS IS A HACK - BE VERY CAREFUL!!! - // - // Workaround for the case where we encounter an invokeinterface, but we -@@ -235,10 +239,11 @@ - // Otherwise, the method needs to be reresolved with caller for each - // interface call. - if (method->is_public()) set_bytecode_1(invoke_code); -- set_bytecode_2(Bytecodes::_invokevirtual); - } else { -- set_bytecode_2(invoke_code); -+ assert(invoke_code == Bytecodes::_invokevirtual, ""); - } -+ // set up for invokevirtual, even if linking for invokeinterface also: -+ set_bytecode_2(Bytecodes::_invokevirtual); - } else { - ShouldNotReachHere(); - } -@@ -250,73 +255,129 @@ - assert(!is_secondary_entry(), ""); - klassOop interf = method->method_holder(); - assert(instanceKlass::cast(interf)->is_interface(), "must be an interface"); -+ assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here"); - set_f1(interf); - set_f2(index); -- set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters()); -+ set_method_flags(as_TosState(method->result_type()), -+ 0, // no option bits -+ method()->size_of_parameters()); - set_bytecode_1(Bytecodes::_invokeinterface); - } - - --void ConstantPoolCacheEntry::initialize_bootstrap_method_index_in_cache(int 
bsm_cache_index) { -- assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry"); -- assert(_f2 == 0, "initialize once"); -- assert(bsm_cache_index == (int)(u2)bsm_cache_index, "oob"); -- set_f2(bsm_cache_index + constantPoolOopDesc::CPCACHE_INDEX_TAG); -+void ConstantPoolCacheEntry::set_method_handle(methodHandle adapter, Handle appendix) { -+ assert(!is_secondary_entry(), ""); -+ set_method_handle_common(Bytecodes::_invokehandle, adapter, appendix); - } - --int ConstantPoolCacheEntry::bootstrap_method_index_in_cache() { -- assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry"); -- intptr_t bsm_cache_index = (intptr_t) _f2 - constantPoolOopDesc::CPCACHE_INDEX_TAG; -- assert(bsm_cache_index == (intptr_t)(u2)bsm_cache_index, "oob"); -- return (int) bsm_cache_index; -+void ConstantPoolCacheEntry::set_dynamic_call(methodHandle adapter, Handle appendix) { -+ assert(is_secondary_entry(), ""); -+ set_method_handle_common(Bytecodes::_invokedynamic, adapter, appendix); - } - --void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, methodHandle signature_invoker) { -- assert(is_secondary_entry(), ""); -- // NOTE: it's important that all other values are set before f1 is -- // set since some users short circuit on f1 being set -- // (i.e. non-null) and that may result in uninitialized values for -- // other racing threads (e.g. flags). 
-- int param_size = signature_invoker->size_of_parameters(); -- assert(param_size >= 1, "method argument size must include MH.this"); -- param_size -= 1; // do not count MH.this; it is not stacked for invokedynamic -- bool is_final = true; -- assert(signature_invoker->is_final_method(), "is_final"); -- int flags = as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size; -- assert(_flags == 0 || _flags == flags, "flags should be the same"); -- set_flags(flags); -- // do not do set_bytecode on a secondary CP cache entry -- //set_bytecode_1(Bytecodes::_invokedynamic); -- set_f1_if_null_atomic(call_site()); // This must be the last one to set (see NOTE above)! -+void ConstantPoolCacheEntry::set_method_handle_common(Bytecodes::Code invoke_code, methodHandle adapter, Handle appendix) { -+ // NOTE: This CPCE can be the subject of data races. -+ // There are three words to update: flags, f2, f1 (in that order). -+ // Writers must store all other values before f1. -+ // Readers must test f1 first for non-null before reading other fields. -+ // Competing writers must acquire exclusive access on the first -+ // write, to flags, using a compare/exchange. -+ // A losing writer must spin until the winner writes f1, -+ // so that when he returns, he can use the linked cache entry. -+ -+ bool has_appendix = appendix.not_null(); -+ if (!has_appendix) { -+ // The extra argument is not used, but we need a non-null value to signify linkage state. -+ // Set it to something benign that will never leak memory. -+ appendix = Universe::void_mirror(); -+ } -+ -+ bool owner = -+ init_method_flags_atomic(as_TosState(adapter->result_type()), -+ ((has_appendix ? 1 : 0) << has_appendix_shift) | -+ ( 1 << is_vfinal_shift) | -+ ( 1 << is_final_shift), -+ adapter->size_of_parameters()); -+ if (!owner) { -+ while (is_f1_null()) { -+ // Pause momentarily on a low-level lock, to allow racing thread to win. 
-+ MutexLockerEx mu(Patching_lock, Mutex::_no_safepoint_check_flag); -+ os::yield(); -+ } -+ return; -+ } -+ -+ if (TraceInvokeDynamic) { -+ tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method="PTR_FORMAT" ", -+ invoke_code, -+ (intptr_t)appendix(), (has_appendix ? "" : " (unused)"), -+ (intptr_t)adapter()); -+ adapter->print(); -+ if (has_appendix) appendix()->print(); -+ } -+ -+ // Method handle invokes and invokedynamic sites use both cp cache words. -+ // f1, if not null, contains a value passed as a trailing argument to the adapter. -+ // In the general case, this could be the call site's MethodType, -+ // for use with java.lang.Invokers.checkExactType, or else a CallSite object. -+ // f2 contains the adapter method which manages the actual call. -+ // In the general case, this is a compiled LambdaForm. -+ // (The Java code is free to optimize these calls by binding other -+ // sorts of methods and appendices to call sites.) -+ // JVM-level linking is via f2, as if for invokevfinal, and signatures are erased. -+ // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits. -+ // In principle this means that the method (with appendix) could take up to 256 parameter slots. -+ // -+ // This means that given a call site like (List)mh.invoke("foo"), -+ // the f2 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;', -+ // not '(Ljava/lang/String;)Ljava/util/List;'. -+ // The fact that String and List are involved is encoded in the MethodType in f1. -+ // This allows us to create fewer method oops, while keeping type safety. -+ // -+ set_f2_as_vfinal_method(adapter()); -+ assert(appendix.not_null(), "needed for linkage state"); -+ release_set_f1(appendix()); // This must be the last one to set (see NOTE above)! -+ if (!is_secondary_entry()) { -+ // The interpreter assembly code does not check byte_2, -+ // but it is used by is_resolved, method_if_resolved, etc. 
-+ set_bytecode_2(invoke_code); -+ } -+ NOT_PRODUCT(verify(tty)); -+ if (TraceInvokeDynamic) { -+ this->print(tty, 0); -+ } - } - -- --methodOop ConstantPoolCacheEntry::get_method_if_resolved(Bytecodes::Code invoke_code, constantPoolHandle cpool) { -- assert(invoke_code > (Bytecodes::Code)0, "bad query"); -+methodOop ConstantPoolCacheEntry::method_if_resolved(constantPoolHandle cpool) { - if (is_secondary_entry()) { -- return cpool->cache()->entry_at(main_entry_index())->get_method_if_resolved(invoke_code, cpool); -+ if (!is_f1_null()) -+ return f2_as_vfinal_method(); -+ return NULL; - } - // Decode the action of set_method and set_interface_call -- if (bytecode_1() == invoke_code) { -+ Bytecodes::Code invoke_code = bytecode_1(); -+ if (invoke_code != (Bytecodes::Code)0) { - oop f1 = _f1; - if (f1 != NULL) { - switch (invoke_code) { - case Bytecodes::_invokeinterface: - assert(f1->is_klass(), ""); -- return klassItable::method_for_itable_index(klassOop(f1), (int) f2()); -+ return klassItable::method_for_itable_index(klassOop(f1), f2_as_index()); - case Bytecodes::_invokestatic: - case Bytecodes::_invokespecial: -+ assert(!has_appendix(), ""); - assert(f1->is_method(), ""); - return methodOop(f1); - } - } - } -- if (bytecode_2() == invoke_code) { -+ invoke_code = bytecode_2(); -+ if (invoke_code != (Bytecodes::Code)0) { - switch (invoke_code) { - case Bytecodes::_invokevirtual: - if (is_vfinal()) { - // invokevirtual -- methodOop m = methodOop((intptr_t) f2()); -+ methodOop m = f2_as_vfinal_method(); - assert(m->is_method(), ""); - return m; - } else { -@@ -325,16 +386,19 @@ - klassOop klass = cpool->resolved_klass_at(holder_index); - if (!Klass::cast(klass)->oop_is_instance()) - klass = SystemDictionary::Object_klass(); -- return instanceKlass::cast(klass)->method_at_vtable((int) f2()); -+ return instanceKlass::cast(klass)->method_at_vtable(f2_as_index()); - } - } -+ break; -+ case Bytecodes::_invokehandle: -+ case Bytecodes::_invokedynamic: -+ return 
f2_as_vfinal_method(); - } - } - return NULL; - } - - -- - class LocalOopClosure: public OopClosure { - private: - void (*_f)(oop*); -@@ -419,9 +483,10 @@ - methodOop new_method, bool * trace_name_printed) { - - if (is_vfinal()) { -- // virtual and final so f2() contains method ptr instead of vtable index -- if (f2() == (intptr_t)old_method) { -+ // virtual and final so _f2 contains method ptr instead of vtable index -+ if (f2_as_vfinal_method() == old_method) { - // match old_method so need an update -+ // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values - _f2 = (intptr_t)new_method; - if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { - if (!(*trace_name_printed)) { -@@ -497,16 +562,17 @@ - methodOop m = NULL; - if (is_vfinal()) { - // virtual and final so _f2 contains method ptr instead of vtable index -- m = (methodOop)_f2; -- } else if ((oop)_f1 == NULL) { -+ m = f2_as_vfinal_method(); -+ } else if (is_f1_null()) { - // NULL _f1 means this is a virtual entry so also not interesting - return false; - } else { -- if (!((oop)_f1)->is_method()) { -+ oop f1 = _f1; // _f1 is volatile -+ if (!f1->is_method()) { - // _f1 can also contain a klassOop for an interface - return false; - } -- m = (methodOop)_f1; -+ m = f1_as_method(); - } - - assert(m != NULL && m->is_method(), "sanity check"); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/cpCacheOop.hpp ---- openjdk/hotspot/src/share/vm/oops/cpCacheOop.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/oops/cpCacheOop.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -38,13 +38,14 @@ - // bit number |31 0| - // bit length |-8--|-8--|---16----| - // -------------------------------- --// _indices [ b2 | b1 | index ] --// _f1 [ entry specific ] --// _f2 [ entry specific ] --// _flags [t|f|vf|v|m|h|unused|field_index] (for field entries) --// bit length |4|1|1 |1|1|0|---7--|----16-----] --// _flags [t|f|vf|v|m|h|unused|eidx|psze] (for method entries) --// bit length |4|1|1 
|1|1|1|---7--|-8--|-8--] -+// _indices [ b2 | b1 | index ] index = constant_pool_index (!= 0, normal entries only) -+// _indices [ index | 00000 ] index = main_entry_index (secondary entries only) -+// _f1 [ entry specific ] method, klass, or oop (MethodType or CallSite) -+// _f2 [ entry specific ] vtable index or vfinal method -+// _flags [tos|0|00|00|00|f|v|f2|unused|field_index] (for field entries) -+// bit length [ 4 |1|1 |1 | 1|1|1| 1|---5--|----16-----] -+// _flags [tos|M|vf|fv|ea|f|0|f2|unused|00000|psize] (for method entries) -+// bit length [ 4 |1|1 |1 | 1|1|1| 1|---5--|--8--|--8--] - - // -------------------------------- - // -@@ -52,24 +53,23 @@ - // index = original constant pool index - // b1 = bytecode 1 - // b2 = bytecode 2 --// psze = parameters size (method entries only) --// eidx = interpreter entry index (method entries only) -+// psize = parameters size (method entries only) - // field_index = index into field information in holder instanceKlass - // The index max is 0xffff (max number of fields in constant pool) - // and is multiplied by (instanceKlass::next_offset) when accessing. 
- // t = TosState (see below) - // f = field is marked final (see below) --// vf = virtual, final (method entries only : is_vfinal()) -+// f2 = virtual but final (method entries only: is_vfinal()) - // v = field is volatile (see below) - // m = invokeinterface used for method in class Object (see below) - // h = RedefineClasses/Hotswap bit (see below) - // - // The flags after TosState have the following interpretation: --// bit 27: f flag true if field is marked final --// bit 26: vf flag true if virtual final method --// bit 25: v flag true if field is volatile (only for fields) --// bit 24: m flag true if invokeinterface used for method in class Object --// bit 23: 0 for fields, 1 for methods -+// bit 27: 0 for fields, 1 for methods -+// f flag true if field is marked final -+// v flag true if field is volatile (only for fields) -+// f2 flag true if f2 contains an oop (e.g., virtual final method) -+// fv flag true if invokeinterface used for method in class Object - // - // The flags 31, 30, 29, 28 together build a 4 bit number 0 to 8 with the - // following mapping to the TosState states: -@@ -86,25 +86,26 @@ - // - // Entry specific: field entries: - // _indices = get (b1 section) and put (b2 section) bytecodes, original constant pool index --// _f1 = field holder --// _f2 = field offset in words --// _flags = field type information, original field index in field holder -+// _f1 = field holder (as a java.lang.Class, not a klassOop) -+// _f2 = field offset in bytes -+// _flags = field type information, original FieldInfo index in field holder - // (field_index section) - // - // Entry specific: method entries: - // _indices = invoke code for f1 (b1 section), invoke code for f2 (b2 section), - // original constant pool index --// _f1 = method for all but virtual calls, unused by virtual calls --// (note: for interface calls, which are essentially virtual, --// contains klassOop for the corresponding interface. 
--// for invokedynamic, f1 contains the CallSite object for the invocation --// _f2 = method/vtable index for virtual calls only, unused by all other --// calls. The vf flag indicates this is a method pointer not an --// index. --// _flags = field type info (f section), --// virtual final entry (vf), --// interpreter entry index (eidx section), --// parameter size (psze section) -+// _f1 = methodOop for non-virtual calls, unused by virtual calls. -+// for interface calls, which are essentially virtual but need a klass, -+// contains klassOop for the corresponding interface. -+// for invokedynamic, f1 contains a site-specific CallSite object (as an appendix) -+// for invokehandle, f1 contains a site-specific MethodType object (as an appendix) -+// (upcoming metadata changes will move the appendix to a separate array) -+// _f2 = vtable/itable index (or final methodOop) for virtual calls only, -+// unused by non-virtual. The is_vfinal flag indicates this is a -+// method pointer for a final method, not an index. -+// _flags = method type info (t section), -+// virtual final bit (vfinal), -+// parameter size (psize section) - // - // Note: invokevirtual & invokespecial bytecodes can share the same constant - // pool entry and thus the same constant pool cache entry. 
All invoke -@@ -138,30 +139,61 @@ - assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change"); - oop_store(&_f1, f1); - } -- void set_f1_if_null_atomic(oop f1); -- void set_f2(intx f2) { assert(_f2 == 0 || _f2 == f2, "illegal field change"); _f2 = f2; } -- int as_flags(TosState state, bool is_final, bool is_vfinal, bool is_volatile, -- bool is_method_interface, bool is_method); -+ void release_set_f1(oop f1); -+ void set_f2(intx f2) { assert(_f2 == 0 || _f2 == f2, "illegal field change"); _f2 = f2; } -+ void set_f2_as_vfinal_method(methodOop f2) { assert(_f2 == 0 || _f2 == (intptr_t) f2, "illegal field change"); assert(is_vfinal(), "flags must be set"); _f2 = (intptr_t) f2; } -+ int make_flags(TosState state, int option_bits, int field_index_or_method_params); - void set_flags(intx flags) { _flags = flags; } -+ bool init_flags_atomic(intx flags); -+ void set_field_flags(TosState field_type, int option_bits, int field_index) { -+ assert((field_index & field_index_mask) == field_index, "field_index in range"); -+ set_flags(make_flags(field_type, option_bits | (1 << is_field_entry_shift), field_index)); -+ } -+ void set_method_flags(TosState return_type, int option_bits, int method_params) { -+ assert((method_params & parameter_size_mask) == method_params, "method_params in range"); -+ set_flags(make_flags(return_type, option_bits, method_params)); -+ } -+ bool init_method_flags_atomic(TosState return_type, int option_bits, int method_params) { -+ assert((method_params & parameter_size_mask) == method_params, "method_params in range"); -+ return init_flags_atomic(make_flags(return_type, option_bits, method_params)); -+ } - - public: -- // specific bit values in flag field -- // Note: the interpreter knows this layout! 
-- enum FlagBitValues { -- hotSwapBit = 23, -- methodInterface = 24, -- volatileField = 25, -- vfinalMethod = 26, -- finalField = 27 -+ // specific bit definitions for the flags field: -+ // (Note: the interpreter must use these definitions to access the CP cache.) -+ enum { -+ // high order bits are the TosState corresponding to field type or method return type -+ tos_state_bits = 4, -+ tos_state_mask = right_n_bits(tos_state_bits), -+ tos_state_shift = BitsPerInt - tos_state_bits, // see verify_tos_state_shift below -+ // misc. option bits; can be any bit position in [16..27] -+ is_vfinal_shift = 21, -+ is_volatile_shift = 22, -+ is_final_shift = 23, -+ has_appendix_shift = 24, -+ is_forced_virtual_shift = 25, -+ is_field_entry_shift = 26, -+ // low order bits give field index (for FieldInfo) or method parameter size: -+ field_index_bits = 16, -+ field_index_mask = right_n_bits(field_index_bits), -+ parameter_size_bits = 8, // subset of field_index_mask, range is 0..255 -+ parameter_size_mask = right_n_bits(parameter_size_bits), -+ option_bits_mask = ~(((-1) << tos_state_shift) | (field_index_mask | parameter_size_mask)) - }; - -- enum { field_index_mask = 0xFFFF }; -+ // specific bit definitions for the indices field: -+ enum { -+ main_cp_index_bits = 2*BitsPerByte, -+ main_cp_index_mask = right_n_bits(main_cp_index_bits), -+ bytecode_1_shift = main_cp_index_bits, -+ bytecode_1_mask = right_n_bits(BitsPerByte), // == (u1)0xFF -+ bytecode_2_shift = main_cp_index_bits + BitsPerByte, -+ bytecode_2_mask = right_n_bits(BitsPerByte), // == (u1)0xFF -+ // the secondary cp index overlaps with bytecodes 1 and 2: -+ secondary_cp_index_shift = bytecode_1_shift, -+ secondary_cp_index_bits = BitsPerInt - main_cp_index_bits -+ }; - -- // start of type bits in flags -- // Note: the interpreter knows this layout! 
-- enum FlagValues { -- tosBits = 28 -- }; - - // Initialization - void initialize_entry(int original_index); // initialize primary entry -@@ -189,30 +221,40 @@ - int index // Method index into interface - ); - -- void set_dynamic_call( -- Handle call_site, // Resolved java.lang.invoke.CallSite (f1) -- methodHandle signature_invoker // determines signature information -+ void set_method_handle( -+ methodHandle method, // adapter for invokeExact, etc. -+ Handle appendix // stored in f1; could be a java.lang.invoke.MethodType - ); - -- methodOop get_method_if_resolved(Bytecodes::Code invoke_code, constantPoolHandle cpool); -+ void set_dynamic_call( -+ methodHandle method, // adapter for this call site -+ Handle appendix // stored in f1; could be a java.lang.invoke.CallSite -+ ); - -- // For JVM_CONSTANT_InvokeDynamic cache entries: -- void initialize_bootstrap_method_index_in_cache(int bsm_cache_index); -- int bootstrap_method_index_in_cache(); -+ // Common code for invokedynamic and MH invocations. - -- void set_parameter_size(int value) { -- assert(parameter_size() == 0 || parameter_size() == value, -- "size must not change"); -- // Setting the parameter size by itself is only safe if the -- // current value of _flags is 0, otherwise another thread may have -- // updated it and we don't want to overwrite that value. Don't -- // bother trying to update it once it's nonzero but always make -- // sure that the final parameter size agrees with what was passed. -- if (_flags == 0) { -- Atomic::cmpxchg_ptr((value & 0xFF), &_flags, 0); -- } -- guarantee(parameter_size() == value, "size must not change"); -- } -+ // The "appendix" is an optional call-site-specific parameter which is -+ // pushed by the JVM at the end of the argument list. This argument may -+ // be a MethodType for the MH.invokes and a CallSite for an invokedynamic -+ // instruction. 
However, its exact type and use depends on the Java upcall, -+ // which simply returns a compiled LambdaForm along with any reference -+ // that LambdaForm needs to complete the call. If the upcall returns a -+ // null appendix, the argument is not passed at all. -+ // -+ // The appendix is *not* represented in the signature of the symbolic -+ // reference for the call site, but (if present) it *is* represented in -+ // the methodOop bound to the site. This means that static and dynamic -+ // resolution logic needs to make slightly different assessments about the -+ // number and types of arguments. -+ void set_method_handle_common( -+ Bytecodes::Code invoke_code, // _invokehandle or _invokedynamic -+ methodHandle adapter, // invoker method (f2) -+ Handle appendix // appendix such as CallSite, MethodType, etc. (f1) -+ ); -+ -+ methodOop method_if_resolved(constantPoolHandle cpool); -+ -+ void set_parameter_size(int value); - - // Which bytecode number (1 or 2) in the index field is valid for this bytecode? - // Returns -1 if neither is valid. 
-@@ -222,10 +264,11 @@ - case Bytecodes::_getfield : // fall through - case Bytecodes::_invokespecial : // fall through - case Bytecodes::_invokestatic : // fall through -- case Bytecodes::_invokedynamic : // fall through - case Bytecodes::_invokeinterface : return 1; - case Bytecodes::_putstatic : // fall through - case Bytecodes::_putfield : // fall through -+ case Bytecodes::_invokehandle : // fall through -+ case Bytecodes::_invokedynamic : // fall through - case Bytecodes::_invokevirtual : return 2; - default : break; - } -@@ -242,31 +285,43 @@ - } - - // Accessors -- bool is_secondary_entry() const { return (_indices & 0xFFFF) == 0; } -- int constant_pool_index() const { assert((_indices & 0xFFFF) != 0, "must be main entry"); -- return (_indices & 0xFFFF); } -- int main_entry_index() const { assert((_indices & 0xFFFF) == 0, "must be secondary entry"); -- return ((uintx)_indices >> 16); } -- Bytecodes::Code bytecode_1() const { return Bytecodes::cast((_indices >> 16) & 0xFF); } -- Bytecodes::Code bytecode_2() const { return Bytecodes::cast((_indices >> 24) & 0xFF); } -- volatile oop f1() const { return _f1; } -- bool is_f1_null() const { return (oop)_f1 == NULL; } // classifies a CPC entry as unbound -- intx f2() const { return _f2; } -- int field_index() const; -- int parameter_size() const { return _flags & 0xFF; } -- bool is_vfinal() const { return ((_flags & (1 << vfinalMethod)) == (1 << vfinalMethod)); } -- bool is_volatile() const { return ((_flags & (1 << volatileField)) == (1 << volatileField)); } -- bool is_methodInterface() const { return ((_flags & (1 << methodInterface)) == (1 << methodInterface)); } -- bool is_byte() const { return (((uintx) _flags >> tosBits) == btos); } -- bool is_char() const { return (((uintx) _flags >> tosBits) == ctos); } -- bool is_short() const { return (((uintx) _flags >> tosBits) == stos); } -- bool is_int() const { return (((uintx) _flags >> tosBits) == itos); } -- bool is_long() const { return (((uintx) _flags >> 
tosBits) == ltos); } -- bool is_float() const { return (((uintx) _flags >> tosBits) == ftos); } -- bool is_double() const { return (((uintx) _flags >> tosBits) == dtos); } -- bool is_object() const { return (((uintx) _flags >> tosBits) == atos); } -- TosState flag_state() const { assert( ( (_flags >> tosBits) & 0x0F ) < number_of_states, "Invalid state in as_flags"); -- return (TosState)((_flags >> tosBits) & 0x0F); } -+ bool is_secondary_entry() const { return (_indices & main_cp_index_mask) == 0; } -+ int main_entry_index() const { assert(is_secondary_entry(), "must be secondary entry"); -+ return ((uintx)_indices >> secondary_cp_index_shift); } -+ int primary_entry_indices() const { assert(!is_secondary_entry(), "must be main entry"); -+ return _indices; } -+ int constant_pool_index() const { return (primary_entry_indices() & main_cp_index_mask); } -+ Bytecodes::Code bytecode_1() const { return Bytecodes::cast((primary_entry_indices() >> bytecode_1_shift) -+ & bytecode_1_mask); } -+ Bytecodes::Code bytecode_2() const { return Bytecodes::cast((primary_entry_indices() >> bytecode_2_shift) -+ & bytecode_2_mask); } -+ methodOop f1_as_method() const { oop f1 = _f1; assert(f1 == NULL || f1->is_method(), ""); return methodOop(f1); } -+ klassOop f1_as_klass() const { oop f1 = _f1; assert(f1 == NULL || f1->is_klass(), ""); return klassOop(f1); } -+ oop f1_as_klass_mirror() const { oop f1 = f1_as_instance(); return f1; } // i.e., return a java_mirror -+ oop f1_as_instance() const { oop f1 = _f1; assert(f1 == NULL || f1->is_instance() || f1->is_array(), ""); return f1; } -+ oop f1_appendix() const { assert(has_appendix(), ""); return f1_as_instance(); } -+ bool is_f1_null() const { oop f1 = _f1; return f1 == NULL; } // classifies a CPC entry as unbound -+ int f2_as_index() const { assert(!is_vfinal(), ""); return (int) _f2; } -+ methodOop f2_as_vfinal_method() const { assert(is_vfinal(), ""); return methodOop(_f2); } -+ int field_index() const { assert(is_field_entry(), 
""); return (_flags & field_index_mask); } -+ int parameter_size() const { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); } -+ bool is_volatile() const { return (_flags & (1 << is_volatile_shift)) != 0; } -+ bool is_final() const { return (_flags & (1 << is_final_shift)) != 0; } -+ bool has_appendix() const { return (_flags & (1 << has_appendix_shift)) != 0; } -+ bool is_forced_virtual() const { return (_flags & (1 << is_forced_virtual_shift)) != 0; } -+ bool is_vfinal() const { return (_flags & (1 << is_vfinal_shift)) != 0; } -+ bool is_method_entry() const { return (_flags & (1 << is_field_entry_shift)) == 0; } -+ bool is_field_entry() const { return (_flags & (1 << is_field_entry_shift)) != 0; } -+ bool is_byte() const { return flag_state() == btos; } -+ bool is_char() const { return flag_state() == ctos; } -+ bool is_short() const { return flag_state() == stos; } -+ bool is_int() const { return flag_state() == itos; } -+ bool is_long() const { return flag_state() == ltos; } -+ bool is_float() const { return flag_state() == ftos; } -+ bool is_double() const { return flag_state() == dtos; } -+ bool is_object() const { return flag_state() == atos; } -+ TosState flag_state() const { assert((uint)number_of_states <= (uint)tos_state_mask+1, ""); -+ return (TosState)((_flags >> tos_state_shift) & tos_state_mask); } - - // Code generation support - static WordSize size() { return in_WordSize(sizeof(ConstantPoolCacheEntry) / HeapWordSize); } -@@ -300,15 +355,14 @@ - bool * trace_name_printed); - bool check_no_old_or_obsolete_entries(); - bool is_interesting_method_entry(klassOop k); -- bool is_field_entry() const { return (_flags & (1 << hotSwapBit)) == 0; } -- bool is_method_entry() const { return (_flags & (1 << hotSwapBit)) != 0; } - - // Debugging & Printing - void print (outputStream* st, int index) const; - void verify(outputStream* st) const; - -- static void verify_tosBits() { -- assert(tosBits == 28, "interpreter now assumes tosBits is 
28"); -+ static void verify_tos_state_shift() { -+ // When shifting flags as a 32-bit int, make sure we don't need an extra mask for tos_state: -+ assert((((u4)-1 >> tos_state_shift) & ~tos_state_mask) == 0, "no need for tos_state mask"); - } - }; - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/generateOopMap.cpp ---- openjdk/hotspot/src/share/vm/oops/generateOopMap.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/oops/generateOopMap.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -31,6 +31,7 @@ - #include "runtime/java.hpp" - #include "runtime/relocator.hpp" - #include "utilities/bitMap.inline.hpp" -+#include "prims/methodHandles.hpp" - - // - // -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/instanceKlass.cpp ---- openjdk/hotspot/src/share/vm/oops/instanceKlass.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/oops/instanceKlass.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -2389,6 +2389,22 @@ - } else if (java_lang_boxing_object::is_instance(obj)) { - st->print(" = "); - java_lang_boxing_object::print(obj, st); -+ } else if (as_klassOop() == SystemDictionary::LambdaForm_klass()) { -+ oop vmentry = java_lang_invoke_LambdaForm::vmentry(obj); -+ if (vmentry != NULL) { -+ st->print(" => "); -+ vmentry->print_value_on(st); -+ } -+ } else if (as_klassOop() == SystemDictionary::MemberName_klass()) { -+ oop vmtarget = java_lang_invoke_MemberName::vmtarget(obj); -+ if (vmtarget != NULL) { -+ st->print(" = "); -+ vmtarget->print_value_on(st); -+ } else { -+ java_lang_invoke_MemberName::clazz(obj)->print_value_on(st); -+ st->print("."); -+ java_lang_invoke_MemberName::name(obj)->print_value_on(st); -+ } - } - } - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/methodKlass.cpp ---- openjdk/hotspot/src/share/vm/oops/methodKlass.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/oops/methodKlass.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -250,7 +250,11 @@ - st->print_cr(" - highest level: %d", 
m->highest_comp_level()); - st->print_cr(" - vtable index: %d", m->_vtable_index); - st->print_cr(" - i2i entry: " INTPTR_FORMAT, m->interpreter_entry()); -- st->print_cr(" - adapter: " INTPTR_FORMAT, m->adapter()); -+ st->print( " - adapters: "); -+ if (m->adapter() == NULL) -+ st->print_cr(INTPTR_FORMAT, m->adapter()); -+ else -+ m->adapter()->print_adapter_on(st); - st->print_cr(" - compiled entry " INTPTR_FORMAT, m->from_compiled_entry()); - st->print_cr(" - code size: %d", m->code_size()); - if (m->code_size() != 0) { -@@ -298,13 +302,8 @@ - if (m->code() != NULL) { - st->print (" - compiled code: "); - m->code()->print_value_on(st); -- st->cr(); - } -- if (m->is_method_handle_invoke()) { -- st->print_cr(" - invoke method type: " INTPTR_FORMAT, (address) m->method_handle_type()); -- // m is classified as native, but it does not have an interesting -- // native_function or signature handler -- } else if (m->is_native()) { -+ if (m->is_native()) { - st->print_cr(" - native function: " INTPTR_FORMAT, m->native_function()); - st->print_cr(" - signature handler: " INTPTR_FORMAT, m->signature_handler()); - } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/methodOop.cpp ---- openjdk/hotspot/src/share/vm/oops/methodOop.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/oops/methodOop.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -40,7 +40,7 @@ - #include "oops/oop.inline.hpp" - #include "oops/symbol.hpp" - #include "prims/jvmtiExport.hpp" --#include "prims/methodHandleWalk.hpp" -+#include "prims/methodHandles.hpp" - #include "prims/nativeLookup.hpp" - #include "runtime/arguments.hpp" - #include "runtime/compilationPolicy.hpp" -@@ -556,6 +556,7 @@ - - void methodOopDesc::set_native_function(address function, bool post_event_flag) { - assert(function != NULL, "use clear_native_function to unregister natives"); -+ assert(!is_method_handle_intrinsic() || function == SharedRuntime::native_method_throw_unsatisfied_link_error_entry(), ""); - address* 
native_function = native_function_addr(); - - // We can see racers trying to place the same native function into place. Once -@@ -585,12 +586,14 @@ - - - bool methodOopDesc::has_native_function() const { -+ assert(!is_method_handle_intrinsic(), ""); - address func = native_function(); - return (func != NULL && func != SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); - } - - - void methodOopDesc::clear_native_function() { -+ // Note: is_method_handle_intrinsic() is allowed here. - set_native_function( - SharedRuntime::native_method_throw_unsatisfied_link_error_entry(), - !native_bind_event_is_interesting); -@@ -610,10 +613,6 @@ - - - bool methodOopDesc::is_not_compilable(int comp_level) const { -- if (is_method_handle_invoke()) { -- // compilers must recognize this method specially, or not at all -- return true; -- } - if (number_of_breakpoints() > 0) { - return true; - } -@@ -713,7 +712,7 @@ - assert(entry != NULL, "interpreter entry must be non-null"); - // Sets both _i2i_entry and _from_interpreted_entry - set_interpreter_entry(entry); -- if (is_native() && !is_method_handle_invoke()) { -+ if (is_native() && !is_method_handle_intrinsic()) { - set_native_function( - SharedRuntime::native_method_throw_unsatisfied_link_error_entry(), - !native_bind_event_is_interesting); -@@ -801,13 +800,13 @@ - OrderAccess::storestore(); - #ifdef SHARK - mh->_from_interpreted_entry = code->insts_begin(); --#else -+#else //!SHARK - mh->_from_compiled_entry = code->verified_entry_point(); - OrderAccess::storestore(); - // Instantly compiled code can execute. 
-- mh->_from_interpreted_entry = mh->get_i2c_entry(); --#endif // SHARK -- -+ if (!mh->is_method_handle_intrinsic()) -+ mh->_from_interpreted_entry = mh->get_i2c_entry(); -+#endif //!SHARK - } - - -@@ -859,104 +858,51 @@ - return false; - } - --bool methodOopDesc::is_method_handle_invoke_name(vmSymbols::SID name_sid) { -- switch (name_sid) { -- case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name): -- case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name): -- return true; -- } -- if (AllowInvokeGeneric -- && name_sid == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name)) -- return true; -- return false; --} -- - // Constant pool structure for invoke methods: - enum { -- _imcp_invoke_name = 1, // utf8: 'invokeExact' or 'invokeGeneric' -+ _imcp_invoke_name = 1, // utf8: 'invokeExact', etc. - _imcp_invoke_signature, // utf8: (variable Symbol*) -- _imcp_method_type_value, // string: (variable java/lang/invoke/MethodType, sic) - _imcp_limit - }; - --oop methodOopDesc::method_handle_type() const { -- if (!is_method_handle_invoke()) { assert(false, "caller resp."); return NULL; } -- oop mt = constants()->resolved_string_at(_imcp_method_type_value); -- assert(mt->klass() == SystemDictionary::MethodType_klass(), ""); -- return mt; -+// Test if this method is an MH adapter frame generated by Java code. -+// Cf. java/lang/invoke/InvokerBytecodeGenerator -+bool methodOopDesc::is_compiled_lambda_form() const { -+ return intrinsic_id() == vmIntrinsics::_compiledLambdaForm; - } - --jint* methodOopDesc::method_type_offsets_chain() { -- static jint pchase[] = { -1, -1, -1 }; -- if (pchase[0] == -1) { -- jint step0 = in_bytes(constants_offset()); -- jint step1 = (constantPoolOopDesc::header_size() + _imcp_method_type_value) * HeapWordSize; -- // do this in reverse to avoid races: -- OrderAccess::release_store(&pchase[1], step1); -- OrderAccess::release_store(&pchase[0], step0); -- } -- return pchase; -+// Test if this method is an internal MH primitive method. 
-+bool methodOopDesc::is_method_handle_intrinsic() const { -+ vmIntrinsics::ID iid = intrinsic_id(); -+ return (MethodHandles::is_signature_polymorphic(iid) && -+ MethodHandles::is_signature_polymorphic_intrinsic(iid)); - } - --//------------------------------------------------------------------------------ --// methodOopDesc::is_method_handle_adapter --// --// Tests if this method is an internal adapter frame from the --// MethodHandleCompiler. --// Must be consistent with MethodHandleCompiler::get_method_oop(). --bool methodOopDesc::is_method_handle_adapter() const { -- if (is_synthetic() && -- !is_native() && // has code from MethodHandleCompiler -- is_method_handle_invoke_name(name()) && -- MethodHandleCompiler::klass_is_method_handle_adapter_holder(method_holder())) { -- assert(!is_method_handle_invoke(), "disjoint"); -- return true; -- } else { -- return false; -- } -+bool methodOopDesc::has_member_arg() const { -+ vmIntrinsics::ID iid = intrinsic_id(); -+ return (MethodHandles::is_signature_polymorphic(iid) && -+ MethodHandles::has_member_arg(iid)); - } - --methodHandle methodOopDesc::make_invoke_method(KlassHandle holder, -- Symbol* name, -- Symbol* signature, -- Handle method_type, TRAPS) { -+// Make an instance of a signature-polymorphic internal MH primitive. 
-+methodHandle methodOopDesc::make_method_handle_intrinsic(vmIntrinsics::ID iid, -+ Symbol* signature, -+ TRAPS) { - ResourceMark rm; - methodHandle empty; - -- assert(holder() == SystemDictionary::MethodHandle_klass(), -- "must be a JSR 292 magic type"); -- -+ KlassHandle holder = SystemDictionary::MethodHandle_klass(); -+ Symbol* name = MethodHandles::signature_polymorphic_intrinsic_name(iid); -+ assert(iid == MethodHandles::signature_polymorphic_name_id(name), ""); - if (TraceMethodHandles) { -- tty->print("Creating invoke method for "); -- signature->print_value(); -- tty->cr(); -+ tty->print_cr("make_method_handle_intrinsic MH.%s%s", name->as_C_string(), signature->as_C_string()); - } - - // invariant: cp->symbol_at_put is preceded by a refcount increment (more usually a lookup) - name->increment_refcount(); - signature->increment_refcount(); - -- // record non-BCP method types in the constant pool -- GrowableArray* extra_klasses = NULL; -- for (int i = -1, len = java_lang_invoke_MethodType::ptype_count(method_type()); i < len; i++) { -- oop ptype = (i == -1 -- ? java_lang_invoke_MethodType::rtype(method_type()) -- : java_lang_invoke_MethodType::ptype(method_type(), i)); -- klassOop klass = check_non_bcp_klass(java_lang_Class::as_klassOop(ptype)); -- if (klass != NULL) { -- if (extra_klasses == NULL) -- extra_klasses = new GrowableArray(len+1); -- bool dup = false; -- for (int j = 0; j < extra_klasses->length(); j++) { -- if (extra_klasses->at(j) == klass) { dup = true; break; } -- } -- if (!dup) -- extra_klasses->append(KlassHandle(THREAD, klass)); -- } -- } -- -- int extra_klass_count = (extra_klasses == NULL ? 
0 : extra_klasses->length()); -- int cp_length = _imcp_limit + extra_klass_count; -+ int cp_length = _imcp_limit; - constantPoolHandle cp; - { - constantPoolOop cp_oop = oopFactory::new_constantPool(cp_length, IsSafeConc, CHECK_(empty)); -@@ -964,19 +910,17 @@ - } - cp->symbol_at_put(_imcp_invoke_name, name); - cp->symbol_at_put(_imcp_invoke_signature, signature); -- cp->string_at_put(_imcp_method_type_value, Universe::the_null_string()); -- for (int j = 0; j < extra_klass_count; j++) { -- KlassHandle klass = extra_klasses->at(j); -- cp->klass_at_put(_imcp_limit + j, klass()); -- } - cp->set_preresolution(); - cp->set_pool_holder(holder()); - -- // set up the fancy stuff: -- cp->pseudo_string_at_put(_imcp_method_type_value, method_type()); -+ // decide on access bits: public or not? -+ int flags_bits = (JVM_ACC_NATIVE | JVM_ACC_SYNTHETIC | JVM_ACC_FINAL); -+ bool must_be_static = MethodHandles::is_signature_polymorphic_static(iid); -+ if (must_be_static) flags_bits |= JVM_ACC_STATIC; -+ assert((flags_bits & JVM_ACC_PUBLIC) == 0, "do not expose these methods"); -+ - methodHandle m; - { -- int flags_bits = (JVM_MH_INVOKE_BITS | JVM_ACC_PUBLIC | JVM_ACC_FINAL); - methodOop m_oop = oopFactory::new_method(0, accessFlags_from(flags_bits), - 0, 0, 0, IsSafeConc, CHECK_(empty)); - m = methodHandle(THREAD, m_oop); -@@ -984,9 +928,8 @@ - m->set_constants(cp()); - m->set_name_index(_imcp_invoke_name); - m->set_signature_index(_imcp_invoke_signature); -- assert(is_method_handle_invoke_name(m->name()), ""); -+ assert(MethodHandles::is_signature_polymorphic_name(m->name()), ""); - assert(m->signature() == signature, ""); -- assert(m->is_method_handle_invoke(), ""); - #ifdef CC_INTERP - ResultTypeFinder rtf(signature); - m->set_result_index(rtf.type()); -@@ -994,24 +937,18 @@ - m->compute_size_of_parameters(THREAD); - m->set_exception_table(Universe::the_empty_int_array()); - m->init_intrinsic_id(); -- assert(m->intrinsic_id() == vmIntrinsics::_invokeExact || -- m->intrinsic_id() 
== vmIntrinsics::_invokeGeneric, "must be an invoker"); -+ assert(m->is_method_handle_intrinsic(), ""); -+#ifdef ASSERT -+ if (!MethodHandles::is_signature_polymorphic(m->intrinsic_id())) m->print(); -+ assert(MethodHandles::is_signature_polymorphic(m->intrinsic_id()), "must be an invoker"); -+ assert(m->intrinsic_id() == iid, "correctly predicted iid"); -+#endif //ASSERT - - // Finally, set up its entry points. -- assert(m->method_handle_type() == method_type(), ""); - assert(m->can_be_statically_bound(), ""); - m->set_vtable_index(methodOopDesc::nonvirtual_vtable_index); - m->link_method(m, CHECK_(empty)); - --#ifdef ASSERT -- // Make sure the pointer chase works. -- address p = (address) m(); -- for (jint* pchase = method_type_offsets_chain(); (*pchase) != -1; pchase++) { -- p = *(address*)(p + (*pchase)); -- } -- assert((oop)p == method_type(), "pointer chase is correct"); --#endif -- - if (TraceMethodHandles && (Verbose || WizardMode)) - m->print_on(tty); - -@@ -1028,7 +965,7 @@ - } - - --methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length, -+methodHandle methodOopDesc::clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length, - u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS) { - // Code below does not work for native methods - they should never get rewritten anyway - assert(!m->is_native(), "cannot rewrite native methods"); -@@ -1138,7 +1075,9 @@ - - // ditto for method and signature: - vmSymbols::SID name_id = vmSymbols::find_sid(name()); -- if (name_id == vmSymbols::NO_SID) return; -+ if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle) -+ && name_id == vmSymbols::NO_SID) -+ return; - vmSymbols::SID sig_id = vmSymbols::find_sid(signature()); - if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle) - && sig_id == vmSymbols::NO_SID) return; -@@ -1167,21 +1106,10 @@ - - // Signature-polymorphic methods: 
MethodHandle.invoke*, InvokeDynamic.*. - case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle): -- if (is_static() || !is_native()) break; -- switch (name_id) { -- case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name): -- if (!AllowInvokeGeneric) break; -- case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name): -- id = vmIntrinsics::_invokeGeneric; -- break; -- case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name): -- id = vmIntrinsics::_invokeExact; -- break; -- } -- break; -- case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_InvokeDynamic): -- if (!is_static() || !is_native()) break; -- id = vmIntrinsics::_invokeDynamic; -+ if (!is_native()) break; -+ id = MethodHandles::signature_polymorphic_name_id(method_holder(), name()); -+ if (is_static() != MethodHandles::is_signature_polymorphic_static(id)) -+ id = vmIntrinsics::_none; - break; - } - -@@ -1194,6 +1122,12 @@ - - // These two methods are static since a GC may move the methodOopDesc - bool methodOopDesc::load_signature_classes(methodHandle m, TRAPS) { -+ if (THREAD->is_Compiler_thread()) { -+ // There is nothing useful this routine can do from within the Compile thread. -+ // Hopefully, the signature contains only well-known classes. -+ // We could scan for this and return true/false, but the caller won't care. 
-+ return false; -+ } - bool sig_is_loaded = true; - Handle class_loader(THREAD, instanceKlass::cast(m->method_holder())->class_loader()); - Handle protection_domain(THREAD, Klass::cast(m->method_holder())->protection_domain()); -@@ -1247,6 +1181,8 @@ - #endif - name()->print_symbol_on(st); - if (WizardMode) signature()->print_symbol_on(st); -+ else if (MethodHandles::is_signature_polymorphic(intrinsic_id())) -+ MethodHandles::print_as_basic_type_signature_on(st, signature(), true); - } - - // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/methodOop.hpp ---- openjdk/hotspot/src/share/vm/oops/methodOop.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/oops/methodOop.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -125,7 +125,10 @@ - u2 _size_of_parameters; // size of the parameter block (receiver + arguments) in words - u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none) - u1 _jfr_towrite : 1, // Flags -- : 7; -+ _force_inline : 1, -+ _hidden : 1, -+ _dont_inline : 1, -+ : 4; - u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting - u2 _number_of_breakpoints; // fullspeed debugging support - InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations -@@ -246,7 +249,7 @@ - void set_constants(constantPoolOop c) { oop_store_without_check((oop*)&_constants, c); } - - // max stack -- int max_stack() const { return _max_stack; } -+ int max_stack() const { return _max_stack + extra_stack_entries(); } - void set_max_stack(int size) { _max_stack = size; } - - // max locals -@@ -592,28 +595,19 @@ - bool is_overridden_in(klassOop k) const; - - // JSR 292 support -- bool is_method_handle_invoke() const { return access_flags().is_method_handle_invoke(); } -- static bool is_method_handle_invoke_name(vmSymbols::SID name_sid); -- 
static bool is_method_handle_invoke_name(Symbol* name) { -- return is_method_handle_invoke_name(vmSymbols::find_sid(name)); -- } -- // Tests if this method is an internal adapter frame from the -- // MethodHandleCompiler. -- bool is_method_handle_adapter() const; -- static methodHandle make_invoke_method(KlassHandle holder, -- Symbol* name, //invokeExact or invokeGeneric -- Symbol* signature, //anything at all -- Handle method_type, -- TRAPS); -+ bool is_method_handle_intrinsic() const; // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id) -+ bool is_compiled_lambda_form() const; // intrinsic_id() == vmIntrinsics::_compiledLambdaForm -+ bool has_member_arg() const; // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc. -+ static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual -+ Symbol* signature, //anything at all -+ TRAPS); - static klassOop check_non_bcp_klass(klassOop klass); - // these operate only on invoke methods: -- oop method_handle_type() const; -- static jint* method_type_offsets_chain(); // series of pointer-offsets, terminated by -1 - // presize interpreter frames for extra interpreter stack entries, if needed - // method handles want to be able to push a few extra values (e.g., a bound receiver), and - // invokedynamic sometimes needs to push a bootstrap method, call site, and arglist, - // all without checking for a stack overflow -- static int extra_stack_entries() { return EnableInvokeDynamic ? (int) MethodHandlePushLimit + 3 : 0; } -+ static int extra_stack_entries() { return EnableInvokeDynamic ? 
2 : 0; } - static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize() - - // RedefineClasses() support: -@@ -658,6 +652,13 @@ - bool jfr_towrite() { return _jfr_towrite; } - void set_jfr_towrite(bool towrite) { _jfr_towrite = towrite; } - -+ bool force_inline() { return _force_inline; } -+ void set_force_inline(bool x) { _force_inline = x; } -+ bool dont_inline() { return _dont_inline; } -+ void set_dont_inline(bool x) { _dont_inline = x; } -+ bool is_hidden() { return _hidden; } -+ void set_hidden(bool x) { _hidden = x; } -+ - // On-stack replacement support - bool has_osr_nmethod(int level, bool match_level) { - return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL; -@@ -703,8 +704,8 @@ - static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS); - - // Printing -- void print_short_name(outputStream* st); // prints as klassname::methodname; Exposed so field engineers can debug VM -- void print_name(outputStream* st); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses -+ void print_short_name(outputStream* st = tty); // prints as klassname::methodname; Exposed so field engineers can debug VM -+ void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses - - // Helper routine used for method sorting - static void sort_methods(objArrayOop methods, -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/oops/symbol.cpp ---- openjdk/hotspot/src/share/vm/oops/symbol.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/oops/symbol.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -86,7 +86,7 @@ - address scan = bytes + i; - if (scan > limit) - return -1; -- for (;;) { -+ for (; scan <= limit; scan++) { - scan = (address) memchr(scan, first_char, (limit + 1 - scan)); - if (scan == NULL) - return -1; // not found -@@ -94,6 +94,7 @@ - if (memcmp(scan, str, len) == 0) - return (int)(scan - 
bytes); - } -+ return -1; - } - - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/bytecodeInfo.cpp ---- openjdk/hotspot/src/share/vm/opto/bytecodeInfo.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/opto/bytecodeInfo.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -93,7 +93,7 @@ - ); - } - --// positive filter: should send be inlined? returns NULL, if yes, or rejection msg -+// positive filter: should callee be inlined? returns NULL, if yes, or rejection msg - const char* InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const { - // Allows targeted inlining - if(callee_method->should_inline()) { -@@ -131,33 +131,6 @@ - int call_site_count = method()->scale_count(profile.count()); - int invoke_count = method()->interpreter_invocation_count(); - -- // Bytecoded method handle adapters do not have interpreter -- // profiling data but only made up MDO data. Get the counter from -- // there. 
-- if (caller_method->is_method_handle_adapter()) { -- assert(method()->method_data_or_null(), "must have an MDO"); -- ciMethodData* mdo = method()->method_data(); -- ciProfileData* mha_profile = mdo->bci_to_data(caller_bci); -- assert(mha_profile, "must exist"); -- CounterData* cd = mha_profile->as_CounterData(); -- invoke_count = cd->count(); -- if (invoke_count == 0) { -- return "method handle not reached"; -- } -- -- if (_caller_jvms != NULL && _caller_jvms->method() != NULL && -- _caller_jvms->method()->method_data() != NULL && -- !_caller_jvms->method()->method_data()->is_empty()) { -- ciMethodData* mdo = _caller_jvms->method()->method_data(); -- ciProfileData* mha_profile = mdo->bci_to_data(_caller_jvms->bci()); -- assert(mha_profile, "must exist"); -- CounterData* cd = mha_profile->as_CounterData(); -- call_site_count = cd->count(); -- } else { -- call_site_count = invoke_count; // use the same value -- } -- } -- - assert(invoke_count != 0, "require invocation count greater than zero"); - int freq = call_site_count / invoke_count; - -@@ -189,15 +162,16 @@ - } - - --// negative filter: should send NOT be inlined? returns NULL, ok to inline, or rejection msg -+// negative filter: should callee NOT be inlined? returns NULL, ok to inline, or rejection msg - const char* InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const { - // negative filter: should send NOT be inlined? 
returns NULL (--> inline) or rejection msg - if (!UseOldInlining) { - const char* fail = NULL; -- if (callee_method->is_abstract()) fail = "abstract method"; -+ if ( callee_method->is_abstract()) fail = "abstract method"; - // note: we allow ik->is_abstract() -- if (!callee_method->holder()->is_initialized()) fail = "method holder not initialized"; -- if (callee_method->is_native()) fail = "native method"; -+ if (!callee_method->holder()->is_initialized()) fail = "method holder not initialized"; -+ if ( callee_method->is_native()) fail = "native method"; -+ if ( callee_method->dont_inline()) fail = "don't inline by annotation"; - - if (fail) { - *wci_result = *(WarmCallInfo::always_cold()); -@@ -217,7 +191,8 @@ - } - } - -- if (callee_method->has_compiled_code() && callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode) { -+ if (callee_method->has_compiled_code() && -+ callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode) { - wci_result->set_profit(wci_result->profit() * 0.1); - // %%% adjust wci_result->size()? - } -@@ -225,26 +200,25 @@ - return NULL; - } - -- // Always inline MethodHandle methods and generated MethodHandle adapters. 
-- if (callee_method->is_method_handle_invoke() || callee_method->is_method_handle_adapter()) -- return NULL; -+ // First check all inlining restrictions which are required for correctness -+ if ( callee_method->is_abstract()) return "abstract method"; -+ // note: we allow ik->is_abstract() -+ if (!callee_method->holder()->is_initialized()) return "method holder not initialized"; -+ if ( callee_method->is_native()) return "native method"; -+ if ( callee_method->dont_inline()) return "don't inline by annotation"; -+ if ( callee_method->has_unloaded_classes_in_signature()) return "unloaded signature classes"; - -- // First check all inlining restrictions which are required for correctness -- if (callee_method->is_abstract()) return "abstract method"; -- // note: we allow ik->is_abstract() -- if (!callee_method->holder()->is_initialized()) return "method holder not initialized"; -- if (callee_method->is_native()) return "native method"; -- if (callee_method->has_unloaded_classes_in_signature()) return "unloaded signature classes"; -- -- if (callee_method->should_inline()) { -+ if (callee_method->force_inline() || callee_method->should_inline()) { - // ignore heuristic controls on inlining - return NULL; - } - - // Now perform checks which are heuristic - -- if( callee_method->has_compiled_code() && callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode ) -+ if (callee_method->has_compiled_code() && -+ callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode) { - return "already compiled into a big method"; -+ } - - // don't inline exception code unless the top method belongs to an - // exception class -@@ -258,7 +232,7 @@ - } - - // use frequency-based objections only for non-trivial methods -- if (callee_method->code_size_for_inlining() <= MaxTrivialSize) return NULL; -+ if (callee_method->code_size() <= MaxTrivialSize) return NULL; - - // don't use counts with -Xcomp or CTW - if (UseInterpreter && !CompileTheWorld) { 
-@@ -319,7 +293,7 @@ - } - - // suppress a few checks for accessors and trivial methods -- if (callee_method->code_size_for_inlining() > MaxTrivialSize) { -+ if (callee_method->code_size() > MaxTrivialSize) { - - // don't inline into giant methods - if (C->unique() > (uint)NodeCountInliningCutoff) { -@@ -346,7 +320,7 @@ - } - - // detect direct and indirect recursive inlining -- { -+ if (!callee_method->is_compiled_lambda_form()) { - // count the current method and the callee - int inline_level = (method() == callee_method) ? 1 : 0; - if (inline_level > MaxRecursiveInlineLevel) -@@ -412,6 +386,7 @@ - const char* InlineTree::check_can_parse(ciMethod* callee) { - // Certain methods cannot be parsed at all: - if ( callee->is_native()) return "native method"; -+ if ( callee->is_abstract()) return "abstract method"; - if (!callee->can_be_compiled()) return "not compilable (disabled)"; - if (!callee->has_balanced_monitors()) return "not compilable (unbalanced monitors)"; - if ( callee->get_flow_analysis()->failing()) return "not compilable (flow analysis failed)"; -@@ -426,7 +401,7 @@ - if (Verbose && callee_method) { - const InlineTree *top = this; - while( top->caller_tree() != NULL ) { top = top->caller_tree(); } -- tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count()); -+ //tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count()); - } - } - -@@ -449,10 +424,7 @@ - - // Do some initial checks. 
- if (!pass_initial_checks(caller_method, caller_bci, callee_method)) { -- if (PrintInlining) { -- failure_msg = "failed_initial_checks"; -- print_inlining(callee_method, caller_bci, failure_msg); -- } -+ if (PrintInlining) print_inlining(callee_method, caller_bci, "failed initial checks"); - return NULL; - } - -@@ -539,9 +511,10 @@ - } - int max_inline_level_adjust = 0; - if (caller_jvms->method() != NULL) { -- if (caller_jvms->method()->is_method_handle_adapter()) -+ if (caller_jvms->method()->is_compiled_lambda_form()) - max_inline_level_adjust += 1; // don't count actions in MH or indy adapter frames -- else if (callee_method->is_method_handle_invoke()) { -+ else if (callee_method->is_method_handle_intrinsic() || -+ callee_method->is_compiled_lambda_form()) { - max_inline_level_adjust += 1; // don't count method handle calls from java.lang.invoke implem - } - if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) { -@@ -590,7 +563,7 @@ - // Given a jvms, which determines a call chain from the root method, - // find the corresponding inline tree. - // Note: This method will be removed or replaced as InlineTree goes away. --InlineTree* InlineTree::find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found) { -+InlineTree* InlineTree::find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee) { - InlineTree* iltp = root; - uint depth = jvms && jvms->has_method() ? jvms->depth() : 0; - for (uint d = 1; d <= depth; d++) { -@@ -599,12 +572,12 @@ - assert(jvmsp->method() == iltp->method(), "tree still in sync"); - ciMethod* d_callee = (d == depth) ? 
callee : jvms->of_depth(d+1)->method(); - InlineTree* sub = iltp->callee_at(jvmsp->bci(), d_callee); -- if (!sub) { -- if (create_if_not_found && d == depth) { -- return iltp->build_inline_tree_for_callee(d_callee, jvmsp, jvmsp->bci()); -+ if (sub == NULL) { -+ if (d == depth) { -+ sub = iltp->build_inline_tree_for_callee(d_callee, jvmsp, jvmsp->bci()); - } -- assert(sub != NULL, "should be a sub-ilt here"); -- return NULL; -+ guarantee(sub != NULL, "should be a sub-ilt here"); -+ return sub; - } - iltp = sub; - } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/callGenerator.cpp ---- openjdk/hotspot/src/share/vm/opto/callGenerator.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/opto/callGenerator.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -1,5 +1,5 @@ - /* -- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it -@@ -26,6 +26,7 @@ - #include "ci/bcEscapeAnalyzer.hpp" - #include "ci/ciCallSite.hpp" - #include "ci/ciCPCache.hpp" -+#include "ci/ciMemberName.hpp" - #include "ci/ciMethodHandle.hpp" - #include "classfile/javaClasses.hpp" - #include "compiler/compileLog.hpp" -@@ -39,9 +40,6 @@ - #include "opto/runtime.hpp" - #include "opto/subnode.hpp" - --CallGenerator::CallGenerator(ciMethod* method) { -- _method = method; --} - - // Utility function. 
- const TypeFunc* CallGenerator::tf() const { -@@ -147,7 +145,8 @@ - } - // Mark the call node as virtual, sort of: - call->set_optimized_virtual(true); -- if (method()->is_method_handle_invoke()) { -+ if (method()->is_method_handle_intrinsic() || -+ method()->is_compiled_lambda_form()) { - call->set_method_handle_invoke(true); - } - } -@@ -325,12 +324,13 @@ - - CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) { - assert(!m->is_static(), "for_virtual_call mismatch"); -- assert(!m->is_method_handle_invoke(), "should be a direct call"); -+ assert(!m->is_method_handle_intrinsic(), "should be a direct call"); - return new VirtualCallGenerator(m, vtable_index); - } - - CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) { -- assert(m->is_method_handle_invoke() || m->is_method_handle_adapter(), "for_dynamic_call mismatch"); -+ assert(m->is_compiled_lambda_form(), "for_dynamic_call mismatch"); -+ //@@ FIXME: this should be done via a direct call - return new DynamicCallGenerator(m); - } - -@@ -654,271 +654,95 @@ - } - - --//------------------------PredictedDynamicCallGenerator----------------------- --// Internal class which handles all out-of-line calls checking receiver type. 
--class PredictedDynamicCallGenerator : public CallGenerator { -- ciMethodHandle* _predicted_method_handle; -- CallGenerator* _if_missed; -- CallGenerator* _if_hit; -- float _hit_prob; -- --public: -- PredictedDynamicCallGenerator(ciMethodHandle* predicted_method_handle, -- CallGenerator* if_missed, -- CallGenerator* if_hit, -- float hit_prob) -- : CallGenerator(if_missed->method()), -- _predicted_method_handle(predicted_method_handle), -- _if_missed(if_missed), -- _if_hit(if_hit), -- _hit_prob(hit_prob) -- {} -- -- virtual bool is_inline() const { return _if_hit->is_inline(); } -- virtual bool is_deferred() const { return _if_hit->is_deferred(); } -- -- virtual JVMState* generate(JVMState* jvms); --}; -- -- --CallGenerator* CallGenerator::for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle, -- CallGenerator* if_missed, -- CallGenerator* if_hit, -- float hit_prob) { -- return new PredictedDynamicCallGenerator(predicted_method_handle, if_missed, if_hit, hit_prob); --} -- -- --CallGenerator* CallGenerator::for_method_handle_call(Node* method_handle, JVMState* jvms, -- ciMethod* caller, ciMethod* callee, ciCallProfile profile) { -- assert(callee->is_method_handle_invoke() || callee->is_method_handle_adapter(), "for_method_handle_call mismatch"); -- CallGenerator* cg = CallGenerator::for_method_handle_inline(method_handle, jvms, caller, callee, profile); -+CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee) { -+ assert(callee->is_method_handle_intrinsic() || -+ callee->is_compiled_lambda_form(), "for_method_handle_call mismatch"); -+ CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee); - if (cg != NULL) - return cg; - return CallGenerator::for_direct_call(callee); - } - --CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMState* jvms, -- ciMethod* caller, ciMethod* callee, ciCallProfile profile) { -- if (method_handle->Opcode() == Op_ConP) { -- 
const TypeOopPtr* oop_ptr = method_handle->bottom_type()->is_oopptr(); -- ciObject* const_oop = oop_ptr->const_oop(); -- ciMethodHandle* method_handle = const_oop->as_method_handle(); -- -- // Set the callee to have access to the class and signature in -- // the MethodHandleCompiler. -- method_handle->set_callee(callee); -- method_handle->set_caller(caller); -- method_handle->set_call_profile(profile); -- -- // Get an adapter for the MethodHandle. -- ciMethod* target_method = method_handle->get_method_handle_adapter(); -- if (target_method != NULL) { -- CallGenerator* cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS); -- if (cg != NULL && cg->is_inline()) -- return cg; -- } -- } else if (method_handle->Opcode() == Op_Phi && method_handle->req() == 3 && -- method_handle->in(1)->Opcode() == Op_ConP && method_handle->in(2)->Opcode() == Op_ConP) { -- float prob = PROB_FAIR; -- Node* meth_region = method_handle->in(0); -- if (meth_region->is_Region() && -- meth_region->in(1)->is_Proj() && meth_region->in(2)->is_Proj() && -- meth_region->in(1)->in(0) == meth_region->in(2)->in(0) && -- meth_region->in(1)->in(0)->is_If()) { -- // If diamond, so grab the probability of the test to drive the inlining below -- prob = meth_region->in(1)->in(0)->as_If()->_prob; -- if (meth_region->in(1)->is_IfTrue()) { -- prob = 1 - prob; -+CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee) { -+ GraphKit kit(jvms); -+ PhaseGVN& gvn = kit.gvn(); -+ Compile* C = kit.C; -+ vmIntrinsics::ID iid = callee->intrinsic_id(); -+ switch (iid) { -+ case vmIntrinsics::_invokeBasic: -+ { -+ // get MethodHandle receiver -+ Node* receiver = kit.argument(0); -+ if (receiver->Opcode() == Op_ConP) { -+ const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr(); -+ ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget(); -+ guarantee(!target->is_method_handle_intrinsic(), "should not happen"); 
// XXX remove -+ const int vtable_index = methodOopDesc::invalid_vtable_index; -+ CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS); -+ if (cg != NULL && cg->is_inline()) -+ return cg; -+ } else { -+ if (PrintInlining) CompileTask::print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant"); - } - } -+ break; - -- // selectAlternative idiom merging two constant MethodHandles. -- // Generate a guard so that each can be inlined. We might want to -- // do more inputs at later point but this gets the most common -- // case. -- CallGenerator* cg1 = for_method_handle_call(method_handle->in(1), jvms, caller, callee, profile.rescale(1.0 - prob)); -- CallGenerator* cg2 = for_method_handle_call(method_handle->in(2), jvms, caller, callee, profile.rescale(prob)); -- if (cg1 != NULL && cg2 != NULL) { -- const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr(); -- ciObject* const_oop = oop_ptr->const_oop(); -- ciMethodHandle* mh = const_oop->as_method_handle(); -- return new PredictedDynamicCallGenerator(mh, cg2, cg1, prob); -+ case vmIntrinsics::_linkToVirtual: -+ case vmIntrinsics::_linkToStatic: -+ case vmIntrinsics::_linkToSpecial: -+ case vmIntrinsics::_linkToInterface: -+ { -+ // pop MemberName argument -+ Node* member_name = kit.argument(callee->arg_size() - 1); -+ if (member_name->Opcode() == Op_ConP) { -+ const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr(); -+ ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget(); -+ -+ // In lamda forms we erase signature types to avoid resolving issues -+ // involving class loaders. When we optimize a method handle invoke -+ // to a direct call we must cast the receiver and arguments to its -+ // actual types. -+ ciSignature* signature = target->signature(); -+ const int receiver_skip = target->is_static() ? 0 : 1; -+ // Cast receiver to its type. 
-+ if (!target->is_static()) { -+ Node* arg = kit.argument(0); -+ const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr(); -+ const Type* sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass()); -+ if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { -+ Node* cast_obj = gvn.transform(new (C, 2) CheckCastPPNode(kit.control(), arg, sig_type)); -+ kit.set_argument(0, cast_obj); -+ } -+ } -+ // Cast reference arguments to its type. -+ for (int i = 0; i < signature->count(); i++) { -+ ciType* t = signature->type_at(i); -+ if (t->is_klass()) { -+ Node* arg = kit.argument(receiver_skip + i); -+ const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr(); -+ const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass()); -+ if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { -+ Node* cast_obj = gvn.transform(new (C, 2) CheckCastPPNode(kit.control(), arg, sig_type)); -+ kit.set_argument(receiver_skip + i, cast_obj); -+ } -+ } -+ } -+ const int vtable_index = methodOopDesc::invalid_vtable_index; -+ const bool call_is_virtual = target->is_abstract(); // FIXME workaround -+ CallGenerator* cg = C->call_generator(target, vtable_index, call_is_virtual, jvms, true, PROB_ALWAYS); -+ if (cg != NULL && cg->is_inline()) -+ return cg; -+ } - } -+ break; -+ -+ default: -+ fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid))); -+ break; - } - return NULL; - } - --CallGenerator* CallGenerator::for_invokedynamic_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile) { -- assert(callee->is_method_handle_invoke() || callee->is_method_handle_adapter(), "for_invokedynamic_call mismatch"); -- // Get the CallSite object. -- ciBytecodeStream str(caller); -- str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci. 
-- ciCallSite* call_site = str.get_call_site(); -- CallGenerator* cg = CallGenerator::for_invokedynamic_inline(call_site, jvms, caller, callee, profile); -- if (cg != NULL) -- return cg; -- return CallGenerator::for_dynamic_call(callee); --} -- --CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms, -- ciMethod* caller, ciMethod* callee, ciCallProfile profile) { -- ciMethodHandle* method_handle = call_site->get_target(); -- -- // Set the callee to have access to the class and signature in the -- // MethodHandleCompiler. -- method_handle->set_callee(callee); -- method_handle->set_caller(caller); -- method_handle->set_call_profile(profile); -- -- // Get an adapter for the MethodHandle. -- ciMethod* target_method = method_handle->get_invokedynamic_adapter(); -- if (target_method != NULL) { -- Compile *C = Compile::current(); -- CallGenerator* cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS); -- if (cg != NULL && cg->is_inline()) { -- // Add a dependence for invalidation of the optimization. -- if (!call_site->is_constant_call_site()) { -- C->dependencies()->assert_call_site_target_value(call_site, method_handle); -- } -- return cg; -- } -- } -- return NULL; --} -- -- --JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) { -- GraphKit kit(jvms); -- Compile* C = kit.C; -- PhaseGVN& gvn = kit.gvn(); -- -- CompileLog* log = C->log(); -- if (log != NULL) { -- log->elem("predicted_dynamic_call bci='%d'", jvms->bci()); -- } -- -- const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true); -- Node* predicted_mh = kit.makecon(predicted_mh_ptr); -- -- Node* bol = NULL; -- int bc = jvms->method()->java_code_at_bci(jvms->bci()); -- if (bc != Bytecodes::_invokedynamic) { -- // This is the selectAlternative idiom for guardWithTest or -- // similar idioms. 
-- Node* receiver = kit.argument(0); -- -- // Check if the MethodHandle is the expected one -- Node* cmp = gvn.transform(new (C, 3) CmpPNode(receiver, predicted_mh)); -- bol = gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq) ); -- } else { -- // Get the constant pool cache from the caller class. -- ciMethod* caller_method = jvms->method(); -- ciBytecodeStream str(caller_method); -- str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci. -- ciCPCache* cpcache = str.get_cpcache(); -- -- // Get the offset of the CallSite from the constant pool cache -- // pointer. -- int index = str.get_method_index(); -- size_t call_site_offset = cpcache->get_f1_offset(index); -- -- // Load the CallSite object from the constant pool cache. -- const TypeOopPtr* cpcache_type = TypeOopPtr::make_from_constant(cpcache); // returns TypeAryPtr of type T_OBJECT -- const TypeOopPtr* call_site_type = TypeOopPtr::make_from_klass(C->env()->CallSite_klass()); -- Node* cpcache_adr = kit.makecon(cpcache_type); -- Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, call_site_offset); -- // The oops in the constant pool cache are not compressed; load then as raw pointers. -- Node* call_site = kit.make_load(kit.control(), call_site_adr, call_site_type, T_ADDRESS, Compile::AliasIdxRaw); -- -- // Load the target MethodHandle from the CallSite object. -- const TypeOopPtr* target_type = TypeOopPtr::make_from_klass(C->env()->MethodHandle_klass()); -- Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes()); -- Node* target_mh = kit.make_load(kit.control(), target_adr, target_type, T_OBJECT); -- -- // Check if the MethodHandle is still the same. 
-- Node* cmp = gvn.transform(new (C, 3) CmpPNode(target_mh, predicted_mh)); -- bol = gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq) ); -- } -- IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN); -- kit.set_control( gvn.transform(new (C, 1) IfTrueNode (iff))); -- Node* slow_ctl = gvn.transform(new (C, 1) IfFalseNode(iff)); -- -- SafePointNode* slow_map = NULL; -- JVMState* slow_jvms; -- { PreserveJVMState pjvms(&kit); -- kit.set_control(slow_ctl); -- if (!kit.stopped()) { -- slow_jvms = _if_missed->generate(kit.sync_jvms()); -- if (kit.failing()) -- return NULL; // might happen because of NodeCountInliningCutoff -- assert(slow_jvms != NULL, "must be"); -- kit.add_exception_states_from(slow_jvms); -- kit.set_map(slow_jvms->map()); -- if (!kit.stopped()) -- slow_map = kit.stop(); -- } -- } -- -- if (kit.stopped()) { -- // Instance exactly does not matches the desired type. -- kit.set_jvms(slow_jvms); -- return kit.transfer_exceptions_into_jvms(); -- } -- -- // Make the hot call: -- JVMState* new_jvms = _if_hit->generate(kit.sync_jvms()); -- if (new_jvms == NULL) { -- // Inline failed, so make a direct call. -- assert(_if_hit->is_inline(), "must have been a failed inline"); -- CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method()); -- new_jvms = cg->generate(kit.sync_jvms()); -- } -- kit.add_exception_states_from(new_jvms); -- kit.set_jvms(new_jvms); -- -- // Need to merge slow and fast? -- if (slow_map == NULL) { -- // The fast path is the only path remaining. -- return kit.transfer_exceptions_into_jvms(); -- } -- -- if (kit.stopped()) { -- // Inlined method threw an exception, so it's just the slow path after all. -- kit.set_jvms(slow_jvms); -- return kit.transfer_exceptions_into_jvms(); -- } -- -- // Finish the diamond. 
-- kit.C->set_has_split_ifs(true); // Has chance for split-if optimization -- RegionNode* region = new (C, 3) RegionNode(3); -- region->init_req(1, kit.control()); -- region->init_req(2, slow_map->control()); -- kit.set_control(gvn.transform(region)); -- Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO); -- iophi->set_req(2, slow_map->i_o()); -- kit.set_i_o(gvn.transform(iophi)); -- kit.merge_memory(slow_map->merged_memory(), region, 2); -- uint tos = kit.jvms()->stkoff() + kit.sp(); -- uint limit = slow_map->req(); -- for (uint i = TypeFunc::Parms; i < limit; i++) { -- // Skip unused stack slots; fast forward to monoff(); -- if (i == tos) { -- i = kit.jvms()->monoff(); -- if( i >= limit ) break; -- } -- Node* m = kit.map()->in(i); -- Node* n = slow_map->in(i); -- if (m != n) { -- const Type* t = gvn.type(m)->meet(gvn.type(n)); -- Node* phi = PhiNode::make(region, m, t); -- phi->set_req(2, n); -- kit.map()->set_req(i, gvn.transform(phi)); -- } -- } -- return kit.transfer_exceptions_into_jvms(); --} -- - - //-------------------------UncommonTrapCallGenerator----------------------------- - // Internal class which handles all out-of-line calls checking receiver type. -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/callGenerator.hpp ---- openjdk/hotspot/src/share/vm/opto/callGenerator.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/opto/callGenerator.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -25,6 +25,7 @@ - #ifndef SHARE_VM_OPTO_CALLGENERATOR_HPP - #define SHARE_VM_OPTO_CALLGENERATOR_HPP - -+#include "compiler/compileBroker.hpp" - #include "opto/callnode.hpp" - #include "opto/compile.hpp" - #include "opto/type.hpp" -@@ -44,7 +45,7 @@ - ciMethod* _method; // The method being called. 
- - protected: -- CallGenerator(ciMethod* method); -+ CallGenerator(ciMethod* method) : _method(method) {} - - public: - // Accessors -@@ -111,11 +112,8 @@ - static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index); // virtual, interface - static CallGenerator* for_dynamic_call(ciMethod* m); // invokedynamic - -- static CallGenerator* for_method_handle_call(Node* method_handle, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile); -- static CallGenerator* for_invokedynamic_call( JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile); -- -- static CallGenerator* for_method_handle_inline(Node* method_handle, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile); -- static CallGenerator* for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile); -+ static CallGenerator* for_method_handle_call( JVMState* jvms, ciMethod* caller, ciMethod* callee); -+ static CallGenerator* for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee); - - // How to generate a replace a direct call with an inline version - static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg); -@@ -145,13 +143,21 @@ - // Registry for intrinsics: - static CallGenerator* for_intrinsic(ciMethod* m); - static void register_intrinsic(ciMethod* m, CallGenerator* cg); -+ -+ static void print_inlining(ciMethod* callee, int inline_level, int bci, const char* msg) { -+ if (PrintInlining) -+ CompileTask::print_inlining(callee, inline_level, bci, msg); -+ } - }; - -+ -+//------------------------InlineCallGenerator---------------------------------- - class InlineCallGenerator : public CallGenerator { -+ protected: -+ InlineCallGenerator(ciMethod* method) : CallGenerator(method) {} -+ -+ public: - virtual bool is_inline() const { return true; } -- -- protected: -- InlineCallGenerator(ciMethod* method) : CallGenerator(method) { } - }; - - -diff 
-r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/callnode.cpp ---- openjdk/hotspot/src/share/vm/opto/callnode.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/opto/callnode.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -231,9 +231,9 @@ - } - - //============================================================================= --JVMState::JVMState(ciMethod* method, JVMState* caller) { -+JVMState::JVMState(ciMethod* method, JVMState* caller) : -+ _method(method) { - assert(method != NULL, "must be valid call site"); -- _method = method; - _reexecute = Reexecute_Undefined; - debug_only(_bci = -99); // random garbage value - debug_only(_map = (SafePointNode*)-1); -@@ -246,8 +246,8 @@ - _endoff = _monoff; - _sp = 0; - } --JVMState::JVMState(int stack_size) { -- _method = NULL; -+JVMState::JVMState(int stack_size) : -+ _method(NULL) { - _bci = InvocationEntryBci; - _reexecute = Reexecute_Undefined; - debug_only(_map = (SafePointNode*)-1); -@@ -526,8 +526,8 @@ - } - _map->dump(2); - } -- st->print("JVMS depth=%d loc=%d stk=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=", -- depth(), locoff(), stkoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false"); -+ st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=", -+ depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false"); - if (_method == NULL) { - st->print_cr("(none)"); - } else { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/callnode.hpp ---- openjdk/hotspot/src/share/vm/opto/callnode.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/opto/callnode.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -197,7 +197,7 @@ - - private: - JVMState* _caller; // List pointer for forming scope chains -- uint _depth; // One mroe than caller depth, or one. 
-+ uint _depth; // One more than caller depth, or one. - uint _locoff; // Offset to locals in input edge mapping - uint _stkoff; // Offset to stack in input edge mapping - uint _monoff; // Offset to monitors in input edge mapping -@@ -223,6 +223,8 @@ - JVMState(int stack_size); // root state; has a null method - - // Access functions for the JVM -+ // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---| -+ // \ locoff \ stkoff \ argoff \ monoff \ scloff \ endoff - uint locoff() const { return _locoff; } - uint stkoff() const { return _stkoff; } - uint argoff() const { return _stkoff + _sp; } -@@ -231,15 +233,16 @@ - uint endoff() const { return _endoff; } - uint oopoff() const { return debug_end(); } - -- int loc_size() const { return _stkoff - _locoff; } -- int stk_size() const { return _monoff - _stkoff; } -- int mon_size() const { return _scloff - _monoff; } -- int scl_size() const { return _endoff - _scloff; } -+ int loc_size() const { return stkoff() - locoff(); } -+ int stk_size() const { return monoff() - stkoff(); } -+ int arg_size() const { return monoff() - argoff(); } -+ int mon_size() const { return scloff() - monoff(); } -+ int scl_size() const { return endoff() - scloff(); } - -- bool is_loc(uint i) const { return i >= _locoff && i < _stkoff; } -- bool is_stk(uint i) const { return i >= _stkoff && i < _monoff; } -- bool is_mon(uint i) const { return i >= _monoff && i < _scloff; } -- bool is_scl(uint i) const { return i >= _scloff && i < _endoff; } -+ bool is_loc(uint i) const { return locoff() <= i && i < stkoff(); } -+ bool is_stk(uint i) const { return stkoff() <= i && i < monoff(); } -+ bool is_mon(uint i) const { return monoff() <= i && i < scloff(); } -+ bool is_scl(uint i) const { return scloff() <= i && i < endoff(); } - - uint sp() const { return _sp; } - int bci() const { return _bci; } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/doCall.cpp ---- openjdk/hotspot/src/share/vm/opto/doCall.cpp Tue Jan 14 20:24:44 2014 
-0500 -+++ openjdk/hotspot/src/share/vm/opto/doCall.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -59,13 +59,13 @@ - } - #endif - --CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, -+CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_is_virtual, - JVMState* jvms, bool allow_inline, - float prof_factor) { - ciMethod* caller = jvms->method(); - int bci = jvms->bci(); - Bytecodes::Code bytecode = caller->java_code_at_bci(bci); -- guarantee(call_method != NULL, "failed method resolution"); -+ guarantee(callee != NULL, "failed method resolution"); - - // Dtrace currently doesn't work unless all calls are vanilla - if (env()->dtrace_method_probes()) { -@@ -91,7 +91,7 @@ - int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1; - int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1; - log->begin_elem("call method='%d' count='%d' prof_factor='%g'", -- log->identify(call_method), site_count, prof_factor); -+ log->identify(callee), site_count, prof_factor); - if (call_is_virtual) log->print(" virtual='1'"); - if (allow_inline) log->print(" inline='1'"); - if (receiver_count >= 0) { -@@ -109,7 +109,7 @@ - // We do this before the strict f.p. check below because the - // intrinsics handle strict f.p. correctly. - if (allow_inline) { -- CallGenerator* cg = find_intrinsic(call_method, call_is_virtual); -+ CallGenerator* cg = find_intrinsic(callee, call_is_virtual); - if (cg != NULL) return cg; - } - -@@ -117,19 +117,12 @@ - // NOTE: This must happen before normal inlining logic below since - // MethodHandle.invoke* are native methods which obviously don't - // have bytecodes and so normal inlining fails. 
-- if (call_method->is_method_handle_invoke()) { -- if (bytecode != Bytecodes::_invokedynamic) { -- GraphKit kit(jvms); -- Node* method_handle = kit.argument(0); -- return CallGenerator::for_method_handle_call(method_handle, jvms, caller, call_method, profile); -- } -- else { -- return CallGenerator::for_invokedynamic_call(jvms, caller, call_method, profile); -- } -+ if (callee->is_method_handle_intrinsic()) { -+ return CallGenerator::for_method_handle_call(jvms, caller, callee); - } - - // Do not inline strict fp into non-strict code, or the reverse -- if (caller->is_strict() ^ call_method->is_strict()) { -+ if (caller->is_strict() ^ callee->is_strict()) { - allow_inline = false; - } - -@@ -155,26 +148,26 @@ - } - WarmCallInfo scratch_ci; - if (!UseOldInlining) -- scratch_ci.init(jvms, call_method, profile, prof_factor); -- WarmCallInfo* ci = ilt->ok_to_inline(call_method, jvms, profile, &scratch_ci); -+ scratch_ci.init(jvms, callee, profile, prof_factor); -+ WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci); - assert(ci != &scratch_ci, "do not let this pointer escape"); - bool allow_inline = (ci != NULL && !ci->is_cold()); - bool require_inline = (allow_inline && ci->is_hot()); - - if (allow_inline) { -- CallGenerator* cg = CallGenerator::for_inline(call_method, expected_uses); -- if (require_inline && cg != NULL && should_delay_inlining(call_method, jvms)) { -+ CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses); -+ if (require_inline && cg != NULL && should_delay_inlining(callee, jvms)) { - // Delay the inlining of this method to give us the - // opportunity to perform some high level optimizations - // first. -- return CallGenerator::for_late_inline(call_method, cg); -+ return CallGenerator::for_late_inline(callee, cg); - } - if (cg == NULL) { - // Fall through. 
- } else if (require_inline || !InlineWarmCalls) { - return cg; - } else { -- CallGenerator* cold_cg = call_generator(call_method, vtable_index, call_is_virtual, jvms, false, prof_factor); -+ CallGenerator* cold_cg = call_generator(callee, vtable_index, call_is_virtual, jvms, false, prof_factor); - return CallGenerator::for_warm_call(ci, cold_cg, cg); - } - } -@@ -189,7 +182,7 @@ - (profile.morphism() == 2 && UseBimorphicInlining)) { - // receiver_method = profile.method(); - // Profiles do not suggest methods now. Look it up in the major receiver. -- receiver_method = call_method->resolve_invoke(jvms->method()->holder(), -+ receiver_method = callee->resolve_invoke(jvms->method()->holder(), - profile.receiver(0)); - } - if (receiver_method != NULL) { -@@ -201,7 +194,7 @@ - CallGenerator* next_hit_cg = NULL; - ciMethod* next_receiver_method = NULL; - if (profile.morphism() == 2 && UseBimorphicInlining) { -- next_receiver_method = call_method->resolve_invoke(jvms->method()->holder(), -+ next_receiver_method = callee->resolve_invoke(jvms->method()->holder(), - profile.receiver(1)); - if (next_receiver_method != NULL) { - next_hit_cg = this->call_generator(next_receiver_method, -@@ -224,12 +217,12 @@ - ) { - // Generate uncommon trap for class check failure path - // in case of monomorphic or bimorphic virtual call site. -- miss_cg = CallGenerator::for_uncommon_trap(call_method, reason, -+ miss_cg = CallGenerator::for_uncommon_trap(callee, reason, - Deoptimization::Action_maybe_recompile); - } else { - // Generate virtual call for class check failure path - // in case of polymorphic virtual call site. -- miss_cg = CallGenerator::for_virtual_call(call_method, vtable_index); -+ miss_cg = CallGenerator::for_virtual_call(callee, vtable_index); - } - if (miss_cg != NULL) { - if (next_hit_cg != NULL) { -@@ -252,11 +245,11 @@ - // There was no special inlining tactic, or it bailed out. - // Use a more generic tactic, like a simple call. 
- if (call_is_virtual) { -- return CallGenerator::for_virtual_call(call_method, vtable_index); -+ return CallGenerator::for_virtual_call(callee, vtable_index); - } else { - // Class Hierarchy Analysis or Type Profile reveals a unique target, - // or it is a static or special call. -- return CallGenerator::for_direct_call(call_method, should_delay_inlining(call_method, jvms)); -+ return CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms)); - } - } - -@@ -355,33 +348,40 @@ - - // Find target being called - bool will_link; -- ciMethod* dest_method = iter().get_method(will_link); -- ciInstanceKlass* holder_klass = dest_method->holder(); -+ ciMethod* bc_callee = iter().get_method(will_link); // actual callee from bytecode -+ ciInstanceKlass* holder_klass = bc_callee->holder(); - ciKlass* holder = iter().get_declared_method_holder(); - ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder); - -- int nargs = dest_method->arg_size(); -- if (is_invokedynamic) nargs -= 1; -- - // uncommon-trap when callee is unloaded, uninitialized or will not link - // bailout when too many arguments for register representation -- if (!will_link || can_not_compile_call_site(dest_method, klass)) { -+ if (!will_link || can_not_compile_call_site(bc_callee, klass)) { - #ifndef PRODUCT - if (PrintOpto && (Verbose || WizardMode)) { - method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci()); -- dest_method->print_name(); tty->cr(); -+ bc_callee->print_name(); tty->cr(); - } - #endif - return; - } - assert(holder_klass->is_loaded(), ""); -- assert((dest_method->is_static() || is_invokedynamic) == !has_receiver , "must match bc"); -+ //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc"); // XXX invokehandle (cur_bc_raw) - // Note: this takes into account invokeinterface of methods declared in java/lang/Object, - // which should be invokevirtuals but according to the VM spec may be 
invokeinterfaces - assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc"); - // Note: In the absence of miranda methods, an abstract class K can perform - // an invokevirtual directly on an interface method I.m if K implements I. - -+ const int nargs = bc_callee->arg_size(); -+ -+ // Push appendix argument (MethodType, CallSite, etc.), if one. -+ if (iter().has_appendix()) { -+ ciObject* appendix_arg = iter().get_appendix(); -+ const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg); -+ Node* appendix_arg_node = _gvn.makecon(appendix_arg_type); -+ push(appendix_arg_node); -+ } -+ - // --------------------- - // Does Class Hierarchy Analysis reveal only a single target of a v-call? - // Then we may inline or make a static call, but become dependent on there being only 1 target. -@@ -392,21 +392,21 @@ - // Choose call strategy. - bool call_is_virtual = is_virtual_or_interface; - int vtable_index = methodOopDesc::invalid_vtable_index; -- ciMethod* call_method = dest_method; -+ ciMethod* callee = bc_callee; - - // Try to get the most accurate receiver type - if (is_virtual_or_interface) { - Node* receiver_node = stack(sp() - nargs); - const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr(); -- ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type); -+ ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, bc_callee, receiver_type); - - // Have the call been sufficiently improved such that it is no longer a virtual? 
- if (optimized_virtual_method != NULL) { -- call_method = optimized_virtual_method; -+ callee = optimized_virtual_method; - call_is_virtual = false; -- } else if (!UseInlineCaches && is_virtual && call_method->is_loaded()) { -+ } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) { - // We can make a vtable call at this site -- vtable_index = call_method->resolve_vtable_index(method()->holder(), klass); -+ vtable_index = callee->resolve_vtable_index(method()->holder(), klass); - } - } - -@@ -416,22 +416,24 @@ - bool try_inline = (C->do_inlining() || InlineAccessors); - - // --------------------- -- inc_sp(- nargs); // Temporarily pop args for JVM state of call -+ dec_sp(nargs); // Temporarily pop args for JVM state of call - JVMState* jvms = sync_jvms(); - - // --------------------- - // Decide call tactic. - // This call checks with CHA, the interpreter profile, intrinsics table, etc. - // It decides whether inlining is desirable or not. -- CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor()); -+ CallGenerator* cg = C->call_generator(callee, vtable_index, call_is_virtual, jvms, try_inline, prof_factor()); -+ -+ bc_callee = callee = NULL; // don't use bc_callee and callee after this point - - // --------------------- - // Round double arguments before call -- round_double_arguments(dest_method); -+ round_double_arguments(cg->method()); - - #ifndef PRODUCT - // bump global counters for calls -- count_compiled_calls(false/*at_method_entry*/, cg->is_inline()); -+ count_compiled_calls(/*at_method_entry*/ false, cg->is_inline()); - - // Record first part of parsing work for this call - parse_histogram()->record_change(); -@@ -447,8 +449,8 @@ - // because exceptions don't return to the call site.) 
- profile_call(receiver); - -- JVMState* new_jvms; -- if ((new_jvms = cg->generate(jvms)) == NULL) { -+ JVMState* new_jvms = cg->generate(jvms); -+ if (new_jvms == NULL) { - // When inlining attempt fails (e.g., too many arguments), - // it may contaminate the current compile state, making it - // impossible to pull back and try again. Once we call -@@ -469,7 +471,7 @@ - // intrinsic was expecting to optimize. The fallback position is - // to call out-of-line. - try_inline = false; // Inline tactic bailed out. -- cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor()); -+ cg = C->call_generator(cg->method(), vtable_index, call_is_virtual, jvms, try_inline, prof_factor()); - if ((new_jvms = cg->generate(jvms)) == NULL) { - guarantee(failing(), "call failed to generate: calls should work"); - return; -@@ -478,8 +480,8 @@ - - if (cg->is_inline()) { - // Accumulate has_loops estimate -- C->set_has_loops(C->has_loops() || call_method->has_loops()); -- C->env()->notice_inlined_method(call_method); -+ C->set_has_loops(C->has_loops() || cg->method()->has_loops()); -+ C->env()->notice_inlined_method(cg->method()); - } - - // Reset parser state from [new_]jvms, which now carries results of the call. -@@ -501,20 +503,74 @@ - } - - // Round double result after a call from strict to non-strict code -- round_double_result(dest_method); -+ round_double_result(cg->method()); -+ -+ ciType* rtype = cg->method()->return_type(); -+ if (iter().cur_bc_raw() == Bytecodes::_invokehandle || is_invokedynamic) { -+ // Be careful here with return types. -+ ciType* ctype = iter().get_declared_method_signature()->return_type(); -+ if (ctype != rtype) { -+ BasicType rt = rtype->basic_type(); -+ BasicType ct = ctype->basic_type(); -+ Node* retnode = peek(); -+ if (ct == T_VOID) { -+ // It's OK for a method to return a value that is discarded. -+ // The discarding does not require any special action from the caller. 
-+ // The Java code knows this, at VerifyType.isNullConversion. -+ pop_node(rt); // whatever it was, pop it -+ retnode = top(); -+ } else if (rt == T_INT || is_subword_type(rt)) { -+ // FIXME: This logic should be factored out. -+ if (ct == T_BOOLEAN) { -+ retnode = _gvn.transform( new (C, 3) AndINode(retnode, intcon(0x1)) ); -+ } else if (ct == T_CHAR) { -+ retnode = _gvn.transform( new (C, 3) AndINode(retnode, intcon(0xFFFF)) ); -+ } else if (ct == T_BYTE) { -+ retnode = _gvn.transform( new (C, 3) LShiftINode(retnode, intcon(24)) ); -+ retnode = _gvn.transform( new (C, 3) RShiftINode(retnode, intcon(24)) ); -+ } else if (ct == T_SHORT) { -+ retnode = _gvn.transform( new (C, 3) LShiftINode(retnode, intcon(16)) ); -+ retnode = _gvn.transform( new (C, 3) RShiftINode(retnode, intcon(16)) ); -+ } else { -+ assert(ct == T_INT, err_msg("rt=%d, ct=%d", rt, ct)); -+ } -+ } else if (rt == T_OBJECT) { -+ assert(ct == T_OBJECT, err_msg("rt=T_OBJECT, ct=%d", ct)); -+ if (ctype->is_loaded()) { -+ Node* if_fail = top(); -+ retnode = gen_checkcast(retnode, makecon(TypeKlassPtr::make(ctype->as_klass())), &if_fail); -+ if (if_fail != top()) { -+ PreserveJVMState pjvms(this); -+ set_control(if_fail); -+ builtin_throw(Deoptimization::Reason_class_check); -+ } -+ pop(); -+ push(retnode); -+ } -+ } else { -+ assert(ct == rt, err_msg("unexpected mismatch rt=%d, ct=%d", rt, ct)); -+ // push a zero; it's better than getting an oop/int mismatch -+ retnode = pop_node(rt); -+ retnode = zerocon(ct); -+ push_node(ct, retnode); -+ } -+ // Now that the value is well-behaved, continue with the call-site type. -+ rtype = ctype; -+ } -+ } - - // If the return type of the method is not loaded, assert that the - // value we got is a null. Otherwise, we need to recompile. 
-- if (!dest_method->return_type()->is_loaded()) { -+ if (!rtype->is_loaded()) { - #ifndef PRODUCT - if (PrintOpto && (Verbose || WizardMode)) { - method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci()); -- dest_method->print_name(); tty->cr(); -+ cg->method()->print_name(); tty->cr(); - } - #endif - if (C->log() != NULL) { - C->log()->elem("assert_null reason='return' klass='%d'", -- C->log()->identify(dest_method->return_type())); -+ C->log()->identify(rtype)); - } - // If there is going to be a trap, put it at the next bytecode: - set_bci(iter().next_bci()); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/graphKit.cpp ---- openjdk/hotspot/src/share/vm/opto/graphKit.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/opto/graphKit.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -965,7 +965,7 @@ - assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, ""); - } - --bool GraphKit::compute_stack_effects(int& inputs, int& depth) { -+bool GraphKit::compute_stack_effects(int& inputs, int& depth, bool for_parse) { - Bytecodes::Code code = java_bc(); - if (code == Bytecodes::_wide) { - code = method()->java_code_at_bci(bci() + 1); -@@ -1032,12 +1032,21 @@ - ciBytecodeStream iter(method()); - iter.reset_to_bci(bci()); - iter.next(); -- ciMethod* method = iter.get_method(ignore); -+ ciMethod* callee = iter.get_method(ignore); - // (Do not use ciMethod::arg_size(), because - // it might be an unloaded method, which doesn't - // know whether it is static or not.) -- inputs = method->invoke_arg_size(code); -- int size = method->return_type()->size(); -+ if (for_parse) { -+ // Case 1: When called from parse we are *before* the invoke (in the -+ // caller) and need to to adjust the inputs by an appendix -+ // argument that will be pushed implicitly. -+ inputs = callee->invoke_arg_size(code) - (iter.has_appendix() ? 
1 : 0); -+ } else { -+ // Case 2: Here we are *after* the invoke (in the callee) and need to -+ // remove any appendix arguments that were popped. -+ inputs = callee->invoke_arg_size(code) - (callee->has_member_arg() ? 1 : 0); -+ } -+ int size = callee->return_type()->size(); - depth = size - inputs; - } - break; -@@ -1373,7 +1382,6 @@ - } - - -- - //============================================================================= - //--------------------------------memory--------------------------------------- - Node* GraphKit::memory(uint alias_idx) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/graphKit.hpp ---- openjdk/hotspot/src/share/vm/opto/graphKit.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/opto/graphKit.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -145,6 +145,7 @@ - void clean_stack(int from_sp); // clear garbage beyond from_sp to top - - void inc_sp(int i) { set_sp(sp() + i); } -+ void dec_sp(int i) { set_sp(sp() - i); } - void set_bci(int bci) { _bci = bci; } - - // Make sure jvms has current bci & sp. -@@ -285,7 +286,7 @@ - // How many stack inputs does the current BC consume? - // And, how does the stack change after the bytecode? - // Returns false if unknown. -- bool compute_stack_effects(int& inputs, int& depth); -+ bool compute_stack_effects(int& inputs, int& depth, bool for_parse = false); - - // Add a fixed offset to a pointer - Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) { -@@ -370,9 +371,9 @@ - // Replace all occurrences of one node by another. 
- void replace_in_map(Node* old, Node* neww); - -- void push(Node* n) { map_not_null(); _map->set_stack(_map->_jvms,_sp++,n); } -- Node* pop() { map_not_null(); return _map->stack(_map->_jvms,--_sp); } -- Node* peek(int off=0) { map_not_null(); return _map->stack(_map->_jvms, _sp - off - 1); } -+ void push(Node* n) { map_not_null(); _map->set_stack(_map->_jvms, _sp++, n); } -+ Node* pop() { map_not_null(); return _map->stack( _map->_jvms, --_sp); } -+ Node* peek(int off = 0) { map_not_null(); return _map->stack( _map->_jvms, _sp - off - 1); } - - void push_pair(Node* ldval) { - push(ldval); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/library_call.cpp ---- openjdk/hotspot/src/share/vm/opto/library_call.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/opto/library_call.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -2104,7 +2104,7 @@ - if (id == vmIntrinsics::_reverseBytes_l && !Matcher::has_match_rule(Op_ReverseBytesL)) return false; - if (id == vmIntrinsics::_reverseBytes_c && !Matcher::has_match_rule(Op_ReverseBytesUS)) return false; - if (id == vmIntrinsics::_reverseBytes_s && !Matcher::has_match_rule(Op_ReverseBytesS)) return false; -- _sp += arg_size(); // restore stack pointer -+ _sp += arg_size(); // restore stack pointer - switch (id) { - case vmIntrinsics::_reverseBytes_i: - push(_gvn.transform(new (C, 2) ReverseBytesINode(0, pop()))); -@@ -2277,6 +2277,7 @@ - - // Argument words: "this" plus (oop/offset) or (lo/hi) args plus maybe 1 or 2 value words - int nargs = 1 + (is_native_ptr ? 2 : 3) + (is_store ? 
type_words : 0); -+ assert(callee()->arg_size() == nargs, "must be"); - - debug_only(int saved_sp = _sp); - _sp += nargs; -@@ -3932,7 +3933,8 @@ - } - } - -- if (method->is_method_handle_adapter()) { -+ if (method->is_method_handle_intrinsic() || -+ method->is_compiled_lambda_form()) { - // This is an internal adapter frame from the MethodHandleCompiler -- skip it - return true; - } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/matcher.cpp ---- openjdk/hotspot/src/share/vm/opto/matcher.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/opto/matcher.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -1231,8 +1231,9 @@ - if (is_method_handle_invoke) { - // Kill some extra stack space in case method handles want to do - // a little in-place argument insertion. -+ // FIXME: Is this still necessary? - int regs_per_word = NOT_LP64(1) LP64_ONLY(2); // %%% make a global const! -- out_arg_limit_per_call += MethodHandlePushLimit * regs_per_word; -+ out_arg_limit_per_call += methodOopDesc::extra_stack_entries() * regs_per_word; - // Do not update mcall->_argsize because (a) the extra space is not - // pushed as arguments and (b) _argsize is dead (not used anywhere). - } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/node.hpp ---- openjdk/hotspot/src/share/vm/opto/node.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/opto/node.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -362,7 +362,7 @@ - #endif - - // Reference to the i'th input Node. Error if out of bounds. -- Node* in(uint i) const { assert(i < _max,"oob"); return _in[i]; } -+ Node* in(uint i) const { assert(i < _max, err_msg("oob: i=%d, _max=%d", i, _max)); return _in[i]; } - // Reference to the i'th output Node. Error if out of bounds. - // Use this accessor sparingly. We are going trying to use iterators instead. 
- Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; } -@@ -393,7 +393,7 @@ - void ins_req( uint i, Node *n ); // Insert a NEW required input - void set_req( uint i, Node *n ) { - assert( is_not_dead(n), "can not use dead node"); -- assert( i < _cnt, "oob"); -+ assert( i < _cnt, err_msg("oob: i=%d, _cnt=%d", i, _cnt)); - assert( !VerifyHashTableKeys || _hash_lock == 0, - "remove node from hash table before modifying it"); - Node** p = &_in[i]; // cache this._in, across the del_out call -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/parse.hpp ---- openjdk/hotspot/src/share/vm/opto/parse.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/opto/parse.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -84,7 +84,7 @@ - static const char* check_can_parse(ciMethod* callee); - - static InlineTree* build_inline_tree_root(); -- static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false); -+ static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee); - - // For temporary (stack-allocated, stateless) ilts: - InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/parse1.cpp ---- openjdk/hotspot/src/share/vm/opto/parse1.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/opto/parse1.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -398,7 +398,7 @@ - if (PrintCompilation || PrintOpto) { - // Make sure I have an inline tree, so I can print messages about it. - JVMState* ilt_caller = is_osr_parse() ? 
caller->caller() : caller; -- InlineTree::find_subtree_from_root(C->ilt(), ilt_caller, parse_method, true); -+ InlineTree::find_subtree_from_root(C->ilt(), ilt_caller, parse_method); - } - _max_switch_depth = 0; - _est_switch_depth = 0; -@@ -1398,8 +1398,8 @@ - #ifdef ASSERT - int pre_bc_sp = sp(); - int inputs, depth; -- bool have_se = !stopped() && compute_stack_effects(inputs, depth); -- assert(!have_se || pre_bc_sp >= inputs, "have enough stack to execute this BC"); -+ bool have_se = !stopped() && compute_stack_effects(inputs, depth, /*for_parse*/ true); -+ assert(!have_se || pre_bc_sp >= inputs, err_msg("have enough stack to execute this BC: pre_bc_sp=%d, inputs=%d", pre_bc_sp, inputs)); - #endif //ASSERT - - do_one_bytecode(); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/opto/phaseX.hpp ---- openjdk/hotspot/src/share/vm/opto/phaseX.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/opto/phaseX.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -193,6 +193,7 @@ - // If you want the type of a very new (untransformed) node, - // you must use type_or_null, and test the result for NULL. 
- const Type* type(const Node* n) const { -+ assert(n != NULL, "must not be null"); - const Type* t = _types.fast_lookup(n->_idx); - assert(t != NULL, "must set before get"); - return t; -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/prims/jvmtiTagMap.cpp ---- openjdk/hotspot/src/share/vm/prims/jvmtiTagMap.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/prims/jvmtiTagMap.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -3162,9 +3162,6 @@ - if (fr->is_entry_frame()) { - last_entry_frame = fr; - } -- if (fr->is_ricochet_frame()) { -- fr->oops_ricochet_do(blk, vf->register_map()); -- } - } - - vf = vf->sender(); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/prims/methodHandleWalk.cpp ---- openjdk/hotspot/src/share/vm/prims/methodHandleWalk.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ /dev/null Thu Jan 01 00:00:00 1970 +0000 -@@ -1,2089 +0,0 @@ --/* -- * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. -- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -- * -- * This code is free software; you can redistribute it and/or modify it -- * under the terms of the GNU General Public License version 2 only, as -- * published by the Free Software Foundation. -- * -- * This code is distributed in the hope that it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -- * version 2 for more details (a copy is included in the LICENSE file that -- * accompanied this code). -- * -- * You should have received a copy of the GNU General Public License version -- * 2 along with this work; if not, write to the Free Software Foundation, -- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -- * -- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -- * or visit www.oracle.com if you need additional information or have any -- * questions. 
-- * -- */ -- --#include "precompiled.hpp" --#include "interpreter/rewriter.hpp" --#include "memory/oopFactory.hpp" --#include "prims/methodHandleWalk.hpp" -- --/* -- * JSR 292 reference implementation: method handle structure analysis -- */ -- --#ifdef PRODUCT --#define print_method_handle(mh) {} --#else //PRODUCT --extern "C" void print_method_handle(oop mh); --#endif //PRODUCT -- --// ----------------------------------------------------------------------------- --// MethodHandleChain -- --void MethodHandleChain::set_method_handle(Handle mh, TRAPS) { -- if (!java_lang_invoke_MethodHandle::is_instance(mh())) lose("bad method handle", CHECK); -- -- // set current method handle and unpack partially -- _method_handle = mh; -- _is_last = false; -- _is_bound = false; -- _arg_slot = -1; -- _arg_type = T_VOID; -- _conversion = -1; -- _last_invoke = Bytecodes::_nop; //arbitrary non-garbage -- -- if (java_lang_invoke_DirectMethodHandle::is_instance(mh())) { -- set_last_method(mh(), THREAD); -- return; -- } -- if (java_lang_invoke_AdapterMethodHandle::is_instance(mh())) { -- _conversion = AdapterMethodHandle_conversion(); -- assert(_conversion != -1, "bad conv value"); -- assert(java_lang_invoke_BoundMethodHandle::is_instance(mh()), "also BMH"); -- } -- if (java_lang_invoke_BoundMethodHandle::is_instance(mh())) { -- if (!is_adapter()) // keep AMH and BMH separate in this model -- _is_bound = true; -- _arg_slot = BoundMethodHandle_vmargslot(); -- oop target = MethodHandle_vmtarget_oop(); -- if (!is_bound() || java_lang_invoke_MethodHandle::is_instance(target)) { -- _arg_type = compute_bound_arg_type(target, NULL, _arg_slot, CHECK); -- } else if (target != NULL && target->is_method()) { -- methodOop m = (methodOop) target; -- _arg_type = compute_bound_arg_type(NULL, m, _arg_slot, CHECK); -- set_last_method(mh(), CHECK); -- } else { -- _is_bound = false; // lose! 
-- } -- } -- if (is_bound() && _arg_type == T_VOID) { -- lose("bad vmargslot", CHECK); -- } -- if (!is_bound() && !is_adapter()) { -- lose("unrecognized MH type", CHECK); -- } --} -- -- --void MethodHandleChain::set_last_method(oop target, TRAPS) { -- _is_last = true; -- KlassHandle receiver_limit; int flags = 0; -- _last_method = MethodHandles::decode_method(target, receiver_limit, flags); -- if ((flags & MethodHandles::_dmf_has_receiver) == 0) -- _last_invoke = Bytecodes::_invokestatic; -- else if ((flags & MethodHandles::_dmf_does_dispatch) == 0) -- _last_invoke = Bytecodes::_invokespecial; -- else if ((flags & MethodHandles::_dmf_from_interface) != 0) -- _last_invoke = Bytecodes::_invokeinterface; -- else -- _last_invoke = Bytecodes::_invokevirtual; --} -- -- --BasicType MethodHandleChain::compute_bound_arg_type(oop target, methodOop m, int arg_slot, TRAPS) { -- // There is no direct indication of whether the argument is primitive or not. -- // It is implied by the _vmentry code, and by the MethodType of the target. 
-- BasicType arg_type = T_VOID; -- if (target != NULL) { -- oop mtype = java_lang_invoke_MethodHandle::type(target); -- int arg_num = MethodHandles::argument_slot_to_argnum(mtype, arg_slot); -- if (arg_num >= 0) { -- oop ptype = java_lang_invoke_MethodType::ptype(mtype, arg_num); -- arg_type = java_lang_Class::as_BasicType(ptype); -- } -- } else if (m != NULL) { -- // figure out the argument type from the slot -- // FIXME: make this explicit in the MH -- int cur_slot = m->size_of_parameters(); -- if (arg_slot >= cur_slot) -- return T_VOID; -- if (!m->is_static()) { -- cur_slot -= type2size[T_OBJECT]; -- if (cur_slot == arg_slot) -- return T_OBJECT; -- } -- ResourceMark rm(THREAD); -- for (SignatureStream ss(m->signature()); !ss.is_done(); ss.next()) { -- BasicType bt = ss.type(); -- cur_slot -= type2size[bt]; -- if (cur_slot <= arg_slot) { -- if (cur_slot == arg_slot) -- arg_type = bt; -- break; -- } -- } -- } -- if (arg_type == T_ARRAY) -- arg_type = T_OBJECT; -- return arg_type; --} -- -- --void MethodHandleChain::lose(const char* msg, TRAPS) { -- _lose_message = msg; --#ifdef ASSERT -- if (Verbose) { -- tty->print_cr(INTPTR_FORMAT " lose: %s", _method_handle(), msg); -- print(); -- } --#endif -- if (!THREAD->is_Java_thread() || ((JavaThread*)THREAD)->thread_state() != _thread_in_vm) { -- // throw a preallocated exception -- THROW_OOP(Universe::virtual_machine_error_instance()); -- } -- THROW_MSG(vmSymbols::java_lang_InternalError(), msg); --} -- -- --#ifdef ASSERT --static const char* adapter_ops[] = { -- "retype_only" , -- "retype_raw" , -- "check_cast" , -- "prim_to_prim" , -- "ref_to_prim" , -- "prim_to_ref" , -- "swap_args" , -- "rot_args" , -- "dup_args" , -- "drop_args" , -- "collect_args" , -- "spread_args" , -- "fold_args" --}; -- --static const char* adapter_op_to_string(int op) { -- if (op >= 0 && op < (int)ARRAY_SIZE(adapter_ops)) -- return adapter_ops[op]; -- return "unknown_op"; --} -- --void MethodHandleChain::print(oopDesc* m) { -- HandleMark hm; 
-- ResourceMark rm; -- Handle mh(m); -- EXCEPTION_MARK; -- MethodHandleChain mhc(mh, THREAD); -- if (HAS_PENDING_EXCEPTION) { -- oop ex = THREAD->pending_exception(); -- CLEAR_PENDING_EXCEPTION; -- ex->print(); -- return; -- } -- mhc.print(); --} -- -- --void MethodHandleChain::print() { -- EXCEPTION_MARK; -- print_impl(THREAD); -- if (HAS_PENDING_EXCEPTION) { -- oop ex = THREAD->pending_exception(); -- CLEAR_PENDING_EXCEPTION; -- ex->print(); -- } --} -- --void MethodHandleChain::print_impl(TRAPS) { -- ResourceMark rm; -- -- MethodHandleChain chain(_root, CHECK); -- for (;;) { -- tty->print(INTPTR_FORMAT ": ", chain.method_handle()()); -- if (chain.is_bound()) { -- tty->print("bound: arg_type %s arg_slot %d", -- type2name(chain.bound_arg_type()), -- chain.bound_arg_slot()); -- oop o = chain.bound_arg_oop(); -- if (o != NULL) { -- if (o->is_instance()) { -- tty->print(" instance %s", o->klass()->klass_part()->internal_name()); -- if (java_lang_invoke_CountingMethodHandle::is_instance(o)) { -- tty->print(" vmcount: %d", java_lang_invoke_CountingMethodHandle::vmcount(o)); -- } -- } else { -- o->print(); -- } -- } -- oop vmt = chain.vmtarget_oop(); -- if (vmt != NULL) { -- if (vmt->is_method()) { -- tty->print(" "); -- methodOop(vmt)->print_short_name(tty); -- } else if (java_lang_invoke_MethodHandle::is_instance(vmt)) { -- tty->print(" method handle " INTPTR_FORMAT, vmt); -- } else { -- ShouldNotReachHere(); -- } -- } -- } else if (chain.is_adapter()) { -- tty->print("adapter: arg_slot %d conversion op %s", -- chain.adapter_arg_slot(), -- adapter_op_to_string(chain.adapter_conversion_op())); -- switch (chain.adapter_conversion_op()) { -- case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY: -- if (java_lang_invoke_CountingMethodHandle::is_instance(chain.method_handle_oop())) { -- tty->print(" vmcount: %d", java_lang_invoke_CountingMethodHandle::vmcount(chain.method_handle_oop())); -- } -- case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW: -- case 
java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST: -- case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM: -- case java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM: -- break; -- -- case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF: { -- tty->print(" src_type = %s", type2name(chain.adapter_conversion_src_type())); -- break; -- } -- -- case java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS: -- case java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS: { -- int dest_arg_slot = chain.adapter_conversion_vminfo(); -- tty->print(" dest_arg_slot %d type %s", dest_arg_slot, type2name(chain.adapter_conversion_src_type())); -- break; -- } -- -- case java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS: -- case java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS: { -- int dup_slots = chain.adapter_conversion_stack_pushes(); -- tty->print(" pushes %d", dup_slots); -- break; -- } -- -- case java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS: -- case java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS: { -- int coll_slots = chain.MethodHandle_vmslots(); -- tty->print(" coll_slots %d", coll_slots); -- break; -- } -- -- case java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS: { -- // Check the required length. 
-- int spread_slots = 1 + chain.adapter_conversion_stack_pushes(); -- tty->print(" spread_slots %d", spread_slots); -- break; -- } -- -- default: -- tty->print_cr("bad adapter conversion"); -- break; -- } -- } else { -- // DMH -- tty->print("direct: "); -- chain.last_method_oop()->print_short_name(tty); -- } -- -- tty->print(" ("); -- objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(chain.method_type_oop()); -- for (int i = ptypes->length() - 1; i >= 0; i--) { -- BasicType t = java_lang_Class::as_BasicType(ptypes->obj_at(i)); -- if (t == T_ARRAY) t = T_OBJECT; -- tty->print("%c", type2char(t)); -- if (t == T_LONG || t == T_DOUBLE) tty->print("_"); -- } -- tty->print(")"); -- BasicType rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(chain.method_type_oop())); -- if (rtype == T_ARRAY) rtype = T_OBJECT; -- tty->print("%c", type2char(rtype)); -- tty->cr(); -- if (!chain.is_last()) { -- chain.next(CHECK); -- } else { -- break; -- } -- } --} --#endif -- -- --// ----------------------------------------------------------------------------- --// MethodHandleWalker -- --Bytecodes::Code MethodHandleWalker::conversion_code(BasicType src, BasicType dest) { -- if (is_subword_type(src)) { -- src = T_INT; // all subword src types act like int -- } -- if (src == dest) { -- return Bytecodes::_nop; -- } -- --#define SRC_DEST(s,d) (((int)(s) << 4) + (int)(d)) -- switch (SRC_DEST(src, dest)) { -- case SRC_DEST(T_INT, T_LONG): return Bytecodes::_i2l; -- case SRC_DEST(T_INT, T_FLOAT): return Bytecodes::_i2f; -- case SRC_DEST(T_INT, T_DOUBLE): return Bytecodes::_i2d; -- case SRC_DEST(T_INT, T_BYTE): return Bytecodes::_i2b; -- case SRC_DEST(T_INT, T_CHAR): return Bytecodes::_i2c; -- case SRC_DEST(T_INT, T_SHORT): return Bytecodes::_i2s; -- -- case SRC_DEST(T_LONG, T_INT): return Bytecodes::_l2i; -- case SRC_DEST(T_LONG, T_FLOAT): return Bytecodes::_l2f; -- case SRC_DEST(T_LONG, T_DOUBLE): return Bytecodes::_l2d; -- -- case SRC_DEST(T_FLOAT, T_INT): return 
Bytecodes::_f2i; -- case SRC_DEST(T_FLOAT, T_LONG): return Bytecodes::_f2l; -- case SRC_DEST(T_FLOAT, T_DOUBLE): return Bytecodes::_f2d; -- -- case SRC_DEST(T_DOUBLE, T_INT): return Bytecodes::_d2i; -- case SRC_DEST(T_DOUBLE, T_LONG): return Bytecodes::_d2l; -- case SRC_DEST(T_DOUBLE, T_FLOAT): return Bytecodes::_d2f; -- } --#undef SRC_DEST -- -- // cannot do it in one step, or at all -- return Bytecodes::_illegal; --} -- -- --// ----------------------------------------------------------------------------- --// MethodHandleWalker::walk --// --MethodHandleWalker::ArgToken --MethodHandleWalker::walk(TRAPS) { -- ArgToken empty = ArgToken(); // Empty return value. -- -- walk_incoming_state(CHECK_(empty)); -- -- for (;;) { -- set_method_handle(chain().method_handle_oop()); -- -- assert(_outgoing_argc == argument_count_slow(), "empty slots under control"); -- -- if (chain().is_adapter()) { -- int conv_op = chain().adapter_conversion_op(); -- int arg_slot = chain().adapter_arg_slot(); -- -- // Check that the arg_slot is valid. In most cases it must be -- // within range of the current arguments but there are some -- // exceptions. Those are sanity checked in their implemention -- // below. -- if ((arg_slot < 0 || arg_slot >= _outgoing.length()) && -- conv_op > java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW && -- conv_op != java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS && -- conv_op != java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS) { -- lose(err_msg("bad argument index %d", arg_slot), CHECK_(empty)); -- } -- -- bool retain_original_args = false; // used by fold/collect logic -- -- // perform the adapter action -- switch (conv_op) { -- case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY: -- // No changes to arguments; pass the bits through. -- break; -- -- case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW: { -- // To keep the verifier happy, emit bitwise ("raw") conversions as needed. 
-- // See MethodHandles::same_basic_type_for_arguments for allowed conversions. -- Handle incoming_mtype(THREAD, chain().method_type_oop()); -- Handle outgoing_mtype; -- { -- oop outgoing_mh_oop = chain().vmtarget_oop(); -- if (!java_lang_invoke_MethodHandle::is_instance(outgoing_mh_oop)) -- lose("outgoing target not a MethodHandle", CHECK_(empty)); -- outgoing_mtype = Handle(THREAD, java_lang_invoke_MethodHandle::type(outgoing_mh_oop)); -- } -- -- int nptypes = java_lang_invoke_MethodType::ptype_count(outgoing_mtype()); -- if (nptypes != java_lang_invoke_MethodType::ptype_count(incoming_mtype())) -- lose("incoming and outgoing parameter count do not agree", CHECK_(empty)); -- -- // Argument types. -- for (int i = 0, slot = _outgoing.length() - 1; slot >= 0; slot--) { -- if (arg_type(slot) == T_VOID) continue; -- -- klassOop src_klass = NULL; -- klassOop dst_klass = NULL; -- BasicType src = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(incoming_mtype(), i), &src_klass); -- BasicType dst = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(outgoing_mtype(), i), &dst_klass); -- retype_raw_argument_type(src, dst, slot, CHECK_(empty)); -- i++; // We need to skip void slots at the top of the loop. -- } -- -- // Return type. 
-- { -- BasicType src = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(incoming_mtype())); -- BasicType dst = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(outgoing_mtype())); -- retype_raw_return_type(src, dst, CHECK_(empty)); -- } -- break; -- } -- -- case java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST: { -- // checkcast the Nth outgoing argument in place -- klassOop dest_klass = NULL; -- BasicType dest = java_lang_Class::as_BasicType(chain().adapter_arg_oop(), &dest_klass); -- assert(dest == T_OBJECT, ""); -- ArgToken arg = _outgoing.at(arg_slot); -- assert(dest == arg.basic_type(), ""); -- arg = make_conversion(T_OBJECT, dest_klass, Bytecodes::_checkcast, arg, CHECK_(empty)); -- // replace the object by the result of the cast, to make the compiler happy: -- change_argument(T_OBJECT, arg_slot, T_OBJECT, arg); -- debug_only(dest_klass = (klassOop)badOop); -- break; -- } -- -- case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM: { -- // i2l, etc., on the Nth outgoing argument in place -- BasicType src = chain().adapter_conversion_src_type(), -- dest = chain().adapter_conversion_dest_type(); -- ArgToken arg = _outgoing.at(arg_slot); -- Bytecodes::Code bc = conversion_code(src, dest); -- if (bc == Bytecodes::_nop) { -- break; -- } else if (bc != Bytecodes::_illegal) { -- arg = make_conversion(dest, NULL, bc, arg, CHECK_(empty)); -- } else if (is_subword_type(dest)) { -- bc = conversion_code(src, T_INT); -- if (bc != Bytecodes::_illegal) { -- arg = make_conversion(dest, NULL, bc, arg, CHECK_(empty)); -- bc = conversion_code(T_INT, dest); -- arg = make_conversion(dest, NULL, bc, arg, CHECK_(empty)); -- } -- } -- if (bc == Bytecodes::_illegal) { -- lose(err_msg("bad primitive conversion for %s -> %s", type2name(src), type2name(dest)), CHECK_(empty)); -- } -- change_argument(src, arg_slot, dest, arg); -- break; -- } -- -- case java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM: { -- // checkcast to wrapper type & call 
intValue, etc. -- BasicType dest = chain().adapter_conversion_dest_type(); -- ArgToken arg = _outgoing.at(arg_slot); -- arg = make_conversion(T_OBJECT, SystemDictionary::box_klass(dest), -- Bytecodes::_checkcast, arg, CHECK_(empty)); -- vmIntrinsics::ID unboxer = vmIntrinsics::for_unboxing(dest); -- if (unboxer == vmIntrinsics::_none) { -- lose("no unboxing method", CHECK_(empty)); -- } -- ArgToken arglist[2]; -- arglist[0] = arg; // outgoing 'this' -- arglist[1] = ArgToken(); // sentinel -- arg = make_invoke(methodHandle(), unboxer, Bytecodes::_invokevirtual, false, 1, &arglist[0], CHECK_(empty)); -- change_argument(T_OBJECT, arg_slot, dest, arg); -- break; -- } -- -- case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF: { -- // call wrapper type.valueOf -- BasicType src = chain().adapter_conversion_src_type(); -- vmIntrinsics::ID boxer = vmIntrinsics::for_boxing(src); -- if (boxer == vmIntrinsics::_none) { -- lose("no boxing method", CHECK_(empty)); -- } -- ArgToken arg = _outgoing.at(arg_slot); -- ArgToken arglist[2]; -- arglist[0] = arg; // outgoing value -- arglist[1] = ArgToken(); // sentinel -- arg = make_invoke(methodHandle(), boxer, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(empty)); -- change_argument(src, arg_slot, T_OBJECT, arg); -- break; -- } -- -- case java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS: { -- int dest_arg_slot = chain().adapter_conversion_vminfo(); -- if (!has_argument(dest_arg_slot)) { -- lose("bad swap index", CHECK_(empty)); -- } -- // a simple swap between two arguments -- if (arg_slot > dest_arg_slot) { -- int tmp = arg_slot; -- arg_slot = dest_arg_slot; -- dest_arg_slot = tmp; -- } -- ArgToken a1 = _outgoing.at(arg_slot); -- ArgToken a2 = _outgoing.at(dest_arg_slot); -- change_argument(a2.basic_type(), dest_arg_slot, a1); -- change_argument(a1.basic_type(), arg_slot, a2); -- break; -- } -- -- case java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS: { -- int limit_raw = chain().adapter_conversion_vminfo(); -- 
bool rot_down = (arg_slot < limit_raw); -- int limit_bias = (rot_down ? MethodHandles::OP_ROT_ARGS_DOWN_LIMIT_BIAS : 0); -- int limit_slot = limit_raw - limit_bias; -- if ((uint)limit_slot > (uint)_outgoing.length()) { -- lose("bad rotate index", CHECK_(empty)); -- } -- // Rotate the source argument (plus following N slots) into the -- // position occupied by the dest argument (plus following N slots). -- int rotate_count = type2size[chain().adapter_conversion_src_type()]; -- // (no other rotate counts are currently supported) -- if (rot_down) { -- for (int i = 0; i < rotate_count; i++) { -- ArgToken temp = _outgoing.at(arg_slot); -- _outgoing.remove_at(arg_slot); -- _outgoing.insert_before(limit_slot - 1, temp); -- } -- } else { // arg_slot > limit_slot => rotate_up -- for (int i = 0; i < rotate_count; i++) { -- ArgToken temp = _outgoing.at(arg_slot + rotate_count - 1); -- _outgoing.remove_at(arg_slot + rotate_count - 1); -- _outgoing.insert_before(limit_slot, temp); -- } -- } -- assert(_outgoing_argc == argument_count_slow(), "empty slots under control"); -- break; -- } -- -- case java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS: { -- int dup_slots = chain().adapter_conversion_stack_pushes(); -- if (dup_slots <= 0) { -- lose("bad dup count", CHECK_(empty)); -- } -- for (int i = 0; i < dup_slots; i++) { -- ArgToken dup = _outgoing.at(arg_slot + 2*i); -- if (dup.basic_type() != T_VOID) _outgoing_argc += 1; -- _outgoing.insert_before(i, dup); -- } -- assert(_outgoing_argc == argument_count_slow(), "empty slots under control"); -- break; -- } -- -- case java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS: { -- int drop_slots = -chain().adapter_conversion_stack_pushes(); -- if (drop_slots <= 0) { -- lose("bad drop count", CHECK_(empty)); -- } -- for (int i = 0; i < drop_slots; i++) { -- ArgToken drop = _outgoing.at(arg_slot); -- if (drop.basic_type() != T_VOID) _outgoing_argc -= 1; -- _outgoing.remove_at(arg_slot); -- } -- assert(_outgoing_argc == 
argument_count_slow(), "empty slots under control"); -- break; -- } -- -- case java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS: -- retain_original_args = true; // and fall through: -- case java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS: { -- // call argument MH recursively -- //{static int x; if (!x++) print_method_handle(chain().method_handle_oop()); --x;} -- Handle recursive_mh(THREAD, chain().adapter_arg_oop()); -- if (!java_lang_invoke_MethodHandle::is_instance(recursive_mh())) { -- lose("recursive target not a MethodHandle", CHECK_(empty)); -- } -- Handle recursive_mtype(THREAD, java_lang_invoke_MethodHandle::type(recursive_mh())); -- int argc = java_lang_invoke_MethodType::ptype_count(recursive_mtype()); -- int coll_slots = java_lang_invoke_MethodHandle::vmslots(recursive_mh()); -- BasicType rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(recursive_mtype())); -- ArgToken* arglist = NEW_RESOURCE_ARRAY(ArgToken, 1 + argc + 1); // 1+: mh, +1: sentinel -- arglist[0] = make_oop_constant(recursive_mh(), CHECK_(empty)); -- if (arg_slot < 0 || coll_slots < 0 || arg_slot + coll_slots > _outgoing.length()) { -- lose("bad fold/collect arg slot", CHECK_(empty)); -- } -- for (int i = 0, slot = arg_slot + coll_slots - 1; slot >= arg_slot; slot--) { -- ArgToken arg_state = _outgoing.at(slot); -- BasicType arg_type = arg_state.basic_type(); -- if (arg_type == T_VOID) continue; -- ArgToken arg = _outgoing.at(slot); -- if (i >= argc) { lose("bad fold/collect arg", CHECK_(empty)); } -- arglist[1+i] = arg; -- if (!retain_original_args) -- change_argument(arg_type, slot, T_VOID, ArgToken(tt_void)); -- i++; -- } -- arglist[1+argc] = ArgToken(); // sentinel -- oop invoker = java_lang_invoke_MethodTypeForm::vmlayout( -- java_lang_invoke_MethodType::form(recursive_mtype()) ); -- if (invoker == NULL || !invoker->is_method()) { -- lose("bad vmlayout slot", CHECK_(empty)); -- } -- // FIXME: consider inlining the invokee at the bytecode level -- 
ArgToken ret = make_invoke(methodHandle(THREAD, methodOop(invoker)), vmIntrinsics::_invokeGeneric, -- Bytecodes::_invokevirtual, false, 1+argc, &arglist[0], CHECK_(empty)); -- // The iid = _invokeGeneric really means to adjust reference types as needed. -- DEBUG_ONLY(invoker = NULL); -- if (rtype == T_OBJECT) { -- klassOop rklass = java_lang_Class::as_klassOop( java_lang_invoke_MethodType::rtype(recursive_mtype()) ); -- if (rklass != SystemDictionary::Object_klass() && -- !Klass::cast(rklass)->is_interface()) { -- // preserve type safety -- ret = make_conversion(T_OBJECT, rklass, Bytecodes::_checkcast, ret, CHECK_(empty)); -- } -- } -- if (rtype != T_VOID) { -- int ret_slot = arg_slot + (retain_original_args ? coll_slots : 0); -- change_argument(T_VOID, ret_slot, rtype, ret); -- } -- break; -- } -- -- case java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS: { -- klassOop array_klass_oop = NULL; -- BasicType array_type = java_lang_Class::as_BasicType(chain().adapter_arg_oop(), -- &array_klass_oop); -- assert(array_type == T_OBJECT, ""); -- assert(Klass::cast(array_klass_oop)->oop_is_array(), ""); -- arrayKlassHandle array_klass(THREAD, array_klass_oop); -- debug_only(array_klass_oop = (klassOop)badOop); -- -- klassOop element_klass_oop = NULL; -- BasicType element_type = java_lang_Class::as_BasicType(array_klass->component_mirror(), -- &element_klass_oop); -- KlassHandle element_klass(THREAD, element_klass_oop); -- debug_only(element_klass_oop = (klassOop)badOop); -- -- // Fetch the argument, which we will cast to the required array type. -- ArgToken arg = _outgoing.at(arg_slot); -- assert(arg.basic_type() == T_OBJECT, ""); -- ArgToken array_arg = arg; -- array_arg = make_conversion(T_OBJECT, array_klass(), Bytecodes::_checkcast, array_arg, CHECK_(empty)); -- change_argument(T_OBJECT, arg_slot, T_VOID, ArgToken(tt_void)); -- -- // Check the required length. 
-- int spread_slots = 1 + chain().adapter_conversion_stack_pushes(); -- int spread_length = spread_slots; -- if (type2size[element_type] == 2) { -- if (spread_slots % 2 != 0) spread_slots = -1; // force error -- spread_length = spread_slots / 2; -- } -- if (spread_slots < 0) { -- lose("bad spread length", CHECK_(empty)); -- } -- -- jvalue length_jvalue; length_jvalue.i = spread_length; -- ArgToken length_arg = make_prim_constant(T_INT, &length_jvalue, CHECK_(empty)); -- // Call a built-in method known to the JVM to validate the length. -- ArgToken arglist[3]; -- arglist[0] = array_arg; // value to check -- arglist[1] = length_arg; // length to check -- arglist[2] = ArgToken(); // sentinel -- make_invoke(methodHandle(), vmIntrinsics::_checkSpreadArgument, -- Bytecodes::_invokestatic, false, 2, &arglist[0], CHECK_(empty)); -- -- // Spread out the array elements. -- Bytecodes::Code aload_op = Bytecodes::_nop; -- switch (element_type) { -- case T_INT: aload_op = Bytecodes::_iaload; break; -- case T_LONG: aload_op = Bytecodes::_laload; break; -- case T_FLOAT: aload_op = Bytecodes::_faload; break; -- case T_DOUBLE: aload_op = Bytecodes::_daload; break; -- case T_OBJECT: aload_op = Bytecodes::_aaload; break; -- case T_BOOLEAN: // fall through: -- case T_BYTE: aload_op = Bytecodes::_baload; break; -- case T_CHAR: aload_op = Bytecodes::_caload; break; -- case T_SHORT: aload_op = Bytecodes::_saload; break; -- default: lose("primitive array NYI", CHECK_(empty)); -- } -- int ap = arg_slot; -- for (int i = 0; i < spread_length; i++) { -- jvalue offset_jvalue; offset_jvalue.i = i; -- ArgToken offset_arg = make_prim_constant(T_INT, &offset_jvalue, CHECK_(empty)); -- ArgToken element_arg = make_fetch(element_type, element_klass(), aload_op, array_arg, offset_arg, CHECK_(empty)); -- change_argument(T_VOID, ap, element_type, element_arg); -- //ap += type2size[element_type]; // don't do this; insert next arg to *right* of previous -- } -- break; -- } -- -- default: -- lose("bad 
adapter conversion", CHECK_(empty)); -- break; -- } -- } -- -- if (chain().is_bound()) { -- // push a new argument -- BasicType arg_type = chain().bound_arg_type(); -- jint arg_slot = chain().bound_arg_slot(); -- oop arg_oop = chain().bound_arg_oop(); -- ArgToken arg; -- if (arg_type == T_OBJECT) { -- arg = make_oop_constant(arg_oop, CHECK_(empty)); -- } else { -- jvalue arg_value; -- BasicType bt = java_lang_boxing_object::get_value(arg_oop, &arg_value); -- if (bt == arg_type || (bt == T_INT && is_subword_type(arg_type))) { -- arg = make_prim_constant(arg_type, &arg_value, CHECK_(empty)); -- } else { -- lose(err_msg("bad bound value: arg_type %s boxing %s", type2name(arg_type), type2name(bt)), CHECK_(empty)); -- } -- } -- DEBUG_ONLY(arg_oop = badOop); -- change_argument(T_VOID, arg_slot, arg_type, arg); -- } -- -- // this test must come after the body of the loop -- if (!chain().is_last()) { -- chain().next(CHECK_(empty)); -- } else { -- break; -- } -- } -- -- // finish the sequence with a tail-call to the ultimate target -- // parameters are passed in logical order (recv 1st), not slot order -- ArgToken* arglist = NEW_RESOURCE_ARRAY(ArgToken, _outgoing.length() + 1); -- int ap = 0; -- for (int i = _outgoing.length() - 1; i >= 0; i--) { -- ArgToken arg_state = _outgoing.at(i); -- if (arg_state.basic_type() == T_VOID) continue; -- arglist[ap++] = _outgoing.at(i); -- } -- assert(ap == _outgoing_argc, ""); -- arglist[ap] = ArgToken(); // add a sentinel, for the sake of asserts -- return make_invoke(chain().last_method(), -- vmIntrinsics::_none, -- chain().last_invoke_code(), true, -- ap, arglist, THREAD); --} -- -- --// ----------------------------------------------------------------------------- --// MethodHandleWalker::walk_incoming_state --// --void MethodHandleWalker::walk_incoming_state(TRAPS) { -- Handle mtype(THREAD, chain().method_type_oop()); -- int nptypes = java_lang_invoke_MethodType::ptype_count(mtype()); -- _outgoing_argc = nptypes; -- int argp = 
nptypes - 1; -- if (argp >= 0) { -- _outgoing.at_grow(argp, ArgToken(tt_void)); // presize -- } -- for (int i = 0; i < nptypes; i++) { -- klassOop arg_type_klass = NULL; -- BasicType arg_type = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(mtype(), i), &arg_type_klass); -- int index = new_local_index(arg_type); -- ArgToken arg = make_parameter(arg_type, arg_type_klass, index, CHECK); -- DEBUG_ONLY(arg_type_klass = (klassOop) NULL); -- _outgoing.at_put(argp, arg); -- if (type2size[arg_type] == 2) { -- // add the extra slot, so we can model the JVM stack -- _outgoing.insert_before(argp+1, ArgToken(tt_void)); -- } -- --argp; -- } -- // call make_parameter at the end of the list for the return type -- klassOop ret_type_klass = NULL; -- BasicType ret_type = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(mtype()), &ret_type_klass); -- ArgToken ret = make_parameter(ret_type, ret_type_klass, -1, CHECK); -- // ignore ret; client can catch it if needed -- -- assert(_outgoing_argc == argument_count_slow(), "empty slots under control"); -- -- verify_args_and_signature(CHECK); --} -- -- --#ifdef ASSERT --void MethodHandleWalker::verify_args_and_signature(TRAPS) { -- int index = _outgoing.length() - 1; -- objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(chain().method_type_oop()); -- for (int i = 0, limit = ptypes->length(); i < limit; i++) { -- BasicType t = java_lang_Class::as_BasicType(ptypes->obj_at(i)); -- if (t == T_ARRAY) t = T_OBJECT; -- if (t == T_LONG || t == T_DOUBLE) { -- assert(T_VOID == _outgoing.at(index).basic_type(), "types must match"); -- index--; -- } -- assert(t == _outgoing.at(index).basic_type(), "types must match"); -- index--; -- } --} --#endif -- -- --// ----------------------------------------------------------------------------- --// MethodHandleWalker::change_argument --// --// This is messy because some kinds of arguments are paired with --// companion slots containing an empty value. 
--void MethodHandleWalker::change_argument(BasicType old_type, int slot, const ArgToken& new_arg) { -- BasicType new_type = new_arg.basic_type(); -- int old_size = type2size[old_type]; -- int new_size = type2size[new_type]; -- if (old_size == new_size) { -- // simple case first -- _outgoing.at_put(slot, new_arg); -- } else if (old_size > new_size) { -- for (int i = old_size - 1; i >= new_size; i--) { -- assert((i != 0) == (_outgoing.at(slot + i).basic_type() == T_VOID), ""); -- _outgoing.remove_at(slot + i); -- } -- if (new_size > 0) -- _outgoing.at_put(slot, new_arg); -- else -- _outgoing_argc -= 1; // deleted a real argument -- } else { -- for (int i = old_size; i < new_size; i++) { -- _outgoing.insert_before(slot + i, ArgToken(tt_void)); -- } -- _outgoing.at_put(slot, new_arg); -- if (old_size == 0) -- _outgoing_argc += 1; // inserted a real argument -- } -- assert(_outgoing_argc == argument_count_slow(), "empty slots under control"); --} -- -- --#ifdef ASSERT --int MethodHandleWalker::argument_count_slow() { -- int args_seen = 0; -- for (int i = _outgoing.length() - 1; i >= 0; i--) { -- if (_outgoing.at(i).basic_type() != T_VOID) { -- ++args_seen; -- if (_outgoing.at(i).basic_type() == T_LONG || -- _outgoing.at(i).basic_type() == T_DOUBLE) { -- assert(_outgoing.at(i + 1).basic_type() == T_VOID, "should only follow two word"); -- } -- } else { -- assert(_outgoing.at(i - 1).basic_type() == T_LONG || -- _outgoing.at(i - 1).basic_type() == T_DOUBLE, "should only follow two word"); -- } -- } -- return args_seen; --} --#endif -- -- --// ----------------------------------------------------------------------------- --// MethodHandleWalker::retype_raw_conversion --// --// Do the raw retype conversions for OP_RETYPE_RAW. 
--void MethodHandleWalker::retype_raw_conversion(BasicType src, BasicType dst, bool for_return, int slot, TRAPS) { -- if (src != dst) { -- if (MethodHandles::same_basic_type_for_returns(src, dst, /*raw*/ true)) { -- if (MethodHandles::is_float_fixed_reinterpretation_cast(src, dst)) { -- vmIntrinsics::ID iid = vmIntrinsics::for_raw_conversion(src, dst); -- if (iid == vmIntrinsics::_none) { -- lose("no raw conversion method", CHECK); -- } -- ArgToken arglist[2]; -- if (!for_return) { -- // argument type conversion -- ArgToken arg = _outgoing.at(slot); -- assert(arg.token_type() >= tt_symbolic || src == arg.basic_type(), "sanity"); -- arglist[0] = arg; // outgoing 'this' -- arglist[1] = ArgToken(); // sentinel -- arg = make_invoke(methodHandle(), iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK); -- change_argument(src, slot, dst, arg); -- } else { -- // return type conversion -- if (_return_conv == vmIntrinsics::_none) { -- _return_conv = iid; -- } else if (_return_conv == vmIntrinsics::for_raw_conversion(dst, src)) { -- _return_conv = vmIntrinsics::_none; -- } else if (_return_conv != zero_return_conv()) { -- lose(err_msg("requested raw return conversion not allowed: %s -> %s (before %s)", type2name(src), type2name(dst), vmIntrinsics::name_at(_return_conv)), CHECK); -- } -- } -- } else { -- // Nothing to do. -- } -- } else if (for_return && (!is_subword_type(src) || !is_subword_type(dst))) { -- // This can occur in exception-throwing MHs, which have a fictitious return value encoded as Void or Empty. 
-- _return_conv = zero_return_conv(); -- } else if (src == T_OBJECT && is_java_primitive(dst)) { -- // ref-to-prim: discard ref, push zero -- lose("requested ref-to-prim conversion not expected", CHECK); -- } else { -- lose(err_msg("requested raw conversion not allowed: %s -> %s", type2name(src), type2name(dst)), CHECK); -- } -- } --} -- -- --// ----------------------------------------------------------------------------- --// MethodHandleCompiler -- --MethodHandleCompiler::MethodHandleCompiler(Handle root, Symbol* name, Symbol* signature, int invoke_count, bool is_invokedynamic, TRAPS) -- : MethodHandleWalker(root, is_invokedynamic, THREAD), -- _invoke_count(invoke_count), -- _thread(THREAD), -- _bytecode(THREAD, 50), -- _constants(THREAD, 10), -- _non_bcp_klasses(THREAD, 5), -- _cur_stack(0), -- _max_stack(0), -- _rtype(T_ILLEGAL), -- _selectAlternative_bci(-1), -- _taken_count(0), -- _not_taken_count(0) --{ -- -- // Element zero is always the null constant. -- (void) _constants.append(NULL); -- -- // Set name and signature index. -- _name_index = cpool_symbol_put(name); -- _signature_index = cpool_symbol_put(signature); -- -- // To make the resulting methods more recognizable by -- // stack walkers and compiler heuristics, -- // we put them in holder class MethodHandle. -- // See klass_is_method_handle_adapter_holder -- // and methodOopDesc::is_method_handle_adapter. -- _target_klass = SystemDictionaryHandles::MethodHandle_klass(); -- -- check_non_bcp_klasses(java_lang_invoke_MethodHandle::type(root()), CHECK); -- -- // Get return type klass. -- Handle first_mtype(THREAD, chain().method_type_oop()); -- // _rklass is NULL for primitives. -- _rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(first_mtype()), &_rklass); -- if (_rtype == T_ARRAY) _rtype = T_OBJECT; -- -- ArgumentSizeComputer args(signature); -- int params = args.size() + 1; // Incoming arguments plus receiver. -- _num_params = for_invokedynamic() ? 
params - 1 : params; // XXX Check if callee is static? --} -- -- --// ----------------------------------------------------------------------------- --// MethodHandleCompiler::compile --// --// Compile this MethodHandle into a bytecode adapter and return a --// methodOop. --methodHandle MethodHandleCompiler::compile(TRAPS) { -- assert(_thread == THREAD, "must be same thread"); -- methodHandle nullHandle; -- (void) walk(CHECK_(nullHandle)); -- record_non_bcp_klasses(); -- return get_method_oop(CHECK_(nullHandle)); --} -- -- --void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index, int args_size) { -- Bytecodes::check(op); // Are we legal? -- -- switch (op) { -- // b -- case Bytecodes::_aconst_null: -- case Bytecodes::_iconst_m1: -- case Bytecodes::_iconst_0: -- case Bytecodes::_iconst_1: -- case Bytecodes::_iconst_2: -- case Bytecodes::_iconst_3: -- case Bytecodes::_iconst_4: -- case Bytecodes::_iconst_5: -- case Bytecodes::_lconst_0: -- case Bytecodes::_lconst_1: -- case Bytecodes::_fconst_0: -- case Bytecodes::_fconst_1: -- case Bytecodes::_fconst_2: -- case Bytecodes::_dconst_0: -- case Bytecodes::_dconst_1: -- case Bytecodes::_iload_0: -- case Bytecodes::_iload_1: -- case Bytecodes::_iload_2: -- case Bytecodes::_iload_3: -- case Bytecodes::_lload_0: -- case Bytecodes::_lload_1: -- case Bytecodes::_lload_2: -- case Bytecodes::_lload_3: -- case Bytecodes::_fload_0: -- case Bytecodes::_fload_1: -- case Bytecodes::_fload_2: -- case Bytecodes::_fload_3: -- case Bytecodes::_dload_0: -- case Bytecodes::_dload_1: -- case Bytecodes::_dload_2: -- case Bytecodes::_dload_3: -- case Bytecodes::_aload_0: -- case Bytecodes::_aload_1: -- case Bytecodes::_aload_2: -- case Bytecodes::_aload_3: -- case Bytecodes::_istore_0: -- case Bytecodes::_istore_1: -- case Bytecodes::_istore_2: -- case Bytecodes::_istore_3: -- case Bytecodes::_lstore_0: -- case Bytecodes::_lstore_1: -- case Bytecodes::_lstore_2: -- case Bytecodes::_lstore_3: -- case Bytecodes::_fstore_0: -- case 
Bytecodes::_fstore_1: -- case Bytecodes::_fstore_2: -- case Bytecodes::_fstore_3: -- case Bytecodes::_dstore_0: -- case Bytecodes::_dstore_1: -- case Bytecodes::_dstore_2: -- case Bytecodes::_dstore_3: -- case Bytecodes::_astore_0: -- case Bytecodes::_astore_1: -- case Bytecodes::_astore_2: -- case Bytecodes::_astore_3: -- case Bytecodes::_iand: -- case Bytecodes::_i2l: -- case Bytecodes::_i2f: -- case Bytecodes::_i2d: -- case Bytecodes::_i2b: -- case Bytecodes::_i2c: -- case Bytecodes::_i2s: -- case Bytecodes::_l2i: -- case Bytecodes::_l2f: -- case Bytecodes::_l2d: -- case Bytecodes::_f2i: -- case Bytecodes::_f2l: -- case Bytecodes::_f2d: -- case Bytecodes::_d2i: -- case Bytecodes::_d2l: -- case Bytecodes::_d2f: -- case Bytecodes::_iaload: -- case Bytecodes::_laload: -- case Bytecodes::_faload: -- case Bytecodes::_daload: -- case Bytecodes::_aaload: -- case Bytecodes::_baload: -- case Bytecodes::_caload: -- case Bytecodes::_saload: -- case Bytecodes::_ireturn: -- case Bytecodes::_lreturn: -- case Bytecodes::_freturn: -- case Bytecodes::_dreturn: -- case Bytecodes::_areturn: -- case Bytecodes::_return: -- assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_b, "wrong bytecode format"); -- _bytecode.push(op); -- break; -- -- // bi -- case Bytecodes::_ldc: -- assert(Bytecodes::format_bits(op, false) == (Bytecodes::_fmt_b|Bytecodes::_fmt_has_k), "wrong bytecode format"); -- if (index == (index & 0xff)) { -- _bytecode.push(op); -- _bytecode.push(index); -- } else { -- _bytecode.push(Bytecodes::_ldc_w); -- _bytecode.push(index >> 8); -- _bytecode.push(index); -- } -- break; -- -- case Bytecodes::_iload: -- case Bytecodes::_lload: -- case Bytecodes::_fload: -- case Bytecodes::_dload: -- case Bytecodes::_aload: -- case Bytecodes::_istore: -- case Bytecodes::_lstore: -- case Bytecodes::_fstore: -- case Bytecodes::_dstore: -- case Bytecodes::_astore: -- assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bi, "wrong bytecode format"); -- if (index == (index & 
0xff)) { -- _bytecode.push(op); -- _bytecode.push(index); -- } else { -- // doesn't fit in a u2 -- _bytecode.push(Bytecodes::_wide); -- _bytecode.push(op); -- _bytecode.push(index >> 8); -- _bytecode.push(index); -- } -- break; -- -- // bkk -- case Bytecodes::_ldc_w: -- case Bytecodes::_ldc2_w: -- case Bytecodes::_checkcast: -- assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bkk, "wrong bytecode format"); -- assert((unsigned short) index == index, "index does not fit in 16-bit"); -- _bytecode.push(op); -- _bytecode.push(index >> 8); -- _bytecode.push(index); -- break; -- -- // bJJ -- case Bytecodes::_invokestatic: -- case Bytecodes::_invokespecial: -- case Bytecodes::_invokevirtual: -- assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format"); -- assert((unsigned short) index == index, "index does not fit in 16-bit"); -- _bytecode.push(op); -- _bytecode.push(index >> 8); -- _bytecode.push(index); -- break; -- -- case Bytecodes::_invokeinterface: -- assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format"); -- assert((unsigned short) index == index, "index does not fit in 16-bit"); -- assert(args_size > 0, "valid args_size"); -- _bytecode.push(op); -- _bytecode.push(index >> 8); -- _bytecode.push(index); -- _bytecode.push(args_size); -- _bytecode.push(0); -- break; -- -- case Bytecodes::_ifeq: -- assert((unsigned short) index == index, "index does not fit in 16-bit"); -- _bytecode.push(op); -- _bytecode.push(index >> 8); -- _bytecode.push(index); -- break; -- -- default: -- ShouldNotReachHere(); -- } --} -- --void MethodHandleCompiler::update_branch_dest(int src, int dst) { -- switch (_bytecode.at(src)) { -- case Bytecodes::_ifeq: -- dst -= src; // compute the offset -- assert((unsigned short) dst == dst, "index does not fit in 16-bit"); -- _bytecode.at_put(src + 1, dst >> 8); -- _bytecode.at_put(src + 2, dst); -- break; -- default: -- ShouldNotReachHere(); -- } --} -- --void 
MethodHandleCompiler::emit_load(ArgToken arg) { -- TokenType tt = arg.token_type(); -- BasicType bt = arg.basic_type(); -- -- switch (tt) { -- case tt_parameter: -- case tt_temporary: -- emit_load(bt, arg.index()); -- break; -- case tt_constant: -- emit_load_constant(arg); -- break; -- case tt_illegal: -- case tt_void: -- default: -- ShouldNotReachHere(); -- } --} -- -- --void MethodHandleCompiler::emit_load(BasicType bt, int index) { -- if (index <= 3) { -- switch (bt) { -- case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT: -- case T_INT: emit_bc(Bytecodes::cast(Bytecodes::_iload_0 + index)); break; -- case T_LONG: emit_bc(Bytecodes::cast(Bytecodes::_lload_0 + index)); break; -- case T_FLOAT: emit_bc(Bytecodes::cast(Bytecodes::_fload_0 + index)); break; -- case T_DOUBLE: emit_bc(Bytecodes::cast(Bytecodes::_dload_0 + index)); break; -- case T_OBJECT: emit_bc(Bytecodes::cast(Bytecodes::_aload_0 + index)); break; -- default: -- ShouldNotReachHere(); -- } -- } -- else { -- switch (bt) { -- case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT: -- case T_INT: emit_bc(Bytecodes::_iload, index); break; -- case T_LONG: emit_bc(Bytecodes::_lload, index); break; -- case T_FLOAT: emit_bc(Bytecodes::_fload, index); break; -- case T_DOUBLE: emit_bc(Bytecodes::_dload, index); break; -- case T_OBJECT: emit_bc(Bytecodes::_aload, index); break; -- default: -- ShouldNotReachHere(); -- } -- } -- stack_push(bt); --} -- --void MethodHandleCompiler::emit_store(BasicType bt, int index) { -- if (index <= 3) { -- switch (bt) { -- case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT: -- case T_INT: emit_bc(Bytecodes::cast(Bytecodes::_istore_0 + index)); break; -- case T_LONG: emit_bc(Bytecodes::cast(Bytecodes::_lstore_0 + index)); break; -- case T_FLOAT: emit_bc(Bytecodes::cast(Bytecodes::_fstore_0 + index)); break; -- case T_DOUBLE: emit_bc(Bytecodes::cast(Bytecodes::_dstore_0 + index)); break; -- case T_OBJECT: emit_bc(Bytecodes::cast(Bytecodes::_astore_0 + index)); break; -- 
default: -- ShouldNotReachHere(); -- } -- } -- else { -- switch (bt) { -- case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT: -- case T_INT: emit_bc(Bytecodes::_istore, index); break; -- case T_LONG: emit_bc(Bytecodes::_lstore, index); break; -- case T_FLOAT: emit_bc(Bytecodes::_fstore, index); break; -- case T_DOUBLE: emit_bc(Bytecodes::_dstore, index); break; -- case T_OBJECT: emit_bc(Bytecodes::_astore, index); break; -- default: -- ShouldNotReachHere(); -- } -- } -- stack_pop(bt); --} -- -- --void MethodHandleCompiler::emit_load_constant(ArgToken arg) { -- BasicType bt = arg.basic_type(); -- if (is_subword_type(bt)) bt = T_INT; -- switch (bt) { -- case T_INT: { -- jint value = arg.get_jint(); -- if (-1 <= value && value <= 5) -- emit_bc(Bytecodes::cast(Bytecodes::_iconst_0 + value)); -- else -- emit_bc(Bytecodes::_ldc, cpool_int_put(value)); -- break; -- } -- case T_LONG: { -- jlong value = arg.get_jlong(); -- if (0 <= value && value <= 1) -- emit_bc(Bytecodes::cast(Bytecodes::_lconst_0 + (int) value)); -- else -- emit_bc(Bytecodes::_ldc2_w, cpool_long_put(value)); -- break; -- } -- case T_FLOAT: { -- jfloat value = arg.get_jfloat(); -- if (value == 0.0 || value == 1.0 || value == 2.0) -- emit_bc(Bytecodes::cast(Bytecodes::_fconst_0 + (int) value)); -- else -- emit_bc(Bytecodes::_ldc, cpool_float_put(value)); -- break; -- } -- case T_DOUBLE: { -- jdouble value = arg.get_jdouble(); -- if (value == 0.0 || value == 1.0) -- emit_bc(Bytecodes::cast(Bytecodes::_dconst_0 + (int) value)); -- else -- emit_bc(Bytecodes::_ldc2_w, cpool_double_put(value)); -- break; -- } -- case T_OBJECT: { -- Handle value = arg.object(); -- if (value.is_null()) { -- emit_bc(Bytecodes::_aconst_null); -- break; -- } -- if (java_lang_Class::is_instance(value())) { -- klassOop k = java_lang_Class::as_klassOop(value()); -- if (k != NULL) { -- emit_bc(Bytecodes::_ldc, cpool_klass_put(k)); -- break; -- } -- } -- emit_bc(Bytecodes::_ldc, cpool_object_put(value)); -- break; -- } -- default: -- 
ShouldNotReachHere(); -- } -- stack_push(bt); --} -- -- --MethodHandleWalker::ArgToken --MethodHandleCompiler::make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, -- const ArgToken& src, TRAPS) { -- -- BasicType srctype = src.basic_type(); -- TokenType tt = src.token_type(); -- int index = -1; -- -- switch (op) { -- case Bytecodes::_i2l: -- case Bytecodes::_i2f: -- case Bytecodes::_i2d: -- case Bytecodes::_i2b: -- case Bytecodes::_i2c: -- case Bytecodes::_i2s: -- -- case Bytecodes::_l2i: -- case Bytecodes::_l2f: -- case Bytecodes::_l2d: -- -- case Bytecodes::_f2i: -- case Bytecodes::_f2l: -- case Bytecodes::_f2d: -- -- case Bytecodes::_d2i: -- case Bytecodes::_d2l: -- case Bytecodes::_d2f: -- if (tt == tt_constant) { -- emit_load_constant(src); -- } else { -- emit_load(srctype, src.index()); -- } -- stack_pop(srctype); // pop the src type -- emit_bc(op); -- stack_push(type); // push the dest value -- if (tt != tt_constant) -- index = src.index(); -- if (srctype != type || index == -1) -- index = new_local_index(type); -- emit_store(type, index); -- break; -- -- case Bytecodes::_checkcast: -- if (tt == tt_constant) { -- emit_load_constant(src); -- } else { -- emit_load(srctype, src.index()); -- index = src.index(); -- } -- emit_bc(op, cpool_klass_put(tk)); -- check_non_bcp_klass(tk, CHECK_(src)); -- // Allocate a new local for the type so that we don't hide the -- // previous type from the verifier. 
-- index = new_local_index(type); -- emit_store(srctype, index); -- break; -- -- case Bytecodes::_nop: -- // nothing to do -- return src; -- -- default: -- if (op == Bytecodes::_illegal) -- lose(err_msg("no such primitive conversion: %s -> %s", type2name(src.basic_type()), type2name(type)), THREAD); -- else -- lose(err_msg("bad primitive conversion op: %s", Bytecodes::name(op)), THREAD); -- return make_prim_constant(type, &zero_jvalue, THREAD); -- } -- -- return make_parameter(type, tk, index, THREAD); --} -- -- --// ----------------------------------------------------------------------------- --// MethodHandleCompiler --// -- --// Values used by the compiler. --jvalue MethodHandleCompiler::zero_jvalue = { 0 }; --jvalue MethodHandleCompiler::one_jvalue = { 1 }; -- --// Fetch any values from CountingMethodHandles and capture them for profiles --bool MethodHandleCompiler::fetch_counts(ArgToken arg1, ArgToken arg2) { -- int count1 = -1, count2 = -1; -- if (arg1.token_type() == tt_constant && arg1.basic_type() == T_OBJECT && -- java_lang_invoke_CountingMethodHandle::is_instance(arg1.object()())) { -- count1 = java_lang_invoke_CountingMethodHandle::vmcount(arg1.object()()); -- } -- if (arg2.token_type() == tt_constant && arg2.basic_type() == T_OBJECT && -- java_lang_invoke_CountingMethodHandle::is_instance(arg2.object()())) { -- count2 = java_lang_invoke_CountingMethodHandle::vmcount(arg2.object()()); -- } -- int total = count1 + count2; -- if (count1 != -1 && count2 != -1 && total != 0) { -- // Normalize the collect counts to the invoke_count -- if (count1 != 0) _not_taken_count = (int)(_invoke_count * count1 / (double)total); -- if (count2 != 0) _taken_count = (int)(_invoke_count * count2 / (double)total); -- return true; -- } -- return false; --} -- --// Emit bytecodes for the given invoke instruction. 
--MethodHandleWalker::ArgToken --MethodHandleCompiler::make_invoke(methodHandle m, vmIntrinsics::ID iid, -- Bytecodes::Code op, bool tailcall, -- int argc, MethodHandleWalker::ArgToken* argv, -- TRAPS) { -- ArgToken zero; -- if (m.is_null()) { -- // Get the intrinsic methodOop. -- m = methodHandle(THREAD, vmIntrinsics::method_for(iid)); -- if (m.is_null()) { -- lose(vmIntrinsics::name_at(iid), CHECK_(zero)); -- } -- } -- -- klassOop klass = m->method_holder(); -- Symbol* name = m->name(); -- Symbol* signature = m->signature(); -- -- if (iid == vmIntrinsics::_invokeGeneric && -- argc >= 1 && argv[0].token_type() == tt_constant) { -- assert(m->intrinsic_id() == vmIntrinsics::_invokeExact, ""); -- Handle receiver = argv[0].object(); -- Handle rtype(THREAD, java_lang_invoke_MethodHandle::type(receiver())); -- Handle mtype(THREAD, m->method_handle_type()); -- if (rtype() != mtype()) { -- assert(java_lang_invoke_MethodType::form(rtype()) == -- java_lang_invoke_MethodType::form(mtype()), -- "must be the same shape"); -- // customize m to the exact required rtype -- bool has_non_bcp_klass = check_non_bcp_klasses(rtype(), CHECK_(zero)); -- TempNewSymbol sig2 = java_lang_invoke_MethodType::as_signature(rtype(), true, CHECK_(zero)); -- methodHandle m2; -- if (!has_non_bcp_klass) { -- methodOop m2_oop = SystemDictionary::find_method_handle_invoke(m->name(), sig2, -- KlassHandle(), CHECK_(zero)); -- m2 = methodHandle(THREAD, m2_oop); -- } -- if (m2.is_null()) { -- // just build it fresh -- m2 = methodOopDesc::make_invoke_method(klass, m->name(), sig2, rtype, CHECK_(zero)); -- if (m2.is_null()) -- lose(err_msg("no customized invoker %s", sig2->as_utf8()), CHECK_(zero)); -- } -- m = m2; -- signature = m->signature(); -- } -- } -- -- if (m->intrinsic_id() == vmIntrinsics::_selectAlternative && -- fetch_counts(argv[1], argv[2])) { -- assert(argc == 3, "three arguments"); -- assert(tailcall, "only"); -- -- // do inline bytecodes so we can drop profile data into it, -- // 0: iload_0 
-- emit_load(argv[0]); -- // 1: ifeq 8 -- _selectAlternative_bci = _bytecode.length(); -- emit_bc(Bytecodes::_ifeq, 0); // emit placeholder offset -- // 4: aload_1 -- emit_load(argv[1]); -- // 5: areturn; -- emit_bc(Bytecodes::_areturn); -- // 8: aload_2 -- update_branch_dest(_selectAlternative_bci, cur_bci()); -- emit_load(argv[2]); -- // 9: areturn -- emit_bc(Bytecodes::_areturn); -- return ArgToken(); // Dummy return value. -- } -- -- check_non_bcp_klass(klass, CHECK_(zero)); -- if (m->is_method_handle_invoke()) { -- check_non_bcp_klasses(m->method_handle_type(), CHECK_(zero)); -- } -- -- // Count the number of arguments, not the size -- ArgumentCount asc(signature); -- assert(argc == asc.size() + ((op == Bytecodes::_invokestatic || op == Bytecodes::_invokedynamic) ? 0 : 1), -- "argc mismatch"); -- -- for (int i = 0; i < argc; i++) { -- ArgToken arg = argv[i]; -- TokenType tt = arg.token_type(); -- BasicType bt = arg.basic_type(); -- -- switch (tt) { -- case tt_parameter: -- case tt_temporary: -- emit_load(bt, arg.index()); -- break; -- case tt_constant: -- emit_load_constant(arg); -- break; -- case tt_illegal: -- // Sentinel. -- assert(i == (argc - 1), "sentinel must be last entry"); -- break; -- case tt_void: -- default: -- ShouldNotReachHere(); -- } -- } -- -- // Populate constant pool. -- int name_index = cpool_symbol_put(name); -- int signature_index = cpool_symbol_put(signature); -- int name_and_type_index = cpool_name_and_type_put(name_index, signature_index); -- int klass_index = cpool_klass_put(klass); -- int methodref_index = cpool_methodref_put(op, klass_index, name_and_type_index, m); -- -- // Generate invoke. 
-- switch (op) { -- case Bytecodes::_invokestatic: -- case Bytecodes::_invokespecial: -- case Bytecodes::_invokevirtual: -- emit_bc(op, methodref_index); -- break; -- -- case Bytecodes::_invokeinterface: { -- ArgumentSizeComputer asc(signature); -- emit_bc(op, methodref_index, asc.size() + 1); -- break; -- } -- -- default: -- ShouldNotReachHere(); -- } -- -- // If tailcall, we have walked all the way to a direct method handle. -- // Otherwise, make a recursive call to some helper routine. -- BasicType rbt = m->result_type(); -- if (rbt == T_ARRAY) rbt = T_OBJECT; -- stack_push(rbt); // The return value is already pushed onto the stack. -- ArgToken ret; -- if (tailcall) { -- if (return_conv() == zero_return_conv()) { -- rbt = T_VOID; // discard value -- } else if (return_conv() != vmIntrinsics::_none) { -- // return value conversion -- int index = new_local_index(rbt); -- emit_store(rbt, index); -- ArgToken arglist[2]; -- arglist[0] = ArgToken(tt_temporary, rbt, index); -- arglist[1] = ArgToken(); // sentinel -- ret = make_invoke(methodHandle(), return_conv(), Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(zero)); -- set_return_conv(vmIntrinsics::_none); -- rbt = ret.basic_type(); -- emit_load(rbt, ret.index()); -- } -- if (rbt != _rtype) { -- if (rbt == T_VOID) { -- // push a zero of the right sort -- if (_rtype == T_OBJECT) { -- zero = make_oop_constant(NULL, CHECK_(zero)); -- } else { -- zero = make_prim_constant(_rtype, &zero_jvalue, CHECK_(zero)); -- } -- emit_load_constant(zero); -- } else if (_rtype == T_VOID) { -- // We'll emit a _return with something on the stack. -- // It's OK to ignore what's on the stack. -- } else if (rbt == T_INT && is_subword_type(_rtype)) { -- // Convert value to match return type. -- switch (_rtype) { -- case T_BOOLEAN: { -- // boolean is treated as a one-bit unsigned integer. -- // Cf. 
API documentation: java/lang/invoke/MethodHandles.html#explicitCastArguments -- ArgToken one = make_prim_constant(T_INT, &one_jvalue, CHECK_(zero)); -- emit_load_constant(one); -- emit_bc(Bytecodes::_iand); -- break; -- } -- case T_BYTE: emit_bc(Bytecodes::_i2b); break; -- case T_CHAR: emit_bc(Bytecodes::_i2c); break; -- case T_SHORT: emit_bc(Bytecodes::_i2s); break; -- default: ShouldNotReachHere(); -- } -- } else if (is_subword_type(rbt) && (is_subword_type(_rtype) || (_rtype == T_INT))) { -- // The subword type was returned as an int and will be passed -- // on as an int. -- } else { -- lose("unknown conversion", CHECK_(zero)); -- } -- } -- switch (_rtype) { -- case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT: -- case T_INT: emit_bc(Bytecodes::_ireturn); break; -- case T_LONG: emit_bc(Bytecodes::_lreturn); break; -- case T_FLOAT: emit_bc(Bytecodes::_freturn); break; -- case T_DOUBLE: emit_bc(Bytecodes::_dreturn); break; -- case T_VOID: emit_bc(Bytecodes::_return); break; -- case T_OBJECT: -- if (_rklass.not_null() && _rklass() != SystemDictionary::Object_klass() && !Klass::cast(_rklass())->is_interface()) { -- emit_bc(Bytecodes::_checkcast, cpool_klass_put(_rklass())); -- check_non_bcp_klass(_rklass(), CHECK_(zero)); -- } -- emit_bc(Bytecodes::_areturn); -- break; -- default: ShouldNotReachHere(); -- } -- ret = ArgToken(); // Dummy return value. 
-- } -- else { -- int index = new_local_index(rbt); -- switch (rbt) { -- case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT: -- case T_INT: case T_LONG: case T_FLOAT: case T_DOUBLE: -- case T_OBJECT: -- emit_store(rbt, index); -- ret = ArgToken(tt_temporary, rbt, index); -- break; -- case T_VOID: -- ret = ArgToken(tt_void); -- break; -- default: -- ShouldNotReachHere(); -- } -- } -- -- return ret; --} -- --MethodHandleWalker::ArgToken --MethodHandleCompiler::make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, -- const MethodHandleWalker::ArgToken& base, -- const MethodHandleWalker::ArgToken& offset, -- TRAPS) { -- switch (base.token_type()) { -- case tt_parameter: -- case tt_temporary: -- emit_load(base.basic_type(), base.index()); -- break; -- case tt_constant: -- emit_load_constant(base); -- break; -- default: -- ShouldNotReachHere(); -- } -- switch (offset.token_type()) { -- case tt_parameter: -- case tt_temporary: -- emit_load(offset.basic_type(), offset.index()); -- break; -- case tt_constant: -- emit_load_constant(offset); -- break; -- default: -- ShouldNotReachHere(); -- } -- emit_bc(op); -- int index = new_local_index(type); -- emit_store(type, index); -- return ArgToken(tt_temporary, type, index); --} -- -- --int MethodHandleCompiler::cpool_primitive_put(BasicType bt, jvalue* con) { -- jvalue con_copy; -- assert(bt < T_OBJECT, ""); -- if (type2aelembytes(bt) < jintSize) { -- // widen to int -- con_copy = (*con); -- con = &con_copy; -- switch (bt) { -- case T_BOOLEAN: con->i = (con->z ? 
1 : 0); break; -- case T_BYTE: con->i = con->b; break; -- case T_CHAR: con->i = con->c; break; -- case T_SHORT: con->i = con->s; break; -- default: ShouldNotReachHere(); -- } -- bt = T_INT; -- } -- --// for (int i = 1, imax = _constants.length(); i < imax; i++) { --// ConstantValue* con = _constants.at(i); --// if (con != NULL && con->is_primitive() && con.basic_type() == bt) { --// bool match = false; --// switch (type2size[bt]) { --// case 1: if (pcon->_value.i == con->i) match = true; break; --// case 2: if (pcon->_value.j == con->j) match = true; break; --// } --// if (match) --// return i; --// } --// } -- ConstantValue* cv = new ConstantValue(bt, *con); -- int index = _constants.append(cv); -- -- // long and double entries take 2 slots, we add another empty entry. -- if (type2size[bt] == 2) -- (void) _constants.append(NULL); -- -- return index; --} -- --bool MethodHandleCompiler::check_non_bcp_klasses(Handle method_type, TRAPS) { -- bool res = false; -- for (int i = -1, len = java_lang_invoke_MethodType::ptype_count(method_type()); i < len; i++) { -- oop ptype = (i == -1 -- ? 
java_lang_invoke_MethodType::rtype(method_type()) -- : java_lang_invoke_MethodType::ptype(method_type(), i)); -- res |= check_non_bcp_klass(java_lang_Class::as_klassOop(ptype), CHECK_(false)); -- } -- return res; --} -- --bool MethodHandleCompiler::check_non_bcp_klass(klassOop klass, TRAPS) { -- klass = methodOopDesc::check_non_bcp_klass(klass); -- if (klass != NULL) { -- Symbol* name = Klass::cast(klass)->name(); -- for (int i = _non_bcp_klasses.length() - 1; i >= 0; i--) { -- klassOop k2 = _non_bcp_klasses.at(i)(); -- if (Klass::cast(k2)->name() == name) { -- if (k2 != klass) { -- lose(err_msg("unsupported klass name alias %s", name->as_utf8()), THREAD); -- } -- return true; -- } -- } -- _non_bcp_klasses.append(KlassHandle(THREAD, klass)); -- return true; -- } -- return false; --} -- --void MethodHandleCompiler::record_non_bcp_klasses() { -- // Append extra klasses to constant pool, to guide klass lookup. -- for (int k = 0; k < _non_bcp_klasses.length(); k++) { -- klassOop non_bcp_klass = _non_bcp_klasses.at(k)(); -- bool add_to_cp = true; -- for (int j = 1; j < _constants.length(); j++) { -- ConstantValue* cv = _constants.at(j); -- if (cv != NULL && cv->tag() == JVM_CONSTANT_Class -- && cv->klass_oop() == non_bcp_klass) { -- add_to_cp = false; -- break; -- } -- } -- if (add_to_cp) cpool_klass_put(non_bcp_klass); -- } --} -- --constantPoolHandle MethodHandleCompiler::get_constant_pool(TRAPS) const { -- constantPoolHandle nullHandle; -- constantPoolOop cpool_oop = oopFactory::new_constantPool(_constants.length(), -- oopDesc::IsSafeConc, -- CHECK_(nullHandle)); -- constantPoolHandle cpool(THREAD, cpool_oop); -- -- // Fill the real constant pool skipping the zero element. 
-- for (int i = 1; i < _constants.length(); i++) { -- ConstantValue* cv = _constants.at(i); -- switch (cv->tag()) { -- case JVM_CONSTANT_Utf8: cpool->symbol_at_put( i, cv->symbol() ); break; -- case JVM_CONSTANT_Integer: cpool->int_at_put( i, cv->get_jint() ); break; -- case JVM_CONSTANT_Float: cpool->float_at_put( i, cv->get_jfloat() ); break; -- case JVM_CONSTANT_Long: cpool->long_at_put( i, cv->get_jlong() ); break; -- case JVM_CONSTANT_Double: cpool->double_at_put( i, cv->get_jdouble() ); break; -- case JVM_CONSTANT_Class: cpool->klass_at_put( i, cv->klass_oop() ); break; -- case JVM_CONSTANT_Methodref: cpool->method_at_put( i, cv->first_index(), cv->second_index()); break; -- case JVM_CONSTANT_InterfaceMethodref: -- cpool->interface_method_at_put(i, cv->first_index(), cv->second_index()); break; -- case JVM_CONSTANT_NameAndType: cpool->name_and_type_at_put(i, cv->first_index(), cv->second_index()); break; -- case JVM_CONSTANT_Object: cpool->object_at_put( i, cv->object_oop() ); break; -- default: ShouldNotReachHere(); -- } -- -- switch (cv->tag()) { -- case JVM_CONSTANT_Long: -- case JVM_CONSTANT_Double: -- i++; // Skip empty entry. -- assert(_constants.at(i) == NULL, "empty entry"); -- break; -- } -- } -- -- cpool->set_preresolution(); -- -- // Set the constant pool holder to the target method's class. -- cpool->set_pool_holder(_target_klass()); -- -- return cpool; --} -- -- --methodHandle MethodHandleCompiler::get_method_oop(TRAPS) { -- methodHandle empty; -- // Create a method that holds the generated bytecode. invokedynamic -- // has no receiver, normal MH calls do. 
-- int flags_bits; -- if (for_invokedynamic()) -- flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL | JVM_ACC_SYNTHETIC | JVM_ACC_STATIC); -- else -- flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL | JVM_ACC_SYNTHETIC); -- -- // Create a new method -- methodHandle m; -- { -- methodOop m_oop = oopFactory::new_method(bytecode_length(), -- accessFlags_from(flags_bits), -- 0, 0, 0, oopDesc::IsSafeConc, CHECK_(empty)); -- m = methodHandle(THREAD, m_oop); -- } -- -- constantPoolHandle cpool = get_constant_pool(CHECK_(empty)); -- m->set_constants(cpool()); -- -- m->set_name_index(_name_index); -- m->set_signature_index(_signature_index); -- -- m->set_code((address) bytecode()); -- -- m->set_max_stack(_max_stack); -- m->set_max_locals(max_locals()); -- m->set_size_of_parameters(_num_params); -- -- typeArrayHandle exception_handlers(THREAD, Universe::the_empty_int_array()); -- m->set_exception_table(exception_handlers()); -- -- // Rewrite the method and set up the constant pool cache. -- objArrayOop m_array = oopFactory::new_system_objArray(1, CHECK_(empty)); -- objArrayHandle methods(THREAD, m_array); -- methods->obj_at_put(0, m()); -- Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(empty)); // Use fake class. -- Rewriter::relocate_and_link(_target_klass(), methods, CHECK_(empty)); // Use fake class. -- -- // Pre-resolve selected CP cache entries, to avoid problems with class loader scoping. 
-- constantPoolCacheHandle cpc(THREAD, cpool->cache()); -- for (int i = 0; i < cpc->length(); i++) { -- ConstantPoolCacheEntry* e = cpc->entry_at(i); -- assert(!e->is_secondary_entry(), "no indy instructions in here, yet"); -- int constant_pool_index = e->constant_pool_index(); -- ConstantValue* cv = _constants.at(constant_pool_index); -- if (!cv->has_linkage()) continue; -- methodHandle m = cv->linkage(); -- int index; -- switch (cv->tag()) { -- case JVM_CONSTANT_Methodref: -- index = m->vtable_index(); -- if (m->is_static()) { -- e->set_method(Bytecodes::_invokestatic, m, index); -- } else { -- e->set_method(Bytecodes::_invokespecial, m, index); -- e->set_method(Bytecodes::_invokevirtual, m, index); -- } -- break; -- case JVM_CONSTANT_InterfaceMethodref: -- index = klassItable::compute_itable_index(m()); -- e->set_interface_call(m, index); -- break; -- } -- } -- -- // Set the invocation counter's count to the invoke count of the -- // original call site. -- InvocationCounter* ic = m->invocation_counter(); -- ic->set(InvocationCounter::wait_for_compile, _invoke_count); -- -- // Create a new MDO -- { -- methodDataOop mdo = oopFactory::new_methodData(m, CHECK_(empty)); -- assert(m->method_data() == NULL, "there should not be an MDO yet"); -- m->set_method_data(mdo); -- -- bool found_selectAlternative = false; -- // Iterate over all profile data and set the count of the counter -- // data entries to the original call site counter. 
-- for (ProfileData* profile_data = mdo->first_data(); -- mdo->is_valid(profile_data); -- profile_data = mdo->next_data(profile_data)) { -- if (profile_data->is_CounterData()) { -- CounterData* counter_data = profile_data->as_CounterData(); -- counter_data->set_count(_invoke_count); -- } -- if (profile_data->is_BranchData() && -- profile_data->bci() == _selectAlternative_bci) { -- BranchData* bd = profile_data->as_BranchData(); -- bd->set_taken(_taken_count); -- bd->set_not_taken(_not_taken_count); -- found_selectAlternative = true; -- } -- } -- assert(_selectAlternative_bci == -1 || found_selectAlternative, "must have found profile entry"); -- } -- --#ifndef PRODUCT -- if (TraceMethodHandles) { -- m->print(); -- m->print_codes(); -- } --#endif //PRODUCT -- -- assert(m->is_method_handle_adapter(), "must be recognized as an adapter"); -- return m; --} -- -- --#ifndef PRODUCT -- --// MH printer for debugging. -- --class MethodHandlePrinter : public MethodHandleWalker { --private: -- outputStream* _out; -- bool _verbose; -- int _temp_num; -- int _param_state; -- stringStream _strbuf; -- const char* strbuf() { -- const char* s = _strbuf.as_string(); -- _strbuf.reset(); -- return s; -- } -- ArgToken token(const char* str, BasicType type) { -- return ArgToken(str, type); -- } -- const char* string(ArgToken token) { -- return token.str(); -- } -- void start_params() { -- _param_state <<= 1; -- _out->print("("); -- } -- void end_params() { -- if (_verbose) _out->print("\n"); -- _out->print(") => {"); -- _param_state >>= 1; -- } -- void put_type_name(BasicType type, klassOop tk, outputStream* s) { -- const char* kname = NULL; -- if (tk != NULL) -- kname = Klass::cast(tk)->external_name(); -- s->print("%s", (kname != NULL) ? 
kname : type2name(type)); -- } -- ArgToken maybe_make_temp(const char* statement_op, BasicType type, const char* temp_name) { -- const char* value = strbuf(); -- if (!_verbose) return token(value, type); -- // make an explicit binding for each separate value -- _strbuf.print("%s%d", temp_name, ++_temp_num); -- const char* temp = strbuf(); -- _out->print("\n %s %s %s = %s;", statement_op, type2name(type), temp, value); -- return token(temp, type); -- } -- --public: -- MethodHandlePrinter(Handle root, bool verbose, outputStream* out, TRAPS) -- : MethodHandleWalker(root, false, THREAD), -- _out(out), -- _verbose(verbose), -- _param_state(0), -- _temp_num(0) -- { -- out->print("MethodHandle:"); -- java_lang_invoke_MethodType::print_signature(java_lang_invoke_MethodHandle::type(root()), out); -- out->print(" : #"); -- start_params(); -- } -- virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) { -- if (argnum < 0) { -- end_params(); -- return token("return", type); -- } -- if ((_param_state & 1) == 0) { -- _param_state |= 1; -- _out->print(_verbose ? "\n " : ""); -- } else { -- _out->print(_verbose ? 
",\n " : ", "); -- } -- if (argnum >= _temp_num) -- _temp_num = argnum; -- // generate an argument name -- _strbuf.print("a%d", argnum); -- const char* arg = strbuf(); -- put_type_name(type, tk, _out); -- _out->print(" %s", arg); -- return token(arg, type); -- } -- virtual ArgToken make_oop_constant(oop con, TRAPS) { -- if (con == NULL) -- _strbuf.print("null"); -- else -- con->print_value_on(&_strbuf); -- if (_strbuf.size() == 0) { // yuck -- _strbuf.print("(a "); -- put_type_name(T_OBJECT, con->klass(), &_strbuf); -- _strbuf.print(")"); -- } -- return maybe_make_temp("constant", T_OBJECT, "k"); -- } -- virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) { -- java_lang_boxing_object::print(type, con, &_strbuf); -- return maybe_make_temp("constant", type, "k"); -- } -- void print_bytecode_name(Bytecodes::Code op) { -- if (Bytecodes::is_defined(op)) -- _strbuf.print("%s", Bytecodes::name(op)); -- else -- _strbuf.print("bytecode_%d", (int) op); -- } -- virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS) { -- print_bytecode_name(op); -- _strbuf.print("(%s", string(src)); -- if (tk != NULL) { -- _strbuf.print(", "); -- put_type_name(type, tk, &_strbuf); -- } -- _strbuf.print(")"); -- return maybe_make_temp("convert", type, "v"); -- } -- virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS) { -- _strbuf.print("%s(%s, %s", Bytecodes::name(op), string(base), string(offset)); -- if (tk != NULL) { -- _strbuf.print(", "); -- put_type_name(type, tk, &_strbuf); -- } -- _strbuf.print(")"); -- return maybe_make_temp("fetch", type, "x"); -- } -- virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid, -- Bytecodes::Code op, bool tailcall, -- int argc, ArgToken* argv, TRAPS) { -- Symbol* name; -- Symbol* sig; -- if (m.not_null()) { -- name = m->name(); -- sig = m->signature(); -- } else { -- name = 
vmSymbols::symbol_at(vmIntrinsics::name_for(iid)); -- sig = vmSymbols::symbol_at(vmIntrinsics::signature_for(iid)); -- } -- _strbuf.print("%s %s%s(", Bytecodes::name(op), name->as_C_string(), sig->as_C_string()); -- for (int i = 0; i < argc; i++) { -- _strbuf.print("%s%s", (i > 0 ? ", " : ""), string(argv[i])); -- } -- _strbuf.print(")"); -- if (!tailcall) { -- BasicType rt = char2type(sig->byte_at(sig->utf8_length()-1)); -- if (rt == T_ILLEGAL) rt = T_OBJECT; // ';' at the end of '(...)L...;' -- return maybe_make_temp("invoke", rt, "x"); -- } else { -- const char* ret = strbuf(); -- _out->print(_verbose ? "\n return " : " "); -- _out->print("%s", ret); -- _out->print(_verbose ? "\n}\n" : " }"); -- } -- return ArgToken(); -- } -- -- virtual void set_method_handle(oop mh) { -- if (WizardMode && Verbose) { -- tty->print("\n--- next target: "); -- mh->print(); -- } -- } -- -- static void print(Handle root, bool verbose, outputStream* out, TRAPS) { -- ResourceMark rm; -- MethodHandlePrinter printer(root, verbose, out, CHECK); -- printer.walk(CHECK); -- out->print("\n"); -- } -- static void print(Handle root, bool verbose = Verbose, outputStream* out = tty) { -- Thread* THREAD = Thread::current(); -- ResourceMark rm; -- MethodHandlePrinter printer(root, verbose, out, THREAD); -- if (!HAS_PENDING_EXCEPTION) -- printer.walk(THREAD); -- if (HAS_PENDING_EXCEPTION) { -- oop ex = PENDING_EXCEPTION; -- CLEAR_PENDING_EXCEPTION; -- out->print(" *** "); -- if (printer.lose_message() != NULL) out->print("%s ", printer.lose_message()); -- out->print("}"); -- } -- out->print("\n"); -- } --}; -- --extern "C" --void print_method_handle(oop mh) { -- if (!mh->is_oop()) { -- tty->print_cr("*** not a method handle: "PTR_FORMAT, (intptr_t)mh); -- } else if (java_lang_invoke_MethodHandle::is_instance(mh)) { -- MethodHandlePrinter::print(mh); -- } else { -- tty->print("*** not a method handle: "); -- mh->print(); -- } --} -- --#endif // PRODUCT -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 
src/share/vm/prims/methodHandleWalk.hpp ---- openjdk/hotspot/src/share/vm/prims/methodHandleWalk.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ /dev/null Thu Jan 01 00:00:00 1970 +0000 -@@ -1,486 +0,0 @@ --/* -- * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. -- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -- * -- * This code is free software; you can redistribute it and/or modify it -- * under the terms of the GNU General Public License version 2 only, as -- * published by the Free Software Foundation. -- * -- * This code is distributed in the hope that it will be useful, but WITHOUT -- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -- * version 2 for more details (a copy is included in the LICENSE file that -- * accompanied this code). -- * -- * You should have received a copy of the GNU General Public License version -- * 2 along with this work; if not, write to the Free Software Foundation, -- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -- * -- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -- * or visit www.oracle.com if you need additional information or have any -- * questions. -- * -- */ -- --#ifndef SHARE_VM_PRIMS_METHODHANDLEWALK_HPP --#define SHARE_VM_PRIMS_METHODHANDLEWALK_HPP -- --#include "prims/methodHandles.hpp" -- --// Low-level parser for method handle chains. 
--class MethodHandleChain : StackObj { --public: -- typedef MethodHandles::EntryKind EntryKind; -- --private: -- Handle _root; // original target -- Handle _method_handle; // current target -- bool _is_last; // final guy in chain -- bool _is_bound; // has a bound argument -- BasicType _arg_type; // if is_bound, the bound argument type -- int _arg_slot; // if is_bound or is_adapter, affected argument slot -- jint _conversion; // conversion field of AMH or -1 -- methodHandle _last_method; // if is_last, which method we target -- Bytecodes::Code _last_invoke; // if is_last, type of invoke -- const char* _lose_message; // saved argument to lose() -- -- void set_method_handle(Handle target, TRAPS); -- void set_last_method(oop target, TRAPS); -- static BasicType compute_bound_arg_type(oop target, methodOop m, int arg_slot, TRAPS); -- -- oop MethodHandle_type_oop() { return java_lang_invoke_MethodHandle::type(method_handle_oop()); } -- oop MethodHandle_vmtarget_oop() { return java_lang_invoke_MethodHandle::vmtarget(method_handle_oop()); } -- int MethodHandle_vmslots() { return java_lang_invoke_MethodHandle::vmslots(method_handle_oop()); } -- int DirectMethodHandle_vmindex() { return java_lang_invoke_DirectMethodHandle::vmindex(method_handle_oop()); } -- oop BoundMethodHandle_argument_oop() { return java_lang_invoke_BoundMethodHandle::argument(method_handle_oop()); } -- int BoundMethodHandle_vmargslot() { return java_lang_invoke_BoundMethodHandle::vmargslot(method_handle_oop()); } -- int AdapterMethodHandle_conversion() { return java_lang_invoke_AdapterMethodHandle::conversion(method_handle_oop()); } -- --#ifdef ASSERT -- void print_impl(TRAPS); --#endif -- --public: -- MethodHandleChain(Handle root, TRAPS) -- : _root(root) -- { set_method_handle(root, THREAD); } -- -- bool is_adapter() { return _conversion != -1; } -- bool is_bound() { return _is_bound; } -- bool is_last() { return _is_last; } -- -- void next(TRAPS) { -- assert(!is_last(), ""); -- 
set_method_handle(MethodHandle_vmtarget_oop(), THREAD); -- } -- -- Handle root() { return _root; } -- Handle method_handle() { return _method_handle; } -- oop method_handle_oop() { return _method_handle(); } -- oop method_type_oop() { return MethodHandle_type_oop(); } -- oop vmtarget_oop() { return MethodHandle_vmtarget_oop(); } -- -- jint adapter_conversion() { assert(is_adapter(), ""); return _conversion; } -- int adapter_conversion_op() { return MethodHandles::adapter_conversion_op(adapter_conversion()); } -- BasicType adapter_conversion_src_type() -- { return MethodHandles::adapter_conversion_src_type(adapter_conversion()); } -- BasicType adapter_conversion_dest_type() -- { return MethodHandles::adapter_conversion_dest_type(adapter_conversion()); } -- int adapter_conversion_stack_move() -- { return MethodHandles::adapter_conversion_stack_move(adapter_conversion()); } -- int adapter_conversion_stack_pushes() -- { return adapter_conversion_stack_move() / MethodHandles::stack_move_unit(); } -- int adapter_conversion_vminfo() -- { return MethodHandles::adapter_conversion_vminfo(adapter_conversion()); } -- int adapter_arg_slot() { assert(is_adapter(), ""); return _arg_slot; } -- oop adapter_arg_oop() { assert(is_adapter(), ""); return BoundMethodHandle_argument_oop(); } -- -- BasicType bound_arg_type() { assert(is_bound(), ""); return _arg_type; } -- int bound_arg_slot() { assert(is_bound(), ""); return _arg_slot; } -- oop bound_arg_oop() { assert(is_bound(), ""); return BoundMethodHandle_argument_oop(); } -- -- methodHandle last_method() { assert(is_last(), ""); return _last_method; } -- methodOop last_method_oop() { assert(is_last(), ""); return _last_method(); } -- Bytecodes::Code last_invoke_code() { assert(is_last(), ""); return _last_invoke; } -- -- void lose(const char* msg, TRAPS); -- const char* lose_message() { return _lose_message; } -- --#ifdef ASSERT -- // Print a symbolic description of a method handle chain, including -- // the signature for each 
method. The signatures are printed in -- // slot order to make it easier to understand. -- void print(); -- static void print(oopDesc* mh); --#endif --}; -- -- --// Structure walker for method handles. --// Does abstract interpretation on top of low-level parsing. --// You supply the tokens shuffled by the abstract interpretation. --class MethodHandleWalker : StackObj { --public: -- // Stack values: -- enum TokenType { -- tt_void, -- tt_parameter, -- tt_temporary, -- tt_constant, -- tt_symbolic, -- tt_illegal -- }; -- -- // Argument token: -- class ArgToken { -- private: -- TokenType _tt; -- BasicType _bt; -- jvalue _value; -- Handle _handle; -- -- public: -- ArgToken(TokenType tt = tt_illegal) : _tt(tt), _bt(tt == tt_void ? T_VOID : T_ILLEGAL) { -- assert(tt == tt_illegal || tt == tt_void, "invalid token type"); -- } -- -- ArgToken(TokenType tt, BasicType bt, int index) : _tt(tt), _bt(bt) { -- assert(_tt == tt_parameter || _tt == tt_temporary, "must have index"); -- _value.i = index; -- } -- -- ArgToken(BasicType bt, jvalue value) : _tt(tt_constant), _bt(bt), _value(value) { assert(_bt != T_OBJECT, "wrong constructor"); } -- ArgToken(Handle handle) : _tt(tt_constant), _bt(T_OBJECT), _handle(handle) {} -- -- -- ArgToken(const char* str, BasicType type) : _tt(tt_symbolic), _bt(type) { -- _value.j = (intptr_t)str; -- } -- -- TokenType token_type() const { return _tt; } -- BasicType basic_type() const { return _bt; } -- bool has_index() const { return _tt == tt_parameter || _tt == tt_temporary; } -- int index() const { assert(has_index(), "must have index");; return _value.i; } -- Handle object() const { assert(_bt == T_OBJECT, "wrong accessor"); assert(_tt == tt_constant, "value type"); return _handle; } -- const char* str() const { assert(_tt == tt_symbolic, "string type"); return (const char*)(intptr_t)_value.j; } -- -- jint get_jint() const { assert(_bt == T_INT || is_subword_type(_bt), "wrong accessor"); assert(_tt == tt_constant, "value types"); return _value.i; 
} -- jlong get_jlong() const { assert(_bt == T_LONG, "wrong accessor"); assert(_tt == tt_constant, "value types"); return _value.j; } -- jfloat get_jfloat() const { assert(_bt == T_FLOAT, "wrong accessor"); assert(_tt == tt_constant, "value types"); return _value.f; } -- jdouble get_jdouble() const { assert(_bt == T_DOUBLE, "wrong accessor"); assert(_tt == tt_constant, "value types"); return _value.d; } -- }; -- --private: -- MethodHandleChain _chain; -- bool _for_invokedynamic; -- int _local_index; -- -- // This array is kept in an unusual order, indexed by low-level "slot number". -- // TOS is always _outgoing.at(0), so simple pushes and pops shift the whole _outgoing array. -- // If there is a receiver in the current argument list, it is at _outgoing.at(_outgoing.length()-1). -- // If a value at _outgoing.at(n) is T_LONG or T_DOUBLE, the value at _outgoing.at(n+1) is T_VOID. -- GrowableArray _outgoing; // current outgoing parameter slots -- int _outgoing_argc; // # non-empty outgoing slots -- -- vmIntrinsics::ID _return_conv; // Return conversion required by raw retypes. -- -- // Replace a value of type old_type at slot (and maybe slot+1) with the new value. -- // If old_type != T_VOID, remove the old argument at that point. -- // If new_type != T_VOID, insert the new argument at that point. -- // Insert or delete a second empty slot as needed. -- void change_argument(BasicType old_type, int slot, const ArgToken& new_arg); -- void change_argument(BasicType old_type, int slot, BasicType type, const ArgToken& new_arg) { -- assert(type == new_arg.basic_type(), "must agree"); -- change_argument(old_type, slot, new_arg); -- } -- -- // Raw retype conversions for OP_RAW_RETYPE. 
-- void retype_raw_conversion(BasicType src, BasicType dst, bool for_return, int slot, TRAPS); -- void retype_raw_argument_type(BasicType src, BasicType dst, int slot, TRAPS) { retype_raw_conversion(src, dst, false, slot, CHECK); } -- void retype_raw_return_type( BasicType src, BasicType dst, TRAPS) { retype_raw_conversion(src, dst, true, -1, CHECK); } -- -- BasicType arg_type(int slot) { -- return _outgoing.at(slot).basic_type(); -- } -- bool has_argument(int slot) { -- return arg_type(slot) < T_VOID; -- } -- --#ifdef ASSERT -- int argument_count_slow(); --#endif -- -- // Return a bytecode for converting src to dest, if one exists. -- Bytecodes::Code conversion_code(BasicType src, BasicType dest); -- -- void walk_incoming_state(TRAPS); -- -- void verify_args_and_signature(TRAPS) NOT_DEBUG_RETURN; -- --public: -- MethodHandleWalker(Handle root, bool for_invokedynamic, TRAPS) -- : _chain(root, THREAD), -- _for_invokedynamic(for_invokedynamic), -- _outgoing(THREAD, 10), -- _outgoing_argc(0), -- _return_conv(vmIntrinsics::_none) -- { -- _local_index = for_invokedynamic ? 0 : 1; -- } -- -- MethodHandleChain& chain() { return _chain; } -- -- bool for_invokedynamic() const { return _for_invokedynamic; } -- -- vmIntrinsics::ID return_conv() const { return _return_conv; } -- void set_return_conv(vmIntrinsics::ID c) { _return_conv = c; } -- static vmIntrinsics::ID zero_return_conv() { return vmIntrinsics::_min; } -- -- int new_local_index(BasicType bt) { -- //int index = _for_invokedynamic ? 
_local_index : _local_index - 1; -- int index = _local_index; -- _local_index += type2size[bt]; -- return index; -- } -- -- int max_locals() const { return _local_index; } -- -- // plug-in abstract interpretation steps: -- virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) = 0; -- virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) = 0; -- virtual ArgToken make_oop_constant(oop con, TRAPS) = 0; -- virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS) = 0; -- virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS) = 0; -- virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS) = 0; -- -- // For make_invoke, the methodHandle can be NULL if the intrinsic ID -- // is something other than vmIntrinsics::_none. -- -- // and in case anyone cares to related the previous actions to the chain: -- virtual void set_method_handle(oop mh) { } -- -- void lose(const char* msg, TRAPS) { chain().lose(msg, THREAD); } -- const char* lose_message() { return chain().lose_message(); } -- -- ArgToken walk(TRAPS); --}; -- -- --// An abstract interpreter for method handle chains. --// Produces an account of the semantics of a chain, in terms of a static IR. --// The IR happens to be JVM bytecodes. --class MethodHandleCompiler : public MethodHandleWalker { --private: -- int _invoke_count; // count the original call site has been executed -- KlassHandle _rklass; // Return type for casting. -- BasicType _rtype; -- KlassHandle _target_klass; -- Thread* _thread; -- -- int _selectAlternative_bci; // These are used for capturing profiles from GWTs -- int _taken_count; -- int _not_taken_count; -- -- // Values used by the compiler. -- static jvalue zero_jvalue; -- static jvalue one_jvalue; -- -- // Fake constant pool entry. 
-- class ConstantValue : public ResourceObj { -- private: -- int _tag; // Constant pool tag type. -- JavaValue _value; -- Handle _handle; -- Symbol* _sym; -- methodHandle _method; // pre-linkage -- -- public: -- // Constructor for oop types. -- ConstantValue(int tag, Handle con) : _tag(tag), _handle(con) { -- assert(tag == JVM_CONSTANT_Class || -- tag == JVM_CONSTANT_String || -- tag == JVM_CONSTANT_Object, "must be oop type"); -- } -- -- ConstantValue(int tag, Symbol* con) : _tag(tag), _sym(con) { -- assert(tag == JVM_CONSTANT_Utf8, "must be symbol type"); -- } -- -- // Constructor for oop reference types. -- ConstantValue(int tag, int index) : _tag(tag) { -- assert(JVM_CONSTANT_Fieldref <= tag && tag <= JVM_CONSTANT_NameAndType, "must be ref type"); -- _value.set_jint(index); -- } -- ConstantValue(int tag, int first_index, int second_index) : _tag(tag) { -- assert(JVM_CONSTANT_Fieldref <= tag && tag <= JVM_CONSTANT_NameAndType, "must be ref type"); -- _value.set_jint(first_index << 16 | second_index); -- } -- -- // Constructor for primitive types. 
-- ConstantValue(BasicType bt, jvalue con) { -- _value.set_type(bt); -- switch (bt) { -- case T_INT: _tag = JVM_CONSTANT_Integer; _value.set_jint( con.i); break; -- case T_LONG: _tag = JVM_CONSTANT_Long; _value.set_jlong( con.j); break; -- case T_FLOAT: _tag = JVM_CONSTANT_Float; _value.set_jfloat( con.f); break; -- case T_DOUBLE: _tag = JVM_CONSTANT_Double; _value.set_jdouble(con.d); break; -- default: ShouldNotReachHere(); -- } -- } -- -- int tag() const { return _tag; } -- Symbol* symbol() const { return _sym; } -- klassOop klass_oop() const { return (klassOop) _handle(); } -- oop object_oop() const { return _handle(); } -- int index() const { return _value.get_jint(); } -- int first_index() const { return _value.get_jint() >> 16; } -- int second_index() const { return _value.get_jint() & 0x0000FFFF; } -- -- bool is_primitive() const { return is_java_primitive(_value.get_type()); } -- jint get_jint() const { return _value.get_jint(); } -- jlong get_jlong() const { return _value.get_jlong(); } -- jfloat get_jfloat() const { return _value.get_jfloat(); } -- jdouble get_jdouble() const { return _value.get_jdouble(); } -- -- void set_linkage(methodHandle method) { -- assert(_method.is_null(), ""); -- _method = method; -- } -- bool has_linkage() const { return _method.not_null(); } -- methodHandle linkage() const { return _method; } -- }; -- -- // Fake constant pool. -- GrowableArray _constants; -- -- // Non-BCP classes that appear in associated MethodTypes (require special handling). 
-- GrowableArray _non_bcp_klasses; -- -- // Accumulated compiler state: -- GrowableArray _bytecode; -- -- int _cur_stack; -- int _max_stack; -- int _num_params; -- int _name_index; -- int _signature_index; -- -- void stack_push(BasicType bt) { -- _cur_stack += type2size[bt]; -- if (_cur_stack > _max_stack) _max_stack = _cur_stack; -- } -- void stack_pop(BasicType bt) { -- _cur_stack -= type2size[bt]; -- assert(_cur_stack >= 0, "sanity"); -- } -- -- unsigned char* bytecode() const { return _bytecode.adr_at(0); } -- int bytecode_length() const { return _bytecode.length(); } -- int cur_bci() const { return _bytecode.length(); } -- -- // Fake constant pool. -- int cpool_oop_put(int tag, Handle con) { -- if (con.is_null()) return 0; -- ConstantValue* cv = new ConstantValue(tag, con); -- return _constants.append(cv); -- } -- -- int cpool_symbol_put(int tag, Symbol* con) { -- if (con == NULL) return 0; -- ConstantValue* cv = new ConstantValue(tag, con); -- con->increment_refcount(); -- return _constants.append(cv); -- } -- -- int cpool_oop_reference_put(int tag, int first_index, int second_index, methodHandle method) { -- if (first_index == 0 && second_index == 0) return 0; -- assert(first_index != 0 && second_index != 0, "no zero indexes"); -- ConstantValue* cv = new ConstantValue(tag, first_index, second_index); -- if (method.not_null()) cv->set_linkage(method); -- return _constants.append(cv); -- } -- -- int cpool_primitive_put(BasicType type, jvalue* con); -- -- bool check_non_bcp_klasses(Handle method_type, TRAPS); -- bool check_non_bcp_klass(klassOop klass, TRAPS); -- void record_non_bcp_klasses(); -- -- int cpool_int_put(jint value) { -- jvalue con; con.i = value; -- return cpool_primitive_put(T_INT, &con); -- } -- int cpool_long_put(jlong value) { -- jvalue con; con.j = value; -- return cpool_primitive_put(T_LONG, &con); -- } -- int cpool_float_put(jfloat value) { -- jvalue con; con.f = value; -- return cpool_primitive_put(T_FLOAT, &con); -- } -- int 
cpool_double_put(jdouble value) { -- jvalue con; con.d = value; -- return cpool_primitive_put(T_DOUBLE, &con); -- } -- -- int cpool_object_put(Handle obj) { -- return cpool_oop_put(JVM_CONSTANT_Object, obj); -- } -- int cpool_symbol_put(Symbol* sym) { -- return cpool_symbol_put(JVM_CONSTANT_Utf8, sym); -- } -- int cpool_klass_put(klassOop klass) { -- return cpool_oop_put(JVM_CONSTANT_Class, klass); -- } -- int cpool_methodref_put(Bytecodes::Code op, int class_index, int name_and_type_index, methodHandle method) { -- int tag = (op == Bytecodes::_invokeinterface ? JVM_CONSTANT_InterfaceMethodref : JVM_CONSTANT_Methodref); -- return cpool_oop_reference_put(tag, class_index, name_and_type_index, method); -- } -- int cpool_name_and_type_put(int name_index, int signature_index) { -- return cpool_oop_reference_put(JVM_CONSTANT_NameAndType, name_index, signature_index, methodHandle()); -- } -- -- void emit_bc(Bytecodes::Code op, int index = 0, int args_size = -1); -- void update_branch_dest(int src, int dst); -- void emit_load(ArgToken arg); -- void emit_load(BasicType bt, int index); -- void emit_store(BasicType bt, int index); -- void emit_load_constant(ArgToken arg); -- -- virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) { -- return ArgToken(tt_parameter, type, argnum); -- } -- virtual ArgToken make_oop_constant(oop con, TRAPS) { -- Handle h(THREAD, con); -- return ArgToken(h); -- } -- virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) { -- return ArgToken(type, *con); -- } -- -- virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS); -- virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS); -- virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS); -- -- // Check for profiling information on a GWT and return 
true if it's found -- bool fetch_counts(ArgToken a1, ArgToken a2); -- -- // Get a real constant pool. -- constantPoolHandle get_constant_pool(TRAPS) const; -- -- // Get a real methodOop. -- methodHandle get_method_oop(TRAPS); -- --public: -- MethodHandleCompiler(Handle root, Symbol* name, Symbol* signature, int invoke_count, bool for_invokedynamic, TRAPS); -- -- // Compile the given MH chain into bytecode. -- methodHandle compile(TRAPS); -- -- // Tests if the given class is a MH adapter holder. -- static bool klass_is_method_handle_adapter_holder(klassOop klass) { -- return (klass == SystemDictionary::MethodHandle_klass()); -- } --}; -- --#endif // SHARE_VM_PRIMS_METHODHANDLEWALK_HPP -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/prims/methodHandles.cpp ---- openjdk/hotspot/src/share/vm/prims/methodHandles.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/prims/methodHandles.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -30,166 +30,30 @@ - #include "memory/allocation.inline.hpp" - #include "memory/oopFactory.hpp" - #include "prims/methodHandles.hpp" --#include "prims/methodHandleWalk.hpp" - #include "runtime/compilationPolicy.hpp" - #include "runtime/javaCalls.hpp" - #include "runtime/reflection.hpp" - #include "runtime/signature.hpp" - #include "runtime/stubRoutines.hpp" - -+ - /* - * JSR 292 reference implementation: method handles -+ * The JDK 7 reference implementation represented method handle -+ * combinations as chains. Each link in the chain had a "vmentry" -+ * field which pointed at a bit of assembly code which performed -+ * one transformation before dispatching to the next link in the chain. -+ * -+ * The current reference implementation pushes almost all code generation -+ * responsibility to (trusted) Java code. A method handle contains a -+ * pointer to its "LambdaForm", which embodies all details of the method -+ * handle's behavior. The LambdaForm is a normal Java object, managed -+ * by a runtime coded in Java. 
- */ - - bool MethodHandles::_enabled = false; // set true after successful native linkage -- --MethodHandleEntry* MethodHandles::_entries[MethodHandles::_EK_LIMIT] = {NULL}; --const char* MethodHandles::_entry_names[_EK_LIMIT+1] = { -- "raise_exception", -- "invokestatic", // how a MH emulates invokestatic -- "invokespecial", // ditto for the other invokes... -- "invokevirtual", -- "invokeinterface", -- "bound_ref", // these are for BMH... -- "bound_int", -- "bound_long", -- "bound_ref_direct", // (direct versions have a direct methodOop) -- "bound_int_direct", -- "bound_long_direct", -- -- // starting at _adapter_mh_first: -- "adapter_retype_only", // these are for AMH... -- "adapter_retype_raw", -- "adapter_check_cast", -- "adapter_prim_to_prim", -- "adapter_ref_to_prim", -- "adapter_prim_to_ref", -- "adapter_swap_args", -- "adapter_rot_args", -- "adapter_dup_args", -- "adapter_drop_args", -- "adapter_collect_args", -- "adapter_spread_args", -- "adapter_fold_args", -- "adapter_unused_13", -- -- // optimized adapter types: -- "adapter_swap_args/1", -- "adapter_swap_args/2", -- "adapter_rot_args/1,up", -- "adapter_rot_args/1,down", -- "adapter_rot_args/2,up", -- "adapter_rot_args/2,down", -- "adapter_prim_to_prim/i2i", -- "adapter_prim_to_prim/l2i", -- "adapter_prim_to_prim/d2f", -- "adapter_prim_to_prim/i2l", -- "adapter_prim_to_prim/f2d", -- "adapter_ref_to_prim/unboxi", -- "adapter_ref_to_prim/unboxl", -- -- // return value handlers for collect/filter/fold adapters: -- "return/ref", -- "return/int", -- "return/long", -- "return/float", -- "return/double", -- "return/void", -- "return/S0/ref", -- "return/S1/ref", -- "return/S2/ref", -- "return/S3/ref", -- "return/S4/ref", -- "return/S5/ref", -- "return/any", -- -- // spreading (array length cases 0, 1, ...) 
-- "adapter_spread/0", -- "adapter_spread/1/ref", -- "adapter_spread/2/ref", -- "adapter_spread/3/ref", -- "adapter_spread/4/ref", -- "adapter_spread/5/ref", -- "adapter_spread/ref", -- "adapter_spread/byte", -- "adapter_spread/char", -- "adapter_spread/short", -- "adapter_spread/int", -- "adapter_spread/long", -- "adapter_spread/float", -- "adapter_spread/double", -- -- // blocking filter/collect conversions: -- "adapter_collect/ref", -- "adapter_collect/int", -- "adapter_collect/long", -- "adapter_collect/float", -- "adapter_collect/double", -- "adapter_collect/void", -- "adapter_collect/0/ref", -- "adapter_collect/1/ref", -- "adapter_collect/2/ref", -- "adapter_collect/3/ref", -- "adapter_collect/4/ref", -- "adapter_collect/5/ref", -- "adapter_filter/S0/ref", -- "adapter_filter/S1/ref", -- "adapter_filter/S2/ref", -- "adapter_filter/S3/ref", -- "adapter_filter/S4/ref", -- "adapter_filter/S5/ref", -- "adapter_collect/2/S0/ref", -- "adapter_collect/2/S1/ref", -- "adapter_collect/2/S2/ref", -- "adapter_collect/2/S3/ref", -- "adapter_collect/2/S4/ref", -- "adapter_collect/2/S5/ref", -- -- // blocking fold conversions: -- "adapter_fold/ref", -- "adapter_fold/int", -- "adapter_fold/long", -- "adapter_fold/float", -- "adapter_fold/double", -- "adapter_fold/void", -- "adapter_fold/1/ref", -- "adapter_fold/2/ref", -- "adapter_fold/3/ref", -- "adapter_fold/4/ref", -- "adapter_fold/5/ref", -- -- "adapter_opt_profiling", -- -- NULL --}; -- --// Adapters. 
- MethodHandlesAdapterBlob* MethodHandles::_adapter_code = NULL; - --jobject MethodHandles::_raise_exception_method; -- --address MethodHandles::_adapter_return_handlers[CONV_TYPE_MASK+1]; -- --#ifdef ASSERT --bool MethodHandles::spot_check_entry_names() { -- assert(!strcmp(entry_name(_invokestatic_mh), "invokestatic"), ""); -- assert(!strcmp(entry_name(_bound_ref_mh), "bound_ref"), ""); -- assert(!strcmp(entry_name(_adapter_retype_only), "adapter_retype_only"), ""); -- assert(!strcmp(entry_name(_adapter_fold_args), "adapter_fold_args"), ""); -- assert(!strcmp(entry_name(_adapter_opt_unboxi), "adapter_ref_to_prim/unboxi"), ""); -- assert(!strcmp(entry_name(_adapter_opt_spread_char), "adapter_spread/char"), ""); -- assert(!strcmp(entry_name(_adapter_opt_spread_double), "adapter_spread/double"), ""); -- assert(!strcmp(entry_name(_adapter_opt_collect_int), "adapter_collect/int"), ""); -- assert(!strcmp(entry_name(_adapter_opt_collect_0_ref), "adapter_collect/0/ref"), ""); -- assert(!strcmp(entry_name(_adapter_opt_collect_2_S3_ref), "adapter_collect/2/S3/ref"), ""); -- assert(!strcmp(entry_name(_adapter_opt_filter_S5_ref), "adapter_filter/S5/ref"), ""); -- assert(!strcmp(entry_name(_adapter_opt_fold_3_ref), "adapter_fold/3/ref"), ""); -- assert(!strcmp(entry_name(_adapter_opt_fold_void), "adapter_fold/void"), ""); -- return true; --} --#endif -- -- - //------------------------------------------------------------------------------ - // MethodHandles::generate_adapters - // -@@ -216,36 +80,20 @@ - // - void MethodHandlesAdapterGenerator::generate() { - // Generate generic method handle adapters. 
-- for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST; -- ek < MethodHandles::_EK_LIMIT; -- ek = MethodHandles::EntryKind(1 + (int)ek)) { -- if (MethodHandles::ek_supported(ek)) { -- StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek)); -- MethodHandles::generate_method_handle_stub(_masm, ek); -+ // Generate interpreter entries -+ for (Interpreter::MethodKind mk = Interpreter::method_handle_invoke_FIRST; -+ mk <= Interpreter::method_handle_invoke_LAST; -+ mk = Interpreter::MethodKind(1 + (int)mk)) { -+ vmIntrinsics::ID iid = Interpreter::method_handle_intrinsic(mk); -+ StubCodeMark mark(this, "MethodHandle::interpreter_entry", vmIntrinsics::name_at(iid)); -+ address entry = MethodHandles::generate_method_handle_interpreter_entry(_masm, iid); -+ if (entry != NULL) { -+ Interpreter::set_entry_for_kind(mk, entry); - } -+ // If the entry is not set, it will throw AbstractMethodError. - } - } - -- --//------------------------------------------------------------------------------ --// MethodHandles::ek_supported --// --bool MethodHandles::ek_supported(MethodHandles::EntryKind ek) { -- MethodHandles::EntryKind ek_orig = MethodHandles::ek_original_kind(ek); -- switch (ek_orig) { -- case _adapter_unused_13: -- return false; // not defined yet -- case _adapter_prim_to_ref: -- return conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF); -- case _adapter_collect_args: -- return conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS); -- case _adapter_fold_args: -- return conv_op_supported(java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS); -- } -- return true; --} -- -- - void MethodHandles::set_enabled(bool z) { - if (_enabled != z) { - guarantee(z && EnableInvokeDynamic, "can only enable once, and only if -XX:+EnableInvokeDynamic"); -@@ -253,217 +101,6 @@ - } - } - --// Note: A method which does not have a TRAPS argument cannot block in the GC --// or throw exceptions. 
Such methods are used in this file to do something quick --// and local, like parse a data structure. For speed, such methods work on plain --// oops, not handles. Trapping methods uniformly operate on handles. -- --methodHandle MethodHandles::decode_vmtarget(oop vmtarget, int vmindex, oop mtype, -- KlassHandle& receiver_limit_result, int& decode_flags_result) { -- if (vmtarget == NULL) return methodHandle(); -- assert(methodOopDesc::nonvirtual_vtable_index < 0, "encoding"); -- if (vmindex < 0) { -- // this DMH performs no dispatch; it is directly bound to a methodOop -- // A MemberName may either be directly bound to a methodOop, -- // or it may use the klass/index form; both forms mean the same thing. -- methodOop m = decode_methodOop(methodOop(vmtarget), decode_flags_result); -- if ((decode_flags_result & _dmf_has_receiver) != 0 -- && java_lang_invoke_MethodType::is_instance(mtype)) { -- // Extract receiver type restriction from mtype.ptypes[0]. -- objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(mtype); -- oop ptype0 = (ptypes == NULL || ptypes->length() < 1) ? 
oop(NULL) : ptypes->obj_at(0); -- if (java_lang_Class::is_instance(ptype0)) -- receiver_limit_result = java_lang_Class::as_klassOop(ptype0); -- } -- if (vmindex == methodOopDesc::nonvirtual_vtable_index) { -- // this DMH can be an "invokespecial" version -- decode_flags_result &= ~_dmf_does_dispatch; -- } else { -- assert(vmindex == methodOopDesc::invalid_vtable_index, "random vmindex?"); -- } -- return m; -- } else { -- assert(vmtarget->is_klass(), "must be class or interface"); -- decode_flags_result |= MethodHandles::_dmf_does_dispatch; -- decode_flags_result |= MethodHandles::_dmf_has_receiver; -- receiver_limit_result = (klassOop)vmtarget; -- Klass* tk = Klass::cast((klassOop)vmtarget); -- if (tk->is_interface()) { -- // an itable linkage is -- decode_flags_result |= MethodHandles::_dmf_from_interface; -- return klassItable::method_for_itable_index((klassOop)vmtarget, vmindex); -- } else { -- if (!tk->oop_is_instance()) -- tk = instanceKlass::cast(SystemDictionary::Object_klass()); -- return ((instanceKlass*)tk)->method_at_vtable(vmindex); -- } -- } --} -- --// MemberName and DirectMethodHandle have the same linkage to the JVM internals. --// (MemberName is the non-operational name used for queries and setup.) 
-- --methodHandle MethodHandles::decode_DirectMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) { -- oop vmtarget = java_lang_invoke_DirectMethodHandle::vmtarget(mh); -- int vmindex = java_lang_invoke_DirectMethodHandle::vmindex(mh); -- oop mtype = java_lang_invoke_DirectMethodHandle::type(mh); -- return decode_vmtarget(vmtarget, vmindex, mtype, receiver_limit_result, decode_flags_result); --} -- --methodHandle MethodHandles::decode_BoundMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) { -- assert(java_lang_invoke_BoundMethodHandle::is_instance(mh), ""); -- assert(mh->klass() != SystemDictionary::AdapterMethodHandle_klass(), ""); -- for (oop bmh = mh;;) { -- // Bound MHs can be stacked to bind several arguments. -- oop target = java_lang_invoke_MethodHandle::vmtarget(bmh); -- if (target == NULL) return methodHandle(); -- decode_flags_result |= MethodHandles::_dmf_binds_argument; -- klassOop tk = target->klass(); -- if (tk == SystemDictionary::BoundMethodHandle_klass()) { -- bmh = target; -- continue; -- } else { -- if (java_lang_invoke_MethodHandle::is_subclass(tk)) { -- //assert(tk == SystemDictionary::DirectMethodHandle_klass(), "end of BMH chain must be DMH"); -- return decode_MethodHandle(target, receiver_limit_result, decode_flags_result); -- } else { -- // Optimized case: binding a receiver to a non-dispatched DMH -- // short-circuits directly to the methodOop. -- // (It might be another argument besides a receiver also.) 
-- assert(target->is_method(), "must be a simple method"); -- decode_flags_result |= MethodHandles::_dmf_binds_method; -- methodOop m = (methodOop) target; -- if (!m->is_static()) -- decode_flags_result |= MethodHandles::_dmf_has_receiver; -- return m; -- } -- } -- } --} -- --methodHandle MethodHandles::decode_AdapterMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) { -- assert(mh->klass() == SystemDictionary::AdapterMethodHandle_klass(), ""); -- for (oop amh = mh;;) { -- // Adapter MHs can be stacked to convert several arguments. -- int conv_op = adapter_conversion_op(java_lang_invoke_AdapterMethodHandle::conversion(amh)); -- decode_flags_result |= (_dmf_adapter_lsb << conv_op) & _DMF_ADAPTER_MASK; -- oop target = java_lang_invoke_MethodHandle::vmtarget(amh); -- if (target == NULL) return methodHandle(); -- klassOop tk = target->klass(); -- if (tk == SystemDictionary::AdapterMethodHandle_klass()) { -- amh = target; -- continue; -- } else { -- // must be a BMH (which will bind some more arguments) or a DMH (for the final call) -- return MethodHandles::decode_MethodHandle(target, receiver_limit_result, decode_flags_result); -- } -- } --} -- --methodHandle MethodHandles::decode_MethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) { -- if (mh == NULL) return methodHandle(); -- klassOop mhk = mh->klass(); -- assert(java_lang_invoke_MethodHandle::is_subclass(mhk), "must be a MethodHandle"); -- if (mhk == SystemDictionary::DirectMethodHandle_klass()) { -- return decode_DirectMethodHandle(mh, receiver_limit_result, decode_flags_result); -- } else if (mhk == SystemDictionary::BoundMethodHandle_klass()) { -- return decode_BoundMethodHandle(mh, receiver_limit_result, decode_flags_result); -- } else if (mhk == SystemDictionary::AdapterMethodHandle_klass()) { -- return decode_AdapterMethodHandle(mh, receiver_limit_result, decode_flags_result); -- } else if (java_lang_invoke_BoundMethodHandle::is_subclass(mhk)) { -- 
// could be a JavaMethodHandle (but not an adapter MH) -- return decode_BoundMethodHandle(mh, receiver_limit_result, decode_flags_result); -- } else { -- assert(false, "cannot parse this MH"); -- return methodHandle(); // random MH? -- } --} -- --methodOop MethodHandles::decode_methodOop(methodOop m, int& decode_flags_result) { -- assert(m->is_method(), ""); -- if (m->is_static()) { -- // check that signature begins '(L' or '([' (not '(I', '()', etc.) -- Symbol* sig = m->signature(); -- BasicType recv_bt = char2type(sig->byte_at(1)); -- // Note: recv_bt might be T_ILLEGAL if byte_at(2) is ')' -- assert(sig->byte_at(0) == '(', "must be method sig"); --// if (recv_bt == T_OBJECT || recv_bt == T_ARRAY) --// decode_flags_result |= _dmf_has_receiver; -- } else { -- // non-static method -- decode_flags_result |= _dmf_has_receiver; -- if (!m->can_be_statically_bound() && !m->is_initializer()) { -- decode_flags_result |= _dmf_does_dispatch; -- if (Klass::cast(m->method_holder())->is_interface()) -- decode_flags_result |= _dmf_from_interface; -- } -- } -- return m; --} -- -- --// A trusted party is handing us a cookie to determine a method. --// Let's boil it down to the method oop they really want. --methodHandle MethodHandles::decode_method(oop x, KlassHandle& receiver_limit_result, int& decode_flags_result) { -- decode_flags_result = 0; -- receiver_limit_result = KlassHandle(); -- klassOop xk = x->klass(); -- if (xk == Universe::methodKlassObj()) { -- return decode_methodOop((methodOop) x, decode_flags_result); -- } else if (xk == SystemDictionary::MemberName_klass()) { -- // Note: This only works if the MemberName has already been resolved. 
-- return decode_MemberName(x, receiver_limit_result, decode_flags_result); -- } else if (java_lang_invoke_MethodHandle::is_subclass(xk)) { -- return decode_MethodHandle(x, receiver_limit_result, decode_flags_result); -- } else if (xk == SystemDictionary::reflect_Method_klass()) { -- oop clazz = java_lang_reflect_Method::clazz(x); -- int slot = java_lang_reflect_Method::slot(x); -- klassOop k = java_lang_Class::as_klassOop(clazz); -- if (k != NULL && Klass::cast(k)->oop_is_instance()) -- return decode_methodOop(instanceKlass::cast(k)->method_with_idnum(slot), -- decode_flags_result); -- } else if (xk == SystemDictionary::reflect_Constructor_klass()) { -- oop clazz = java_lang_reflect_Constructor::clazz(x); -- int slot = java_lang_reflect_Constructor::slot(x); -- klassOop k = java_lang_Class::as_klassOop(clazz); -- if (k != NULL && Klass::cast(k)->oop_is_instance()) -- return decode_methodOop(instanceKlass::cast(k)->method_with_idnum(slot), -- decode_flags_result); -- } else { -- // unrecognized object -- assert(!x->is_method(), "already checked"); -- assert(!java_lang_invoke_MemberName::is_instance(x), "already checked"); -- } -- return methodHandle(); --} -- -- --int MethodHandles::decode_MethodHandle_stack_pushes(oop mh) { -- if (mh->klass() == SystemDictionary::DirectMethodHandle_klass()) -- return 0; // no push/pop -- int this_vmslots = java_lang_invoke_MethodHandle::vmslots(mh); -- int last_vmslots = 0; -- oop last_mh = mh; -- for (;;) { -- oop target = java_lang_invoke_MethodHandle::vmtarget(last_mh); -- if (target->klass() == SystemDictionary::DirectMethodHandle_klass()) { -- last_vmslots = java_lang_invoke_MethodHandle::vmslots(target); -- break; -- } else if (!java_lang_invoke_MethodHandle::is_instance(target)) { -- // might be klass or method -- assert(target->is_method(), "must get here with a direct ref to method"); -- last_vmslots = methodOop(target)->size_of_parameters(); -- break; -- } -- last_mh = target; -- } -- // If I am called with fewer VM 
slots than my ultimate callee, -- // it must be that I push the additionally needed slots. -- // Likewise if am called with more VM slots, I will pop them. -- return (last_vmslots - this_vmslots); --} -- -- - // MemberName support - - // import java_lang_invoke_MemberName.* -@@ -472,10 +109,11 @@ - IS_CONSTRUCTOR = java_lang_invoke_MemberName::MN_IS_CONSTRUCTOR, - IS_FIELD = java_lang_invoke_MemberName::MN_IS_FIELD, - IS_TYPE = java_lang_invoke_MemberName::MN_IS_TYPE, -+ REFERENCE_KIND_SHIFT = java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT, -+ REFERENCE_KIND_MASK = java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK, - SEARCH_SUPERCLASSES = java_lang_invoke_MemberName::MN_SEARCH_SUPERCLASSES, - SEARCH_INTERFACES = java_lang_invoke_MemberName::MN_SEARCH_INTERFACES, -- ALL_KINDS = IS_METHOD | IS_CONSTRUCTOR | IS_FIELD | IS_TYPE, -- VM_INDEX_UNINITIALIZED = java_lang_invoke_MemberName::VM_INDEX_UNINITIALIZED -+ ALL_KINDS = IS_METHOD | IS_CONSTRUCTOR | IS_FIELD | IS_TYPE - }; - - Handle MethodHandles::new_MemberName(TRAPS) { -@@ -485,72 +123,265 @@ - return Handle(THREAD, k->allocate_instance(THREAD)); - } - --void MethodHandles::init_MemberName(oop mname_oop, oop target_oop) { -- if (target_oop->klass() == SystemDictionary::reflect_Field_klass()) { -+oop MethodHandles::init_MemberName(oop mname_oop, oop target_oop) { -+ klassOop target_klass = target_oop->klass(); -+ if (target_klass == SystemDictionary::reflect_Field_klass()) { - oop clazz = java_lang_reflect_Field::clazz(target_oop); // fd.field_holder() - int slot = java_lang_reflect_Field::slot(target_oop); // fd.index() - int mods = java_lang_reflect_Field::modifiers(target_oop); -+ oop type = java_lang_reflect_Field::type(target_oop); -+ oop name = java_lang_reflect_Field::name(target_oop); - klassOop k = java_lang_Class::as_klassOop(clazz); -- int offset = instanceKlass::cast(k)->field_offset(slot); -- init_MemberName(mname_oop, k, accessFlags_from(mods), offset); -- } else { -- KlassHandle 
receiver_limit; int decode_flags = 0; -- methodHandle m = MethodHandles::decode_method(target_oop, receiver_limit, decode_flags); -- bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0); -- init_MemberName(mname_oop, m(), do_dispatch); -+ intptr_t offset = instanceKlass::cast(k)->field_offset(slot); -+ return init_field_MemberName(mname_oop, k, accessFlags_from(mods), type, name, offset); -+ } else if (target_klass == SystemDictionary::reflect_Method_klass()) { -+ oop clazz = java_lang_reflect_Method::clazz(target_oop); -+ int slot = java_lang_reflect_Method::slot(target_oop); -+ klassOop k = java_lang_Class::as_klassOop(clazz); -+ if (k != NULL && Klass::cast(k)->oop_is_instance()) { -+ methodOop m = instanceKlass::cast(k)->method_with_idnum(slot); -+ return init_method_MemberName(mname_oop, m, true, k); -+ } -+ } else if (target_klass == SystemDictionary::reflect_Constructor_klass()) { -+ oop clazz = java_lang_reflect_Constructor::clazz(target_oop); -+ int slot = java_lang_reflect_Constructor::slot(target_oop); -+ klassOop k = java_lang_Class::as_klassOop(clazz); -+ if (k != NULL && Klass::cast(k)->oop_is_instance()) { -+ methodOop m = instanceKlass::cast(k)->method_with_idnum(slot); -+ return init_method_MemberName(mname_oop, m, false, k); -+ } -+ } else if (target_klass == SystemDictionary::MemberName_klass()) { -+ // Note: This only works if the MemberName has already been resolved. 
-+ oop clazz = java_lang_invoke_MemberName::clazz(target_oop); -+ int flags = java_lang_invoke_MemberName::flags(target_oop); -+ oop vmtarget = java_lang_invoke_MemberName::vmtarget(target_oop); -+ intptr_t vmindex = java_lang_invoke_MemberName::vmindex(target_oop); -+ klassOop k = java_lang_Class::as_klassOop(clazz); -+ int ref_kind = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK; -+ if (vmtarget == NULL) return NULL; // not resolved -+ if ((flags & IS_FIELD) != 0) { -+ assert(vmtarget->is_klass(), "field vmtarget is klassOop"); -+ int basic_mods = (ref_kind_is_static(ref_kind) ? JVM_ACC_STATIC : 0); -+ // FIXME: how does k (receiver_limit) contribute? -+ return init_field_MemberName(mname_oop, klassOop(vmtarget), accessFlags_from(basic_mods), NULL, NULL, vmindex); -+ } else if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) != 0) { -+ assert(vmtarget->is_method(), "method or constructor vmtarget is methodOop"); -+ return init_method_MemberName(mname_oop, methodOop(vmtarget), ref_kind_does_dispatch(ref_kind), k); -+ } else { -+ return NULL; -+ } - } -+ return NULL; - } - --void MethodHandles::init_MemberName(oop mname_oop, methodOop m, bool do_dispatch) { -- int flags = ((m->is_initializer() ? 
IS_CONSTRUCTOR : IS_METHOD) -- | (jushort)( m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS )); -- oop vmtarget = m; -- int vmindex = methodOopDesc::invalid_vtable_index; // implies no info yet -- if (!do_dispatch || (flags & IS_CONSTRUCTOR) || m->can_be_statically_bound()) -- vmindex = methodOopDesc::nonvirtual_vtable_index; // implies never any dispatch -- assert(vmindex != VM_INDEX_UNINITIALIZED, "Java sentinel value"); -+oop MethodHandles::init_method_MemberName(oop mname_oop, methodOop m, bool do_dispatch, -+ klassOop receiver_limit) { -+ AccessFlags mods = m->access_flags(); -+ int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS ); -+ int vmindex = methodOopDesc::nonvirtual_vtable_index; // implies never any dispatch -+ klassOop mklass = m->method_holder(); -+ if (receiver_limit == NULL) -+ receiver_limit = mklass; -+ if (m->is_initializer()) { -+ flags |= IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT); -+ } else if (mods.is_static()) { -+ flags |= IS_METHOD | (JVM_REF_invokeStatic << REFERENCE_KIND_SHIFT); -+ } else if (receiver_limit != mklass && -+ !Klass::cast(receiver_limit)->is_subtype_of(mklass)) { -+ return NULL; // bad receiver limit -+ } else if (Klass::cast(receiver_limit)->is_interface() && -+ Klass::cast(mklass)->is_interface()) { -+ flags |= IS_METHOD | (JVM_REF_invokeInterface << REFERENCE_KIND_SHIFT); -+ receiver_limit = mklass; // ignore passed-in limit; interfaces are interconvertible -+ vmindex = klassItable::compute_itable_index(m); -+ } else if (mklass != receiver_limit && Klass::cast(mklass)->is_interface()) { -+ flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT); -+ // it is a miranda method, so m->vtable_index is not what we want -+ ResourceMark rm; -+ klassVtable* vt = instanceKlass::cast(receiver_limit)->vtable(); -+ vmindex = vt->index_of_miranda(m->name(), m->signature()); -+ } else if (!do_dispatch || m->can_be_statically_bound()) { -+ flags |= IS_METHOD | 
(JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT); -+ } else { -+ flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT); -+ vmindex = m->vtable_index(); -+ } -+ -+ java_lang_invoke_MemberName::set_flags(mname_oop, flags); -+ java_lang_invoke_MemberName::set_vmtarget(mname_oop, m); -+ java_lang_invoke_MemberName::set_vmindex(mname_oop, vmindex); // vtable/itable index -+ java_lang_invoke_MemberName::set_clazz(mname_oop, Klass::cast(receiver_limit)->java_mirror()); -+ // Note: name and type can be lazily computed by resolve_MemberName, -+ // if Java code needs them as resolved String and MethodType objects. -+ // The clazz must be eagerly stored, because it provides a GC -+ // root to help keep alive the methodOop. -+ // If relevant, the vtable or itable value is stored as vmindex. -+ // This is done eagerly, since it is readily available without -+ // constructing any new objects. -+ // TO DO: maybe intern mname_oop -+ return mname_oop; -+} -+ -+Handle MethodHandles::init_method_MemberName(oop mname_oop, CallInfo& info, TRAPS) { -+ Handle empty; -+ if (info.resolved_appendix().not_null()) { -+ // The resolved MemberName must not be accompanied by an appendix argument, -+ // since there is no way to bind this value into the MemberName. -+ // Caller is responsible to prevent this from happening. -+ THROW_MSG_(vmSymbols::java_lang_InternalError(), "appendix", empty); -+ } -+ methodHandle m = info.resolved_method(); -+ KlassHandle defc = info.resolved_klass(); -+ int vmindex = -1; -+ if (defc->is_interface() && Klass::cast(m->method_holder())->is_interface()) { -+ // LinkResolver does not report itable indexes! (fix this?) -+ vmindex = klassItable::compute_itable_index(m()); -+ } else if (m->can_be_statically_bound()) { -+ // LinkResolver reports vtable index even for final methods! 
-+ vmindex = methodOopDesc::nonvirtual_vtable_index; -+ } else { -+ vmindex = info.vtable_index(); -+ } -+ oop res = init_method_MemberName(mname_oop, m(), (vmindex >= 0), defc()); -+ assert(res == NULL || (java_lang_invoke_MemberName::vmindex(res) == vmindex), ""); -+ return Handle(THREAD, res); -+} -+ -+oop MethodHandles::init_field_MemberName(oop mname_oop, klassOop field_holder, -+ AccessFlags mods, oop type, oop name, -+ intptr_t offset, bool is_setter) { -+ int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS ); -+ flags |= IS_FIELD | ((mods.is_static() ? JVM_REF_getStatic : JVM_REF_getField) << REFERENCE_KIND_SHIFT); -+ if (is_setter) flags += ((JVM_REF_putField - JVM_REF_getField) << REFERENCE_KIND_SHIFT); -+ oop vmtarget = field_holder; -+ int vmindex = offset; // determines the field uniquely when combined with static bit -+ java_lang_invoke_MemberName::set_flags(mname_oop, flags); - java_lang_invoke_MemberName::set_vmtarget(mname_oop, vmtarget); - java_lang_invoke_MemberName::set_vmindex(mname_oop, vmindex); -- java_lang_invoke_MemberName::set_flags(mname_oop, flags); -- java_lang_invoke_MemberName::set_clazz(mname_oop, Klass::cast(m->method_holder())->java_mirror()); -+ java_lang_invoke_MemberName::set_clazz(mname_oop, Klass::cast(field_holder)->java_mirror()); -+ if (name != NULL) -+ java_lang_invoke_MemberName::set_name(mname_oop, name); -+ if (type != NULL) -+ java_lang_invoke_MemberName::set_type(mname_oop, type); -+ // Note: name and type can be lazily computed by resolve_MemberName, -+ // if Java code needs them as resolved String and Class objects. -+ // Note that the incoming type oop might be pre-resolved (non-null). -+ // The base clazz and field offset (vmindex) must be eagerly stored, -+ // because they unambiguously identify the field. -+ // Although the fieldDescriptor::_index would also identify the field, -+ // we do not use it, because it is harder to decode. 
-+ // TO DO: maybe intern mname_oop -+ return mname_oop; - } - --void MethodHandles::init_MemberName(oop mname_oop, klassOop field_holder, AccessFlags mods, int offset) { -- int flags = (IS_FIELD | (jushort)( mods.as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS )); -- oop vmtarget = field_holder; -- int vmindex = offset; // determines the field uniquely when combined with static bit -- assert(vmindex != VM_INDEX_UNINITIALIZED, "bad alias on vmindex"); -- java_lang_invoke_MemberName::set_vmtarget(mname_oop, vmtarget); -- java_lang_invoke_MemberName::set_vmindex(mname_oop, vmindex); -- java_lang_invoke_MemberName::set_flags(mname_oop, flags); -- java_lang_invoke_MemberName::set_clazz(mname_oop, Klass::cast(field_holder)->java_mirror()); -+Handle MethodHandles::init_field_MemberName(oop mname_oop, FieldAccessInfo& info, TRAPS) { -+ return Handle(); -+#if 0 -+ KlassHandle field_holder = info.klass(); -+ intptr_t field_offset = info.field_offset(); -+ return init_field_MemberName(mname_oop, field_holder(), -+ info.access_flags(), -+ type, name, -+ field_offset, false /*is_setter*/); -+#endif - } - - --methodHandle MethodHandles::decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result) { -- methodHandle empty; -- int flags = java_lang_invoke_MemberName::flags(mname); -- if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0) return empty; // not invocable -- oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname); -- int vmindex = java_lang_invoke_MemberName::vmindex(mname); -- if (vmindex == VM_INDEX_UNINITIALIZED) return empty; // not resolved -- methodHandle m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result); -- oop clazz = java_lang_invoke_MemberName::clazz(mname); -- if (clazz != NULL && java_lang_Class::is_instance(clazz)) { -- klassOop klass = java_lang_Class::as_klassOop(clazz); -- if (klass != NULL) receiver_limit_result = klass; -- } -- return m; -+// JVM 2.9 Special Methods: -+// A method is 
signature polymorphic if and only if all of the following conditions hold : -+// * It is declared in the java.lang.invoke.MethodHandle class. -+// * It has a single formal parameter of type Object[]. -+// * It has a return type of Object. -+// * It has the ACC_VARARGS and ACC_NATIVE flags set. -+bool MethodHandles::is_method_handle_invoke_name(klassOop klass, Symbol* name) { -+ if (klass == NULL) -+ return false; -+ // The following test will fail spuriously during bootstrap of MethodHandle itself: -+ // if (klass != SystemDictionary::MethodHandle_klass()) -+ // Test the name instead: -+ if (Klass::cast(klass)->name() != vmSymbols::java_lang_invoke_MethodHandle()) -+ return false; -+ Symbol* poly_sig = vmSymbols::object_array_object_signature(); -+ methodOop m = instanceKlass::cast(klass)->find_method(name, poly_sig); -+ if (m == NULL) return false; -+ int required = JVM_ACC_NATIVE | JVM_ACC_VARARGS; -+ int flags = m->access_flags().as_int(); -+ return (flags & required) == required; - } - -+ -+Symbol* MethodHandles::signature_polymorphic_intrinsic_name(vmIntrinsics::ID iid) { -+ assert(is_signature_polymorphic_intrinsic(iid), err_msg("iid=%d", iid)); -+ switch (iid) { -+ case vmIntrinsics::_invokeBasic: return vmSymbols::invokeBasic_name(); -+ case vmIntrinsics::_linkToVirtual: return vmSymbols::linkToVirtual_name(); -+ case vmIntrinsics::_linkToStatic: return vmSymbols::linkToStatic_name(); -+ case vmIntrinsics::_linkToSpecial: return vmSymbols::linkToSpecial_name(); -+ case vmIntrinsics::_linkToInterface: return vmSymbols::linkToInterface_name(); -+ } -+ assert(false, ""); -+ return 0; -+} -+ -+int MethodHandles::signature_polymorphic_intrinsic_ref_kind(vmIntrinsics::ID iid) { -+ switch (iid) { -+ case vmIntrinsics::_invokeBasic: return 0; -+ case vmIntrinsics::_linkToVirtual: return JVM_REF_invokeVirtual; -+ case vmIntrinsics::_linkToStatic: return JVM_REF_invokeStatic; -+ case vmIntrinsics::_linkToSpecial: return JVM_REF_invokeSpecial; -+ case 
vmIntrinsics::_linkToInterface: return JVM_REF_invokeInterface; -+ } -+ assert(false, err_msg("iid=%d", iid)); -+ return 0; -+} -+ -+vmIntrinsics::ID MethodHandles::signature_polymorphic_name_id(Symbol* name) { -+ vmSymbols::SID name_id = vmSymbols::find_sid(name); -+ switch (name_id) { -+ // The ID _invokeGeneric stands for all non-static signature-polymorphic methods, except built-ins. -+ case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name): return vmIntrinsics::_invokeGeneric; -+ // The only built-in non-static signature-polymorphic method is MethodHandle.invokeBasic: -+ case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeBasic_name): return vmIntrinsics::_invokeBasic; -+ -+ // There is one static signature-polymorphic method for each JVM invocation mode. -+ case vmSymbols::VM_SYMBOL_ENUM_NAME(linkToVirtual_name): return vmIntrinsics::_linkToVirtual; -+ case vmSymbols::VM_SYMBOL_ENUM_NAME(linkToStatic_name): return vmIntrinsics::_linkToStatic; -+ case vmSymbols::VM_SYMBOL_ENUM_NAME(linkToSpecial_name): return vmIntrinsics::_linkToSpecial; -+ case vmSymbols::VM_SYMBOL_ENUM_NAME(linkToInterface_name): return vmIntrinsics::_linkToInterface; -+ } -+ -+ // Cover the case of invokeExact and any future variants of invokeFoo. -+ klassOop mh_klass = SystemDictionary::well_known_klass( -+ SystemDictionary::WK_KLASS_ENUM_NAME(MethodHandle_klass) ); -+ if (mh_klass != NULL && is_method_handle_invoke_name(mh_klass, name)) -+ return vmIntrinsics::_invokeGeneric; -+ -+ // Note: The pseudo-intrinsic _compiledLambdaForm is never linked against. -+ // Instead it is used to mark lambda forms bound to invokehandle or invokedynamic. 
-+ return vmIntrinsics::_none; -+} -+ -+vmIntrinsics::ID MethodHandles::signature_polymorphic_name_id(klassOop klass, Symbol* name) { -+ if (klass != NULL && -+ Klass::cast(klass)->name() == vmSymbols::java_lang_invoke_MethodHandle()) { -+ vmIntrinsics::ID iid = signature_polymorphic_name_id(name); -+ if (iid != vmIntrinsics::_none) -+ return iid; -+ if (is_method_handle_invoke_name(klass, name)) -+ return vmIntrinsics::_invokeGeneric; -+ } -+ return vmIntrinsics::_none; -+} -+ -+ - // convert the external string or reflective type to an internal signature --Symbol* MethodHandles::convert_to_signature(oop type_str, bool polymorphic, TRAPS) { -+Symbol* MethodHandles::lookup_signature(oop type_str, bool intern_if_not_found, TRAPS) { - if (java_lang_invoke_MethodType::is_instance(type_str)) { -- return java_lang_invoke_MethodType::as_signature(type_str, polymorphic, CHECK_NULL); -+ return java_lang_invoke_MethodType::as_signature(type_str, intern_if_not_found, CHECK_NULL); - } else if (java_lang_Class::is_instance(type_str)) { - return java_lang_Class::as_signature(type_str, false, CHECK_NULL); - } else if (java_lang_String::is_instance(type_str)) { -- if (polymorphic) { -+ if (intern_if_not_found) { - return java_lang_String::as_symbol(type_str, CHECK_NULL); - } else { - return java_lang_String::as_symbol_or_null(type_str); -@@ -560,91 +391,297 @@ - } - } - -+static const char OBJ_SIG[] = "Ljava/lang/Object;"; -+enum { OBJ_SIG_LEN = 18 }; -+ -+bool MethodHandles::is_basic_type_signature(Symbol* sig) { -+ assert(vmSymbols::object_signature()->utf8_length() == (int)OBJ_SIG_LEN, ""); -+ assert(vmSymbols::object_signature()->equals(OBJ_SIG), ""); -+ const int len = sig->utf8_length(); -+ for (int i = 0; i < len; i++) { -+ switch (sig->byte_at(i)) { -+ case 'L': -+ // only java/lang/Object is valid here -+ if (sig->index_of_at(i, OBJ_SIG, OBJ_SIG_LEN) != i) -+ return false; -+ i += OBJ_SIG_LEN-1; //-1 because of i++ in loop -+ continue; -+ case '(': case ')': case 'V': -+ 
case 'I': case 'J': case 'F': case 'D': -+ continue; -+ //case '[': -+ //case 'Z': case 'B': case 'C': case 'S': -+ default: -+ return false; -+ } -+ } -+ return true; -+} -+ -+Symbol* MethodHandles::lookup_basic_type_signature(Symbol* sig, bool keep_last_arg, TRAPS) { -+ Symbol* bsig = NULL; -+ if (sig == NULL) { -+ return sig; -+ } else if (is_basic_type_signature(sig)) { -+ sig->increment_refcount(); -+ return sig; // that was easy -+ } else if (sig->byte_at(0) != '(') { -+ BasicType bt = char2type(sig->byte_at(0)); -+ if (is_subword_type(bt)) { -+ bsig = vmSymbols::int_signature(); -+ } else { -+ assert(bt == T_OBJECT || bt == T_ARRAY, "is_basic_type_signature was false"); -+ bsig = vmSymbols::object_signature(); -+ } -+ } else { -+ ResourceMark rm; -+ stringStream buffer(128); -+ buffer.put('('); -+ int arg_pos = 0, keep_arg_pos = -1; -+ if (keep_last_arg) -+ keep_arg_pos = ArgumentCount(sig).size() - 1; -+ for (SignatureStream ss(sig); !ss.is_done(); ss.next()) { -+ BasicType bt = ss.type(); -+ size_t this_arg_pos = buffer.size(); -+ if (ss.at_return_type()) { -+ buffer.put(')'); -+ } -+ if (arg_pos == keep_arg_pos) { -+ buffer.write((char*) ss.raw_bytes(), -+ (int) ss.raw_length()); -+ } else if (bt == T_OBJECT || bt == T_ARRAY) { -+ buffer.write(OBJ_SIG, OBJ_SIG_LEN); -+ } else { -+ if (is_subword_type(bt)) -+ bt = T_INT; -+ buffer.put(type2char(bt)); -+ } -+ arg_pos++; -+ } -+ const char* sigstr = buffer.base(); -+ int siglen = (int) buffer.size(); -+ bsig = SymbolTable::new_symbol(sigstr, siglen, THREAD); -+ } -+ assert(is_basic_type_signature(bsig) || -+ // detune assert in case the injected argument is not a basic type: -+ keep_last_arg, ""); -+ return bsig; -+} -+ -+void MethodHandles::print_as_basic_type_signature_on(outputStream* st, -+ Symbol* sig, -+ bool keep_arrays, -+ bool keep_basic_names) { -+ st = st ? 
st : tty; -+ int len = sig->utf8_length(); -+ int array = 0; -+ bool prev_type = false; -+ for (int i = 0; i < len; i++) { -+ char ch = sig->byte_at(i); -+ switch (ch) { -+ case '(': case ')': -+ prev_type = false; -+ st->put(ch); -+ continue; -+ case '[': -+ if (!keep_basic_names && keep_arrays) -+ st->put(ch); -+ array++; -+ continue; -+ case 'L': -+ { -+ if (prev_type) st->put(','); -+ int start = i+1, slash = start; -+ while (++i < len && (ch = sig->byte_at(i)) != ';') { -+ if (ch == '/' || ch == '.' || ch == '$') slash = i+1; -+ } -+ if (slash < i) start = slash; -+ if (!keep_basic_names) { -+ st->put('L'); -+ } else { -+ for (int j = start; j < i; j++) -+ st->put(sig->byte_at(j)); -+ prev_type = true; -+ } -+ break; -+ } -+ default: -+ { -+ if (array && char2type(ch) != T_ILLEGAL && !keep_arrays) { -+ ch = '['; -+ array = 0; -+ } -+ if (prev_type) st->put(','); -+ const char* n = NULL; -+ if (keep_basic_names) -+ n = type2name(char2type(ch)); -+ if (n == NULL) { -+ // unknown letter, or we don't want to know its name -+ st->put(ch); -+ } else { -+ st->print(n); -+ prev_type = true; -+ } -+ break; -+ } -+ } -+ // Switch break goes here to take care of array suffix: -+ if (prev_type) { -+ while (array > 0) { -+ st->print("[]"); -+ --array; -+ } -+ } -+ array = 0; -+ } -+} -+ -+ -+ -+static oop object_java_mirror() { -+ return Klass::cast(SystemDictionary::Object_klass())->java_mirror(); -+} -+ -+static oop field_name_or_null(Symbol* s) { -+ if (s == NULL) return NULL; -+ return StringTable::lookup(s); -+} -+ -+static oop field_signature_type_or_null(Symbol* s) { -+ if (s == NULL) return NULL; -+ BasicType bt = FieldType::basic_type(s); -+ if (is_java_primitive(bt)) { -+ assert(s->utf8_length() == 1, ""); -+ return java_lang_Class::primitive_mirror(bt); -+ } -+ // Here are some more short cuts for common types. -+ // They are optional, since reference types can be resolved lazily. 
-+ if (bt == T_OBJECT) { -+ if (s == vmSymbols::object_signature()) { -+ return object_java_mirror(); -+ } else if (s == vmSymbols::class_signature()) { -+ return Klass::cast(SystemDictionary::Class_klass())->java_mirror(); -+ } else if (s == vmSymbols::string_signature()) { -+ return Klass::cast(SystemDictionary::String_klass())->java_mirror(); -+ } else { -+ int len = s->utf8_length(); -+ if (s->byte_at(0) == 'L' && s->byte_at(len-1) == ';') { -+ TempNewSymbol cname = SymbolTable::probe((const char*)&s->bytes()[1], len-2); -+ if (cname == NULL) return NULL; -+ klassOop wkk = SystemDictionary::find_well_known_klass(cname); -+ if (wkk == NULL) return NULL; -+ return Klass::cast(wkk)->java_mirror(); -+ } -+ } -+ } -+ return NULL; -+} -+ - // An unresolved member name is a mere symbolic reference. - // Resolving it plants a vmtarget/vmindex in it, - // which refers dirctly to JVM internals. --void MethodHandles::resolve_MemberName(Handle mname, TRAPS) { -+Handle MethodHandles::resolve_MemberName(Handle mname, TRAPS) { -+ Handle empty; - assert(java_lang_invoke_MemberName::is_instance(mname()), ""); --#ifdef ASSERT -- // If this assert throws, renegotiate the sentinel value used by the Java code, -- // so that it is distinct from any valid vtable index value, and any special -- // values defined in methodOopDesc::VtableIndexFlag. -- // The point of the slop is to give the Java code and the JVM some room -- // to independently specify sentinel values. -- const int sentinel_slop = 10; -- const int sentinel_limit = methodOopDesc::highest_unused_vtable_index_value - sentinel_slop; -- assert(VM_INDEX_UNINITIALIZED < sentinel_limit, "Java sentinel != JVM sentinels"); --#endif -- if (java_lang_invoke_MemberName::vmindex(mname()) != VM_INDEX_UNINITIALIZED) -- return; // already resolved -+ -+ if (java_lang_invoke_MemberName::vmtarget(mname()) != NULL) { -+ // Already resolved. 
-+ DEBUG_ONLY(int vmindex = java_lang_invoke_MemberName::vmindex(mname())); -+ assert(vmindex >= methodOopDesc::nonvirtual_vtable_index, ""); -+ return mname; -+ } -+ - Handle defc_oop(THREAD, java_lang_invoke_MemberName::clazz(mname())); - Handle name_str(THREAD, java_lang_invoke_MemberName::name( mname())); - Handle type_str(THREAD, java_lang_invoke_MemberName::type( mname())); - int flags = java_lang_invoke_MemberName::flags(mname()); -+ int ref_kind = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK; -+ if (!ref_kind_is_valid(ref_kind)) { -+ THROW_MSG_(vmSymbols::java_lang_InternalError(), "obsolete MemberName format", empty); -+ } -+ -+ DEBUG_ONLY(int old_vmindex); -+ assert((old_vmindex = java_lang_invoke_MemberName::vmindex(mname())) == 0, "clean input"); - - if (defc_oop.is_null() || name_str.is_null() || type_str.is_null()) { -- THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "nothing to resolve"); -+ THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), "nothing to resolve", empty); - } - - instanceKlassHandle defc; - { - klassOop defc_klassOop = java_lang_Class::as_klassOop(defc_oop()); -- if (defc_klassOop == NULL) return; // a primitive; no resolution possible -+ if (defc_klassOop == NULL) return empty; // a primitive; no resolution possible - if (!Klass::cast(defc_klassOop)->oop_is_instance()) { -- if (!Klass::cast(defc_klassOop)->oop_is_array()) return; -+ if (!Klass::cast(defc_klassOop)->oop_is_array()) return empty; - defc_klassOop = SystemDictionary::Object_klass(); - } - defc = instanceKlassHandle(THREAD, defc_klassOop); - } - if (defc.is_null()) { -- THROW_MSG(vmSymbols::java_lang_InternalError(), "primitive class"); -+ THROW_MSG_(vmSymbols::java_lang_InternalError(), "primitive class", empty); - } -- defc->link_class(CHECK); // possible safepoint -+ defc->link_class(CHECK_(empty)); // possible safepoint - - // convert the external string name to an internal symbol - TempNewSymbol name = 
java_lang_String::as_symbol_or_null(name_str()); -- if (name == NULL) return; // no such name -+ if (name == NULL) return empty; // no such name - if (name == vmSymbols::class_initializer_name()) -- return; // illegal name -+ return empty; // illegal name - -- Handle polymorphic_method_type; -- bool polymorphic_signature = false; -+ vmIntrinsics::ID mh_invoke_id = vmIntrinsics::_none; - if ((flags & ALL_KINDS) == IS_METHOD && -- (defc() == SystemDictionary::MethodHandle_klass() && -- methodOopDesc::is_method_handle_invoke_name(name))) { -- polymorphic_signature = true; -+ (defc() == SystemDictionary::MethodHandle_klass()) && -+ (ref_kind == JVM_REF_invokeVirtual || -+ ref_kind == JVM_REF_invokeSpecial || -+ // static invocation mode is required for _linkToVirtual, etc.: -+ ref_kind == JVM_REF_invokeStatic)) { -+ vmIntrinsics::ID iid = signature_polymorphic_name_id(name); -+ if (iid != vmIntrinsics::_none && -+ ((ref_kind == JVM_REF_invokeStatic) == is_signature_polymorphic_static(iid))) { -+ // Virtual methods invoke and invokeExact, plus internal invokers like _invokeBasic. -+ // For a static reference it could an internal linkage routine like _linkToVirtual, etc. -+ mh_invoke_id = iid; -+ } - } - - // convert the external string or reflective type to an internal signature -- TempNewSymbol type = convert_to_signature(type_str(), polymorphic_signature, CHECK); -- if (java_lang_invoke_MethodType::is_instance(type_str()) && polymorphic_signature) { -- polymorphic_method_type = type_str; // preserve exactly -- } -- if (type == NULL) return; // no such signature exists in the VM -+ TempNewSymbol type = lookup_signature(type_str(), (mh_invoke_id != vmIntrinsics::_none), CHECK_(empty)); -+ if (type == NULL) return empty; // no such signature exists in the VM - - // Time to do the lookup. 
- switch (flags & ALL_KINDS) { - case IS_METHOD: - { - CallInfo result; -+ bool do_dispatch = true; // default, neutral setting - { -- EXCEPTION_MARK; -- if ((flags & JVM_ACC_STATIC) != 0) { -+ assert(!HAS_PENDING_EXCEPTION, ""); -+ if (ref_kind == JVM_REF_invokeStatic) { -+ //do_dispatch = false; // no need, since statics are never dispatched - LinkResolver::resolve_static_call(result, -+ defc, name, type, KlassHandle(), false, false, THREAD); -+ } else if (ref_kind == JVM_REF_invokeInterface) { -+ LinkResolver::resolve_interface_call(result, Handle(), defc, - defc, name, type, KlassHandle(), false, false, THREAD); -- } else if (defc->is_interface()) { -- LinkResolver::resolve_interface_call(result, Handle(), defc, -+ } else if (mh_invoke_id != vmIntrinsics::_none) { -+ assert(!is_signature_polymorphic_static(mh_invoke_id), ""); -+ LinkResolver::resolve_handle_call(result, -+ defc, name, type, KlassHandle(), THREAD); -+ } else if (ref_kind == JVM_REF_invokeSpecial) { -+ do_dispatch = false; // force non-virtual linkage -+ LinkResolver::resolve_special_call(result, -+ defc, name, type, KlassHandle(), false, THREAD); -+ } else if (ref_kind == JVM_REF_invokeVirtual) { -+ LinkResolver::resolve_virtual_call(result, Handle(), defc, - defc, name, type, KlassHandle(), false, false, THREAD); - } else { -- LinkResolver::resolve_virtual_call(result, Handle(), defc, -- defc, name, type, KlassHandle(), false, false, THREAD); -+ assert(false, err_msg("ref_kind=%d", ref_kind)); - } - if (HAS_PENDING_EXCEPTION) { -- CLEAR_PENDING_EXCEPTION; -- break; // go to second chance -+ return empty; - } - } -- methodHandle m = result.resolved_method(); -+ /* - KlassHandle mklass = m->method_holder(); - KlassHandle receiver_limit = result.resolved_klass(); - if (receiver_limit.is_null() || -@@ -652,37 +689,15 @@ - receiver_limit->is_interface() && mklass->is_interface()) { - receiver_limit = mklass; - } -- oop vmtarget = NULL; -- int vmindex = methodOopDesc::nonvirtual_vtable_index; -- if 
(defc->is_interface()) { -- vmindex = klassItable::compute_itable_index(m()); -- assert(vmindex >= 0, ""); -- } else if (result.has_vtable_index()) { -- vmindex = result.vtable_index(); -- assert(vmindex >= 0, ""); -- } -- assert(vmindex != VM_INDEX_UNINITIALIZED, ""); -- if (vmindex < 0) { -- assert(result.is_statically_bound(), ""); -- vmtarget = m(); -- } else { -- vmtarget = result.resolved_klass()->as_klassOop(); -- } -- int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS); -- java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget); -- java_lang_invoke_MemberName::set_vmindex(mname(), vmindex); -- java_lang_invoke_MemberName::set_modifiers(mname(), mods); - java_lang_invoke_MemberName::set_clazz(mname(), receiver_limit->java_mirror()); -- DEBUG_ONLY(KlassHandle junk1; int junk2); -- assert(decode_MemberName(mname(), junk1, junk2) == result.resolved_method(), -- "properly stored for later decoding"); -- return; -+ */ -+ return init_method_MemberName(mname(), result, THREAD); - } - case IS_CONSTRUCTOR: - { - CallInfo result; - { -- EXCEPTION_MARK; -+ assert(!HAS_PENDING_EXCEPTION, ""); - if (name == vmSymbols::object_initializer_name()) { - LinkResolver::resolve_special_call(result, - defc, name, type, KlassHandle(), false, THREAD); -@@ -690,22 +705,11 @@ - break; // will throw after end of switch - } - if (HAS_PENDING_EXCEPTION) { -- CLEAR_PENDING_EXCEPTION; -- return; -+ return empty; - } - } - assert(result.is_statically_bound(), ""); -- methodHandle m = result.resolved_method(); -- oop vmtarget = m(); -- int vmindex = methodOopDesc::nonvirtual_vtable_index; -- int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS); -- java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget); -- java_lang_invoke_MemberName::set_vmindex(mname(), vmindex); -- java_lang_invoke_MemberName::set_modifiers(mname(), mods); -- DEBUG_ONLY(KlassHandle junk1; int junk2); -- assert(decode_MemberName(mname(), junk1, junk2) == 
result.resolved_method(), -- "properly stored for later decoding"); -- return; -+ return init_method_MemberName(mname(), result, THREAD); - } - case IS_FIELD: - { -@@ -713,54 +717,20 @@ - fieldDescriptor fd; // find_field initializes fd if found - KlassHandle sel_klass(THREAD, instanceKlass::cast(defc())->find_field(name, type, &fd)); - // check if field exists; i.e., if a klass containing the field def has been selected -- if (sel_klass.is_null()) return; -- oop vmtarget = sel_klass->as_klassOop(); -- int vmindex = fd.offset(); -- int mods = (fd.access_flags().as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS); -- if (vmindex == VM_INDEX_UNINITIALIZED) break; // should not happen -- java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget); -- java_lang_invoke_MemberName::set_vmindex(mname(), vmindex); -- java_lang_invoke_MemberName::set_modifiers(mname(), mods); -- return; -+ if (sel_klass.is_null()) return empty; // should not happen -+ oop type = field_signature_type_or_null(fd.signature()); -+ oop name = field_name_or_null(fd.name()); -+ bool is_setter = (ref_kind_is_valid(ref_kind) && ref_kind_is_setter(ref_kind)); -+ mname = Handle(THREAD, -+ init_field_MemberName(mname(), sel_klass->as_klassOop(), -+ fd.access_flags(), type, name, fd.offset(), is_setter)); -+ return mname; - } - default: -- THROW_MSG(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format"); -+ THROW_MSG_(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format", empty); - } - -- // Second chance. -- if (polymorphic_method_type.not_null()) { -- // Look on a non-null class loader. 
-- Handle cur_class_loader; -- const int nptypes = java_lang_invoke_MethodType::ptype_count(polymorphic_method_type()); -- for (int i = 0; i <= nptypes; i++) { -- oop type_mirror; -- if (i < nptypes) type_mirror = java_lang_invoke_MethodType::ptype(polymorphic_method_type(), i); -- else type_mirror = java_lang_invoke_MethodType::rtype(polymorphic_method_type()); -- klassOop example_type = java_lang_Class::as_klassOop(type_mirror); -- if (example_type == NULL) continue; -- oop class_loader = Klass::cast(example_type)->class_loader(); -- if (class_loader == NULL || class_loader == cur_class_loader()) continue; -- cur_class_loader = Handle(THREAD, class_loader); -- methodOop m = SystemDictionary::find_method_handle_invoke(name, -- type, -- KlassHandle(THREAD, example_type), -- THREAD); -- if (HAS_PENDING_EXCEPTION) { -- CLEAR_PENDING_EXCEPTION; -- m = NULL; -- // try again with a different class loader... -- } -- if (m != NULL && -- m->is_method_handle_invoke() && -- java_lang_invoke_MethodType::equals(polymorphic_method_type(), m->method_handle_type())) { -- int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS); -- java_lang_invoke_MemberName::set_vmtarget(mname(), m); -- java_lang_invoke_MemberName::set_vmindex(mname(), m->vtable_index()); -- java_lang_invoke_MemberName::set_modifiers(mname(), mods); -- return; -- } -- } -- } -+ return empty; - } - - // Conversely, a member name which is only initialized from JVM internals -@@ -771,7 +741,7 @@ - assert(java_lang_invoke_MemberName::is_instance(mname()), ""); - oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname()); - int vmindex = java_lang_invoke_MemberName::vmindex(mname()); -- if (vmtarget == NULL || vmindex == VM_INDEX_UNINITIALIZED) { -+ if (vmtarget == NULL) { - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "nothing to expand"); - } - -@@ -792,14 +762,12 @@ - case IS_METHOD: - case IS_CONSTRUCTOR: - { -- KlassHandle receiver_limit; int decode_flags = 0; -- methodHandle 
m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit, decode_flags); -+ assert(vmtarget->is_method(), "method or constructor vmtarget is methodOop"); -+ methodHandle m(THREAD, methodOop(vmtarget)); -+ DEBUG_ONLY(vmtarget = NULL); // safety - if (m.is_null()) break; - if (!have_defc) { - klassOop defc = m->method_holder(); -- if (receiver_limit.not_null() && receiver_limit() != defc -- && Klass::cast(receiver_limit())->is_subtype_of(defc)) -- defc = receiver_limit(); - java_lang_invoke_MemberName::set_clazz(mname(), Klass::cast(defc)->java_mirror()); - } - if (!have_name) { -@@ -816,9 +784,10 @@ - case IS_FIELD: - { - // This is taken from LinkResolver::resolve_field, sans access checks. -- if (!vmtarget->is_klass()) break; -+ assert(vmtarget->is_klass(), "field vmtarget is klassOop"); - if (!Klass::cast((klassOop) vmtarget)->oop_is_instance()) break; - instanceKlassHandle defc(THREAD, (klassOop) vmtarget); -+ DEBUG_ONLY(vmtarget = NULL); // safety - bool is_static = ((flags & JVM_ACC_STATIC) != 0); - fieldDescriptor fd; // find_field initializes fd if found - if (!defc->find_field_from_offset(vmindex, is_static, &fd)) -@@ -832,7 +801,11 @@ - java_lang_invoke_MemberName::set_name(mname(), name()); - } - if (!have_type) { -- Handle type = java_lang_String::create_from_symbol(fd.signature(), CHECK); -+ // If it is a primitive field type, don't mess with short strings like "I". -+ Handle type = field_signature_type_or_null(fd.signature()); -+ if (type.is_null()) { -+ java_lang_String::create_from_symbol(fd.signature(), CHECK); -+ } - java_lang_invoke_MemberName::set_type(mname(), type()); - } - return; -@@ -890,7 +863,13 @@ - oop result = results->obj_at(rfill++); - if (!java_lang_invoke_MemberName::is_instance(result)) - return -99; // caller bug! 
-- MethodHandles::init_MemberName(result, st.klass()->as_klassOop(), st.access_flags(), st.offset()); -+ oop type = field_signature_type_or_null(st.signature()); -+ oop name = field_name_or_null(st.name()); -+ oop saved = MethodHandles::init_field_MemberName(result, st.klass()->as_klassOop(), -+ st.access_flags(), type, name, -+ st.offset()); -+ if (saved != result) -+ results->obj_at_put(rfill-1, saved); // show saved instance to user - } else if (++overflow >= overflow_limit) { - match_flags = 0; break; // got tired of looking at overflow - } -@@ -938,7 +917,9 @@ - oop result = results->obj_at(rfill++); - if (!java_lang_invoke_MemberName::is_instance(result)) - return -99; // caller bug! -- MethodHandles::init_MemberName(result, m, true); -+ oop saved = MethodHandles::init_method_MemberName(result, m, true, NULL); -+ if (saved != result) -+ results->obj_at_put(rfill-1, saved); // show saved instance to user - } else if (++overflow >= overflow_limit) { - match_flags = 0; break; // got tired of looking at overflow - } -@@ -949,1925 +930,16 @@ - return rfill + overflow; - } - -- --// Decode this java.lang.Class object into an instanceKlass, if possible. --// Throw IAE if not --instanceKlassHandle MethodHandles::resolve_instance_klass(oop java_mirror_oop, TRAPS) { -- instanceKlassHandle empty; -- klassOop caller = NULL; -- if (java_lang_Class::is_instance(java_mirror_oop)) { -- caller = java_lang_Class::as_klassOop(java_mirror_oop); -- } -- if (caller == NULL || !Klass::cast(caller)->oop_is_instance()) { -- THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), "not a class", empty); -- } -- return instanceKlassHandle(THREAD, caller); --} -- -- -- --// Decode the vmtarget field of a method handle. --// Sanitize out methodOops, klassOops, and any other non-Java data. --// This is for debugging and reflection. 
--oop MethodHandles::encode_target(Handle mh, int format, TRAPS) { -- assert(java_lang_invoke_MethodHandle::is_instance(mh()), "must be a MH"); -- if (format == ETF_FORCE_DIRECT_HANDLE || -- format == ETF_COMPILE_DIRECT_HANDLE) { -- // Internal function for stress testing. -- Handle mt = java_lang_invoke_MethodHandle::type(mh()); -- int invocation_count = 10000; -- TempNewSymbol signature = java_lang_invoke_MethodType::as_signature(mt(), true, CHECK_NULL); -- bool omit_receiver_argument = true; -- MethodHandleCompiler mhc(mh, vmSymbols::invoke_name(), signature, invocation_count, omit_receiver_argument, CHECK_NULL); -- methodHandle m = mhc.compile(CHECK_NULL); -- if (StressMethodHandleWalk && Verbose || PrintMiscellaneous) { -- tty->print_cr("MethodHandleNatives.getTarget(%s)", -- format == ETF_FORCE_DIRECT_HANDLE ? "FORCE_DIRECT" : "COMPILE_DIRECT"); -- if (Verbose) { -- m->print_codes(); -- } -- } -- if (StressMethodHandleWalk) { -- InterpreterOopMap mask; -- OopMapCache::compute_one_oop_map(m, m->code_size() - 1, &mask); -- } -- if ((format == ETF_COMPILE_DIRECT_HANDLE || -- CompilationPolicy::must_be_compiled(m)) -- && !instanceKlass::cast(m->method_holder())->is_not_initialized() -- && CompilationPolicy::can_be_compiled(m)) { -- // Force compilation -- CompileBroker::compile_method(m, InvocationEntryBci, -- CompilationPolicy::policy()->initial_compile_level(), -- methodHandle(), 0, "MethodHandleNatives.getTarget", -- CHECK_NULL); -- } -- // Now wrap m in a DirectMethodHandle. 
-- instanceKlassHandle dmh_klass(THREAD, SystemDictionary::DirectMethodHandle_klass()); -- Handle dmh = dmh_klass->allocate_instance_handle(CHECK_NULL); -- JavaValue ignore_result(T_VOID); -- Symbol* init_name = vmSymbols::object_initializer_name(); -- Symbol* init_sig = vmSymbols::notifyGenericMethodType_signature(); -- JavaCalls::call_special(&ignore_result, dmh, -- SystemDictionaryHandles::MethodHandle_klass(), init_name, init_sig, -- java_lang_invoke_MethodHandle::type(mh()), CHECK_NULL); -- MethodHandles::init_DirectMethodHandle(dmh, m, false, CHECK_NULL); -- return dmh(); -- } -- if (format == ETF_HANDLE_OR_METHOD_NAME) { -- oop target = java_lang_invoke_MethodHandle::vmtarget(mh()); -- if (target == NULL) { -- return NULL; // unformed MH -- } -- klassOop tklass = target->klass(); -- if (Klass::cast(tklass)->is_subclass_of(SystemDictionary::Object_klass())) { -- return target; // target is another MH (or something else?) -- } -- } -- if (format == ETF_DIRECT_HANDLE) { -- oop target = mh(); -- for (;;) { -- if (target->klass() == SystemDictionary::DirectMethodHandle_klass()) { -- return target; -- } -- if (!java_lang_invoke_MethodHandle::is_instance(target)){ -- return NULL; // unformed MH -- } -- target = java_lang_invoke_MethodHandle::vmtarget(target); -- } -- } -- // cases of metadata in MH.vmtarget: -- // - AMH can have methodOop for static invoke with bound receiver -- // - DMH can have methodOop for static invoke (on variable receiver) -- // - DMH can have klassOop for dispatched (non-static) invoke -- KlassHandle receiver_limit; int decode_flags = 0; -- methodHandle m = decode_MethodHandle(mh(), receiver_limit, decode_flags); -- if (m.is_null()) return NULL; -- switch (format) { -- case ETF_REFLECT_METHOD: -- // same as jni_ToReflectedMethod: -- if (m->is_initializer()) { -- return Reflection::new_constructor(m, THREAD); -- } else { -- return Reflection::new_method(m, UseNewReflection, false, THREAD); -- } -- -- case ETF_HANDLE_OR_METHOD_NAME: // 
method, not handle -- case ETF_METHOD_NAME: -- { -- if (SystemDictionary::MemberName_klass() == NULL) break; -- instanceKlassHandle mname_klass(THREAD, SystemDictionary::MemberName_klass()); -- mname_klass->initialize(CHECK_NULL); -- Handle mname = mname_klass->allocate_instance_handle(CHECK_NULL); // possible safepoint -- java_lang_invoke_MemberName::set_vmindex(mname(), VM_INDEX_UNINITIALIZED); -- bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0); -- init_MemberName(mname(), m(), do_dispatch); -- expand_MemberName(mname, 0, CHECK_NULL); -- return mname(); -- } -- } -- -- // Unknown format code. -- char msg[50]; -- jio_snprintf(msg, sizeof(msg), "unknown getTarget format=%d", format); -- THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(), msg); --} -- --static const char* always_null_names[] = { -- "java/lang/Void", -- "java/lang/Null", -- //"java/lang/Nothing", -- "sun/dyn/empty/Empty", -- "sun/invoke/empty/Empty", -- NULL --}; -- --static bool is_always_null_type(klassOop klass) { -- if (klass == NULL) return false; // safety -- if (!Klass::cast(klass)->oop_is_instance()) return false; -- instanceKlass* ik = instanceKlass::cast(klass); -- // Must be on the boot class path: -- if (ik->class_loader() != NULL) return false; -- // Check the name. 
-- Symbol* name = ik->name(); -- for (int i = 0; ; i++) { -- const char* test_name = always_null_names[i]; -- if (test_name == NULL) break; -- if (name->equals(test_name)) -- return true; -- } -- return false; --} -- --bool MethodHandles::class_cast_needed(klassOop src, klassOop dst) { -- if (dst == NULL) return true; -- if (src == NULL) return (dst != SystemDictionary::Object_klass()); -- if (src == dst || dst == SystemDictionary::Object_klass()) -- return false; // quickest checks -- Klass* srck = Klass::cast(src); -- Klass* dstk = Klass::cast(dst); -- if (dstk->is_interface()) { -- // interface receivers can safely be viewed as untyped, -- // because interface calls always include a dynamic check -- //dstk = Klass::cast(SystemDictionary::Object_klass()); -- return false; -- } -- if (srck->is_interface()) { -- // interface arguments must be viewed as untyped -- //srck = Klass::cast(SystemDictionary::Object_klass()); -- return true; -- } -- if (is_always_null_type(src)) { -- // some source types are known to be never instantiated; -- // they represent references which are always null -- // such null references never fail to convert safely -- return false; -- } -- return !srck->is_subclass_of(dstk->as_klassOop()); --} -- --static oop object_java_mirror() { -- return Klass::cast(SystemDictionary::Object_klass())->java_mirror(); --} -- --bool MethodHandles::is_float_fixed_reinterpretation_cast(BasicType src, BasicType dst) { -- if (src == T_FLOAT) return dst == T_INT; -- if (src == T_INT) return dst == T_FLOAT; -- if (src == T_DOUBLE) return dst == T_LONG; -- if (src == T_LONG) return dst == T_DOUBLE; -- return false; --} -- --bool MethodHandles::same_basic_type_for_arguments(BasicType src, -- BasicType dst, -- bool raw, -- bool for_return) { -- if (for_return) { -- // return values can always be forgotten: -- if (dst == T_VOID) return true; -- if (src == T_VOID) return raw && (dst == T_INT); -- // We allow caller to receive a garbage int, which is harmless. 
-- // This trick is pulled by trusted code (see VerifyType.canPassRaw). -- } -- assert(src != T_VOID && dst != T_VOID, "should not be here"); -- if (src == dst) return true; -- if (type2size[src] != type2size[dst]) return false; -- if (src == T_OBJECT || dst == T_OBJECT) return false; -- if (raw) return true; // bitwise reinterpretation; caller guarantees safety -- // allow reinterpretation casts for integral widening -- if (is_subword_type(src)) { // subwords can fit in int or other subwords -- if (dst == T_INT) // any subword fits in an int -- return true; -- if (src == T_BOOLEAN) // boolean fits in any subword -- return is_subword_type(dst); -- if (src == T_BYTE && dst == T_SHORT) -- return true; // remaining case: byte fits in short -- } -- // allow float/fixed reinterpretation casts -- if (is_float_fixed_reinterpretation_cast(src, dst)) -- return true; -- return false; --} -- --const char* MethodHandles::check_method_receiver(methodOop m, -- klassOop passed_recv_type) { -- assert(!m->is_static(), "caller resp."); -- if (passed_recv_type == NULL) -- return "receiver type is primitive"; -- if (class_cast_needed(passed_recv_type, m->method_holder())) { -- Klass* formal = Klass::cast(m->method_holder()); -- return SharedRuntime::generate_class_cast_message("receiver type", -- formal->external_name()); -- } -- return NULL; // checks passed --} -- --// Verify that m's signature can be called type-safely by a method handle --// of the given method type 'mtype'. --// It takes a TRAPS argument because it must perform symbol lookups. 
--void MethodHandles::verify_method_signature(methodHandle m, -- Handle mtype, -- int first_ptype_pos, -- KlassHandle insert_ptype, -- TRAPS) { -- Handle mhi_type; -- if (m->is_method_handle_invoke()) { -- // use this more exact typing instead of the symbolic signature: -- mhi_type = Handle(THREAD, m->method_handle_type()); -- } -- objArrayHandle ptypes(THREAD, java_lang_invoke_MethodType::ptypes(mtype())); -- int pnum = first_ptype_pos; -- int pmax = ptypes->length(); -- int anum = 0; // method argument -- const char* err = NULL; -- ResourceMark rm(THREAD); -- for (SignatureStream ss(m->signature()); !ss.is_done(); ss.next()) { -- oop ptype_oop = NULL; -- if (ss.at_return_type()) { -- if (pnum != pmax) -- { err = "too many arguments"; break; } -- ptype_oop = java_lang_invoke_MethodType::rtype(mtype()); -- } else { -- if (pnum >= pmax) -- { err = "not enough arguments"; break; } -- if (pnum >= 0) -- ptype_oop = ptypes->obj_at(pnum); -- else if (insert_ptype.is_null()) -- ptype_oop = NULL; -- else -- ptype_oop = insert_ptype->java_mirror(); -- pnum += 1; -- anum += 1; -- } -- KlassHandle pklass; -- BasicType ptype = T_OBJECT; -- bool have_ptype = false; -- // missing ptype_oop does not match any non-reference; use Object to report the error -- pklass = SystemDictionaryHandles::Object_klass(); -- if (ptype_oop != NULL) { -- have_ptype = true; -- klassOop pklass_oop = NULL; -- ptype = java_lang_Class::as_BasicType(ptype_oop, &pklass_oop); -- pklass = KlassHandle(THREAD, pklass_oop); -- } -- ptype_oop = NULL; //done with this -- KlassHandle aklass; -- BasicType atype = ss.type(); -- if (atype == T_ARRAY) atype = T_OBJECT; // fold all refs to T_OBJECT -- if (atype == T_OBJECT) { -- if (!have_ptype) { -- // null matches any reference -- continue; -- } -- if (mhi_type.is_null()) { -- // If we fail to resolve types at this point, we will usually throw an error. 
-- TempNewSymbol name = ss.as_symbol_or_null(); -- if (name != NULL) { -- instanceKlass* mk = instanceKlass::cast(m->method_holder()); -- Handle loader(THREAD, mk->class_loader()); -- Handle domain(THREAD, mk->protection_domain()); -- klassOop aklass_oop = SystemDictionary::resolve_or_null(name, loader, domain, CHECK); -- if (aklass_oop != NULL) -- aklass = KlassHandle(THREAD, aklass_oop); -- if (aklass.is_null() && -- pklass.not_null() && -- loader.is_null() && -- pklass->name() == name) -- // accept name equivalence here, since that's the best we can do -- aklass = pklass; -- } -- } else { -- // for method handle invokers we don't look at the name in the signature -- oop atype_oop; -- if (ss.at_return_type()) -- atype_oop = java_lang_invoke_MethodType::rtype(mhi_type()); -- else -- atype_oop = java_lang_invoke_MethodType::ptype(mhi_type(), anum-1); -- klassOop aklass_oop = NULL; -- atype = java_lang_Class::as_BasicType(atype_oop, &aklass_oop); -- aklass = KlassHandle(THREAD, aklass_oop); -- } -- } -- if (!ss.at_return_type()) { -- err = check_argument_type_change(ptype, pklass(), atype, aklass(), anum); -- } else { -- err = check_return_type_change(atype, aklass(), ptype, pklass()); // note reversal! -- } -- if (err != NULL) break; -- } -- -- if (err != NULL) { --#ifndef PRODUCT -- if (PrintMiscellaneous && (Verbose || WizardMode)) { -- tty->print("*** verify_method_signature failed: "); -- java_lang_invoke_MethodType::print_signature(mtype(), tty); -- tty->cr(); -- tty->print_cr(" first_ptype_pos = %d, insert_ptype = "UINTX_FORMAT, first_ptype_pos, insert_ptype()); -- tty->print(" Failing method: "); -- m->print(); -- } --#endif //PRODUCT -- THROW_MSG(vmSymbols::java_lang_InternalError(), err); -- } --} -- --// Main routine for verifying the MethodHandle.type of a proposed --// direct or bound-direct method handle. 
--void MethodHandles::verify_method_type(methodHandle m, -- Handle mtype, -- bool has_bound_recv, -- KlassHandle bound_recv_type, -- TRAPS) { -- bool m_needs_receiver = !m->is_static(); -- -- const char* err = NULL; -- -- int first_ptype_pos = m_needs_receiver ? 1 : 0; -- if (has_bound_recv) { -- first_ptype_pos -= 1; // ptypes do not include the bound argument; start earlier in them -- if (m_needs_receiver && bound_recv_type.is_null()) -- { err = "bound receiver is not an object"; goto die; } -- } -- -- if (m_needs_receiver && err == NULL) { -- objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(mtype()); -- if (ptypes->length() < first_ptype_pos) -- { err = "receiver argument is missing"; goto die; } -- if (has_bound_recv) -- err = check_method_receiver(m(), bound_recv_type->as_klassOop()); -- else -- err = check_method_receiver(m(), java_lang_Class::as_klassOop(ptypes->obj_at(first_ptype_pos-1))); -- if (err != NULL) goto die; -- } -- -- // Check the other arguments for mistypes. -- verify_method_signature(m, mtype, first_ptype_pos, bound_recv_type, CHECK); -- return; -- -- die: -- THROW_MSG(vmSymbols::java_lang_InternalError(), err); --} -- --void MethodHandles::verify_vmslots(Handle mh, TRAPS) { -- // Verify vmslots. -- int check_slots = argument_slot_count(java_lang_invoke_MethodHandle::type(mh())); -- if (java_lang_invoke_MethodHandle::vmslots(mh()) != check_slots) { -- THROW_MSG(vmSymbols::java_lang_InternalError(), "bad vmslots in BMH"); -- } --} -- --void MethodHandles::verify_vmargslot(Handle mh, int argnum, int argslot, TRAPS) { -- // Verify that argslot points at the given argnum. 
-- int check_slot = argument_slot(java_lang_invoke_MethodHandle::type(mh()), argnum); -- if (argslot != check_slot || argslot < 0) { -- ResourceMark rm; -- const char* fmt = "for argnum of %d, vmargslot is %d, should be %d"; -- size_t msglen = strlen(fmt) + 3*11 + 1; -- char* msg = NEW_RESOURCE_ARRAY(char, msglen); -- jio_snprintf(msg, msglen, fmt, argnum, argslot, check_slot); -- THROW_MSG(vmSymbols::java_lang_InternalError(), msg); -- } --} -- --// Verify the correspondence between two method types. --// Apart from the advertised changes, caller method type X must --// be able to invoke the callee method Y type with no violations --// of type integrity. --// Return NULL if all is well, else a short error message. --const char* MethodHandles::check_method_type_change(oop src_mtype, int src_beg, int src_end, -- int insert_argnum, oop insert_type, -- int change_argnum, oop change_type, -- int delete_argnum, -- oop dst_mtype, int dst_beg, int dst_end, -- bool raw) { -- objArrayOop src_ptypes = java_lang_invoke_MethodType::ptypes(src_mtype); -- objArrayOop dst_ptypes = java_lang_invoke_MethodType::ptypes(dst_mtype); -- -- int src_max = src_ptypes->length(); -- int dst_max = dst_ptypes->length(); -- -- if (src_end == -1) src_end = src_max; -- if (dst_end == -1) dst_end = dst_max; -- -- assert(0 <= src_beg && src_beg <= src_end && src_end <= src_max, "oob"); -- assert(0 <= dst_beg && dst_beg <= dst_end && dst_end <= dst_max, "oob"); -- -- // pending actions; set to -1 when done: -- int ins_idx = insert_argnum, chg_idx = change_argnum, del_idx = delete_argnum; -- -- const char* err = NULL; -- -- // Walk along each array of parameter types, including a virtual -- // NULL end marker at the end of each. -- for (int src_idx = src_beg, dst_idx = dst_beg; -- (src_idx <= src_end && dst_idx <= dst_end); -- src_idx++, dst_idx++) { -- oop src_type = (src_idx == src_end) ? oop(NULL) : src_ptypes->obj_at(src_idx); -- oop dst_type = (dst_idx == dst_end) ? 
oop(NULL) : dst_ptypes->obj_at(dst_idx); -- bool fix_null_src_type = false; -- -- // Perform requested edits. -- if (ins_idx == src_idx) { -- // note that the inserted guy is never affected by a change or deletion -- ins_idx = -1; -- src_type = insert_type; -- fix_null_src_type = true; -- --src_idx; // back up to process src type on next loop -- src_idx = src_end; -- } else { -- // note that the changed guy can be immediately deleted -- if (chg_idx == src_idx) { -- chg_idx = -1; -- assert(src_idx < src_end, "oob"); -- src_type = change_type; -- fix_null_src_type = true; -- } -- if (del_idx == src_idx) { -- del_idx = -1; -- assert(src_idx < src_end, "oob"); -- --dst_idx; -- continue; // rerun loop after skipping this position -- } -- } -- -- if (src_type == NULL && fix_null_src_type) -- // explicit null in this case matches any dest reference -- src_type = (java_lang_Class::is_primitive(dst_type) ? object_java_mirror() : dst_type); -- -- // Compare the two argument types. -- if (src_type != dst_type) { -- if (src_type == NULL) return "not enough arguments"; -- if (dst_type == NULL) return "too many arguments"; -- err = check_argument_type_change(src_type, dst_type, dst_idx, raw); -- if (err != NULL) return err; -- } -- } -- -- // Now compare return types also. -- oop src_rtype = java_lang_invoke_MethodType::rtype(src_mtype); -- oop dst_rtype = java_lang_invoke_MethodType::rtype(dst_mtype); -- if (src_rtype != dst_rtype) { -- err = check_return_type_change(dst_rtype, src_rtype, raw); // note reversal! 
-- if (err != NULL) return err; -- } -- -- assert(err == NULL, ""); -- return NULL; // all is well --} -- -- --const char* MethodHandles::check_argument_type_change(BasicType src_type, -- klassOop src_klass, -- BasicType dst_type, -- klassOop dst_klass, -- int argnum, -- bool raw) { -- const char* err = NULL; -- const bool for_return = (argnum < 0); -- -- // just in case: -- if (src_type == T_ARRAY) src_type = T_OBJECT; -- if (dst_type == T_ARRAY) dst_type = T_OBJECT; -- -- // Produce some nice messages if VerifyMethodHandles is turned on: -- if (!same_basic_type_for_arguments(src_type, dst_type, raw, for_return)) { -- if (src_type == T_OBJECT) { -- if (raw && is_java_primitive(dst_type)) -- return NULL; // ref-to-prim discards ref and returns zero -- err = (!for_return -- ? "type mismatch: passing a %s for method argument #%d, which expects primitive %s" -- : "type mismatch: returning a %s, but caller expects primitive %s"); -- } else if (dst_type == T_OBJECT) { -- err = (!for_return -- ? "type mismatch: passing a primitive %s for method argument #%d, which expects %s" -- : "type mismatch: returning a primitive %s, but caller expects %s"); -- } else { -- err = (!for_return -- ? "type mismatch: passing a %s for method argument #%d, which expects %s" -- : "type mismatch: returning a %s, but caller expects %s"); -- } -- } else if (src_type == T_OBJECT && dst_type == T_OBJECT && -- class_cast_needed(src_klass, dst_klass)) { -- if (!class_cast_needed(dst_klass, src_klass)) { -- if (raw) -- return NULL; // reverse cast is OK; the MH target is trusted to enforce it -- err = (!for_return -- ? "cast required: passing a %s for method argument #%d, which expects %s" -- : "cast required: returning a %s, but caller expects %s"); -- } else { -- err = (!for_return -- ? 
"reference mismatch: passing a %s for method argument #%d, which expects %s" -- : "reference mismatch: returning a %s, but caller expects %s"); -- } -- } else { -- // passed the obstacle course -- return NULL; -- } -- -- // format, format, format -- const char* src_name = type2name(src_type); -- const char* dst_name = type2name(dst_type); -- if (src_name == NULL) src_name = "unknown type"; -- if (dst_name == NULL) dst_name = "unknown type"; -- if (src_type == T_OBJECT) -- src_name = (src_klass != NULL) ? Klass::cast(src_klass)->external_name() : "an unresolved class"; -- if (dst_type == T_OBJECT) -- dst_name = (dst_klass != NULL) ? Klass::cast(dst_klass)->external_name() : "an unresolved class"; -- -- size_t msglen = strlen(err) + strlen(src_name) + strlen(dst_name) + (argnum < 10 ? 1 : 11); -- char* msg = NEW_RESOURCE_ARRAY(char, msglen + 1); -- if (!for_return) { -- assert(strstr(err, "%d") != NULL, ""); -- jio_snprintf(msg, msglen, err, src_name, argnum, dst_name); -- } else { -- assert(strstr(err, "%d") == NULL, ""); -- jio_snprintf(msg, msglen, err, src_name, dst_name); -- } -- return msg; --} -- --// Compute the depth within the stack of the given argument, i.e., --// the combined size of arguments to the right of the given argument. --// For the last argument (ptypes.length-1) this will be zero. --// For the first argument (0) this will be the size of all --// arguments but that one. For the special number -1, this --// will be the size of all arguments, including the first. --// If the argument is neither -1 nor a valid argument index, --// then return a negative number. Otherwise, the result --// is in the range [0..vmslots] inclusive. 
--int MethodHandles::argument_slot(oop method_type, int arg) { -- objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(method_type); -- int argslot = 0; -- int len = ptypes->length(); -- if (arg < -1 || arg >= len) return -99; -- for (int i = len-1; i > arg; i--) { -- BasicType bt = java_lang_Class::as_BasicType(ptypes->obj_at(i)); -- argslot += type2size[bt]; -- } -- assert(argument_slot_to_argnum(method_type, argslot) == arg, "inverse works"); -- return argslot; --} -- --// Given a slot number, return the argument number. --int MethodHandles::argument_slot_to_argnum(oop method_type, int query_argslot) { -- objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(method_type); -- int argslot = 0; -- int len = ptypes->length(); -- for (int i = len-1; i >= 0; i--) { -- if (query_argslot == argslot) return i; -- BasicType bt = java_lang_Class::as_BasicType(ptypes->obj_at(i)); -- argslot += type2size[bt]; -- } -- // return pseudo-arg deepest in stack: -- if (query_argslot == argslot) return -1; -- return -99; // oob slot, or splitting a double-slot arg --} -- --methodHandle MethodHandles::dispatch_decoded_method(methodHandle m, -- KlassHandle receiver_limit, -- int decode_flags, -- KlassHandle receiver_klass, -- TRAPS) { -- assert((decode_flags & ~_DMF_DIRECT_MASK) == 0, "must be direct method reference"); -- assert((decode_flags & _dmf_has_receiver) != 0, "must have a receiver or first reference argument"); -- -- if (!m->is_static() && -- (receiver_klass.is_null() || !receiver_klass->is_subtype_of(m->method_holder()))) -- // given type does not match class of method, or receiver is null! -- // caller should have checked this, but let's be extra careful... 
-- return methodHandle(); -- -- if (receiver_limit.not_null() && -- (receiver_klass.not_null() && !receiver_klass->is_subtype_of(receiver_limit()))) -- // given type is not limited to the receiver type -- // note that a null receiver can match any reference value, for a static method -- return methodHandle(); -- -- if (!(decode_flags & MethodHandles::_dmf_does_dispatch)) { -- // pre-dispatched or static method (null receiver is OK for static) -- return m; -- -- } else if (receiver_klass.is_null()) { -- // null receiver value; cannot dispatch -- return methodHandle(); -- -- } else if (!(decode_flags & MethodHandles::_dmf_from_interface)) { -- // perform virtual dispatch -- int vtable_index = m->vtable_index(); -- guarantee(vtable_index >= 0, "valid vtable index"); -- -- // receiver_klass might be an arrayKlassOop but all vtables start at -- // the same place. The cast is to avoid virtual call and assertion. -- // See also LinkResolver::runtime_resolve_virtual_method. -- instanceKlass* inst = (instanceKlass*)Klass::cast(receiver_klass()); -- DEBUG_ONLY(inst->verify_vtable_index(vtable_index)); -- methodOop m_oop = inst->method_at_vtable(vtable_index); -- return methodHandle(THREAD, m_oop); -- -- } else { -- // perform interface dispatch -- int itable_index = klassItable::compute_itable_index(m()); -- guarantee(itable_index >= 0, "valid itable index"); -- instanceKlass* inst = instanceKlass::cast(receiver_klass()); -- methodOop m_oop = inst->method_at_itable(m->method_holder(), itable_index, THREAD); -- return methodHandle(THREAD, m_oop); -- } --} -- --void MethodHandles::verify_DirectMethodHandle(Handle mh, methodHandle m, TRAPS) { -- // Verify type. -- Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(mh())); -- verify_method_type(m, mtype, false, KlassHandle(), CHECK); -- -- // Verify vmslots. 
-- if (java_lang_invoke_MethodHandle::vmslots(mh()) != m->size_of_parameters()) { -- THROW_MSG(vmSymbols::java_lang_InternalError(), "bad vmslots in DMH"); -- } --} -- --void MethodHandles::init_DirectMethodHandle(Handle mh, methodHandle m, bool do_dispatch, TRAPS) { -- // Check arguments. -- if (mh.is_null() || m.is_null() || -- (!do_dispatch && m->is_abstract())) { -- THROW(vmSymbols::java_lang_InternalError()); -- } -- -- if (VerifyMethodHandles) { -- // The privileged code which invokes this routine should not make -- // a mistake about types, but it's better to verify. -- verify_DirectMethodHandle(mh, m, CHECK); -- } -- -- // Finally, after safety checks are done, link to the target method. -- // We will follow the same path as the latter part of -- // InterpreterRuntime::resolve_invoke(), which first finds the method -- // and then decides how to populate the constant pool cache entry -- // that links the interpreter calls to the method. We need the same -- // bits, and will use the same calling sequence code. -- -- int vmindex = methodOopDesc::garbage_vtable_index; -- Handle vmtarget; -- -- instanceKlass::cast(m->method_holder())->link_class(CHECK); -- -- MethodHandleEntry* me = NULL; -- if (do_dispatch && Klass::cast(m->method_holder())->is_interface()) { -- // We are simulating an invokeinterface instruction. -- // (We might also be simulating an invokevirtual on a miranda method, -- // but it is safe to treat it as an invokeinterface.) -- assert(!m->can_be_statically_bound(), "no final methods on interfaces"); -- vmindex = klassItable::compute_itable_index(m()); -- assert(vmindex >= 0, "(>=0) == do_dispatch"); -- // Set up same bits as ConstantPoolCacheEntry::set_interface_call(). -- vmtarget = m->method_holder(); // the interface -- me = MethodHandles::entry(MethodHandles::_invokeinterface_mh); -- } else if (!do_dispatch || m->can_be_statically_bound()) { -- // We are simulating an invokestatic or invokespecial instruction. 
-- // Set up the method pointer, just like ConstantPoolCacheEntry::set_method(). -- vmtarget = m; -- // this does not help dispatch, but it will make it possible to parse this MH: -- vmindex = methodOopDesc::nonvirtual_vtable_index; -- assert(vmindex < 0, "(>=0) == do_dispatch"); -- if (!m->is_static()) { -- me = MethodHandles::entry(MethodHandles::_invokespecial_mh); -- } else { -- me = MethodHandles::entry(MethodHandles::_invokestatic_mh); -- // Part of the semantics of a static call is an initialization barrier. -- // For a DMH, it is done now, when the handle is created. -- Klass* k = Klass::cast(m->method_holder()); -- if (k->should_be_initialized()) { -- k->initialize(CHECK); // possible safepoint -- } -- } -- } else { -- // We are simulating an invokevirtual instruction. -- // Set up the vtable index, just like ConstantPoolCacheEntry::set_method(). -- // The key logic is LinkResolver::runtime_resolve_virtual_method. -- vmindex = m->vtable_index(); -- vmtarget = m->method_holder(); -- me = MethodHandles::entry(MethodHandles::_invokevirtual_mh); -- } -- -- if (me == NULL) { THROW(vmSymbols::java_lang_InternalError()); } -- -- java_lang_invoke_DirectMethodHandle::set_vmtarget(mh(), vmtarget()); -- java_lang_invoke_DirectMethodHandle::set_vmindex( mh(), vmindex); -- DEBUG_ONLY(KlassHandle rlimit; int flags); -- assert(MethodHandles::decode_method(mh(), rlimit, flags) == m, -- "properly stored for later decoding"); -- DEBUG_ONLY(bool actual_do_dispatch = ((flags & _dmf_does_dispatch) != 0)); -- assert(!(actual_do_dispatch && !do_dispatch), -- "do not perform dispatch if !do_dispatch specified"); -- assert(actual_do_dispatch == (vmindex >= 0), "proper later decoding of do_dispatch"); -- assert(decode_MethodHandle_stack_pushes(mh()) == 0, "DMH does not move stack"); -- -- // Done! -- java_lang_invoke_MethodHandle::set_vmentry(mh(), me); --} -- --void MethodHandles::verify_BoundMethodHandle_with_receiver(Handle mh, -- methodHandle m, -- TRAPS) { -- // Verify type. 
-- KlassHandle bound_recv_type; -- { -- oop receiver = java_lang_invoke_BoundMethodHandle::argument(mh()); -- if (receiver != NULL) -- bound_recv_type = KlassHandle(THREAD, receiver->klass()); -- } -- Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(mh())); -- verify_method_type(m, mtype, true, bound_recv_type, CHECK); -- -- int receiver_pos = m->size_of_parameters() - 1; -- -- // Verify MH.vmargslot, which should point at the bound receiver. -- verify_vmargslot(mh, -1, java_lang_invoke_BoundMethodHandle::vmargslot(mh()), CHECK); -- //verify_vmslots(mh, CHECK); -- -- // Verify vmslots. -- if (java_lang_invoke_MethodHandle::vmslots(mh()) != receiver_pos) { -- THROW_MSG(vmSymbols::java_lang_InternalError(), "bad vmslots in BMH (receiver)"); -- } --} -- --// Initialize a BMH with a receiver bound directly to a methodOop. --void MethodHandles::init_BoundMethodHandle_with_receiver(Handle mh, -- methodHandle original_m, -- KlassHandle receiver_limit, -- int decode_flags, -- TRAPS) { -- // Check arguments. 
-- if (mh.is_null() || original_m.is_null()) { -- THROW(vmSymbols::java_lang_InternalError()); -- } -- -- KlassHandle receiver_klass; -- { -- oop receiver_oop = java_lang_invoke_BoundMethodHandle::argument(mh()); -- if (receiver_oop != NULL) -- receiver_klass = KlassHandle(THREAD, receiver_oop->klass()); -- } -- methodHandle m = dispatch_decoded_method(original_m, -- receiver_limit, decode_flags, -- receiver_klass, -- CHECK); -- if (m.is_null()) { THROW(vmSymbols::java_lang_InternalError()); } -- if (m->is_abstract()) { THROW(vmSymbols::java_lang_AbstractMethodError()); } -- -- int vmargslot = m->size_of_parameters() - 1; -- assert(java_lang_invoke_BoundMethodHandle::vmargslot(mh()) == vmargslot, ""); -- -- if (VerifyMethodHandles) { -- verify_BoundMethodHandle_with_receiver(mh, m, CHECK); -- } -- -- java_lang_invoke_BoundMethodHandle::set_vmtarget(mh(), m()); -- -- DEBUG_ONLY(KlassHandle junk1; int junk2); -- assert(MethodHandles::decode_method(mh(), junk1, junk2) == m, "properly stored for later decoding"); -- assert(decode_MethodHandle_stack_pushes(mh()) == 1, "BMH pushes one stack slot"); -- -- // Done! 
-- java_lang_invoke_MethodHandle::set_vmentry(mh(), MethodHandles::entry(MethodHandles::_bound_ref_direct_mh)); --} -- --void MethodHandles::verify_BoundMethodHandle(Handle mh, Handle target, int argnum, -- bool direct_to_method, TRAPS) { -- ResourceMark rm; -- Handle ptype_handle(THREAD, -- java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum)); -- KlassHandle ptype_klass; -- BasicType ptype = java_lang_Class::as_BasicType(ptype_handle(), &ptype_klass); -- int slots_pushed = type2size[ptype]; -- -- oop argument = java_lang_invoke_BoundMethodHandle::argument(mh()); -- -- const char* err = NULL; -- -- switch (ptype) { -- case T_OBJECT: -- if (argument != NULL) -- // we must implicitly convert from the arg type to the outgoing ptype -- err = check_argument_type_change(T_OBJECT, argument->klass(), ptype, ptype_klass(), argnum); -- break; -- -- case T_ARRAY: case T_VOID: -- assert(false, "array, void do not appear here"); -- default: -- if (ptype != T_INT && !is_subword_type(ptype)) { -- err = "unexpected parameter type"; -- break; -- } -- // check subrange of Integer.value, if necessary -- if (argument == NULL || argument->klass() != SystemDictionary::Integer_klass()) { -- err = "bound integer argument must be of type java.lang.Integer"; -- break; -- } -- if (ptype != T_INT) { -- int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT); -- jint value = argument->int_field(value_offset); -- int vminfo = adapter_unbox_subword_vminfo(ptype); -- jint subword = truncate_subword_from_vminfo(value, vminfo); -- if (value != subword) { -- err = "bound subword value does not fit into the subword type"; -- break; -- } -- } -- break; -- case T_FLOAT: -- case T_DOUBLE: -- case T_LONG: -- { -- // we must implicitly convert from the unboxed arg type to the outgoing ptype -- BasicType argbox = java_lang_boxing_object::basic_type(argument); -- if (argbox != ptype) { -- err = check_argument_type_change(T_OBJECT, (argument == NULL -- ? 
SystemDictionary::Object_klass() -- : argument->klass()), -- ptype, ptype_klass(), argnum); -- assert(err != NULL, "this must be an error"); -- } -- break; -- } -- } -- -- if (err == NULL) { -- DEBUG_ONLY(int this_pushes = decode_MethodHandle_stack_pushes(mh())); -- if (direct_to_method) { -- assert(this_pushes == slots_pushed, "BMH pushes one or two stack slots"); -- } else { -- int target_pushes = decode_MethodHandle_stack_pushes(target()); -- assert(this_pushes == slots_pushed + target_pushes, "BMH stack motion must be correct"); -- } -- } -- -- if (err == NULL) { -- // Verify the rest of the method type. -- err = check_method_type_insertion(java_lang_invoke_MethodHandle::type(mh()), -- argnum, ptype_handle(), -- java_lang_invoke_MethodHandle::type(target())); -- } -- -- if (err != NULL) { -- THROW_MSG(vmSymbols::java_lang_InternalError(), err); -- } --} -- --void MethodHandles::init_BoundMethodHandle(Handle mh, Handle target, int argnum, TRAPS) { -- // Check arguments. -- if (mh.is_null() || target.is_null() || !java_lang_invoke_MethodHandle::is_instance(target())) { -- THROW(vmSymbols::java_lang_InternalError()); -- } -- -- int argslot = java_lang_invoke_BoundMethodHandle::vmargslot(mh()); -- -- if (VerifyMethodHandles) { -- int insert_after = argnum - 1; -- verify_vmargslot(mh, insert_after, argslot, CHECK); -- verify_vmslots(mh, CHECK); -- } -- -- // Get bound type and required slots. -- BasicType ptype; -- { -- oop ptype_oop = java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum); -- ptype = java_lang_Class::as_BasicType(ptype_oop); -- } -- int slots_pushed = type2size[ptype]; -- -- // If (a) the target is a direct non-dispatched method handle, -- // or (b) the target is a dispatched direct method handle and we -- // are binding the receiver, cut out the middle-man. -- // Do this by decoding the DMH and using its methodOop directly as vmtarget. 
-- bool direct_to_method = false; -- if (OptimizeMethodHandles && -- target->klass() == SystemDictionary::DirectMethodHandle_klass() && -- (argnum != 0 || java_lang_invoke_BoundMethodHandle::argument(mh()) != NULL) && -- (argnum == 0 || java_lang_invoke_DirectMethodHandle::vmindex(target()) < 0)) { -- KlassHandle receiver_limit; int decode_flags = 0; -- methodHandle m = decode_method(target(), receiver_limit, decode_flags); -- if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "DMH failed to decode"); } -- DEBUG_ONLY(int m_vmslots = m->size_of_parameters() - slots_pushed); // pos. of 1st arg. -- assert(java_lang_invoke_BoundMethodHandle::vmslots(mh()) == m_vmslots, "type w/ m sig"); -- if (argnum == 0 && (decode_flags & _dmf_has_receiver) != 0) { -- init_BoundMethodHandle_with_receiver(mh, m, -- receiver_limit, decode_flags, -- CHECK); -- return; -- } -- -- // Even if it is not a bound receiver, we still might be able -- // to bind another argument and still invoke the methodOop directly. -- if (!(decode_flags & _dmf_does_dispatch)) { -- direct_to_method = true; -- java_lang_invoke_BoundMethodHandle::set_vmtarget(mh(), m()); -- } -- } -- if (!direct_to_method) -- java_lang_invoke_BoundMethodHandle::set_vmtarget(mh(), target()); -- -- if (VerifyMethodHandles) { -- verify_BoundMethodHandle(mh, target, argnum, direct_to_method, CHECK); -- } -- -- // Next question: Is this a ref, int, or long bound value? -- MethodHandleEntry* me = NULL; -- if (ptype == T_OBJECT) { -- if (direct_to_method) me = MethodHandles::entry(_bound_ref_direct_mh); -- else me = MethodHandles::entry(_bound_ref_mh); -- } else if (slots_pushed == 2) { -- if (direct_to_method) me = MethodHandles::entry(_bound_long_direct_mh); -- else me = MethodHandles::entry(_bound_long_mh); -- } else if (slots_pushed == 1) { -- if (direct_to_method) me = MethodHandles::entry(_bound_int_direct_mh); -- else me = MethodHandles::entry(_bound_int_mh); -- } else { -- assert(false, ""); -- } -- -- // Done! 
-- java_lang_invoke_MethodHandle::set_vmentry(mh(), me); --} -- --static void throw_InternalError_for_bad_conversion(int conversion, const char* err, TRAPS) { -- char msg[200]; -- jio_snprintf(msg, sizeof(msg), "bad adapter (conversion=0x%08x): %s", conversion, err); -- THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), msg); --} -- --void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) { -- ResourceMark rm; -- jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh()); -- int argslot = java_lang_invoke_AdapterMethodHandle::vmargslot(mh()); -- -- verify_vmargslot(mh, argnum, argslot, CHECK); -- verify_vmslots(mh, CHECK); -- -- jint conv_op = adapter_conversion_op(conversion); -- if (!conv_op_valid(conv_op)) { -- throw_InternalError_for_bad_conversion(conversion, "unknown conversion op", THREAD); -- return; -- } -- EntryKind ek = adapter_entry_kind(conv_op); -- -- int stack_move = adapter_conversion_stack_move(conversion); -- BasicType src = adapter_conversion_src_type(conversion); -- BasicType dest = adapter_conversion_dest_type(conversion); -- int vminfo = adapter_conversion_vminfo(conversion); // should be zero -- -- Handle argument(THREAD, java_lang_invoke_AdapterMethodHandle::argument(mh())); -- Handle target(THREAD, java_lang_invoke_AdapterMethodHandle::vmtarget(mh())); -- Handle src_mtype(THREAD, java_lang_invoke_MethodHandle::type(mh())); -- Handle dst_mtype(THREAD, java_lang_invoke_MethodHandle::type(target())); -- Handle arg_mtype; -- -- const char* err = NULL; -- -- if (err == NULL) { -- // Check that the correct argument is supplied, but only if it is required. 
-- switch (ek) { -- case _adapter_check_cast: // target type of cast -- case _adapter_ref_to_prim: // wrapper type from which to unbox -- case _adapter_spread_args: // array type to spread from -- if (!java_lang_Class::is_instance(argument()) -- || java_lang_Class::is_primitive(argument())) -- { err = "adapter requires argument of type java.lang.Class"; break; } -- if (ek == _adapter_spread_args) { -- // Make sure it is a suitable collection type. (Array, for now.) -- Klass* ak = Klass::cast(java_lang_Class::as_klassOop(argument())); -- if (!ak->oop_is_array()) -- { err = "spread adapter requires argument representing an array class"; break; } -- BasicType et = arrayKlass::cast(ak->as_klassOop())->element_type(); -- if (et != dest && stack_move <= 0) -- { err = "spread adapter requires array class argument of correct type"; break; } -- } -- break; -- case _adapter_prim_to_ref: // boxer MH to use -- case _adapter_collect_args: // method handle which collects the args -- case _adapter_fold_args: // method handle which collects the args -- if (!java_lang_invoke_MethodHandle::is_instance(argument())) -- { err = "MethodHandle adapter argument required"; break; } -- arg_mtype = Handle(THREAD, java_lang_invoke_MethodHandle::type(argument())); -- break; -- default: -- if (argument.not_null()) -- { err = "adapter has spurious argument"; break; } -- break; -- } -- } -- -- if (err == NULL) { -- // Check that the src/dest types are supplied if needed. -- // Also check relevant parameter or return types. 
-- switch (ek) { -- case _adapter_check_cast: -- if (src != T_OBJECT || dest != T_OBJECT) { -- err = "adapter requires object src/dest conversion subfields"; -- } -- break; -- case _adapter_prim_to_prim: -- if (!is_java_primitive(src) || !is_java_primitive(dest) || src == dest) { -- err = "adapter requires primitive src/dest conversion subfields"; break; -- } -- if ( (src == T_FLOAT || src == T_DOUBLE) && !(dest == T_FLOAT || dest == T_DOUBLE) || -- !(src == T_FLOAT || src == T_DOUBLE) && (dest == T_FLOAT || dest == T_DOUBLE)) { -- err = "adapter cannot convert beween floating and fixed-point"; break; -- } -- break; -- case _adapter_ref_to_prim: -- if (src != T_OBJECT || !is_java_primitive(dest) -- || argument() != Klass::cast(SystemDictionary::box_klass(dest))->java_mirror()) { -- err = "adapter requires primitive dest conversion subfield"; break; -- } -- break; -- case _adapter_prim_to_ref: -- if (!is_java_primitive(src) || dest != T_OBJECT) { -- err = "adapter requires primitive src conversion subfield"; break; -- } -- break; -- case _adapter_swap_args: -- { -- if (!src || !dest) { -- err = "adapter requires src/dest conversion subfields for swap"; break; -- } -- int src_size = type2size[src]; -- if (src_size != type2size[dest]) { -- err = "adapter requires equal sizes for src/dest"; break; -- } -- int src_slot = argslot; -- int dest_slot = vminfo; -- int src_arg = argnum; -- int dest_arg = argument_slot_to_argnum(src_mtype(), dest_slot); -- verify_vmargslot(mh, dest_arg, dest_slot, CHECK); -- if (!(dest_slot >= src_slot + src_size) && -- !(src_slot >= dest_slot + src_size)) { -- err = "source, destination slots must be distinct"; break; -- } else if (!(src_slot > dest_slot)) { -- err = "source of swap must be deeper in stack"; break; -- } -- err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), dest_arg), -- java_lang_invoke_MethodType::ptype(dst_mtype(), src_arg), -- dest_arg); -- if (err == NULL) -- err = 
check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), src_arg), -- java_lang_invoke_MethodType::ptype(dst_mtype(), dest_arg), -- src_arg); -- break; -- } -- case _adapter_rot_args: -- { -- if (!src || !dest) { -- err = "adapter requires src/dest conversion subfields for rotate"; break; -- } -- int src_slot = argslot; -- int limit_raw = vminfo; -- bool rot_down = (src_slot < limit_raw); -- int limit_bias = (rot_down ? MethodHandles::OP_ROT_ARGS_DOWN_LIMIT_BIAS : 0); -- int limit_slot = limit_raw - limit_bias; -- int src_arg = argnum; -- int limit_arg = argument_slot_to_argnum(src_mtype(), limit_slot); -- verify_vmargslot(mh, limit_arg, limit_slot, CHECK); -- if (src_slot == limit_slot) { -- err = "source, destination slots must be distinct"; break; -- } -- if (!rot_down) { // rotate slots up == shift arguments left -- // limit_slot is an inclusive lower limit -- assert((src_slot > limit_slot) && (src_arg < limit_arg), ""); -- // rotate up: [limit_slot..src_slot-ss] --> [limit_slot+ss..src_slot] -- // that is: [src_arg+1..limit_arg] --> [src_arg..limit_arg-1] -- for (int i = src_arg+1; i <= limit_arg && err == NULL; i++) { -- err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), i), -- java_lang_invoke_MethodType::ptype(dst_mtype(), i-1), -- i); -- } -- } else { // rotate slots down == shfit arguments right -- // limit_slot is an exclusive upper limit -- assert((src_slot < limit_slot - limit_bias) && (src_arg > limit_arg + limit_bias), ""); -- // rotate down: [src_slot+ss..limit_slot) --> [src_slot..limit_slot-ss) -- // that is: (limit_arg..src_arg-1] --> (dst_arg+1..src_arg] -- for (int i = limit_arg+1; i <= src_arg-1 && err == NULL; i++) { -- err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), i), -- java_lang_invoke_MethodType::ptype(dst_mtype(), i+1), -- i); -- } -- } -- if (err == NULL) { -- int dest_arg = (rot_down ? 
limit_arg+1 : limit_arg); -- err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), src_arg), -- java_lang_invoke_MethodType::ptype(dst_mtype(), dest_arg), -- src_arg); -- } -- } -- break; -- case _adapter_spread_args: -- case _adapter_collect_args: -- case _adapter_fold_args: -- { -- bool is_spread = (ek == _adapter_spread_args); -- bool is_fold = (ek == _adapter_fold_args); -- BasicType coll_type = is_spread ? src : dest; -- BasicType elem_type = is_spread ? dest : src; -- // coll_type is type of args in collected form (or T_VOID if none) -- // elem_type is common type of args in spread form (or T_VOID if missing or heterogeneous) -- if (coll_type == 0 || elem_type == 0) { -- err = "adapter requires src/dest subfields for spread or collect"; break; -- } -- if (is_spread && coll_type != T_OBJECT) { -- err = "spread adapter requires object type for argument bundle"; break; -- } -- Handle spread_mtype = (is_spread ? dst_mtype : src_mtype); -- int spread_slot = argslot; -- int spread_arg = argnum; -- int slots_pushed = stack_move / stack_move_unit(); -- int coll_slot_count = type2size[coll_type]; -- int spread_slot_count = (is_spread ? slots_pushed : -slots_pushed) + coll_slot_count; -- if (is_fold) spread_slot_count = argument_slot_count(arg_mtype()); -- if (!is_spread) { -- int init_slots = argument_slot_count(src_mtype()); -- int coll_slots = argument_slot_count(arg_mtype()); -- if (spread_slot_count > init_slots || -- spread_slot_count != coll_slots) { -- err = "collect adapter has inconsistent arg counts"; break; -- } -- int next_slots = argument_slot_count(dst_mtype()); -- int unchanged_slots_in = (init_slots - spread_slot_count); -- int unchanged_slots_out = (next_slots - coll_slot_count - (is_fold ? 
spread_slot_count : 0)); -- if (unchanged_slots_in != unchanged_slots_out) { -- err = "collect adapter continuation has inconsistent arg counts"; break; -- } -- } -- } -- break; -- default: -- if (src != 0 || dest != 0) { -- err = "adapter has spurious src/dest conversion subfields"; break; -- } -- break; -- } -- } -- -- if (err == NULL) { -- // Check the stack_move subfield. -- // It must always report the net change in stack size, positive or negative. -- int slots_pushed = stack_move / stack_move_unit(); -- switch (ek) { -- case _adapter_prim_to_prim: -- case _adapter_ref_to_prim: -- case _adapter_prim_to_ref: -- if (slots_pushed != type2size[dest] - type2size[src]) { -- err = "wrong stack motion for primitive conversion"; -- } -- break; -- case _adapter_dup_args: -- if (slots_pushed <= 0) { -- err = "adapter requires conversion subfield slots_pushed > 0"; -- } -- break; -- case _adapter_drop_args: -- if (slots_pushed >= 0) { -- err = "adapter requires conversion subfield slots_pushed < 0"; -- } -- break; -- case _adapter_collect_args: -- case _adapter_fold_args: -- if (slots_pushed > 2) { -- err = "adapter requires conversion subfield slots_pushed <= 2"; -- } -- break; -- case _adapter_spread_args: -- if (slots_pushed < -1) { -- err = "adapter requires conversion subfield slots_pushed >= -1"; -- } -- break; -- default: -- if (stack_move != 0) { -- err = "adapter has spurious stack_move conversion subfield"; -- } -- break; -- } -- if (err == NULL && stack_move != slots_pushed * stack_move_unit()) { -- err = "stack_move conversion subfield must be multiple of stack_move_unit"; -- } -- } -- -- if (err == NULL) { -- // Make sure this adapter's stack pushing is accurately recorded. 
-- int slots_pushed = stack_move / stack_move_unit(); -- int this_vmslots = java_lang_invoke_MethodHandle::vmslots(mh()); -- int target_vmslots = java_lang_invoke_MethodHandle::vmslots(target()); -- int target_pushes = decode_MethodHandle_stack_pushes(target()); -- if (slots_pushed != (target_vmslots - this_vmslots)) { -- err = "stack_move inconsistent with previous and current MethodType vmslots"; -- } else { -- int this_pushes = decode_MethodHandle_stack_pushes(mh()); -- if (slots_pushed + target_pushes != this_pushes) { -- if (this_pushes == 0) -- err = "adapter push count not initialized"; -- else -- err = "adapter push count is wrong"; -- } -- } -- -- // While we're at it, check that the stack motion decoder works: -- DEBUG_ONLY(int this_pushes = decode_MethodHandle_stack_pushes(mh())); -- assert(this_pushes == slots_pushed + target_pushes, "AMH stack motion must be correct"); -- } -- -- if (err == NULL && vminfo != 0) { -- switch (ek) { -- case _adapter_swap_args: -- case _adapter_rot_args: -- case _adapter_prim_to_ref: -- case _adapter_collect_args: -- case _adapter_fold_args: -- break; // OK -- default: -- err = "vminfo subfield is reserved to the JVM"; -- } -- } -- -- // Do additional ad hoc checks. 
-- if (err == NULL) { -- switch (ek) { -- case _adapter_retype_only: -- err = check_method_type_passthrough(src_mtype(), dst_mtype(), false); -- break; -- -- case _adapter_retype_raw: -- err = check_method_type_passthrough(src_mtype(), dst_mtype(), true); -- break; -- -- case _adapter_check_cast: -- { -- // The actual value being checked must be a reference: -- err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), argnum), -- object_java_mirror(), argnum); -- if (err != NULL) break; -- -- // The output of the cast must fit with the destination argument: -- Handle cast_class = argument; -- err = check_method_type_conversion(src_mtype(), -- argnum, cast_class(), -- dst_mtype()); -- } -- break; -- -- // %%% TO DO: continue in remaining cases to verify src/dst_mtype if VerifyMethodHandles -- } -- } -- -- if (err != NULL) { -- throw_InternalError_for_bad_conversion(conversion, err, THREAD); -- return; -- } -- --} -- --void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS) { -- Handle argument = java_lang_invoke_AdapterMethodHandle::argument(mh()); -- int argslot = java_lang_invoke_AdapterMethodHandle::vmargslot(mh()); -- jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh()); -- jint conv_op = adapter_conversion_op(conversion); -- -- // adjust the adapter code to the internal EntryKind enumeration: -- EntryKind ek_orig = adapter_entry_kind(conv_op); -- EntryKind ek_opt = ek_orig; // may be optimized -- EntryKind ek_try; // temp -- -- // Finalize the vmtarget field (Java initialized it to null). 
-- if (!java_lang_invoke_MethodHandle::is_instance(target())) { -- throw_InternalError_for_bad_conversion(conversion, "bad target", THREAD); -- return; -- } -- java_lang_invoke_AdapterMethodHandle::set_vmtarget(mh(), target()); -- -- int stack_move = adapter_conversion_stack_move(conversion); -- BasicType src = adapter_conversion_src_type(conversion); -- BasicType dest = adapter_conversion_dest_type(conversion); -- int vminfo = adapter_conversion_vminfo(conversion); // should be zero -- -- int slots_pushed = stack_move / stack_move_unit(); -- -- if (VerifyMethodHandles) { -- verify_AdapterMethodHandle(mh, argnum, CHECK); -- } -- -- const char* err = NULL; -- -- if (!conv_op_supported(conv_op)) { -- err = "adapter not yet implemented in the JVM"; -- } -- -- // Now it's time to finish the case analysis and pick a MethodHandleEntry. -- switch (ek_orig) { -- case _adapter_retype_only: -- case _adapter_retype_raw: -- case _adapter_check_cast: -- case _adapter_dup_args: -- case _adapter_drop_args: -- // these work fine via general case code -- break; -- -- case _adapter_prim_to_prim: -- { -- // Non-subword cases are {int,float,long,double} -> {int,float,long,double}. -- // And, the {float,double} -> {int,long} cases must be handled by Java. -- switch (type2size[src] *4+ type2size[dest]) { -- case 1 *4+ 1: -- assert(src == T_INT || is_subword_type(src), "source is not float"); -- // Subword-related cases are int -> {boolean,byte,char,short}. 
-- ek_opt = _adapter_opt_i2i; -- vminfo = adapter_prim_to_prim_subword_vminfo(dest); -- break; -- case 2 *4+ 1: -- if (src == T_LONG && (dest == T_INT || is_subword_type(dest))) { -- ek_opt = _adapter_opt_l2i; -- vminfo = adapter_prim_to_prim_subword_vminfo(dest); -- } else if (src == T_DOUBLE && dest == T_FLOAT) { -- ek_opt = _adapter_opt_d2f; -- } else { -- goto throw_not_impl; // runs user code, hence could block -- } -- break; -- case 1 *4+ 2: -- if ((src == T_INT || is_subword_type(src)) && dest == T_LONG) { -- ek_opt = _adapter_opt_i2l; -- } else if (src == T_FLOAT && dest == T_DOUBLE) { -- ek_opt = _adapter_opt_f2d; -- } else { -- goto throw_not_impl; // runs user code, hence could block -- } -- break; -- default: -- goto throw_not_impl; // runs user code, hence could block -- break; -- } -- } -- break; -- -- case _adapter_ref_to_prim: -- { -- switch (type2size[dest]) { -- case 1: -- ek_opt = _adapter_opt_unboxi; -- vminfo = adapter_unbox_subword_vminfo(dest); -- break; -- case 2: -- ek_opt = _adapter_opt_unboxl; -- break; -- default: -- goto throw_not_impl; -- break; -- } -- } -- break; -- -- case _adapter_prim_to_ref: -- { -- // vminfo will be the location to insert the return value -- vminfo = argslot; -- ek_opt = _adapter_opt_collect_ref; -- ensure_vmlayout_field(target, CHECK); -- // for MethodHandleWalk: -- if (java_lang_invoke_AdapterMethodHandle::is_instance(argument())) -- ensure_vmlayout_field(argument, CHECK); -- if (!OptimizeMethodHandles) break; -- switch (type2size[src]) { -- case 1: -- ek_try = EntryKind(_adapter_opt_filter_S0_ref + argslot); -- if (ek_try < _adapter_opt_collect_LAST && -- ek_adapter_opt_collect_slot(ek_try) == argslot) { -- assert(ek_adapter_opt_collect_count(ek_try) == 1 && -- ek_adapter_opt_collect_type(ek_try) == T_OBJECT, ""); -- ek_opt = ek_try; -- break; -- } -- // else downgrade to variable slot: -- ek_opt = _adapter_opt_collect_1_ref; -- break; -- case 2: -- ek_try = EntryKind(_adapter_opt_collect_2_S0_ref + argslot); 
-- if (ek_try < _adapter_opt_collect_LAST && -- ek_adapter_opt_collect_slot(ek_try) == argslot) { -- assert(ek_adapter_opt_collect_count(ek_try) == 2 && -- ek_adapter_opt_collect_type(ek_try) == T_OBJECT, ""); -- ek_opt = ek_try; -- break; -- } -- // else downgrade to variable slot: -- ek_opt = _adapter_opt_collect_2_ref; -- break; -- default: -- goto throw_not_impl; -- break; -- } -- } -- break; -- -- case _adapter_swap_args: -- case _adapter_rot_args: -- { -- int swap_slots = type2size[src]; -- int src_slot = argslot; -- int dest_slot = vminfo; -- int rotate = (ek_orig == _adapter_swap_args) ? 0 : (src_slot > dest_slot) ? 1 : -1; -- switch (swap_slots) { -- case 1: -- ek_opt = (!rotate ? _adapter_opt_swap_1 : -- rotate > 0 ? _adapter_opt_rot_1_up : _adapter_opt_rot_1_down); -- break; -- case 2: -- ek_opt = (!rotate ? _adapter_opt_swap_2 : -- rotate > 0 ? _adapter_opt_rot_2_up : _adapter_opt_rot_2_down); -- break; -- default: -- goto throw_not_impl; -- break; -- } -- } -- break; -- -- case _adapter_spread_args: -- { -- // vminfo will be the required length of the array -- int array_size = (slots_pushed + 1) / (type2size[dest] == 2 ? 
2 : 1); -- vminfo = array_size; -- // general case -- switch (dest) { -- case T_BOOLEAN : // fall through to T_BYTE: -- case T_BYTE : ek_opt = _adapter_opt_spread_byte; break; -- case T_CHAR : ek_opt = _adapter_opt_spread_char; break; -- case T_SHORT : ek_opt = _adapter_opt_spread_short; break; -- case T_INT : ek_opt = _adapter_opt_spread_int; break; -- case T_LONG : ek_opt = _adapter_opt_spread_long; break; -- case T_FLOAT : ek_opt = _adapter_opt_spread_float; break; -- case T_DOUBLE : ek_opt = _adapter_opt_spread_double; break; -- case T_OBJECT : ek_opt = _adapter_opt_spread_ref; break; -- case T_VOID : if (array_size != 0) goto throw_not_impl; -- ek_opt = _adapter_opt_spread_ref; break; -- default : goto throw_not_impl; -- } -- assert(array_size == 0 || // it doesn't matter what the spreader is -- (ek_adapter_opt_spread_count(ek_opt) == -1 && -- (ek_adapter_opt_spread_type(ek_opt) == dest || -- (ek_adapter_opt_spread_type(ek_opt) == T_BYTE && dest == T_BOOLEAN))), -- err_msg("dest=%d ek_opt=%d", dest, ek_opt)); -- -- if (array_size <= 0) { -- // since the general case does not handle length 0, this case is required: -- ek_opt = _adapter_opt_spread_0; -- break; -- } -- if (dest == T_OBJECT) { -- ek_try = EntryKind(_adapter_opt_spread_1_ref - 1 + array_size); -- if (ek_try < _adapter_opt_spread_LAST && -- ek_adapter_opt_spread_count(ek_try) == array_size) { -- assert(ek_adapter_opt_spread_type(ek_try) == dest, ""); -- ek_opt = ek_try; -- break; -- } -- } -- break; -- } -- break; -- -- case _adapter_collect_args: -- { -- int elem_slots = argument_slot_count(java_lang_invoke_MethodHandle::type(argument())); -- // vminfo will be the location to insert the return value -- vminfo = argslot; -- ensure_vmlayout_field(target, CHECK); -- ensure_vmlayout_field(argument, CHECK); -- -- // general case: -- switch (dest) { -- default : if (!is_subword_type(dest)) goto throw_not_impl; -- // else fall through: -- case T_INT : ek_opt = _adapter_opt_collect_int; break; -- case 
T_LONG : ek_opt = _adapter_opt_collect_long; break; -- case T_FLOAT : ek_opt = _adapter_opt_collect_float; break; -- case T_DOUBLE : ek_opt = _adapter_opt_collect_double; break; -- case T_OBJECT : ek_opt = _adapter_opt_collect_ref; break; -- case T_VOID : ek_opt = _adapter_opt_collect_void; break; -- } -- assert(ek_adapter_opt_collect_slot(ek_opt) == -1 && -- ek_adapter_opt_collect_count(ek_opt) == -1 && -- (ek_adapter_opt_collect_type(ek_opt) == dest || -- ek_adapter_opt_collect_type(ek_opt) == T_INT && is_subword_type(dest)), -- ""); -- -- if (dest == T_OBJECT && elem_slots == 1 && OptimizeMethodHandles) { -- // filter operation on a ref -- ek_try = EntryKind(_adapter_opt_filter_S0_ref + argslot); -- if (ek_try < _adapter_opt_collect_LAST && -- ek_adapter_opt_collect_slot(ek_try) == argslot) { -- assert(ek_adapter_opt_collect_count(ek_try) == elem_slots && -- ek_adapter_opt_collect_type(ek_try) == dest, ""); -- ek_opt = ek_try; -- break; -- } -- ek_opt = _adapter_opt_collect_1_ref; -- break; -- } -- -- if (dest == T_OBJECT && elem_slots == 2 && OptimizeMethodHandles) { -- // filter of two arguments -- ek_try = EntryKind(_adapter_opt_collect_2_S0_ref + argslot); -- if (ek_try < _adapter_opt_collect_LAST && -- ek_adapter_opt_collect_slot(ek_try) == argslot) { -- assert(ek_adapter_opt_collect_count(ek_try) == elem_slots && -- ek_adapter_opt_collect_type(ek_try) == dest, ""); -- ek_opt = ek_try; -- break; -- } -- ek_opt = _adapter_opt_collect_2_ref; -- break; -- } -- -- if (dest == T_OBJECT && OptimizeMethodHandles) { -- // try to use a fixed length adapter -- ek_try = EntryKind(_adapter_opt_collect_0_ref + elem_slots); -- if (ek_try < _adapter_opt_collect_LAST && -- ek_adapter_opt_collect_count(ek_try) == elem_slots) { -- assert(ek_adapter_opt_collect_slot(ek_try) == -1 && -- ek_adapter_opt_collect_type(ek_try) == dest, ""); -- ek_opt = ek_try; -- break; -- } -- } -- -- break; -- } -- -- case _adapter_fold_args: -- { -- int elem_slots = 
argument_slot_count(java_lang_invoke_MethodHandle::type(argument())); -- // vminfo will be the location to insert the return value -- vminfo = argslot + elem_slots; -- ensure_vmlayout_field(target, CHECK); -- ensure_vmlayout_field(argument, CHECK); -- -- switch (dest) { -- default : if (!is_subword_type(dest)) goto throw_not_impl; -- // else fall through: -- case T_INT : ek_opt = _adapter_opt_fold_int; break; -- case T_LONG : ek_opt = _adapter_opt_fold_long; break; -- case T_FLOAT : ek_opt = _adapter_opt_fold_float; break; -- case T_DOUBLE : ek_opt = _adapter_opt_fold_double; break; -- case T_OBJECT : ek_opt = _adapter_opt_fold_ref; break; -- case T_VOID : ek_opt = _adapter_opt_fold_void; break; -- } -- assert(ek_adapter_opt_collect_slot(ek_opt) == -1 && -- ek_adapter_opt_collect_count(ek_opt) == -1 && -- (ek_adapter_opt_collect_type(ek_opt) == dest || -- ek_adapter_opt_collect_type(ek_opt) == T_INT && is_subword_type(dest)), -- ""); -- -- if (dest == T_OBJECT && elem_slots == 0 && OptimizeMethodHandles) { -- // if there are no args, just pretend it's a collect -- ek_opt = _adapter_opt_collect_0_ref; -- break; -- } -- -- if (dest == T_OBJECT && OptimizeMethodHandles) { -- // try to use a fixed length adapter -- ek_try = EntryKind(_adapter_opt_fold_1_ref - 1 + elem_slots); -- if (ek_try < _adapter_opt_fold_LAST && -- ek_adapter_opt_collect_count(ek_try) == elem_slots) { -- assert(ek_adapter_opt_collect_slot(ek_try) == -1 && -- ek_adapter_opt_collect_type(ek_try) == dest, ""); -- ek_opt = ek_try; -- break; -- } -- } -- -- break; -- } -- -- default: -- // should have failed much earlier; must be a missing case here -- assert(false, "incomplete switch"); -- // and fall through: -- -- throw_not_impl: -- if (err == NULL) -- err = "unknown adapter type"; -- break; -- } -- -- if (err == NULL && (vminfo & CONV_VMINFO_MASK) != vminfo) { -- // should not happen, since vminfo is used to encode arg/slot indexes < 255 -- err = "vminfo overflow"; -- } -- -- if (err == NULL && 
!have_entry(ek_opt)) { -- err = "adapter stub for this kind of method handle is missing"; -- } -- -- if (err == NULL && ek_opt == ek_orig) { -- switch (ek_opt) { -- case _adapter_prim_to_prim: -- case _adapter_ref_to_prim: -- case _adapter_prim_to_ref: -- case _adapter_swap_args: -- case _adapter_rot_args: -- case _adapter_collect_args: -- case _adapter_fold_args: -- case _adapter_spread_args: -- // should be handled completely by optimized cases; see above -- err = "init_AdapterMethodHandle should not issue this"; -- break; -- } -- } -- -- if (err != NULL) { -- throw_InternalError_for_bad_conversion(conversion, err_msg("%s: conv_op %d ek_opt %d", err, conv_op, ek_opt), THREAD); -- return; -- } -- -- // Rebuild the conversion value; maybe parts of it were changed. -- jint new_conversion = adapter_conversion(conv_op, src, dest, stack_move, vminfo); -- -- // Finalize the conversion field. (Note that it is final to Java code.) -- java_lang_invoke_AdapterMethodHandle::set_conversion(mh(), new_conversion); -- -- if (java_lang_invoke_CountingMethodHandle::is_instance(mh())) { -- assert(ek_orig == _adapter_retype_only, "only one handled"); -- ek_opt = _adapter_opt_profiling; -- } -- -- // Done! -- java_lang_invoke_MethodHandle::set_vmentry(mh(), entry(ek_opt)); -- -- // There should be enough memory barriers on exit from native methods -- // to ensure that the MH is fully initialized to all threads before -- // Java code can publish it in global data structures. 
--} -- --void MethodHandles::ensure_vmlayout_field(Handle target, TRAPS) { -- Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(target())); -- Handle mtform(THREAD, java_lang_invoke_MethodType::form(mtype())); -- if (mtform.is_null()) { THROW(vmSymbols::java_lang_InternalError()); } -- if (java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() > 0) { -- if (java_lang_invoke_MethodTypeForm::vmlayout(mtform()) == NULL) { -- // fill it in -- Handle erased_mtype(THREAD, java_lang_invoke_MethodTypeForm::erasedType(mtform())); -- TempNewSymbol erased_signature -- = java_lang_invoke_MethodType::as_signature(erased_mtype(), /*intern:*/true, CHECK); -- methodOop cookie -- = SystemDictionary::find_method_handle_invoke(vmSymbols::invokeExact_name(), -- erased_signature, -- SystemDictionaryHandles::Object_klass(), -- THREAD); -- java_lang_invoke_MethodTypeForm::init_vmlayout(mtform(), cookie); -- } -- } -- assert(java_lang_invoke_MethodTypeForm::vmslots(mtform()) == argument_slot_count(mtype()), "must agree"); --} -- --#ifdef ASSERT -- --extern "C" --void print_method_handle(oop mh); -- --static void stress_method_handle_walk_impl(Handle mh, TRAPS) { -- if (StressMethodHandleWalk) { -- // Exercise the MethodHandleWalk code in various ways and validate -- // the resulting method oop. Some of these produce output so they -- // are guarded under Verbose. 
-- ResourceMark rm; -- HandleMark hm; -- if (Verbose) { -- print_method_handle(mh()); -- } -- TempNewSymbol name = SymbolTable::new_symbol("invoke", CHECK); -- Handle mt = java_lang_invoke_MethodHandle::type(mh()); -- TempNewSymbol signature = java_lang_invoke_MethodType::as_signature(mt(), true, CHECK); -- MethodHandleCompiler mhc(mh, name, signature, 10000, false, CHECK); -- methodHandle m = mhc.compile(CHECK); -- if (Verbose) { -- m->print_codes(); -- } -- InterpreterOopMap mask; -- OopMapCache::compute_one_oop_map(m, m->code_size() - 1, &mask); -- // compile to object code if -Xcomp or WizardMode -- if ((WizardMode || -- CompilationPolicy::must_be_compiled(m)) -- && !instanceKlass::cast(m->method_holder())->is_not_initialized() -- && CompilationPolicy::can_be_compiled(m)) { -- // Force compilation -- CompileBroker::compile_method(m, InvocationEntryBci, -- CompilationPolicy::policy()->initial_compile_level(), -- methodHandle(), 0, "StressMethodHandleWalk", -- CHECK); -- } -- } --} -- --static void stress_method_handle_walk(Handle mh, TRAPS) { -- stress_method_handle_walk_impl(mh, THREAD); -- if (HAS_PENDING_EXCEPTION) { -- oop ex = PENDING_EXCEPTION; -- CLEAR_PENDING_EXCEPTION; -- tty->print("StressMethodHandleWalk: "); -- java_lang_Throwable::print(ex, tty); -- tty->cr(); -- } --} --#else -- --static void stress_method_handle_walk(Handle mh, TRAPS) {} -- --#endif -- - // --// Here are the native methods on sun.invoke.MethodHandleImpl. -+// Here are the native methods in java.lang.invoke.MethodHandleNatives - // They are the private interface between this JVM and the HotSpot-specific - // Java code that implements JSR 292 method handles. - // - // Note: We use a JVM_ENTRY macro to define each of these, for this is the way - // that intrinsic (non-JNI) native methods are defined in HotSpot. 
- // -- --// direct method handles for invokestatic or invokespecial --// void init(DirectMethodHandle self, MemberName ref, boolean doDispatch, Class caller); --JVM_ENTRY(void, MHN_init_DMH(JNIEnv *env, jobject igcls, jobject mh_jh, -- jobject target_jh, jboolean do_dispatch, jobject caller_jh)) { -- ResourceMark rm; // for error messages -- -- // This is the guy we are initializing: -- if (mh_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "self is null"); } -- Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh)); -- -- // Early returns out of this method leave the DMH in an unfinished state. -- assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); -- -- // which method are we really talking about? -- if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); } -- Handle target(THREAD, JNIHandles::resolve_non_null(target_jh)); -- if (java_lang_invoke_MemberName::is_instance(target()) && -- java_lang_invoke_MemberName::vmindex(target()) == VM_INDEX_UNINITIALIZED) { -- MethodHandles::resolve_MemberName(target, CHECK); -- } -- -- KlassHandle receiver_limit; int decode_flags = 0; -- methodHandle m = MethodHandles::decode_method(target(), receiver_limit, decode_flags); -- if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "no such method"); } -- -- // The trusted Java code that calls this method should already have performed -- // access checks on behalf of the given caller. But, we can verify this. -- if (VerifyMethodHandles && caller_jh != NULL) { -- KlassHandle caller(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(caller_jh))); -- // If this were a bytecode, the first access check would be against -- // the "reference class" mentioned in the CONSTANT_Methodref. -- // We don't know at this point which class that was, and if we -- // check against m.method_holder we might get the wrong answer. 
-- // So we just make sure to handle this check when the resolution -- // happens, when we call resolve_MemberName. -- // -- // (A public class can inherit public members from private supers, -- // and it would be wrong to check access against the private super -- // if the original symbolic reference was against the public class.) -- // -- // If there were a bytecode, the next step would be to lookup the method -- // in the reference class, then then check the method's access bits. -- // Emulate LinkResolver::check_method_accessability. -- klassOop resolved_klass = m->method_holder(); -- if (!Reflection::verify_field_access(caller->as_klassOop(), -- resolved_klass, resolved_klass, -- m->access_flags(), -- true)) { -- // %%% following cutout belongs in Reflection::verify_field_access? -- bool same_pm = Reflection::is_same_package_member(caller->as_klassOop(), -- resolved_klass, THREAD); -- if (!same_pm) { -- THROW_MSG(vmSymbols::java_lang_InternalError(), m->name_and_sig_as_C_string()); -- } -- } -- } -- -- MethodHandles::init_DirectMethodHandle(mh, m, (do_dispatch != JNI_FALSE), CHECK); -- stress_method_handle_walk(mh, CHECK); --} --JVM_END -- --// bound method handles --JVM_ENTRY(void, MHN_init_BMH(JNIEnv *env, jobject igcls, jobject mh_jh, -- jobject target_jh, int argnum)) { -- ResourceMark rm; // for error messages -- -- // This is the guy we are initializing: -- if (mh_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "self is null"); } -- Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh)); -- -- // Early returns out of this method leave the BMH in an unfinished state. -- assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); -- -- if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); } -- Handle target(THREAD, JNIHandles::resolve_non_null(target_jh)); -- -- if (!java_lang_invoke_MethodHandle::is_instance(target())) { -- // Target object is a reflective method. 
(%%% Do we need this alternate path?) -- Untested("init_BMH of non-MH"); -- if (argnum != 0) { THROW(vmSymbols::java_lang_InternalError()); } -- KlassHandle receiver_limit; int decode_flags = 0; -- methodHandle m = MethodHandles::decode_method(target(), receiver_limit, decode_flags); -- MethodHandles::init_BoundMethodHandle_with_receiver(mh, m, -- receiver_limit, -- decode_flags, -- CHECK); -- } else { -- // Build a BMH on top of a DMH or another BMH: -- MethodHandles::init_BoundMethodHandle(mh, target, argnum, CHECK); -- } -- -- if (StressMethodHandleWalk) { -- if (mh->klass() == SystemDictionary::BoundMethodHandle_klass()) -- stress_method_handle_walk(mh, CHECK); -- // else don't, since the subclass has not yet initialized its own fields -- } --} --JVM_END -- --// adapter method handles --JVM_ENTRY(void, MHN_init_AMH(JNIEnv *env, jobject igcls, jobject mh_jh, -- jobject target_jh, int argnum)) { -- // This is the guy we are initializing: -- if (mh_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "self is null"); } -- if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); } -- Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh)); -- Handle target(THREAD, JNIHandles::resolve_non_null(target_jh)); -- -- // Early returns out of this method leave the AMH in an unfinished state. -- assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); -- -- MethodHandles::init_AdapterMethodHandle(mh, target, argnum, CHECK); -- stress_method_handle_walk(mh, CHECK); --} --JVM_END -- --// method type forms --JVM_ENTRY(void, MHN_init_MT(JNIEnv *env, jobject igcls, jobject erased_jh)) { -- if (erased_jh == NULL) return; -- if (TraceMethodHandles) { -- tty->print("creating MethodType form "); -- if (WizardMode || Verbose) { // Warning: this calls Java code on the MH! 
-- // call Object.toString() -- Symbol* name = vmSymbols::toString_name(); -- Symbol* sig = vmSymbols::void_string_signature(); -- JavaCallArguments args(Handle(THREAD, JNIHandles::resolve_non_null(erased_jh))); -- JavaValue result(T_OBJECT); -- JavaCalls::call_virtual(&result, SystemDictionary::Object_klass(), name, sig, -- &args, CHECK); -- Handle str(THREAD, (oop)result.get_jobject()); -- java_lang_String::print(str, tty); -- } -- tty->cr(); -- } --} --JVM_END -- --// debugging and reflection --JVM_ENTRY(jobject, MHN_getTarget(JNIEnv *env, jobject igcls, jobject mh_jh, jint format)) { -- Handle mh(THREAD, JNIHandles::resolve(mh_jh)); -- if (!java_lang_invoke_MethodHandle::is_instance(mh())) { -- THROW_NULL(vmSymbols::java_lang_IllegalArgumentException()); -- } -- oop target = MethodHandles::encode_target(mh, format, CHECK_NULL); -- return JNIHandles::make_local(THREAD, target); --} --JVM_END -- - JVM_ENTRY(jint, MHN_getConstant(JNIEnv *env, jobject igcls, jint which)) { - switch (which) { -- case MethodHandles::GC_JVM_PUSH_LIMIT: -- guarantee(MethodHandlePushLimit >= 2 && MethodHandlePushLimit <= 0xFF, -- "MethodHandlePushLimit parameter must be in valid range"); -- return MethodHandlePushLimit; -- case MethodHandles::GC_JVM_STACK_MOVE_UNIT: -- // return number of words per slot, signed according to stack direction -- return MethodHandles::stack_move_unit(); -- case MethodHandles::GC_CONV_OP_IMPLEMENTED_MASK: -- return MethodHandles::adapter_conversion_ops_supported_mask(); - case MethodHandles::GC_COUNT_GWT: - #ifdef COMPILER2 - return true; -@@ -2880,64 +952,54 @@ - JVM_END - - #ifndef PRODUCT --#define EACH_NAMED_CON(template) \ -- /* hold back this one until JDK stabilizes */ \ -- /* template(MethodHandles,GC_JVM_PUSH_LIMIT) */ \ -- /* hold back this one until JDK stabilizes */ \ -- /* template(MethodHandles,GC_JVM_STACK_MOVE_UNIT) */ \ -- /* hold back this one until JDK stabilizes */ \ -- /* template(MethodHandles,GC_OP_ROT_ARGS_DOWN_LIMIT_BIAS) */ \ -- 
template(MethodHandles,ETF_HANDLE_OR_METHOD_NAME) \ -- template(MethodHandles,ETF_DIRECT_HANDLE) \ -- template(MethodHandles,ETF_METHOD_NAME) \ -- template(MethodHandles,ETF_REFLECT_METHOD) \ -+#define EACH_NAMED_CON(template, requirement) \ -+ template(MethodHandles,GC_COUNT_GWT) \ - template(java_lang_invoke_MemberName,MN_IS_METHOD) \ - template(java_lang_invoke_MemberName,MN_IS_CONSTRUCTOR) \ - template(java_lang_invoke_MemberName,MN_IS_FIELD) \ - template(java_lang_invoke_MemberName,MN_IS_TYPE) \ - template(java_lang_invoke_MemberName,MN_SEARCH_SUPERCLASSES) \ - template(java_lang_invoke_MemberName,MN_SEARCH_INTERFACES) \ -- template(java_lang_invoke_MemberName,VM_INDEX_UNINITIALIZED) \ -- template(java_lang_invoke_AdapterMethodHandle,OP_RETYPE_ONLY) \ -- template(java_lang_invoke_AdapterMethodHandle,OP_RETYPE_RAW) \ -- template(java_lang_invoke_AdapterMethodHandle,OP_CHECK_CAST) \ -- template(java_lang_invoke_AdapterMethodHandle,OP_PRIM_TO_PRIM) \ -- template(java_lang_invoke_AdapterMethodHandle,OP_REF_TO_PRIM) \ -- template(java_lang_invoke_AdapterMethodHandle,OP_PRIM_TO_REF) \ -- template(java_lang_invoke_AdapterMethodHandle,OP_SWAP_ARGS) \ -- template(java_lang_invoke_AdapterMethodHandle,OP_ROT_ARGS) \ -- template(java_lang_invoke_AdapterMethodHandle,OP_DUP_ARGS) \ -- template(java_lang_invoke_AdapterMethodHandle,OP_DROP_ARGS) \ -- template(java_lang_invoke_AdapterMethodHandle,OP_COLLECT_ARGS) \ -- template(java_lang_invoke_AdapterMethodHandle,OP_SPREAD_ARGS) \ -- /* hold back this one until JDK stabilizes */ \ -- /*template(java_lang_invoke_AdapterMethodHandle,CONV_OP_LIMIT)*/ \ -- template(java_lang_invoke_AdapterMethodHandle,CONV_OP_MASK) \ -- template(java_lang_invoke_AdapterMethodHandle,CONV_VMINFO_MASK) \ -- template(java_lang_invoke_AdapterMethodHandle,CONV_VMINFO_SHIFT) \ -- template(java_lang_invoke_AdapterMethodHandle,CONV_OP_SHIFT) \ -- template(java_lang_invoke_AdapterMethodHandle,CONV_DEST_TYPE_SHIFT) \ -- 
template(java_lang_invoke_AdapterMethodHandle,CONV_SRC_TYPE_SHIFT) \ -- template(java_lang_invoke_AdapterMethodHandle,CONV_STACK_MOVE_SHIFT) \ -- template(java_lang_invoke_AdapterMethodHandle,CONV_STACK_MOVE_MASK) \ -+ template(java_lang_invoke_MemberName,MN_REFERENCE_KIND_SHIFT) \ -+ template(java_lang_invoke_MemberName,MN_REFERENCE_KIND_MASK) \ -+ template(MethodHandles,GC_LAMBDA_SUPPORT) \ - /*end*/ - -+#define IGNORE_REQ(req_expr) /* req_expr */ - #define ONE_PLUS(scope,value) 1+ --static const int con_value_count = EACH_NAMED_CON(ONE_PLUS) 0; -+static const int con_value_count = EACH_NAMED_CON(ONE_PLUS, IGNORE_REQ) 0; - #define VALUE_COMMA(scope,value) scope::value, --static const int con_values[con_value_count+1] = { EACH_NAMED_CON(VALUE_COMMA) 0 }; -+static const int con_values[con_value_count+1] = { EACH_NAMED_CON(VALUE_COMMA, IGNORE_REQ) 0 }; - #define STRING_NULL(scope,value) #value "\0" --static const char con_names[] = { EACH_NAMED_CON(STRING_NULL) }; -+static const char con_names[] = { EACH_NAMED_CON(STRING_NULL, IGNORE_REQ) }; -+ -+static bool advertise_con_value(int which) { -+ if (which < 0) return false; -+ bool ok = true; -+ int count = 0; -+#define INC_COUNT(scope,value) \ -+ ++count; -+#define CHECK_REQ(req_expr) \ -+ if (which < count) return ok; \ -+ ok = (req_expr); -+ EACH_NAMED_CON(INC_COUNT, CHECK_REQ); -+#undef INC_COUNT -+#undef CHECK_REQ -+ assert(count == con_value_count, ""); -+ if (which < count) return ok; -+ return false; -+} - - #undef ONE_PLUS - #undef VALUE_COMMA - #undef STRING_NULL - #undef EACH_NAMED_CON --#endif -+#endif // PRODUCT - - JVM_ENTRY(jint, MHN_getNamedCon(JNIEnv *env, jobject igcls, jint which, jobjectArray box_jh)) { - #ifndef PRODUCT -- if (which >= 0 && which < con_value_count) { -+ if (advertise_con_value(which)) { -+ assert(which >= 0 && which < con_value_count, ""); - int con = con_values[which]; - objArrayHandle box(THREAD, (objArrayOop) JNIHandles::resolve(box_jh)); - if (box.not_null() && box->klass() == 
Universe::objectArrayKlassObj() && box->length() > 0) { -@@ -2973,13 +1035,14 @@ - JVM_END - - // void resolve(MemberName self, Class caller) --JVM_ENTRY(void, MHN_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh)) { -- if (mname_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "mname is null"); } -+JVM_ENTRY(jobject, MHN_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh)) { -+ if (mname_jh == NULL) { THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "mname is null"); } - Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh)); - - // The trusted Java code that calls this method should already have performed - // access checks on behalf of the given caller. But, we can verify this. -- if (VerifyMethodHandles && caller_jh != NULL) { -+ if (VerifyMethodHandles && caller_jh != NULL && -+ java_lang_invoke_MemberName::clazz(mname()) != NULL) { - klassOop reference_klass = java_lang_Class::as_klassOop(java_lang_invoke_MemberName::clazz(mname())); - if (reference_klass != NULL) { - // Emulate LinkResolver::check_klass_accessability. 
-@@ -2987,15 +1050,97 @@ - if (!Reflection::verify_class_access(caller, - reference_klass, - true)) { -- THROW_MSG(vmSymbols::java_lang_InternalError(), Klass::cast(reference_klass)->external_name()); -+ THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), Klass::cast(reference_klass)->external_name()); - } - } - } - -- MethodHandles::resolve_MemberName(mname, CHECK); -+ Handle resolved = MethodHandles::resolve_MemberName(mname, CHECK_NULL); -+ if (resolved.is_null()) { -+ int flags = java_lang_invoke_MemberName::flags(mname()); -+ int ref_kind = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK; -+ if (!MethodHandles::ref_kind_is_valid(ref_kind)) { -+ THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "obsolete MemberName format"); -+ } -+ if ((flags & ALL_KINDS) == IS_FIELD) { -+ THROW_MSG_NULL(vmSymbols::java_lang_NoSuchMethodError(), "field resolution failed"); -+ } else if ((flags & ALL_KINDS) == IS_METHOD || -+ (flags & ALL_KINDS) == IS_CONSTRUCTOR) { -+ THROW_MSG_NULL(vmSymbols::java_lang_NoSuchFieldError(), "method resolution failed"); -+ } else { -+ THROW_MSG_NULL(vmSymbols::java_lang_LinkageError(), "resolution failed"); -+ } -+ } -+ -+ return JNIHandles::make_local(THREAD, resolved()); - } - JVM_END - -+static jlong find_member_field_offset(oop mname, bool must_be_static, TRAPS) { -+ if (mname == NULL || -+ java_lang_invoke_MemberName::vmtarget(mname) == NULL) { -+ THROW_MSG_0(vmSymbols::java_lang_InternalError(), "mname not resolved"); -+ } else { -+ int flags = java_lang_invoke_MemberName::flags(mname); -+ if ((flags & IS_FIELD) != 0 && -+ (must_be_static -+ ? (flags & JVM_ACC_STATIC) != 0 -+ : (flags & JVM_ACC_STATIC) == 0)) { -+ int vmindex = java_lang_invoke_MemberName::vmindex(mname); -+ return (jlong) vmindex; -+ } -+ } -+ const char* msg = (must_be_static ? 
"static field required" : "non-static field required"); -+ THROW_MSG_0(vmSymbols::java_lang_InternalError(), msg); -+ return 0; -+} -+ -+JVM_ENTRY(jlong, MHN_objectFieldOffset(JNIEnv *env, jobject igcls, jobject mname_jh)) { -+ return find_member_field_offset(JNIHandles::resolve(mname_jh), false, THREAD); -+} -+JVM_END -+ -+JVM_ENTRY(jlong, MHN_staticFieldOffset(JNIEnv *env, jobject igcls, jobject mname_jh)) { -+ return find_member_field_offset(JNIHandles::resolve(mname_jh), true, THREAD); -+} -+JVM_END -+ -+JVM_ENTRY(jobject, MHN_staticFieldBase(JNIEnv *env, jobject igcls, jobject mname_jh)) { -+ // use the other function to perform sanity checks: -+ jlong ignore = find_member_field_offset(JNIHandles::resolve(mname_jh), true, CHECK_NULL); -+ oop clazz = java_lang_invoke_MemberName::clazz(JNIHandles::resolve_non_null(mname_jh)); -+ return JNIHandles::make_local(THREAD, clazz); -+} -+JVM_END -+ -+JVM_ENTRY(jobject, MHN_getMemberVMInfo(JNIEnv *env, jobject igcls, jobject mname_jh)) { -+ if (mname_jh == NULL) return NULL; -+ Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh)); -+ intptr_t vmindex = java_lang_invoke_MemberName::vmindex(mname()); -+ Handle vmtarget = java_lang_invoke_MemberName::vmtarget(mname()); -+ objArrayHandle result = oopFactory::new_objArray(SystemDictionary::Object_klass(), 2, CHECK_NULL); -+ jvalue vmindex_value; vmindex_value.j = (long)vmindex; -+ oop x = java_lang_boxing_object::create(T_LONG, &vmindex_value, CHECK_NULL); -+ result->obj_at_put(0, x); -+ x = NULL; -+ if (vmtarget.is_null() || vmtarget->is_instance()) { -+ x = vmtarget(); -+ } else if (vmtarget->is_klass()) { -+ x = Klass::cast((klassOop) vmtarget())->java_mirror(); -+ } else { -+ Handle mname2 = MethodHandles::new_MemberName(CHECK_NULL); -+ if (vmtarget->is_method()) -+ x = MethodHandles::init_method_MemberName(mname2(), methodOop(vmtarget()), false, NULL); -+ else -+ x = MethodHandles::init_MemberName(mname2(), vmtarget()); -+ } -+ result->obj_at_put(1, x); -+ return 
JNIHandles::make_local(env, result()); -+} -+JVM_END -+ -+ -+ - // static native int getMembers(Class defc, String matchName, String matchSig, - // int matchFlags, Class caller, int skip, MemberName[] results); - JVM_ENTRY(jint, MHN_getMembers(JNIEnv *env, jobject igcls, -@@ -3061,45 +1206,6 @@ - } - JVM_END - --methodOop MethodHandles::resolve_raise_exception_method(TRAPS) { -- if (_raise_exception_method != NULL) { -- // no need to do it twice -- return raise_exception_method(); -- } -- // LinkResolver::resolve_invokedynamic can reach this point -- // because an invokedynamic has failed very early (7049415) -- KlassHandle MHN_klass = SystemDictionaryHandles::MethodHandleNatives_klass(); -- if (MHN_klass.not_null()) { -- TempNewSymbol raiseException_name = SymbolTable::new_symbol("raiseException", CHECK_NULL); -- TempNewSymbol raiseException_sig = SymbolTable::new_symbol("(ILjava/lang/Object;Ljava/lang/Object;)V", CHECK_NULL); -- methodOop raiseException_method = instanceKlass::cast(MHN_klass->as_klassOop()) -- ->find_method(raiseException_name, raiseException_sig); -- if (raiseException_method != NULL && raiseException_method->is_static()) { -- return raiseException_method; -- } -- } -- // not found; let the caller deal with it -- return NULL; --} --void MethodHandles::raise_exception(int code, oop actual, oop required, TRAPS) { -- methodOop raiseException_method = resolve_raise_exception_method(CHECK); -- if (raiseException_method != NULL && -- instanceKlass::cast(raiseException_method->method_holder())->is_not_initialized()) { -- instanceKlass::cast(raiseException_method->method_holder())->initialize(CHECK); -- // it had better be resolved by now, or maybe JSR 292 failed to load -- raiseException_method = raise_exception_method(); -- } -- if (raiseException_method == NULL) { -- THROW_MSG(vmSymbols::java_lang_InternalError(), "no raiseException method"); -- } -- JavaCallArguments args; -- args.push_int(code); -- args.push_oop(actual); -- args.push_oop(required); 
-- JavaValue result(T_VOID); -- JavaCalls::call(&result, raiseException_method, &args, CHECK); --} -- - JVM_ENTRY(jobject, MH_invoke_UOE(JNIEnv *env, jobject igmh, jobjectArray igargs)) { - TempNewSymbol UOE_name = SymbolTable::new_symbol("java/lang/UnsupportedOperationException", CHECK_NULL); - THROW_MSG_NULL(UOE_name, "MethodHandle.invoke cannot be invoked reflectively"); -@@ -3129,39 +1235,30 @@ - #define MT JLINV"MethodType;" - #define MH JLINV"MethodHandle;" - #define MEM JLINV"MemberName;" --#define AMH JLINV"AdapterMethodHandle;" --#define BMH JLINV"BoundMethodHandle;" --#define DMH JLINV"DirectMethodHandle;" - - #define CC (char*) /*cast a literal from (const char*)*/ - #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f) - - // These are the native methods on java.lang.invoke.MethodHandleNatives. --static JNINativeMethod methods[] = { -- // void init(MemberName self, AccessibleObject ref) -- {CC"init", CC"("AMH""MH"I)V", FN_PTR(MHN_init_AMH)}, -- {CC"init", CC"("BMH""OBJ"I)V", FN_PTR(MHN_init_BMH)}, -- {CC"init", CC"("DMH""OBJ"Z"CLS")V", FN_PTR(MHN_init_DMH)}, -- {CC"init", CC"("MT")V", FN_PTR(MHN_init_MT)}, -+static JNINativeMethod required_methods_JDK8[] = { - {CC"init", CC"("MEM""OBJ")V", FN_PTR(MHN_init_Mem)}, - {CC"expand", CC"("MEM")V", FN_PTR(MHN_expand_Mem)}, -- {CC"resolve", CC"("MEM""CLS")V", FN_PTR(MHN_resolve_Mem)}, -- {CC"getTarget", CC"("MH"I)"OBJ, FN_PTR(MHN_getTarget)}, -+ {CC"resolve", CC"("MEM""CLS")"MEM, FN_PTR(MHN_resolve_Mem)}, - {CC"getConstant", CC"(I)I", FN_PTR(MHN_getConstant)}, - // static native int getNamedCon(int which, Object[] name) - {CC"getNamedCon", CC"(I["OBJ")I", FN_PTR(MHN_getNamedCon)}, - // static native int getMembers(Class defc, String matchName, String matchSig, - // int matchFlags, Class caller, int skip, MemberName[] results); -- {CC"getMembers", CC"("CLS""STRG""STRG"I"CLS"I["MEM")I", FN_PTR(MHN_getMembers)} --}; -- --static JNINativeMethod call_site_methods[] = { -+ {CC"getMembers", 
CC"("CLS""STRG""STRG"I"CLS"I["MEM")I", FN_PTR(MHN_getMembers)}, -+ {CC"objectFieldOffset", CC"("MEM")J", FN_PTR(MHN_objectFieldOffset)}, - {CC"setCallSiteTargetNormal", CC"("CS""MH")V", FN_PTR(MHN_setCallSiteTargetNormal)}, -- {CC"setCallSiteTargetVolatile", CC"("CS""MH")V", FN_PTR(MHN_setCallSiteTargetVolatile)} -+ {CC"setCallSiteTargetVolatile", CC"("CS""MH")V", FN_PTR(MHN_setCallSiteTargetVolatile)}, -+ {CC"staticFieldOffset", CC"("MEM")J", FN_PTR(MHN_staticFieldOffset)}, -+ {CC"staticFieldBase", CC"("MEM")"OBJ, FN_PTR(MHN_staticFieldBase)}, -+ {CC"getMemberVMInfo", CC"("MEM")"OBJ, FN_PTR(MHN_getMemberVMInfo)} - }; - - static JNINativeMethod invoke_methods[] = { -- // void init(MemberName self, AccessibleObject ref) - {CC"invoke", CC"(["OBJ")"OBJ, FN_PTR(MH_invoke_UOE)}, - {CC"invokeExact", CC"(["OBJ")"OBJ, FN_PTR(MH_invokeExact_UOE)} - }; -@@ -3169,8 +1266,6 @@ - // This one function is exported, used by NativeLookup. - - JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class)) { -- assert(MethodHandles::spot_check_entry_names(), "entry enum is OK"); -- - if (!EnableInvokeDynamic) { - warning("JSR 292 is disabled in this JVM. 
Use -XX:+UnlockDiagnosticVMOptions -XX:+EnableInvokeDynamic to enable."); - return; // bind nothing -@@ -3179,36 +1274,32 @@ - assert(!MethodHandles::enabled(), "must not be enabled"); - bool enable_MH = true; - -- { -+ jclass MH_class = NULL; -+ if (SystemDictionary::MethodHandle_klass() == NULL) { -+ enable_MH = false; -+ } else { -+ oop mirror = Klass::cast(SystemDictionary::MethodHandle_klass())->java_mirror(); -+ MH_class = (jclass) JNIHandles::make_local(env, mirror); -+ } -+ -+ int status; -+ -+ if (enable_MH) { - ThreadToNativeFromVM ttnfv(thread); -- int status = env->RegisterNatives(MHN_class, methods, sizeof(methods)/sizeof(JNINativeMethod)); -- if (!env->ExceptionOccurred()) { -- const char* L_MH_name = (JLINV "MethodHandle"); -- const char* MH_name = L_MH_name+1; -- jclass MH_class = env->FindClass(MH_name); -+ -+ status = env->RegisterNatives(MHN_class, required_methods_JDK8, sizeof(required_methods_JDK8)/sizeof(JNINativeMethod)); -+ if (status == JNI_OK && !env->ExceptionOccurred()) { - status = env->RegisterNatives(MH_class, invoke_methods, sizeof(invoke_methods)/sizeof(JNINativeMethod)); - } -- if (env->ExceptionOccurred()) { -+ if (status != JNI_OK || env->ExceptionOccurred()) { - warning("JSR 292 method handle code is mismatched to this JVM. Disabling support."); - enable_MH = false; - env->ExceptionClear(); - } -- -- status = env->RegisterNatives(MHN_class, call_site_methods, sizeof(call_site_methods)/sizeof(JNINativeMethod)); -- if (env->ExceptionOccurred()) { -- // Exception is okay until 7087357 -- env->ExceptionClear(); -- } - } - -- if (enable_MH) { -- methodOop raiseException_method = MethodHandles::resolve_raise_exception_method(CHECK); -- if (raiseException_method != NULL) { -- MethodHandles::set_raise_exception_method(raiseException_method); -- } else { -- warning("JSR 292 method handle code is mismatched to this JVM. 
Disabling support."); -- enable_MH = false; -- } -+ if (TraceInvokeDynamic) { -+ tty->print_cr("MethodHandle support loaded (using LambdaForms)"); - } - - if (enable_MH) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/prims/methodHandles.hpp ---- openjdk/hotspot/src/share/vm/prims/methodHandles.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/prims/methodHandles.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -33,523 +33,36 @@ - - class MacroAssembler; - class Label; --class MethodHandleEntry; - - class MethodHandles: AllStatic { - // JVM support for MethodHandle, MethodType, and related types - // in java.lang.invoke and sun.invoke. - // See also javaClasses for layouts java_lang_invoke_Method{Handle,Type,Type::Form}. - public: -- enum EntryKind { -- _raise_exception, // stub for error generation from other stubs -- _invokestatic_mh, // how a MH emulates invokestatic -- _invokespecial_mh, // ditto for the other invokes... -- _invokevirtual_mh, -- _invokeinterface_mh, -- _bound_ref_mh, // reference argument is bound -- _bound_int_mh, // int argument is bound (via an Integer or Float) -- _bound_long_mh, // long argument is bound (via a Long or Double) -- _bound_ref_direct_mh, // same as above, with direct linkage to methodOop -- _bound_int_direct_mh, -- _bound_long_direct_mh, -- -- _adapter_mh_first, // adapter sequence goes here... 
-- _adapter_retype_only = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY, -- _adapter_retype_raw = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW, -- _adapter_check_cast = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST, -- _adapter_prim_to_prim = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM, -- _adapter_ref_to_prim = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM, -- _adapter_prim_to_ref = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF, -- _adapter_swap_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS, -- _adapter_rot_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS, -- _adapter_dup_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS, -- _adapter_drop_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS, -- _adapter_collect_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS, -- _adapter_spread_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS, -- _adapter_fold_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS, -- _adapter_unused_13 = _adapter_mh_first + 13, //hole in the CONV_OP enumeration -- _adapter_mh_last = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::CONV_OP_LIMIT - 1, -- -- // Optimized adapter types -- -- // argument list reordering -- _adapter_opt_swap_1, -- _adapter_opt_swap_2, -- _adapter_opt_rot_1_up, -- _adapter_opt_rot_1_down, -- _adapter_opt_rot_2_up, -- _adapter_opt_rot_2_down, -- // primitive single to single: -- _adapter_opt_i2i, // i2c, i2z, i2b, i2s -- // primitive double to single: -- _adapter_opt_l2i, -- _adapter_opt_d2f, -- // primitive single to double: -- _adapter_opt_i2l, -- _adapter_opt_f2d, -- // conversion between floating point and integer type is handled by Java -- -- // 
reference to primitive: -- _adapter_opt_unboxi, -- _adapter_opt_unboxl, -- -- // %% Maybe tame the following with a VM_SYMBOLS_DO type macro? -- -- // how a blocking adapter returns (platform-dependent) -- _adapter_opt_return_ref, -- _adapter_opt_return_int, -- _adapter_opt_return_long, -- _adapter_opt_return_float, -- _adapter_opt_return_double, -- _adapter_opt_return_void, -- _adapter_opt_return_S0_ref, // return ref to S=0 (last slot) -- _adapter_opt_return_S1_ref, // return ref to S=1 (2nd-to-last slot) -- _adapter_opt_return_S2_ref, -- _adapter_opt_return_S3_ref, -- _adapter_opt_return_S4_ref, -- _adapter_opt_return_S5_ref, -- _adapter_opt_return_any, // dynamically select r/i/l/f/d -- _adapter_opt_return_FIRST = _adapter_opt_return_ref, -- _adapter_opt_return_LAST = _adapter_opt_return_any, -- -- // spreading (array length cases 0, 1, ...) -- _adapter_opt_spread_0, // spread empty array to N=0 arguments -- _adapter_opt_spread_1_ref, // spread Object[] to N=1 argument -- _adapter_opt_spread_2_ref, // spread Object[] to N=2 arguments -- _adapter_opt_spread_3_ref, // spread Object[] to N=3 arguments -- _adapter_opt_spread_4_ref, // spread Object[] to N=4 arguments -- _adapter_opt_spread_5_ref, // spread Object[] to N=5 arguments -- _adapter_opt_spread_ref, // spread Object[] to N arguments -- _adapter_opt_spread_byte, // spread byte[] or boolean[] to N arguments -- _adapter_opt_spread_char, // spread char[], etc., to N arguments -- _adapter_opt_spread_short, // spread short[], etc., to N arguments -- _adapter_opt_spread_int, // spread int[], short[], etc., to N arguments -- _adapter_opt_spread_long, // spread long[] to N arguments -- _adapter_opt_spread_float, // spread float[] to N arguments -- _adapter_opt_spread_double, // spread double[] to N arguments -- _adapter_opt_spread_FIRST = _adapter_opt_spread_0, -- _adapter_opt_spread_LAST = _adapter_opt_spread_double, -- -- // blocking filter/collect conversions -- // These collect N arguments and replace them (at 
slot S) by a return value -- // which is passed to the final target, along with the unaffected arguments. -- // collect_{N}_{T} collects N arguments at any position into a T value -- // collect_{N}_S{S}_{T} collects N arguments at slot S into a T value -- // collect_{T} collects any number of arguments at any position -- // filter_S{S}_{T} is the same as collect_1_S{S}_{T} (a unary collection) -- // (collect_2 is also usable as a filter, with long or double arguments) -- _adapter_opt_collect_ref, // combine N arguments, replace with a reference -- _adapter_opt_collect_int, // combine N arguments, replace with an int, short, etc. -- _adapter_opt_collect_long, // combine N arguments, replace with a long -- _adapter_opt_collect_float, // combine N arguments, replace with a float -- _adapter_opt_collect_double, // combine N arguments, replace with a double -- _adapter_opt_collect_void, // combine N arguments, replace with nothing -- // if there is a small fixed number to push, do so without a loop: -- _adapter_opt_collect_0_ref, // collect N=0 arguments, insert a reference -- _adapter_opt_collect_1_ref, // collect N=1 argument, replace with a reference -- _adapter_opt_collect_2_ref, // combine N=2 arguments, replace with a reference -- _adapter_opt_collect_3_ref, // combine N=3 arguments, replace with a reference -- _adapter_opt_collect_4_ref, // combine N=4 arguments, replace with a reference -- _adapter_opt_collect_5_ref, // combine N=5 arguments, replace with a reference -- // filters are an important special case because they never move arguments: -- _adapter_opt_filter_S0_ref, // filter N=1 argument at S=0, replace with a reference -- _adapter_opt_filter_S1_ref, // filter N=1 argument at S=1, replace with a reference -- _adapter_opt_filter_S2_ref, // filter N=1 argument at S=2, replace with a reference -- _adapter_opt_filter_S3_ref, // filter N=1 argument at S=3, replace with a reference -- _adapter_opt_filter_S4_ref, // filter N=1 argument at S=4, replace with a 
reference -- _adapter_opt_filter_S5_ref, // filter N=1 argument at S=5, replace with a reference -- // these move arguments, but they are important for boxing -- _adapter_opt_collect_2_S0_ref, // combine last N=2 arguments, replace with a reference -- _adapter_opt_collect_2_S1_ref, // combine N=2 arguments at S=1, replace with a reference -- _adapter_opt_collect_2_S2_ref, // combine N=2 arguments at S=2, replace with a reference -- _adapter_opt_collect_2_S3_ref, // combine N=2 arguments at S=3, replace with a reference -- _adapter_opt_collect_2_S4_ref, // combine N=2 arguments at S=4, replace with a reference -- _adapter_opt_collect_2_S5_ref, // combine N=2 arguments at S=5, replace with a reference -- _adapter_opt_collect_FIRST = _adapter_opt_collect_ref, -- _adapter_opt_collect_LAST = _adapter_opt_collect_2_S5_ref, -- -- // blocking folding conversions -- // these are like collects, but retain all the N arguments for the final target -- //_adapter_opt_fold_0_ref, // same as _adapter_opt_collect_0_ref -- // fold_{N}_{T} processes N arguments at any position into a T value, which it inserts -- // fold_{T} processes any number of arguments at any position -- _adapter_opt_fold_ref, // process N arguments, prepend a reference -- _adapter_opt_fold_int, // process N arguments, prepend an int, short, etc. 
-- _adapter_opt_fold_long, // process N arguments, prepend a long -- _adapter_opt_fold_float, // process N arguments, prepend a float -- _adapter_opt_fold_double, // process N arguments, prepend a double -- _adapter_opt_fold_void, // process N arguments, but leave the list unchanged -- _adapter_opt_fold_1_ref, // process N=1 argument, prepend a reference -- _adapter_opt_fold_2_ref, // process N=2 arguments, prepend a reference -- _adapter_opt_fold_3_ref, // process N=3 arguments, prepend a reference -- _adapter_opt_fold_4_ref, // process N=4 arguments, prepend a reference -- _adapter_opt_fold_5_ref, // process N=5 arguments, prepend a reference -- _adapter_opt_fold_FIRST = _adapter_opt_fold_ref, -- _adapter_opt_fold_LAST = _adapter_opt_fold_5_ref, -- -- _adapter_opt_profiling, -- -- _EK_LIMIT, -- _EK_FIRST = 0 -- }; -- - public: - static bool enabled() { return _enabled; } - static void set_enabled(bool z); - - private: -- enum { // import java_lang_invoke_AdapterMethodHandle::CONV_OP_* -- CONV_OP_LIMIT = java_lang_invoke_AdapterMethodHandle::CONV_OP_LIMIT, -- CONV_OP_MASK = java_lang_invoke_AdapterMethodHandle::CONV_OP_MASK, -- CONV_TYPE_MASK = java_lang_invoke_AdapterMethodHandle::CONV_TYPE_MASK, -- CONV_VMINFO_MASK = java_lang_invoke_AdapterMethodHandle::CONV_VMINFO_MASK, -- CONV_VMINFO_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_VMINFO_SHIFT, -- CONV_OP_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_OP_SHIFT, -- CONV_DEST_TYPE_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_DEST_TYPE_SHIFT, -- CONV_SRC_TYPE_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_SRC_TYPE_SHIFT, -- CONV_STACK_MOVE_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_STACK_MOVE_SHIFT, -- CONV_STACK_MOVE_MASK = java_lang_invoke_AdapterMethodHandle::CONV_STACK_MOVE_MASK -- }; -- - static bool _enabled; -- static MethodHandleEntry* _entries[_EK_LIMIT]; -- static const char* _entry_names[_EK_LIMIT+1]; -- static jobject _raise_exception_method; -- static address 
_adapter_return_handlers[CONV_TYPE_MASK+1]; - - // Adapters. - static MethodHandlesAdapterBlob* _adapter_code; - -- static bool ek_valid(EntryKind ek) { return (uint)ek < (uint)_EK_LIMIT; } -- static bool conv_op_valid(int op) { return (uint)op < (uint)CONV_OP_LIMIT; } -- -- public: -- static bool have_entry(EntryKind ek) { return ek_valid(ek) && _entries[ek] != NULL; } -- static MethodHandleEntry* entry(EntryKind ek) { assert(ek_valid(ek), "initialized"); -- return _entries[ek]; } -- static const char* entry_name(EntryKind ek) { assert(ek_valid(ek), "oob"); -- return _entry_names[ek]; } -- static EntryKind adapter_entry_kind(int op) { assert(conv_op_valid(op), "oob"); -- return EntryKind(_adapter_mh_first + op); } -- -- static void init_entry(EntryKind ek, MethodHandleEntry* me) { -- assert(ek_valid(ek), "oob"); -- assert(_entries[ek] == NULL, "no double initialization"); -- _entries[ek] = me; -- } -- -- // Some adapter helper functions. -- static EntryKind ek_original_kind(EntryKind ek) { -- if (ek <= _adapter_mh_last) return ek; -- switch (ek) { -- case _adapter_opt_swap_1: -- case _adapter_opt_swap_2: -- return _adapter_swap_args; -- case _adapter_opt_rot_1_up: -- case _adapter_opt_rot_1_down: -- case _adapter_opt_rot_2_up: -- case _adapter_opt_rot_2_down: -- return _adapter_rot_args; -- case _adapter_opt_i2i: -- case _adapter_opt_l2i: -- case _adapter_opt_d2f: -- case _adapter_opt_i2l: -- case _adapter_opt_f2d: -- return _adapter_prim_to_prim; -- case _adapter_opt_unboxi: -- case _adapter_opt_unboxl: -- return _adapter_ref_to_prim; -- } -- if (ek >= _adapter_opt_spread_FIRST && ek <= _adapter_opt_spread_LAST) -- return _adapter_spread_args; -- if (ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST) -- return _adapter_collect_args; -- if (ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST) -- return _adapter_fold_args; -- if (ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST) -- return _adapter_opt_return_any; -- if 
(ek == _adapter_opt_profiling) -- return _adapter_retype_only; -- assert(false, "oob"); -- return _EK_LIMIT; -- } -- -- static bool ek_supported(MethodHandles::EntryKind ek); -- -- static BasicType ek_bound_mh_arg_type(EntryKind ek) { -- switch (ek) { -- case _bound_int_mh : // fall-thru -- case _bound_int_direct_mh : return T_INT; -- case _bound_long_mh : // fall-thru -- case _bound_long_direct_mh : return T_LONG; -- default : return T_OBJECT; -- } -- } -- -- static int ek_adapter_opt_swap_slots(EntryKind ek) { -- switch (ek) { -- case _adapter_opt_swap_1 : return 1; -- case _adapter_opt_swap_2 : return 2; -- case _adapter_opt_rot_1_up : return 1; -- case _adapter_opt_rot_1_down : return 1; -- case _adapter_opt_rot_2_up : return 2; -- case _adapter_opt_rot_2_down : return 2; -- default : ShouldNotReachHere(); return -1; -- } -- } -- -- static int ek_adapter_opt_swap_mode(EntryKind ek) { -- switch (ek) { -- case _adapter_opt_swap_1 : return 0; -- case _adapter_opt_swap_2 : return 0; -- case _adapter_opt_rot_1_up : return 1; -- case _adapter_opt_rot_1_down : return -1; -- case _adapter_opt_rot_2_up : return 1; -- case _adapter_opt_rot_2_down : return -1; -- default : ShouldNotReachHere(); return 0; -- } -- } -- -- static int ek_adapter_opt_collect_count(EntryKind ek) { -- assert(ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST || -- ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST, ""); -- switch (ek) { -- case _adapter_opt_collect_0_ref : return 0; -- case _adapter_opt_filter_S0_ref : -- case _adapter_opt_filter_S1_ref : -- case _adapter_opt_filter_S2_ref : -- case _adapter_opt_filter_S3_ref : -- case _adapter_opt_filter_S4_ref : -- case _adapter_opt_filter_S5_ref : -- case _adapter_opt_fold_1_ref : -- case _adapter_opt_collect_1_ref : return 1; -- case _adapter_opt_collect_2_S0_ref : -- case _adapter_opt_collect_2_S1_ref : -- case _adapter_opt_collect_2_S2_ref : -- case _adapter_opt_collect_2_S3_ref : -- case 
_adapter_opt_collect_2_S4_ref : -- case _adapter_opt_collect_2_S5_ref : -- case _adapter_opt_fold_2_ref : -- case _adapter_opt_collect_2_ref : return 2; -- case _adapter_opt_fold_3_ref : -- case _adapter_opt_collect_3_ref : return 3; -- case _adapter_opt_fold_4_ref : -- case _adapter_opt_collect_4_ref : return 4; -- case _adapter_opt_fold_5_ref : -- case _adapter_opt_collect_5_ref : return 5; -- default : return -1; // sentinel value for "variable" -- } -- } -- -- static int ek_adapter_opt_collect_slot(EntryKind ek) { -- assert(ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST || -- ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST, ""); -- switch (ek) { -- case _adapter_opt_collect_2_S0_ref : -- case _adapter_opt_filter_S0_ref : return 0; -- case _adapter_opt_collect_2_S1_ref : -- case _adapter_opt_filter_S1_ref : return 1; -- case _adapter_opt_collect_2_S2_ref : -- case _adapter_opt_filter_S2_ref : return 2; -- case _adapter_opt_collect_2_S3_ref : -- case _adapter_opt_filter_S3_ref : return 3; -- case _adapter_opt_collect_2_S4_ref : -- case _adapter_opt_filter_S4_ref : return 4; -- case _adapter_opt_collect_2_S5_ref : -- case _adapter_opt_filter_S5_ref : return 5; -- default : return -1; // sentinel value for "variable" -- } -- } -- -- static BasicType ek_adapter_opt_collect_type(EntryKind ek) { -- assert(ek >= _adapter_opt_collect_FIRST && ek <= _adapter_opt_collect_LAST || -- ek >= _adapter_opt_fold_FIRST && ek <= _adapter_opt_fold_LAST, ""); -- switch (ek) { -- case _adapter_opt_fold_int : -- case _adapter_opt_collect_int : return T_INT; -- case _adapter_opt_fold_long : -- case _adapter_opt_collect_long : return T_LONG; -- case _adapter_opt_fold_float : -- case _adapter_opt_collect_float : return T_FLOAT; -- case _adapter_opt_fold_double : -- case _adapter_opt_collect_double : return T_DOUBLE; -- case _adapter_opt_fold_void : -- case _adapter_opt_collect_void : return T_VOID; -- default : return T_OBJECT; -- } -- } -- -- static 
int ek_adapter_opt_return_slot(EntryKind ek) { -- assert(ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST, ""); -- switch (ek) { -- case _adapter_opt_return_S0_ref : return 0; -- case _adapter_opt_return_S1_ref : return 1; -- case _adapter_opt_return_S2_ref : return 2; -- case _adapter_opt_return_S3_ref : return 3; -- case _adapter_opt_return_S4_ref : return 4; -- case _adapter_opt_return_S5_ref : return 5; -- default : return -1; // sentinel value for "variable" -- } -- } -- -- static BasicType ek_adapter_opt_return_type(EntryKind ek) { -- assert(ek >= _adapter_opt_return_FIRST && ek <= _adapter_opt_return_LAST, ""); -- switch (ek) { -- case _adapter_opt_return_int : return T_INT; -- case _adapter_opt_return_long : return T_LONG; -- case _adapter_opt_return_float : return T_FLOAT; -- case _adapter_opt_return_double : return T_DOUBLE; -- case _adapter_opt_return_void : return T_VOID; -- case _adapter_opt_return_any : return T_CONFLICT; // sentinel value for "variable" -- default : return T_OBJECT; -- } -- } -- -- static int ek_adapter_opt_spread_count(EntryKind ek) { -- assert(ek >= _adapter_opt_spread_FIRST && ek <= _adapter_opt_spread_LAST, ""); -- switch (ek) { -- case _adapter_opt_spread_0 : return 0; -- case _adapter_opt_spread_1_ref : return 1; -- case _adapter_opt_spread_2_ref : return 2; -- case _adapter_opt_spread_3_ref : return 3; -- case _adapter_opt_spread_4_ref : return 4; -- case _adapter_opt_spread_5_ref : return 5; -- default : return -1; // sentinel value for "variable" -- } -- } -- -- static BasicType ek_adapter_opt_spread_type(EntryKind ek) { -- assert(ek >= _adapter_opt_spread_FIRST && ek <= _adapter_opt_spread_LAST, ""); -- switch (ek) { -- // (there is no _adapter_opt_spread_boolean; we use byte) -- case _adapter_opt_spread_byte : return T_BYTE; -- case _adapter_opt_spread_char : return T_CHAR; -- case _adapter_opt_spread_short : return T_SHORT; -- case _adapter_opt_spread_int : return T_INT; -- case _adapter_opt_spread_long : 
return T_LONG; -- case _adapter_opt_spread_float : return T_FLOAT; -- case _adapter_opt_spread_double : return T_DOUBLE; -- default : return T_OBJECT; -- } -- } -- -- static methodOop raise_exception_method() { -- oop rem = JNIHandles::resolve(_raise_exception_method); -- assert(rem == NULL || rem->is_method(), ""); -- return (methodOop) rem; -- } -- static void set_raise_exception_method(methodOop rem) { -- assert(_raise_exception_method == NULL, ""); -- _raise_exception_method = JNIHandles::make_global(Handle(rem)); -- } -- static methodOop resolve_raise_exception_method(TRAPS); -- // call raise_exception_method from C code: -- static void raise_exception(int code, oop actual, oop required, TRAPS); -- -- static jint adapter_conversion(int conv_op, BasicType src, BasicType dest, -- int stack_move = 0, int vminfo = 0) { -- assert(conv_op_valid(conv_op), "oob"); -- jint conv = ((conv_op << CONV_OP_SHIFT) -- | (src << CONV_SRC_TYPE_SHIFT) -- | (dest << CONV_DEST_TYPE_SHIFT) -- | (stack_move << CONV_STACK_MOVE_SHIFT) -- | (vminfo << CONV_VMINFO_SHIFT) -- ); -- assert(adapter_conversion_op(conv) == conv_op, "decode conv_op"); -- assert(adapter_conversion_src_type(conv) == src, "decode src"); -- assert(adapter_conversion_dest_type(conv) == dest, "decode dest"); -- assert(adapter_conversion_stack_move(conv) == stack_move, "decode stack_move"); -- assert(adapter_conversion_vminfo(conv) == vminfo, "decode vminfo"); -- return conv; -- } -- static int adapter_conversion_op(jint conv) { -- return ((conv >> CONV_OP_SHIFT) & 0xF); -- } -- static BasicType adapter_conversion_src_type(jint conv) { -- return (BasicType)((conv >> CONV_SRC_TYPE_SHIFT) & 0xF); -- } -- static BasicType adapter_conversion_dest_type(jint conv) { -- return (BasicType)((conv >> CONV_DEST_TYPE_SHIFT) & 0xF); -- } -- static int adapter_conversion_stack_move(jint conv) { -- return (conv >> CONV_STACK_MOVE_SHIFT); -- } -- static int adapter_conversion_vminfo(jint conv) { -- return (conv >> CONV_VMINFO_SHIFT) 
& CONV_VMINFO_MASK; -- } -- -- // Bit mask of conversion_op values. May vary by platform. -- static int adapter_conversion_ops_supported_mask(); -- -- static bool conv_op_supported(int conv_op) { -- assert(conv_op_valid(conv_op), ""); -- return ((adapter_conversion_ops_supported_mask() & nth_bit(conv_op)) != 0); -- } -- -- // Offset in words that the interpreter stack pointer moves when an argument is pushed. -- // The stack_move value must always be a multiple of this. -- static int stack_move_unit() { -- return frame::interpreter_frame_expression_stack_direction() * Interpreter::stackElementWords; -- } -- -- // Adapter frame traversal. (Implementation-specific.) -- static frame ricochet_frame_sender(const frame& fr, RegisterMap* reg_map); -- static void ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map); -- -- enum { CONV_VMINFO_SIGN_FLAG = 0x80 }; -- // Shift values for prim-to-prim conversions. -- static int adapter_prim_to_prim_subword_vminfo(BasicType dest) { -- if (dest == T_BOOLEAN) return (BitsPerInt - 1); // boolean is 1 bit -- if (dest == T_CHAR) return (BitsPerInt - BitsPerShort); -- if (dest == T_BYTE) return (BitsPerInt - BitsPerByte ) | CONV_VMINFO_SIGN_FLAG; -- if (dest == T_SHORT) return (BitsPerInt - BitsPerShort) | CONV_VMINFO_SIGN_FLAG; -- return 0; // case T_INT -- } -- // Shift values for unboxing a primitive. 
-- static int adapter_unbox_subword_vminfo(BasicType dest) { -- if (dest == T_BOOLEAN) return (BitsPerInt - BitsPerByte ); // implemented as 1 byte -- if (dest == T_CHAR) return (BitsPerInt - BitsPerShort); -- if (dest == T_BYTE) return (BitsPerInt - BitsPerByte ) | CONV_VMINFO_SIGN_FLAG; -- if (dest == T_SHORT) return (BitsPerInt - BitsPerShort) | CONV_VMINFO_SIGN_FLAG; -- return 0; // case T_INT -- } -- // Here is the transformation the i2i adapter must perform: -- static int truncate_subword_from_vminfo(jint value, int vminfo) { -- int shift = vminfo & ~CONV_VMINFO_SIGN_FLAG; -- jint tem = value << shift; -- if ((vminfo & CONV_VMINFO_SIGN_FLAG) != 0) { -- return (jint)tem >> shift; -- } else { -- return (juint)tem >> shift; -- } -- } -- -- static inline address from_compiled_entry(EntryKind ek); -- static inline address from_interpreted_entry(EntryKind ek); -- -- // helpers for decode_method. -- static methodOop decode_methodOop(methodOop m, int& decode_flags_result); -- static methodHandle decode_vmtarget(oop vmtarget, int vmindex, oop mtype, KlassHandle& receiver_limit_result, int& decode_flags_result); -- static methodHandle decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result); -- static methodHandle decode_MethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result); -- static methodHandle decode_DirectMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result); -- static methodHandle decode_BoundMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result); -- static methodHandle decode_AdapterMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result); -- -- // Find out how many stack slots an mh pushes or pops. -- // The result is *not* reported as a multiple of stack_move_unit(); -- // It is a signed net number of pushes (a difference in vmslots). -- // To compare with a stack_move value, first multiply by stack_move_unit(). 
-- static int decode_MethodHandle_stack_pushes(oop mh); -- - public: - // working with member names -- static void resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type -+ static Handle resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type - static void expand_MemberName(Handle mname, int suppress, TRAPS); // expand defc/name/type if missing - static Handle new_MemberName(TRAPS); // must be followed by init_MemberName -- static void init_MemberName(oop mname_oop, oop target); // compute vmtarget/vmindex from target -- static void init_MemberName(oop mname_oop, methodOop m, bool do_dispatch = true); -- static void init_MemberName(oop mname_oop, klassOop field_holder, AccessFlags mods, int offset); -+ static oop init_MemberName(oop mname_oop, oop target_oop); // compute vmtarget/vmindex from target -+ static oop init_method_MemberName(oop mname_oop, methodOop m, bool do_dispatch, -+ klassOop receiver_limit); -+ static oop init_field_MemberName(oop mname_oop, klassOop field_holder, -+ AccessFlags mods, oop type, oop name, -+ intptr_t offset, bool is_setter = false); -+ static Handle init_method_MemberName(oop mname_oop, CallInfo& info, TRAPS); -+ static Handle init_field_MemberName(oop mname_oop, FieldAccessInfo& info, TRAPS); -+ static int method_ref_kind(methodOop m, bool do_dispatch_if_possible = true); - static int find_MemberNames(klassOop k, Symbol* name, Symbol* sig, - int mflags, klassOop caller, - int skip, objArrayOop results); -@@ -559,169 +72,113 @@ - // Generate MethodHandles adapters. - static void generate_adapters(); - -- // Called from InterpreterGenerator and MethodHandlesAdapterGenerator. -- static address generate_method_handle_interpreter_entry(MacroAssembler* _masm); -- static void generate_method_handle_stub(MacroAssembler* _masm, EntryKind ek); -+ // Called from MethodHandlesAdapterGenerator. 
-+ static address generate_method_handle_interpreter_entry(MacroAssembler* _masm, vmIntrinsics::ID iid); -+ static void generate_method_handle_dispatch(MacroAssembler* _masm, -+ vmIntrinsics::ID iid, -+ Register receiver_reg, -+ Register member_reg, -+ bool for_compiler_entry); - -- // argument list parsing -- static int argument_slot(oop method_type, int arg); -- static int argument_slot_count(oop method_type) { return argument_slot(method_type, -1); } -- static int argument_slot_to_argnum(oop method_type, int argslot); -+ // Queries -+ static bool is_signature_polymorphic(vmIntrinsics::ID iid) { -+ return (iid >= vmIntrinsics::FIRST_MH_SIG_POLY && -+ iid <= vmIntrinsics::LAST_MH_SIG_POLY); -+ } - -- // Runtime support -- enum { // bit-encoded flags from decode_method or decode_vmref -- _dmf_has_receiver = 0x01, // target method has leading reference argument -- _dmf_does_dispatch = 0x02, // method handle performs virtual or interface dispatch -- _dmf_from_interface = 0x04, // peforms interface dispatch -- _DMF_DIRECT_MASK = (_dmf_from_interface*2 - _dmf_has_receiver), -- _dmf_binds_method = 0x08, -- _dmf_binds_argument = 0x10, -- _DMF_BOUND_MASK = (_dmf_binds_argument*2 - _dmf_binds_method), -- _dmf_adapter_lsb = 0x20, -- _DMF_ADAPTER_MASK = (_dmf_adapter_lsb << CONV_OP_LIMIT) - _dmf_adapter_lsb -- }; -- static methodHandle decode_method(oop x, KlassHandle& receiver_limit_result, int& decode_flags_result); -+ static bool is_signature_polymorphic_intrinsic(vmIntrinsics::ID iid) { -+ assert(is_signature_polymorphic(iid), ""); -+ // Most sig-poly methods are intrinsics which do not require an -+ // appeal to Java for adapter code. 
-+ return (iid != vmIntrinsics::_invokeGeneric); -+ } -+ -+ static bool is_signature_polymorphic_static(vmIntrinsics::ID iid) { -+ assert(is_signature_polymorphic(iid), ""); -+ return (iid >= vmIntrinsics::FIRST_MH_STATIC && -+ iid <= vmIntrinsics::LAST_MH_SIG_POLY); -+ } -+ -+ static bool has_member_arg(vmIntrinsics::ID iid) { -+ assert(is_signature_polymorphic(iid), ""); -+ return (iid >= vmIntrinsics::_linkToVirtual && -+ iid <= vmIntrinsics::_linkToInterface); -+ } -+ static bool has_member_arg(Symbol* klass, Symbol* name) { -+ if ((klass == vmSymbols::java_lang_invoke_MethodHandle()) && -+ is_signature_polymorphic_name(name)) { -+ vmIntrinsics::ID iid = signature_polymorphic_name_id(name); -+ return has_member_arg(iid); -+ } -+ return false; -+ } -+ -+ static Symbol* signature_polymorphic_intrinsic_name(vmIntrinsics::ID iid); -+ static int signature_polymorphic_intrinsic_ref_kind(vmIntrinsics::ID iid); -+ -+ static vmIntrinsics::ID signature_polymorphic_name_id(klassOop klass, Symbol* name); -+ static vmIntrinsics::ID signature_polymorphic_name_id(Symbol* name); -+ static bool is_signature_polymorphic_name(Symbol* name) { -+ return signature_polymorphic_name_id(name) != vmIntrinsics::_none; -+ } -+ static bool is_method_handle_invoke_name(klassOop klass, Symbol* name); -+ static bool is_signature_polymorphic_name(klassOop klass, Symbol* name) { -+ return signature_polymorphic_name_id(klass, name) != vmIntrinsics::_none; -+ } -+ - enum { - // format of query to getConstant: -- GC_JVM_PUSH_LIMIT = 0, -- GC_JVM_STACK_MOVE_UNIT = 1, -- GC_CONV_OP_IMPLEMENTED_MASK = 2, -- GC_OP_ROT_ARGS_DOWN_LIMIT_BIAS = 3, - GC_COUNT_GWT = 4, -- -- // format of result from getTarget / encode_target: -- ETF_HANDLE_OR_METHOD_NAME = 0, // all available data (immediate MH or method) -- ETF_DIRECT_HANDLE = 1, // ultimate method handle (will be a DMH, may be self) -- ETF_METHOD_NAME = 2, // ultimate method as MemberName -- ETF_REFLECT_METHOD = 3, // ultimate method as java.lang.reflect 
object (sans refClass) -- ETF_FORCE_DIRECT_HANDLE = 64, -- ETF_COMPILE_DIRECT_HANDLE = 65, -- -- // ad hoc constants -- OP_ROT_ARGS_DOWN_LIMIT_BIAS = -1 -+ GC_LAMBDA_SUPPORT = 5 - }; - static int get_named_constant(int which, Handle name_box, TRAPS); -- static oop encode_target(Handle mh, int format, TRAPS); // report vmtarget (to Java code) -- static bool class_cast_needed(klassOop src, klassOop dst); - -- static instanceKlassHandle resolve_instance_klass(oop java_mirror_oop, TRAPS); -- static instanceKlassHandle resolve_instance_klass(jclass java_mirror_jh, TRAPS) { -- return resolve_instance_klass(JNIHandles::resolve(java_mirror_jh), THREAD); -+public: -+ static Symbol* lookup_signature(oop type_str, bool polymorphic, TRAPS); // use TempNewSymbol -+ static Symbol* lookup_basic_type_signature(Symbol* sig, bool keep_last_arg, TRAPS); // use TempNewSymbol -+ static Symbol* lookup_basic_type_signature(Symbol* sig, TRAPS) { -+ return lookup_basic_type_signature(sig, false, THREAD); -+ } -+ static bool is_basic_type_signature(Symbol* sig); -+ -+ static Symbol* lookup_method_type(Symbol* msig, Handle mtype, TRAPS); -+ -+ static void print_as_method_type_on(outputStream* st, Symbol* sig) { -+ print_as_basic_type_signature_on(st, sig, true, true); -+ } -+ static void print_as_basic_type_signature_on(outputStream* st, Symbol* sig, bool keep_arrays = false, bool keep_basic_names = false); -+ -+ // decoding CONSTANT_MethodHandle constants -+ enum { JVM_REF_MIN = JVM_REF_getField, JVM_REF_MAX = JVM_REF_invokeInterface }; -+ static bool ref_kind_is_valid(int ref_kind) { -+ return (ref_kind >= JVM_REF_MIN && ref_kind <= JVM_REF_MAX); -+ } -+ static bool ref_kind_is_field(int ref_kind) { -+ assert(ref_kind_is_valid(ref_kind), ""); -+ return (ref_kind <= JVM_REF_putStatic); -+ } -+ static bool ref_kind_is_getter(int ref_kind) { -+ assert(ref_kind_is_valid(ref_kind), ""); -+ return (ref_kind <= JVM_REF_getStatic); -+ } -+ static bool ref_kind_is_setter(int ref_kind) { -+ return 
ref_kind_is_field(ref_kind) && !ref_kind_is_getter(ref_kind); -+ } -+ static bool ref_kind_is_method(int ref_kind) { -+ return !ref_kind_is_field(ref_kind) && (ref_kind != JVM_REF_newInvokeSpecial); -+ } -+ static bool ref_kind_has_receiver(int ref_kind) { -+ assert(ref_kind_is_valid(ref_kind), ""); -+ return (ref_kind & 1) != 0; -+ } -+ static bool ref_kind_is_static(int ref_kind) { -+ return !ref_kind_has_receiver(ref_kind) && (ref_kind != JVM_REF_newInvokeSpecial); -+ } -+ static bool ref_kind_does_dispatch(int ref_kind) { -+ return (ref_kind == JVM_REF_invokeVirtual || -+ ref_kind == JVM_REF_invokeInterface); - } - -- private: -- // These checkers operate on a pair of whole MethodTypes: -- static const char* check_method_type_change(oop src_mtype, int src_beg, int src_end, -- int insert_argnum, oop insert_type, -- int change_argnum, oop change_type, -- int delete_argnum, -- oop dst_mtype, int dst_beg, int dst_end, -- bool raw = false); -- static const char* check_method_type_insertion(oop src_mtype, -- int insert_argnum, oop insert_type, -- oop dst_mtype) { -- oop no_ref = NULL; -- return check_method_type_change(src_mtype, 0, -1, -- insert_argnum, insert_type, -- -1, no_ref, -1, dst_mtype, 0, -1); -- } -- static const char* check_method_type_conversion(oop src_mtype, -- int change_argnum, oop change_type, -- oop dst_mtype) { -- oop no_ref = NULL; -- return check_method_type_change(src_mtype, 0, -1, -1, no_ref, -- change_argnum, change_type, -- -1, dst_mtype, 0, -1); -- } -- static const char* check_method_type_passthrough(oop src_mtype, oop dst_mtype, bool raw) { -- oop no_ref = NULL; -- return check_method_type_change(src_mtype, 0, -1, -- -1, no_ref, -1, no_ref, -1, -- dst_mtype, 0, -1, raw); -- } -- -- // These checkers operate on pairs of argument or return types: -- static const char* check_argument_type_change(BasicType src_type, klassOop src_klass, -- BasicType dst_type, klassOop dst_klass, -- int argnum, bool raw = false); -- -- static const char* 
check_argument_type_change(oop src_type, oop dst_type, -- int argnum, bool raw = false) { -- klassOop src_klass = NULL, dst_klass = NULL; -- BasicType src_bt = java_lang_Class::as_BasicType(src_type, &src_klass); -- BasicType dst_bt = java_lang_Class::as_BasicType(dst_type, &dst_klass); -- return check_argument_type_change(src_bt, src_klass, -- dst_bt, dst_klass, argnum, raw); -- } -- -- static const char* check_return_type_change(oop src_type, oop dst_type, bool raw = false) { -- return check_argument_type_change(src_type, dst_type, -1, raw); -- } -- -- static const char* check_return_type_change(BasicType src_type, klassOop src_klass, -- BasicType dst_type, klassOop dst_klass) { -- return check_argument_type_change(src_type, src_klass, dst_type, dst_klass, -1); -- } -- -- static const char* check_method_receiver(methodOop m, klassOop passed_recv_type); -- -- // These verifiers can block, and will throw an error if the checking fails: -- static void verify_vmslots(Handle mh, TRAPS); -- static void verify_vmargslot(Handle mh, int argnum, int argslot, TRAPS); -- -- static void verify_method_type(methodHandle m, Handle mtype, -- bool has_bound_oop, -- KlassHandle bound_oop_type, -- TRAPS); -- -- static void verify_method_signature(methodHandle m, Handle mtype, -- int first_ptype_pos, -- KlassHandle insert_ptype, TRAPS); -- -- static void verify_DirectMethodHandle(Handle mh, methodHandle m, TRAPS); -- static void verify_BoundMethodHandle(Handle mh, Handle target, int argnum, -- bool direct_to_method, TRAPS); -- static void verify_BoundMethodHandle_with_receiver(Handle mh, methodHandle m, TRAPS); -- static void verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS); -- -- public: -- -- // Fill in the fields of a DirectMethodHandle mh. (MH.type must be pre-filled.) -- static void init_DirectMethodHandle(Handle mh, methodHandle method, bool do_dispatch, TRAPS); -- -- // Fill in the fields of a BoundMethodHandle mh. (MH.type, BMH.argument must be pre-filled.) 
-- static void init_BoundMethodHandle(Handle mh, Handle target, int argnum, TRAPS); -- static void init_BoundMethodHandle_with_receiver(Handle mh, -- methodHandle original_m, -- KlassHandle receiver_limit, -- int decode_flags, -- TRAPS); -- -- // Fill in the fields of an AdapterMethodHandle mh. (MH.type must be pre-filled.) -- static void init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS); -- static void ensure_vmlayout_field(Handle target, TRAPS); -- --#ifdef ASSERT -- static bool spot_check_entry_names(); --#endif -- -- private: -- static methodHandle dispatch_decoded_method(methodHandle m, -- KlassHandle receiver_limit, -- int decode_flags, -- KlassHandle receiver_klass, -- TRAPS); -- --public: -- static bool is_float_fixed_reinterpretation_cast(BasicType src, BasicType dst); -- static bool same_basic_type_for_arguments(BasicType src, BasicType dst, -- bool raw = false, -- bool for_return = false); -- static bool same_basic_type_for_returns(BasicType src, BasicType dst, bool raw = false) { -- return same_basic_type_for_arguments(src, dst, raw, true); -- } -- -- static Symbol* convert_to_signature(oop type_str, bool polymorphic, TRAPS); - - #ifdef TARGET_ARCH_x86 - # include "methodHandles_x86.hpp" -@@ -738,61 +195,6 @@ - #ifdef TARGET_ARCH_ppc - # include "methodHandles_ppc.hpp" - #endif --}; -- -- --// Access methods for the "entry" field of a java.lang.invoke.MethodHandle. --// The field is primarily a jump target for compiled calls. --// However, we squirrel away some nice pointers for other uses, --// just before the jump target. 
--// Aspects of a method handle entry: --// - from_compiled_entry - stub used when compiled code calls the MH --// - from_interpreted_entry - stub used when the interpreter calls the MH --// - type_checking_entry - stub for runtime casting between MHForm siblings (NYI) --class MethodHandleEntry { -- public: -- class Data { -- friend class MethodHandleEntry; -- size_t _total_size; // size including Data and code stub -- MethodHandleEntry* _type_checking_entry; -- address _from_interpreted_entry; -- MethodHandleEntry* method_entry() { return (MethodHandleEntry*)(this + 1); } -- }; -- -- Data* data() { return (Data*)this - 1; } -- -- address start_address() { return (address) data(); } -- address end_address() { return start_address() + data()->_total_size; } -- -- address from_compiled_entry() { return (address) this; } -- -- address from_interpreted_entry() { return data()->_from_interpreted_entry; } -- void set_from_interpreted_entry(address e) { data()->_from_interpreted_entry = e; } -- -- MethodHandleEntry* type_checking_entry() { return data()->_type_checking_entry; } -- void set_type_checking_entry(MethodHandleEntry* e) { data()->_type_checking_entry = e; } -- -- void set_end_address(address end_addr) { -- size_t total_size = end_addr - start_address(); -- assert(total_size > 0 && total_size < 0x1000, "reasonable end address"); -- data()->_total_size = total_size; -- } -- -- // Compiler support: -- static int from_interpreted_entry_offset_in_bytes() { -- return (int)( offset_of(Data, _from_interpreted_entry) - sizeof(Data) ); -- } -- static int type_checking_entry_offset_in_bytes() { -- return (int)( offset_of(Data, _from_interpreted_entry) - sizeof(Data) ); -- } -- -- static address start_compiled_entry(MacroAssembler* _masm, -- address interpreted_entry = NULL); -- static MethodHandleEntry* finish_compiled_entry(MacroAssembler* masm, address start_addr); --}; -- --address MethodHandles::from_compiled_entry(EntryKind ek) { return 
entry(ek)->from_compiled_entry(); } --address MethodHandles::from_interpreted_entry(EntryKind ek) { return entry(ek)->from_interpreted_entry(); } - - - //------------------------------------------------------------------------------ -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/prims/unsafe.cpp ---- openjdk/hotspot/src/share/vm/prims/unsafe.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/prims/unsafe.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -779,16 +779,33 @@ - return JNIHandles::make_local(env, JNIHandles::resolve_non_null(clazz)); - UNSAFE_END - --UNSAFE_ENTRY(void, Unsafe_EnsureClassInitialized(JNIEnv *env, jobject unsafe, jobject clazz)) -+UNSAFE_ENTRY(void, Unsafe_EnsureClassInitialized(JNIEnv *env, jobject unsafe, jobject clazz)) { - UnsafeWrapper("Unsafe_EnsureClassInitialized"); - if (clazz == NULL) { - THROW(vmSymbols::java_lang_NullPointerException()); - } - oop mirror = JNIHandles::resolve_non_null(clazz); -- instanceKlass* k = instanceKlass::cast(java_lang_Class::as_klassOop(mirror)); -- if (k != NULL) { -+ -+ klassOop klass = java_lang_Class::as_klassOop(mirror); -+ if (klass != NULL && Klass::cast(klass)->should_be_initialized()) { -+ instanceKlass* k = instanceKlass::cast(klass); - k->initialize(CHECK); - } -+} -+UNSAFE_END -+ -+UNSAFE_ENTRY(jboolean, Unsafe_ShouldBeInitialized(JNIEnv *env, jobject unsafe, jobject clazz)) { -+ UnsafeWrapper("Unsafe_ShouldBeInitialized"); -+ if (clazz == NULL) { -+ THROW_(vmSymbols::java_lang_NullPointerException(), false); -+ } -+ oop mirror = JNIHandles::resolve_non_null(clazz); -+ klassOop klass = java_lang_Class::as_klassOop(mirror); -+ if (klass != NULL && Klass::cast(klass)->should_be_initialized()) { -+ return true; -+ } -+ return false; -+} - UNSAFE_END - - static void getBaseAndScale(int& base, int& scale, jclass acls, TRAPS) { -@@ -1584,6 +1601,10 @@ - {CC"defineAnonymousClass", CC"("DAC_Args")"CLS, FN_PTR(Unsafe_DefineAnonymousClass)}, - }; - -+JNINativeMethod lform_methods[] = { -+ 
{CC"shouldBeInitialized",CC"("CLS")Z", FN_PTR(Unsafe_ShouldBeInitialized)}, -+}; -+ - #undef CC - #undef FN_PTR - -@@ -1654,6 +1675,15 @@ - env->ExceptionClear(); - } - } -+ if (EnableInvokeDynamic) { -+ env->RegisterNatives(unsafecls, lform_methods, sizeof(lform_methods)/sizeof(JNINativeMethod)); -+ if (env->ExceptionOccurred()) { -+ if (PrintMiscellaneous && (Verbose || WizardMode)) { -+ tty->print_cr("Warning: SDK 1.7 LambdaForm support in Unsafe not found."); -+ } -+ env->ExceptionClear(); -+ } -+ } - int status = env->RegisterNatives(unsafecls, methods, sizeof(methods)/sizeof(JNINativeMethod)); - if (env->ExceptionOccurred()) { - if (PrintMiscellaneous && (Verbose || WizardMode)) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/arguments.cpp ---- openjdk/hotspot/src/share/vm/runtime/arguments.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/arguments.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -3057,15 +3057,6 @@ - } - #endif // PRODUCT - -- // Transitional -- if (EnableMethodHandles || AnonymousClasses) { -- if (!EnableInvokeDynamic && !FLAG_IS_DEFAULT(EnableInvokeDynamic)) { -- warning("EnableMethodHandles and AnonymousClasses are obsolete. 
Keeping EnableInvokeDynamic disabled."); -- } else { -- EnableInvokeDynamic = true; -- } -- } -- - // JSR 292 is not supported before 1.7 - if (!JDK_Version::is_gte_jdk17x_version()) { - if (EnableInvokeDynamic) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/deoptimization.cpp ---- openjdk/hotspot/src/share/vm/runtime/deoptimization.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/deoptimization.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -388,7 +388,7 @@ - if (deopt_sender.is_interpreted_frame()) { - methodHandle method = deopt_sender.interpreter_frame_method(); - Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci()); -- if (cur.is_method_handle_invoke()) { -+ if (cur.is_invokedynamic() || cur.is_invokehandle()) { - // Method handle invokes may involve fairly arbitrary chains of - // calls so it's impossible to know how much actual space the - // caller has for locals. -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/fieldDescriptor.hpp ---- openjdk/hotspot/src/share/vm/runtime/fieldDescriptor.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/fieldDescriptor.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -115,6 +115,7 @@ - void initialize(klassOop k, int index); - - // Print -+ void print() { print_on(tty); } - void print_on(outputStream* st) const PRODUCT_RETURN; - void print_on_for(outputStream* st, oop obj) PRODUCT_RETURN; - }; -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/fprofiler.cpp ---- openjdk/hotspot/src/share/vm/runtime/fprofiler.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/fprofiler.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -337,11 +337,13 @@ - char c = (char) n->byte_at(i); - st->print("%c", c); - } -- if( Verbose ) { -+ if (Verbose || WizardMode) { - // Disambiguate overloaded methods - Symbol* sig = m->signature(); - sig->print_symbol_on(st); -- } -+ } else if 
(MethodHandles::is_signature_polymorphic(m->intrinsic_id())) -+ // compare with methodOopDesc::print_short_name -+ MethodHandles::print_as_basic_type_signature_on(st, m->signature(), true); - } - - virtual void print(outputStream* st, int total_ticks) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/frame.cpp ---- openjdk/hotspot/src/share/vm/runtime/frame.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/frame.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -170,11 +170,9 @@ - } - - // type testers --bool frame::is_ricochet_frame() const { -- RicochetBlob* rcb = SharedRuntime::ricochet_blob(); -- return (_cb == rcb && rcb != NULL && rcb->returns_to_bounce_addr(_pc)); -+bool frame::is_ignored_frame() const { -+ return false; // FIXME: some LambdaForm frames should be ignored - } -- - bool frame::is_deoptimized_frame() const { - assert(_deopt_state != unknown, "not answerable"); - return _deopt_state == is_deoptimized; -@@ -348,17 +346,12 @@ - frame frame::real_sender(RegisterMap* map) const { - frame result = sender(map); - while (result.is_runtime_frame() || -- result.is_ricochet_frame()) { -+ result.is_ignored_frame()) { - result = result.sender(map); - } - return result; - } - --frame frame::sender_for_ricochet_frame(RegisterMap* map) const { -- assert(is_ricochet_frame(), ""); -- return MethodHandles::ricochet_frame_sender(*this, map); --} -- - // Note: called by profiler - NOT for current thread - frame frame::profile_find_Java_sender_frame(JavaThread *thread) { - // If we don't recognize this frame, walk back up the stack until we do -@@ -541,7 +534,6 @@ - const char* frame::print_name() const { - if (is_native_frame()) return "Native"; - if (is_interpreted_frame()) return "Interpreted"; -- if (is_ricochet_frame()) return "Ricochet"; - if (is_compiled_frame()) { - if (is_deoptimized_frame()) return "Deoptimized"; - return "Compiled"; -@@ -728,8 +720,6 @@ - st->print("v ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name()); - } else if 
(_cb->is_deoptimization_stub()) { - st->print("v ~DeoptimizationBlob"); -- } else if (_cb->is_ricochet_stub()) { -- st->print("v ~RichochetBlob"); - } else if (_cb->is_exception_stub()) { - st->print("v ~ExceptionBlob"); - } else if (_cb->is_safepoint_stub()) { -@@ -993,9 +983,6 @@ - - void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) { - assert(_cb != NULL, "sanity check"); -- if (_cb == SharedRuntime::ricochet_blob()) { -- oops_ricochet_do(f, reg_map); -- } - if (_cb->oop_maps() != NULL) { - OopMapSet::oops_do(this, reg_map, f); - -@@ -1014,11 +1001,6 @@ - cf->do_code_blob(_cb); - } - --void frame::oops_ricochet_do(OopClosure* f, const RegisterMap* map) { -- assert(is_ricochet_frame(), ""); -- MethodHandles::ricochet_frame_oops_do(*this, f, map); --} -- - class CompiledArgumentOopFinder: public SignatureInfo { - protected: - OopClosure* _f; -@@ -1087,7 +1069,7 @@ - // First consult the ADLC on where it puts parameter 0 for this signature. - VMReg reg = SharedRuntime::name_for_receiver(); - oop r = *caller.oopmapreg_to_location(reg, reg_map); -- assert( Universe::heap()->is_in_or_null(r), "bad receiver" ); -+ assert(Universe::heap()->is_in_or_null(r), err_msg("bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", (intptr_t) r, (intptr_t) r)); - return r; - } - -@@ -1407,8 +1389,6 @@ - values.describe(-1, info_address, - FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no, - nm, nm->method()->name_and_sig_as_C_string()), 2); -- } else if (is_ricochet_frame()) { -- values.describe(-1, info_address, err_msg("#%d ricochet frame", frame_no), 2); - } else { - // provide default info if not handled before - char *info = (char *) "special frame"; -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/frame.hpp ---- openjdk/hotspot/src/share/vm/runtime/frame.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/frame.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -135,7 +135,7 @@ - 
bool is_interpreted_frame() const; - bool is_java_frame() const; - bool is_entry_frame() const; // Java frame called from C? -- bool is_ricochet_frame() const; -+ bool is_ignored_frame() const; - bool is_native_frame() const; - bool is_runtime_frame() const; - bool is_compiled_frame() const; -@@ -176,7 +176,6 @@ - // Helper methods for better factored code in frame::sender - frame sender_for_compiled_frame(RegisterMap* map) const; - frame sender_for_entry_frame(RegisterMap* map) const; -- frame sender_for_ricochet_frame(RegisterMap* map) const; - frame sender_for_interpreter_frame(RegisterMap* map) const; - frame sender_for_native_frame(RegisterMap* map) const; - -@@ -415,7 +414,6 @@ - // Oops-do's - void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f); - void oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache = true); -- void oops_ricochet_do(OopClosure* f, const RegisterMap* map); - - private: - void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/globals.hpp ---- openjdk/hotspot/src/share/vm/runtime/globals.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/globals.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -931,6 +931,9 @@ - diagnostic(bool, PrintAdapterHandlers, false, \ - "Print code generated for i2c/c2i adapters") \ - \ -+ diagnostic(bool, VerifyAdapterCalls, trueInDebug, \ -+ "Verify that i2c/c2i adapters are called properly") \ -+ \ - develop(bool, VerifyAdapterSharing, false, \ - "Verify that the code for shared adapters is the equivalent") \ - \ -@@ -3836,12 +3839,6 @@ - product(bool, AnonymousClasses, false, \ - "support sun.misc.Unsafe.defineAnonymousClass (deprecated)") \ - \ -- experimental(bool, EnableMethodHandles, false, \ -- "support method handles (deprecated)") \ -- \ -- diagnostic(intx, MethodHandlePushLimit, 3, \ -- "number of 
additional stack slots a method handle may push") \ -- \ - diagnostic(bool, PrintMethodHandleStubs, false, \ - "Print generated stub code for method handles") \ - \ -@@ -3851,19 +3848,12 @@ - diagnostic(bool, VerifyMethodHandles, trueInDebug, \ - "perform extra checks when constructing method handles") \ - \ -- diagnostic(bool, OptimizeMethodHandles, true, \ -- "when constructing method handles, try to improve them") \ -- \ -- develop(bool, StressMethodHandleWalk, false, \ -- "Process all method handles with MethodHandleWalk") \ -+ diagnostic(bool, ShowHiddenFrames, false, \ -+ "show method handle implementation frames (usually hidden)") \ - \ - experimental(bool, TrustFinalNonStaticFields, false, \ - "trust final non-static declarations for constant folding") \ - \ -- experimental(bool, AllowInvokeGeneric, false, \ -- "accept MethodHandle.invoke and MethodHandle.invokeGeneric " \ -- "as equivalent methods") \ -- \ - develop(bool, TraceInvokeDynamic, false, \ - "trace internal invoke dynamic operations") \ - \ -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/os.cpp ---- openjdk/hotspot/src/share/vm/runtime/os.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/os.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -807,7 +807,7 @@ - // the interpreter is generated into a buffer blob - InterpreterCodelet* i = Interpreter::codelet_containing(addr); - if (i != NULL) { -- st->print_cr(INTPTR_FORMAT " is an Interpreter codelet", addr); -+ st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", addr, (int)(addr - i->code_begin())); - i->print_on(st); - return; - } -@@ -818,14 +818,15 @@ - } - // - if (AdapterHandlerLibrary::contains(b)) { -- st->print_cr(INTPTR_FORMAT " is an AdapterHandler", addr); -+ st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", addr, (int)(addr - b->code_begin())); - AdapterHandlerLibrary::print_handler_on(st, b); - } - // the stubroutines are generated into a buffer blob - 
StubCodeDesc* d = StubCodeDesc::desc_for(addr); - if (d != NULL) { -+ st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", addr, (int)(addr - d->begin())); - d->print_on(st); -- if (verbose) st->cr(); -+ st->cr(); - return; - } - if (StubRoutines::contains(addr)) { -@@ -840,26 +841,25 @@ - } - VtableStub* v = VtableStubs::stub_containing(addr); - if (v != NULL) { -+ st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", addr, (int)(addr - v->entry_point())); - v->print_on(st); -+ st->cr(); - return; - } - } -- if (verbose && b->is_nmethod()) { -+ nmethod* nm = b->as_nmethod_or_null(); -+ if (nm != NULL) { - ResourceMark rm; -- st->print("%#p: Compiled ", addr); -- ((nmethod*)b)->method()->print_value_on(st); -- st->print(" = (CodeBlob*)" INTPTR_FORMAT, b); -- st->cr(); -+ st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT, -+ addr, (int)(addr - nm->entry_point()), nm); -+ if (verbose) { -+ st->print(" for "); -+ nm->method()->print_value_on(st); -+ } -+ nm->print_nmethod(verbose); - return; - } -- st->print(INTPTR_FORMAT " ", b); -- if ( b->is_nmethod()) { -- if (b->is_zombie()) { -- st->print_cr("is zombie nmethod"); -- } else if (b->is_not_entrant()) { -- st->print_cr("is non-entrant nmethod"); -- } -- } -+ st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", addr, (int)(addr - b->code_begin())); - b->print_on(st); - return; - } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/reflection.cpp ---- openjdk/hotspot/src/share/vm/runtime/reflection.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/reflection.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -36,7 +36,6 @@ - #include "oops/objArrayKlass.hpp" - #include "oops/objArrayOop.hpp" - #include "prims/jvm.h" --#include "prims/methodHandleWalk.hpp" - #include "runtime/arguments.hpp" - #include "runtime/handles.inline.hpp" - #include "runtime/javaCalls.hpp" -@@ -502,11 +501,6 @@ - under_host_klass(accessee_ik, accessor)) - return true; - -- // 
Adapter frames can access anything. -- if (MethodHandleCompiler::klass_is_method_handle_adapter_holder(accessor)) -- // This is an internal adapter frame from the MethodHandleCompiler. -- return true; -- - if (RelaxAccessControlCheck || - (accessor_ik->major_version() < JAVA_1_5_VERSION && - accessee_ik->major_version() < JAVA_1_5_VERSION)) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/sharedRuntime.cpp ---- openjdk/hotspot/src/share/vm/runtime/sharedRuntime.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/sharedRuntime.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -88,8 +88,6 @@ - RuntimeStub* SharedRuntime::_resolve_static_call_blob; - - DeoptimizationBlob* SharedRuntime::_deopt_blob; --RicochetBlob* SharedRuntime::_ricochet_blob; -- - SafepointBlob* SharedRuntime::_polling_page_safepoint_handler_blob; - SafepointBlob* SharedRuntime::_polling_page_return_handler_blob; - -@@ -109,7 +107,6 @@ - _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), false); - _polling_page_return_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), true); - -- generate_ricochet_blob(); - generate_deopt_blob(); - - #ifdef COMPILER2 -@@ -117,33 +114,6 @@ - #endif // COMPILER2 - } - --//----------------------------generate_ricochet_blob--------------------------- --void SharedRuntime::generate_ricochet_blob() { -- if (!EnableInvokeDynamic) return; // leave it as a null -- -- // allocate space for the code -- ResourceMark rm; -- // setup code generation tools -- CodeBuffer buffer("ricochet_blob", 256 LP64_ONLY(+ 256), 256); // XXX x86 LP64L: 512, 512 -- MacroAssembler* masm = new MacroAssembler(&buffer); -- -- int bounce_offset = -1, exception_offset = -1, frame_size_in_words = -1; -- MethodHandles::RicochetFrame::generate_ricochet_blob(masm, &bounce_offset, &exception_offset, &frame_size_in_words); -- 
-- // ------------- -- // make sure all code is generated -- masm->flush(); -- -- // failed to generate? -- if (bounce_offset < 0 || exception_offset < 0 || frame_size_in_words < 0) { -- assert(false, "bad ricochet blob"); -- return; -- } -- -- _ricochet_blob = RicochetBlob::create(&buffer, bounce_offset, exception_offset, frame_size_in_words); --} -- -- - #include - - #ifndef USDT2 -@@ -527,10 +497,6 @@ - if (Interpreter::contains(return_address)) { - return Interpreter::rethrow_exception_entry(); - } -- // Ricochet frame unwind code -- if (SharedRuntime::ricochet_blob() != NULL && SharedRuntime::ricochet_blob()->returns_to_bounce_addr(return_address)) { -- return SharedRuntime::ricochet_blob()->exception_addr(); -- } - - guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub"); - guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!"); -@@ -768,13 +734,6 @@ - throw_and_post_jvmti_exception(thread, exception); - JRT_END - --JRT_ENTRY(void, SharedRuntime::throw_WrongMethodTypeException(JavaThread* thread, oopDesc* required, oopDesc* actual)) -- assert(thread == JavaThread::current() && required->is_oop() && actual->is_oop(), "bad args"); -- ResourceMark rm; -- char* message = SharedRuntime::generate_wrong_method_type_message(thread, required, actual); -- throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_invoke_WrongMethodTypeException(), message); --JRT_END -- - address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread, - address pc, - SharedRuntime::ImplicitExceptionKind exception_kind) -@@ -857,6 +816,12 @@ - return StubRoutines::throw_NullPointerException_at_call_entry(); - } - -+ if (nm->method()->is_method_handle_intrinsic()) { -+ // exception happened inside MH dispatch code, similar to a vtable stub -+ Events::log_exception(thread, "NullPointerException in MH adapter " INTPTR_FORMAT, pc); -+ return 
StubRoutines::throw_NullPointerException_at_call_entry(); -+ } -+ - #ifndef PRODUCT - _implicit_null_throws++; - #endif -@@ -1045,16 +1010,17 @@ - assert(!vfst.at_end(), "Java frame must exist"); - - // Find caller and bci from vframe -- methodHandle caller (THREAD, vfst.method()); -- int bci = vfst.bci(); -+ methodHandle caller(THREAD, vfst.method()); -+ int bci = vfst.bci(); - - // Find bytecode - Bytecode_invoke bytecode(caller, bci); -- bc = bytecode.java_code(); -+ bc = bytecode.invoke_code(); - int bytecode_index = bytecode.index(); - - // Find receiver for non-static call -- if (bc != Bytecodes::_invokestatic) { -+ if (bc != Bytecodes::_invokestatic && -+ bc != Bytecodes::_invokedynamic) { - // This register map must be update since we need to find the receiver for - // compiled frames. The receiver might be in a register. - RegisterMap reg_map2(thread); -@@ -1075,25 +1041,32 @@ - } - - // Resolve method. This is parameterized by bytecode. -- constantPoolHandle constants (THREAD, caller->constants()); -- assert (receiver.is_null() || receiver->is_oop(), "wrong receiver"); -+ constantPoolHandle constants(THREAD, caller->constants()); -+ assert(receiver.is_null() || receiver->is_oop(), "wrong receiver"); - LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle)); - - #ifdef ASSERT - // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls - if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) { - assert(receiver.not_null(), "should have thrown exception"); -- KlassHandle receiver_klass (THREAD, receiver->klass()); -+ KlassHandle receiver_klass(THREAD, receiver->klass()); - klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle)); - // klass is already loaded -- KlassHandle static_receiver_klass (THREAD, rk); -- assert(receiver_klass->is_subtype_of(static_receiver_klass()), "actual receiver must be subclass of static receiver klass"); -+ 
KlassHandle static_receiver_klass(THREAD, rk); -+ // Method handle invokes might have been optimized to a direct call -+ // so don't check for the receiver class. -+ // FIXME this weakens the assert too much -+ methodHandle callee = callinfo.selected_method(); -+ assert(receiver_klass->is_subtype_of(static_receiver_klass()) || -+ callee->is_method_handle_intrinsic() || -+ callee->is_compiled_lambda_form(), -+ "actual receiver must be subclass of static receiver klass"); - if (receiver_klass->oop_is_instance()) { - if (instanceKlass::cast(receiver_klass())->is_not_initialized()) { - tty->print_cr("ERROR: Klass not yet initialized!!"); - receiver_klass.print(); - } -- assert (!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized"); -+ assert(!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized"); - } - } - #endif -@@ -1186,8 +1159,10 @@ - call_info, CHECK_(methodHandle())); - methodHandle callee_method = call_info.selected_method(); - -- assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) || -- ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode"); -+ assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) || -+ (!is_virtual && invoke_code == Bytecodes::_invokehandle ) || -+ (!is_virtual && invoke_code == Bytecodes::_invokedynamic) || -+ ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode"); - - #ifndef PRODUCT - // tracing/debugging/statistics -@@ -1202,16 +1177,17 @@ - (is_optimized) ? "optimized " : "", (is_virtual) ? 
"virtual" : "static", - Bytecodes::name(invoke_code)); - callee_method->print_short_name(tty); -- tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code()); -+ tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT, caller_frame.pc(), callee_method->code()); - } - #endif - -- // JSR 292 -+ // JSR 292 key invariant: - // If the resolved method is a MethodHandle invoke target the call -- // site must be a MethodHandle call site. -- if (callee_method->is_method_handle_invoke()) { -- assert(caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site"); -- } -+ // site must be a MethodHandle call site, because the lambda form might tail-call -+ // leaving the stack in a state unknown to either caller or callee -+ // TODO detune for now but we might need it again -+// assert(!callee_method->is_compiled_lambda_form() || -+// caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site"); - - // Compute entry points. This might require generation of C2I converter - // frames, so we cannot be holding any locks here. Furthermore, the -@@ -1284,7 +1260,6 @@ - assert(stub_frame.is_runtime_frame(), "sanity check"); - frame caller_frame = stub_frame.sender(®_map); - assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame"); -- assert(!caller_frame.is_ricochet_frame(), "unexpected frame"); - #endif /* ASSERT */ - - methodHandle callee_method; -@@ -1320,21 +1295,9 @@ - address sender_pc = caller_frame.pc(); - CodeBlob* sender_cb = caller_frame.cb(); - nmethod* sender_nm = sender_cb->as_nmethod_or_null(); -- bool is_mh_invoke_via_adapter = false; // Direct c2c call or via adapter? -- if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) { -- // If the callee_target is set, then we have come here via an i2c -- // adapter. 
-- methodOop callee = thread->callee_target(); -- if (callee != NULL) { -- assert(callee->is_method(), "sanity"); -- is_mh_invoke_via_adapter = true; -- } -- } - - if (caller_frame.is_interpreted_frame() || -- caller_frame.is_entry_frame() || -- caller_frame.is_ricochet_frame() || -- is_mh_invoke_via_adapter) { -+ caller_frame.is_entry_frame()) { - methodOop callee = thread->callee_target(); - guarantee(callee != NULL && callee->is_method(), "bad handshake"); - thread->set_vm_result(callee); -@@ -1677,12 +1640,6 @@ - // Get the return PC for the passed caller PC. - address return_pc = caller_pc + frame::pc_return_offset; - -- // Don't fixup method handle call sites as the executed method -- // handle adapters are doing the required MethodHandle chain work. -- if (nm->is_method_handle_return(return_pc)) { -- return; -- } -- - // There is a benign race here. We could be attempting to patch to a compiled - // entry point at the same time the callee is being deoptimized. If that is - // the case then entry_point may in fact point to a c2i and we'd patch the -@@ -1788,97 +1745,6 @@ - return generate_class_cast_message(objName, targetKlass->external_name()); - } - --char* SharedRuntime::generate_wrong_method_type_message(JavaThread* thread, -- oopDesc* required, -- oopDesc* actual) { -- if (TraceMethodHandles) { -- tty->print_cr("WrongMethodType thread="PTR_FORMAT" req="PTR_FORMAT" act="PTR_FORMAT"", -- thread, required, actual); -- } -- assert(EnableInvokeDynamic, ""); -- oop singleKlass = wrong_method_type_is_for_single_argument(thread, required); -- char* message = NULL; -- if (singleKlass != NULL) { -- const char* objName = "argument or return value"; -- if (actual != NULL) { -- // be flexible about the junk passed in: -- klassOop ak = (actual->is_klass() -- ? (klassOop)actual -- : actual->klass()); -- objName = Klass::cast(ak)->external_name(); -- } -- Klass* targetKlass = Klass::cast(required->is_klass() -- ? 
(klassOop)required -- : java_lang_Class::as_klassOop(required)); -- message = generate_class_cast_message(objName, targetKlass->external_name()); -- } else { -- // %%% need to get the MethodType string, without messing around too much -- const char* desc = NULL; -- // Get a signature from the invoke instruction -- const char* mhName = "method handle"; -- const char* targetType = "the required signature"; -- int targetArity = -1, mhArity = -1; -- vframeStream vfst(thread, true); -- if (!vfst.at_end()) { -- Bytecode_invoke call(vfst.method(), vfst.bci()); -- methodHandle target; -- { -- EXCEPTION_MARK; -- target = call.static_target(THREAD); -- if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; } -- } -- if (target.not_null() -- && target->is_method_handle_invoke() -- && required == target->method_handle_type()) { -- targetType = target->signature()->as_C_string(); -- targetArity = ArgumentCount(target->signature()).size(); -- } -- } -- KlassHandle kignore; int dmf_flags = 0; -- methodHandle actual_method = MethodHandles::decode_method(actual, kignore, dmf_flags); -- if ((dmf_flags & ~(MethodHandles::_dmf_has_receiver | -- MethodHandles::_dmf_does_dispatch | -- MethodHandles::_dmf_from_interface)) != 0) -- actual_method = methodHandle(); // MH does extra binds, drops, etc. 
-- bool has_receiver = ((dmf_flags & MethodHandles::_dmf_has_receiver) != 0); -- if (actual_method.not_null()) { -- mhName = actual_method->signature()->as_C_string(); -- mhArity = ArgumentCount(actual_method->signature()).size(); -- if (!actual_method->is_static()) mhArity += 1; -- } else if (java_lang_invoke_MethodHandle::is_instance(actual)) { -- oopDesc* mhType = java_lang_invoke_MethodHandle::type(actual); -- mhArity = java_lang_invoke_MethodType::ptype_count(mhType); -- stringStream st; -- java_lang_invoke_MethodType::print_signature(mhType, &st); -- mhName = st.as_string(); -- } -- if (targetArity != -1 && targetArity != mhArity) { -- if (has_receiver && targetArity == mhArity-1) -- desc = " cannot be called without a receiver argument as "; -- else -- desc = " cannot be called with a different arity as "; -- } -- message = generate_class_cast_message(mhName, targetType, -- desc != NULL ? desc : -- " cannot be called as "); -- } -- if (TraceMethodHandles) { -- tty->print_cr("WrongMethodType => message=%s", message); -- } -- return message; --} -- --oop SharedRuntime::wrong_method_type_is_for_single_argument(JavaThread* thr, -- oopDesc* required) { -- if (required == NULL) return NULL; -- if (required->klass() == SystemDictionary::Class_klass()) -- return required; -- if (required->is_klass()) -- return Klass::cast(klassOop(required))->java_mirror(); -- return NULL; --} -- -- - char* SharedRuntime::generate_class_cast_message( - const char* objName, const char* targetKlassName, const char* desc) { - size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1; -@@ -2119,8 +1985,17 @@ - // that allows sharing of adapters for the same calling convention. 
- class AdapterFingerPrint : public CHeapObj { - private: -+ enum { -+ _basic_type_bits = 4, -+ _basic_type_mask = right_n_bits(_basic_type_bits), -+ _basic_types_per_int = BitsPerInt / _basic_type_bits, -+ _compact_int_count = 3 -+ }; -+ // TO DO: Consider integrating this with a more global scheme for compressing signatures. -+ // For now, 4 bits per components (plus T_VOID gaps after double/long) is not excessive. -+ - union { -- int _compact[3]; -+ int _compact[_compact_int_count]; - int* _fingerprint; - } _value; - int _length; // A negative length indicates the fingerprint is in the compact form, -@@ -2129,8 +2004,7 @@ - // Remap BasicTypes that are handled equivalently by the adapters. - // These are correct for the current system but someday it might be - // necessary to make this mapping platform dependent. -- static BasicType adapter_encoding(BasicType in) { -- assert((~0xf & in) == 0, "must fit in 4 bits"); -+ static int adapter_encoding(BasicType in) { - switch(in) { - case T_BOOLEAN: - case T_BYTE: -@@ -2141,6 +2015,8 @@ - - case T_OBJECT: - case T_ARRAY: -+ // In other words, we assume that any register good enough for -+ // an int or long is good enough for a managed pointer. - #ifdef _LP64 - return T_LONG; - #else -@@ -2165,8 +2041,9 @@ - // The fingerprint is based on the BasicType signature encoded - // into an array of ints with eight entries per int. - int* ptr; -- int len = (total_args_passed + 7) >> 3; -- if (len <= (int)(sizeof(_value._compact) / sizeof(int))) { -+ int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int; -+ if (len <= _compact_int_count) { -+ assert(_compact_int_count == 3, "else change next line"); - _value._compact[0] = _value._compact[1] = _value._compact[2] = 0; - // Storing the signature encoded as signed chars hits about 98% - // of the time. 
-@@ -2182,10 +2059,12 @@ - int sig_index = 0; - for (int index = 0; index < len; index++) { - int value = 0; -- for (int byte = 0; byte < 8; byte++) { -- if (sig_index < total_args_passed) { -- value = (value << 4) | adapter_encoding(sig_bt[sig_index++]); -- } -+ for (int byte = 0; byte < _basic_types_per_int; byte++) { -+ int bt = ((sig_index < total_args_passed) -+ ? adapter_encoding(sig_bt[sig_index++]) -+ : 0); -+ assert((bt & _basic_type_mask) == bt, "must fit in 4 bits"); -+ value = (value << _basic_type_bits) | bt; - } - ptr[index] = value; - } -@@ -2235,6 +2114,7 @@ - return false; - } - if (_length < 0) { -+ assert(_compact_int_count == 3, "else change next line"); - return _value._compact[0] == other->_value._compact[0] && - _value._compact[1] == other->_value._compact[1] && - _value._compact[2] == other->_value._compact[2]; -@@ -2531,13 +2411,17 @@ - entry->relocate(B->content_begin()); - #ifndef PRODUCT - // debugging suppport -- if (PrintAdapterHandlers) { -- tty->cr(); -- tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = %s, %d bytes generated)", -+ if (PrintAdapterHandlers || PrintStubCode) { -+ entry->print_adapter_on(tty); -+ tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)", - _adapters->number_of_entries(), (method->is_static() ? 
"static" : "receiver"), -- method->signature()->as_C_string(), fingerprint->as_string(), insts_size ); -+ method->signature()->as_C_string(), insts_size); - tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry()); -- Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + insts_size); -+ if (Verbose || PrintStubCode) { -+ address first_pc = entry->base_address(); -+ if (first_pc != NULL) -+ Disassembler::decode(first_pc, first_pc + insts_size); -+ } - } - #endif - -@@ -2561,11 +2445,25 @@ - return entry; - } - -+address AdapterHandlerEntry::base_address() { -+ address base = _i2c_entry; -+ if (base == NULL) base = _c2i_entry; -+ assert(base <= _c2i_entry || _c2i_entry == NULL, ""); -+ assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == NULL, ""); -+ return base; -+} -+ - void AdapterHandlerEntry::relocate(address new_base) { -- ptrdiff_t delta = new_base - _i2c_entry; -+ address old_base = base_address(); -+ assert(old_base != NULL, ""); -+ ptrdiff_t delta = new_base - old_base; -+ if (_i2c_entry != NULL) - _i2c_entry += delta; -+ if (_c2i_entry != NULL) - _c2i_entry += delta; -+ if (_c2i_unverified_entry != NULL) - _c2i_unverified_entry += delta; -+ assert(base_address() == new_base, ""); - } - - -@@ -2614,7 +2512,9 @@ - ResourceMark rm; - nmethod* nm = NULL; - -- assert(method->has_native_function(), "must have something valid to call!"); -+ assert(method->is_native(), "must be native"); -+ assert(method->is_method_handle_intrinsic() || -+ method->has_native_function(), "must have something valid to call!"); - - { - // perform the work while holding the lock, but perform any printing outside the lock -@@ -2651,9 +2551,11 @@ - assert( i==total_args_passed, "" ); - BasicType ret_type = ss.type(); - -- // Now get the compiled-Java layout as input arguments -- int comp_args_on_stack; -- comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false); -+ // Now get the compiled-Java 
layout as input (or output) arguments. -+ // NOTE: Stubs for compiled entry points of method handle intrinsics -+ // are just trampolines so the argument registers must be outgoing ones. -+ const bool is_outgoing = method->is_method_handle_intrinsic(); -+ int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, is_outgoing); - - // Generate the compiled-to-native wrapper code - nm = SharedRuntime::generate_native_wrapper(&_masm, -@@ -2939,18 +2841,22 @@ - AdapterHandlerTableIterator iter(_adapters); - while (iter.has_next()) { - AdapterHandlerEntry* a = iter.next(); -- if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) { -+ if (b == CodeCache::find_blob(a->get_i2c_entry())) { - st->print("Adapter for signature: "); -- st->print_cr("%s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT, -- a->fingerprint()->as_string(), -- a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry()); -- -+ a->print_adapter_on(tty); - return; - } - } - assert(false, "Should have found handler"); - } - -+void AdapterHandlerEntry::print_adapter_on(outputStream* st) const { -+ st->print_cr("AHE@" INTPTR_FORMAT ": %s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT, -+ (intptr_t) this, fingerprint()->as_string(), -+ get_i2c_entry(), get_c2i_entry(), get_c2i_unverified_entry()); -+ -+} -+ - #ifndef PRODUCT - - void AdapterHandlerLibrary::print_statistics() { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/sharedRuntime.hpp ---- openjdk/hotspot/src/share/vm/runtime/sharedRuntime.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/sharedRuntime.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -61,7 +61,6 @@ - static RuntimeStub* _resolve_static_call_blob; - - static DeoptimizationBlob* _deopt_blob; -- static RicochetBlob* _ricochet_blob; - - static SafepointBlob* _polling_page_safepoint_handler_blob; - static SafepointBlob* _polling_page_return_handler_blob; -@@ -187,7 +186,6 
@@ - static void throw_NullPointerException(JavaThread* thread); - static void throw_NullPointerException_at_call(JavaThread* thread); - static void throw_StackOverflowError(JavaThread* thread); -- static void throw_WrongMethodTypeException(JavaThread* thread, oopDesc* required, oopDesc* actual); - static address continuation_for_implicit_exception(JavaThread* thread, - address faulting_pc, - ImplicitExceptionKind exception_kind); -@@ -223,16 +221,6 @@ - return _resolve_static_call_blob->entry_point(); - } - -- static RicochetBlob* ricochet_blob() { --#ifdef X86 -- // Currently only implemented on x86 -- assert(!EnableInvokeDynamic || _ricochet_blob != NULL, "oops"); --#endif -- return _ricochet_blob; -- } -- -- static void generate_ricochet_blob(); -- - static SafepointBlob* polling_page_return_handler_blob() { return _polling_page_return_handler_blob; } - static SafepointBlob* polling_page_safepoint_handler_blob() { return _polling_page_safepoint_handler_blob; } - -@@ -291,27 +279,6 @@ - static char* generate_class_cast_message(JavaThread* thr, const char* name); - - /** -- * Fill in the message for a WrongMethodTypeException -- * -- * @param thr the current thread -- * @param mtype (optional) expected method type (or argument class) -- * @param mhandle (optional) actual method handle (or argument) -- * @return the dynamically allocated exception message -- * -- * BCP for the frame on top of the stack must refer to an -- * 'invokevirtual' op for a method handle, or an 'invokedyamic' op. -- * The caller (or one of its callers) must use a ResourceMark -- * in order to correctly free the result. -- */ -- static char* generate_wrong_method_type_message(JavaThread* thr, -- oopDesc* mtype = NULL, -- oopDesc* mhandle = NULL); -- -- /** Return non-null if the mtype is a klass or Class, not a MethodType. 
*/ -- static oop wrong_method_type_is_for_single_argument(JavaThread* thr, -- oopDesc* mtype); -- -- /** - * Fill in the "X cannot be cast to a Y" message for ClassCastException - * - * @param name the name of the class of the object attempted to be cast -@@ -453,6 +420,10 @@ - // convention (handlizes oops, etc), transitions to native, makes the call, - // returns to java state (possibly blocking), unhandlizes any result and - // returns. -+ // -+ // The wrapper may contain special-case code if the given method -+ // is a JNI critical method, or a compiled method handle adapter, -+ // such as _invokeBasic, _linkToVirtual, etc. - static nmethod *generate_native_wrapper(MacroAssembler* masm, - methodHandle method, - int compile_id, -@@ -647,13 +618,14 @@ - AdapterHandlerEntry(); - - public: -- address get_i2c_entry() { return _i2c_entry; } -- address get_c2i_entry() { return _c2i_entry; } -- address get_c2i_unverified_entry() { return _c2i_unverified_entry; } -+ address get_i2c_entry() const { return _i2c_entry; } -+ address get_c2i_entry() const { return _c2i_entry; } -+ address get_c2i_unverified_entry() const { return _c2i_unverified_entry; } - -+ address base_address(); - void relocate(address new_base); - -- AdapterFingerPrint* fingerprint() { return _fingerprint; } -+ AdapterFingerPrint* fingerprint() const { return _fingerprint; } - - AdapterHandlerEntry* next() { - return (AdapterHandlerEntry*)BasicHashtableEntry::next(); -@@ -665,7 +637,8 @@ - bool compare_code(unsigned char* code, int length, int total_args_passed, BasicType* sig_bt); - #endif - -- void print(); -+ //virtual void print_on(outputStream* st) const; DO NOT USE -+ void print_adapter_on(outputStream* st) const; - }; - - class AdapterHandlerLibrary: public AllStatic { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/signature.hpp ---- openjdk/hotspot/src/share/vm/runtime/signature.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/signature.hpp Thu Feb 06 
14:24:53 2014 +0000 -@@ -396,6 +396,8 @@ - enum FailureMode { ReturnNull, CNFException, NCDFError }; - klassOop as_klass(Handle class_loader, Handle protection_domain, FailureMode failure_mode, TRAPS); - oop as_java_mirror(Handle class_loader, Handle protection_domain, FailureMode failure_mode, TRAPS); -+ const jbyte* raw_bytes() { return _signature->bytes() + _begin; } -+ int raw_length() { return _end - _begin; } - - // return same as_symbol except allocation of new symbols is avoided. - Symbol* as_symbol_or_null(); -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/stubRoutines.cpp ---- openjdk/hotspot/src/share/vm/runtime/stubRoutines.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/stubRoutines.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -53,7 +53,6 @@ - address StubRoutines::_throw_IncompatibleClassChangeError_entry = NULL; - address StubRoutines::_throw_NullPointerException_at_call_entry = NULL; - address StubRoutines::_throw_StackOverflowError_entry = NULL; --address StubRoutines::_throw_WrongMethodTypeException_entry = NULL; - address StubRoutines::_handler_for_unsafe_access_entry = NULL; - jint StubRoutines::_verify_oop_count = 0; - address StubRoutines::_verify_oop_subroutine_entry = NULL; -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/stubRoutines.hpp ---- openjdk/hotspot/src/share/vm/runtime/stubRoutines.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/stubRoutines.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -130,7 +130,6 @@ - static address _throw_IncompatibleClassChangeError_entry; - static address _throw_NullPointerException_at_call_entry; - static address _throw_StackOverflowError_entry; -- static address _throw_WrongMethodTypeException_entry; - static address _handler_for_unsafe_access_entry; - - static address _atomic_xchg_entry; -@@ -225,6 +224,9 @@ - (_code2 != NULL && _code2->blob_contains(addr)) ; - } - -+ static CodeBlob* code1() { return _code1; } -+ static CodeBlob* code2() 
{ return _code2; } -+ - // Debugging - static jint verify_oop_count() { return _verify_oop_count; } - static jint* verify_oop_count_addr() { return &_verify_oop_count; } -@@ -254,7 +256,6 @@ - static address throw_IncompatibleClassChangeError_entry(){ return _throw_IncompatibleClassChangeError_entry; } - static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; } - static address throw_StackOverflowError_entry() { return _throw_StackOverflowError_entry; } -- static address throw_WrongMethodTypeException_entry() { return _throw_WrongMethodTypeException_entry; } - - // Exceptions during unsafe access - should throw Java exception rather - // than crash. -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/vframe.cpp ---- openjdk/hotspot/src/share/vm/runtime/vframe.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/vframe.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -410,8 +410,9 @@ - Klass::cast(method()->method_holder()) - ->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass())) { - // This is an auxilary frame -- skip it -- } else if (method()->is_method_handle_adapter()) { -- // This is an internal adapter frame from the MethodHandleCompiler -- skip it -+ } else if (method()->is_method_handle_intrinsic() || -+ method()->is_compiled_lambda_form()) { -+ // This is an internal adapter frame for method handles -- skip it - } else { - // This is non-excluded frame, we need to count it against the depth - if (depth-- <= 0) { -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/vframeArray.cpp ---- openjdk/hotspot/src/share/vm/runtime/vframeArray.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/vframeArray.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -24,6 +24,7 @@ - - #include "precompiled.hpp" - #include "classfile/vmSymbols.hpp" -+#include "interpreter/bytecode.hpp" - #include "interpreter/interpreter.hpp" - #include "memory/allocation.inline.hpp" - 
#include "memory/resourceArea.hpp" -@@ -510,7 +511,8 @@ - // in the above picture. - - // Find the skeletal interpreter frames to unpack into -- RegisterMap map(JavaThread::current(), false); -+ JavaThread* THREAD = JavaThread::current(); -+ RegisterMap map(THREAD, false); - // Get the youngest frame we will unpack (last to be unpacked) - frame me = unpack_frame.sender(&map); - int index; -@@ -520,29 +522,37 @@ - me = me.sender(&map); - } - -+ // Do the unpacking of interpreter frames; the frame at index 0 represents the top activation, so it has no callee -+ // Unpack the frames from the oldest (frames() -1) to the youngest (0) - frame caller_frame = me; -- -- // Do the unpacking of interpreter frames; the frame at index 0 represents the top activation, so it has no callee -- -- // Unpack the frames from the oldest (frames() -1) to the youngest (0) -- - for (index = frames() - 1; index >= 0 ; index--) { -- int callee_parameters = index == 0 ? 0 : element(index-1)->method()->size_of_parameters(); -- int callee_locals = index == 0 ? 0 : element(index-1)->method()->max_locals(); -- element(index)->unpack_on_stack(caller_actual_parameters, -- callee_parameters, -- callee_locals, -- &caller_frame, -- index == 0, -- exec_mode); -+ vframeArrayElement* elem = element(index); // caller -+ int callee_parameters, callee_locals; -+ if (index == 0) { -+ callee_parameters = callee_locals = 0; -+ } else { -+ methodHandle caller = elem->method(); -+ methodHandle callee = element(index - 1)->method(); -+ Bytecode_invoke inv(caller, elem->bci()); -+ // invokedynamic instructions don't have a class but obviously don't have a MemberName appendix. -+ // NOTE: Use machinery here that avoids resolving of any kind. -+ const bool has_member_arg = -+ !inv.is_invokedynamic() && MethodHandles::has_member_arg(inv.klass(), inv.name()); -+ callee_parameters = callee->size_of_parameters() + (has_member_arg ? 
1 : 0); -+ callee_locals = callee->max_locals(); -+ } -+ elem->unpack_on_stack(caller_actual_parameters, -+ callee_parameters, -+ callee_locals, -+ &caller_frame, -+ index == 0, -+ exec_mode); - if (index == frames() - 1) { -- Deoptimization::unwind_callee_save_values(element(index)->iframe(), this); -+ Deoptimization::unwind_callee_save_values(elem->iframe(), this); - } -- caller_frame = *element(index)->iframe(); -+ caller_frame = *elem->iframe(); - caller_actual_parameters = callee_parameters; - } -- -- - deallocate_monitor_chunks(); - } - -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/runtime/vmStructs.cpp ---- openjdk/hotspot/src/share/vm/runtime/vmStructs.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/runtime/vmStructs.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -827,13 +827,6 @@ - /* CodeBlobs (NOTE: incomplete, but only a little) */ \ - /***************************************************/ \ - \ -- NOT_ZERO(X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _sender_pc, address))) \ -- NOT_ZERO(X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _exact_sender_sp, intptr_t*))) \ -- NOT_ZERO(X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _sender_link, intptr_t*))) \ -- NOT_ZERO(X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _saved_args_base, intptr_t*))) \ -- \ -- static_field(SharedRuntime, _ricochet_blob, RicochetBlob*) \ -- \ - nonstatic_field(CodeBlob, _name, const char*) \ - nonstatic_field(CodeBlob, _size, int) \ - nonstatic_field(CodeBlob, _header_size, int) \ -@@ -878,11 +871,8 @@ - nonstatic_field(nmethod, _compile_id, int) \ - nonstatic_field(nmethod, _exception_cache, ExceptionCache*) \ - nonstatic_field(nmethod, _marked_for_deoptimization, bool) \ -- \ -- nonstatic_field(RicochetBlob, _bounce_offset, int) \ -- nonstatic_field(RicochetBlob, _exception_offset, int) \ -- \ -- unchecked_c2_static_field(Deoptimization, _trap_reason_name, void*) \ -+ \ -+ unchecked_c2_static_field(Deoptimization, 
_trap_reason_name, void*) \ - \ - /********************************/ \ - /* JavaCalls (NOTE: incomplete) */ \ -@@ -1623,7 +1613,6 @@ - /*************************************************************/ \ - \ - declare_toplevel_type(SharedRuntime) \ -- X86_ONLY(declare_toplevel_type(MethodHandles::RicochetFrame)) \ - \ - declare_toplevel_type(CodeBlob) \ - declare_type(BufferBlob, CodeBlob) \ -@@ -1634,7 +1623,6 @@ - declare_type(SingletonBlob, CodeBlob) \ - declare_type(SafepointBlob, SingletonBlob) \ - declare_type(DeoptimizationBlob, SingletonBlob) \ -- declare_type(RicochetBlob, SingletonBlob) \ - declare_c2_type(ExceptionBlob, SingletonBlob) \ - declare_c2_type(UncommonTrapBlob, CodeBlob) \ - \ -@@ -2381,7 +2369,7 @@ - declare_constant(instanceKlass::initialization_error) \ - \ - /*********************************/ \ -- /* Symbol* - symbol max length */ \ -+ /* Symbol* - symbol max length */ \ - /*********************************/ \ - \ - declare_constant(Symbol::max_symbol_length) \ -@@ -2394,21 +2382,16 @@ - declare_constant(constantPoolOopDesc::_indy_argc_offset) \ - declare_constant(constantPoolOopDesc::_indy_argv_offset) \ - \ -- /*********************************************/ \ -- /* ConstantPoolCacheEntry FlagBitValues enum */ \ -- /*********************************************/ \ -+ /********************************/ \ -+ /* ConstantPoolCacheEntry enums */ \ -+ /********************************/ \ - \ -- declare_constant(ConstantPoolCacheEntry::hotSwapBit) \ -- declare_constant(ConstantPoolCacheEntry::methodInterface) \ -- declare_constant(ConstantPoolCacheEntry::volatileField) \ -- declare_constant(ConstantPoolCacheEntry::vfinalMethod) \ -- declare_constant(ConstantPoolCacheEntry::finalField) \ -- \ -- /******************************************/ \ -- /* ConstantPoolCacheEntry FlagValues enum */ \ -- /******************************************/ \ -- \ -- declare_constant(ConstantPoolCacheEntry::tosBits) \ -+ 
declare_constant(ConstantPoolCacheEntry::is_volatile_shift) \ -+ declare_constant(ConstantPoolCacheEntry::is_final_shift) \ -+ declare_constant(ConstantPoolCacheEntry::is_forced_virtual_shift) \ -+ declare_constant(ConstantPoolCacheEntry::is_vfinal_shift) \ -+ declare_constant(ConstantPoolCacheEntry::is_field_entry_shift) \ -+ declare_constant(ConstantPoolCacheEntry::tos_state_shift) \ - \ - /***************************************/ \ - /* java_lang_Thread::ThreadStatus enum */ \ -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/services/heapDumper.cpp ---- openjdk/hotspot/src/share/vm/services/heapDumper.cpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/services/heapDumper.cpp Thu Feb 06 14:24:53 2014 +0000 -@@ -1650,9 +1650,6 @@ - if (fr->is_entry_frame()) { - last_entry_frame = fr; - } -- if (fr->is_ricochet_frame()) { -- fr->oops_ricochet_do(&blk, vf->register_map()); -- } - } - vf = vf->sender(); - } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/utilities/accessFlags.hpp ---- openjdk/hotspot/src/share/vm/utilities/accessFlags.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/utilities/accessFlags.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -55,9 +55,6 @@ - JVM_ACC_IS_OBSOLETE = 0x00020000, // RedefineClasses() has made method obsolete - JVM_ACC_IS_PREFIXED_NATIVE = 0x00040000, // JVMTI has prefixed this native method - -- JVM_MH_INVOKE_BITS // = 0x10001100 // MethodHandle.invoke quasi-native -- = (JVM_ACC_NATIVE | JVM_ACC_SYNTHETIC | JVM_ACC_MONITOR_MATCH), -- - // klassOop flags - JVM_ACC_HAS_MIRANDA_METHODS = 0x10000000, // True if this class has miranda methods in it's vtable - JVM_ACC_HAS_VANILLA_CONSTRUCTOR = 0x20000000, // True if klass has a vanilla default constructor -@@ -131,15 +128,6 @@ - bool is_obsolete () const { return (_flags & JVM_ACC_IS_OBSOLETE ) != 0; } - bool is_prefixed_native () const { return (_flags & JVM_ACC_IS_PREFIXED_NATIVE ) != 0; } - -- // JSR 292: A method of the form 
MethodHandle.invoke(A...)R method is -- // neither bytecoded nor a JNI native, but rather a fast call through -- // a lightweight method handle object. Because it is not bytecoded, -- // it has the native bit set, but the monitor-match bit is also set -- // to distinguish it from a JNI native (which never has the match bit set). -- // The synthetic bit is also present, because such a method is never -- // explicitly defined in Java code. -- bool is_method_handle_invoke () const { return (_flags & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS; } -- - // klassOop flags - bool has_miranda_methods () const { return (_flags & JVM_ACC_HAS_MIRANDA_METHODS ) != 0; } - bool has_vanilla_constructor () const { return (_flags & JVM_ACC_HAS_VANILLA_CONSTRUCTOR) != 0; } -diff -r 3442eb7ef2d2 -r 19ac51ce4be7 src/share/vm/utilities/exceptions.hpp ---- openjdk/hotspot/src/share/vm/utilities/exceptions.hpp Tue Jan 14 20:24:44 2014 -0500 -+++ openjdk/hotspot/src/share/vm/utilities/exceptions.hpp Thu Feb 06 14:24:53 2014 +0000 -@@ -220,6 +220,9 @@ - #define THROW_ARG(name, signature, args) \ - { Exceptions::_throw_args(THREAD_AND_LOCATION, name, signature, args); return; } - -+#define THROW_MSG_CAUSE(name, message, cause) \ -+ { Exceptions::_throw_msg_cause(THREAD_AND_LOCATION, name, message, cause); return; } -+ - #define THROW_OOP_(e, result) \ - { Exceptions::_throw_oop(THREAD_AND_LOCATION, e); return result; } - -@@ -238,6 +241,9 @@ - #define THROW_ARG_(name, signature, args, result) \ - { Exceptions::_throw_args(THREAD_AND_LOCATION, name, signature, args); return result; } - -+#define THROW_MSG_CAUSE(name, message, cause) \ -+ { Exceptions::_throw_msg_cause(THREAD_AND_LOCATION, name, message, cause); return; } -+ - #define THROW_MSG_CAUSE_(name, message, cause, result) \ - { Exceptions::_throw_msg_cause(THREAD_AND_LOCATION, name, message, cause); return result; } - diff -r 531847dfec6f -r ed2108ad126a patches/zero/7192406-exact_return_type_info.patch --- 
a/patches/zero/7192406-exact_return_type_info.patch Thu Mar 27 03:50:20 2014 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,660 +0,0 @@ -# HG changeset patch -# User andrew -# Date 1391697224 0 -# Thu Feb 06 14:33:44 2014 +0000 -# Node ID 38ae397aa523096aa3f94d23e1a38aa75e55f8f5 -# Parent 98f6e8bc55e8dbac329e3d871de88b2a7529ff6d -7192406: JSR 292: C2 needs exact return type information for invokedynamic and invokehandle call sites -Reviewed-by: kvn - -diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/c1/c1_GraphBuilder.cpp ---- openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Mon Sep 10 16:37:22 2012 -0700 -+++ openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Thu Feb 06 14:33:44 2014 +0000 -@@ -1646,15 +1646,16 @@ - code == Bytecodes::_invokespecial || - code == Bytecodes::_invokevirtual || - code == Bytecodes::_invokeinterface; -- const bool is_invokedynamic = (code == Bytecodes::_invokedynamic); - - bool will_link; -- ciMethod* target = stream()->get_method(will_link); -+ ciSignature* declared_signature = NULL; -+ ciMethod* target = stream()->get_method(will_link, &declared_signature); - ciKlass* holder = stream()->get_declared_method_holder(); - const Bytecodes::Code bc_raw = stream()->cur_bc_raw(); -+ assert(declared_signature != NULL, "cannot be null"); - - // FIXME bail out for now -- if ((bc_raw == Bytecodes::_invokehandle || is_invokedynamic) && !will_link) { -+ if (Bytecodes::has_optional_appendix(bc_raw) && !will_link) { - BAILOUT("unlinked call site (FIXME needs patching or recompile support)"); - } - -@@ -1834,7 +1835,7 @@ - bool success = false; - if (target->is_method_handle_intrinsic()) { - // method handle invokes -- success = for_method_handle_inline(target); -+ success = try_method_handle_inline(target); - } else { - // static binding => check if callee is ok - success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver); -@@ -1871,7 +1872,7 @@ - - // inlining not successful 
=> standard invoke - bool is_loaded = target->is_loaded(); -- ValueType* result_type = as_ValueType(target->return_type()); -+ ValueType* result_type = as_ValueType(declared_signature->return_type()); - - // We require the debug info to be the "state before" because - // invokedynamics may deoptimize. -@@ -3794,7 +3795,7 @@ - } - - --bool GraphBuilder::for_method_handle_inline(ciMethod* callee) { -+bool GraphBuilder::try_method_handle_inline(ciMethod* callee) { - ValueStack* state_before = state()->copy_for_parsing(); - vmIntrinsics::ID iid = callee->intrinsic_id(); - switch (iid) { -@@ -3829,7 +3830,7 @@ - // If the target is another method handle invoke try recursivly to get - // a better target. - if (target->is_method_handle_intrinsic()) { -- if (for_method_handle_inline(target)) { -+ if (try_method_handle_inline(target)) { - return true; - } - } else { -diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/c1/c1_GraphBuilder.hpp ---- openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Mon Sep 10 16:37:22 2012 -0700 -+++ openjdk/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Thu Feb 06 14:33:44 2014 +0000 -@@ -346,7 +346,7 @@ - const char* should_not_inline(ciMethod* callee) const; - - // JSR 292 support -- bool for_method_handle_inline(ciMethod* callee); -+ bool try_method_handle_inline(ciMethod* callee); - - // helpers - void inline_bailout(const char* msg); -diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/ci/bcEscapeAnalyzer.cpp ---- openjdk/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp Mon Sep 10 16:37:22 2012 -0700 -+++ openjdk/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp Thu Feb 06 14:33:44 2014 +0000 -@@ -236,12 +236,16 @@ - ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder); - ciInstanceKlass* actual_recv = callee_holder; - -- // some methods are obviously bindable without any type checks so -- // convert them directly to an invokespecial. 
-+ // Some methods are obviously bindable without any type checks so -+ // convert them directly to an invokespecial or invokestatic. - if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) { - switch (code) { -- case Bytecodes::_invokevirtual: code = Bytecodes::_invokespecial; break; -- case Bytecodes::_invokehandle: code = Bytecodes::_invokestatic; break; -+ case Bytecodes::_invokevirtual: -+ code = Bytecodes::_invokespecial; -+ break; -+ case Bytecodes::_invokehandle: -+ code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial; -+ break; - } - } - -@@ -826,8 +830,8 @@ - break; - case Bytecodes::_getstatic: - case Bytecodes::_getfield: -- { bool will_link; -- ciField* field = s.get_field(will_link); -+ { bool ignored_will_link; -+ ciField* field = s.get_field(ignored_will_link); - BasicType field_type = field->type()->basic_type(); - if (s.cur_bc() != Bytecodes::_getstatic) { - set_method_escape(state.apop()); -@@ -865,16 +869,21 @@ - case Bytecodes::_invokestatic: - case Bytecodes::_invokedynamic: - case Bytecodes::_invokeinterface: -- { bool will_link; -- ciMethod* target = s.get_method(will_link); -- ciKlass* holder = s.get_declared_method_holder(); -+ { bool ignored_will_link; -+ ciSignature* declared_signature = NULL; -+ ciMethod* target = s.get_method(ignored_will_link, &declared_signature); -+ ciKlass* holder = s.get_declared_method_holder(); -+ assert(declared_signature != NULL, "cannot be null"); - // Push appendix argument, if one. - if (s.has_appendix()) { - state.apush(unknown_obj); - } - // Pass in raw bytecode because we need to see invokehandle instructions. - invoke(state, s.cur_bc_raw(), target, holder); -- ciType* return_type = target->return_type(); -+ // We are using the return type of the declared signature here because -+ // it might be a more concrete type than the one from the target (for -+ // e.g. invokedynamic and invokehandle). 
-+ ciType* return_type = declared_signature->return_type(); - if (!return_type->is_primitive_type()) { - state.apush(unknown_obj); - } else if (return_type->is_one_word()) { -diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/ci/ciEnv.cpp ---- openjdk/hotspot/src/share/vm/ci/ciEnv.cpp Mon Sep 10 16:37:22 2012 -0700 -+++ openjdk/hotspot/src/share/vm/ci/ciEnv.cpp Thu Feb 06 14:33:44 2014 +0000 -@@ -738,91 +738,81 @@ - ciMethod* ciEnv::get_method_by_index_impl(constantPoolHandle cpool, - int index, Bytecodes::Code bc, - ciInstanceKlass* accessor) { -- int holder_index = cpool->klass_ref_index_at(index); -- bool holder_is_accessible; -- ciKlass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor); -- ciInstanceKlass* declared_holder = get_instance_klass_for_declared_method_holder(holder); -+ if (bc == Bytecodes::_invokedynamic) { -+ ConstantPoolCacheEntry* secondary_entry = cpool->cache()->secondary_entry_at(index); -+ const bool is_resolved = !secondary_entry->is_f1_null(); -+ // FIXME: code generation could allow for null (unlinked) call site -+ // The call site could be made patchable as follows: -+ // Load the appendix argument from the constant pool. -+ // Test the appendix argument and jump to a known deopt routine if it is null. -+ // Jump through a patchable call site, which is initially a deopt routine. -+ // Patch the call site to the nmethod entry point of the static compiled lambda form. -+ // As with other two-component call sites, both values must be independently verified. - -- // Get the method's name and signature. -- Symbol* name_sym = cpool->name_ref_at(index); -- Symbol* sig_sym = cpool->signature_ref_at(index); -+ if (is_resolved) { -+ // Get the invoker methodOop and the extra argument from the constant pool. 
-+ methodOop adapter = secondary_entry->f2_as_vfinal_method(); -+ return get_object(adapter)->as_method(); -+ } - -- if (cpool->has_preresolution() -- || (holder == ciEnv::MethodHandle_klass() && -- MethodHandles::is_signature_polymorphic_name(holder->get_klassOop(), name_sym))) { -- // Short-circuit lookups for JSR 292-related call sites. -- // That is, do not rely only on name-based lookups, because they may fail -- // if the names are not resolvable in the boot class loader (7056328). -- switch (bc) { -- case Bytecodes::_invokevirtual: -- case Bytecodes::_invokeinterface: -- case Bytecodes::_invokespecial: -- case Bytecodes::_invokestatic: -- { -- oop appendix_oop = NULL; -- methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index); -- if (m != NULL) { -- return get_object(m)->as_method(); -- } -- } -- break; -- } -- } -- -- if (holder_is_accessible) { // Our declared holder is loaded. -- instanceKlass* lookup = declared_holder->get_instanceKlass(); -- methodOop m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc); -- if (m != NULL && -- (bc == Bytecodes::_invokestatic -- ? instanceKlass::cast(m->method_holder())->is_not_initialized() -- : !instanceKlass::cast(m->method_holder())->is_loaded())) { -- m = NULL; -- } -- if (m != NULL) { -- // We found the method. -- return get_object(m)->as_method(); -- } -- } -- -- // Either the declared holder was not loaded, or the method could -- // not be found. Create a dummy ciMethod to represent the failed -- // lookup. 
-- ciSymbol* name = get_symbol(name_sym); -- ciSymbol* signature = get_symbol(sig_sym); -- return get_unloaded_method(declared_holder, name, signature, accessor); --} -- -- --// ------------------------------------------------------------------ --// ciEnv::get_fake_invokedynamic_method_impl --ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool, -- int index, Bytecodes::Code bc, -- ciInstanceKlass* accessor) { -- // Compare the following logic with InterpreterRuntime::resolve_invokedynamic. -- assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic"); -- -- ConstantPoolCacheEntry* secondary_entry = cpool->cache()->secondary_entry_at(index); -- bool is_resolved = !secondary_entry->is_f1_null(); -- // FIXME: code generation could allow for null (unlinked) call site -- // The call site could be made patchable as follows: -- // Load the appendix argument from the constant pool. -- // Test the appendix argument and jump to a known deopt routine if it is null. -- // Jump through a patchable call site, which is initially a deopt routine. -- // Patch the call site to the nmethod entry point of the static compiled lambda form. -- // As with other two-component call sites, both values must be independently verified. -- -- // Call site might not be resolved yet. -- // Stop the code path here with an unlinked method. -- if (!is_resolved) { -+ // Fake a method that is equivalent to a declared method. 
- ciInstanceKlass* holder = get_object(SystemDictionary::MethodHandle_klass())->as_instance_klass(); - ciSymbol* name = ciSymbol::invokeBasic_name(); - ciSymbol* signature = get_symbol(cpool->signature_ref_at(index)); - return get_unloaded_method(holder, name, signature, accessor); -+ } else { -+ const int holder_index = cpool->klass_ref_index_at(index); -+ bool holder_is_accessible; -+ ciKlass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor); -+ ciInstanceKlass* declared_holder = get_instance_klass_for_declared_method_holder(holder); -+ -+ // Get the method's name and signature. -+ Symbol* name_sym = cpool->name_ref_at(index); -+ Symbol* sig_sym = cpool->signature_ref_at(index); -+ -+ if (cpool->has_preresolution() -+ || (holder == ciEnv::MethodHandle_klass() && -+ MethodHandles::is_signature_polymorphic_name(holder->get_klassOop(), name_sym))) { -+ // Short-circuit lookups for JSR 292-related call sites. -+ // That is, do not rely only on name-based lookups, because they may fail -+ // if the names are not resolvable in the boot class loader (7056328). -+ switch (bc) { -+ case Bytecodes::_invokevirtual: -+ case Bytecodes::_invokeinterface: -+ case Bytecodes::_invokespecial: -+ case Bytecodes::_invokestatic: -+ { -+ methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index); -+ if (m != NULL) { -+ return get_object(m)->as_method(); -+ } -+ } -+ break; -+ } -+ } -+ -+ if (holder_is_accessible) { // Our declared holder is loaded. -+ instanceKlass* lookup = declared_holder->get_instanceKlass(); -+ methodOop m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc); -+ if (m != NULL && -+ (bc == Bytecodes::_invokestatic -+ ? instanceKlass::cast(m->method_holder())->is_not_initialized() -+ : !instanceKlass::cast(m->method_holder())->is_loaded())) { -+ m = NULL; -+ } -+ if (m != NULL) { -+ // We found the method. 
-+ return get_object(m)->as_method(); -+ } -+ } -+ -+ // Either the declared holder was not loaded, or the method could -+ // not be found. Create a dummy ciMethod to represent the failed -+ // lookup. -+ ciSymbol* name = get_symbol(name_sym); -+ ciSymbol* signature = get_symbol(sig_sym); -+ return get_unloaded_method(declared_holder, name, signature, accessor); - } -- -- // Get the invoker methodOop and the extra argument from the constant pool. -- methodOop adapter = secondary_entry->f2_as_vfinal_method(); -- return get_object(adapter)->as_method(); - } - - -@@ -853,11 +843,7 @@ - ciMethod* ciEnv::get_method_by_index(constantPoolHandle cpool, - int index, Bytecodes::Code bc, - ciInstanceKlass* accessor) { -- if (bc == Bytecodes::_invokedynamic) { -- GUARDED_VM_ENTRY(return get_fake_invokedynamic_method_impl(cpool, index, bc, accessor);) -- } else { -- GUARDED_VM_ENTRY(return get_method_by_index_impl( cpool, index, bc, accessor);) -- } -+ GUARDED_VM_ENTRY(return get_method_by_index_impl(cpool, index, bc, accessor);) - } - - -diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/ci/ciEnv.hpp ---- openjdk/hotspot/src/share/vm/ci/ciEnv.hpp Mon Sep 10 16:37:22 2012 -0700 -+++ openjdk/hotspot/src/share/vm/ci/ciEnv.hpp Thu Feb 06 14:33:44 2014 +0000 -@@ -152,9 +152,6 @@ - ciMethod* get_method_by_index_impl(constantPoolHandle cpool, - int method_index, Bytecodes::Code bc, - ciInstanceKlass* loading_klass); -- ciMethod* get_fake_invokedynamic_method_impl(constantPoolHandle cpool, -- int index, Bytecodes::Code bc, -- ciInstanceKlass* accessor); - - // Helper methods - bool check_klass_accessibility(ciKlass* accessing_klass, -diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/ci/ciMethod.cpp ---- openjdk/hotspot/src/share/vm/ci/ciMethod.cpp Mon Sep 10 16:37:22 2012 -0700 -+++ openjdk/hotspot/src/share/vm/ci/ciMethod.cpp Thu Feb 06 14:33:44 2014 +0000 -@@ -1216,9 +1216,10 @@ - holder()->print_name_on(st); - st->print(" signature="); - 
signature()->as_symbol()->print_symbol_on(st); -- st->print(" arg_size=%d", arg_size()); - if (is_loaded()) { -- st->print(" loaded=true flags="); -+ st->print(" loaded=true"); -+ st->print(" arg_size=%d", arg_size()); -+ st->print(" flags="); - flags().print_member_flags(st); - } else { - st->print(" loaded=false"); -diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/ci/ciStreams.cpp ---- openjdk/hotspot/src/share/vm/ci/ciStreams.cpp Mon Sep 10 16:37:22 2012 -0700 -+++ openjdk/hotspot/src/share/vm/ci/ciStreams.cpp Thu Feb 06 14:33:44 2014 +0000 -@@ -355,11 +355,23 @@ - // ciBytecodeStream::get_method - // - // If this is a method invocation bytecode, get the invoked method. --ciMethod* ciBytecodeStream::get_method(bool& will_link) { -+// Additionally return the declared signature to get more concrete -+// type information if required (Cf. invokedynamic and invokehandle). -+ciMethod* ciBytecodeStream::get_method(bool& will_link, ciSignature* *declared_signature_result) { - VM_ENTRY_MARK; -+ ciEnv* env = CURRENT_ENV; - constantPoolHandle cpool(_method->get_methodOop()->constants()); -- ciMethod* m = CURRENT_ENV->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder); -+ ciMethod* m = env->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder); - will_link = m->is_loaded(); -+ // Get declared method signature and return it. 
-+ if (has_optional_appendix()) { -+ const int sig_index = get_method_signature_index(); -+ Symbol* sig_sym = cpool->symbol_at(sig_index); -+ ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass(); -+ (*declared_signature_result) = new (env->arena()) ciSignature(pool_holder, cpool, env->get_symbol(sig_sym)); -+ } else { -+ (*declared_signature_result) = m->signature(); -+ } - return m; - } - -@@ -419,35 +431,18 @@ - } - - // ------------------------------------------------------------------ --// ciBytecodeStream::get_declared_method_signature --// --// Get the declared signature of the currently referenced method. --// --// This is always the same as the signature of the resolved method --// itself, except for _invokehandle and _invokedynamic calls. --// --ciSignature* ciBytecodeStream::get_declared_method_signature() { -- int sig_index = get_method_signature_index(); -- VM_ENTRY_MARK; -- ciEnv* env = CURRENT_ENV; -- constantPoolHandle cpool(_method->get_methodOop()->constants()); -- Symbol* sig_sym = cpool->symbol_at(sig_index); -- ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass(); -- return new (env->arena()) ciSignature(pool_holder, cpool, env->get_symbol(sig_sym)); --} -- --// ------------------------------------------------------------------ - // ciBytecodeStream::get_method_signature_index - // - // Get the constant pool index of the signature of the method - // referenced by the current bytecode. Used for generating - // deoptimization information. 
- int ciBytecodeStream::get_method_signature_index() { -- VM_ENTRY_MARK; -- constantPoolOop cpool = _holder->get_instanceKlass()->constants(); -- int method_index = get_method_index(); -- int name_and_type_index = cpool->name_and_type_ref_index_at(method_index); -- return cpool->signature_ref_index_at(name_and_type_index); -+ GUARDED_VM_ENTRY( -+ constantPoolOop cpool = _holder->get_instanceKlass()->constants(); -+ const int method_index = get_method_index(); -+ const int name_and_type_index = cpool->name_and_type_ref_index_at(method_index); -+ return cpool->signature_ref_index_at(name_and_type_index); -+ ) - } - - // ------------------------------------------------------------------ -diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/ci/ciStreams.hpp ---- openjdk/hotspot/src/share/vm/ci/ciStreams.hpp Mon Sep 10 16:37:22 2012 -0700 -+++ openjdk/hotspot/src/share/vm/ci/ciStreams.hpp Thu Feb 06 14:33:44 2014 +0000 -@@ -151,6 +151,8 @@ - // Does this instruction contain an index which refes into the CP cache? - bool has_cache_index() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); } - -+ bool has_optional_appendix() { return Bytecodes::has_optional_appendix(cur_bc_raw()); } -+ - int get_index_u1() const { - return bytecode().get_index_u1(cur_bc_raw()); - } -@@ -257,13 +259,11 @@ - int get_field_holder_index(); - int get_field_signature_index(); - -- // If this is a method invocation bytecode, get the invoked method. 
-- ciMethod* get_method(bool& will_link); -+ ciMethod* get_method(bool& will_link, ciSignature* *declared_signature_result); - bool has_appendix(); - ciObject* get_appendix(); - ciKlass* get_declared_method_holder(); - int get_method_holder_index(); -- ciSignature* get_declared_method_signature(); - int get_method_signature_index(); - - ciCPCache* get_cpcache() const; -diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/ci/ciTypeFlow.cpp ---- openjdk/hotspot/src/share/vm/ci/ciTypeFlow.cpp Mon Sep 10 16:37:22 2012 -0700 -+++ openjdk/hotspot/src/share/vm/ci/ciTypeFlow.cpp Thu Feb 06 14:33:44 2014 +0000 -@@ -643,9 +643,11 @@ - // ------------------------------------------------------------------ - // ciTypeFlow::StateVector::do_invoke - void ciTypeFlow::StateVector::do_invoke(ciBytecodeStream* str, -- bool has_receiver_foo) { -+ bool has_receiver) { - bool will_link; -- ciMethod* callee = str->get_method(will_link); -+ ciSignature* declared_signature = NULL; -+ ciMethod* callee = str->get_method(will_link, &declared_signature); -+ assert(declared_signature != NULL, "cannot be null"); - if (!will_link) { - // We weren't able to find the method. - if (str->cur_bc() == Bytecodes::_invokedynamic) { -@@ -658,22 +660,12 @@ - trap(str, unloaded_holder, str->get_method_holder_index()); - } - } else { -- // TODO Use Bytecode_invoke after metadata changes. -- //Bytecode_invoke inv(str->method(), str->cur_bci()); -- //const bool has_receiver = callee->is_loaded() ? !callee->is_static() : inv.has_receiver(); -- Bytecode inv(str); -- Bytecodes::Code code = inv.invoke_code(); -- const bool has_receiver = callee->is_loaded() ? !callee->is_static() : code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic; -- -- ciSignature* signature = callee->signature(); -- ciSignatureStream sigstr(signature); -- // Push appendix argument, if one. 
-- if (str->has_appendix()) { -- ciObject* appendix = str->get_appendix(); -- push_object(appendix->klass()); -- } -- int arg_size = signature->size(); -- int stack_base = stack_size() - arg_size; -+ // We are using the declared signature here because it might be -+ // different from the callee signature (Cf. invokedynamic and -+ // invokehandle). -+ ciSignatureStream sigstr(declared_signature); -+ const int arg_size = declared_signature->size(); -+ const int stack_base = stack_size() - arg_size; - int i = 0; - for( ; !sigstr.at_return_type(); sigstr.next()) { - ciType* type = sigstr.type(); -@@ -689,7 +681,6 @@ - for (int j = 0; j < arg_size; j++) { - pop(); - } -- assert(!callee->is_loaded() || has_receiver == !callee->is_static(), "mismatch"); - if (has_receiver) { - // Check this? - pop_object(); -diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/interpreter/bytecodes.hpp ---- openjdk/hotspot/src/share/vm/interpreter/bytecodes.hpp Mon Sep 10 16:37:22 2012 -0700 -+++ openjdk/hotspot/src/share/vm/interpreter/bytecodes.hpp Thu Feb 06 14:33:44 2014 +0000 -@@ -424,6 +424,8 @@ - || code == _fconst_0 || code == _dconst_0); } - static bool is_invoke (Code code) { return (_invokevirtual <= code && code <= _invokedynamic); } - -+ static bool has_optional_appendix(Code code) { return code == _invokedynamic || code == _invokehandle; } -+ - static int compute_flags (const char* format, int more_flags = 0); // compute the flags - static int flags (int code, bool is_wide) { - assert(code == (u_char)code, "must be a byte"); -diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/opto/doCall.cpp ---- openjdk/hotspot/src/share/vm/opto/doCall.cpp Mon Sep 10 16:37:22 2012 -0700 -+++ openjdk/hotspot/src/share/vm/opto/doCall.cpp Thu Feb 06 14:33:44 2014 +0000 -@@ -341,25 +341,26 @@ - kill_dead_locals(); - - // Set frequently used booleans -- bool is_virtual = bc() == Bytecodes::_invokevirtual; -- bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface; -- bool 
has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial; -- bool is_invokedynamic = bc() == Bytecodes::_invokedynamic; -+ const bool is_virtual = bc() == Bytecodes::_invokevirtual; -+ const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface; -+ const bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial; - - // Find target being called - bool will_link; -- ciMethod* bc_callee = iter().get_method(will_link); // actual callee from bytecode -- ciInstanceKlass* holder_klass = bc_callee->holder(); -- ciKlass* holder = iter().get_declared_method_holder(); -+ ciSignature* declared_signature = NULL; -+ ciMethod* orig_callee = iter().get_method(will_link, &declared_signature); // callee in the bytecode -+ ciInstanceKlass* holder_klass = orig_callee->holder(); -+ ciKlass* holder = iter().get_declared_method_holder(); - ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder); -+ assert(declared_signature != NULL, "cannot be null"); - - // uncommon-trap when callee is unloaded, uninitialized or will not link - // bailout when too many arguments for register representation -- if (!will_link || can_not_compile_call_site(bc_callee, klass)) { -+ if (!will_link || can_not_compile_call_site(orig_callee, klass)) { - #ifndef PRODUCT - if (PrintOpto && (Verbose || WizardMode)) { - method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci()); -- bc_callee->print_name(); tty->cr(); -+ orig_callee->print_name(); tty->cr(); - } - #endif - return; -@@ -372,7 +373,7 @@ - // Note: In the absence of miranda methods, an abstract class K can perform - // an invokevirtual directly on an interface method I.m if K implements I. - -- const int nargs = bc_callee->arg_size(); -+ const int nargs = orig_callee->arg_size(); - - // Push appendix argument (MethodType, CallSite, etc.), if one. - if (iter().has_appendix()) { -@@ -392,13 +393,13 @@ - // Choose call strategy. 
- bool call_is_virtual = is_virtual_or_interface; - int vtable_index = methodOopDesc::invalid_vtable_index; -- ciMethod* callee = bc_callee; -+ ciMethod* callee = orig_callee; - - // Try to get the most accurate receiver type - if (is_virtual_or_interface) { - Node* receiver_node = stack(sp() - nargs); - const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr(); -- ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, bc_callee, receiver_type); -+ ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, orig_callee, receiver_type); - - // Have the call been sufficiently improved such that it is no longer a virtual? - if (optimized_virtual_method != NULL) { -@@ -425,7 +426,8 @@ - // It decides whether inlining is desirable or not. - CallGenerator* cg = C->call_generator(callee, vtable_index, call_is_virtual, jvms, try_inline, prof_factor()); - -- bc_callee = callee = NULL; // don't use bc_callee and callee after this point -+ // NOTE: Don't use orig_callee and callee after this point! Use cg->method() instead. -+ orig_callee = callee = NULL; - - // --------------------- - // Round double arguments before call -@@ -506,9 +508,9 @@ - round_double_result(cg->method()); - - ciType* rtype = cg->method()->return_type(); -- if (iter().cur_bc_raw() == Bytecodes::_invokehandle || is_invokedynamic) { -+ if (Bytecodes::has_optional_appendix(iter().cur_bc_raw())) { - // Be careful here with return types. 
-- ciType* ctype = iter().get_declared_method_signature()->return_type(); -+ ciType* ctype = declared_signature->return_type(); - if (ctype != rtype) { - BasicType rt = rtype->basic_type(); - BasicType ct = ctype->basic_type(); -@@ -537,15 +539,13 @@ - } else if (rt == T_OBJECT) { - assert(ct == T_OBJECT, err_msg("rt=T_OBJECT, ct=%d", ct)); - if (ctype->is_loaded()) { -- Node* if_fail = top(); -- retnode = gen_checkcast(retnode, makecon(TypeKlassPtr::make(ctype->as_klass())), &if_fail); -- if (if_fail != top()) { -- PreserveJVMState pjvms(this); -- set_control(if_fail); -- builtin_throw(Deoptimization::Reason_class_check); -+ const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass()); -+ const Type* sig_type = TypeOopPtr::make_from_klass(ctype->as_klass()); -+ if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { -+ Node* cast_obj = _gvn.transform(new (C, 2) CheckCastPPNode(control(), retnode, sig_type)); -+ pop(); -+ push(cast_obj); - } -- pop(); -- push(retnode); - } - } else { - assert(ct == rt, err_msg("unexpected mismatch rt=%d, ct=%d", rt, ct)); -diff -r 98f6e8bc55e8 -r 38ae397aa523 src/share/vm/opto/graphKit.cpp ---- openjdk/hotspot/src/share/vm/opto/graphKit.cpp Mon Sep 10 16:37:22 2012 -0700 -+++ openjdk/hotspot/src/share/vm/opto/graphKit.cpp Thu Feb 06 14:33:44 2014 +0000 -@@ -1006,11 +1006,11 @@ - case Bytecodes::_putfield: - { - bool is_get = (depth >= 0), is_static = (depth & 1); -- bool ignore; - ciBytecodeStream iter(method()); - iter.reset_to_bci(bci()); - iter.next(); -- ciField* field = iter.get_field(ignore); -+ bool ignored_will_link; -+ ciField* field = iter.get_field(ignored_will_link); - int size = field->type()->size(); - inputs = (is_static ? 
0 : 1); - if (is_get) { -@@ -1028,11 +1028,13 @@ - case Bytecodes::_invokedynamic: - case Bytecodes::_invokeinterface: - { -- bool ignore; - ciBytecodeStream iter(method()); - iter.reset_to_bci(bci()); - iter.next(); -- ciMethod* callee = iter.get_method(ignore); -+ bool ignored_will_link; -+ ciSignature* declared_signature = NULL; -+ ciMethod* callee = iter.get_method(ignored_will_link, &declared_signature); -+ assert(declared_signature != NULL, "cannot be null"); - // (Do not use ciMethod::arg_size(), because - // it might be an unloaded method, which doesn't - // know whether it is static or not.) -@@ -1046,7 +1048,7 @@ - // remove any appendix arguments that were popped. - inputs = callee->invoke_arg_size(code) - (callee->has_member_arg() ? 1 : 0); - } -- int size = callee->return_type()->size(); -+ int size = declared_signature->return_type()->size(); - depth = size - inputs; - } - break; diff -r 531847dfec6f -r ed2108ad126a patches/zero/7196242-loopsandthreads_crashed.patch --- a/patches/zero/7196242-loopsandthreads_crashed.patch Thu Mar 27 03:50:20 2014 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,124 +0,0 @@ -# HG changeset patch -# User twisti -# Date 1347320242 25200 -# Mon Sep 10 16:37:22 2012 -0700 -# Node ID 98f6e8bc55e8dbac329e3d871de88b2a7529ff6d -# Parent 19ac51ce4be77e6895816f9823bce63a72392e89 -7196242: vm/mlvm/indy/stress/java/loopsAndThreads crashed -Reviewed-by: jrose, coleenp, jmasa, kvn - -diff -r 19ac51ce4be7 -r 98f6e8bc55e8 src/share/vm/interpreter/interpreterRuntime.cpp ---- openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Thu Feb 06 14:24:53 2014 +0000 -+++ openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Mon Sep 10 16:37:22 2012 -0700 -@@ -762,6 +762,7 @@ - } // end JvmtiHideSingleStepping - - cache_entry(thread)->set_method_handle( -+ pool, - info.resolved_method(), - info.resolved_appendix()); - } -@@ -788,6 +789,7 @@ - } // end JvmtiHideSingleStepping - - 
pool->cache()->secondary_entry_at(index)->set_dynamic_call( -+ pool, - info.resolved_method(), - info.resolved_appendix()); - } -diff -r 19ac51ce4be7 -r 98f6e8bc55e8 src/share/vm/oops/cpCacheOop.cpp ---- openjdk/hotspot/src/share/vm/oops/cpCacheOop.cpp Thu Feb 06 14:24:53 2014 +0000 -+++ openjdk/hotspot/src/share/vm/oops/cpCacheOop.cpp Mon Sep 10 16:37:22 2012 -0700 -@@ -265,25 +265,36 @@ - } - - --void ConstantPoolCacheEntry::set_method_handle(methodHandle adapter, Handle appendix) { -+void ConstantPoolCacheEntry::set_method_handle(constantPoolHandle cpool, -+ methodHandle adapter, Handle appendix) { - assert(!is_secondary_entry(), ""); -- set_method_handle_common(Bytecodes::_invokehandle, adapter, appendix); -+ set_method_handle_common(cpool, Bytecodes::_invokehandle, adapter, appendix); - } - --void ConstantPoolCacheEntry::set_dynamic_call(methodHandle adapter, Handle appendix) { -+void ConstantPoolCacheEntry::set_dynamic_call(constantPoolHandle cpool, -+ methodHandle adapter, Handle appendix) { - assert(is_secondary_entry(), ""); -- set_method_handle_common(Bytecodes::_invokedynamic, adapter, appendix); -+ set_method_handle_common(cpool, Bytecodes::_invokedynamic, adapter, appendix); - } - --void ConstantPoolCacheEntry::set_method_handle_common(Bytecodes::Code invoke_code, methodHandle adapter, Handle appendix) { -+void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool, -+ Bytecodes::Code invoke_code, -+ methodHandle adapter, -+ Handle appendix) { - // NOTE: This CPCE can be the subject of data races. - // There are three words to update: flags, f2, f1 (in that order). - // Writers must store all other values before f1. - // Readers must test f1 first for non-null before reading other fields. -- // Competing writers must acquire exclusive access on the first -- // write, to flags, using a compare/exchange. -- // A losing writer must spin until the winner writes f1, -- // so that when he returns, he can use the linked cache entry. 
-+ // Competing writers must acquire exclusive access via a lock. -+ // A losing writer waits on the lock until the winner writes f1 and leaves -+ // the lock, so that when the losing writer returns, he can use the linked -+ // cache entry. -+ -+ Thread* THREAD = Thread::current(); -+ ObjectLocker ol(cpool, THREAD); -+ if (!is_f1_null()) { -+ return; -+ } - - bool has_appendix = appendix.not_null(); - if (!has_appendix) { -@@ -292,20 +303,11 @@ - appendix = Universe::void_mirror(); - } - -- bool owner = -- init_method_flags_atomic(as_TosState(adapter->result_type()), -+ set_method_flags(as_TosState(adapter->result_type()), - ((has_appendix ? 1 : 0) << has_appendix_shift) | - ( 1 << is_vfinal_shift) | - ( 1 << is_final_shift), - adapter->size_of_parameters()); -- if (!owner) { -- while (is_f1_null()) { -- // Pause momentarily on a low-level lock, to allow racing thread to win. -- MutexLockerEx mu(Patching_lock, Mutex::_no_safepoint_check_flag); -- os::yield(); -- } -- return; -- } - - if (TraceInvokeDynamic) { - tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method="PTR_FORMAT" ", -diff -r 19ac51ce4be7 -r 98f6e8bc55e8 src/share/vm/oops/cpCacheOop.hpp ---- openjdk/hotspot/src/share/vm/oops/cpCacheOop.hpp Thu Feb 06 14:24:53 2014 +0000 -+++ openjdk/hotspot/src/share/vm/oops/cpCacheOop.hpp Mon Sep 10 16:37:22 2012 -0700 -@@ -222,11 +222,13 @@ - ); - - void set_method_handle( -+ constantPoolHandle cpool, // holding constant pool (required for locking) - methodHandle method, // adapter for invokeExact, etc. - Handle appendix // stored in f1; could be a java.lang.invoke.MethodType - ); - - void set_dynamic_call( -+ constantPoolHandle cpool, // holding constant pool (required for locking) - methodHandle method, // adapter for this call site - Handle appendix // stored in f1; could be a java.lang.invoke.CallSite - ); -@@ -247,6 +249,7 @@ - // resolution logic needs to make slightly different assessments about the - // number and types of arguments. 
- void set_method_handle_common( -+ constantPoolHandle cpool, // holding constant pool (required for locking) - Bytecodes::Code invoke_code, // _invokehandle or _invokedynamic - methodHandle adapter, // invoker method (f2) - Handle appendix // appendix such as CallSite, MethodType, etc. (f1) diff -r 531847dfec6f -r ed2108ad126a patches/zero/7200949-jruby_fail.patch --- a/patches/zero/7200949-jruby_fail.patch Thu Mar 27 03:50:20 2014 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1045 +0,0 @@ -# HG changeset patch -# User twisti -# Date 1391807860 0 -# Fri Feb 07 21:17:40 2014 +0000 -# Node ID a66016d23db17dbe4b8d54b1680f732a116e0a4c -# Parent 38ae397aa523096aa3f94d23e1a38aa75e55f8f5 -7200949: JSR 292: rubybench/bench/time/bench_base64.rb fails with jruby.jar not on boot class path -Reviewed-by: jrose, kvn - -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/ci/ciClassList.hpp ---- openjdk/hotspot/src/share/vm/ci/ciClassList.hpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/ci/ciClassList.hpp Fri Feb 07 21:17:40 2014 +0000 -@@ -49,6 +49,7 @@ - class ciCallSite; - class ciMemberName; - class ciMethodHandle; -+class ciMethodType; - class ciMethod; - class ciMethodData; - class ciReceiverTypeData; // part of ciMethodData -@@ -105,6 +106,7 @@ - friend class ciMethod; \ - friend class ciMethodData; \ - friend class ciMethodHandle; \ -+friend class ciMethodType; \ - friend class ciReceiverTypeData; \ - friend class ciSymbol; \ - friend class ciArray; \ -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/ci/ciMethodType.hpp ---- /dev/null Thu Jan 01 00:00:00 1970 +0000 -+++ openjdk/hotspot/src/share/vm/ci/ciMethodType.hpp Fri Feb 07 21:17:40 2014 +0000 -@@ -0,0 +1,76 @@ -+/* -+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. -+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
-+ * -+ * This code is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 only, as -+ * published by the Free Software Foundation. -+ * -+ * This code is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -+ * version 2 for more details (a copy is included in the LICENSE file that -+ * accompanied this code). -+ * -+ * You should have received a copy of the GNU General Public License version -+ * 2 along with this work; if not, write to the Free Software Foundation, -+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -+ * or visit www.oracle.com if you need additional information or have any -+ * questions. -+ * -+ */ -+ -+#ifndef SHARE_VM_CI_CIMETHODTYPE_HPP -+#define SHARE_VM_CI_CIMETHODTYPE_HPP -+ -+#include "ci/ciInstance.hpp" -+#include "ci/ciUtilities.hpp" -+#include "classfile/javaClasses.hpp" -+ -+// ciMethodType -+// -+// The class represents a java.lang.invoke.MethodType object. -+class ciMethodType : public ciInstance { -+private: -+ ciType* class_to_citype(oop klass_oop) const { -+ if (java_lang_Class::is_primitive(klass_oop)) { -+ BasicType bt = java_lang_Class::primitive_type(klass_oop); -+ return ciType::make(bt); -+ } else { -+ klassOop k = java_lang_Class::as_klassOop(klass_oop); -+ return CURRENT_ENV->get_object(k)->as_klass(); -+ } -+ } -+ -+public: -+ ciMethodType(instanceHandle h_i) : ciInstance(h_i) {} -+ -+ // What kind of ciObject is this? 
-+ bool is_method_type() const { return true; } -+ -+ ciType* rtype() const { -+ GUARDED_VM_ENTRY( -+ oop rtype = java_lang_invoke_MethodType::rtype(get_oop()); -+ return class_to_citype(rtype); -+ ) -+ } -+ -+ int ptype_count() const { -+ GUARDED_VM_ENTRY(return java_lang_invoke_MethodType::ptype_count(get_oop());) -+ } -+ -+ int ptype_slot_count() const { -+ GUARDED_VM_ENTRY(return java_lang_invoke_MethodType::ptype_slot_count(get_oop());) -+ } -+ -+ ciType* ptype_at(int index) const { -+ GUARDED_VM_ENTRY( -+ oop ptype = java_lang_invoke_MethodType::ptype(get_oop(), index); -+ return class_to_citype(ptype); -+ ) -+ } -+}; -+ -+#endif // SHARE_VM_CI_CIMETHODTYPE_HPP -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/ci/ciObject.hpp ---- openjdk/hotspot/src/share/vm/ci/ciObject.hpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/ci/ciObject.hpp Fri Feb 07 21:17:40 2014 +0000 -@@ -146,6 +146,7 @@ - virtual bool is_method() { return false; } - virtual bool is_method_data() { return false; } - virtual bool is_method_handle() const { return false; } -+ virtual bool is_method_type() const { return false; } - virtual bool is_array() { return false; } - virtual bool is_obj_array() { return false; } - virtual bool is_type_array() { return false; } -@@ -193,103 +194,107 @@ - } - - // Subclass casting with assertions. 
-- ciNullObject* as_null_object() { -+ ciNullObject* as_null_object() { - assert(is_null_object(), "bad cast"); - return (ciNullObject*)this; - } -- ciCallSite* as_call_site() { -+ ciCallSite* as_call_site() { - assert(is_call_site(), "bad cast"); -- return (ciCallSite*) this; -+ return (ciCallSite*)this; - } -- ciCPCache* as_cpcache() { -+ ciCPCache* as_cpcache() { - assert(is_cpcache(), "bad cast"); -- return (ciCPCache*) this; -+ return (ciCPCache*)this; - } -- ciInstance* as_instance() { -+ ciInstance* as_instance() { - assert(is_instance(), "bad cast"); - return (ciInstance*)this; - } -- ciMemberName* as_member_name() { -+ ciMemberName* as_member_name() { - assert(is_member_name(), "bad cast"); - return (ciMemberName*)this; - } -- ciMethod* as_method() { -+ ciMethod* as_method() { - assert(is_method(), "bad cast"); - return (ciMethod*)this; - } -- ciMethodData* as_method_data() { -+ ciMethodData* as_method_data() { - assert(is_method_data(), "bad cast"); - return (ciMethodData*)this; - } -- ciMethodHandle* as_method_handle() { -+ ciMethodHandle* as_method_handle() { - assert(is_method_handle(), "bad cast"); -- return (ciMethodHandle*) this; -+ return (ciMethodHandle*)this; - } -- ciArray* as_array() { -+ ciMethodType* as_method_type() { -+ assert(is_method_type(), "bad cast"); -+ return (ciMethodType*)this; -+ } -+ ciArray* as_array() { - assert(is_array(), "bad cast"); - return (ciArray*)this; - } -- ciObjArray* as_obj_array() { -+ ciObjArray* as_obj_array() { - assert(is_obj_array(), "bad cast"); - return (ciObjArray*)this; - } -- ciTypeArray* as_type_array() { -+ ciTypeArray* as_type_array() { - assert(is_type_array(), "bad cast"); - return (ciTypeArray*)this; - } -- ciSymbol* as_symbol() { -+ ciSymbol* as_symbol() { - assert(is_symbol(), "bad cast"); - return (ciSymbol*)this; - } -- ciType* as_type() { -+ ciType* as_type() { - assert(is_type(), "bad cast"); - return (ciType*)this; - } -- ciReturnAddress* as_return_address() { -+ ciReturnAddress* 
as_return_address() { - assert(is_return_address(), "bad cast"); - return (ciReturnAddress*)this; - } -- ciKlass* as_klass() { -+ ciKlass* as_klass() { - assert(is_klass(), "bad cast"); - return (ciKlass*)this; - } -- ciInstanceKlass* as_instance_klass() { -+ ciInstanceKlass* as_instance_klass() { - assert(is_instance_klass(), "bad cast"); - return (ciInstanceKlass*)this; - } -- ciMethodKlass* as_method_klass() { -+ ciMethodKlass* as_method_klass() { - assert(is_method_klass(), "bad cast"); - return (ciMethodKlass*)this; - } -- ciArrayKlass* as_array_klass() { -+ ciArrayKlass* as_array_klass() { - assert(is_array_klass(), "bad cast"); - return (ciArrayKlass*)this; - } -- ciObjArrayKlass* as_obj_array_klass() { -+ ciObjArrayKlass* as_obj_array_klass() { - assert(is_obj_array_klass(), "bad cast"); - return (ciObjArrayKlass*)this; - } -- ciTypeArrayKlass* as_type_array_klass() { -+ ciTypeArrayKlass* as_type_array_klass() { - assert(is_type_array_klass(), "bad cast"); - return (ciTypeArrayKlass*)this; - } -- ciKlassKlass* as_klass_klass() { -+ ciKlassKlass* as_klass_klass() { - assert(is_klass_klass(), "bad cast"); - return (ciKlassKlass*)this; - } -- ciInstanceKlassKlass* as_instance_klass_klass() { -+ ciInstanceKlassKlass* as_instance_klass_klass() { - assert(is_instance_klass_klass(), "bad cast"); - return (ciInstanceKlassKlass*)this; - } -- ciArrayKlassKlass* as_array_klass_klass() { -+ ciArrayKlassKlass* as_array_klass_klass() { - assert(is_array_klass_klass(), "bad cast"); - return (ciArrayKlassKlass*)this; - } -- ciObjArrayKlassKlass* as_obj_array_klass_klass() { -+ ciObjArrayKlassKlass* as_obj_array_klass_klass() { - assert(is_obj_array_klass_klass(), "bad cast"); - return (ciObjArrayKlassKlass*)this; - } -- ciTypeArrayKlassKlass* as_type_array_klass_klass() { -+ ciTypeArrayKlassKlass* as_type_array_klass_klass() { - assert(is_type_array_klass_klass(), "bad cast"); - return (ciTypeArrayKlassKlass*)this; - } -diff -r 38ae397aa523 -r a66016d23db1 
src/share/vm/ci/ciObjectFactory.cpp ---- openjdk/hotspot/src/share/vm/ci/ciObjectFactory.cpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/ci/ciObjectFactory.cpp Fri Feb 07 21:17:40 2014 +0000 -@@ -32,6 +32,7 @@ - #include "ci/ciMethod.hpp" - #include "ci/ciMethodData.hpp" - #include "ci/ciMethodHandle.hpp" -+#include "ci/ciMethodType.hpp" - #include "ci/ciMethodKlass.hpp" - #include "ci/ciNullObject.hpp" - #include "ci/ciObjArray.hpp" -@@ -349,6 +350,8 @@ - return new (arena()) ciMemberName(h_i); - else if (java_lang_invoke_MethodHandle::is_instance(o)) - return new (arena()) ciMethodHandle(h_i); -+ else if (java_lang_invoke_MethodType::is_instance(o)) -+ return new (arena()) ciMethodType(h_i); - else - return new (arena()) ciInstance(h_i); - } else if (o->is_objArray()) { -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/ci/ciSignature.cpp ---- openjdk/hotspot/src/share/vm/ci/ciSignature.cpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/ci/ciSignature.cpp Fri Feb 07 21:17:40 2014 +0000 -@@ -23,6 +23,7 @@ - */ - - #include "precompiled.hpp" -+#include "ci/ciMethodType.hpp" - #include "ci/ciSignature.hpp" - #include "ci/ciUtilities.hpp" - #include "memory/allocation.inline.hpp" -@@ -80,6 +81,24 @@ - } - - // ------------------------------------------------------------------ -+// ciSignature::ciSignature -+ciSignature::ciSignature(ciKlass* accessing_klass, ciSymbol* symbol, ciMethodType* method_type) : -+ _symbol(symbol), -+ _accessing_klass(accessing_klass), -+ _size( method_type->ptype_slot_count()), -+ _count(method_type->ptype_count()) -+{ -+ ASSERT_IN_VM; -+ EXCEPTION_CONTEXT; -+ Arena* arena = CURRENT_ENV->arena(); -+ _types = new (arena) GrowableArray(arena, _count + 1, 0, NULL); -+ for (int i = 0; i < _count; i++) { -+ _types->append(method_type->ptype_at(i)); -+ } -+ _types->append(method_type->rtype()); -+} -+ -+// ------------------------------------------------------------------ - // ciSignature::return_type - // - 
// What is the return type of this signature? -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/ci/ciSignature.hpp ---- openjdk/hotspot/src/share/vm/ci/ciSignature.hpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/ci/ciSignature.hpp Fri Feb 07 21:17:40 2014 +0000 -@@ -47,6 +47,7 @@ - friend class ciObjectFactory; - - ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* signature); -+ ciSignature(ciKlass* accessing_klass, ciSymbol* signature, ciMethodType* method_type); - - void get_all_klasses(); - -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/ci/ciStreams.cpp ---- openjdk/hotspot/src/share/vm/ci/ciStreams.cpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/ci/ciStreams.cpp Fri Feb 07 21:17:40 2014 +0000 -@@ -363,12 +363,15 @@ - constantPoolHandle cpool(_method->get_methodOop()->constants()); - ciMethod* m = env->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder); - will_link = m->is_loaded(); -- // Get declared method signature and return it. -- if (has_optional_appendix()) { -- const int sig_index = get_method_signature_index(); -- Symbol* sig_sym = cpool->symbol_at(sig_index); -- ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass(); -- (*declared_signature_result) = new (env->arena()) ciSignature(pool_holder, cpool, env->get_symbol(sig_sym)); -+ -+ // Use the MethodType stored in the CP cache to create a signature -+ // with correct types (in respect to class loaders). 
-+ if (has_method_type()) { -+ ciSymbol* sig_sym = env->get_symbol(cpool->symbol_at(get_method_signature_index())); -+ ciKlass* pool_holder = env->get_object(cpool->pool_holder())->as_klass(); -+ ciMethodType* method_type = get_method_type(); -+ ciSignature* declared_signature = new (env->arena()) ciSignature(pool_holder, sig_sym, method_type); -+ (*declared_signature_result) = declared_signature; - } else { - (*declared_signature_result) = m->signature(); - } -@@ -399,6 +402,31 @@ - } - - // ------------------------------------------------------------------ -+// ciBytecodeStream::has_method_type -+// -+// Returns true if there is a MethodType argument stored in the -+// constant pool cache at the current bci. -+bool ciBytecodeStream::has_method_type() { -+ GUARDED_VM_ENTRY( -+ constantPoolHandle cpool(_method->get_methodOop()->constants()); -+ return constantPoolOopDesc::has_method_type_at_if_loaded(cpool, get_method_index()); -+ ) -+} -+ -+// ------------------------------------------------------------------ -+// ciBytecodeStream::get_method_type -+// -+// Return the MethodType stored in the constant pool cache at -+// the current bci. -+ciMethodType* ciBytecodeStream::get_method_type() { -+ GUARDED_VM_ENTRY( -+ constantPoolHandle cpool(_method->get_methodOop()->constants()); -+ oop method_type_oop = constantPoolOopDesc::method_type_at_if_loaded(cpool, get_method_index()); -+ return CURRENT_ENV->get_object(method_type_oop)->as_method_type(); -+ ) -+} -+ -+// ------------------------------------------------------------------ - // ciBytecodeStream::get_declared_method_holder - // - // Get the declared holder of the currently referenced method. 
-diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/ci/ciStreams.hpp ---- openjdk/hotspot/src/share/vm/ci/ciStreams.hpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/ci/ciStreams.hpp Fri Feb 07 21:17:40 2014 +0000 -@@ -259,12 +259,14 @@ - int get_field_holder_index(); - int get_field_signature_index(); - -- ciMethod* get_method(bool& will_link, ciSignature* *declared_signature_result); -- bool has_appendix(); -- ciObject* get_appendix(); -- ciKlass* get_declared_method_holder(); -- int get_method_holder_index(); -- int get_method_signature_index(); -+ ciMethod* get_method(bool& will_link, ciSignature* *declared_signature_result); -+ bool has_appendix(); -+ ciObject* get_appendix(); -+ bool has_method_type(); -+ ciMethodType* get_method_type(); -+ ciKlass* get_declared_method_holder(); -+ int get_method_holder_index(); -+ int get_method_signature_index(); - - ciCPCache* get_cpcache() const; - ciCallSite* get_call_site(); -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/classfile/systemDictionary.cpp ---- openjdk/hotspot/src/share/vm/classfile/systemDictionary.cpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/classfile/systemDictionary.cpp Fri Feb 07 21:17:40 2014 +0000 -@@ -2432,7 +2432,8 @@ - methodHandle SystemDictionary::find_method_handle_invoker(Symbol* name, - Symbol* signature, - KlassHandle accessing_klass, -- Handle* appendix_result, -+ Handle *appendix_result, -+ Handle *method_type_result, - TRAPS) { - methodHandle empty; - assert(EnableInvokeDynamic, ""); -@@ -2464,6 +2465,7 @@ - vmSymbols::linkMethod_signature(), - &args, CHECK_(empty)); - Handle mname(THREAD, (oop) result.get_jobject()); -+ (*method_type_result) = method_type; - return unpack_method_and_appendix(mname, appendix_box, appendix_result, THREAD); - } - -@@ -2610,7 +2612,8 @@ - Handle bootstrap_specifier, - Symbol* name, - Symbol* type, -- Handle* appendix_result, -+ Handle *appendix_result, -+ Handle *method_type_result, - TRAPS) { - methodHandle 
empty; - Handle bsm, info; -@@ -2653,6 +2656,7 @@ - vmSymbols::linkCallSite_signature(), - &args, CHECK_(empty)); - Handle mname(THREAD, (oop) result.get_jobject()); -+ (*method_type_result) = method_type; - return unpack_method_and_appendix(mname, appendix_box, appendix_result, THREAD); - } - -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/classfile/systemDictionary.hpp ---- openjdk/hotspot/src/share/vm/classfile/systemDictionary.hpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/classfile/systemDictionary.hpp Fri Feb 07 21:17:40 2014 +0000 -@@ -488,6 +488,7 @@ - Symbol* signature, - KlassHandle accessing_klass, - Handle *appendix_result, -+ Handle *method_type_result, - TRAPS); - // for a given signature, find the internal MethodHandle method (linkTo* or invokeBasic) - // (does not ask Java, since this is a low-level intrinsic defined by the JVM) -@@ -514,6 +515,7 @@ - Symbol* name, - Symbol* type, - Handle *appendix_result, -+ Handle *method_type_result, - TRAPS); - - // Utility for printing loader "name" as part of tracing constraints -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/interpreter/bytecodeInterpreter.cpp ---- openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Fri Feb 07 21:17:40 2014 +0000 -@@ -2216,12 +2216,11 @@ - methodOop method = cache->f1_as_method(); - VERIFY_OOP(method); - -- /** Re-enabled in 7200949 - if (cache->has_appendix()) { - constantPoolOop constants = METHOD->constants(); - SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); - MORE_STACK(1); -- } **/ -+ } - - istate->set_msg(call_method); - istate->set_callee(method); -@@ -2249,12 +2248,11 @@ - - VERIFY_OOP(method); - -- /** Re-enabled in 7200949 - if (cache->has_appendix()) { - constantPoolOop constants = METHOD->constants(); - SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); - MORE_STACK(1); -- } **/ -+ } - - 
istate->set_msg(call_method); - istate->set_callee(method); -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/interpreter/interpreterRuntime.cpp ---- openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Fri Feb 07 21:17:40 2014 +0000 -@@ -764,7 +764,8 @@ - cache_entry(thread)->set_method_handle( - pool, - info.resolved_method(), -- info.resolved_appendix()); -+ info.resolved_appendix(), -+ info.resolved_method_type()); - } - IRT_END - -@@ -791,7 +792,8 @@ - pool->cache()->secondary_entry_at(index)->set_dynamic_call( - pool, - info.resolved_method(), -- info.resolved_appendix()); -+ info.resolved_appendix(), -+ info.resolved_method_type()); - } - IRT_END - -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/interpreter/linkResolver.cpp ---- openjdk/hotspot/src/share/vm/interpreter/linkResolver.cpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/interpreter/linkResolver.cpp Fri Feb 07 21:17:40 2014 +0000 -@@ -99,7 +99,7 @@ - assert(!resolved_method->is_compiled_lambda_form(), "these must be handled via an invokehandle call"); - } - --void CallInfo::set_handle(methodHandle resolved_method, Handle resolved_appendix, TRAPS) { -+void CallInfo::set_handle(methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS) { - if (resolved_method.is_null()) { - THROW_MSG(vmSymbols::java_lang_InternalError(), "resolved method is null"); - } -@@ -110,7 +110,8 @@ - int vtable_index = methodOopDesc::nonvirtual_vtable_index; - assert(resolved_method->vtable_index() == vtable_index, ""); - set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK); -- _resolved_appendix = resolved_appendix; -+ _resolved_appendix = resolved_appendix; -+ _resolved_method_type = resolved_method_type; - } - - void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle 
resolved_method, methodHandle selected_method, int vtable_index, TRAPS) { -@@ -221,7 +222,8 @@ - void LinkResolver::lookup_polymorphic_method(methodHandle& result, - KlassHandle klass, Symbol* name, Symbol* full_signature, - KlassHandle current_klass, -- Handle* appendix_result_or_null, -+ Handle *appendix_result_or_null, -+ Handle *method_type_result, - TRAPS) { - vmIntrinsics::ID iid = MethodHandles::signature_polymorphic_name_id(name); - if (TraceMethodHandles) { -@@ -275,10 +277,12 @@ - } - - Handle appendix; -+ Handle method_type; - result = SystemDictionary::find_method_handle_invoker(name, - full_signature, - current_klass, - &appendix, -+ &method_type, - CHECK); - if (TraceMethodHandles) { - tty->print("lookup_polymorphic_method => (via Java) "); -@@ -307,6 +311,7 @@ - - assert(appendix_result_or_null != NULL, ""); - (*appendix_result_or_null) = appendix; -+ (*method_type_result) = method_type; - return; - } - } -@@ -419,7 +424,7 @@ - if (resolved_method.is_null()) { - // JSR 292: see if this is an implicitly generated method MethodHandle.linkToVirtual(*...), etc - lookup_polymorphic_method(resolved_method, resolved_klass, method_name, method_signature, -- current_klass, (Handle*)NULL, THREAD); -+ current_klass, (Handle*)NULL, (Handle*)NULL, THREAD); - if (HAS_PENDING_EXCEPTION) { - nested_exception = Handle(THREAD, PENDING_EXCEPTION); - CLEAR_PENDING_EXCEPTION; -@@ -1207,11 +1212,12 @@ - assert(resolved_klass() == SystemDictionary::MethodHandle_klass(), ""); - assert(MethodHandles::is_signature_polymorphic_name(method_name), ""); - methodHandle resolved_method; -- Handle resolved_appendix; -+ Handle resolved_appendix; -+ Handle resolved_method_type; - lookup_polymorphic_method(resolved_method, resolved_klass, - method_name, method_signature, -- current_klass, &resolved_appendix, CHECK); -- result.set_handle(resolved_method, resolved_appendix, CHECK); -+ current_klass, &resolved_appendix, &resolved_method_type, CHECK); -+ result.set_handle(resolved_method, 
resolved_appendix, resolved_method_type, CHECK); - } - - -@@ -1219,7 +1225,7 @@ - assert(EnableInvokeDynamic, ""); - pool->set_invokedynamic(); // mark header to flag active call sites - -- //resolve_pool(, method_name, method_signature, current_klass, pool, index, CHECK); -+ //resolve_pool(, method_name, method_signature, current_klass, pool, index, CHECK); - Symbol* method_name = pool->name_ref_at(index); - Symbol* method_signature = pool->signature_ref_at(index); - KlassHandle current_klass = KlassHandle(THREAD, pool->pool_holder()); -@@ -1236,9 +1242,10 @@ - bootstrap_specifier = Handle(THREAD, bsm_info); - } - if (!cpce->is_f1_null()) { -- methodHandle method(THREAD, cpce->f2_as_vfinal_method()); -- Handle appendix(THREAD, cpce->has_appendix() ? cpce->f1_appendix() : (oop)NULL); -- result.set_handle(method, appendix, CHECK); -+ methodHandle method( THREAD, cpce->f2_as_vfinal_method()); -+ Handle appendix( THREAD, cpce->appendix_if_resolved(pool)); -+ Handle method_type(THREAD, cpce->method_type_if_resolved(pool)); -+ result.set_handle(method, appendix, method_type, CHECK); - return; - } - -@@ -1260,11 +1267,13 @@ - // JSR 292: this must resolve to an implicitly generated method MH.linkToCallSite(*...) - // The appendix argument is likely to be a freshly-created CallSite. 
- Handle resolved_appendix; -+ Handle resolved_method_type; - methodHandle resolved_method = - SystemDictionary::find_dynamic_call_site_invoker(current_klass, - bootstrap_specifier, - method_name, method_signature, - &resolved_appendix, -+ &resolved_method_type, - CHECK); - if (HAS_PENDING_EXCEPTION) { - if (TraceMethodHandles) { -@@ -1285,7 +1294,7 @@ - THROW_MSG_CAUSE(vmSymbols::java_lang_BootstrapMethodError(), - "BootstrapMethodError", nested_exception) - } -- result.set_handle(resolved_method, resolved_appendix, CHECK); -+ result.set_handle(resolved_method, resolved_appendix, resolved_method_type, CHECK); - } - - //------------------------------------------------------------------------------------------------------------------------ -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/interpreter/linkResolver.hpp ---- openjdk/hotspot/src/share/vm/interpreter/linkResolver.hpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/interpreter/linkResolver.hpp Fri Feb 07 21:17:40 2014 +0000 -@@ -76,12 +76,13 @@ - methodHandle _selected_method; // dynamic (actual) target method - int _vtable_index; // vtable index of selected method - Handle _resolved_appendix; // extra argument in constant pool (if CPCE::has_appendix) -+ Handle _resolved_method_type; // MethodType (for invokedynamic and invokehandle call sites) - -- void set_static( KlassHandle resolved_klass, methodHandle resolved_method , TRAPS); -- void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method , TRAPS); -- void set_virtual( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS); -- void set_handle( methodHandle resolved_method, Handle resolved_appendix, TRAPS); -- void set_common( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS); -+ void 
set_static( KlassHandle resolved_klass, methodHandle resolved_method , TRAPS); -+ void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method , TRAPS); -+ void set_virtual( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index , TRAPS); -+ void set_handle( methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS); -+ void set_common( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index , TRAPS); - - friend class LinkResolver; - -@@ -91,6 +92,7 @@ - methodHandle resolved_method() const { return _resolved_method; } - methodHandle selected_method() const { return _selected_method; } - Handle resolved_appendix() const { return _resolved_appendix; } -+ Handle resolved_method_type() const { return _resolved_method_type; } - - BasicType result_type() const { return selected_method()->result_type(); } - bool has_vtable_index() const { return _vtable_index >= 0; } -@@ -113,7 +115,7 @@ - static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); - static void lookup_method_in_interfaces (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); - static void lookup_polymorphic_method (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, -- KlassHandle current_klass, Handle* appendix_result_or_null, TRAPS); -+ KlassHandle current_klass, Handle *appendix_result_or_null, Handle *method_type_result, TRAPS); - - static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); - -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/interpreter/rewriter.cpp ---- openjdk/hotspot/src/share/vm/interpreter/rewriter.cpp Thu Feb 06 14:33:44 2014 +0000 -+++ 
openjdk/hotspot/src/share/vm/interpreter/rewriter.cpp Fri Feb 07 21:17:40 2014 +0000 -@@ -163,10 +163,14 @@ - if (status == 0) { - if (_pool->klass_ref_at_noresolve(cp_index) == vmSymbols::java_lang_invoke_MethodHandle() && - MethodHandles::is_signature_polymorphic_name(SystemDictionary::MethodHandle_klass(), -- _pool->name_ref_at(cp_index))) -+ _pool->name_ref_at(cp_index))) { -+ assert(has_cp_cache(cp_index), "should already have an entry"); -+ int cpc = maybe_add_cp_cache_entry(cp_index); // should already have an entry -+ int cpc2 = add_secondary_cp_cache_entry(cpc); - status = +1; -- else -+ } else { - status = -1; -+ } - _method_handle_invokers[cp_index] = status; - } - // We use a special internal bytecode for such methods (if non-static). -@@ -195,6 +199,10 @@ - int cp_index = Bytes::get_Java_u2(p); - int cpc = maybe_add_cp_cache_entry(cp_index); // add lazily - int cpc2 = add_secondary_cp_cache_entry(cpc); -+ // The second secondary entry is required to store the MethodType and -+ // must be the next entry. -+ int cpc3 = add_secondary_cp_cache_entry(cpc); -+ assert(cpc2 + 1 == cpc3, err_msg_res("must be consecutive: %d + 1 == %d", cpc2, cpc3)); - - // Replace the trailing four bytes with a CPC index for the dynamic - // call site. 
Unlike other CPC entries, there is one per bytecode, -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/oops/constantPoolOop.cpp ---- openjdk/hotspot/src/share/vm/oops/constantPoolOop.cpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/oops/constantPoolOop.cpp Fri Feb 07 21:17:40 2014 +0000 -@@ -270,13 +270,7 @@ - int which) { - assert(!constantPoolCacheOopDesc::is_secondary_index(which), "no indy instruction here"); - if (cpool->cache() == NULL) return NULL; // nothing to load yet -- int cache_index = get_cpcache_index(which); -- if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) { -- if (PrintMiscellaneous && (Verbose||WizardMode)) { -- tty->print_cr("bad operand %d in:", which); cpool->print(); -- } -- return NULL; -- } -+ int cache_index = decode_cpcache_index(which, true); - ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); - return e->method_if_resolved(cpool); - } -@@ -284,44 +278,33 @@ - - bool constantPoolOopDesc::has_appendix_at_if_loaded(constantPoolHandle cpool, int which) { - if (cpool->cache() == NULL) return false; // nothing to load yet -- // XXX Is there a simpler way to get to the secondary entry? 
-- ConstantPoolCacheEntry* e; -- if (constantPoolCacheOopDesc::is_secondary_index(which)) { -- e = cpool->cache()->secondary_entry_at(which); -- } else { -- int cache_index = get_cpcache_index(which); -- if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) { -- if (PrintMiscellaneous && (Verbose||WizardMode)) { -- tty->print_cr("bad operand %d in:", which); cpool->print(); -- } -- return false; -- } -- e = cpool->cache()->entry_at(cache_index); -- } -+ int cache_index = decode_cpcache_index(which, true); -+ ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); - return e->has_appendix(); - } - - - oop constantPoolOopDesc::appendix_at_if_loaded(constantPoolHandle cpool, int which) { - if (cpool->cache() == NULL) return NULL; // nothing to load yet -- // XXX Is there a simpler way to get to the secondary entry? -- ConstantPoolCacheEntry* e; -- if (constantPoolCacheOopDesc::is_secondary_index(which)) { -- e = cpool->cache()->secondary_entry_at(which); -- } else { -- int cache_index = get_cpcache_index(which); -- if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) { -- if (PrintMiscellaneous && (Verbose||WizardMode)) { -- tty->print_cr("bad operand %d in:", which); cpool->print(); -- } -- return NULL; -- } -- e = cpool->cache()->entry_at(cache_index); -- } -- if (!e->has_appendix()) { -- return NULL; -- } -- return e->f1_as_instance(); -+ int cache_index = decode_cpcache_index(which, true); -+ ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); -+ return e->appendix_if_resolved(cpool); -+} -+ -+ -+bool constantPoolOopDesc::has_method_type_at_if_loaded(constantPoolHandle cpool, int which) { -+ if (cpool->cache() == NULL) return false; // nothing to load yet -+ int cache_index = decode_cpcache_index(which, true); -+ ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); -+ return e->has_method_type(); -+} -+ -+oop constantPoolOopDesc::method_type_at_if_loaded(constantPoolHandle cpool, int which) { -+ if 
(cpool->cache() == NULL) return NULL; // nothing to load yet -+ int cache_index = decode_cpcache_index(which, true); -+ ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); // get next CPC entry -+ ConstantPoolCacheEntry* e2 = cpool->cache()->find_secondary_entry_for(e); -+ return e2->method_type_if_resolved(cpool); - } - - -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/oops/constantPoolOop.hpp ---- openjdk/hotspot/src/share/vm/oops/constantPoolOop.hpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/oops/constantPoolOop.hpp Fri Feb 07 21:17:40 2014 +0000 -@@ -674,6 +674,8 @@ - static methodOop method_at_if_loaded (constantPoolHandle this_oop, int which); - static bool has_appendix_at_if_loaded (constantPoolHandle this_oop, int which); - static oop appendix_at_if_loaded (constantPoolHandle this_oop, int which); -+ static bool has_method_type_at_if_loaded (constantPoolHandle this_oop, int which); -+ static oop method_type_at_if_loaded (constantPoolHandle this_oop, int which); - static klassOop klass_at_if_loaded (constantPoolHandle this_oop, int which); - static klassOop klass_ref_at_if_loaded (constantPoolHandle this_oop, int which); - // Same as above - but does LinkResolving. 
-@@ -704,6 +706,12 @@ - #endif //ASSERT - - static int get_cpcache_index(int index) { return index - CPCACHE_INDEX_TAG; } -+ static int decode_cpcache_index(int raw_index, bool invokedynamic_ok = false) { -+ if (invokedynamic_ok && constantPoolCacheOopDesc::is_secondary_index(raw_index)) -+ return constantPoolCacheOopDesc::decode_secondary_index(raw_index); -+ else -+ return get_cpcache_index(raw_index); -+ } - - private: - -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/oops/cpCacheOop.cpp ---- openjdk/hotspot/src/share/vm/oops/cpCacheOop.cpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/oops/cpCacheOop.cpp Fri Feb 07 21:17:40 2014 +0000 -@@ -266,21 +266,23 @@ - - - void ConstantPoolCacheEntry::set_method_handle(constantPoolHandle cpool, -- methodHandle adapter, Handle appendix) { -+ methodHandle adapter, -+ Handle appendix, Handle method_type) { - assert(!is_secondary_entry(), ""); -- set_method_handle_common(cpool, Bytecodes::_invokehandle, adapter, appendix); -+ set_method_handle_common(cpool, Bytecodes::_invokehandle, adapter, appendix, method_type); - } - - void ConstantPoolCacheEntry::set_dynamic_call(constantPoolHandle cpool, -- methodHandle adapter, Handle appendix) { -+ methodHandle adapter, -+ Handle appendix, Handle method_type) { - assert(is_secondary_entry(), ""); -- set_method_handle_common(cpool, Bytecodes::_invokedynamic, adapter, appendix); -+ set_method_handle_common(cpool, Bytecodes::_invokedynamic, adapter, appendix, method_type); - } - - void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool, - Bytecodes::Code invoke_code, - methodHandle adapter, -- Handle appendix) { -+ Handle appendix, Handle method_type) { - // NOTE: This CPCE can be the subject of data races. - // There are three words to update: flags, f2, f1 (in that order). - // Writers must store all other values before f1. 
-@@ -296,23 +298,28 @@ - return; - } - -- bool has_appendix = appendix.not_null(); -+ const bool has_appendix = appendix.not_null(); -+ const bool has_method_type = method_type.not_null(); -+ - if (!has_appendix) { - // The extra argument is not used, but we need a non-null value to signify linkage state. - // Set it to something benign that will never leak memory. - appendix = Universe::void_mirror(); - } - -+ // Write the flags. - set_method_flags(as_TosState(adapter->result_type()), -- ((has_appendix ? 1 : 0) << has_appendix_shift) | -- ( 1 << is_vfinal_shift) | -- ( 1 << is_final_shift), -+ ((has_appendix ? 1 : 0) << has_appendix_shift) | -+ ((has_method_type ? 1 : 0) << has_method_type_shift) | -+ ( 1 << is_vfinal_shift) | -+ ( 1 << is_final_shift), - adapter->size_of_parameters()); - - if (TraceInvokeDynamic) { -- tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method="PTR_FORMAT" ", -+ tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ", - invoke_code, -- (intptr_t)appendix(), (has_appendix ? "" : " (unused)"), -+ (intptr_t)appendix(), (has_appendix ? "" : " (unused)"), -+ (intptr_t)method_type(), (has_method_type ? "" : " (unused)"), - (intptr_t)adapter()); - adapter->print(); - if (has_appendix) appendix()->print(); -@@ -336,14 +343,31 @@ - // The fact that String and List are involved is encoded in the MethodType in f1. - // This allows us to create fewer method oops, while keeping type safety. - // -+ - set_f2_as_vfinal_method(adapter()); -+ -+ // Store MethodType, if any. -+ if (has_method_type) { -+ ConstantPoolCacheEntry* e2 = cpool->cache()->find_secondary_entry_for(this); -+ -+ // Write the flags. -+ e2->set_method_flags(as_TosState(adapter->result_type()), -+ ((has_method_type ? 
1 : 0) << has_method_type_shift) | -+ ( 1 << is_vfinal_shift) | -+ ( 1 << is_final_shift), -+ adapter->size_of_parameters()); -+ e2->release_set_f1(method_type()); -+ } -+ - assert(appendix.not_null(), "needed for linkage state"); - release_set_f1(appendix()); // This must be the last one to set (see NOTE above)! -+ - if (!is_secondary_entry()) { - // The interpreter assembly code does not check byte_2, - // but it is used by is_resolved, method_if_resolved, etc. - set_bytecode_2(invoke_code); - } -+ - NOT_PRODUCT(verify(tty)); - if (TraceInvokeDynamic) { - this->print(tty, 0); -@@ -401,6 +425,20 @@ - } - - -+oop ConstantPoolCacheEntry::appendix_if_resolved(constantPoolHandle cpool) { -+ if (is_f1_null() || !has_appendix()) -+ return NULL; -+ return f1_appendix(); -+} -+ -+ -+oop ConstantPoolCacheEntry::method_type_if_resolved(constantPoolHandle cpool) { -+ if (is_f1_null() || !has_method_type()) -+ return NULL; -+ return f1_as_instance(); -+} -+ -+ - class LocalOopClosure: public OopClosure { - private: - void (*_f)(oop*); -diff -r 38ae397aa523 -r a66016d23db1 src/share/vm/oops/cpCacheOop.hpp ---- openjdk/hotspot/src/share/vm/oops/cpCacheOop.hpp Thu Feb 06 14:33:44 2014 +0000 -+++ openjdk/hotspot/src/share/vm/oops/cpCacheOop.hpp Fri Feb 07 21:17:40 2014 +0000 -@@ -167,10 +167,11 @@ - tos_state_mask = right_n_bits(tos_state_bits), - tos_state_shift = BitsPerInt - tos_state_bits, // see verify_tos_state_shift below - // misc. 
option bits; can be any bit position in [16..27] -- is_vfinal_shift = 21, -- is_volatile_shift = 22, -- is_final_shift = 23, -- has_appendix_shift = 24, -+ is_vfinal_shift = 20, -+ is_volatile_shift = 21, -+ is_final_shift = 22, -+ has_appendix_shift = 23, -+ has_method_type_shift = 24, - is_forced_virtual_shift = 25, - is_field_entry_shift = 26, - // low order bits give field index (for FieldInfo) or method parameter size: -@@ -224,13 +225,15 @@ - void set_method_handle( - constantPoolHandle cpool, // holding constant pool (required for locking) - methodHandle method, // adapter for invokeExact, etc. -- Handle appendix // stored in f1; could be a java.lang.invoke.MethodType -+ Handle appendix, // stored in f1; could be a java.lang.invoke.MethodType -+ Handle method_type // stored in f1 (of secondary entry); is a java.lang.invoke.MethodType - ); - - void set_dynamic_call( - constantPoolHandle cpool, // holding constant pool (required for locking) - methodHandle method, // adapter for this call site -- Handle appendix // stored in f1; could be a java.lang.invoke.CallSite -+ Handle appendix, // stored in f1; could be a java.lang.invoke.CallSite -+ Handle method_type // stored in f1 (of secondary entry); is a java.lang.invoke.MethodType - ); - - // Common code for invokedynamic and MH invocations. -@@ -252,10 +255,13 @@ - constantPoolHandle cpool, // holding constant pool (required for locking) - Bytecodes::Code invoke_code, // _invokehandle or _invokedynamic - methodHandle adapter, // invoker method (f2) -- Handle appendix // appendix such as CallSite, MethodType, etc. (f1) -+ Handle appendix, // appendix such as CallSite, MethodType, etc. 
(f1) -+ Handle method_type // MethodType (f1 of secondary entry) - ); - -- methodOop method_if_resolved(constantPoolHandle cpool); -+ methodOop method_if_resolved(constantPoolHandle cpool); -+ oop appendix_if_resolved(constantPoolHandle cpool); -+ oop method_type_if_resolved(constantPoolHandle cpool); - - void set_parameter_size(int value); - -@@ -267,11 +273,11 @@ - case Bytecodes::_getfield : // fall through - case Bytecodes::_invokespecial : // fall through - case Bytecodes::_invokestatic : // fall through -+ case Bytecodes::_invokehandle : // fall through -+ case Bytecodes::_invokedynamic : // fall through - case Bytecodes::_invokeinterface : return 1; - case Bytecodes::_putstatic : // fall through - case Bytecodes::_putfield : // fall through -- case Bytecodes::_invokehandle : // fall through -- case Bytecodes::_invokedynamic : // fall through - case Bytecodes::_invokevirtual : return 2; - default : break; - } -@@ -310,7 +316,8 @@ - int parameter_size() const { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); } - bool is_volatile() const { return (_flags & (1 << is_volatile_shift)) != 0; } - bool is_final() const { return (_flags & (1 << is_final_shift)) != 0; } -- bool has_appendix() const { return (_flags & (1 << has_appendix_shift)) != 0; } -+ bool has_appendix() const { return (_flags & (1 << has_appendix_shift)) != 0; } -+ bool has_method_type() const { return (_flags & (1 << has_method_type_shift)) != 0; } - bool is_forced_virtual() const { return (_flags & (1 << is_forced_virtual_shift)) != 0; } - bool is_vfinal() const { return (_flags & (1 << is_vfinal_shift)) != 0; } - bool is_method_entry() const { return (_flags & (1 << is_field_entry_shift)) == 0; } -@@ -446,6 +453,29 @@ - return entry_at(primary_index); - } - -+ int index_of(ConstantPoolCacheEntry* e) { -+ assert(base() <= e && e < base() + length(), "oob"); -+ int cpc_index = (e - base()); -+ assert(entry_at(cpc_index) == e, "sanity"); -+ return cpc_index; -+ } -+ 
ConstantPoolCacheEntry* find_secondary_entry_for(ConstantPoolCacheEntry* e) { -+ const int cpc_index = index_of(e); -+ if (e->is_secondary_entry()) { -+ ConstantPoolCacheEntry* e2 = entry_at(cpc_index + 1); -+ assert(e->main_entry_index() == e2->main_entry_index(), ""); -+ return e2; -+ } else { -+ for (int i = length() - 1; i >= 0; i--) { -+ ConstantPoolCacheEntry* e2 = entry_at(i); -+ if (cpc_index == e2->main_entry_index()) -+ return e2; -+ } -+ } -+ fatal("no secondary entry found"); -+ return NULL; -+ } -+ - // Code generation - static ByteSize base_offset() { return in_ByteSize(sizeof(constantPoolCacheOopDesc)); } - static ByteSize entry_offset(int raw_index) { diff -r 531847dfec6f -r ed2108ad126a patches/zero/8029507-jvm_method_processing.patch --- a/patches/zero/8029507-jvm_method_processing.patch Thu Mar 27 03:50:20 2014 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,215 +0,0 @@ -diff -r a66016d23db1 src/share/vm/prims/methodHandles.cpp ---- openjdk/hotspot/src/share/vm/prims/methodHandles.cpp Fri Feb 07 21:17:40 2014 +0000 -+++ openjdk/hotspot/src/share/vm/prims/methodHandles.cpp Mon Feb 24 20:42:09 2014 +0000 -@@ -175,30 +175,32 @@ - } - - oop MethodHandles::init_method_MemberName(oop mname_oop, methodOop m, bool do_dispatch, -- klassOop receiver_limit) { -+ klassOop resolved_klass) { - AccessFlags mods = m->access_flags(); - int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS ); - int vmindex = methodOopDesc::nonvirtual_vtable_index; // implies never any dispatch -- klassOop mklass = m->method_holder(); -- if (receiver_limit == NULL) -- receiver_limit = mklass; -+ bool is_itable_call = false; -+ klassOop m_klass = m->method_holder(); -+ // resolved_klass is a copy of CallInfo::resolved_klass, if available -+ if (resolved_klass == NULL) -+ resolved_klass = m_klass; - if (m->is_initializer()) { - flags |= IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT); - } else if (mods.is_static()) { - flags |= IS_METHOD | 
(JVM_REF_invokeStatic << REFERENCE_KIND_SHIFT); -- } else if (receiver_limit != mklass && -- !Klass::cast(receiver_limit)->is_subtype_of(mklass)) { -+ } else if (resolved_klass != m_klass && -+ !Klass::cast(resolved_klass)->is_subtype_of(m_klass)) { - return NULL; // bad receiver limit -- } else if (Klass::cast(receiver_limit)->is_interface() && -- Klass::cast(mklass)->is_interface()) { -+ } else if (Klass::cast(resolved_klass)->is_interface() && -+ Klass::cast(m_klass)->is_interface()) { - flags |= IS_METHOD | (JVM_REF_invokeInterface << REFERENCE_KIND_SHIFT); -- receiver_limit = mklass; // ignore passed-in limit; interfaces are interconvertible - vmindex = klassItable::compute_itable_index(m); -- } else if (mklass != receiver_limit && Klass::cast(mklass)->is_interface()) { -+ is_itable_call = true; -+ } else if (m_klass != resolved_klass && Klass::cast(m_klass)->is_interface()) { - flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT); - // it is a miranda method, so m->vtable_index is not what we want - ResourceMark rm; -- klassVtable* vt = instanceKlass::cast(receiver_limit)->vtable(); -+ klassVtable* vt = instanceKlass::cast(resolved_klass)->vtable(); - vmindex = vt->index_of_miranda(m->name(), m->signature()); - } else if (!do_dispatch || m->can_be_statically_bound()) { - flags |= IS_METHOD | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT); -@@ -207,10 +209,36 @@ - vmindex = m->vtable_index(); - } - -+ if (vmindex >= 0 && !is_itable_call) { -+ if (Klass::cast(m_klass)->is_interface()) { -+ // This is a vtable call to an interface method (abstract "miranda method"). -+ // The vtable index is meaningless without a class (not interface) receiver type, so get one. -+ // (LinkResolver should help us figure this out.) 
-+ KlassHandle m_klass_non_interface = resolved_klass; -+ if (m_klass_non_interface->is_interface()) { -+ m_klass_non_interface = SystemDictionary::Object_klass(); -+#ifdef ASSERT -+ { ResourceMark rm; -+ methodOop m2 = m_klass_non_interface->vtable()->method_at(vmindex); -+ assert(m->name() == m2->name() && m->signature() == m2->signature(), -+ err_msg("at %d, %s != %s", vmindex, -+ m->name_and_sig_as_C_string(), m2->name_and_sig_as_C_string())); -+ } -+#endif //ASSERT -+ } -+ if (!m->is_public()) { -+ assert(m->is_public(), "virtual call must be to public interface method"); -+ return NULL; // elicit an error later in product build -+ } -+ assert(Klass::cast(resolved_klass)->is_subtype_of(m_klass_non_interface()), "virtual call must be type-safe"); -+ m_klass = m_klass_non_interface(); -+ } -+ } -+ - java_lang_invoke_MemberName::set_flags(mname_oop, flags); - java_lang_invoke_MemberName::set_vmtarget(mname_oop, m); - java_lang_invoke_MemberName::set_vmindex(mname_oop, vmindex); // vtable/itable index -- java_lang_invoke_MemberName::set_clazz(mname_oop, Klass::cast(receiver_limit)->java_mirror()); -+ java_lang_invoke_MemberName::set_clazz(mname_oop, Klass::cast(m_klass)->java_mirror()); - // Note: name and type can be lazily computed by resolve_MemberName, - // if Java code needs them as resolved String and MethodType objects. - // The clazz must be eagerly stored, because it provides a GC -@@ -580,7 +608,7 @@ - // An unresolved member name is a mere symbolic reference. - // Resolving it plants a vmtarget/vmindex in it, - // which refers dirctly to JVM internals. 
--Handle MethodHandles::resolve_MemberName(Handle mname, TRAPS) { -+Handle MethodHandles::resolve_MemberName(Handle mname, KlassHandle caller, TRAPS) { - Handle empty; - assert(java_lang_invoke_MemberName::is_instance(mname()), ""); - -@@ -659,21 +687,49 @@ - if (ref_kind == JVM_REF_invokeStatic) { - //do_dispatch = false; // no need, since statics are never dispatched - LinkResolver::resolve_static_call(result, -- defc, name, type, KlassHandle(), false, false, THREAD); -+ defc, name, type, caller, caller.not_null(), false, THREAD); - } else if (ref_kind == JVM_REF_invokeInterface) { - LinkResolver::resolve_interface_call(result, Handle(), defc, -- defc, name, type, KlassHandle(), false, false, THREAD); -+ defc, name, type, caller, caller.not_null(), false, THREAD); - } else if (mh_invoke_id != vmIntrinsics::_none) { - assert(!is_signature_polymorphic_static(mh_invoke_id), ""); - LinkResolver::resolve_handle_call(result, -- defc, name, type, KlassHandle(), THREAD); -+ defc, name, type, caller, THREAD); - } else if (ref_kind == JVM_REF_invokeSpecial) { - do_dispatch = false; // force non-virtual linkage - LinkResolver::resolve_special_call(result, -- defc, name, type, KlassHandle(), false, THREAD); -+ defc, name, type, caller, caller.not_null(), THREAD); -+ // CR 8029533: -+ // As a corner case, invokespecial can return a method *below* its resolved_klass. -+ // Since method search *starts* at the resolved_klass, the eventual -+ // method is almost always in a supertype *above* the resolved_klass. -+ // This pattern breaks when an invokespecial "over-reaches" beyond an -+ // immediate super to a method overridden by a super class. -+ // In that case, the selected method will be below the resolved_klass. -+ // (This is the behavior enabled by the famous ACC_SUPER classfile flag.) -+ // -+ // Downstream of this code, we make assumptions about resolved_klass being below m. -+ // (See init_method_MemberName, the comment "bad receiver limit".) 
-+ // We basically want to patch result._resolved_klass to be m.method_holder(). -+ // The simplest way to get this happier outcome is to re-resolve. -+ if (!HAS_PENDING_EXCEPTION && -+ caller.not_null() && -+ result.resolved_method().not_null()) { -+ // this is the m_klass value that will be checked later: -+ klassOop m_klass = result.resolved_method()->method_holder(); -+ if (m_klass != result.resolved_klass()() && -+ Klass::cast(m_klass)->is_subtype_of(result.resolved_klass()())) { -+ KlassHandle adjusted_defc(THREAD, m_klass); -+ LinkResolver::resolve_special_call(result, -+ adjusted_defc, name, type, caller, caller.not_null(), THREAD); -+ assert(HAS_PENDING_EXCEPTION // if there is something like an OOM, pass it up to caller -+ || result.resolved_method()->method_holder() == adjusted_defc(), -+ "same method, different resolved_klass"); -+ } -+ } - } else if (ref_kind == JVM_REF_invokeVirtual) { - LinkResolver::resolve_virtual_call(result, Handle(), defc, -- defc, name, type, KlassHandle(), false, false, THREAD); -+ defc, name, type, caller, caller.not_null(), false, THREAD); - } else { - assert(false, err_msg("ref_kind=%d", ref_kind)); - } -@@ -700,7 +756,7 @@ - assert(!HAS_PENDING_EXCEPTION, ""); - if (name == vmSymbols::object_initializer_name()) { - LinkResolver::resolve_special_call(result, -- defc, name, type, KlassHandle(), false, THREAD); -+ defc, name, type, caller, caller.not_null(), THREAD); - } else { - break; // will throw after end of switch - } -@@ -1044,7 +1100,12 @@ - if (VerifyMethodHandles && caller_jh != NULL && - java_lang_invoke_MemberName::clazz(mname()) != NULL) { - klassOop reference_klass = java_lang_Class::as_klassOop(java_lang_invoke_MemberName::clazz(mname())); -- if (reference_klass != NULL) { -+ if (reference_klass != NULL && Klass::cast(reference_klass)->oop_is_objArray()) { -+ reference_klass = objArrayKlass::cast(reference_klass)->bottom_klass(); -+ } -+ -+ // Reflection::verify_class_access can only handle instance classes. 
-+ if (reference_klass != NULL && Klass::cast(reference_klass)->oop_is_instance()) { - // Emulate LinkResolver::check_klass_accessability. - klassOop caller = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(caller_jh)); - if (!Reflection::verify_class_access(caller, -@@ -1055,7 +1116,11 @@ - } - } - -- Handle resolved = MethodHandles::resolve_MemberName(mname, CHECK_NULL); -+ KlassHandle caller(THREAD, -+ caller_jh == NULL ? (klassOop) NULL : -+ java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(caller_jh))); -+ Handle resolved = MethodHandles::resolve_MemberName(mname, caller, CHECK_NULL); -+ - if (resolved.is_null()) { - int flags = java_lang_invoke_MemberName::flags(mname()); - int ref_kind = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK; -diff -r a66016d23db1 src/share/vm/prims/methodHandles.hpp ---- openjdk/hotspot/src/share/vm/prims/methodHandles.hpp Fri Feb 07 21:17:40 2014 +0000 -+++ openjdk/hotspot/src/share/vm/prims/methodHandles.hpp Mon Feb 24 20:42:09 2014 +0000 -@@ -51,12 +51,12 @@ - - public: - // working with member names -- static Handle resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type -+ static Handle resolve_MemberName(Handle mname, KlassHandle caller, TRAPS); // compute vmtarget/vmindex from name/type - static void expand_MemberName(Handle mname, int suppress, TRAPS); // expand defc/name/type if missing - static Handle new_MemberName(TRAPS); // must be followed by init_MemberName - static oop init_MemberName(oop mname_oop, oop target_oop); // compute vmtarget/vmindex from target - static oop init_method_MemberName(oop mname_oop, methodOop m, bool do_dispatch, -- klassOop receiver_limit); -+ klassOop resolved_klass); - static oop init_field_MemberName(oop mname_oop, klassOop field_holder, - AccessFlags mods, oop type, oop name, - intptr_t offset, bool is_setter = false); -diff -r a66016d23db1 src/share/vm/runtime/reflection.cpp ---- openjdk/hotspot/src/share/vm/runtime/reflection.cpp Fri 
Feb 07 21:17:40 2014 +0000 -+++ openjdk/hotspot/src/share/vm/runtime/reflection.cpp Mon Feb 24 20:42:09 2014 +0000 -@@ -460,7 +460,7 @@ - // doesn't have a classloader. - if ((current_class == NULL) || - (current_class == new_class) || -- (instanceKlass::cast(new_class)->is_public()) || -+ (Klass::cast(new_class)->is_public()) || - is_same_class_package(current_class, new_class)) { - return true; - } diff -r 531847dfec6f -r ed2108ad126a patches/zero/arm-7023639.patch --- a/patches/zero/arm-7023639.patch Thu Mar 27 03:50:20 2014 +0000 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,361 +0,0 @@ -diff -r ad4c46e70192 src/cpu/zero/vm/asm_helper.cpp ---- openjdk/hotspot/src/cpu/zero/vm/asm_helper.cpp Tue Mar 11 15:45:00 2014 +0000 -+++ openjdk/hotspot/src/cpu/zero/vm/asm_helper.cpp Wed Mar 12 13:34:40 2014 +0000 -@@ -23,6 +23,10 @@ - #define ARCH_VFP (1<<17) - #define ARCH_CLZ (1<<18) - -+/* A workaround for private and protected fields */ -+#define private public -+#define protected public -+ - #include "precompiled.hpp" - #include "asm/assembler.hpp" - #include "interp_masm_zero.hpp" -@@ -33,8 +37,10 @@ - #include "oops/methodDataOop.hpp" - #include "oops/methodOop.hpp" - #include "oops/oop.inline.hpp" -+#include "oops/klassOop.hpp" - #include "prims/jvmtiExport.hpp" - #include "prims/jvmtiThreadState.hpp" -+#include "runtime/frame.hpp" - #include "runtime/deoptimization.hpp" - #include "runtime/frame.inline.hpp" - #include "runtime/sharedRuntime.hpp" -@@ -68,11 +74,22 @@ - // particular method. 
- #define NAME1 "sun.nio.ch.FileChannelImpl$Unmapper.run()V" - #define EQ(S1, S2) (S1 && (strncmp(S1, S2, strlen(S2)) == 0)) --extern "C" void my_trace(void *jpc, void *istate) -+extern "C" void my_trace(void *jpc, interpreterState istate) - { -- char *name = meth((interpreterState)istate); -- if (EQ(name, NAME1)); -- asm volatile("nop"); // Somewhere to put a breakpoint -+ JavaThread *jt = istate->thread(); -+ if (jt->zero_stack()->sp() && jt->top_zero_frame()) { -+ bool has_last_Java_frame = jt->has_last_Java_frame(); -+ if (!has_last_Java_frame) -+ jt->set_last_Java_frame(); -+ -+ StackFrameStream sfs(jt); -+ for(int i = 0; !sfs.is_done(); sfs.next(), i++) { -+ } -+ -+ // Reset the frame anchor if necessary -+ if (!has_last_Java_frame) -+ jt->reset_last_Java_frame(); -+ } - } - - extern "C" unsigned hwcap(void) -@@ -603,7 +620,7 @@ - print_def("CONSTANTPOOL_CACHE", offset_of(constantPoolOopDesc, _cache)); - print_def("CONSTANTPOOL_POOL_HOLDER", offset_of(constantPoolOopDesc, _pool_holder)); - print_def("CONSTANTPOOL_BASE", sizeof(constantPoolOopDesc)); -- print_def("CP_CACHE_VOLATILE_FIELD_FLAG_BIT", ConstantPoolCacheEntry::volatileField); -+ print_def("CP_CACHE_VOLATILE_FIELD_FLAG_BIT", ConstantPoolCacheEntry::is_volatile_shift); - print_def("CP_CACHE_FLAGS", offset_of(ConstantPoolCacheEntry, _flags)); - nl(); - print_def("CP_OFFSET", in_bytes(constantPoolCacheOopDesc::base_offset())); -@@ -704,10 +721,10 @@ - print_def("class_fully_initialized", instanceKlass::fully_initialized); - print_def("class_init_error", instanceKlass::initialization_error); - nl(); -- print_def("flag_methodInterface", 1 << ConstantPoolCacheEntry::methodInterface); -- print_def("flag_volatileField", 1 << ConstantPoolCacheEntry::volatileField); -- print_def("flag_vfinalMethod", 1 << ConstantPoolCacheEntry::vfinalMethod); -- print_def("flag_finalField", 1 << ConstantPoolCacheEntry::finalField); -+ print_def("flag_methodInterface", 1 << ConstantPoolCacheEntry::has_method_type_shift); -+ 
print_def("flag_volatileField", 1 << ConstantPoolCacheEntry::is_volatile_shift); -+ print_def("flag_vfinalMethod", 1 << ConstantPoolCacheEntry::is_vfinal_shift); -+ print_def("flag_finalField", 1 << ConstantPoolCacheEntry::is_final_shift); - nl(); - print_def("INVOCATIONCOUNTER_COUNTINCREMENT", InvocationCounter::count_increment); - nl(); -diff -r ad4c46e70192 src/cpu/zero/vm/bytecodes_arm.def ---- openjdk/hotspot/src/cpu/zero/vm/bytecodes_arm.def Tue Mar 11 15:45:00 2014 +0000 -+++ openjdk/hotspot/src/cpu/zero/vm/bytecodes_arm.def Wed Mar 12 13:34:40 2014 +0000 -@@ -1950,7 +1950,7 @@ - str r3, [thread, #THREAD_LAST_JAVA_SP] - ldr r0, [istate, #ISTATE_METHOD] - ldr r3, [r2, #0] -- ldrh r0, [r0, #40] -+ ldrh r0, [r0, #METHOD_MAXLOCALS] - add r1, r2, #4 - str r3, [thread, #THREAD_TOP_ZERO_FRAME] - -@@ -1981,7 +1981,7 @@ - str r3, [thread, #THREAD_LAST_JAVA_SP] - ldr r0, [istate, #ISTATE_METHOD] - ldr r3, [stack, #0] -- ldrh r0, [r0, #40] -+ ldrh r0, [r0, #METHOD_MAXLOCALS] - - str r3, [thread, #THREAD_TOP_ZERO_FRAME] - str r1, [stack, r0, lsl #2]! -@@ -2008,7 +2008,7 @@ - str r3, [thread, #THREAD_LAST_JAVA_SP] - ldr r0, [istate, #ISTATE_METHOD] - ldr r3, [r2, #0] -- ldrh r0, [r0, #40] -+ ldrh r0, [r0, #METHOD_MAXLOCALS] - add r1, r2, #4 - str r3, [thread, #THREAD_TOP_ZERO_FRAME] - -@@ -3993,7 +3996,7 @@ - str r3, [thread, #THREAD_LAST_JAVA_SP] - ldr r0, [istate, #ISTATE_METHOD] - ldr r3, [stack, #0] -- ldrh r0, [r0, #40] -+ ldrh r0, [r0, #METHOD_MAXLOCALS] - str r3, [thread, #THREAD_TOP_ZERO_FRAME] - str r1, [stack, r0, lsl #2]! - -@@ -4022,7 +4025,7 @@ - str r3, [thread, #THREAD_LAST_JAVA_SP] - ldr r0, [istate, #ISTATE_METHOD] - ldr r3, [stack, #0] -- ldrh r0, [r0, #40] -+ ldrh r0, [r0, #METHOD_MAXLOCALS] - str r3, [thread, #THREAD_TOP_ZERO_FRAME] - str r1, [stack, r0, lsl #2]! 
- -@@ -4429,7 +4432,7 @@ - str r3, [thread, #THREAD_LAST_JAVA_SP] - ldr r0, [istate, #ISTATE_METHOD] - ldr r3, [stack, #0] -- ldrh r0, [r0, #40] -+ ldrh r0, [r0, #METHOD_MAXLOCALS] - str r3, [thread, #THREAD_TOP_ZERO_FRAME] - str r1, [stack, r0, lsl #2]! - -diff -r ad4c46e70192 src/cpu/zero/vm/cppInterpreter_arm.S ---- openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_arm.S Tue Mar 11 15:45:00 2014 +0000 -+++ openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_arm.S Wed Mar 12 13:34:40 2014 +0000 -@@ -3431,7 +3444,7 @@ - str r3, [thread, #THREAD_LAST_JAVA_FP] - ldr r0, [istate, #ISTATE_METHOD] - ldr r3, [r2, #0] -- ldrh r0, [r0, #40] -+ ldrh r0, [r0, #METHOD_MAXLOCALS] - add r1, r2, #4 - str r3, [thread, #THREAD_TOP_ZERO_FRAME] - -@@ -3560,7 +3573,7 @@ - str r3, [thread, #THREAD_LAST_JAVA_SP] - ldr r0, [istate, #ISTATE_METHOD] - ldr r3, [r2, #0] -- ldrh r0, [r0, #40] -+ ldrh r0, [r0, #METHOD_MAXLOCALS] - add r1, r2, #4 - str r3, [thread, #THREAD_TOP_ZERO_FRAME] - -diff -r ad4c46e70192 src/cpu/zero/vm/cppInterpreter_zero.cpp ---- openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Tue Mar 11 15:45:00 2014 +0000 -+++ openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Wed Mar 12 13:34:40 2014 +0000 -@@ -1,6 +1,6 @@ - /* -- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. -- * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc. -+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2013 Red Hat, Inc. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it -@@ -66,9 +66,10 @@ - CALL_VM_NOCHECK_NOFIX(func) \ - fixup_after_potential_safepoint() - -- --#ifdef z_CPPDEBUG -+//#define CPPIDEBUG 1 -+#ifdef CPPIDEBUG - #define CPPINT_DEBUG( Z_code_ ) Z_code_ -+CPPINT_DEBUG ( static const char *FFng_Zero_Flag = "CPPINT_DEBUG_ON\n"; ) - #else - #define CPPINT_DEBUG( Z_code_ ) - #endif -@@ -618,6 +619,25 @@ - return 0; - } - -+int CppInterpreter::method_handle_entry(methodOop method, -+ intptr_t UNUSED, TRAPS) { -+ JavaThread *thread = (JavaThread *) THREAD; -+ ZeroStack *stack = thread->zero_stack(); -+ CPPINT_DEBUG( tty->print_cr( "method_handle : 0x%x , thread: 0x%x , stack: %0x%x.", \ -+ method, thread, stack ); ) -+ -+ return MethodHandles::method_handle_entry_invokeBasic(method, UNUSED, THREAD); -+} -+ -+void CppInterpreter::process_method_handle(oop method_handle, TRAPS) { -+ JavaThread *thread = (JavaThread *) THREAD; -+ ZeroStack *stack = thread->zero_stack(); -+ CPPINT_DEBUG( tty->print_cr( "process_method_handle : 0x%x , thread: 0x%x , stack: %0x%x.", \ -+ method_handle, thread, stack ); ) -+ methodOop method = (methodOop) java_lang_invoke_MemberName::vmtarget(method_handle); -+ MethodHandles::invoke_target(method, THREAD); -+} -+ - // The new slots will be inserted before slot insert_before. - // Slots < insert_before will have the same slot number after the insert. - // Slots >= insert_before will become old_slot + num_slots. -diff -r ad4c46e70192 src/cpu/zero/vm/cppInterpreter_zero.hpp ---- openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_zero.hpp Tue Mar 11 15:45:00 2014 +0000 -+++ openjdk/hotspot/src/cpu/zero/vm/cppInterpreter_zero.hpp Wed Mar 12 13:34:40 2014 +0000 -@@ -1,6 +1,6 @@ - /* -- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. -- * Copyright 2007, 2008, 2010, 2011 Red Hat, Inc. -+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. -+ * Copyright 2013 Red Hat, Inc. 
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it -@@ -37,6 +37,7 @@ - static int accessor_entry(methodOop method, intptr_t UNUSED, TRAPS); - static int empty_entry(methodOop method, intptr_t UNUSED, TRAPS); - static int method_handle_entry(methodOop method, intptr_t UNUSED, TRAPS); -+ static void process_method_handle(oop method_handle, TRAPS); - - public: - // Main loop of normal_entry -@@ -44,7 +45,6 @@ - - private: - // Helpers for method_handle_entry -- static void process_method_handle(oop method_handle, TRAPS); - static void insert_vmslots(int insert_before, int num_slots, TRAPS); - static void remove_vmslots(int first_slot, int num_slots, TRAPS); - static BasicType result_type_of_handle(oop method_handle); -diff -r ad4c46e70192 src/cpu/zero/vm/methodHandles_zero.hpp ---- openjdk/hotspot/src/cpu/zero/vm/methodHandles_zero.hpp Tue Mar 11 15:45:00 2014 +0000 -+++ openjdk/hotspot/src/cpu/zero/vm/methodHandles_zero.hpp Wed Mar 12 13:34:40 2014 +0000 -@@ -1,6 +1,6 @@ - /* -- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. -- * Copyright 2011, 2012 Red Hat, Inc. -+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. -+ * Copyright 2013 Red Hat, Inc. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it -@@ -29,10 +29,11 @@ - adapter_code_size = sizeof(ZeroEntry) * (Interpreter::method_handle_invoke_LAST - Interpreter::method_handle_invoke_FIRST + 1) - }; - -+public: -+ static void invoke_target(methodOop method, TRAPS); -+ static int method_handle_entry_invokeBasic(methodOop method, intptr_t UNUSED, TRAPS); - private: - static oop popFromStack(TRAPS); -- static void invoke_target(methodOop method, TRAPS); -- static int method_handle_entry_invokeBasic(methodOop method, intptr_t UNUSED, TRAPS); - static int method_handle_entry_linkToStaticOrSpecial(methodOop method, intptr_t UNUSED, TRAPS); - static int method_handle_entry_linkToVirtual(methodOop method, intptr_t UNUSED, TRAPS); - static int method_handle_entry_linkToInterface(methodOop method, intptr_t UNUSED, TRAPS); -diff -r ad4c46e70192 src/cpu/zero/vm/thumb2.cpp ---- openjdk/hotspot/src/cpu/zero/vm/thumb2.cpp Tue Mar 11 15:45:00 2014 +0000 -+++ openjdk/hotspot/src/cpu/zero/vm/thumb2.cpp Wed Mar 12 13:34:40 2014 +0000 -@@ -430,13 +430,15 @@ - - #ifdef PRODUCT - --#define JASSERT(cond, msg) 0 -+#define JASSERT(cond, msg) - #define J_Unimplemented() longjmp(compiler_error_env, COMPILER_RESULT_FATAL) -+#define JDEBUG_( _j_ ) - - #else - - #define JASSERT(cond, msg) do { if (!(cond)) fatal(msg); } while (0) - #define J_Unimplemented() { report_unimplemented(__FILE__, __LINE__); BREAKPOINT; } -+#define JDEBUG_( _j_ ) _j_ - - #endif // PRODUCT - -@@ -4571,7 +4573,7 @@ - if (!cache->is_resolved((Bytecodes::Code)opc_getfield)) return 0; - - TosState tos_type = cache->flag_state(); -- int field_offset = cache->f2(); -+ int field_offset = cache->f2_as_index(); - - // Slow entry point - callee save - // R0 = method -@@ -5886,7 +5890,7 @@ - } - - TosState tos_type = cache->flag_state(); -- int field_offset = cache->f2(); -+ int field_offset = cache->f2_as_index(); - - if (tos_type == ltos || tos_type == dtos) { - Reg r_lo, r_hi; -@@ -5949,7 
+5953,8 @@ - } - - TosState tos_type = cache->flag_state(); -- int field_offset = cache->f2(); -+ int field_offset = cache->f2_as_index(); -+ JDEBUG_( tty->print("f2_as_index getstatic %d: %s: %s %d\n", index , name->as_C_string(), sig->as_C_string(), field_offset); ); - - if (tos_type == ltos || tos_type == dtos) { - Reg r_lo, r_hi, r_addr; -@@ -6018,7 +6023,7 @@ - storeBarrier(jinfo->codebuf); - - TosState tos_type = cache->flag_state(); -- int field_offset = cache->f2(); -+ int field_offset = cache->f2_as_index(); - - if (tos_type == ltos || tos_type == dtos) { - Reg r_lo, r_hi; -@@ -6083,7 +6088,7 @@ - storeBarrier(jinfo->codebuf); - - TosState tos_type = cache->flag_state(); -- int field_offset = cache->f2(); -+ int field_offset = cache->f2_as_index(); - Reg r_obj; - - if (tos_type == ltos || tos_type == dtos) { -@@ -6163,7 +6168,7 @@ - break; - } - -- callee = opcode == opc_invokevirtual ? (methodOop)cache->f2() : (methodOop)cache->f1(); -+ callee = opcode == opc_invokevirtual ? (methodOop)cache->f2_as_index() : (methodOop)cache->f1_as_instance(); - - if (opcode != opc_invokevirtual || cache->is_vfinal()) { - if (handle_special_method(callee, jinfo, stackdepth)) -@@ -6181,7 +6186,7 @@ - JASSERT(cache->parameter_size() == 1, "not 1 parameter to accessor"); - - TosState tos_type = entry->flag_state(); -- int field_offset = entry->f2(); -+ int field_offset = entry->f2_as_index(); - - JASSERT(tos_type == btos || tos_type == ctos || tos_type == stos || tos_type == atos || tos_type == itos, "not itos or atos"); - -@@ -6228,7 +6233,7 @@ - ldr_imm(jinfo->codebuf, ARM_R0, ARM_R0, - CP_OFFSET + (index << 4) + (opcode == opc_invokevirtual ? 
8 : 4), 1, 0); - else -- ldr_imm(jinfo->codebuf, ARM_R0, ARM_R3, INSTANCEKLASS_VTABLE_OFFSET + cache->f2() * 4, 1, 0); -+ ldr_imm(jinfo->codebuf, ARM_R0, ARM_R3, INSTANCEKLASS_VTABLE_OFFSET + cache->f2_as_index() * 4, 1, 0); - add_imm(jinfo->codebuf, ARM_R2, ARM_R2, bci+CONSTMETHOD_CODEOFFSET); - str_imm(jinfo->codebuf, ARM_R1, Rthread, THREAD_LAST_JAVA_SP, 1, 0); - str_imm(jinfo->codebuf, ARM_R1, Rthread, THREAD_LAST_JAVA_FP, 1, 0); -@@ -6298,7 +6303,7 @@ - int r = JSTACK_REG(jstack); - PUSH(jstack, r); - ldr_imm(jinfo->codebuf, r, Ristate, ISTATE_CONSTANTS, 1, 0); -- ldr_imm(jinfo->codebuf, r, r, CP_OFFSET + (index << 4) + 4, 1, 0); // offset to cache->f1() -+ ldr_imm(jinfo->codebuf, r, r, CP_OFFSET + (index << 4) + 4, 1, 0); // offset to cache->f1_as_instance() - } else { - Thumb2_Exit(jinfo, H_EXIT_TO_INTERPRETER, bci, stackdepth); - }