
/*
 * Copyright (c) 2013, Red Hat Inc.
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates.
 * All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"

#define __ _masm->
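// ('__' expands to '_masm->', so '__ ldr(...)' reads as '_masm->ldr(...)'.)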

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
  if (VerifyMethodHandles)
    verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class),
                 "MH argument is a Class");
  __ ldr(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes()));
}

#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
  assert(x != 0, err_msg("%s should be nonzero", xname));
  return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else //ASSERT
#define NONZERO(x) (x)
#endif //ASSERT

#ifdef ASSERT
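// Debug-only check that obj is a non-null instance of the given well-known
// klass.  The check accepts an exact klass match or a hit on the well-known
// klass's super-check word; roughly (a sketch):
//   guarantee(obj != NULL &&
//             (obj->klass() == wk_klass ||
//              *(Klass**)((address)obj->klass() + wk_klass->super_check_offset())
//                  == wk_klass));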
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj, SystemDictionary::WKID klass_id,
                                 const char* error_message) {
  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  Register temp = rscratch2;
  Register temp2 = rscratch1; // used by MacroAssembler::cmpptr
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj);
  __ cbz(obj, L_bad);
  __ push(RegSet::of(temp, temp2), sp);
  __ load_klass(temp, obj);
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ br(Assembler::EQ, L_ok);
  intptr_t super_check_offset = klass->super_check_offset();
  __ ldr(temp, Address(temp, super_check_offset));
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ br(Assembler::EQ, L_ok);
  __ pop(RegSet::of(temp, temp2), sp);
  __ bind(L_bad);
  __ stop(error_message);
  __ BIND(L_ok);
  __ pop(RegSet::of(temp, temp2), sp);
  BLOCK_COMMENT("} verify_klass");
}

void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {  }

#endif //ASSERT

void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
                                            bool for_compiler_entry) {
  assert(method == rmethod, "interpreter calling convention");
  Label L_no_such_method;
  __ cbz(rmethod, L_no_such_method);
  __ verify_method_ptr(method);

  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
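    // Roughly (a sketch of the check below):
    //   if (rthread->interp_only_mode() != 0)
    //     goto method->interpreter_entry();  // keep execution in the interpreter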

    __ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
    __ cbzw(rscratch1, run_compiled_code);
    __ ldr(rscratch1, Address(method, Method::interpreter_entry_offset()));
    __ br(rscratch1);
    __ BIND(run_compiled_code);
  }
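  // Method carries two raw entry points: from_compiled_offset() is taken by
  // compiled callers and from_interpreted_offset() by interpreted ones; the
  // load below selects the one matching this entry kind.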

  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                     Method::from_interpreted_offset();
  __ ldr(rscratch1, Address(method, entry_offset));
  __ br(rscratch1);
  __ bind(L_no_such_method);
  __ far_jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
}

void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register method_temp,
                                        Register temp2,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, method_temp, temp2);
  assert(recv != noreg, "required register");
  assert(method_temp == rmethod, "required register for loading method");

  //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())));
  __ verify_oop(method_temp);
  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
  __ verify_oop(method_temp);
  // the following assumes that a Method* is normally compressed in the vmtarget field:
  __ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
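  // Net effect of the three loads above, in Java-ish terms (a sketch):
  //   method_temp = (Method*) recv.form.vmentry.vmtarget;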

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
    __ ldr(temp2, Address(method_temp, Method::const_offset()));
    __ load_sized_value(temp2,
                        Address(temp2, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    Label L;
    __ ldr(rscratch1, __ argument_address(temp2, -1));
    __ cmp(recv, rscratch1);
    __ br(Assembler::EQ, L);
    __ ldr(r0, __ argument_address(temp2, -1));
    __ hlt(0);
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}

// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                                vmIntrinsics::ID iid) {
  const bool not_for_compiler_entry = false;  // this is the interpreter entry
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  if (iid == vmIntrinsics::_invokeGeneric ||
      iid == vmIntrinsics::_compiledLambdaForm) {
    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
    // They all allow an appendix argument.
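    // In outline, linking goes up into Java first (a sketch):
    //   MemberName invoker = MethodHandleNatives.linkMethod(...);  // + appendix
    // and the resulting call sites land on the linkTo* entries handled below.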
    __ hlt(0);           // empty stubs make SG sick
    return NULL;
  }

  // r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // rmethod: Method*
  // r3: argument locator (parameter slot count, added to rsp)
  // r1: used as temp to hold mh or receiver
  // r0, r11: garbage temps, blown away
  Register argp   = r3;   // argument list ptr, live on error paths
  Register temp   = r0;
  Register mh     = r1;   // MH receiver; dies quickly and is recycled

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  if (VerifyMethodHandles) {
    Label L;
    BLOCK_COMMENT("verify_intrinsic_id {");
    __ ldrb(rscratch1, Address(rmethod, Method::intrinsic_id_offset_in_bytes()));
    __ cmp(rscratch1, (int) iid);
    __ br(Assembler::EQ, L);
    if (iid == vmIntrinsics::_linkToVirtual ||
        iid == vmIntrinsics::_linkToSpecial) {
      // could do this for all kinds, but would explode assembly code size
      trace_method_handle(_masm, "bad Method*::intrinsic_id");
    }
    __ hlt(0);
    __ bind(L);
    BLOCK_COMMENT("} verify_intrinsic_id");
  }

  // First task:  Find out how big the argument list is.
  Address r3_first_arg_addr;
  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
    __ ldr(argp, Address(rmethod, Method::const_offset()));
    __ load_sized_value(argp,
                        Address(argp, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    r3_first_arg_addr = __ argument_address(argp, -1);
  } else {
    DEBUG_ONLY(argp = noreg);
  }

  if (!is_signature_polymorphic_static(iid)) {
    __ ldr(mh, r3_first_arg_addr);
    DEBUG_ONLY(argp = noreg);
  }

  // r3_first_arg_addr is live!
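  // Two shapes of entry are generated from here (a sketch):
  //   _invokeBasic: (recv_MH, a0, a1, ...)        - dispatch through recv_MH.form
  //   _linkTo*:     (a0, a1, ..., MemberName mn)  - pop mn, dispatch on its fields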

  trace_method_handle_interpreter_entry(_masm, iid);
  if (iid == vmIntrinsics::_invokeBasic) {
    generate_method_handle_dispatch(_masm, iid, mh, noreg, not_for_compiler_entry);

  } else {
    // Adjust argument list by popping the trailing MemberName argument.
    Register recv = noreg;
    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
      __ ldr(recv = r2, r3_first_arg_addr);
    }
    DEBUG_ONLY(argp = noreg);
    Register rmember = rmethod;  // MemberName ptr; incoming method ptr is dead now
    __ pop(rmember);             // extract last argument
    generate_method_handle_dispatch(_masm, iid, recv, rmember, not_for_compiler_entry);
  }

  return entry_point;
}


void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                    vmIntrinsics::ID iid,
                                                    Register receiver_reg,
                                                    Register member_reg,
                                                    bool for_compiler_entry) {
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  // temps used in this code are not used in *either* compiled or interpreted calling sequences
  Register temp1 = r10;
  Register temp2 = r11;
  Register temp3 = r14;  // r13 is live by this point: it contains the sender SP
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
    assert_different_registers(temp1,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
    assert_different_registers(temp2,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
    assert_different_registers(temp3,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
  }

  assert_different_registers(temp1, temp2, temp3, receiver_reg);
  assert_different_registers(temp1, temp2, temp3, member_reg);

  if (iid == vmIntrinsics::_invokeBasic) {
    // indirect through MH.form.vmentry.vmtarget
    jump_to_lambda_form(_masm, receiver_reg, rmethod, temp1, for_compiler_entry);

  } else {
    // The method is a member invoker used by direct method handles.
    if (VerifyMethodHandles) {
      // make sure the trailing argument really is a MemberName (caller responsibility)
      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MemberName),
                   "MemberName required for invokeVirtual etc.");
    }

    Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
    Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
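    // MemberName fields consulted below:
    //   clazz    - the declaring class (a java.lang.Class oop)
    //   vmindex  - vtable/itable index used for virtual/interface linkage
    //   vmtarget - the resolved Method* used for special/static linkage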

    Register temp1_recv_klass = temp1;
    if (iid != vmIntrinsics::_linkToStatic) {
      __ verify_oop(receiver_reg);
      if (iid == vmIntrinsics::_linkToSpecial) {
        // Don't actually load the klass; just null-check the receiver.
        __ null_check(receiver_reg);
      } else {
        // load receiver klass itself
        __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      BLOCK_COMMENT("check_receiver {");
      // The receiver for the MemberName must be in receiver_reg.
      // Check the receiver against the MemberName.clazz
      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
        // Did not load it above...
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
        Label L_ok;
        Register temp2_defc = temp2;
        __ load_heap_oop(temp2_defc, member_clazz);
        load_klass_from_Class(_masm, temp2_defc);
        __ verify_klass_ptr(temp2_defc);
        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok);
        // If we get here, the type check failed!
        __ hlt(0);
        // __ STOP("receiver class disagrees with MemberName.clazz");
        __ bind(L_ok);
      }
      BLOCK_COMMENT("} check_receiver");
    }
    if (iid == vmIntrinsics::_linkToSpecial ||
        iid == vmIntrinsics::_linkToStatic) {
      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
    }

    // Live registers at this point:
    //  member_reg - MemberName that was the trailing argument
    //  temp1_recv_klass - klass of stacked receiver, if needed
    //  r13 - interpreter linkage (if interpreted)  ??? FIXME
    //  r1 ... r0 - compiler arguments (if compiled)

    Label L_incompatible_class_change_error;
    switch (iid) {
    case vmIntrinsics::_linkToSpecial:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
      }
      __ ldr(rmethod, member_vmtarget);
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
      }
      __ ldr(rmethod, member_vmtarget);
      break;

    case vmIntrinsics::_linkToVirtual:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
      }

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
      __ ldr(temp2_index, member_vmindex);

      if (VerifyMethodHandles) {
        Label L_index_ok;
        __ cmpw(temp2_index, 0U);
        __ br(Assembler::GE, L_index_ok);
        __ hlt(0);
        __ BIND(L_index_ok);
      }

      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
      // at this point.  And VerifyMethodHandles has already checked clazz, if needed.

      // get target Method* & entry point
      __ lookup_virtual_method(temp1_recv_klass, temp2_index, rmethod);
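      // In effect (a sketch): rmethod = recv_klass->vtable()->method_at(index).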
      break;
    }

    case vmIntrinsics::_linkToInterface:
    {
      // same as TemplateTable::invokeinterface
      // (minus the CP setup and profiling, with different argument motion)
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
      }

      Register temp3_intf = temp3;
      __ load_heap_oop(temp3_intf, member_clazz);
      load_klass_from_Class(_masm, temp3_intf);
      __ verify_klass_ptr(temp3_intf);

      Register rindex = rmethod;
      __ ldr(rindex, member_vmindex);
      if (VerifyMethodHandles) {
        Label L;
        __ cmpw(rindex, 0U);
        __ br(Assembler::GE, L);
        __ hlt(0);
        __ bind(L);
      }

      // given intf, index, and recv klass, dispatch to the implementation method
      __ lookup_interface_method(temp1_recv_klass, temp3_intf,
                                 // note: next two args must be the same:
                                 rindex, rmethod,
                                 temp2,
                                 L_incompatible_class_change_error);
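      // In effect (a sketch): scan recv_klass's itable for temp3_intf and load
      // the Method* at rindex; branch to the ICCE label if the klass does not
      // implement the interface.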
      break;
    }

    default:
      fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
      break;
    }

    // live at this point:  rmethod, r13 (if interpreted)

    // After figuring out which concrete method to call, jump into it.
    // Note that this works in the interpreter with no data motion.
    // But the compiled version will require that r2_recv be shifted out.
    __ verify_method_ptr(rmethod);
    jump_from_method_handle(_masm, rmethod, temp1, for_compiler_entry);
    if (iid == vmIntrinsics::_linkToInterface) {
      __ bind(L_incompatible_class_change_error);
      __ far_jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
    }
  }
}

#ifndef PRODUCT
void trace_method_handle_stub(const char* adaptername,
                              oop mh,
                              intptr_t* saved_regs,
                              intptr_t* entry_sp) {  }

// The stub wraps the arguments in a struct on the stack to avoid
// dealing with the different calling conventions for passing the
// four arguments.
struct MethodHandleStubArguments {
  const char* adaptername;
  oopDesc* mh;
  intptr_t* saved_regs;
  intptr_t* entry_sp;
};
void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {  }
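// A tracing stub would build the struct above on the stack and pass its
// address as the single C argument; both tracing bodies are left empty here,
// so these entry points compile to no-ops.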

void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {  }
#endif //PRODUCT