view src/cpu/aarch64/vm/c1_FrameMap_aarch64.cpp @ 10905:f57189b7648d

8257192: Integrate AArch64 JIT port into 8u
7009641: Don't fail VM when CodeCache is full
8073108: [AArch64] Use x86 and SPARC CPU instructions for GHASH acceleration
8130309: Need to bailout cleanly if creation of stubs fails when codecache is out of space (AArch64 changes)
8131779: AARCH64: add Montgomery multiply intrinsic
8132875: AArch64: Fix error introduced into AArch64 CodeCache by commit for 8130309
8135018: AARCH64: Missing memory barriers for CMS collector
8145320: Create unsafe_arraycopy and generic_arraycopy for AArch64
8148328: aarch64: redundant lsr instructions in stub code.
8148783: aarch64: SEGV running SpecJBB2013
8148948: aarch64: generate_copy_longs calls align() incorrectly
8149080: AArch64: Recognise disjoint array copy in stub code
8149365: aarch64: memory copy does not prefetch on backwards copy
8149907: aarch64: use load/store pair instructions in call_stub
8150038: aarch64: make use of CBZ and CBNZ when comparing narrow pointer with zero
8150045: arraycopy causes segfaults in SATB during garbage collection
8150082: aarch64: optimise small array copy
8150229: aarch64: pipeline class for several instructions is not set correctly
8150313: aarch64: optimise array copy using SIMD instructions
8150394: aarch64: add support for 8.1 LSE CAS instructions
8150652: Remove unused code in AArch64 back end
8151340: aarch64: prefetch the destination word for write prior to ldxr/stxr loops.
8151502: optimize pd_disjoint_words and pd_conjoint_words
8151775: aarch64: add support for 8.1 LSE atomic operations
8152537: aarch64: Make use of CBZ and CBNZ when comparing unsigned values with zero.
8152840: aarch64: improve _unsafe_arraycopy stub routine
8153172: aarch64: hotspot crashes after the 8.1 LSE patch is merged
8153713: aarch64: improve short array clearing using store pair
8153797: aarch64: Add Arrays.fill stub code
8154413: AArch64: Better byte behaviour
8154537: AArch64: some integer rotate instructions are never emitted
8154739: AArch64: TemplateTable::fast_xaccess loads in wrong mode
8155015: Aarch64: bad assert in spill generation code
8155100: AArch64: Relax alignment requirement for byte_map_base
8155612: Aarch64: vector nodes need to support misaligned offset
8155617: aarch64: ClearArray does not use DC ZVA
8155627: Enable SA on AArch64
8155653: TestVectorUnalignedOffset.java not pushed with 8155612
8156731: aarch64: java/util/Arrays/Correct.java fails due to _generic_arraycopy stub routine
8157841: aarch64: prefetch ignores cache line size
8157906: aarch64: some more integer rotate instructions are never emitted
8158913: aarch64: SEGV running Spark terasort
8159052: aarch64: optimise unaligned copies in pd_disjoint_words and pd_conjoint_words
8159063: aarch64: optimise unaligned array copy long
8160748: [AArch64] Inconsistent types for ideal_reg
8161072: AArch64: jtreg compiler/uncommontrap/TestDeoptOOM failure
8161190: AArch64: Fix overflow in immediate cmp instruction
8164113: AArch64: follow-up the fix for 8161598
8165673: AArch64: Fix JNI floating point argument handling
8167200: AArch64: Broken stack pointer adjustment in interpreter
8167421: AArch64: in one core system, fatal error: Illegal threadstate encountered
8167595: AArch64: SEGV in stub code cipherBlockChaining_decryptAESCrypt
8168699: Validate special case invocations [AArch64 support]
8168888: Port 8160591: Improve internal array handling to AArch64.
8170100: AArch64: Crash in C1-compiled code accessing References
8170188: jtreg test compiler/types/TestMeetIncompatibleInterfaceArrays.java causes JVM crash
8170873: PPC64/aarch64: Poor StrictMath performance due to non-optimized compilation
8171537: aarch64: compiler/c1/Test6849574.java generates guarantee failure in C1
8172881: AArch64: assertion failure: the int pressure is incorrect
8173472: AArch64: C1 comparisons with null only use 32-bit instructions
8176100: [AArch64] [REDO][REDO] G1 Needs pre barrier on dereference of weak JNI handles
8177661: Correct ad rule output register types from iRegX to iRegXNoSp
8179954: AArch64: C1 and C2 volatile accesses are not sequentially consistent
8182581: aarch64: fix for crash caused by earlyret of compiled method
8183925: [AArch64] Decouple crash protection from watcher thread
8186325: AArch64: jtreg test hotspot/test/gc/g1/TestJNIWeakG1/TestJNIWeakG1.java SEGV
8187224: aarch64: some inconsistency between aarch64_ad.m4 and aarch64.ad
8189170: [AArch64] Add option to disable stack overflow checking in primordial thread for use with JNI_CreateJavaJVM
8193133: Assertion failure because 0xDEADDEAD can be in-heap
8195685: AArch64 port of 8174962: Better interface invocations
8195859: AArch64: vtableStubs gtest fails after 8174962
8196136: AArch64: Correct register use in patch for JDK-8194686
8196221: AArch64: Mistake in committed patch for JDK-8195859
8199712: [AArch64] Flight Recorder
8203481: Incorrect constraint for unextended_sp in frame:safe_for_sender
8203699: java/lang/invoke/SpecialInterfaceCall fails with SIGILL on aarch64
8205421: AARCH64: StubCodeMark should be placed after alignment
8206163: AArch64: incorrect code generation for StoreCM
8207345: Trampoline generation code reads from uninitialized memory
8207838: AArch64: Float registers incorrectly restored in JNI call
8209413: AArch64: NPE in clhsdb jstack command
8209414: [AArch64] method handle invocation does not respect JVMTI interp_only mode
8209415: Fix JVMTI test failure HS202
8209420: Track membars for volatile accesses so they can be properly optimized
8209835: Aarch64: elide barriers on all volatile operations
8210425: [AArch64] sharedRuntimeTrig/sharedRuntimeTrans compiled without optimization
8211064: [AArch64] Interpreter and c1 don't correctly handle jboolean results in native calls
8211233: MemBarNode::trailing_membar() and MemBarNode::leading_membar() need to handle dying subgraphs better
8213134: AArch64: vector shift failed with MaxVectorSize=8
8213419: [AArch64] C2 may hang in MulLNode::Ideal()/MulINode::Ideal() with gcc 8.2.1
8214857: "bad trailing membar" assert failure at memnode.cpp:3220
8215951: AArch64: jtreg test vmTestbase/nsk/jvmti/PopFrame/popframe005 segfaults
8215961: jdk/jfr/event/os/TestCPUInformation.java fails on AArch64
8216350: AArch64: monitor unlock fast path not called
8216989: CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier() does not check for zero length on AARCH64
8217368: AArch64: C2 recursive stack locking optimisation not triggered
8218185: aarch64: missing LoadStore barrier in TemplateTable::putfield_or_static
8219011: Implement MacroAssembler::warn method on AArch64
8219635: aarch64: missing LoadStore barrier in TemplateTable::fast_storefield
8221220: AArch64: Add StoreStore membar explicitly for Volatile Writes in TemplateTable
8221658: aarch64: add necessary predicate for ubfx patterns
8224671: AArch64: mauve System.arraycopy test failure
8224828: aarch64: rflags is not correct after safepoint poll
8224851: AArch64: fix warnings and errors with Clang and GCC 8.3
8224880: AArch64: java/javac error with AllocatePrefetchDistance
8228400: Remove built-in AArch64 simulator
8228406: Superfluous change in chaitin.hpp
8228593: Revert explicit JDK 7 support additions
8228716: Revert InstanceKlass::print_on debug additions
8228718: Revert incorrect backport of JDK-8129757 to 8-aarch64
8228725: AArch64: Purge method call format support
8228747: Revert "unused" attribute from test_arraycopy_func
8228767: Revert ResourceMark additions
8228770: Revert development hsdis changes
8229123: Revert build fixes for aarch64/zero
8229124: Revert disassembler.cpp changes
8229145: Revert TemplateTable::bytecode() visibility change
8233839: aarch64: missing memory barrier in NewObjectArrayStub and NewTypeArrayStub
8237512: AArch64: aarch64TestHook leaks a BufferBlob
8246482: Build failures with +JFR -PCH
8247979: aarch64: missing side effect of killing flags for clearArray_reg_reg
8248219: aarch64: missing memory barrier in fast_storefield and fast_accessfield
Reviewed-by: shade, aph
author andrew
date Mon, 01 Feb 2021 03:48:36 +0000
children f79e943d15a7 655498b9a44d

/*
 * Copyright (c) 2013, Red Hat Inc.
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates.
 * All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"

LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) {
  LIR_Opr opr = LIR_OprFact::illegalOpr;
  VMReg r_1 = reg->first();
  VMReg r_2 = reg->second();
  if (r_1->is_stack()) {
    // Convert the stack slot to an SP-relative byte offset.
    // The calling convention does not count the SharedRuntime::out_preserve_stack_slots()
    // value, so we must add it in here.
    int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
    opr = LIR_OprFact::address(new LIR_Address(sp_opr, st_off, type));
  } else if (r_1->is_Register()) {
    Register reg = r_1->as_Register();
    if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
      Register reg2 = r_2->as_Register();
#ifdef _LP64
      assert(reg2 == reg, "must be same register");
      opr = as_long_opr(reg);
#else
      opr = as_long_opr(reg2, reg);
#endif // _LP64
    } else if (type == T_OBJECT || type == T_ARRAY) {
      opr = as_oop_opr(reg);
    } else if (type == T_METADATA) {
      opr = as_metadata_opr(reg);
    } else {
      opr = as_opr(reg);
    }
  } else if (r_1->is_FloatRegister()) {
    assert(type == T_DOUBLE || type == T_FLOAT, "wrong type");
    int num = r_1->as_FloatRegister()->encoding();
    if (type == T_FLOAT) {
      opr = LIR_OprFact::single_fpu(num);
    } else {
      opr = LIR_OprFact::double_fpu(num);
    }
  } else {
    ShouldNotReachHere();
  }
  return opr;
}
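
// Illustrative sketch (comment only, not compiled): the stack-slot case above
// turns a Java-calling-convention slot index into an SP-relative byte offset.
// With a hypothetical slot index of 6, and assuming out_preserve_stack_slots()
// returns 0 on this port and VMRegImpl::stack_slot_size is 4, the operand ends
// up addressed at SP + 24:
//
//   int st_off = (6 + SharedRuntime::out_preserve_stack_slots())
//                * VMRegImpl::stack_slot_size;                     // 6 * 4 == 24
//   LIR_Opr opr = LIR_OprFact::address(new LIR_Address(sp_opr, st_off, T_INT));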

LIR_Opr FrameMap::r0_opr;
LIR_Opr FrameMap::r1_opr;
LIR_Opr FrameMap::r2_opr;
LIR_Opr FrameMap::r3_opr;
LIR_Opr FrameMap::r4_opr;
LIR_Opr FrameMap::r5_opr;
LIR_Opr FrameMap::r6_opr;
LIR_Opr FrameMap::r7_opr;
LIR_Opr FrameMap::r8_opr;
LIR_Opr FrameMap::r9_opr;
LIR_Opr FrameMap::r10_opr;
LIR_Opr FrameMap::r11_opr;
LIR_Opr FrameMap::r12_opr;
LIR_Opr FrameMap::r13_opr;
LIR_Opr FrameMap::r14_opr;
LIR_Opr FrameMap::r15_opr;
LIR_Opr FrameMap::r16_opr;
LIR_Opr FrameMap::r17_opr;
LIR_Opr FrameMap::r18_opr;
LIR_Opr FrameMap::r19_opr;
LIR_Opr FrameMap::r20_opr;
LIR_Opr FrameMap::r21_opr;
LIR_Opr FrameMap::r22_opr;
LIR_Opr FrameMap::r23_opr;
LIR_Opr FrameMap::r24_opr;
LIR_Opr FrameMap::r25_opr;
LIR_Opr FrameMap::r26_opr;
LIR_Opr FrameMap::r27_opr;
LIR_Opr FrameMap::r28_opr;
LIR_Opr FrameMap::r29_opr;
LIR_Opr FrameMap::r30_opr;

LIR_Opr FrameMap::rfp_opr;
LIR_Opr FrameMap::sp_opr;

LIR_Opr FrameMap::receiver_opr;

LIR_Opr FrameMap::r0_oop_opr;
LIR_Opr FrameMap::r1_oop_opr;
LIR_Opr FrameMap::r2_oop_opr;
LIR_Opr FrameMap::r3_oop_opr;
LIR_Opr FrameMap::r4_oop_opr;
LIR_Opr FrameMap::r5_oop_opr;
LIR_Opr FrameMap::r6_oop_opr;
LIR_Opr FrameMap::r7_oop_opr;
LIR_Opr FrameMap::r8_oop_opr;
LIR_Opr FrameMap::r9_oop_opr;
LIR_Opr FrameMap::r10_oop_opr;
LIR_Opr FrameMap::r11_oop_opr;
LIR_Opr FrameMap::r12_oop_opr;
LIR_Opr FrameMap::r13_oop_opr;
LIR_Opr FrameMap::r14_oop_opr;
LIR_Opr FrameMap::r15_oop_opr;
LIR_Opr FrameMap::r16_oop_opr;
LIR_Opr FrameMap::r17_oop_opr;
LIR_Opr FrameMap::r18_oop_opr;
LIR_Opr FrameMap::r19_oop_opr;
LIR_Opr FrameMap::r20_oop_opr;
LIR_Opr FrameMap::r21_oop_opr;
LIR_Opr FrameMap::r22_oop_opr;
LIR_Opr FrameMap::r23_oop_opr;
LIR_Opr FrameMap::r24_oop_opr;
LIR_Opr FrameMap::r25_oop_opr;
LIR_Opr FrameMap::r26_oop_opr;
LIR_Opr FrameMap::r27_oop_opr;
LIR_Opr FrameMap::r28_oop_opr;
LIR_Opr FrameMap::r29_oop_opr;
LIR_Opr FrameMap::r30_oop_opr;

LIR_Opr FrameMap::rscratch1_opr;
LIR_Opr FrameMap::rscratch2_opr;
LIR_Opr FrameMap::rscratch1_long_opr;
LIR_Opr FrameMap::rscratch2_long_opr;

LIR_Opr FrameMap::r0_metadata_opr;
LIR_Opr FrameMap::r1_metadata_opr;
LIR_Opr FrameMap::r2_metadata_opr;
LIR_Opr FrameMap::r3_metadata_opr;
LIR_Opr FrameMap::r4_metadata_opr;
LIR_Opr FrameMap::r5_metadata_opr;

LIR_Opr FrameMap::long0_opr;
LIR_Opr FrameMap::long1_opr;
LIR_Opr FrameMap::fpu0_float_opr;
LIR_Opr FrameMap::fpu0_double_opr;

LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0, };
LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, };

//--------------------------------------------------------
//               FrameMap
//--------------------------------------------------------

void FrameMap::initialize() {
  assert(!_init_done, "once");

  int i=0;
  map_register(i, r0); r0_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r1); r1_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r2); r2_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r3); r3_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r4); r4_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r5); r5_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r6); r6_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r7); r7_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r10); r10_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r11); r11_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r12); r12_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r13); r13_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r14); r14_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r15); r15_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r16); r16_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r17); r17_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r18); r18_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r19); r19_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r20); r20_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r21); r21_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r22); r22_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r23); r23_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r24); r24_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r25); r25_opr = LIR_OprFact::single_cpu(i); i++;
  map_register(i, r26); r26_opr = LIR_OprFact::single_cpu(i); i++;

  map_register(i, r27); r27_opr = LIR_OprFact::single_cpu(i); i++; // rheapbase
  map_register(i, r28); r28_opr = LIR_OprFact::single_cpu(i); i++; // rthread
  map_register(i, r29); r29_opr = LIR_OprFact::single_cpu(i); i++; // rfp
  map_register(i, r30); r30_opr = LIR_OprFact::single_cpu(i); i++; // lr
  map_register(i, r31_sp); sp_opr = LIR_OprFact::single_cpu(i); i++; // sp
  map_register(i, r8); r8_opr = LIR_OprFact::single_cpu(i); i++;   // rscratch1
  map_register(i, r9); r9_opr = LIR_OprFact::single_cpu(i); i++;   // rscratch2

  rscratch1_opr = r8_opr;
  rscratch2_opr = r9_opr;
  rscratch1_long_opr = LIR_OprFact::double_cpu(r8_opr->cpu_regnr(), r8_opr->cpu_regnr());
  rscratch2_long_opr = LIR_OprFact::double_cpu(r9_opr->cpu_regnr(), r9_opr->cpu_regnr());

  long0_opr = LIR_OprFact::double_cpu(0, 0);
  long1_opr = LIR_OprFact::double_cpu(1, 1);

  fpu0_float_opr   = LIR_OprFact::single_fpu(0);
  fpu0_double_opr  = LIR_OprFact::double_fpu(0);

  _caller_save_cpu_regs[0] = r0_opr;
  _caller_save_cpu_regs[1] = r1_opr;
  _caller_save_cpu_regs[2] = r2_opr;
  _caller_save_cpu_regs[3] = r3_opr;
  _caller_save_cpu_regs[4] = r4_opr;
  _caller_save_cpu_regs[5] = r5_opr;
  _caller_save_cpu_regs[6] = r6_opr;
  _caller_save_cpu_regs[7] = r7_opr;
  // rscratch1 (r8) and rscratch2 (r9) are not included
  _caller_save_cpu_regs[8] = r10_opr;
  _caller_save_cpu_regs[9] = r11_opr;
  _caller_save_cpu_regs[10] = r12_opr;
  _caller_save_cpu_regs[11] = r13_opr;
  _caller_save_cpu_regs[12] = r14_opr;
  _caller_save_cpu_regs[13] = r15_opr;
  _caller_save_cpu_regs[14] = r16_opr;
  _caller_save_cpu_regs[15] = r17_opr;
  _caller_save_cpu_regs[16] = r18_opr;

  for (int i = 0; i < 8; i++) {
    _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
  }

  _init_done = true;

  r0_oop_opr = as_oop_opr(r0);
  r1_oop_opr = as_oop_opr(r1);
  r2_oop_opr = as_oop_opr(r2);
  r3_oop_opr = as_oop_opr(r3);
  r4_oop_opr = as_oop_opr(r4);
  r5_oop_opr = as_oop_opr(r5);
  r6_oop_opr = as_oop_opr(r6);
  r7_oop_opr = as_oop_opr(r7);
  r8_oop_opr = as_oop_opr(r8);
  r9_oop_opr = as_oop_opr(r9);
  r10_oop_opr = as_oop_opr(r10);
  r11_oop_opr = as_oop_opr(r11);
  r12_oop_opr = as_oop_opr(r12);
  r13_oop_opr = as_oop_opr(r13);
  r14_oop_opr = as_oop_opr(r14);
  r15_oop_opr = as_oop_opr(r15);
  r16_oop_opr = as_oop_opr(r16);
  r17_oop_opr = as_oop_opr(r17);
  r18_oop_opr = as_oop_opr(r18);
  r19_oop_opr = as_oop_opr(r19);
  r20_oop_opr = as_oop_opr(r20);
  r21_oop_opr = as_oop_opr(r21);
  r22_oop_opr = as_oop_opr(r22);
  r23_oop_opr = as_oop_opr(r23);
  r24_oop_opr = as_oop_opr(r24);
  r25_oop_opr = as_oop_opr(r25);
  r26_oop_opr = as_oop_opr(r26);
  r27_oop_opr = as_oop_opr(r27);
  r28_oop_opr = as_oop_opr(r28);
  r29_oop_opr = as_oop_opr(r29);
  r30_oop_opr = as_oop_opr(r30);

  r0_metadata_opr = as_metadata_opr(r0);
  r1_metadata_opr = as_metadata_opr(r1);
  r2_metadata_opr = as_metadata_opr(r2);
  r3_metadata_opr = as_metadata_opr(r3);
  r4_metadata_opr = as_metadata_opr(r4);
  r5_metadata_opr = as_metadata_opr(r5);

  sp_opr = as_pointer_opr(r31_sp);
  rfp_opr = as_pointer_opr(rfp);

  VMRegPair regs;
  BasicType sig_bt = T_OBJECT;
  SharedRuntime::java_calling_convention(&sig_bt, &regs, 1, true);
  receiver_opr = as_oop_opr(regs.first()->as_Register());

  for (int i = 0; i < nof_caller_save_fpu_regs; i++) {
    _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
  }
}
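
// Illustrative sketch (comment only): once initialize() has run (it runs only
// once, as the assert at the top enforces), the LIR register numbers chosen
// above can be read back from the operands. The expected values follow
// directly from the mapping order in initialize():
//
//   assert(FrameMap::r0_opr->cpu_regnr()  == 0, "r0 is LIR register 0");
//   assert(FrameMap::r10_opr->cpu_regnr() == 8, "r8/r9 (scratch) are mapped last, so r10 follows r7");
//   assert(FrameMap::long0_opr->cpu_regnrLo() == FrameMap::long0_opr->cpu_regnrHi(),
//          "a long lives in a single AArch64 register, paired with itself");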


Address FrameMap::make_new_address(ByteSize sp_offset) const {
  // For an rfp-based address use this instead:
  // return Address(rfp, in_bytes(sp_offset) - (framesize() - 2) * 4);
  return Address(sp, in_bytes(sp_offset));
}
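
// Illustrative sketch (comment only): C1 spill and monitor addresses produced
// here are plain SP-relative. For example, a slot at byte offset 16 in the
// frame would simply be
//
//   Address a = make_new_address(in_ByteSize(16));   // equivalent to Address(sp, 16)
//
// (in_ByteSize() is the existing helper from utilities/sizes.hpp; the offset
// value is made up for illustration.)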


// ----------------mapping-----------------------
// All mapping is based on rfp addressing, except for simple leaf methods, where
// the locals are accessed SP-relative (and no frame is built).


// Frame for simple leaf methods (quick entries)
//
//   +----------+
//   | ret addr |   <- TOS
//   +----------+
//   | args     |
//   | ......   |

// Frame for standard methods
//
//   | .........|  <- TOS
//   | locals   |
//   +----------+
//   |  old fp  |  <- RFP
//   +----------+
//   | ret addr |
//   +----------+
//   |  args    |
//   | .........|


// For OopMaps, map a local variable or spill index to a VMRegImpl name.
// This is the offset from sp() in the frame of the slot for the index,
// skewed by VMRegImpl::stack0 to indicate a stack location (vs. a register).
//
//           framesize +
//           stack0         stack0          0  <- VMReg
//             |              | <registers> |
//  ...........|..............|.............|
//      0 1 2 3 x x 4 5 6 ... |                <- local indices
//      ^           ^        sp()                 ( x x indicate link
//      |           |                               and return addr)
//  arguments   non-argument locals
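
// Illustrative sketch (comment only): the stack0 skew described above is what
// VMRegImpl::stack2reg() applies. A frame slot at SP byte offset 24, with
// stack_slot_size == 4, is therefore named by
//
//   VMReg name = VMRegImpl::stack2reg(24 / VMRegImpl::stack_slot_size);
//
// and that name is what OopMap entries for locals and spill slots record.
// (The offset is a made-up example value.)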


VMReg FrameMap::fpu_regname(int n) {
  // Return the OptoReg name for the fpu stack slot "n".
  // A spilled fpu stack slot comprises two single-word OptoRegs.
  return as_FloatRegister(n)->as_VMReg();
}

LIR_Opr FrameMap::stack_pointer() {
  return FrameMap::sp_opr;
}


// JSR 292
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
  // assert(rfp == rbp_mh_SP_save, "must be same register");
  return rfp_opr;
}


bool FrameMap::validate_frame() {
  return true;
}