src/share/vm/c1/c1_FrameMap.cpp @ 10905:f57189b7648d

/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "vmreg_aarch64.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "vmreg_ppc.inline.hpp"
#endif



//-----------------------------------------------------

// Convert method signature into an array of BasicTypes for the arguments
BasicTypeArray* FrameMap::signature_type_array_for(const ciMethod* method) {
  ciSignature* sig = method->signature();
  BasicTypeList* sta = new BasicTypeList(method->arg_size());
  // add receiver, if any
  if (!method->is_static()) sta->append(T_OBJECT);
  // add remaining arguments
  for (int i = 0; i < sig->count(); i++) {
    ciType* type = sig->type_at(i);
    BasicType t = type->basic_type();
    if (t == T_ARRAY) {
      t = T_OBJECT;
    }
    sta->append(t);
  }
  // done
  return sta;
}
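
// For example, a virtual method `int m(long l, String[] a)` yields the list
//   { T_OBJECT /* receiver */, T_LONG, T_OBJECT /* T_ARRAY flattened */ }.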


CallingConvention* FrameMap::java_calling_convention(const BasicTypeArray* signature, bool outgoing) {
  // Compute the size of the arguments first. The signature array that
  // SharedRuntime::java_calling_convention takes includes a T_VOID after
  // each two-word item (T_LONG, T_DOUBLE), but our signatures do not.
  int i;
  int sizeargs = 0;
  for (i = 0; i < signature->length(); i++) {
    sizeargs += type2size[signature->at(i)];
  }

  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
  int sig_index = 0;
  for (i = 0; i < sizeargs; i++, sig_index++) {
    sig_bt[i] = signature->at(sig_index);
    if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
      sig_bt[i + 1] = T_VOID;
      i++;
    }
  }
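  // e.g. a (long, int) Java signature expands to { T_LONG, T_VOID, T_INT },
  // the layout SharedRuntime::java_calling_convention expects.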

  intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, outgoing);
  LIR_OprList* args = new LIR_OprList(signature->length());
  for (i = 0; i < sizeargs;) {
    BasicType t = sig_bt[i];
    assert(t != T_VOID, "should be skipping these");
    LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
    args->append(opr);
    if (opr->is_address()) {
      LIR_Address* addr = opr->as_address_ptr();
      assert(addr->disp() == (int)addr->disp(), "out of range value");
      out_preserve = MAX2(out_preserve, (intptr_t)(addr->disp() - STACK_BIAS) / 4);
    }
    i += type2size[t];
  }
  assert(args->length() == signature->length(), "size mismatch");
  out_preserve += SharedRuntime::out_preserve_stack_slots();

  if (outgoing) {
    // update the space reserved for arguments.
    update_reserved_argument_area_size(out_preserve * BytesPerWord);
  }
  return new CallingConvention(args, out_preserve);
}
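
// Illustrative use, mirroring the FrameMap constructor below: build the
// incoming Java convention and walk the per-argument LIR operands.
//
//   BasicTypeArray* sig = FrameMap::signature_type_array_for(method);
//   CallingConvention* in = java_calling_convention(sig, false /* incoming */);
//   for (int i = 0; i < in->length(); i++) {
//     LIR_Opr loc = in->at(i);   // register, stack or address operand
//   }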


CallingConvention* FrameMap::c_calling_convention(const BasicTypeArray* signature) {
  // Compute the size of the arguments first. The signature array that
  // SharedRuntime::c_calling_convention takes includes a T_VOID after
  // each two-word item (T_LONG, T_DOUBLE), but our signatures do not.
  int i;
  int sizeargs = 0;
  for (i = 0; i < signature->length(); i++) {
    sizeargs += type2size[signature->at(i)];
  }

  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
  int sig_index = 0;
  for (i = 0; i < sizeargs; i++, sig_index++) {
    sig_bt[i] = signature->at(sig_index);
    if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
      sig_bt[i + 1] = T_VOID;
      i++;
    }
  }

  intptr_t out_preserve = SharedRuntime::c_calling_convention(sig_bt, regs, NULL, sizeargs);
  LIR_OprList* args = new LIR_OprList(signature->length());
  for (i = 0; i < sizeargs;) {
    BasicType t = sig_bt[i];
    assert(t != T_VOID, "should be skipping these");

    // C calls are always outgoing
    bool outgoing = true;
    LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
    // The operand type and the signature type may differ, for instance when
    // floating point values are passed in cpu registers, but the sizes must match.
    assert(type2size[opr->type()] == type2size[t], "type mismatch");
    args->append(opr);
    if (opr->is_address()) {
      LIR_Address* addr = opr->as_address_ptr();
      out_preserve = MAX2(out_preserve, (intptr_t)(addr->disp() - STACK_BIAS) / 4);
    }
    i += type2size[t];
  }
  assert(args->length() == signature->length(), "size mismatch");
  out_preserve += SharedRuntime::out_preserve_stack_slots();
  update_reserved_argument_area_size(out_preserve * BytesPerWord);
  return new CallingConvention(args, out_preserve);
}


//--------------------------------------------------------
//               FrameMap
//--------------------------------------------------------

bool      FrameMap::_init_done = false;
Register  FrameMap::_cpu_rnr2reg [FrameMap::nof_cpu_regs];
int       FrameMap::_cpu_reg2rnr [FrameMap::nof_cpu_regs];


FrameMap::FrameMap(ciMethod* method, int monitors, int reserved_argument_area_size) {
  assert(_init_done, "should already be completed");

  _framesize = -1;
  _num_spills = -1;

  assert(monitors >= 0, "not set");
  _num_monitors = monitors;
  assert(reserved_argument_area_size >= 0, "not set");
  _reserved_argument_area_size = MAX2(4, reserved_argument_area_size) * BytesPerWord;

  _argcount = method->arg_size();
  _argument_locations = new intArray(_argcount, -1);
  _incoming_arguments = java_calling_convention(signature_type_array_for(method), false);
  _oop_map_arg_count = _incoming_arguments->reserved_stack_slots();

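  // Record the sp-relative byte offset of every incoming stack argument and
  // rewrite it as a LIR stack-slot operand; register arguments are left as-is.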
  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_address()) {
      LIR_Address* address = opr->as_address_ptr();
      _argument_locations->at_put(java_index, address->disp() - STACK_BIAS);
      _incoming_arguments->args()->at_put(i, LIR_OprFact::stack(java_index, as_BasicType(as_ValueType(address->type()))));
    }
    java_index += type2size[opr->type()];
  }

}


bool FrameMap::finalize_frame(int nof_slots) {
  assert(nof_slots >= 0, "must be non-negative");
  assert(_num_spills == -1, "can only be set once");
  _num_spills = nof_slots;
  assert(_framesize == -1, "should only be calculated once");
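  // _framesize is kept in 4-byte stack slots: the spill/reserved area (already
  // counted in sp_offset_for_monitor_base(0)), the monitors, the saved deopt
  // orig pc word and the platform pad, rounded up to StackAlignmentInBytes.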
  _framesize =  round_to(in_bytes(sp_offset_for_monitor_base(0)) +
                         _num_monitors * sizeof(BasicObjectLock) +
                         sizeof(intptr_t) +                        // offset of deopt orig pc
                         frame_pad_in_bytes,
                         StackAlignmentInBytes) / 4;
  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_stack()) {
      _argument_locations->at_put(java_index, in_bytes(framesize_in_bytes()) +
                                  _argument_locations->at(java_index));
    }
    java_index += type2size[opr->type()];
  }
  // make sure it's expressible on the platform
  return validate_frame();
}

VMReg FrameMap::sp_offset2vmreg(ByteSize offset) const {
  int offset_in_bytes = in_bytes(offset);
  assert(offset_in_bytes % 4 == 0, "must be multiple of 4 bytes");
  assert(offset_in_bytes / 4 < framesize() + oop_map_arg_count(), "out of range");
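  // VMReg stack indices are in 4-byte slot units (VMRegImpl::stack_slot_size).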
  return VMRegImpl::stack2reg(offset_in_bytes / 4);
}


bool FrameMap::location_for_sp_offset(ByteSize byte_offset_from_sp,
                                      Location::Type loc_type,
                                      Location* loc) const {
  int offset = in_bytes(byte_offset_from_sp);
  assert(offset >= 0, "incorrect offset");
  if (!Location::legal_offset_in_bytes(offset)) {
    return false;
  }
  Location tmp_loc = Location::new_stk_loc(loc_type, offset);
  *loc = tmp_loc;
  return true;
}


bool FrameMap::locations_for_slot  (int index, Location::Type loc_type,
                                     Location* loc, Location* second) const {
  ByteSize offset_from_sp = sp_offset_for_slot(index);
  if (!location_for_sp_offset(offset_from_sp, loc_type, loc)) {
    return false;
  }
  if (second != NULL) {
    // two word item
    offset_from_sp = offset_from_sp + in_ByteSize(4);
    return location_for_sp_offset(offset_from_sp, loc_type, second);
  }
  return true;
}
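
// A two-word item (T_LONG or T_DOUBLE) therefore occupies two adjacent 4-byte
// stack locations: `loc`, and `second` four bytes above it.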

//////////////////////
// Public accessors //
//////////////////////


ByteSize FrameMap::sp_offset_for_slot(const int index) const {
  if (index < argcount()) {
    int offset = _argument_locations->at(index);
    assert(offset != -1, "not a memory argument");
    assert(offset >= framesize() * 4, "argument inside of frame");
    return in_ByteSize(offset);
  }
  ByteSize offset = sp_offset_for_spill(index - argcount());
  assert(in_bytes(offset) < framesize() * 4, "spill outside of frame");
  return offset;
}


ByteSize FrameMap::sp_offset_for_double_slot(const int index) const {
  ByteSize offset = sp_offset_for_slot(index);
  if (index >= argcount()) {
    assert(in_bytes(offset) + 4 < framesize() * 4, "spill outside of frame");
  }
  return offset;
}


ByteSize FrameMap::sp_offset_for_spill(const int index) const {
  assert(index >= 0 && index < _num_spills, "out of range");
  int offset = round_to(first_available_sp_in_frame + _reserved_argument_area_size, sizeof(double)) +
    index * spill_slot_size_in_bytes;
  return in_ByteSize(offset);
}
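
// Illustrative: with first_available_sp_in_frame == 0 and a 32-byte reserved
// argument area, spill slot 1 starts spill_slot_size_in_bytes bytes past byte
// offset 32 from sp (both constants are platform-defined).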

ByteSize FrameMap::sp_offset_for_monitor_base(const int index) const {
  int end_of_spills = round_to(first_available_sp_in_frame + _reserved_argument_area_size, sizeof(double)) +
    _num_spills * spill_slot_size_in_bytes;
  int offset = (int) round_to(end_of_spills, HeapWordSize) + index * sizeof(BasicObjectLock);
  return in_ByteSize(offset);
}
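
// Monitors are laid out above the spill area: the spill end is rounded up to
// HeapWordSize and each monitor occupies sizeof(BasicObjectLock) bytes.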

ByteSize FrameMap::sp_offset_for_monitor_lock(int index) const {
  check_monitor_index(index);
  return sp_offset_for_monitor_base(index) + in_ByteSize(BasicObjectLock::lock_offset_in_bytes());
}

ByteSize FrameMap::sp_offset_for_monitor_object(int index) const {
  check_monitor_index(index);
  return sp_offset_for_monitor_base(index) + in_ByteSize(BasicObjectLock::obj_offset_in_bytes());
}


// For OopMaps, map a local variable or spill index to a VMReg.
// This is the offset from sp() in the frame of the slot for the index,
// skewed by SharedInfo::stack0 to indicate a stack location (vs. a register).
//
//         C ABI size +
//         framesize +     framesize +
//         stack0          stack0         stack0          0 <- VMReg->value()
//            |              |              | <registers> |
//  ..........|..............|..............|.............|
//    0 1 2 3 | <C ABI area> | 4 5 6 ...... |               <- local indices
//    ^                        ^          sp()
//    |                        |
//  arguments            non-argument locals


VMReg FrameMap::regname(LIR_Opr opr) const {
  if (opr->is_single_cpu()) {
    assert(!opr->is_virtual(), "should not see virtual registers here");
    return opr->as_register()->as_VMReg();
  } else if (opr->is_single_stack()) {
    return sp_offset2vmreg(sp_offset_for_slot(opr->single_stack_ix()));
  } else if (opr->is_address()) {
    LIR_Address* addr = opr->as_address_ptr();
    assert(addr->base() == stack_pointer(), "sp based addressing only");
    return sp_offset2vmreg(in_ByteSize(addr->index()->as_jint()));
  }
  ShouldNotReachHere();
  return VMRegImpl::Bad();
}




// ------------ extra spill slots ---------------