/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_COMPILEDIC_HPP
#define SHARE_VM_CODE_COMPILEDIC_HPP

#include "interpreter/linkResolver.hpp"
#include "oops/compiledICHolder.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "nativeInst_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif

//-----------------------------------------------------------------------------
// The CompiledIC represents a compiled inline cache.
//
// In order to make patching of the inline cache MT-safe, we only allow the following
// transitions (when not at a safepoint):
//
//
//         [1] --<--  Clean -->---  [1]
//            /       (null)      \
//           /                     \      /-<-\
//          /          [2]          \    /     \
//      Interpreted  ---------> Monomorphic     | [3]
//  (CompiledICHolder*)            (Klass*)     |
//          \                        /   \     /
//       [4] \                      / [4] \->-/
//            \->-  Megamorphic -<-/
//              (CompiledICHolder*)
//
// The text in parentheses () refers to the value of the inline cache receiver (mov instruction).
//
// The numbers in square brackets refer to the kind of transition:
// [1]: Initial fixup. The receiver is found from debug information.
// [2]: Compilation of a method.
// [3]: Recompilation of a method (note: only the entry is changed; the Klass* must stay the same).
// [4]: Inline cache miss. We go directly to a megamorphic call.
//
// The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
// transition is made to a stub.
//
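// A minimal illustrative sketch of the intended patching protocol (not part of
// this header; `nm`, `call_site_addr`, `method` and `receiver_klass` are
// hypothetical locals, and failure paths are elided). A call-site fixup,
// transition [1]/[2] above, might look roughly like:
//
//   CompiledIC* ic = CompiledIC_at(nm, call_site_addr);
//   if (ic->is_clean()) {
//     CompiledICInfo info;
//     CompiledIC::compute_monomorphic_entry(method, receiver_klass,
//                                           ic->is_optimized(),
//                                           /*static_bound=*/ false,
//                                           info, CHECK);
//     ic->set_to_monomorphic(info);
//   }
//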
class CompiledIC;
class ICStub;

class CompiledICInfo : public StackObj {
 private:
  address _entry;              // entry point for the call
  void*   _cached_value;       // value of the cached_value cell (either in a stub or in the inline cache)
  bool    _is_icholder;        // is the cached value a CompiledICHolder*?
  bool    _is_optimized;       // is this an optimized virtual call (i.e., can it be statically bound)?
  bool    _to_interpreter;     // does the call go to the interpreter?
  bool    _release_icholder;   // release the CompiledICHolder* in the destructor if it was never claimed
 public:
  address entry() const        { return _entry; }
  Metadata*    cached_metadata() const         { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
  CompiledICHolder*    claim_cached_icholder() {
    assert(_is_icholder, "");
    assert(_cached_value != NULL, "must be non-NULL");
    _release_icholder = false;
    CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
    icholder->claim();
    return icholder;
  }
  bool    is_optimized() const { return _is_optimized; }
  bool         to_interpreter() const  { return _to_interpreter; }

  void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
    _entry      = entry;
    _cached_value = (void*)klass;
    _to_interpreter = false;
    _is_icholder = false;
    _is_optimized = is_optimized;
    _release_icholder = false;
  }

  void set_interpreter_entry(address entry, Method* method) {
    _entry      = entry;
    _cached_value = (void*)method;
    _to_interpreter = true;
    _is_icholder = false;
    _is_optimized = true;
    _release_icholder = false;
  }

  void set_icholder_entry(address entry, CompiledICHolder* icholder) {
    _entry      = entry;
    _cached_value = (void*)icholder;
    _to_interpreter = true;
    _is_icholder = true;
    _is_optimized = false;
    _release_icholder = true;
  }

  CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
                    _to_interpreter(false), _is_optimized(false), _release_icholder(false) {
  }
  ~CompiledICInfo() {
    // In rare cases the info is computed but not used, so release any
    // CompiledICHolder* that was created
    if (_release_icholder) {
      assert(_is_icholder, "must be");
      CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
      icholder->claim();
      delete icholder;
    }
  }
};
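
// Illustrative use of CompiledICInfo (a sketch with hypothetical locals `ic`,
// `interpreter_entry` and `callee_method`): the info is filled by one of the
// set_*_entry() helpers, or by CompiledIC::compute_monomorphic_entry() below,
// and then consumed by CompiledIC::set_to_monomorphic(). If it is never
// consumed, the destructor releases any CompiledICHolder* created on its behalf.
//
//   CompiledICInfo info;
//   info.set_interpreter_entry(interpreter_entry, callee_method);
//   ic->set_to_monomorphic(info);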

class CompiledIC: public ResourceObj {
  friend class InlineCacheBuffer;
  friend class ICStub;


 private:
  NativeCall*   _ic_call;       // the call instruction
  NativeMovConstReg* _value;    // patchable value cell for this IC
  bool          _is_optimized;  // an optimized virtual call (i.e., no compiled IC)

  CompiledIC(nmethod* nm, NativeCall* ic_call);
  CompiledIC(RelocIterator* iter);

  void initialize_from_iter(RelocIterator* iter);

  static bool is_icholder_entry(address entry);

  // Low-level inline-cache manipulation. These cannot be accessed directly, since it might not be
  // MT-safe to change an inline cache. They change the underlying inline cache directly and *never*
  // make changes to a transition stub.
  void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
  void set_ic_destination(ICStub* stub);
  void set_ic_destination(address entry_point) {
    assert(_is_optimized, "use set_ic_destination_and_value instead");
    internal_set_ic_destination(entry_point, false, NULL, false);
  }
  // This is only for use by ICStubs, where the type of the value isn't known
  void set_ic_destination_and_value(address entry_point, void* value) {
    internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));
  }
  void set_ic_destination_and_value(address entry_point, Metadata* value) {
    internal_set_ic_destination(entry_point, false, value, false);
  }
  void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
    internal_set_ic_destination(entry_point, false, value, true);
  }

  // Reads the location of the transition stub. This will fail with an assertion if no transition
  // stub is associated with the inline cache.
  address stub_address() const;
  bool is_in_transition_state() const;  // Use InlineCacheBuffer

 public:
  // conversion (machine PC to CompiledIC*)
  friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
  friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
  friend CompiledIC* CompiledIC_at(Relocation* call_site);
  friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);

  // This is used to release CompiledICHolder*s from nmethods that
  // are about to be freed. The call site might contain other stale
  // values as well, so it must be careful.
  static void cleanup_call_site(virtual_call_Relocation* call_site);
  static bool is_icholder_call_site(virtual_call_Relocation* call_site);

  // Return the cached_metadata/destination associated with this inline cache. If the cache currently points
  // to a transition stub, it will read the values from the transition stub.
  void* cached_value() const;
  CompiledICHolder* cached_icholder() const {
    assert(is_icholder_call(), "must be");
    return (CompiledICHolder*) cached_value();
  }
  Metadata* cached_metadata() const {
    assert(!is_icholder_call(), "must be");
    return (Metadata*) cached_value();
  }

  address ic_destination() const;

  bool is_optimized() const   { return _is_optimized; }

  // State
  bool is_clean() const;
  bool is_megamorphic() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  bool is_icholder_call() const;

  address end_of_call() { return _ic_call->return_address(); }

  // MT-safe patching of inline caches. Note: it is only safe to call the is_xxx queries while
  // holding the CompiledIC_lock, since that guarantees that no patching takes place. The same
  // goes for verify.
  //
  // Note: We do not provide any direct access to the stub code, to prevent parts of the code
  // from manipulating the inline cache in MT-unsafe ways.
  //
  // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
  //
  void set_to_clean(bool in_use = true);
  void set_to_monomorphic(CompiledICInfo& info);
  void clear_ic_stub();

  // Returns true if successful and false otherwise. The call can fail if memory
  // allocation in the code cache fails.
  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);

  static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
                                        bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);

  // Location
  address instruction_address() const { return _ic_call->instruction_address(); }

  // Misc
  void print()             PRODUCT_RETURN;
  void print_compiled_ic() PRODUCT_RETURN;
  void verify()            PRODUCT_RETURN;
};

inline CompiledIC* CompiledIC_before(nmethod* nm, address return_addr) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(nmethod* nm, address call_site) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(Relocation* call_site) {
  assert(call_site->type() == relocInfo::virtual_call_type ||
         call_site->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(call_site->code(), nativeCall_at(call_site->addr()));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
  assert(reloc_iter->type() == relocInfo::virtual_call_type ||
      reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(reloc_iter);
  c_ic->verify();
  return c_ic;
}
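
// Illustrative sketch: scanning an nmethod for inline-cache call sites and
// cleaning them (assumes the caller holds CompiledIC_lock and is not at a
// safepoint; `nm` is a hypothetical nmethod*). Since CompiledIC is a
// ResourceObj, a ResourceMark bounds the allocations:
//
//   ResourceMark rm;
//   RelocIterator iter(nm);
//   while (iter.next()) {
//     if (iter.type() == relocInfo::virtual_call_type ||
//         iter.type() == relocInfo::opt_virtual_call_type) {
//       CompiledIC* ic = CompiledIC_at(&iter);
//       if (!ic->is_clean()) {
//         ic->set_to_clean();
//       }
//     }
//   }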

//-----------------------------------------------------------------------------
// The CompiledStaticCall represents a call to a static method in compiled code.
//
// The transition diagram of a static call site is somewhat simpler than that of an inline cache:
//
//
//           -----<----- Clean ----->-----
//          /                             \
//         /                               \
//    compiled code <------------> interpreted code
//
//  Clean:            Calls directly to runtime method for fixup
//  Compiled code:    Calls directly to compiled code
//  Interpreted code: Calls to a stub that sets the Method* reference
//
//
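// Illustrative sketch of the split compute/set protocol for a static call site,
// using only the API declared below (hypothetical locals `callee_method` and
// `call_site_addr`; locking and error handling elided):
//
//   StaticCallInfo info;
//   CompiledStaticCall::compute_entry(callee_method, info);
//   CompiledStaticCall* csc = compiledStaticCall_at(call_site_addr);
//   csc->set(info);
//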
class CompiledStaticCall;

class StaticCallInfo {
 private:
  address      _entry;          // Entrypoint
  methodHandle _callee;         // Callee (used when calling interpreter)
  bool         _to_interpreter; // call to interpreted method (otherwise compiled)

  friend class CompiledStaticCall;
 public:
  address      entry() const    { return _entry;  }
  methodHandle callee() const   { return _callee; }
};


class CompiledStaticCall: public NativeCall {
  friend class CompiledIC;

  // Also used by CompiledIC
  void set_to_interpreted(methodHandle callee, address entry);
  bool is_optimized_virtual();

 public:
  friend CompiledStaticCall* compiledStaticCall_before(address return_addr);
  friend CompiledStaticCall* compiledStaticCall_at(address native_call);
  friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);

  // Code
#if defined(AARCH64) && !defined(ZERO)
  static address emit_to_interp_stub(CodeBuffer &cbuf, address mark);
#else
  static address emit_to_interp_stub(CodeBuffer &cbuf);
#endif
  static int to_interp_stub_size();
  static int reloc_to_interp_stub();

  // State
  bool is_clean() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  // Clean static call (will force resolving on next use)
  void set_to_clean();

  // Set state. The entry must be the same as that computed by compute_entry.
  // Computation and setting are split up, since the actions are separate during
  // an OptoRuntime::resolve_xxx.
  void set(const StaticCallInfo& info);

  // Compute entry point given a method
  static void compute_entry(methodHandle m, StaticCallInfo& info);

  // Stub support
  address find_stub();
  static void set_stub_to_clean(static_stub_Relocation* static_stub);

  // Misc.
  void print()  PRODUCT_RETURN;
  void verify() PRODUCT_RETURN;
};


inline CompiledStaticCall* compiledStaticCall_before(address return_addr) {
  CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr);
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(address native_call) {
  CompiledStaticCall* st = (CompiledStaticCall*)native_call;
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) {
  return compiledStaticCall_at(call_site->addr());
}

#endif // SHARE_VM_CODE_COMPILEDIC_HPP