view src/share/vm/utilities/copy.hpp @ 10905:f57189b7648d

8257192: Integrate AArch64 JIT port into 8u
7009641: Don't fail VM when CodeCache is full
8073108: [AArch64] Use x86 and SPARC CPU instructions for GHASH acceleration
8130309: Need to bailout cleanly if creation of stubs fails when codecache is out of space (AArch64 changes)
8131779: AARCH64: add Montgomery multiply intrinsic
8132875: AArch64: Fix error introduced into AArch64 CodeCache by commit for 8130309
8135018: AARCH64: Missing memory barriers for CMS collector
8145320: Create unsafe_arraycopy and generic_arraycopy for AArch64
8148328: aarch64: redundant lsr instructions in stub code.
8148783: aarch64: SEGV running SpecJBB2013
8148948: aarch64: generate_copy_longs calls align() incorrectly
8149080: AArch64: Recognise disjoint array copy in stub code
8149365: aarch64: memory copy does not prefetch on backwards copy
8149907: aarch64: use load/store pair instructions in call_stub
8150038: aarch64: make use of CBZ and CBNZ when comparing narrow pointer with zero
8150045: arraycopy causes segfaults in SATB during garbage collection
8150082: aarch64: optimise small array copy
8150229: aarch64: pipeline class for several instructions is not set correctly
8150313: aarch64: optimise array copy using SIMD instructions
8150394: aarch64: add support for 8.1 LSE CAS instructions
8150652: Remove unused code in AArch64 back end
8151340: aarch64: prefetch the destination word for write prior to ldxr/stxr loops.
8151502: optimize pd_disjoint_words and pd_conjoint_words
8151775: aarch64: add support for 8.1 LSE atomic operations
8152537: aarch64: Make use of CBZ and CBNZ when comparing unsigned values with zero.
8152840: aarch64: improve _unsafe_arraycopy stub routine
8153172: aarch64: hotspot crashes after the 8.1 LSE patch is merged
8153713: aarch64: improve short array clearing using store pair
8153797: aarch64: Add Arrays.fill stub code
8154413: AArch64: Better byte behaviour
8154537: AArch64: some integer rotate instructions are never emitted
8154739: AArch64: TemplateTable::fast_xaccess loads in wrong mode
8155015: Aarch64: bad assert in spill generation code
8155100: AArch64: Relax alignment requirement for byte_map_base
8155612: Aarch64: vector nodes need to support misaligned offset
8155617: aarch64: ClearArray does not use DC ZVA
8155627: Enable SA on AArch64
8155653: TestVectorUnalignedOffset.java not pushed with 8155612
8156731: aarch64: java/util/Arrays/Correct.java fails due to _generic_arraycopy stub routine
8157841: aarch64: prefetch ignores cache line size
8157906: aarch64: some more integer rotate instructions are never emitted
8158913: aarch64: SEGV running Spark terasort
8159052: aarch64: optimise unaligned copies in pd_disjoint_words and pd_conjoint_words
8159063: aarch64: optimise unaligned array copy long
8160748: [AArch64] Inconsistent types for ideal_reg
8161072: AArch64: jtreg compiler/uncommontrap/TestDeoptOOM failure
8161190: AArch64: Fix overflow in immediate cmp instruction
8164113: AArch64: follow-up the fix for 8161598
8165673: AArch64: Fix JNI floating point argument handling
8167200: AArch64: Broken stack pointer adjustment in interpreter
8167421: AArch64: in one core system, fatal error: Illegal threadstate encountered
8167595: AArch64: SEGV in stub code cipherBlockChaining_decryptAESCrypt
8168699: Validate special case invocations [AArch64 support]
8168888: Port 8160591: Improve internal array handling to AArch64.
8170100: AArch64: Crash in C1-compiled code accessing References
8170188: jtreg test compiler/types/TestMeetIncompatibleInterfaceArrays.java causes JVM crash
8170873: PPC64/aarch64: Poor StrictMath performance due to non-optimized compilation
8171537: aarch64: compiler/c1/Test6849574.java generates guarantee failure in C1
8172881: AArch64: assertion failure: the int pressure is incorrect
8173472: AArch64: C1 comparisons with null only use 32-bit instructions
8176100: [AArch64] [REDO][REDO] G1 Needs pre barrier on dereference of weak JNI handles
8177661: Correct ad rule output register types from iRegX to iRegXNoSp
8179954: AArch64: C1 and C2 volatile accesses are not sequentially consistent
8182581: aarch64: fix for crash caused by earlyret of compiled method
8183925: [AArch64] Decouple crash protection from watcher thread
8186325: AArch64: jtreg test hotspot/test/gc/g1/TestJNIWeakG1/TestJNIWeakG1.java SEGV
8187224: aarch64: some inconsistency between aarch64_ad.m4 and aarch64.ad
8189170: [AArch64] Add option to disable stack overflow checking in primordial thread for use with JNI_CreateJavaJVM
8193133: Assertion failure because 0xDEADDEAD can be in-heap
8195685: AArch64 port of 8174962: Better interface invocations
8195859: AArch64: vtableStubs gtest fails after 8174962
8196136: AArch64: Correct register use in patch for JDK-8194686
8196221: AArch64: Mistake in committed patch for JDK-8195859
8199712: [AArch64] Flight Recorder
8203481: Incorrect constraint for unextended_sp in frame:safe_for_sender
8203699: java/lang/invoke/SpecialInterfaceCall fails with SIGILL on aarch64
8205421: AARCH64: StubCodeMark should be placed after alignment
8206163: AArch64: incorrect code generation for StoreCM
8207345: Trampoline generation code reads from uninitialized memory
8207838: AArch64: Float registers incorrectly restored in JNI call
8209413: AArch64: NPE in clhsdb jstack command
8209414: [AArch64] method handle invocation does not respect JVMTI interp_only mode
8209415: Fix JVMTI test failure HS202
8209420: Track membars for volatile accesses so they can be properly optimized
8209835: Aarch64: elide barriers on all volatile operations
8210425: [AArch64] sharedRuntimeTrig/sharedRuntimeTrans compiled without optimization
8211064: [AArch64] Interpreter and c1 don't correctly handle jboolean results in native calls
8211233: MemBarNode::trailing_membar() and MemBarNode::leading_membar() need to handle dying subgraphs better
8213134: AArch64: vector shift failed with MaxVectorSize=8
8213419: [AArch64] C2 may hang in MulLNode::Ideal()/MulINode::Ideal() with gcc 8.2.1
8214857: "bad trailing membar" assert failure at memnode.cpp:3220
8215951: AArch64: jtreg test vmTestbase/nsk/jvmti/PopFrame/popframe005 segfaults
8215961: jdk/jfr/event/os/TestCPUInformation.java fails on AArch64
8216350: AArch64: monitor unlock fast path not called
8216989: CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier() does not check for zero length on AARCH64
8217368: AArch64: C2 recursive stack locking optimisation not triggered
8218185: aarch64: missing LoadStore barrier in TemplateTable::putfield_or_static
8219011: Implement MacroAssembler::warn method on AArch64
8219635: aarch64: missing LoadStore barrier in TemplateTable::fast_storefield
8221220: AArch64: Add StoreStore membar explicitly for Volatile Writes in TemplateTable
8221658: aarch64: add necessary predicate for ubfx patterns
8224671: AArch64: mauve System.arraycopy test failure
8224828: aarch64: rflags is not correct after safepoint poll
8224851: AArch64: fix warnings and errors with Clang and GCC 8.3
8224880: AArch64: java/javac error with AllocatePrefetchDistance
8228400: Remove built-in AArch64 simulator
8228406: Superfluous change in chaitin.hpp
8228593: Revert explicit JDK 7 support additions
8228716: Revert InstanceKlass::print_on debug additions
8228718: Revert incorrect backport of JDK-8129757 to 8-aarch64
8228725: AArch64: Purge method call format support
8228747: Revert "unused" attribute from test_arraycopy_func
8228767: Revert ResourceMark additions
8228770: Revert development hsdis changes
8229123: Revert build fixes for aarch64/zero
8229124: Revert disassembler.cpp changes
8229145: Revert TemplateTable::bytecode() visibility change
8233839: aarch64: missing memory barrier in NewObjectArrayStub and NewTypeArrayStub
8237512: AArch64: aarch64TestHook leaks a BufferBlob
8246482: Build failures with +JFR -PCH
8247979: aarch64: missing side effect of killing flags for clearArray_reg_reg
8248219: aarch64: missing memory barrier in fast_storefield and fast_accessfield
Reviewed-by: shade, aph
author andrew
date Mon, 01 Feb 2021 03:48:36 +0000
parents dee6a1ce4a0c
children f79e943d15a7

/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_UTILITIES_COPY_HPP
#define SHARE_VM_UTILITIES_COPY_HPP

#include "runtime/stubRoutines.hpp"

// Assembly code for platforms that need it.
extern "C" {
  void _Copy_conjoint_words(HeapWord* from, HeapWord* to, size_t count);
  void _Copy_disjoint_words(HeapWord* from, HeapWord* to, size_t count);

  void _Copy_conjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count);
  void _Copy_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count);

  void _Copy_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count);
  void _Copy_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count);

  void _Copy_conjoint_bytes(void* from, void* to, size_t count);

  void _Copy_conjoint_bytes_atomic  (void*   from, void*   to, size_t count);
  void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count);
  void _Copy_conjoint_jints_atomic  (jint*   from, jint*   to, size_t count);
  void _Copy_conjoint_jlongs_atomic (jlong*  from, jlong*  to, size_t count);
  void _Copy_conjoint_oops_atomic   (oop*    from, oop*    to, size_t count);

  void _Copy_arrayof_conjoint_bytes  (HeapWord* from, HeapWord* to, size_t count);
  void _Copy_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count);
  void _Copy_arrayof_conjoint_jints  (HeapWord* from, HeapWord* to, size_t count);
  void _Copy_arrayof_conjoint_jlongs (HeapWord* from, HeapWord* to, size_t count);
  void _Copy_arrayof_conjoint_oops   (HeapWord* from, HeapWord* to, size_t count);
}

class Copy : AllStatic {
 public:
  // Block copy methods have several attributes.  We don't define all possibilities.
  //   alignment: aligned to BytesPerLong
  //   arrayof:   arraycopy operation with both operands aligned on the same
  //              boundary as the first element of an array of the copy unit.
  //              This is currently a HeapWord boundary on all platforms, except
  //              for long and double arrays, which are aligned on an 8-byte
  //              boundary on all platforms.
  //              arraycopy operations are implicitly atomic on each array element.
  //   overlap:   disjoint or conjoint.
  //   copy unit: bytes or words (i.e., HeapWords) or oops (i.e., pointers).
  //   atomicity: atomic or non-atomic on the copy unit.
  //
  // Names are constructed as follows:
  //
  //     [ 'aligned_' | 'arrayof_' ]
  //     ('conjoint_' | 'disjoint_')
  //     ('words' | 'bytes' | 'jshorts' | 'jints' | 'jlongs' | 'oops')
  //     [ '_atomic' ]
  //
  // Except in the arrayof case, whatever the alignment is, we assume we can copy
  // whole alignment units.  E.g., if BytesPerLong is 2x word alignment, an odd
  // count may copy an extra word.  In the arrayof case, we are allowed to copy
  // only the number of copy units specified.
  //
  // All callees check count for 0.
  //
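  // Illustrative usage (hypothetical caller, not taken from this file): under
  // the naming scheme above, a disjoint, per-word-atomic copy of HeapWords is
  // spelled 'disjoint_words_atomic', and a caller with word-aligned,
  // non-overlapping buffers would invoke it as:
  //
  //   HeapWord* src = ...;   // word-aligned source
  //   HeapWord* dst = ...;   // word-aligned destination, disjoint from src
  //   Copy::disjoint_words_atomic(src, dst, word_count);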

  // HeapWords

  // Word-aligned words,    conjoint, not atomic on each word
  static void conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, LogHeapWordSize);
    pd_conjoint_words(from, to, count);
  }

  // Word-aligned words,    disjoint, not atomic on each word
  static void disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, LogHeapWordSize);
    assert_disjoint(from, to, count);
    pd_disjoint_words(from, to, count);
  }

  // Word-aligned words,    disjoint, atomic on each word
  static void disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, LogHeapWordSize);
    assert_disjoint(from, to, count);
    pd_disjoint_words_atomic(from, to, count);
  }

  // Object-aligned words,  conjoint, not atomic on each word
  static void aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_aligned(from, to);
    pd_aligned_conjoint_words(from, to, count);
  }

  // Object-aligned words,  disjoint, not atomic on each word
  static void aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_aligned(from, to);
    assert_disjoint(from, to, count);
    pd_aligned_disjoint_words(from, to, count);
  }

  // bytes, jshorts, jints, jlongs, oops

  // bytes,                 conjoint, not atomic on each byte (not that it matters)
  static void conjoint_jbytes(void* from, void* to, size_t count) {
    pd_conjoint_bytes(from, to, count);
  }

  // bytes,                 conjoint, atomic on each byte (not that it matters)
  static void conjoint_jbytes_atomic(void* from, void* to, size_t count) {
    pd_conjoint_bytes(from, to, count);
  }

  // jshorts,               conjoint, atomic on each jshort
  static void conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerShort);
    pd_conjoint_jshorts_atomic(from, to, count);
  }

  // jints,                 conjoint, atomic on each jint
  static void conjoint_jints_atomic(jint* from, jint* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerInt);
    pd_conjoint_jints_atomic(from, to, count);
  }

  // jlongs,                conjoint, atomic on each jlong
  static void conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerLong);
    pd_conjoint_jlongs_atomic(from, to, count);
  }

  // oops,                  conjoint, atomic on each oop
  static void conjoint_oops_atomic(oop* from, oop* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerHeapOop);
    pd_conjoint_oops_atomic(from, to, count);
  }

  // overloaded for UseCompressedOops
  static void conjoint_oops_atomic(narrowOop* from, narrowOop* to, size_t count) {
    assert(sizeof(narrowOop) == sizeof(jint), "this cast is wrong");
    assert_params_ok(from, to, LogBytesPerInt);
    pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
  }
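
  // Illustrative note: narrowOop is a 32-bit compressed reference, so the
  // overload above simply delegates to the jint copy; a hypothetical caller
  // passes narrowOop* operands directly:
  //
  //   Copy::conjoint_oops_atomic(narrow_src, narrow_dst, count);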

  // Copy a span of memory.  If the span is an integral number of aligned
  // longs, words, or ints, copy those units atomically.
  // The largest atomic transfer unit is 8 bytes, or the largest power
  // of two which divides all of from, to, and size, whichever is smaller.
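  //
  // For example (illustrative values only): with from = 0x1000, to = 0x2004
  // and size = 24, the largest power of two dividing all three is 4, so the
  // span would be copied as jints; had all three been multiples of 8, it
  // would be copied as jlongs.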
  static void conjoint_memory_atomic(void* from, void* to, size_t size);

  // bytes,                 conjoint array, atomic on each byte (not that it matters)
  static void arrayof_conjoint_jbytes(HeapWord* from, HeapWord* to, size_t count) {
    pd_arrayof_conjoint_bytes(from, to, count);
  }

  // jshorts,               conjoint array, atomic on each jshort
  static void arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerShort);
    pd_arrayof_conjoint_jshorts(from, to, count);
  }

  // jints,                 conjoint array, atomic on each jint
  static void arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerInt);
    pd_arrayof_conjoint_jints(from, to, count);
  }

  // jlongs,                conjoint array, atomic on each jlong
  static void arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerLong);
    pd_arrayof_conjoint_jlongs(from, to, count);
  }

  // oops,                  conjoint array, atomic on each oop
  static void arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerHeapOop);
    pd_arrayof_conjoint_oops(from, to, count);
  }

  // Known overlap methods

  // Copy word-aligned words from higher to lower addresses, not atomic on each word
  inline static void conjoint_words_to_lower(HeapWord* from, HeapWord* to, size_t byte_count) {
    // byte_count is passed in bytes so that its alignment can be checked
    assert_params_ok(from, to, LogHeapWordSize);
    assert_byte_count_ok(byte_count, HeapWordSize);

    size_t count = (size_t)round_to(byte_count, HeapWordSize) >> LogHeapWordSize;
    assert(to <= from || from + count <= to, "do not overwrite source data");

    while (count-- > 0) {
      *to++ = *from++;
    }
  }

  // Copy word-aligned words from lower to higher addresses, not atomic on each word
  inline static void conjoint_words_to_higher(HeapWord* from, HeapWord* to, size_t byte_count) {
    // byte_count is passed in bytes so that its alignment can be checked
    assert_params_ok(from, to, LogHeapWordSize);
    assert_byte_count_ok(byte_count, HeapWordSize);

    size_t count = (size_t)round_to(byte_count, HeapWordSize) >> LogHeapWordSize;
    assert(from <= to || to + count <= from, "do not overwrite source data");

    from += count - 1;
    to   += count - 1;
    while (count-- > 0) {
      *to-- = *from--;
    }
  }
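
  // Illustrative example (assumed usage, not taken from any caller): to shift
  // n HeapWords one word towards a lower address within the same buffer, the
  // destination precedes the source, so the ascending copy is the safe choice:
  //
  //   Copy::conjoint_words_to_lower(buf + 1, buf, n * HeapWordSize);
  //
  // A shift towards a higher address would instead use
  // conjoint_words_to_higher, which copies in descending order.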

  /**
   * Copy and *unconditionally* byte swap elements
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap
   */
  static void conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size);
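  //
  // Illustrative example (hypothetical buffers): byte-swapping n jint values,
  // e.g. when converting an array between big- and little-endian encodings:
  //
  //   Copy::conjoint_swap(src, dst, n * sizeof(jint), sizeof(jint));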

  // Fill methods

  // Fill word-aligned words, not atomic on each word
  // set_words
  static void fill_to_words(HeapWord* to, size_t count, juint value = 0) {
    assert_params_ok(to, LogHeapWordSize);
    pd_fill_to_words(to, count, value);
  }

  static void fill_to_aligned_words(HeapWord* to, size_t count, juint value = 0) {
    assert_params_aligned(to);
    pd_fill_to_aligned_words(to, count, value);
  }

  // Fill bytes
  static void fill_to_bytes(void* to, size_t count, jubyte value = 0) {
    pd_fill_to_bytes(to, count, value);
  }

  // Fill a span of memory.  If the span is an integral number of aligned
  // longs, words, or ints, store to those units atomically.
  // The largest atomic transfer unit is 8 bytes, or the largest power
  // of two which divides both to and size, whichever is smaller.
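  //
  // For example (illustrative only): filling 16 bytes at an 8-byte-aligned
  // address stores two jlongs, while the same request at an address that is
  // only 4-byte aligned stores four jints instead.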
  static void fill_to_memory_atomic(void* to, size_t size, jubyte value = 0);

  // Zero-fill methods

  // Zero word-aligned words, not atomic on each word
  static void zero_to_words(HeapWord* to, size_t count) {
    assert_params_ok(to, LogHeapWordSize);
    pd_zero_to_words(to, count);
  }

  // Zero bytes
  static void zero_to_bytes(void* to, size_t count) {
    pd_zero_to_bytes(to, count);
  }

 private:
  static bool params_disjoint(HeapWord* from, HeapWord* to, size_t count) {
    if (from < to) {
      return pointer_delta(to, from) >= count;
    }
    return pointer_delta(from, to) >= count;
  }
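
  // Illustrative example (hypothetical values, 64-bit VM): with from = 0x1000,
  // to = 0x1010 and count = 4, pointer_delta(to, from) is 2 HeapWords, which
  // is less than count, so the regions overlap and params_disjoint returns
  // false; a count of 2 or less would be accepted as disjoint.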

  // These methods raise a fatal error if they detect a problem.

  static void assert_disjoint(HeapWord* from, HeapWord* to, size_t count) {
#ifdef ASSERT
    if (!params_disjoint(from, to, count))
      basic_fatal("source and dest overlap");
#endif
  }

  static void assert_params_ok(void* from, void* to, intptr_t log_align) {
#ifdef ASSERT
    if (mask_bits((uintptr_t)from, right_n_bits(log_align)) != 0)
      basic_fatal("not aligned");
    if (mask_bits((uintptr_t)to, right_n_bits(log_align)) != 0)
      basic_fatal("not aligned");
#endif
  }
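
  // Illustrative note: with log_align = LogHeapWordSize (3 on a 64-bit VM),
  // the check above masks the low three address bits, so any pointer that is
  // not a multiple of 8 trips the "not aligned" fatal error.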

  static void assert_params_ok(HeapWord* to, intptr_t log_align) {
#ifdef ASSERT
    if (mask_bits((uintptr_t)to, right_n_bits(log_align)) != 0)
      basic_fatal("not word aligned");
#endif
  }
  static void assert_params_aligned(HeapWord* from, HeapWord* to) {
#ifdef ASSERT
    if (mask_bits((uintptr_t)from, BytesPerLong-1) != 0)
      basic_fatal("not long aligned");
    if (mask_bits((uintptr_t)to, BytesPerLong-1) != 0)
      basic_fatal("not long aligned");
#endif
  }

  static void assert_params_aligned(HeapWord* to) {
#ifdef ASSERT
    if (mask_bits((uintptr_t)to, BytesPerLong-1) != 0)
      basic_fatal("not long aligned");
#endif
  }

  static void assert_byte_count_ok(size_t byte_count, size_t unit_size) {
#ifdef ASSERT
    if ((size_t)round_to(byte_count, unit_size) != byte_count) {
      basic_fatal("byte count must be aligned");
    }
#endif
  }

  // Platform dependent implementations of the above methods.
#ifdef TARGET_ARCH_x86
# include "copy_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "copy_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "copy_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "copy_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "copy_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "copy_ppc.hpp"
#endif

};

#endif // SHARE_VM_UTILITIES_COPY_HPP