changeset 10634:eeb08cfebded icedtea-3.16.0 icedtea-3.17.0pre00

8224851, PR3785: AArch64: fix warnings and errors with Clang and GCC 8.3
Reviewed-by: aph
author sgehwolf
date Tue, 04 Feb 2020 04:42:57 +0000
parents 773c67608051
children 6bd5ce0d7096
files src/cpu/aarch64/vm/aarch64.ad src/cpu/aarch64/vm/assembler_aarch64.hpp src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp src/cpu/aarch64/vm/frame_aarch64.cpp src/cpu/aarch64/vm/interp_masm_aarch64.hpp src/cpu/aarch64/vm/macroAssembler_aarch64.cpp src/cpu/aarch64/vm/vm_version_aarch64.cpp src/os_cpu/linux_aarch64/vm/copy_linux_aarch64.s src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp
diffstat 10 files changed, 35 insertions(+), 45 deletions(-)
--- a/src/cpu/aarch64/vm/aarch64.ad	Tue Apr 21 23:47:44 2020 +0100
+++ b/src/cpu/aarch64/vm/aarch64.ad	Tue Feb 04 04:42:57 2020 +0000
@@ -12783,7 +12783,7 @@
   format %{ "fcmps $src1, 0.0" %}
 
   ins_encode %{
-    __ fcmps(as_FloatRegister($src1$$reg), 0.0D);
+    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
   %}
 
   ins_pipe(pipe_class_compare);
@@ -12812,7 +12812,7 @@
   format %{ "fcmpd $src1, 0.0" %}
 
   ins_encode %{
-    __ fcmpd(as_FloatRegister($src1$$reg), 0.0D);
+    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
   %}
 
   ins_pipe(pipe_class_compare);
@@ -12888,7 +12888,7 @@
     Label done;
     FloatRegister s1 = as_FloatRegister($src1$$reg);
     Register d = as_Register($dst$$reg);
-    __ fcmps(s1, 0.0D);
+    __ fcmps(s1, 0.0);
     // installs 0 if EQ else -1
     __ csinvw(d, zr, zr, Assembler::EQ);
     // keeps -1 if less or unordered else installs 1
@@ -12915,7 +12915,7 @@
     Label done;
     FloatRegister s1 = as_FloatRegister($src1$$reg);
     Register d = as_Register($dst$$reg);
-    __ fcmpd(s1, 0.0D);
+    __ fcmpd(s1, 0.0);
     // installs 0 if EQ else -1
     __ csinvw(d, zr, zr, Assembler::EQ);
     // keeps -1 if less or unordered else installs 1
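
Note on the literal changes above: the Java-style D suffix is not a valid C++ floating-point suffix, so Clang rejects these literals, while a plain 0.0 is already of type double. A minimal illustration (not part of the patch):

    double zero_ok = 0.0;       // a C++ floating literal defaults to double
    // double zero_bad = 0.0D;  // Clang error: invalid suffix on floating constant
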
--- a/src/cpu/aarch64/vm/assembler_aarch64.hpp	Tue Apr 21 23:47:44 2020 +0100
+++ b/src/cpu/aarch64/vm/assembler_aarch64.hpp	Tue Feb 04 04:42:57 2020 +0000
@@ -281,7 +281,7 @@
   unsigned get(int msb = 31, int lsb = 0) {
     int nbits = msb - lsb + 1;
     unsigned mask = ((1U << nbits) - 1) << lsb;
-    assert_cond(bits & mask == mask);
+    assert_cond((bits & mask) == mask);
     return (insn & mask) >> lsb;
   }
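
Note on the assert fix above: in C++, == binds more tightly than &, so the unparenthesised expression parsed as bits & (mask == mask), and GCC 8.3 flags it with -Wparentheses. A minimal sketch with hypothetical values (not part of the patch):

    unsigned bits = 0xfe, mask = 0x30;
    bool as_written = bits & mask == mask;    // parses as bits & (mask == mask), i.e. bits & 1 -> false here
    bool intended   = (bits & mask) == mask;  // true: every bit of mask is set in bits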
 
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Tue Apr 21 23:47:44 2020 +0100
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Tue Feb 04 04:42:57 2020 +0000
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013, Red Hat Inc.
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates.
  * All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -1128,8 +1128,8 @@
       // Assembler::EQ does not permit unordered branches, so we add
       // another branch here.  Likewise, Assembler::NE does not permit
       // ordered branches.
-      if (is_unordered && op->cond() == lir_cond_equal 
-	  || !is_unordered && op->cond() == lir_cond_notEqual)
+      if ((is_unordered && op->cond() == lir_cond_equal) 
+	  || (!is_unordered && op->cond() == lir_cond_notEqual))
 	__ br(Assembler::VS, *(op->ublock()->label()));
       switch(op->cond()) {
       case lir_cond_equal:        acond = Assembler::EQ; break;
@@ -1823,18 +1823,22 @@
     switch (code) {
     case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
     case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
+    case lir_mul_strictfp: // fall through
     case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
+    case lir_div_strictfp: // fall through
     case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
     default:
       ShouldNotReachHere();
     }
   } else if (left->is_double_fpu()) {
     if (right->is_double_fpu()) {
-      // cpu register - cpu register
+      // fpu register - fpu register
       switch (code) {
       case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
       case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
+      case lir_mul_strictfp: // fall through
       case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
+      case lir_div_strictfp: // fall through
       case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
       default:
 	ShouldNotReachHere();
--- a/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp	Tue Apr 21 23:47:44 2020 +0100
+++ b/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp	Tue Feb 04 04:42:57 2020 +0000
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013, Red Hat Inc.
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates.
  * All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -496,7 +496,7 @@
     tmp = new_register(T_DOUBLE);
   }
 
-  arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), NULL);
+  arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp());
 
   set_result(x, round_item(reg));
 }
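
Note on the two C1 changes above: arithmetic_op_fpu presumably takes a bool strictfp flag in this position, so passing NULL draws GCC's warning about converting NULL to a non-pointer type (-Wconversion-null); forwarding x->is_strictfp() avoids the warning, and the lir_mul_strictfp / lir_div_strictfp fall-through cases added in c1_LIRAssembler_aarch64.cpp handle the opcodes that can now reach the back end.
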
--- a/src/cpu/aarch64/vm/frame_aarch64.cpp	Tue Apr 21 23:47:44 2020 +0100
+++ b/src/cpu/aarch64/vm/frame_aarch64.cpp	Tue Feb 04 04:42:57 2020 +0000
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013, Red Hat Inc.
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates.
  * All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -819,11 +819,13 @@
 
 extern "C" void pf(unsigned long sp, unsigned long fp, unsigned long pc,
 		   unsigned long bcx, unsigned long thread) {
-  RegisterMap map((JavaThread*)thread, false);
   if (!reg_map) {
-    reg_map = (RegisterMap*)os::malloc(sizeof map, mtNone);
+    reg_map = NEW_C_HEAP_OBJ(RegisterMap, mtNone);
+    ::new (reg_map) RegisterMap((JavaThread*)thread, false);
+  } else {
+    *reg_map = RegisterMap((JavaThread*)thread, false);
   }
-  memcpy(reg_map, &map, sizeof map);
+
   {
     CodeBlob *cb = CodeCache::find_blob((address)pc);
     if (cb && cb->frame_size())
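
Note on the reg_map change above: GCC 8 warns with -Wclass-memaccess when memcpy is used to copy an object of non-trivially-copyable type such as RegisterMap, which is presumably what this rewrite addresses; allocating raw storage once and constructing in place with placement new (ordinary assignment on later calls) expresses the same intent without the raw byte copy. A minimal sketch of the pattern, using a hypothetical Tracker type (not part of the patch):

    #include <cstdlib>
    #include <new>

    struct Tracker {                                   // hypothetical non-trivial type
      Tracker() : copies(0) {}
      Tracker(const Tracker &other) : copies(other.copies + 1) {}
      Tracker &operator=(const Tracker &other) { copies = other.copies + 1; return *this; }
      int copies;
    };

    static Tracker *cached = NULL;

    void remember(const Tracker &src) {
      if (cached == NULL) {
        cached = static_cast<Tracker *>(std::malloc(sizeof(Tracker)));
        ::new (cached) Tracker(src);                   // first call: construct in the raw storage
      } else {
        *cached = src;                                 // later calls: ordinary assignment
        // memcpy(cached, &src, sizeof src);           // GCC 8: -Wclass-memaccess on a non-trivial type
      }
    }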
--- a/src/cpu/aarch64/vm/interp_masm_aarch64.hpp	Tue Apr 21 23:47:44 2020 +0100
+++ b/src/cpu/aarch64/vm/interp_masm_aarch64.hpp	Tue Feb 04 04:42:57 2020 +0000
@@ -40,8 +40,6 @@
  protected:
 
  protected:
-  using MacroAssembler::call_VM_leaf_base;
-
   // Interpreter specific version of call_VM_base
   using MacroAssembler::call_VM_leaf_base;
 
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Tue Apr 21 23:47:44 2020 +0100
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Tue Feb 04 04:42:57 2020 +0000
@@ -2443,7 +2443,7 @@
   if ((offset & (size-1)) && offset >= (1<<8)) {
     add(tmp, base, offset & ((1<<12)-1));
     base = tmp;
-    offset &= -1<<12;
+    offset &= -1u<<12;
   }
 
   if (offset >= (1<<12) * size) {
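
Note on the mask fix above: before C++20, left-shifting a negative signed value is undefined behaviour, and GCC 8.3 warns about it with -Wshift-negative-value; writing the literal as unsigned produces the same 0xfffff000 mask with well-defined semantics. A minimal illustration, assuming a 32-bit int offset (not part of the patch):

    int offset = 0x34ff;
    // offset &= -1 << 12;    // left shift of a negative value: undefined before C++20
    offset &= -1u << 12;      // well-defined: keeps bits 12 and up, clears the low 12 bits
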
--- a/src/cpu/aarch64/vm/vm_version_aarch64.cpp	Tue Apr 21 23:47:44 2020 +0100
+++ b/src/cpu/aarch64/vm/vm_version_aarch64.cpp	Tue Feb 04 04:42:57 2020 +0000
@@ -158,7 +158,7 @@
   if (FILE *f = fopen("/proc/cpuinfo", "r")) {
     char buf[128], *p;
     while (fgets(buf, sizeof (buf), f) != NULL) {
-      if (p = strchr(buf, ':')) {
+      if ((p = strchr(buf, ':')) != NULL) {
         long v = strtol(p+1, NULL, 0);
         if (strncmp(buf, "CPU implementer", sizeof "CPU implementer" - 1) == 0) {
           _cpu = v;
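
Note on the condition fix above: GCC flags this pattern with -Wparentheses ("suggest parentheses around assignment used as truth value"), and Clang has an equivalent warning; the explicit != NULL comparison keeps the behaviour and states the intent. A minimal sketch of the same shape (hypothetical helper, not from the patch):

    #include <stdlib.h>
    #include <string.h>

    static long value_after_colon(char *buf) {
      char *p;
      // if (p = strchr(buf, ':'))           // -Wparentheses: assignment used as truth value
      if ((p = strchr(buf, ':')) != NULL)    // explicit comparison, same behaviour
        return strtol(p + 1, NULL, 0);
      return -1;
    }
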
--- a/src/os_cpu/linux_aarch64/vm/copy_linux_aarch64.s	Tue Apr 21 23:47:44 2020 +0100
+++ b/src/os_cpu/linux_aarch64/vm/copy_linux_aarch64.s	Tue Feb 04 04:42:57 2020 +0000
@@ -248,7 +248,7 @@
         blo     bwd_copy_drain
 
 bwd_copy_again:
-        prfm    pldl1keep, [s, #-256]
+        prfum   pldl1keep, [s, #-256]
         stp     t0, t1, [d, #-16]
         ldp     t0, t1, [s, #-16]
         stp     t2, t3, [d, #-32]
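
Note on the prefetch fix above: in the A64 ISA, PRFM (immediate) encodes only an unsigned, 8-byte-scaled offset, so a negative offset such as #-256 is not representable in that form; PRFUM is the unscaled variant with a signed 9-bit offset (-256..255). Spelling the unscaled form explicitly is presumably what Clang's integrated assembler requires here, whereas GNU as accepted the original spelling.
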
--- a/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp	Tue Apr 21 23:47:44 2020 +0100
+++ b/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp	Tue Feb 04 04:42:57 2020 +0000
@@ -76,12 +76,10 @@
 
 #define REG_FP 29
 
-#define SPELL_REG_SP "sp"
-#define SPELL_REG_FP "x29"
+#define NOINLINE __attribute__ ((noinline))
 
-address os::current_stack_pointer() {
-  register void *esp __asm__ (SPELL_REG_SP);
-  return (address) esp;
+NOINLINE address os::current_stack_pointer() {
+  return (address)__builtin_frame_address(0);
 }
 
 char* os::non_memory_address_word() {
@@ -155,14 +153,8 @@
   return frame(fr->link(), fr->link(), fr->sender_pc());
 }
 
-intptr_t* _get_previous_fp() {
-  register intptr_t **ebp __asm__ (SPELL_REG_FP);
-  return (intptr_t*) *ebp;   // we want what it points to.
-}
-
-
-frame os::current_frame() {
-  intptr_t* fp = _get_previous_fp();
+NOINLINE frame os::current_frame() {
+  intptr_t *fp = *(intptr_t **)__builtin_frame_address(0);
   frame myframe((intptr_t*)os::current_stack_pointer(),
                 (intptr_t*)fp,
                 CAST_FROM_FN_PTR(address, os::current_frame));
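
Note on the two hunks above: local variables bound to a named register via __asm__ are a GCC extension that Clang does not handle the same way, and reading them is not guaranteed to yield the live register value; __builtin_frame_address(0) is supported by both GCC and Clang, and marking the functions noinline keeps the query pinned to the function's own frame. A minimal sketch of the replacement idea (illustrative, not the patch itself):

    #include <cstdio>

    __attribute__((noinline)) static void *current_frame_address() {
      // Frame address of this (never-inlined) function; used here as a stand-in
      // for "roughly where the stack currently is".
      return __builtin_frame_address(0);
    }

    int main() {
      std::printf("frame is near %p\n", current_frame_address());
      return 0;
    }
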
@@ -176,12 +168,6 @@
 
 // Utility functions
 
-// From IA32 System Programming Guide
-enum {
-  trap_page_fault = 0xE
-};
-
-
 // An operation in Unsafe has faulted.  We're going to return to the
 // instruction after the faulting load or store.  We also set
 // pending_unsafe_access_error so that at some point in the future our
@@ -607,12 +593,12 @@
 
   void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
     if (from > to) {
-      jshort *end = from + count;
+      const jshort *end = from + count;
       while (from < end)
         *(to++) = *(from++);
     }
     else if (from < to) {
-      jshort *end = from;
+      const jshort *end = from;
       from += count - 1;
       to   += count - 1;
       while (from >= end)
@@ -621,12 +607,12 @@
   }
   void _Copy_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
     if (from > to) {
-      jint *end = from + count;
+      const jint *end = from + count;
       while (from < end)
         *(to++) = *(from++);
     }
     else if (from < to) {
-      jint *end = from;
+      const jint *end = from;
       from += count - 1;
       to   += count - 1;
       while (from >= end)
@@ -635,12 +621,12 @@
   }
   void _Copy_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
     if (from > to) {
-      jlong *end = from + count;
+      const jlong *end = from + count;
       while (from < end)
         os::atomic_copy64(from++, to++);
     }
     else if (from < to) {
-      jlong *end = from;
+      const jlong *end = from;
       from += count - 1;
       to   += count - 1;
       while (from >= end)