changeset 10409:d861794ddc0f icedtea-3.13.0pre01

Merge jdk8u222-b01
author:   andrew
date:     Fri, 24 May 2019 04:01:21 +0100
parents:  5f929b75d93c (current diff)  7fe2cda84af1 (diff)
children: 1ff6a5181156
files:    .hgtags src/cpu/ppc/vm/assembler_ppc.hpp src/cpu/ppc/vm/assembler_ppc.inline.hpp src/cpu/ppc/vm/stubGenerator_ppc.cpp src/cpu/ppc/vm/vm_version_ppc.cpp src/share/vm/code/codeCache.cpp src/share/vm/code/codeCache.hpp src/share/vm/code/nmethod.cpp src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp src/share/vm/memory/genCollectedHeap.cpp src/share/vm/memory/genCollectedHeap.hpp src/share/vm/opto/memnode.cpp
diffstat: 21 files changed, 548 insertions(+), 84 deletions(-)
--- a/.hgtags	Wed Apr 10 16:47:03 2019 +0100
+++ b/.hgtags	Fri May 24 04:01:21 2019 +0100
@@ -1326,6 +1326,7 @@
 7a69774c67cb79a79ccb2ac2d6d258a11e22aa6f jdk8u202-b26
 818b1963f7a227a2368a4f363d5500dd226a529e jdk8u202-ga
 9ce27f0a4683a2083d3aed59a40d6a3ccfc8e397 jdk8u212-b00
+5af8ec63c21c861106d6bf8b137620ae4d3664be jdk8u222-b00
 9ee244aee077ffad50d1b183a61d9f5fc39a1285 jdk8u212-b01
 2d94aac71d3337c7c3284bdb101b7ea15ef6d5f5 jdk8u212-b02
 96fec237fcc0acfcacea0a2bac07bf8ca919e64b jdk8u212-b03
--- a/src/cpu/ppc/vm/assembler_ppc.hpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/cpu/ppc/vm/assembler_ppc.hpp	Fri May 24 04:01:21 2019 +0100
@@ -469,6 +469,8 @@
     LVSR_OPCODE    = (31u << OPCODE_SHIFT |   38u << 1),
 
     // Vector-Scalar (VSX) instruction support.
+    LXVD2X_OPCODE  = (31u << OPCODE_SHIFT |  844u << 1),
+    STXVD2X_OPCODE = (31u << OPCODE_SHIFT |  972u << 1),
     MTVSRD_OPCODE  = (31u << OPCODE_SHIFT |  179u << 1),
     MFVSRD_OPCODE  = (31u << OPCODE_SHIFT |   51u << 1),
 
@@ -670,8 +672,10 @@
     // Atomics.
     LWARX_OPCODE   = (31u << OPCODE_SHIFT |   20u << 1),
     LDARX_OPCODE   = (31u << OPCODE_SHIFT |   84u << 1),
+    LQARX_OPCODE   = (31u << OPCODE_SHIFT |  276u << 1),
     STWCX_OPCODE   = (31u << OPCODE_SHIFT |  150u << 1),
-    STDCX_OPCODE   = (31u << OPCODE_SHIFT |  214u << 1)
+    STDCX_OPCODE   = (31u << OPCODE_SHIFT |  214u << 1),
+    STQCX_OPCODE   = (31u << OPCODE_SHIFT |  182u << 1)
 
   };
 
@@ -1052,6 +1056,19 @@
   static int vrs(   VectorRegister r)  { return  vrs(r->encoding());}
   static int vrt(   VectorRegister r)  { return  vrt(r->encoding());}
 
+  // Support Vector-Scalar (VSX) instructions.
+  static int vsra(      int         x)  { return  opp_u_field(x,            15, 11); }
+  static int vsrb(      int         x)  { return  opp_u_field(x,            20, 16); }
+  static int vsrc(      int         x)  { return  opp_u_field(x,            25, 21); }
+  static int vsrs(      int         x)  { return  opp_u_field(x,            10,  6); }
+  static int vsrt(      int         x)  { return  opp_u_field(x,            10,  6); }
+
+  static int vsra(   VectorSRegister r)  { return  vsra(r->encoding());}
+  static int vsrb(   VectorSRegister r)  { return  vsrb(r->encoding());}
+  static int vsrc(   VectorSRegister r)  { return  vsrc(r->encoding());}
+  static int vsrs(   VectorSRegister r)  { return  vsrs(r->encoding());}
+  static int vsrt(   VectorSRegister r)  { return  vsrt(r->encoding());}
+
   static int vsplt_uim( int        x)  { return  opp_u_field(x,             15, 12); } // for vsplt* instructions
   static int vsplti_sim(int        x)  { return  opp_u_field(x,             15, 11); } // for vsplti* instructions
   static int vsldoi_shb(int        x)  { return  opp_u_field(x,             25, 22); } // for vsldoi instruction
@@ -1663,11 +1680,14 @@
   // atomics
   inline void lwarx_unchecked(Register d, Register a, Register b, int eh1 = 0);
   inline void ldarx_unchecked(Register d, Register a, Register b, int eh1 = 0);
+  inline void lqarx_unchecked(Register d, Register a, Register b, int eh1 = 0);
   inline bool lxarx_hint_exclusive_access();
   inline void lwarx(  Register d, Register a, Register b, bool hint_exclusive_access = false);
   inline void ldarx(  Register d, Register a, Register b, bool hint_exclusive_access = false);
+  inline void lqarx(  Register d, Register a, Register b, bool hint_exclusive_access = false);
   inline void stwcx_( Register s, Register a, Register b);
   inline void stdcx_( Register s, Register a, Register b);
+  inline void stqcx_( Register s, Register a, Register b);
 
   // Instructions for adjusting thread priority for simultaneous
   // multithreading (SMT) on Power5.
@@ -1943,6 +1963,8 @@
   inline void mfvscr(   VectorRegister d);
 
   // Vector-Scalar (VSX) instructions.
+  inline void lxvd2x(   VectorSRegister d, Register a, Register b);
+  inline void stxvd2x(  VectorSRegister d, Register a, Register b);
   inline void mtvrd(    VectorRegister  d, Register a);
   inline void mfvrd(    Register        a, VectorRegister d);
 
@@ -2022,10 +2044,13 @@
   // Atomics: use ra0mem to disallow R0 as base.
   inline void lwarx_unchecked(Register d, Register b, int eh1);
   inline void ldarx_unchecked(Register d, Register b, int eh1);
+  inline void lqarx_unchecked(Register d, Register b, int eh1);
   inline void lwarx( Register d, Register b, bool hint_exclusive_access);
   inline void ldarx( Register d, Register b, bool hint_exclusive_access);
+  inline void lqarx( Register d, Register b, bool hint_exclusive_access);
   inline void stwcx_(Register s, Register b);
   inline void stdcx_(Register s, Register b);
+  inline void stqcx_(Register s, Register b);
   inline void lfs(   FloatRegister d, int si16);
   inline void lfsx(  FloatRegister d, Register b);
   inline void lfd(   FloatRegister d, int si16);
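
The opcode values added above (LXVD2X, STXVD2X, LQARX, STQCX) follow the same X-form layout as the neighbouring entries: primary opcode 31 in the top six bits and a 10-bit extended opcode shifted left by one. A standalone C++ sketch of that encoding, assuming OPCODE_SHIFT is 26 (the constant is defined elsewhere in assembler_ppc.hpp and is not shown in this hunk):

    // Sketch of the X-form opcode encoding used in the table above.
    // Assumption: OPCODE_SHIFT == 26, i.e. the primary opcode sits in the
    // top 6 bits; the extended opcode occupies the field above the Rc bit.
    #include <cstdint>
    #include <cstdio>

    static const int OPCODE_SHIFT = 26;  // assumed value, see assembler_ppc.hpp

    static uint32_t x_form(uint32_t primary, uint32_t xop) {
      return (primary << OPCODE_SHIFT) | (xop << 1);
    }

    int main() {
      printf("LXVD2X  = 0x%08x\n", (unsigned)x_form(31u, 844u));
      printf("STXVD2X = 0x%08x\n", (unsigned)x_form(31u, 972u));
      printf("LQARX   = 0x%08x\n", (unsigned)x_form(31u, 276u));
      printf("STQCX.  = 0x%08x\n", (unsigned)x_form(31u, 182u));
      return 0;
    }
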
--- a/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Fri May 24 04:01:21 2019 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2018, SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -504,11 +504,14 @@
 // Use ra0mem to disallow R0 as base.
 inline void Assembler::lwarx_unchecked(Register d, Register a, Register b, int eh1)           { emit_int32( LWARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
 inline void Assembler::ldarx_unchecked(Register d, Register a, Register b, int eh1)           { emit_int32( LDARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
+inline void Assembler::lqarx_unchecked(Register d, Register a, Register b, int eh1)           { emit_int32( LQARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
 inline bool Assembler::lxarx_hint_exclusive_access()                                          { return VM_Version::has_lxarxeh(); }
 inline void Assembler::lwarx( Register d, Register a, Register b, bool hint_exclusive_access) { lwarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
 inline void Assembler::ldarx( Register d, Register a, Register b, bool hint_exclusive_access) { ldarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
+inline void Assembler::lqarx( Register d, Register a, Register b, bool hint_exclusive_access) { lqarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
 inline void Assembler::stwcx_(Register s, Register a, Register b)                             { emit_int32( STWCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
 inline void Assembler::stdcx_(Register s, Register a, Register b)                             { emit_int32( STDCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
+inline void Assembler::stqcx_(Register s, Register a, Register b)                             { emit_int32( STQCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
 
 // Instructions for adjusting thread priority
 // for simultaneous multithreading (SMT) on POWER5.
@@ -624,6 +627,8 @@
 inline void Assembler::lvsr(  VectorRegister d, Register s1, Register s2) { emit_int32( LVSR_OPCODE   | vrt(d) | ra0mem(s1) | rb(s2)); }
 
 // Vector-Scalar (VSX) instructions.
+inline void Assembler::lxvd2x (VectorSRegister d, Register s1, Register s2) { emit_int32( LXVD2X_OPCODE  | vsrt(d) | ra(s1) | rb(s2)); }
+inline void Assembler::stxvd2x(VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra(s1) | rb(s2)); }
 inline void Assembler::mtvrd(  VectorRegister  d, Register a)               { emit_int32( MTVSRD_OPCODE  | vrt(d)  | ra(a)  | 1u); } // 1u: d is treated as Vector (VMX/Altivec).
 inline void Assembler::mfvrd(  Register        a, VectorRegister d)         { emit_int32( MFVSRD_OPCODE  | vrt(d)  | ra(a)  | 1u); } // 1u: d is treated as Vector (VMX/Altivec).
 
@@ -833,10 +838,13 @@
 // ra0 version
 inline void Assembler::lwarx_unchecked(Register d, Register b, int eh1)          { emit_int32( LWARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
 inline void Assembler::ldarx_unchecked(Register d, Register b, int eh1)          { emit_int32( LDARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
+inline void Assembler::lqarx_unchecked(Register d, Register b, int eh1)          { emit_int32( LQARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
 inline void Assembler::lwarx( Register d, Register b, bool hint_exclusive_access){ lwarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
 inline void Assembler::ldarx( Register d, Register b, bool hint_exclusive_access){ ldarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
+inline void Assembler::lqarx( Register d, Register b, bool hint_exclusive_access){ lqarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
 inline void Assembler::stwcx_(Register s, Register b)                            { emit_int32( STWCX_OPCODE | rs(s) | rb(b) | rc(1)); }
 inline void Assembler::stdcx_(Register s, Register b)                            { emit_int32( STDCX_OPCODE | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::stqcx_(Register s, Register b)                            { emit_int32( STQCX_OPCODE | rs(s) | rb(b) | rc(1)); }
 
 // ra0 version
 inline void Assembler::lfs( FloatRegister d, int si16)   { emit_int32( LFS_OPCODE  | frt(d) | simm(si16,16)); }
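
The new lqarx/stqcx_ emitters above extend the load-reserve/store-conditional family (lwarx/stwcx_, ldarx/stdcx_) to quadwords; callers use them in the usual retry-until-the-conditional-store-succeeds loop. As an analogy only, not HotSpot code, here is a minimal runnable sketch of that pattern with std::atomic, which compilers lower to a larx/stcx.-style loop on ppc64:

    // Analogy: the retry loop behind l*arx/st*cx_ pairs, expressed with
    // std::atomic. compare_exchange_weak plays the role of the conditional
    // store; a failure means another CPU touched the reserved location.
    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    static uint64_t atomic_add(std::atomic<uint64_t>& v, uint64_t inc) {
      uint64_t old = v.load(std::memory_order_relaxed);   // "load-reserve"
      while (!v.compare_exchange_weak(old, old + inc,     // "store-conditional"
                                      std::memory_order_acq_rel,
                                      std::memory_order_relaxed)) {
        // 'old' has been refreshed with the current value; just retry.
      }
      return old;
    }

    int main() {
      std::atomic<uint64_t> counter(0);
      atomic_add(counter, 5);
      printf("counter=%llu\n", (unsigned long long)counter.load());
      return 0;
    }
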
--- a/src/cpu/ppc/vm/globals_ppc.hpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/cpu/ppc/vm/globals_ppc.hpp	Fri May 24 04:01:21 2019 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2018 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,6 +81,19 @@
   product(bool, ReoptimizeCallSequences, true,                              \
           "Reoptimize code-sequences of calls at runtime.")                 \
                                                                             \
+  /* Power 8: Configure Data Stream Control Register. */                    \
+  product(uint64_t,DSCR_PPC64, (uintx)-1,                                   \
+          "Power8 or later: Specify encoded value for Data Stream Control " \
+          "Register")                                                       \
+  product(uint64_t,DSCR_DPFD_PPC64, 8,                                      \
+          "Power8 or later: DPFD (default prefetch depth) value of the "    \
+          "Data Stream Control Register."                                   \
+          " 0: hardware default, 1: none, 2-7: min-max, 8: don't touch")    \
+  product(uint64_t,DSCR_URG_PPC64, 8,                                       \
+          "Power8 or later: URG (depth attainment urgency) value of the "   \
+          "Data Stream Control Register."                                   \
+          " 0: hardware default, 1: none, 2-7: min-max, 8: don't touch")    \
+                                                                            \
   product(bool, UseLoadInstructionsForStackBangingPPC64, false,             \
           "Use load instructions for stack banging.")                       \
                                                                             \
--- a/src/cpu/ppc/vm/register_ppc.cpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/cpu/ppc/vm/register_ppc.cpp	Fri May 24 04:01:21 2019 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2018 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,3 +75,14 @@
   };
   return is_valid() ? names[encoding()] : "vnoreg";
 }
+
+const char* VectorSRegisterImpl::name() const {
+  const char* names[number_of_registers] = {
+    "VSR0",  "VSR1",  "VSR2",  "VSR3",  "VSR4",  "VSR5",  "VSR6",  "VSR7",
+    "VSR8",  "VSR9",  "VSR10", "VSR11", "VSR12", "VSR13", "VSR14", "VSR15",
+    "VSR16", "VSR17", "VSR18", "VSR19", "VSR20", "VSR21", "VSR22", "VSR23",
+    "VSR24", "VSR25", "VSR26", "VSR27", "VSR28", "VSR29", "VSR30", "VSR31"
+  };
+  return is_valid() ? names[encoding()] : "vsnoreg";
+}
+
--- a/src/cpu/ppc/vm/register_ppc.hpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/cpu/ppc/vm/register_ppc.hpp	Fri May 24 04:01:21 2019 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2018 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -492,6 +492,106 @@
 #endif // DONT_USE_REGISTER_DEFINES
 
 
+// Use VectorSRegister as a shortcut.
+class VectorSRegisterImpl;
+typedef VectorSRegisterImpl* VectorSRegister;
+
+inline VectorSRegister as_VectorSRegister(int encoding) {
+  return (VectorSRegister)(intptr_t)encoding;
+}
+
+// The implementation of Vector-Scalar (VSX) registers on POWER architecture.
+class VectorSRegisterImpl: public AbstractRegisterImpl {
+ public:
+  enum {
+    number_of_registers = 32
+  };
+
+  // construction
+  inline friend VectorSRegister as_VectorSRegister(int encoding);
+
+  // accessors
+  int encoding() const { assert(is_valid(), "invalid register"); return value(); }
+
+  // testers
+  bool is_valid() const { return 0 <=  value() &&  value() < number_of_registers; }
+
+  const char* name() const;
+};
+
+// The Vector-Scalar (VSX) registers of the POWER architecture.
+
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, vsnoreg, (-1));
+
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR0,  ( 0));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR1,  ( 1));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR2,  ( 2));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR3,  ( 3));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR4,  ( 4));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR5,  ( 5));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR6,  ( 6));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR7,  ( 7));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR8,  ( 8));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR9,  ( 9));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR10, (10));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR11, (11));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR12, (12));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR13, (13));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR14, (14));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR15, (15));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR16, (16));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR17, (17));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR18, (18));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR19, (19));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR20, (20));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR21, (21));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR22, (22));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR23, (23));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR24, (24));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR25, (25));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR26, (26));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR27, (27));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR28, (28));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR29, (29));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR30, (30));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR31, (31));
+
+#ifndef DONT_USE_REGISTER_DEFINES
+#define vsnoregi ((VectorSRegister)(vsnoreg_VectorSRegisterEnumValue))
+#define VSR0    ((VectorSRegister)(   VSR0_VectorSRegisterEnumValue))
+#define VSR1    ((VectorSRegister)(   VSR1_VectorSRegisterEnumValue))
+#define VSR2    ((VectorSRegister)(   VSR2_VectorSRegisterEnumValue))
+#define VSR3    ((VectorSRegister)(   VSR3_VectorSRegisterEnumValue))
+#define VSR4    ((VectorSRegister)(   VSR4_VectorSRegisterEnumValue))
+#define VSR5    ((VectorSRegister)(   VSR5_VectorSRegisterEnumValue))
+#define VSR6    ((VectorSRegister)(   VSR6_VectorSRegisterEnumValue))
+#define VSR7    ((VectorSRegister)(   VSR7_VectorSRegisterEnumValue))
+#define VSR8    ((VectorSRegister)(   VSR8_VectorSRegisterEnumValue))
+#define VSR9    ((VectorSRegister)(   VSR9_VectorSRegisterEnumValue))
+#define VSR10   ((VectorSRegister)(  VSR10_VectorSRegisterEnumValue))
+#define VSR11   ((VectorSRegister)(  VSR11_VectorSRegisterEnumValue))
+#define VSR12   ((VectorSRegister)(  VSR12_VectorSRegisterEnumValue))
+#define VSR13   ((VectorSRegister)(  VSR13_VectorSRegisterEnumValue))
+#define VSR14   ((VectorSRegister)(  VSR14_VectorSRegisterEnumValue))
+#define VSR15   ((VectorSRegister)(  VSR15_VectorSRegisterEnumValue))
+#define VSR16   ((VectorSRegister)(  VSR16_VectorSRegisterEnumValue))
+#define VSR17   ((VectorSRegister)(  VSR17_VectorSRegisterEnumValue))
+#define VSR18   ((VectorSRegister)(  VSR18_VectorSRegisterEnumValue))
+#define VSR19   ((VectorSRegister)(  VSR19_VectorSRegisterEnumValue))
+#define VSR20   ((VectorSRegister)(  VSR20_VectorSRegisterEnumValue))
+#define VSR21   ((VectorSRegister)(  VSR21_VectorSRegisterEnumValue))
+#define VSR22   ((VectorSRegister)(  VSR22_VectorSRegisterEnumValue))
+#define VSR23   ((VectorSRegister)(  VSR23_VectorSRegisterEnumValue))
+#define VSR24   ((VectorSRegister)(  VSR24_VectorSRegisterEnumValue))
+#define VSR25   ((VectorSRegister)(  VSR25_VectorSRegisterEnumValue))
+#define VSR26   ((VectorSRegister)(  VSR26_VectorSRegisterEnumValue))
+#define VSR27   ((VectorSRegister)(  VSR27_VectorSRegisterEnumValue))
+#define VSR28   ((VectorSRegister)(  VSR28_VectorSRegisterEnumValue))
+#define VSR29   ((VectorSRegister)(  VSR29_VectorSRegisterEnumValue))
+#define VSR30   ((VectorSRegister)(  VSR30_VectorSRegisterEnumValue))
+#define VSR31   ((VectorSRegister)(  VSR31_VectorSRegisterEnumValue))
+#endif // DONT_USE_REGISTER_DEFINES
+
 // Maximum number of incoming arguments that can be passed in i registers.
 const int PPC_ARGS_IN_REGS_NUM = 8;
 
--- a/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Fri May 24 04:01:21 2019 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2018, SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1352,9 +1352,13 @@
     Register tmp3 = R8_ARG6;
     Register tmp4 = R9_ARG7;
 
+    VectorSRegister tmp_vsr1  = VSR1;
+    VectorSRegister tmp_vsr2  = VSR2;
+
     address start = __ function_entry();
 
-      Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8;
+    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
+
     // don't try anything fancy if arrays don't have many elements
     __ li(tmp3, 0);
     __ cmpwi(CCR0, R5_ARG3, 9);
@@ -1412,22 +1416,60 @@
       __ andi_(R5_ARG3, R5_ARG3, 15);
       __ mtctr(tmp1);
 
-      __ bind(l_8);
-      // Use unrolled version for mass copying (copy 16 elements a time).
-      // Load feeding store gets zero latency on Power6, however not on Power5.
-      // Therefore, the following sequence is made for the good of both.
-      __ ld(tmp1, 0, R3_ARG1);
-      __ ld(tmp2, 8, R3_ARG1);
-      __ ld(tmp3, 16, R3_ARG1);
-      __ ld(tmp4, 24, R3_ARG1);
-      __ std(tmp1, 0, R4_ARG2);
-      __ std(tmp2, 8, R4_ARG2);
-      __ std(tmp3, 16, R4_ARG2);
-      __ std(tmp4, 24, R4_ARG2);
-      __ addi(R3_ARG1, R3_ARG1, 32);
-      __ addi(R4_ARG2, R4_ARG2, 32);
-      __ bdnz(l_8);
-    }
+      if (!VM_Version::has_vsx()) {
+
+        __ bind(l_8);
+        // Use unrolled version for mass copying (copy 16 elements a time).
+        // Load feeding store gets zero latency on Power6, however not on Power5.
+        // Therefore, the following sequence is made for the good of both.
+        __ ld(tmp1, 0, R3_ARG1);
+        __ ld(tmp2, 8, R3_ARG1);
+        __ ld(tmp3, 16, R3_ARG1);
+        __ ld(tmp4, 24, R3_ARG1);
+        __ std(tmp1, 0, R4_ARG2);
+        __ std(tmp2, 8, R4_ARG2);
+        __ std(tmp3, 16, R4_ARG2);
+        __ std(tmp4, 24, R4_ARG2);
+        __ addi(R3_ARG1, R3_ARG1, 32);
+        __ addi(R4_ARG2, R4_ARG2, 32);
+        __ bdnz(l_8);
+
+      } else { // Processor supports VSX, so use it to mass copy.
+
+        // Prefetch src data into L2 cache.
+        __ dcbt(R3_ARG1, 0);
+
+        // If supported set DSCR pre-fetch to deepest.
+        if (VM_Version::has_mfdscr()) {
+          __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
+          __ mtdscr(tmp2);
+        }
+        __ li(tmp1, 16);
+
+        // Backbranch target aligned to 32-byte. It's not aligned 16-byte
+        // as loop contains < 8 instructions that fit inside a single
+        // i-cache sector.
+        __ align(32);
+
+        __ bind(l_9);
+        // Use loop with VSX load/store instructions to
+        // copy 16 elements a time.
+        __ lxvd2x(tmp_vsr1, 0, R3_ARG1);     // Load from src.
+        __ stxvd2x(tmp_vsr1, 0, R4_ARG2);    // Store to dst.
+        __ lxvd2x(tmp_vsr2, R3_ARG1, tmp1);  // Load from src + 16.
+        __ stxvd2x(tmp_vsr2, R4_ARG2, tmp1); // Store to dst + 16.
+        __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32.
+        __ addi(R4_ARG2, R4_ARG2, 32);       // Update dsc+=32.
+        __ bdnz(l_9);                        // Dec CTR and loop if not zero.
+
+        // Restore DSCR pre-fetch value.
+        if (VM_Version::has_mfdscr()) {
+          __ load_const_optimized(tmp2, VM_Version::_dscr_val);
+          __ mtdscr(tmp2);
+        }
+
+      }
+    } // FasterArrayCopy
     __ bind(l_6);
 
     // copy 2 elements at a time
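
The VSX branch added above copies 16 jshort elements (32 bytes) per loop iteration using two lxvd2x/stxvd2x pairs, after the earlier code has already reduced the remaining length to a multiple of 16 elements. A scalar C++ sketch of the equivalent data movement, with memcpy standing in for the 16-byte vector load/store (illustrative only; alignment and tail handling are assumed to have been done):

    // Scalar model of the VSX loop: two 16-byte chunks, i.e. 16 jshorts,
    // are moved per iteration, mirroring the "addi ..., 32" pointer bumps.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static void copy_shorts_vsx_style(const uint16_t* src, uint16_t* dst, size_t elems) {
      // elems is assumed to be a multiple of 16, as guaranteed by the stub's CTR setup.
      for (size_t done = 0; done < elems; done += 16) {
        std::memcpy(dst,     src,     16);  // lxvd2x/stxvd2x at offset 0
        std::memcpy(dst + 8, src + 8, 16);  // lxvd2x/stxvd2x at offset 16
        src += 16;                          // addi R3_ARG1, R3_ARG1, 32
        dst += 16;                          // addi R4_ARG2, R4_ARG2, 32
      }
    }

    int main() {
      uint16_t src[32], dst[32] = {0};
      for (int i = 0; i < 32; i++) src[i] = (uint16_t)i;
      copy_shorts_vsx_style(src, dst, 32);
      printf("dst[31]=%u\n", dst[31]);      // expect 31
      return 0;
    }
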
--- a/src/cpu/ppc/vm/vm_version_ppc.cpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/cpu/ppc/vm/vm_version_ppc.cpp	Fri May 24 04:01:21 2019 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2015 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2018 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,7 @@
 int VM_Version::_measured_cache_line_size = 128; // default value
 const char* VM_Version::_features_str = "";
 bool VM_Version::_is_determine_features_test_running = false;
-
+uint64_t VM_Version::_dscr_val = 0;
 
 #define MSG(flag)   \
   if (flag && !FLAG_IS_DEFAULT(flag))                                  \
@@ -61,7 +61,9 @@
 
   // If PowerArchitecturePPC64 hasn't been specified explicitly determine from features.
   if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
-    if (VM_Version::has_popcntw()) {
+    if (VM_Version::has_lqarx()) {
+      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8);
+    } else if (VM_Version::has_popcntw()) {
       FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
     } else if (VM_Version::has_cmpb()) {
       FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 6);
@@ -72,8 +74,14 @@
     }
   }
   guarantee(PowerArchitecturePPC64 == 0 || PowerArchitecturePPC64 == 5 ||
-            PowerArchitecturePPC64 == 6 || PowerArchitecturePPC64 == 7,
-            "PowerArchitecturePPC64 should be 0, 5, 6 or 7");
+            PowerArchitecturePPC64 == 6 || PowerArchitecturePPC64 == 7 ||
+            PowerArchitecturePPC64 == 8,
+            "PowerArchitecturePPC64 should be 0, 5, 6, 7, or 8");
+
+  // Power 8: Configure Data Stream Control Register.
+  if (PowerArchitecturePPC64 >= 8) {
+    config_dscr();
+  }
 
   if (!UseSIGTRAP) {
     MSG(TrapBasedICMissChecks);
@@ -103,7 +111,7 @@
   // Create and print feature-string.
   char buf[(num_features+1) * 16]; // Max 16 chars per feature.
   jio_snprintf(buf, sizeof(buf),
-               "ppc64%s%s%s%s%s%s%s%s%s%s",
+               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s",
                (has_fsqrt()   ? " fsqrt"   : ""),
                (has_isel()    ? " isel"    : ""),
                (has_lxarxeh() ? " lxarxeh" : ""),
@@ -113,12 +121,17 @@
                (has_popcntw() ? " popcntw" : ""),
                (has_fcfids()  ? " fcfids"  : ""),
                (has_vand()    ? " vand"    : ""),
+               (has_lqarx()   ? " lqarx"   : ""),
                (has_vcipher() ? " aes"     : ""),
-               (has_vpmsumb() ? " vpmsumb" : "")
+               (has_vpmsumb() ? " vpmsumb" : ""),
+               (has_mfdscr()  ? " mfdscr"  : ""),
+               (has_vsx()     ? " vsx"     : "")
                // Make sure number of %s matches num_features!
               );
   _features_str = os::strdup(buf);
-  NOT_PRODUCT(if (Verbose) print_features(););
+  if (Verbose) {
+    print_features();
+  }
 
   // PPC64 supports 8-byte compare-exchange operations (see
   // Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
@@ -491,8 +504,11 @@
   a->popcntw(R7, R5);                          // code[7] -> popcntw
   a->fcfids(F3, F4);                           // code[8] -> fcfids
   a->vand(VR0, VR0, VR0);                      // code[9] -> vand
-  a->vcipher(VR0, VR1, VR2);                   // code[10] -> vcipher
-  a->vpmsumb(VR0, VR1, VR2);                   // code[11] -> vpmsumb
+  a->lqarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[10] -> lqarx_m
+  a->vcipher(VR0, VR1, VR2);                   // code[11] -> vcipher
+  a->vpmsumb(VR0, VR1, VR2);                   // code[12] -> vpmsumb
+  a->mfdscr(R0);                               // code[13] -> mfdscr
+  a->lxvd2x(VSR0, 0, R3_ARG1);                 // code[14] -> vsx
   a->blr();
 
   // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
@@ -536,8 +552,11 @@
   if (code[feature_cntr++]) features |= popcntw_m;
   if (code[feature_cntr++]) features |= fcfids_m;
   if (code[feature_cntr++]) features |= vand_m;
+  if (code[feature_cntr++]) features |= lqarx_m;
   if (code[feature_cntr++]) features |= vcipher_m;
   if (code[feature_cntr++]) features |= vpmsumb_m;
+  if (code[feature_cntr++]) features |= mfdscr_m;
+  if (code[feature_cntr++]) features |= vsx_m;
 
   // Print the detection code.
   if (PrintAssembly) {
@@ -549,6 +568,69 @@
   _features = features;
 }
 
+// Power 8: Configure Data Stream Control Register.
+void VM_Version::config_dscr() {
+  assert(has_lqarx(), "Only execute on Power 8 or later!");
+
+  // 7 InstWords for each call (function descriptor + blr instruction).
+  const int code_size = (2+2*7)*BytesPerInstWord;
+
+  // Allocate space for the code.
+  ResourceMark rm;
+  CodeBuffer cb("config_dscr", code_size, 0);
+  MacroAssembler* a = new MacroAssembler(&cb);
+
+  // Emit code.
+  uint64_t (*get_dscr)() = (uint64_t(*)())(void *)a->function_entry();
+  uint32_t *code = (uint32_t *)a->pc();
+  a->mfdscr(R3);
+  a->blr();
+
+  void (*set_dscr)(long) = (void(*)(long))(void *)a->function_entry();
+  a->mtdscr(R3);
+  a->blr();
+
+  uint32_t *code_end = (uint32_t *)a->pc();
+  a->flush();
+
+  // Print the detection code.
+  if (PrintAssembly) {
+    ttyLocker ttyl;
+    tty->print_cr("Decoding dscr configuration stub at " INTPTR_FORMAT " before execution:", p2i(code));
+    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
+  }
+
+  // Apply the configuration if needed.
+  _dscr_val = (*get_dscr)();
+  if (Verbose) {
+    tty->print_cr("dscr value was 0x%lx" , _dscr_val);
+  }
+  bool change_requested = false;
+  if (DSCR_PPC64 != (uintx)-1) {
+    _dscr_val = DSCR_PPC64;
+    change_requested = true;
+  }
+  if (DSCR_DPFD_PPC64 <= 7) {
+    uint64_t mask = 0x7;
+    if ((_dscr_val & mask) != DSCR_DPFD_PPC64) {
+      _dscr_val = (_dscr_val & ~mask) | (DSCR_DPFD_PPC64);
+      change_requested = true;
+    }
+  }
+  if (DSCR_URG_PPC64 <= 7) {
+    uint64_t mask = 0x7 << 6;
+    if ((_dscr_val & mask) != DSCR_DPFD_PPC64 << 6) {
+      _dscr_val = (_dscr_val & ~mask) | (DSCR_URG_PPC64 << 6);
+      change_requested = true;
+    }
+  }
+  if (change_requested) {
+    (*set_dscr)(_dscr_val);
+    if (Verbose) {
+      tty->print_cr("dscr was set to 0x%lx" , (*get_dscr)());
+    }
+  }
+}
 
 static int saved_features = 0;
 
--- a/src/cpu/ppc/vm/vm_version_ppc.hpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/cpu/ppc/vm/vm_version_ppc.hpp	Fri May 24 04:01:21 2019 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2018 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,8 +42,11 @@
     fcfids,
     vand,
     dcba,
+    lqarx,
     vcipher,
     vpmsumb,
+    mfdscr,
+    vsx,
     num_features // last entry to count features
   };
   enum Feature_Flag_Set {
@@ -58,8 +61,11 @@
     fcfids_m              = (1 << fcfids ),
     vand_m                = (1 << vand   ),
     dcba_m                = (1 << dcba   ),
+    lqarx_m               = (1 << lqarx  ),
     vcipher_m             = (1 << vcipher),
     vpmsumb_m             = (1 << vpmsumb),
+    mfdscr_m              = (1 << mfdscr ),
+    vsx_m                 = (1 << vsx    ),
     all_features_m        = -1
   };
   static int  _features;
@@ -69,6 +75,7 @@
 
   static void print_features();
   static void determine_features(); // also measures cache line size
+  static void config_dscr(); // Power 8: Configure Data Stream Control Register.
   static void determine_section_size();
   static void power6_micro_bench();
 public:
@@ -87,8 +94,11 @@
   static bool has_fcfids()  { return (_features & fcfids_m) != 0; }
   static bool has_vand()    { return (_features & vand_m) != 0; }
   static bool has_dcba()    { return (_features & dcba_m) != 0; }
+  static bool has_lqarx()   { return (_features & lqarx_m) != 0; }
   static bool has_vcipher() { return (_features & vcipher_m) != 0; }
   static bool has_vpmsumb() { return (_features & vpmsumb_m) != 0; }
+  static bool has_mfdscr()  { return (_features & mfdscr_m) != 0; }
+  static bool has_vsx()     { return (_features & vsx_m) != 0; }
 
   static const char* cpu_features() { return _features_str; }
 
@@ -97,6 +107,9 @@
   // Assembler testing
   static void allow_all();
   static void revert();
+
+  // POWER 8: DSCR current value.
+  static uint64_t _dscr_val;
 };
 
 #endif // CPU_PPC_VM_VM_VERSION_PPC_HPP
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri May 24 04:01:21 2019 +0100
@@ -4483,7 +4483,7 @@
     // save F48:F54 in temp registers
     __ movdtox(F54,G2);
     __ movdtox(F52,G3);
-    __ movdtox(F50,G6);
+    __ movdtox(F50,L6);
     __ movdtox(F48,G1);
     for ( int i = 46;  i >= 14; i -= 8 ) {
       __ aes_dround23(as_FloatRegister(i), F0, F2, F4);
@@ -4511,7 +4511,7 @@
     // re-init F48:F54 with their original values
     __ movxtod(G2,F54);
     __ movxtod(G3,F52);
-    __ movxtod(G6,F50);
+    __ movxtod(L6,F50);
     __ movxtod(G1,F48);
 
     __ movxtod(L0,F6);
--- a/src/share/vm/code/codeCache.cpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/share/vm/code/codeCache.cpp	Fri May 24 04:01:21 2019 +0100
@@ -336,16 +336,19 @@
 }
 
 // Walk the list of methods which might contain non-perm oops.
-void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
+void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
   assert_locked_or_safepoint(CodeCache_lock);
 
   if (UseG1GC) {
     return;
   }
 
+  const bool fix_relocations = f->fix_relocations();
   debug_only(mark_scavenge_root_nmethods());
 
-  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
+  nmethod* prev = NULL;
+  nmethod* cur = scavenge_root_nmethods();
+  while (cur != NULL) {
     debug_only(cur->clear_scavenge_root_marked());
     assert(cur->scavenge_root_not_marked(), "");
     assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
@@ -360,6 +363,18 @@
       // Perform cur->oops_do(f), maybe just once per nmethod.
       f->do_code_blob(cur);
     }
+    nmethod* const next = cur->scavenge_root_link();
+    // The scavengable nmethod list must contain all methods with scavengable
+    // oops. It is safe to include more nmethod on the list, but we do not
+    // expect any live non-scavengable nmethods on the list.
+    if (fix_relocations) {
+      if (!is_live || !cur->detect_scavenge_root_oops()) {
+        unlink_scavenge_root_nmethod(cur, prev);
+      } else {
+        prev = cur;
+      }
+    }
+    cur = next;
   }
 
   // Check for stray marks.
@@ -379,6 +394,24 @@
   print_trace("add_scavenge_root", nm);
 }
 
+void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
+  assert_locked_or_safepoint(CodeCache_lock);
+
+  assert((prev == NULL && scavenge_root_nmethods() == nm) ||
+         (prev != NULL && prev->scavenge_root_link() == nm), "precondition");
+
+  assert(!UseG1GC, "G1 does not use the scavenge_root_nmethods list");
+
+  print_trace("unlink_scavenge_root", nm);
+  if (prev == NULL) {
+    set_scavenge_root_nmethods(nm->scavenge_root_link());
+  } else {
+    prev->set_scavenge_root_link(nm->scavenge_root_link());
+  }
+  nm->set_scavenge_root_link(NULL);
+  nm->clear_on_scavenge_root_list();
+}
+
 void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
 
@@ -387,20 +420,13 @@
   }
 
   print_trace("drop_scavenge_root", nm);
-  nmethod* last = NULL;
-  nmethod* cur = scavenge_root_nmethods();
-  while (cur != NULL) {
-    nmethod* next = cur->scavenge_root_link();
+  nmethod* prev = NULL;
+  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
     if (cur == nm) {
-      if (last != NULL)
-            last->set_scavenge_root_link(next);
-      else  set_scavenge_root_nmethods(next);
-      nm->set_scavenge_root_link(NULL);
-      nm->clear_on_scavenge_root_list();
+      unlink_scavenge_root_nmethod(cur, prev);
       return;
     }
-    last = cur;
-    cur = next;
+    prev = cur;
   }
   assert(false, "should have been on list");
 }
@@ -429,11 +455,7 @@
     } else {
       // Prune it from the list, so we don't have to look at it any more.
       print_trace("prune_scavenge_root", cur);
-      cur->set_scavenge_root_link(NULL);
-      cur->clear_on_scavenge_root_list();
-      if (last != NULL)
-            last->set_scavenge_root_link(next);
-      else  set_scavenge_root_nmethods(next);
+      unlink_scavenge_root_nmethod(cur, last);
     }
     cur = next;
   }
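
The reworked scavenge_root_nmethods_do() above walks the singly linked scavenge-root list with a trailing prev pointer so that stale entries can be unlinked in place through unlink_scavenge_root_nmethod() when the closure permits pruning. A generic, runnable sketch of that unlink-while-iterating pattern (Node and the staleness predicate are illustrative stand-ins, not HotSpot types):

    // Unlink-while-iterating: capture 'next' before possibly unlinking 'cur',
    // and only advance 'prev' when 'cur' stays on the list.
    #include <cstdio>

    struct Node { int value; Node* next; };

    static Node* remove_if_stale(Node* head, bool (*stale)(const Node*)) {
      Node* prev = nullptr;
      Node* cur  = head;
      while (cur != nullptr) {
        Node* const next = cur->next;        // capture before unlinking
        if (stale(cur)) {
          if (prev == nullptr) head = next;  // unlink the list head
          else prev->next = next;            // unlink an interior node
          cur->next = nullptr;
        } else {
          prev = cur;                        // node stays; it becomes the new prev
        }
        cur = next;
      }
      return head;
    }

    int main() {
      Node c = {3, nullptr}, b = {2, &c}, a = {1, &b};
      Node* head = remove_if_stale(&a, [](const Node* n) { return n->value == 2; });
      printf("%d -> %d\n", head->value, head->next->value);  // prints "1 -> 3"
      return 0;
    }
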
--- a/src/share/vm/code/codeCache.hpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/share/vm/code/codeCache.hpp	Fri May 24 04:01:21 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,6 +65,10 @@
 
   static int _codemem_full_count;
 
+  static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
+  static void prune_scavenge_root_nmethods();
+  static void unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev);
+
  public:
 
   // Initialization
@@ -135,13 +139,17 @@
   // to "true" iff some code got unloaded.
   static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
   static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
-  static void scavenge_root_nmethods_do(CodeBlobClosure* f);
+
+  // Apply f to every live code blob in scavengable nmethods. Prune nmethods
+  // from the list of scavengable nmethods if f->fix_relocations() and a nmethod
+  // no longer has scavengable oops.  If f->fix_relocations(), then f must copy
+  // objects to their new location immediately to avoid fixing nmethods on the
+  // basis of the old object locations.
+  static void scavenge_root_nmethods_do(CodeBlobToOopClosure* f);
 
   static nmethod* scavenge_root_nmethods()          { return _scavenge_root_nmethods; }
-  static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
   static void add_scavenge_root_nmethod(nmethod* nm);
   static void drop_scavenge_root_nmethod(nmethod* nm);
-  static void prune_scavenge_root_nmethods();
 
   // Printing/debugging
   static void print();                           // prints summary
--- a/src/share/vm/code/nmethod.cpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/share/vm/code/nmethod.cpp	Fri May 24 04:01:21 2019 +0100
@@ -1413,7 +1413,6 @@
   assert(_method == NULL, "Tautology");
 
   set_osr_link(NULL);
-  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
   NMethodSweeper::report_state_change(this);
 }
 
@@ -2197,6 +2196,7 @@
         }
       } else if (iter.type() == relocInfo::virtual_call_type) {
         // Check compiledIC holders associated with this nmethod
+        ResourceMark rm;
         CompiledIC *ic = CompiledIC_at(&iter);
         if (ic->is_icholder_call()) {
           CompiledICHolder* cichk = ic->cached_icholder();
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri May 24 04:01:21 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -664,12 +664,6 @@
 
     NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
 
-    {
-      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer, _gc_tracer.gc_id());
-
-      CodeCache::prune_scavenge_root_nmethods();
-    }
-
     // Re-verify object start arrays
     if (VerifyObjectStartArray &&
         VerifyAfterGC) {
--- a/src/share/vm/memory/genCollectedHeap.cpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Fri May 24 04:01:21 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -619,7 +619,7 @@
                                      OopClosure* weak_roots,
                                      CLDClosure* strong_cld_closure,
                                      CLDClosure* weak_cld_closure,
-                                     CodeBlobClosure* code_roots) {
+                                     CodeBlobToOopClosure* code_roots) {
   StrongRootsScope srs(this, activate_scope);
 
   // General roots.
@@ -638,7 +638,7 @@
   // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
   CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
   // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
-  CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
+  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
 
   Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
 
--- a/src/share/vm/memory/genCollectedHeap.hpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Fri May 24 04:01:21 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -416,7 +416,7 @@
                      OopClosure* weak_roots,
                      CLDClosure* strong_cld_closure,
                      CLDClosure* weak_cld_closure,
-                     CodeBlobClosure* code_roots);
+                     CodeBlobToOopClosure* code_roots);
 
   void gen_process_roots(int level,
                          bool younger_gens_as_roots,
--- a/src/share/vm/memory/iterator.hpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/share/vm/memory/iterator.hpp	Fri May 24 04:01:21 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -291,9 +291,12 @@
  protected:
   void do_nmethod(nmethod* nm);
  public:
+  // If fix_relocations(), then cl must copy objects to their new location immediately to avoid
+  // patching nmethods with the old locations.
   CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
   virtual void do_code_blob(CodeBlob* cb);
 
+  bool fix_relocations() const { return _fix_relocations; }
   const static bool FixRelocations = true;
 };
 
--- a/src/share/vm/opto/memnode.cpp	Wed Apr 10 16:47:03 2019 +0100
+++ b/src/share/vm/opto/memnode.cpp	Fri May 24 04:01:21 2019 +0100
@@ -1514,10 +1514,14 @@
   Node* ctrl    = in(MemNode::Control);
   Node* address = in(MemNode::Address);
 
+  bool addr_mark = ((phase->type(address)->isa_oopptr() || phase->type(address)->isa_narrowoop()) &&
+         phase->type(address)->is_ptr()->offset() == oopDesc::mark_offset_in_bytes());
+
   // Skip up past a SafePoint control.  Cannot do this for Stores because
   // pointer stores & cardmarks must stay on the same side of a SafePoint.
   if( ctrl != NULL && ctrl->Opcode() == Op_SafePoint &&
-      phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw ) {
+      phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw  &&
+      !addr_mark ) {
     ctrl = ctrl->in(0);
     set_req(MemNode::Control,ctrl);
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/8209951/TestCipherBlockChainingEncrypt.java	Fri May 24 04:01:21 2019 +0100
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8209951
+ * @summary SIGBUS in com.sun.crypto.provider.CipherBlockChaining
+ * @run main/othervm/timeout=300 -Xbatch
+ *      compiler.codegen.aes.TestCipherBlockChainingEncrypt
+ */
+
+package compiler.codegen.aes;
+
+import java.io.PrintStream;
+import java.security.*;
+import java.util.Random;
+import java.lang.reflect.Method;
+import javax.crypto.Cipher;
+import javax.crypto.SecretKey;
+import javax.crypto.SecretKeyFactory;
+import javax.crypto.spec.PBEKeySpec;
+
+public class TestCipherBlockChainingEncrypt {
+    private static String algorithm = "PBEWithHmacSHA1AndAES_256";
+    private static final String PBEPASS = "Hush, it's supposed to be a secret!";
+
+    private static final int INPUT_LENGTH = 800;
+    private static final int[] OFFSETS = {0};
+    private static final int NUM_PAD_BYTES = 8;
+    private static final int PBKDF2_ADD_PAD_BYTES = 8;
+
+    private static SecretKey key;
+    private static Cipher ci;
+
+    public static void main(String[] args) throws Exception {
+     for(int i=0; i<5_000; i++) {
+        if (!(new TestCipherBlockChainingEncrypt().test(args))) {
+            throw new RuntimeException("TestCipherBlockChainingEncrypt test failed");
+       }
+     }
+   }
+
+    public boolean test(String[] args) throws Exception {
+        boolean result = true;
+
+        Provider p = Security.getProvider("SunJCE");
+        ci = Cipher.getInstance(algorithm, p);
+        key = SecretKeyFactory.getInstance(algorithm, p).generateSecret(
+                        new PBEKeySpec(PBEPASS.toCharArray()));
+
+        // generate input data
+        byte[] inputText = new byte[INPUT_LENGTH + NUM_PAD_BYTES
+                + PBKDF2_ADD_PAD_BYTES];
+        new Random().nextBytes(inputText);
+
+        try {
+            // Encrypt
+            execute(Cipher.ENCRYPT_MODE,
+                    inputText,
+                    0,
+                    INPUT_LENGTH);
+
+            // PBKDF2 required 16 byte padding
+            int padLength = NUM_PAD_BYTES + PBKDF2_ADD_PAD_BYTES;
+
+            // Decrypt
+            // Note: inputText is implicitly padded by the above encrypt
+            // operation so decrypt operation can safely proceed
+            execute(Cipher.DECRYPT_MODE,
+                    inputText,
+                    0,
+                    INPUT_LENGTH + padLength);
+
+        } catch (Exception ex) {
+            ex.printStackTrace(System.out);
+            result = false;
+        }
+        return result;
+    }
+
+    private void execute(int edMode, byte[] inputText, int offset, int len) {
+        try {
+            // init Cipher
+            if (Cipher.ENCRYPT_MODE == edMode) {
+                ci.init(Cipher.ENCRYPT_MODE, this.key);
+            } else {
+                ci.init(Cipher.DECRYPT_MODE, this.key, ci.getParameters());
+            }
+
+            // First, generate the cipherText at an allocated buffer
+            byte[] outputText = ci.doFinal(inputText, offset, len);
+
+            // Second, generate cipherText again at the same buffer of plainText
+            int myoff = offset / 2;
+            int off = ci.update(inputText, offset, len, inputText, myoff);
+            ci.doFinal(inputText, myoff + off);
+
+            // Compare to see whether the two results are the same or not
+            boolean e = equalsBlock(inputText, myoff, outputText, 0,
+                    outputText.length);
+        } catch (Exception ex) {
+                System.out.println("Got unexpected exception for " + algorithm);
+                ex.printStackTrace(System.out);
+        }
+    }
+
+    private boolean equalsBlock(byte[] b1, int off1,
+            byte[] b2, int off2, int len) {
+        for (int i = off1, j = off2, k = 0; k < len; i++, j++, k++) {
+            if (b1[i] != b2[j]) {
+                return false;
+            }
+        }
+        return true;
+    }
+}
--- a/test/runtime/NMT/NMTWithCDS.java	Wed Apr 10 16:47:03 2019 +0100
+++ b/test/runtime/NMT/NMTWithCDS.java	Fri May 24 04:01:21 2019 +0100
@@ -34,14 +34,15 @@
 
   public static void main(String[] args) throws Exception {
     ProcessBuilder pb;
-    pb = ProcessTools.createJavaProcessBuilder("-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
+    pb = ProcessTools.createJavaProcessBuilder(
+        "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
     try {
       output.shouldContain("Loading classes to share");
       output.shouldHaveExitValue(0);
 
       pb = ProcessTools.createJavaProcessBuilder(
-        "-XX:NativeMemoryTracking=detail", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
+        "-XX:+UnlockDiagnosticVMOptions", "-XX:NativeMemoryTracking=detail", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
       output = new OutputAnalyzer(pb.start());
       output.shouldContain("sharing");
       output.shouldHaveExitValue(0);
--- a/test/runtime/containers/docker/TestCPUAwareness.java	Wed Apr 10 16:47:03 2019 +0100
+++ b/test/runtime/containers/docker/TestCPUAwareness.java	Fri May 24 04:01:21 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,8 @@
     private static void testCpus(int valueToSet, int expectedTraceValue) throws Exception {
         Common.logNewTestCase("test cpus: " + valueToSet);
         DockerRunOptions opts = Common.newOpts(imageName)
-            .addDockerOpts("--cpus", "" + valueToSet);
+            .addDockerOpts("--cpu-period=" + 10000)
+            .addDockerOpts("--cpu-quota=" + valueToSet * 10000);
         Common.run(opts)
             .shouldMatch("active_processor_count.*" + expectedTraceValue);
     }