changeset 7380:72b29bfe67fa jdk8_final

Implement various locked memory operations.
author aph
date Thu, 29 May 2014 05:53:26 -0400
parents a4a33014c25d
children 55da95366938
files src/cpu/aarch64/vm/aarch64.ad src/cpu/aarch64/vm/macroAssembler_aarch64.cpp src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
diffstat 3 files changed, 180 insertions(+), 17 deletions(-)
--- a/src/cpu/aarch64/vm/aarch64.ad	Thu May 29 13:27:46 2014 -0400
+++ b/src/cpu/aarch64/vm/aarch64.ad	Thu May 29 05:53:26 2014 -0400
@@ -6207,17 +6207,12 @@
 // and to use StoreIConditional only for 32-bit and StoreLConditional
 // only for 64-bit.
 //
-// So, it looks like we could implement LoadPLocked and StorePLocked
-// instructions using, respectively the AArch64 hw load-exclusive and
-// store-conditional instructions. Whereas we must implement each of
+// We implement LoadPLocked and StorePLocked using, respectively,
+// the AArch64 hardware load-exclusive and store-conditional
+// instructions. In contrast, we must implement each of
 // Store{IL}Conditional using a CAS which employs a pair of
 // instructions comprising a load-exclusive followed by a
 // store-conditional.
-//
-// We also need CompareAndSwapI, CompareAndSwapL, CompareAndSwapP and
-// CompareAndSwapN, GetAndAddI, GetAndAddL, GetAndSeI, GetAndSetL,
-// GetAndSetP and GetAndSetN. These are used to implement various
-// atomic intrinsics and also to inline various Unsafe operations.
 
 
 // Locked-load (linked load) of the current heap-top
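For reference, the CAS shape that the comment above describes (a load-exclusive followed by a store-conditional) looks roughly like this at the MacroAssembler level. This is a sketch of the pattern only, with oldv, newv, addr and tmp standing in for caller-supplied registers; the port's real cmpxchg/cmpxchgw helpers in macroAssembler_aarch64.cpp take explicit success and failure labels and differ in detail:

    // Sketch: compare-and-swap *addr from oldv to newv; tmp is a scratch
    // register that receives the store-exclusive status.
    Label retry_load, done;
    bind(retry_load);
    ldxr(tmp, addr);            // exclusive load of the current value
    cmp(tmp, oldv);             // does it match the expected value?
    br(Assembler::NE, done);    // no: fail without storing
    stxr(tmp, newv, addr);      // yes: attempt the conditional store
    cbnzw(tmp, retry_load);     // nonzero status: reservation lost, retry
    bind(done);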
@@ -6367,13 +6362,127 @@
   ins_pipe(pipe_class_memory);
 %}
 
-// TODO
-// GetAndAddI
-// GetAndAddL
-// GetAndSetI
-// GetAndSetL
-// GetAndSetP
-// GetAndSetN
+
+instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
+  match(Set prev (GetAndSetI mem newv));
+  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
+  ins_encode %{
+    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
+  match(Set prev (GetAndSetL mem newv));
+  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
+  ins_encode %{
+    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
+  match(Set prev (GetAndSetN mem newv));
+  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
+  ins_encode %{
+    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
+  match(Set prev (GetAndSetP mem newv));
+  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
+  ins_encode %{
+    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+
+instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
+  match(Set newval (GetAndAddL mem incr));
+  ins_cost(INSN_COST * 10);
+  format %{ "get_and_addL $newval, [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddL mem incr));
+  ins_cost(INSN_COST * 9);
+  format %{ "get_and_addL [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
+  match(Set newval (GetAndAddL mem incr));
+  ins_cost(INSN_COST * 10);
+  format %{ "get_and_addL $newval, [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddL mem incr));
+  ins_cost(INSN_COST * 9);
+  format %{ "get_and_addL [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
+  match(Set newval (GetAndAddI mem incr));
+  ins_cost(INSN_COST * 10);
+  format %{ "get_and_addI $newval, [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddI mem incr));
+  ins_cost(INSN_COST * 9);
+  format %{ "get_and_addI [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
+  match(Set newval (GetAndAddI mem incr));
+  ins_cost(INSN_COST * 10);
+  format %{ "get_and_addI $newval, [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddI mem incr));
+  ins_cost(INSN_COST * 9);
+  format %{ "get_and_addI [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_class_memory);
+%}
 
 // ============================================================================
 // Conditional Move Instructions
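Each ins_encode above bottoms out in one of the new MacroAssembler helpers added in the macroAssembler_aarch64.cpp hunk below. Expanding ATOMIC_OP(ldxr, add, stxr) by hand gives the 64-bit atomic_add; this is a manual expansion shown for readability, not code that appears verbatim in the patch:

    void MacroAssembler::atomic_add(Register prev, RegisterOrConstant incr, Register addr) {
      // Accumulate the old value straight into prev when prev cannot alias
      // the inputs, saving the final move; otherwise stage it in rscratch2.
      Register result = rscratch2;
      if (prev->is_valid())
        result = different(prev, incr, addr) ? prev : rscratch2;

      Label retry_load;
      bind(retry_load);
      ldxr(result, addr);               // exclusive load of the old value
      add(rscratch1, result, incr);     // rscratch1 = old value + incr
      stxr(rscratch1, rscratch1, addr); // conditional store; status in rscratch1
      cbnzw(rscratch1, retry_load);     // nonzero status: reservation lost, retry
      if (prev->is_valid() && prev != result)
        mov(prev, result);              // deliver the old value to the caller
    }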
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu May 29 13:27:46 2014 -0400
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu May 29 05:53:26 2014 -0400
@@ -1814,9 +1814,9 @@
 
 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) {
   if (increment.is_register()) {
-    add(Rd, Rn, increment.as_register());
+    addw(Rd, Rn, increment.as_register());
   } else {
-    add(Rd, Rn, increment.as_constant());
+    addw(Rd, Rn, increment.as_constant());
   }
 }
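This addw fix matters for this changeset in particular: the RegisterOrConstant overload is exactly the one ATOMIC_OP instantiates with addw below, so atomic_addw depends on it emitting 32-bit adds. Two illustrative calls (register names are arbitrary, and __ is assumed bound to a MacroAssembler, as usual in HotSpot):

    __ addw(r0, r1, RegisterOrConstant(r2)); // now: add w0, w1, w2 (was: add x0, x1, x2)
    __ addw(r0, r1, RegisterOrConstant(1));  // now: add w0, w1, #1 (was: add x0, x1, #1)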
 
@@ -1898,6 +1898,54 @@
     b(*fail);
 }
 
+static bool different(Register a, RegisterOrConstant b, Register c) {
+  if (b.is_constant())
+    return a != c;
+  else
+    return a != b.as_register() && a != c && b.as_register() != c;
+}
+
+#define ATOMIC_OP(LDXR, OP, STXR)					\
+void MacroAssembler::atomic_##OP(Register prev, RegisterOrConstant incr, Register addr) { \
+  Register result = rscratch2;						\
+  if (prev->is_valid())							\
+    result = different(prev, incr, addr) ? prev : rscratch2;		\
+									\
+  Label retry_load;							\
+  bind(retry_load);							\
+  LDXR(result, addr);							\
+  OP(rscratch1, result, incr);						\
+  STXR(rscratch1, rscratch1, addr);					\
+  cbnzw(rscratch1, retry_load);						\
+  if (prev->is_valid() && prev != result)				\
+    mov(prev, result);							\
+}
+
+ATOMIC_OP(ldxr, add, stxr)
+ATOMIC_OP(ldxrw, addw, stxrw)
+
+#undef ATOMIC_OP
+
+#define ATOMIC_XCHG(OP, LDXR, STXR)					\
+void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) {	\
+  Register result = rscratch2;						\
+  if (prev->is_valid())							\
+    result = different(prev, newv, addr) ? prev : rscratch2;		\
+									\
+  Label retry_load;							\
+  bind(retry_load);							\
+  LDXR(result, addr);							\
+  STXR(rscratch1, newv, addr);						\
+  cbnzw(rscratch1, retry_load);						\
+  if (prev->is_valid() && prev != result)				\
+    mov(prev, result);							\
+}
+
+ATOMIC_XCHG(xchg, ldxr, stxr)
+ATOMIC_XCHG(xchgw, ldxrw, stxrw)
+
+#undef ATOMIC_XCHG
+
 void MacroAssembler::incr_allocated_bytes(Register thread,
                                           Register var_size_in_bytes,
                                           int con_size_in_bytes,
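Seen from the .ad encodings above, the helpers declared in the macroAssembler_aarch64.hpp hunk below are called in a handful of shapes. The calls here are illustrative only; register choices are arbitrary and __ again stands for a MacroAssembler pointer:

    // GetAndSetL: atomically swap *r1 with r2, old value into r0.
    __ atomic_xchg(r0, r2, r1);

    // GetAndAddL with a register increment: old value into r0.
    __ atomic_add(r0, RegisterOrConstant(r2), r1);

    // GetAndAddL when the result is unused (the _no_res rules): passing
    // noreg makes prev->is_valid() false, so the final mov is skipped.
    __ atomic_add(noreg, RegisterOrConstant(8), r1);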
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Thu May 29 13:27:46 2014 -0400
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Thu May 29 05:53:26 2014 -0400
@@ -978,6 +978,12 @@
   void cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
 		  Label &suceed, Label *fail);
 
+  void atomic_add(Register prev, RegisterOrConstant incr, Register addr);
+  void atomic_addw(Register prev, RegisterOrConstant incr, Register addr);
+
+  void atomic_xchg(Register prev, Register newv, Register addr);
+  void atomic_xchgw(Register prev, Register newv, Register addr);
+
   void imulptr(Register dst, Register src) { Unimplemented(); }