changeset 7138:63c5920a038d

8042309: Some bugfixes for the ppc64 port.
Reviewed-by: kvn
author goetz
date Fri, 02 May 2014 14:53:06 +0200
parents 34862ced4a87
children 28bbbecff5f0
files src/cpu/ppc/vm/cppInterpreter_ppc.cpp src/cpu/ppc/vm/frame_ppc.inline.hpp src/cpu/ppc/vm/interp_masm_ppc_64.hpp src/cpu/ppc/vm/interpreterRT_ppc.cpp src/cpu/ppc/vm/interpreter_ppc.cpp src/cpu/ppc/vm/jniFastGetField_ppc.cpp src/cpu/ppc/vm/ppc.ad src/cpu/ppc/vm/templateInterpreter_ppc.cpp src/cpu/ppc/vm/templateTable_ppc_64.cpp src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp
diffstat 10 files changed, 173 insertions(+), 153 deletions(-)
--- a/src/cpu/ppc/vm/cppInterpreter_ppc.cpp	Thu May 08 11:05:02 2014 +0200
+++ b/src/cpu/ppc/vm/cppInterpreter_ppc.cpp	Fri May 02 14:53:06 2014 +0200
@@ -1,3 +1,4 @@
+
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
@@ -403,7 +404,7 @@
   BLOCK_COMMENT("compute_interpreter_state {");
 
   // access_flags = method->access_flags();
-  // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
+  // TODO: PPC port: assert(4 == sizeof(AccessFlags), "unexpected field size");
   __ lwa(access_flags, method_(access_flags));
 
   // parameter_count = method->constMethod->size_of_parameters();
@@ -1055,7 +1056,7 @@
   assert(access_flags->is_nonvolatile(),
          "access_flags must be in a non-volatile register");
   // Type check.
-  // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
+  // TODO: PPC port: assert(4 == sizeof(AccessFlags), "unexpected field size");
   __ lwz(access_flags, method_(access_flags));
 
   // We don't want to reload R19_method and access_flags after calls
@@ -1838,7 +1839,7 @@
   // Interpreter state fields.
   const Register msg               = R24_tmp4;
 
-  // MethodOop fields.
+  // Method fields.
   const Register parameter_count   = R25_tmp5;
   const Register result_index      = R26_tmp6;
 
@@ -2023,7 +2024,7 @@
   __ add(R17_tos, R17_tos, parameter_count);
 
   // Result stub address array index
-  // TODO: PPC port: assert(4 == methodOopDesc::sz_result_index(), "unexpected field size");
+  // TODO: PPC port: assert(4 == sizeof(AccessFlags), "unexpected field size");
   __ lwa(result_index, method_(result_index));
 
   __ li(msg, BytecodeInterpreter::method_resume);
@@ -2709,7 +2710,7 @@
   __ ld(R3_ARG1, state_(_result._osr._osr_buf));
   __ mtctr(R12_scratch2);
 
-  // Load method oop, gc may move it during execution of osr'd method.
+  // Load method, gc may move it during execution of osr'd method.
   __ ld(R22_tmp2, state_(_method));
   // Load message 'call_method'.
   __ li(R23_tmp3, BytecodeInterpreter::call_method);
--- a/src/cpu/ppc/vm/frame_ppc.inline.hpp	Thu May 08 11:05:02 2014 +0200
+++ b/src/cpu/ppc/vm/frame_ppc.inline.hpp	Fri May 02 14:53:06 2014 +0200
@@ -26,6 +26,8 @@
 #ifndef CPU_PPC_VM_FRAME_PPC_INLINE_HPP
 #define CPU_PPC_VM_FRAME_PPC_INLINE_HPP
 
+#include "code/codeCache.hpp"
+
 // Inline functions for ppc64 frames:
 
 // Find codeblob and set deopt_state.
--- a/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Thu May 08 11:05:02 2014 +0200
+++ b/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Fri May 02 14:53:06 2014 +0200
@@ -26,7 +26,7 @@
 #ifndef CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
 #define CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
 
-#include "assembler_ppc.inline.hpp"
+#include "asm/macroAssembler.hpp"
 #include "interpreter/invocationCounter.hpp"
 
 // This file specializes the assembler with interpreter-specific macros.
--- a/src/cpu/ppc/vm/interpreterRT_ppc.cpp	Thu May 08 11:05:02 2014 +0200
+++ b/src/cpu/ppc/vm/interpreterRT_ppc.cpp	Fri May 02 14:53:06 2014 +0200
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/assembler.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "memory/allocation.inline.hpp"
--- a/src/cpu/ppc/vm/interpreter_ppc.cpp	Thu May 08 11:05:02 2014 +0200
+++ b/src/cpu/ppc/vm/interpreter_ppc.cpp	Fri May 02 14:53:06 2014 +0200
@@ -139,32 +139,16 @@
   // Signature is in R3_RET. Signature is callee saved.
   __ mr(signature, R3_RET);
 
-  // Reload method, it may have moved.
-#ifdef CC_INTERP
-  __ ld(R19_method, state_(_method));
-#else
-  __ ld(R19_method, 0, target_sp);
-  __ ld(R19_method, _ijava_state_neg(method), R19_method);
-#endif
-
   // Get the result handler.
   __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);
 
-  // Reload method, it may have moved.
-#ifdef CC_INTERP
-  __ ld(R19_method, state_(_method));
-#else
-  __ ld(R19_method, 0, target_sp);
-  __ ld(R19_method, _ijava_state_neg(method), R19_method);
-#endif
-
   {
     Label L;
     // test if static
     // _access_flags._flags must be at offset 0.
     // TODO PPC port: requires change in shared code.
     //assert(in_bytes(AccessFlags::flags_offset()) == 0,
-    //       "MethodOopDesc._access_flags == MethodOopDesc._access_flags._flags");
+    //       "MethodDesc._access_flags == MethodDesc._access_flags._flags");
     // _access_flags must be a 32 bit value.
     assert(sizeof(AccessFlags) == 4, "wrong size");
     __ lwa(R11_scratch1/*access_flags*/, method_(access_flags));
--- a/src/cpu/ppc/vm/jniFastGetField_ppc.cpp	Thu May 08 11:05:02 2014 +0200
+++ b/src/cpu/ppc/vm/jniFastGetField_ppc.cpp	Fri May 02 14:53:06 2014 +0200
@@ -32,7 +32,7 @@
 
 
 address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
-  // we don't have fast jni accessors.
+  // We don't have fast jni accessors.
   return (address) -1;
 }
 
@@ -57,12 +57,12 @@
 }
 
 address JNI_FastGetField::generate_fast_get_long_field() {
-  // we don't have fast jni accessors.
+  // We don't have fast jni accessors.
   return (address) -1;
 }
 
 address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
-  // e don't have fast jni accessors.
+  // We don't have fast jni accessors.
   return (address) -1;
 }
 
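Context for the (address) -1 returns above: that value is the conventional
sentinel for "no fast accessor available on this platform", in which case the
VM keeps the regular, fully-checked JNI field getters. A hedged sketch of the
caller-side check (hypothetical variable names, plain C++):

    // Sketch only: how a generated fast accessor is typically consumed.
    // (address)-1 means no fast path exists on this CPU, so the default
    // slow-path JNI implementation stays installed.
    address fast_entry = JNI_FastGetField::generate_fast_get_long_field();
    if (fast_entry != (address) -1) {
      // install fast_entry as the GetLongField entry point
    }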
--- a/src/cpu/ppc/vm/ppc.ad	Thu May 08 11:05:02 2014 +0200
+++ b/src/cpu/ppc/vm/ppc.ad	Fri May 02 14:53:06 2014 +0200
@@ -898,7 +898,7 @@
   // To keep related declarations/definitions/uses close together,
   // we switch between source %{ }% and source_hpp %{ }% freely as needed.
 
-  // Returns true if Node n is followed by a MemBar node that 
+  // Returns true if Node n is followed by a MemBar node that
   // will do an acquire. If so, this node must not do the acquire
   // operation.
   bool followed_by_acquire(const Node *n);
@@ -908,7 +908,7 @@
 
 // Optimize load-acquire.
 //
-// Check if acquire is unnecessary due to following operation that does 
+// Check if acquire is unnecessary due to following operation that does
 // acquire anyways.
 // Walk the pattern:
 //
@@ -919,12 +919,12 @@
 //  Proj(ctrl)  Proj(mem)
 //       |         |
 //   MemBarRelease/Volatile
-// 
+//
 bool followed_by_acquire(const Node *load) {
   assert(load->is_Load(), "So far implemented only for loads.");
 
   // Find MemBarAcquire.
-  const Node *mba = NULL;         
+  const Node *mba = NULL;
   for (DUIterator_Fast imax, i = load->fast_outs(imax); i < imax; i++) {
     const Node *out = load->fast_out(i);
     if (out->Opcode() == Op_MemBarAcquire) {
@@ -937,7 +937,7 @@
 
   // Find following MemBar node.
   //
-  // The following node must be reachable by control AND memory 
+  // The following node must be reachable by control AND memory
   // edge to assure no other operations are in between the two nodes.
   //
   // So first get the Proj node, mem_proj, to use it to iterate forward.
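To make the load-acquire optimization concrete: on PPC64 an acquiring load is
a plain load followed by a barrier, so when followed_by_acquire() proves that
a MemBarAcquire node directly follows the load, the load can be matched
without emitting its own barrier. A minimal sketch of the lowering being
avoided (illustrative inline asm, not the matcher's actual emission):

    // Load with acquire semantics: the trailing barrier is exactly what
    // can be omitted when a following MemBarAcquire emits one anyway.
    inline long load_acquire(const volatile long* p) {
      long v = *p;
      __asm__ __volatile__("lwsync" : : : "memory"); // orders Load|Load, Load|Store
      return v;
    }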
@@ -1135,6 +1135,7 @@
 
  public:
 
+  // Emit call stub, compiled java to interpreter.
   static void emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset);
 
   // Size of call trampoline stub.
@@ -2752,7 +2753,7 @@
       // inputs for new nodes
       m1->add_req(NULL, n_toc);
       m2->add_req(NULL, m1);
-      
+
       // operands for new nodes
       m1->_opnds[0] = new (C) iRegPdstOper(); // dst
       m1->_opnds[1] = op_src;                 // src
@@ -2760,29 +2761,29 @@
       m2->_opnds[0] = new (C) iRegPdstOper(); // dst
       m2->_opnds[1] = op_src;                 // src
       m2->_opnds[2] = new (C) iRegLdstOper(); // base
-      
+
       // Initialize ins_attrib TOC fields.
       m1->_const_toc_offset = -1;
       m2->_const_toc_offset_hi_node = m1;
-      
+
       // Register allocation for new nodes.
       ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
       ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
-      
+
       nodes->push(m1);
       nodes->push(m2);
       assert(m2->bottom_type()->isa_ptr(), "must be ptr");
     } else {
       loadConPNode *m2 = new (C) loadConPNode();
-      
+
       // inputs for new nodes
       m2->add_req(NULL, n_toc);
-      
+
       // operands for new nodes
       m2->_opnds[0] = new (C) iRegPdstOper(); // dst
       m2->_opnds[1] = op_src;                 // src
       m2->_opnds[2] = new (C) iRegPdstOper(); // toc
-      
+
       // Register allocation for new nodes.
       ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
 
@@ -2974,17 +2975,17 @@
       n_sub_base->_opnds[1] = op_crx;
       n_sub_base->_opnds[2] = op_src;
       n_sub_base->_bottom_type = _bottom_type;
-   
+
       n_shift->add_req(n_region, n_sub_base);
       n_shift->_opnds[0] = op_dst;
       n_shift->_opnds[1] = op_dst;
       n_shift->_bottom_type = _bottom_type;
-   
+
       ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
       ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
       ra_->set_pair(n_sub_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
       ra_->set_pair(n_move->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
-   
+
       nodes->push(n_move);
       nodes->push(n_compare);
       nodes->push(n_sub_base);
@@ -3061,20 +3062,20 @@
     } else {
       // before Power 7
       cond_add_baseNode *n_add_base = new (C) cond_add_baseNode();
-     
+
       n_add_base->add_req(n_region, n_compare, n_shift);
       n_add_base->_opnds[0] = op_dst;
       n_add_base->_opnds[1] = op_crx;
       n_add_base->_opnds[2] = op_dst;
       n_add_base->_bottom_type = _bottom_type;
-     
+
       assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
       ra_->set_oop(n_add_base, true);
-     
+
       ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
       ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
       ra_->set_pair(n_add_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
-     
+
       nodes->push(n_compare);
       nodes->push(n_shift);
       nodes->push(n_add_base);
@@ -3631,11 +3632,11 @@
     // Req...
     for (uint i = 0; i < req(); ++i) {
       // The expanded node does not need toc any more.
-      // Add the inline cache constant here instead.  This expresses the 
+      // Add the inline cache constant here instead. This expresses the
       // register of the inline cache must be live at the call.
       // Else we would have to adapt JVMState by -1.
       if (i == mach_constant_base_node_input()) {
-        call->add_req(loadConLNodes_IC._last);        
+        call->add_req(loadConLNodes_IC._last);
       } else {
         call->add_req(in(i));
       }
@@ -3663,6 +3664,8 @@
   %}
 
   // Compound version of call dynamic
+  // Toc is only passed so that it can be used in ins_encode statement.
+  // In the code we have to use $constanttablebase.
   enc_class enc_java_dynamic_call(method meth, iRegLdst toc) %{
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     MacroAssembler _masm(&cbuf);
@@ -3670,14 +3673,17 @@
 
     Register Rtoc = (ra_) ? $constanttablebase : R2_TOC;
 #if 0
+    int vtable_index = this->_vtable_index;
     if (_vtable_index < 0) {
       // Must be invalid_vtable_index, not nonvirtual_vtable_index.
       assert(_vtable_index == Method::invalid_vtable_index, "correct sentinel value");
       Register ic_reg = as_Register(Matcher::inline_cache_reg_encode());
-      AddressLiteral meta = __ allocate_metadata_address((Metadata *)Universe::non_oop_word());
-
+
+      // Virtual call relocation will point to ic load.
       address virtual_call_meta_addr = __ pc();
-      __ load_const_from_method_toc(ic_reg, meta, Rtoc);
+      // Load a clear inline cache.
+      AddressLiteral empty_ic((address) Universe::non_oop_word());
+      __ load_const_from_method_toc(ic_reg, empty_ic, Rtoc);
       // CALL to fixup routine.  Fixup routine uses ScopeDesc info
       // to determine who we intended to call.
       __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));
@@ -3710,7 +3716,6 @@
              "Fix constant in ret_addr_offset()");
     }
 #endif
-    guarantee(0, "Fix handling of toc edge: messes up derived/base pairs.");
     Unimplemented();  // ret_addr_offset not yet fixed. Depends on compressed oops (load klass!).
   %}
 
@@ -5436,7 +5441,7 @@
   ins_pipe(pipe_class_memory);
 %}
 
-// Match loading integer and casting it to unsigned int in 
+// Match loading integer and casting it to unsigned int in
 // long register.
 // LoadI + ConvI2L + AndL 0xffffffff.
 instruct loadUI2L(iRegLdst dst, memory mem, immL_32bits mask) %{
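The matched pattern is the ordinary "load an int and zero-extend to long"
idiom; a standard-C++ equivalent of what LoadI + ConvI2L + AndL(0xffffffff)
computes:

    #include <cstdint>
    // Widen the 32-bit load to 64 bits, then mask to the low word:
    // the result is the zero-extended (unsigned) value of *mem.
    inline int64_t load_ui2l(const int32_t* mem) {
      return (int64_t)*mem & 0xffffffffLL;   // same as (uint32_t)*mem
    }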
@@ -6078,7 +6083,7 @@
   ins_pipe(pipe_class_default);
 %}
 
-// This needs a match rule so that build_oop_map knows this is 
+// This needs a match rule so that build_oop_map knows this is
 // not a narrow oop.
 instruct loadConNKlass_lo(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
   match(Set dst src1);
@@ -6702,7 +6707,7 @@
   size(4);
   ins_encode %{
     // This is a Power7 instruction for which no machine description exists.
-    // TODO: PPC port $archOpcode(ppc64Opcode_compound); 
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     __ isel_0($dst$$Register, $crx$$CondRegister, Assembler::equal, $src1$$Register);
   %}
   ins_pipe(pipe_class_default);
@@ -6847,7 +6852,7 @@
   size(4);
   ins_encode %{
     // This is a Power7 instruction for which no machine description exists.
-    // TODO: PPC port $archOpcode(ppc64Opcode_compound); 
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     __ isel_0($dst$$Register, $crx$$CondRegister, Assembler::equal, $src1$$Register);
   %}
   ins_pipe(pipe_class_default);
@@ -7064,7 +7069,7 @@
     n1->_bottom_type = _bottom_type;
 
     decodeNKlass_shiftNode *n2 = new (C) decodeNKlass_shiftNode();
-    n2->add_req(n_region, n2);
+    n2->add_req(n_region, n1);
     n2->_opnds[0] = op_dst;
     n2->_opnds[1] = op_dst;
     n2->_bottom_type = _bottom_type;
@@ -7199,7 +7204,7 @@
 //  inline_unsafe_load_store).
 //
 // Add this node again if we found a good solution for inline_unsafe_load_store().
-// Don't forget to look at the implementation of post_store_load_barrier again, 
+// Don't forget to look at the implementation of post_store_load_barrier again,
 // we did other fixes in that method.
 //instruct unnecessary_membar_volatile() %{
 //  match(MemBarVolatile);
@@ -7237,7 +7242,7 @@
     // exists. Anyways, the scheduler should be off on Power7.
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     int cc        = $cmp$$cmpcode;
-    __ isel($dst$$Register, $crx$$CondRegister, 
+    __ isel($dst$$Register, $crx$$CondRegister,
             (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
   %}
   ins_pipe(pipe_class_default);
@@ -7283,7 +7288,7 @@
     // exists. Anyways, the scheduler should be off on Power7.
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     int cc        = $cmp$$cmpcode;
-    __ isel($dst$$Register, $crx$$CondRegister, 
+    __ isel($dst$$Register, $crx$$CondRegister,
             (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
   %}
   ins_pipe(pipe_class_default);
@@ -7329,7 +7334,7 @@
     // exists. Anyways, the scheduler should be off on Power7.
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     int cc        = $cmp$$cmpcode;
-    __ isel($dst$$Register, $crx$$CondRegister, 
+    __ isel($dst$$Register, $crx$$CondRegister,
             (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
   %}
   ins_pipe(pipe_class_default);
@@ -7376,7 +7381,7 @@
     // exists. Anyways, the scheduler should be off on Power7.
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     int cc        = $cmp$$cmpcode;
-    __ isel($dst$$Register, $crx$$CondRegister, 
+    __ isel($dst$$Register, $crx$$CondRegister,
             (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
   %}
   ins_pipe(pipe_class_default);
@@ -7522,8 +7527,8 @@
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, 
-                MacroAssembler::MemBarFenceAfter, MacroAssembler::cmpxchgx_hint_atomic_update(), 
+    __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+                MacroAssembler::MemBarFenceAfter, MacroAssembler::cmpxchgx_hint_atomic_update(),
                 $res$$Register, true);
   %}
   ins_pipe(pipe_class_default);
@@ -7929,7 +7934,23 @@
 
 // Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
 // positive longs and 0xF...F for negative ones.
-instruct signmask64I_regI(iRegIdst dst, iRegIsrc src) %{
+instruct signmask64I_regL(iRegIdst dst, iRegLsrc src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src);
+  predicate(false);
+
+  format %{ "SRADI   $dst, $src, #63" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_sradi);
+    __ sradi($dst$$Register, $src$$Register, 0x3f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
+// positive longs and 0xF...F for negative ones.
+instruct signmask64L_regL(iRegLdst dst, iRegLsrc src) %{
   // no match-rule, false predicate
   effect(DEF dst, USE src);
   predicate(false);
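The SRADI-by-63 encoding relies on arithmetic-shift semantics: shifting a
64-bit value right by 63 replicates the sign bit into all positions. In
standard C++ (on the usual targets where signed right shift is arithmetic):

    #include <cstdint>
    // 0x0...0 for non-negative x, 0xF...F (-1) for negative x,
    // mirroring 'sradi dst, src, 0x3f'.
    inline int64_t signmask64(int64_t x) { return x >> 63; }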
@@ -8893,7 +8914,7 @@
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm);
-    __ rlwinm($dst$$Register, $src1$$Register, 0, 
+    __ rlwinm($dst$$Register, $src1$$Register, 0,
               (31-log2_long((jlong) $src2$$constant)) & 0x1f, (31-log2_long((jlong) $src2$$constant)) & 0x1f);
   %}
   ins_pipe(pipe_class_default);
@@ -9619,14 +9640,14 @@
   ins_cost(DEFAULT_COST*4);
 
   expand %{
-    iRegIdst src1s;
-    iRegIdst src2s;
-    iRegIdst diff;
-    sxtI_reg(src1s, src1); // ensure proper sign extention
-    sxtI_reg(src2s, src2); // ensure proper sign extention
-    subI_reg_reg(diff, src1s, src2s);
+    iRegLdst src1s;
+    iRegLdst src2s;
+    iRegLdst diff;
+    convI2L_reg(src1s, src1); // Ensure proper sign extension.
+    convI2L_reg(src2s, src2); // Ensure proper sign extension.
+    subL_reg_reg(diff, src1s, src2s);
     // Need to consider >=33 bit result, therefore we need signmaskL.
-    signmask64I_regI(dst, diff);
+    signmask64I_regL(dst, diff);
   %}
 %}
 
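Why the temporaries were widened from iRegIdst to iRegLdst: the difference of
two 32-bit ints needs up to 33 bits, so a 32-bit subtraction can wrap and
deliver the wrong sign to the signmask. A small demonstration of the
corrected idiom (standard C++, hypothetical helper name):

    #include <cstdint>
    #include <climits>
    // -1 if a < b, else 0. Subtracting in 64 bits keeps the 33-bit result
    // exact; for a = INT_MIN, b = INT_MAX a wrapped 32-bit difference
    // would be +1, i.e. the sign bit would be wrong.
    inline int64_t less_mask(int32_t a, int32_t b) {
      int64_t diff = (int64_t)a - (int64_t)b;  // convI2L + subL
      return diff >> 63;                       // signmask64
    }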
@@ -10863,7 +10884,7 @@
   format %{ "PartialSubtypeCheck $result = ($subklass instanceOf $superklass) tmp: $tmp_klass, $tmp_arrayptr" %}
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
-    __ check_klass_subtype_slow_path($subklass$$Register, $superklass$$Register, $tmp_arrayptr$$Register, 
+    __ check_klass_subtype_slow_path($subklass$$Register, $superklass$$Register, $tmp_arrayptr$$Register,
                                      $tmp_klass$$Register, NULL, $result$$Register);
   %}
   ins_pipe(pipe_class_default);
@@ -11178,18 +11199,18 @@
   ins_cost(DEFAULT_COST*6);
 
   expand %{
-    iRegIdst src1s;
-    iRegIdst src2s;
-    iRegIdst diff;
-    iRegIdst sm;
-    iRegIdst doz; // difference or zero
-    sxtI_reg(src1s, src1); // Ensure proper sign extention.
-    sxtI_reg(src2s, src2); // Ensure proper sign extention.
-    subI_reg_reg(diff, src2s, src1s);
+    iRegLdst src1s;
+    iRegLdst src2s;
+    iRegLdst diff;
+    iRegLdst sm;
+    iRegLdst doz; // difference or zero
+    convI2L_reg(src1s, src1); // Ensure proper sign extension.
+    convI2L_reg(src2s, src2); // Ensure proper sign extension.
+    subL_reg_reg(diff, src2s, src1s);
     // Need to consider >=33 bit result, therefore we need signmaskL.
-    signmask64I_regI(sm, diff);
-    andI_reg_reg(doz, diff, sm); // <=0
-    addI_reg_reg(dst, doz, src1s);
+    signmask64L_regL(sm, diff);
+    andL_reg_reg(doz, diff, sm); // <=0
+    addI_regL_regL(dst, doz, src1s);
   %}
 %}
 
@@ -11198,19 +11219,18 @@
   ins_cost(DEFAULT_COST*6);
 
   expand %{
-    immI_minus1 m1 %{ -1 %}
-    iRegIdst src1s;
-    iRegIdst src2s;
-    iRegIdst diff;
-    iRegIdst sm;
-    iRegIdst doz; // difference or zero
-    sxtI_reg(src1s, src1); // Ensure proper sign extention.
-    sxtI_reg(src2s, src2); // Ensure proper sign extention.
-    subI_reg_reg(diff, src2s, src1s);
+    iRegLdst src1s;
+    iRegLdst src2s;
+    iRegLdst diff;
+    iRegLdst sm;
+    iRegLdst doz; // difference or zero
+    convI2L_reg(src1s, src1); // Ensure proper sign extension.
+    convI2L_reg(src2s, src2); // Ensure proper sign extension.
+    subL_reg_reg(diff, src2s, src1s);
     // Need to consider >=33 bit result, therefore we need signmaskL.
-    signmask64I_regI(sm, diff);
-    andcI_reg_reg(doz, sm, m1, diff); // >=0
-    addI_reg_reg(dst, doz, src1s);
+    signmask64L_regL(sm, diff);
+    andcL_reg_reg(doz, diff, sm); // >=0
+    addI_regL_regL(dst, doz, src1s);
   %}
 %}
 
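The two expansions above implement branchless min/max through the sign mask:
diff & sm keeps diff only when it is negative, the andc form diff & ~sm keeps
it only when it is non-negative, and adding src1 back selects the right
operand. An equivalent sketch in standard C++:

    #include <cstdint>
    inline int32_t min_via_signmask(int32_t a, int32_t b) {
      int64_t diff = (int64_t)b - (int64_t)a;  // convI2L + subL
      int64_t sm   = diff >> 63;               // -1 if b < a, else 0
      return (int32_t)(a + (diff & sm));       // a + min(b - a, 0)
    }
    inline int32_t max_via_signmask(int32_t a, int32_t b) {
      int64_t diff = (int64_t)b - (int64_t)a;
      int64_t sm   = diff >> 63;
      return (int32_t)(a + (diff & ~sm));      // andc: a + max(b - a, 0)
    }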
--- a/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Thu May 08 11:05:02 2014 +0200
+++ b/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Fri May 02 14:53:06 2014 +0200
@@ -81,24 +81,18 @@
 #if 0
 // Call special ClassCastException constructor taking object to cast
 // and target class as arguments.
-address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler(const char* name) {
+address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler() {
   address entry = __ pc();
 
-  // Target class oop is in register R6_ARG4 by convention!
-
   // Expression stack must be empty before entering the VM if an
   // exception happened.
   __ empty_expression_stack();
-  // Setup parameters.
+
   // Thread will be loaded to R3_ARG1.
-  __ load_const_optimized(R4_ARG2, (address) name);
-  __ mr(R5_ARG3, R17_tos);
-  // R6_ARG4 contains specified class.
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose));
-#ifdef ASSERT
+  // Target class oop is in register R5_ARG3 by convention!
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose, R17_tos, R5_ARG3));
   // Above call must not return here since exception pending.
-  __ should_not_reach_here();
-#endif
+  DEBUG_ONLY(__ should_not_reach_here();)
   return entry;
 }
 #endif
@@ -1535,14 +1529,32 @@
     __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
 
     // Get out of the current method and re-execute the call that called us.
-    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ return_pc, R11_scratch1, R12_scratch2);
+    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
     __ restore_interpreter_state(R11_scratch1);
     __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
     __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
-    __ mtlr(return_pc);
     if (ProfileInterpreter) {
       __ set_method_data_pointer_for_bcp();
     }
+#if INCLUDE_JVMTI
+    Label L_done;
+
+    __ lbz(R11_scratch1, 0, R14_bcp);
+    __ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
+    __ bne(CCR0, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+    __ ld(R4_ARG2, 0, R18_locals);
+    __ call_VM(R11_scratch1, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null),
+               R4_ARG2, R19_method, R14_bcp);
+
+    __ cmpdi(CCR0, R11_scratch1, 0);
+    __ beq(CCR0, L_done);
+
+    __ std(R11_scratch1, wordSize, R15_esp);
+    __ bind(L_done);
+#endif // INCLUDE_JVMTI
     __ dispatch_next(vtos);
   }
   // end of JVMTI PopFrame support
--- a/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Thu May 08 11:05:02 2014 +0200
+++ b/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Fri May 02 14:53:06 2014 +0200
@@ -64,7 +64,7 @@
   assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);
 
   switch (barrier) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
     case BarrierSet::G1SATBCT:
     case BarrierSet::G1SATBCTLogging:
       {
@@ -104,7 +104,7 @@
         __ bind(Ldone);
       }
       break;
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
     case BarrierSet::CardTableModRef:
     case BarrierSet::CardTableExtension:
       {
@@ -259,17 +259,17 @@
   switch (value) {
     default: ShouldNotReachHere();
     case 0: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
       __ lfs(F15_ftos, simm16_offset, R11_scratch1);
       break;
     }
     case 1: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
       __ lfs(F15_ftos, simm16_offset, R11_scratch1);
       break;
     }
     case 2: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
       __ lfs(F15_ftos, simm16_offset, R11_scratch1);
       break;
     }
@@ -282,12 +282,12 @@
   static double one  = 1.0;
   switch (value) {
     case 0: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
       __ lfd(F15_ftos, simm16_offset, R11_scratch1);
       break;
     }
     case 1: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
       __ lfd(F15_ftos, simm16_offset, R11_scratch1);
       break;
     }
@@ -3728,9 +3728,9 @@
   transition(atos, atos);
 
   Label Ldone, Lis_null, Lquicked, Lresolved;
-  Register Roffset         = R5_ARG3,
+  Register Roffset         = R6_ARG4,
            RobjKlass       = R4_ARG2,
-           RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect this register.
+           RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
            Rcpool          = R11_scratch1,
            Rtags           = R12_scratch2;
 
--- a/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp	Thu May 08 11:05:02 2014 +0200
+++ b/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp	Fri May 02 14:53:06 2014 +0200
@@ -53,41 +53,41 @@
 
 inline jlong Atomic::load(volatile jlong* src) { return *src; }
 
-/*
-  machine barrier instructions:
-
-  - sync            two-way memory barrier, aka fence
-  - lwsync          orders  Store|Store,
-                             Load|Store,
-                             Load|Load,
-                    but not Store|Load
-  - eieio           orders memory accesses for device memory (only)
-  - isync           invalidates speculatively executed instructions
-                    From the POWER ISA 2.06 documentation:
-                     "[...] an isync instruction prevents the execution of
-                    instructions following the isync until instructions
-                    preceding the isync have completed, [...]"
-                    From IBM's AIX assembler reference:
-                     "The isync [...] instructions causes the processor to
-                    refetch any instructions that might have been fetched
-                    prior to the isync instruction. The instruction isync
-                    causes the processor to wait for all previous instructions
-                    to complete. Then any instructions already fetched are
-                    discarded and instruction processing continues in the
-                    environment established by the previous instructions."
-
-  semantic barrier instructions:
-  (as defined in orderAccess.hpp)
-
-  - release         orders Store|Store,       (maps to lwsync)
-                            Load|Store
-  - acquire         orders  Load|Store,       (maps to lwsync)
-                            Load|Load
-  - fence           orders Store|Store,       (maps to sync)
-                            Load|Store,
-                            Load|Load,
-                           Store|Load
-*/
+//
+// machine barrier instructions:
+//
+// - sync            two-way memory barrier, aka fence
+// - lwsync          orders  Store|Store,
+//                            Load|Store,
+//                            Load|Load,
+//                   but not Store|Load
+// - eieio           orders memory accesses for device memory (only)
+// - isync           invalidates speculatively executed instructions
+//                   From the POWER ISA 2.06 documentation:
+//                    "[...] an isync instruction prevents the execution of
+//                   instructions following the isync until instructions
+//                   preceding the isync have completed, [...]"
+//                   From IBM's AIX assembler reference:
+//                    "The isync [...] instructions causes the processor to
+//                   refetch any instructions that might have been fetched
+//                   prior to the isync instruction. The instruction isync
+//                   causes the processor to wait for all previous instructions
+//                   to complete. Then any instructions already fetched are
+//                   discarded and instruction processing continues in the
+//                   environment established by the previous instructions."
+//
+// semantic barrier instructions:
+// (as defined in orderAccess.hpp)
+//
+// - release         orders Store|Store,       (maps to lwsync)
+//                           Load|Store
+// - acquire         orders  Load|Store,       (maps to lwsync)
+//                           Load|Load
+// - fence           orders Store|Store,       (maps to sync)
+//                           Load|Store,
+//                           Load|Load,
+//                          Store|Load
+//
 
 #define strasm_sync                       "\n  sync    \n"
 #define strasm_lwsync                     "\n  lwsync  \n"
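Connecting the semantic barriers documented above with the strasm_* string
macros: release and acquire both map to lwsync, fence to a full sync. A
hedged sketch of that mapping (illustration only; the real definitions live
in HotSpot's orderAccess code for linux_ppc):

    // lwsync before a store gives release semantics: all preceding
    // loads and stores complete before the store (Store|Store, Load|Store).
    inline void release_store(volatile int* p, int v) {
      __asm__ __volatile__(strasm_lwsync : : : "memory");
      *p = v;
    }
    // sync is the full two-way fence, also ordering Store|Load.
    inline void fence() {
      __asm__ __volatile__(strasm_sync : : : "memory");
    }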