changeset 5027:90cfd4ad3c92 hs24.60-b03

Merge
author iveresov
date Thu, 14 Nov 2013 17:59:28 -0800
parents ecd0bdbc9635 (current diff) 2a667d6ef59e (diff)
children 8fd0e931efa5
diffstat 7 files changed, 136 insertions(+), 29 deletions(-)
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Mon Nov 11 11:53:33 2013 -0800
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Thu Nov 14 17:59:28 2013 -0800
@@ -1395,19 +1395,18 @@
     addr = new LIR_Address(src.result(), offset, type);
   }
 
-  if (data != dst) {
-    __ move(data, dst);
-    data = dst;
-  }
+  // Because we want a 2-arg form of xchg and xadd
+  __ move(data, dst);
+
   if (x->is_add()) {
-    __ xadd(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
+    __ xadd(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
   } else {
     if (is_obj) {
       // Do the pre-write barrier, if any.
       pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                   true /* do_load */, false /* patch */, NULL);
     }
-    __ xchg(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
+    __ xchg(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
     if (is_obj) {
       // Seems to be a precise address
       post_barrier(LIR_OprFact::address(addr), data);
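
Illustrative sketch, not part of this changeset: the C1 code patched above sits behind the Unsafe.getAndSetObject()-style intrinsics (the bug summary of the new regression test below names that intrinsic). Java of roughly the following shape reaches it once the loop gets hot; the class name is made up, and whether getAndSet()/getAndAdd() actually bottom out in those intrinsics depends on the JDK library version.

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;

public class AtomicIntrinsicShapes {
    static final AtomicReference<Object> ref = new AtomicReference<Object>();
    static final AtomicLong counter = new AtomicLong();

    public static void main(String[] args) {
        Object payload = new Object();
        for (int i = 0; i < 100000; i++) {
            ref.getAndSet(payload);  // object swap: the xchg path; with G1 it also needs pre/post barriers
            counter.getAndAdd(1L);   // numeric add: the lock xadd path, no barriers
        }
    }
}
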
--- a/src/share/vm/c1/c1_LinearScan.cpp	Mon Nov 11 11:53:33 2013 -0800
+++ b/src/share/vm/c1/c1_LinearScan.cpp	Thu Nov 14 17:59:28 2013 -0800
@@ -1138,8 +1138,10 @@
         }
       }
     }
-
-  } else if (opr_type != T_LONG) {
+    // We sometimes want to use logical operations on pointers, in particular in GC barriers.
+    // Since 64-bit logical operations do not currently support operands on the stack, we have to make sure
+    // T_OBJECT doesn't get spilled along with T_LONG.
+  } else if (opr_type != T_LONG LP64_ONLY(&& opr_type != T_OBJECT)) {
     // integer instruction (note: long operands must always be in register)
     switch (op->code()) {
       case lir_cmp:
--- a/src/share/vm/opto/compile.cpp	Mon Nov 11 11:53:33 2013 -0800
+++ b/src/share/vm/opto/compile.cpp	Thu Nov 14 17:59:28 2013 -0800
@@ -834,6 +834,7 @@
   }
 #endif
 
+  NOT_PRODUCT( verify_barriers(); )
   // Now that we know the size of all the monitors we can add a fixed slot
   // for the original deopt pc.
 
@@ -3239,6 +3240,72 @@
     }
   }
 }
+
+// Verify GC barrier consistency
+// Currently supported:
+// - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
+void Compile::verify_barriers() {
+  if (UseG1GC) {
+    // Verify G1 pre-barriers
+    const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active());
+
+    ResourceArea *area = Thread::current()->resource_area();
+    Unique_Node_List visited(area);
+    Node_List worklist(area);
+    // We're going to walk control flow backwards starting from the Root
+    worklist.push(_root);
+    while (worklist.size() > 0) {
+      Node* x = worklist.pop();
+      if (x == NULL || x == top()) continue;
+      if (visited.member(x)) {
+        continue;
+      } else {
+        visited.push(x);
+      }
+
+      if (x->is_Region()) {
+        for (uint i = 1; i < x->req(); i++) {
+          worklist.push(x->in(i));
+        }
+      } else {
+        worklist.push(x->in(0));
+        // We are looking for the pattern:
+        //                            /->ThreadLocal
+        // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
+        //              \->ConI(0)
+        // We want to verify that the If and the LoadB have the same control
+        // See GraphKit::g1_write_barrier_pre()
+        if (x->is_If()) {
+          IfNode *iff = x->as_If();
+          if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
+            CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
+            if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
+                && cmp->in(1)->is_Load()) {
+              LoadNode* load = cmp->in(1)->as_Load();
+              if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
+                  && load->in(2)->in(3)->is_Con()
+                  && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {
+
+                Node* if_ctrl = iff->in(0);
+                Node* load_ctrl = load->in(0);
+
+                if (if_ctrl != load_ctrl) {
+                  // Skip possible CProj->NeverBranch in infinite loops
+                  if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
+                      && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
+                    if_ctrl = if_ctrl->in(0)->in(0);
+                  }
+                }
+                assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
 #endif
 
 // The Compile object keeps track of failure reasons separately from the ciEnv.
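
Illustrative sketch, not part of this changeset: the pattern verify_barriers() matches is what GraphKit::g1_write_barrier_pre() emits in front of ordinary oop stores under -XX:+UseG1GC. A field write as plain as the one below produces the LoadB of the per-thread SATB "marking active" byte (the ThreadLocal/AddP part of the pattern at marking_offset) followed by the CmpI-against-zero If whose control, the new check insists, must match the load's. The Holder class is hypothetical.

class Holder {
    Object field;

    void set(Object value) {
        // With -XX:+UseG1GC, C2 wraps this oop store in the SATB pre-barrier
        // (and a card-marking post-barrier); the pre-barrier's "is marking
        // active?" test is exactly the If/CmpI/LoadB shape checked above.
        this.field = value;
    }
}
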
--- a/src/share/vm/opto/compile.hpp	Mon Nov 11 11:53:33 2013 -0800
+++ b/src/share/vm/opto/compile.hpp	Thu Nov 14 17:59:28 2013 -0800
@@ -1097,6 +1097,9 @@
   // Print bytecodes, including the scope inlining tree
   void print_codes();
 
+  // Verify GC barrier patterns
+  void verify_barriers() PRODUCT_RETURN;
+
   // End-of-run dumps.
   static void print_statistics() PRODUCT_RETURN;
 
--- a/src/share/vm/opto/loopTransform.cpp	Mon Nov 11 11:53:33 2013 -0800
+++ b/src/share/vm/opto/loopTransform.cpp	Thu Nov 14 17:59:28 2013 -0800
@@ -1954,7 +1954,7 @@
       // Find loads off the surviving projection; remove their control edge
       for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
         Node* cd = dp->fast_out(i); // Control-dependent node
-        if( cd->is_Load() ) {   // Loads can now float around in the loop
+        if (cd->is_Load() && cd->depends_only_on_test()) {   // Loads can now float around in the loop
           // Allow the load to float around in the loop, or before it
           // but NOT before the pre-loop.
           _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL
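
Illustrative sketch, not part of this changeset: the transformation above runs during range-check elimination, and a loop like the one below is the kind of code it rewrites. Once only the dominating check survives, an ordinary array load such as a[i] depends only on that test, so dropping its control edge and letting it float is safe. The point of the added depends_only_on_test() filter is that raw-pointer loads emitted by GC barriers do not qualify and now keep their control edge.

    static long sum(int[] a) {
        long s = 0;
        for (int i = 0; i < a.length; i++) {
            s += a[i];  // plain LoadI: depends_only_on_test() is true, so it may still float
        }
        return s;
    }
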
--- a/src/share/vm/opto/memnode.hpp	Mon Nov 11 11:53:33 2013 -0800
+++ b/src/share/vm/opto/memnode.hpp	Thu Nov 14 17:59:28 2013 -0800
@@ -204,6 +204,17 @@
 protected:
   const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                      ciKlass* klass) const;
+  // depends_only_on_test is almost always true, and needs to be almost always
+  // true to enable key hoisting & commoning optimizations.  However, for the
+  // special case of RawPtr loads from TLS top & end, and other loads performed by
+  // GC barriers, the control edge carries the dependence preventing hoisting past
+  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
+  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
+  // which produce results (new raw memory state) inside of loops preventing all
+  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
+  // See comment in macro.cpp, around line 125 expand_allocate_common().
+  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
+
 };
 
 //------------------------------LoadBNode--------------------------------------
@@ -370,16 +381,6 @@
   virtual uint ideal_reg() const { return Op_RegP; }
   virtual int store_Opcode() const { return Op_StoreP; }
   virtual BasicType memory_type() const { return T_ADDRESS; }
-  // depends_only_on_test is almost always true, and needs to be almost always
-  // true to enable key hoisting & commoning optimizations.  However, for the
-  // special case of RawPtr loads from TLS top & end, the control edge carries
-  // the dependence preventing hoisting past a Safepoint instead of the memory
-  // edge.  (An unfortunate consequence of having Safepoints not set Raw
-  // Memory; itself an unfortunate consequence of having Nodes which produce
-  // results (new raw memory state) inside of loops preventing all manner of
-  // other optimizations).  Basically, it's ugly but so is the alternative.
-  // See comment in macro.cpp, around line 125 expand_allocate_common().
-  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
 };
 
 
@@ -393,16 +394,6 @@
   virtual uint ideal_reg() const { return Op_RegN; }
   virtual int store_Opcode() const { return Op_StoreN; }
   virtual BasicType memory_type() const { return T_NARROWOOP; }
-  // depends_only_on_test is almost always true, and needs to be almost always
-  // true to enable key hoisting & commoning optimizations.  However, for the
-  // special case of RawPtr loads from TLS top & end, the control edge carries
-  // the dependence preventing hoisting past a Safepoint instead of the memory
-  // edge.  (An unfortunate consequence of having Safepoints not set Raw
-  // Memory; itself an unfortunate consequence of having Nodes which produce
-  // results (new raw memory state) inside of loops preventing all manner of
-  // other optimizations).  Basically, it's ugly but so is the alternative.
-  // See comment in macro.cpp, around line 125 expand_allocate_common().
-  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
 };
 
 //------------------------------LoadKlassNode----------------------------------
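
Illustrative sketch, not part of this changeset: the "RawPtr loads from TLS top & end" the comment refers to show up in code as simple as the allocation loop below. The fast-path allocation expanded in expand_allocate_common() loads the allocation top and end pointers from thread-local storage as raw-pointer loads; since their adr_type() is TypeRawPtr::BOTTOM, depends_only_on_test() returns false for them, and it is the control edge rather than the memory edge that keeps them from being hoisted past a safepoint. The method name is made up.

    static Object[] churn(int n) {
        Object[] last = null;
        for (int i = 0; i < n; i++) {
            // Each iteration's allocation fast path reloads the raw top/end
            // pointers; those loads must stay under the loop's safepoint, so
            // they rely on their control edge, not on a raw-memory dependence.
            last = new Object[8];
        }
        return last;
    }
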
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/regalloc/C1ObjectSpillInLogicOp.java	Thu Nov 14 17:59:28 2013 -0800
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8027751
+ * @summary C1 crashes generating G1 post-barrier in Unsafe.getAndSetObject() intrinsic because of the new value spill
+ * @run main/othervm -XX:+UseG1GC C1ObjectSpillInLogicOp
+ *
+ * G1 barriers use logical operators (xor) on T_OBJECT mixed with T_LONG or T_INT.
+ * The current implementation of logical operations on x86 in C1 doesn't allow long operands to be on the stack.
+ * There is special code in the register allocator that forces long operands into registers on x86. However,
+ * T_OBJECT can be spilled just fine, and in that case the xor emission will fail.
+ */
+
+import java.util.concurrent.atomic.*;
+public class C1ObjectSpillInLogicOp {
+  static public void main(String[] args) {
+    AtomicReferenceArray<Integer> x = new AtomicReferenceArray<Integer>(128);
+    Integer y = new Integer(0);
+    for (int i = 0; i < 50000; i++) {
+      x.getAndSet(i % x.length(), y);
+    }
+  }
+}