changeset 962:a76f17a8fd77

Merge
author phh
date Fri, 06 Nov 2009 17:44:04 -0800
parents 395db6268de2 (diff) 26d0c7276c9e (current diff)
children 788af96b55e3 1a1b644fe617 0ad684e27697
files
diffstat 9 files changed, 146 insertions(+), 26 deletions(-)
--- a/src/cpu/sparc/vm/sparc.ad	Fri Nov 06 19:57:55 2009 -0500
+++ b/src/cpu/sparc/vm/sparc.ad	Fri Nov 06 17:44:04 2009 -0800
@@ -9515,8 +9515,9 @@
   // x |= (x >> 8);
   // x |= (x >> 16);
   // return (WORDBITS - popc(x));
-  format %{ "SRL     $src,1,$dst\t! count leading zeros (int)\n\t"
-            "OR      $src,$tmp,$dst\n\t"
+  format %{ "SRL     $src,1,$tmp\t! count leading zeros (int)\n\t"
+            "SRL     $src,0,$dst\t! 32-bit zero extend\n\t"
+            "OR      $dst,$tmp,$dst\n\t"
             "SRL     $dst,2,$tmp\n\t"
             "OR      $dst,$tmp,$dst\n\t"
             "SRL     $dst,4,$tmp\n\t"
@@ -9533,7 +9534,8 @@
     Register Rsrc = $src$$Register;
     Register Rtmp = $tmp$$Register;
     __ srl(Rsrc, 1, Rtmp);
-    __ or3(Rsrc, Rtmp, Rdst);
+    __ srl(Rsrc, 0, Rdst);
+    __ or3(Rdst, Rtmp, Rdst);
     __ srl(Rdst, 2, Rtmp);
     __ or3(Rdst, Rtmp, Rdst);
     __ srl(Rdst, 4, Rtmp);
@@ -9561,7 +9563,7 @@
   // x |= (x >> 16);
   // x |= (x >> 32);
   // return (WORDBITS - popc(x));
-  format %{ "SRLX    $src,1,$dst\t! count leading zeros (long)\n\t"
+  format %{ "SRLX    $src,1,$tmp\t! count leading zeros (long)\n\t"
             "OR      $src,$tmp,$dst\n\t"
             "SRLX    $dst,2,$tmp\n\t"
             "OR      $dst,$tmp,$dst\n\t"
--- a/src/share/vm/memory/compactingPermGenGen.cpp	Fri Nov 06 19:57:55 2009 -0500
+++ b/src/share/vm/memory/compactingPermGenGen.cpp	Fri Nov 06 17:44:04 2009 -0800
@@ -352,15 +352,19 @@
 }
 
 
+// Do not use in time-critical operations due to the possibility of paging
+// in otherwise untouched or previously unread portions of the perm gen,
+// for instance, the shared spaces. NOTE: Because CompactingPermGenGen
+// derives from OneContigSpaceCardGeneration which is supposed to have a
+// single space, and does not override its object_iterate() method,
+// object iteration via that interface does not look at the objects in
+// the shared spaces when using CDS. This should be fixed; see CR 6897798.
 void CompactingPermGenGen::space_iterate(SpaceClosure* blk, bool usedOnly) {
   OneContigSpaceCardGeneration::space_iterate(blk, usedOnly);
   if (spec()->enable_shared_spaces()) {
-#ifdef PRODUCT
     // Making the rw_space walkable will page in the entire space, and
-    // is to be avoided. However, this is required for Verify options.
-    ShouldNotReachHere();
-#endif
-
+    // is to be avoided in the case of time-critical operations.
+    // However, this is required for Verify and heap dump operations.
     blk->do_space(ro_space());
     blk->do_space(rw_space());
   }
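
The comments added here and in generation.cpp below describe the same structural quirk: CompactingPermGenGen inherits object_iterate() from OneContigSpaceCardGeneration, so object iteration through that interface never sees the CDS shared spaces, while space_iterate() now visits them explicitly (at the cost of possibly paging in the whole shared region). The following rough Java sketch models that visitor-style arrangement; all names are illustrative, not HotSpot's own.

    // Illustrative model only: "spaces" are just labels here.
    interface SpaceClosureModel { void doSpace(String space); }

    class PermGenGenModel {
        final boolean sharedSpacesEnabled;
        PermGenGenModel(boolean shared) { sharedSpacesEnabled = shared; }

        // Mirrors the patched space_iterate(): the unshared space is always
        // visited; the CDS ro/rw spaces are visited too, which Verify and
        // heap dumps need but time-critical callers should avoid.
        void spaceIterate(SpaceClosureModel blk) {
            blk.doSpace("unshared");
            if (sharedSpacesEnabled) {
                blk.doSpace("ro");
                blk.doSpace("rw");
            }
        }

        // Mirrors the inherited object_iterate(): only the single contiguous
        // space is walked, so shared-space objects are skipped (CR 6897789).
        void objectIterate(SpaceClosureModel blk) {
            blk.doSpace("unshared");
        }

        public static void main(String[] args) {
            PermGenGenModel gen = new PermGenGenModel(true);
            gen.spaceIterate(s -> System.out.println("space_iterate visits " + s));
            gen.objectIterate(s -> System.out.println("object_iterate visits " + s));
        }
    }
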
--- a/src/share/vm/memory/compactingPermGenGen.hpp	Fri Nov 06 19:57:55 2009 -0500
+++ b/src/share/vm/memory/compactingPermGenGen.hpp	Fri Nov 06 17:44:04 2009 -0800
@@ -29,6 +29,9 @@
 class PermanentGenerationSpec;
 
 // This is the "generation" view of a CompactingPermGen.
+// NOTE: the shared spaces used for CDS are here handled in
+// a somewhat awkward and potentially buggy fashion, see CR 6801625.
+// This infelicity should be fixed, see CR 6897789.
 class CompactingPermGenGen: public OneContigSpaceCardGeneration {
   friend class VMStructs;
   // Abstractly, this is a subtype that gets access to protected fields.
@@ -47,7 +50,7 @@
   OffsetTableContigSpace* _ro_space;
   OffsetTableContigSpace* _rw_space;
 
-  // With shared spaces there is a dicotomy in the use of the
+  // With shared spaces there is a dichotomy in the use of the
   // _virtual_space of the generation.  There is a portion of the
   // _virtual_space that is used for the unshared part of the
   // permanent generation and a portion that is reserved for the shared part.
--- a/src/share/vm/memory/generation.cpp	Fri Nov 06 19:57:55 2009 -0500
+++ b/src/share/vm/memory/generation.cpp	Fri Nov 06 17:44:04 2009 -0800
@@ -606,6 +606,13 @@
 void OneContigSpaceCardGeneration::prepare_for_verify() {}
 
 
+// Override for a card-table generation with one contiguous
+// space. NOTE: For reasons that are lost in the fog of history,
+// this code is used when you iterate over perm gen objects,
+// even when one uses CDS, where the perm gen has a couple of
+// other spaces; this is because CompactingPermGenGen derives
+// from OneContigSpaceCardGeneration. This should be cleaned up,
+// see CR 6897789.
 void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
   _the_space->object_iterate(blk);
 }
--- a/src/share/vm/memory/universe.cpp	Fri Nov 06 19:57:55 2009 -0500
+++ b/src/share/vm/memory/universe.cpp	Fri Nov 06 17:44:04 2009 -0800
@@ -744,19 +744,22 @@
 static const uint64_t OopEncodingHeapMax = NarrowOopHeapMax << LogMinObjAlignmentInBytes;
 
 char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
+  size_t base = 0;
 #ifdef _LP64
   if (UseCompressedOops) {
     assert(mode == UnscaledNarrowOop  ||
            mode == ZeroBasedNarrowOop ||
            mode == HeapBasedNarrowOop, "mode is invalid");
-
     const size_t total_size = heap_size + HeapBaseMinAddress;
-    if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
+    // Return specified base for the first request.
+    if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
+      base = HeapBaseMinAddress;
+    } else if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
       if (total_size <= NarrowOopHeapMax && (mode == UnscaledNarrowOop) &&
           (Universe::narrow_oop_shift() == 0)) {
         // Use 32-bits oops without encoding and
         // place heap's top on the 4Gb boundary
-        return (char*)(NarrowOopHeapMax - heap_size);
+        base = (NarrowOopHeapMax - heap_size);
       } else {
         // Can't reserve with NarrowOopShift == 0
         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
@@ -765,16 +768,38 @@
           // Use zero based compressed oops with encoding and
           // place heap's top on the 32Gb boundary in case
           // total_size > 4Gb or failed to reserve below 4Gb.
-          return (char*)(OopEncodingHeapMax - heap_size);
+          base = (OopEncodingHeapMax - heap_size);
         }
       }
     } else {
       // Can't reserve below 32Gb.
       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
     }
+    // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
+    // used in ReservedHeapSpace() constructors.
+    // The final values will be set in initialize_heap() below.
+    if (base != 0 && (base + heap_size) <= OopEncodingHeapMax) {
+      // Use zero based compressed oops
+      Universe::set_narrow_oop_base(NULL);
+      // Don't need guard page for implicit checks in indexed
+      // addressing mode with zero based Compressed Oops.
+      Universe::set_narrow_oop_use_implicit_null_checks(true);
+    } else {
+      // Set to a non-NULL value so the ReservedSpace ctor computes
+      // the correct no-access prefix.
+      // The final value will be set in initialize_heap() below.
+      Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
+#ifdef _WIN64
+      if (UseLargePages) {
+        // Cannot allocate guard pages for implicit checks in indexed
+        // addressing mode when large pages are specified on windows.
+        Universe::set_narrow_oop_use_implicit_null_checks(false);
+      }
+#endif //  _WIN64
+    }
   }
 #endif
-  return NULL; // also return NULL (don't care) for 32-bit VM
+  return (char*)base; // also return NULL (don't care) for 32-bit VM
 }
 
 jint Universe::initialize_heap() {
@@ -857,7 +882,7 @@
         // Can't reserve heap below 4Gb.
         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
       } else {
-        assert(Universe::narrow_oop_shift() == 0, "use unscaled narrow oop");
+        Universe::set_narrow_oop_shift(0);
         if (PrintCompressedOopsMode) {
           tty->print(", 32-bits Oops");
         }
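
The mode selection above boils down to: if the heap (plus HeapBaseMinAddress) fits below 4 GB, use unscaled narrow oops with the heap top on the 4 GB boundary; if it fits below 32 GB, use zero-based narrow oops with a shift and the heap top on the 32 GB boundary; otherwise fall back to heap-based compressed oops. The simplified Java sketch below shows just that arithmetic, ignoring the explicit HeapBaseMinAddress request, the mode parameter, and the implicit-null-check bookkeeping; the constant values are the usual 64-bit defaults assumed for this sketch, not taken from the patch.

    public class PreferredHeapBaseSketch {
        static final long NARROW_OOP_HEAP_MAX   = 1L << 32;            // 4 GB, assumed
        static final int  LOG_MIN_OBJ_ALIGNMENT = 3;                   // 8-byte alignment, assumed
        static final long OOP_ENCODING_HEAP_MAX =
                NARROW_OOP_HEAP_MAX << LOG_MIN_OBJ_ALIGNMENT;          // 32 GB

        // Returns the preferred heap base, or 0 when only heap-based
        // compressed oops (arbitrary base plus shift) remain possible.
        static long preferredBase(long heapSize, long heapBaseMinAddress) {
            long totalSize = heapSize + heapBaseMinAddress;
            if (totalSize <= NARROW_OOP_HEAP_MAX) {
                return NARROW_OOP_HEAP_MAX - heapSize;      // unscaled, heap top at 4 GB
            } else if (totalSize <= OOP_ENCODING_HEAP_MAX) {
                return OOP_ENCODING_HEAP_MAX - heapSize;    // zero-based with shift, top at 32 GB
            }
            return 0;                                       // heap-based
        }

        public static void main(String[] args) {
            long minAddr = 2L << 30;   // 2 GB, stand-in for HeapBaseMinAddress
            for (long gb : new long[] { 1, 20, 40 }) {
                System.out.printf("%2dg heap -> base 0x%x%n", gb, preferredBase(gb << 30, minAddr));
            }
        }
    }
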
--- a/src/share/vm/opto/superword.cpp	Fri Nov 06 19:57:55 2009 -0500
+++ b/src/share/vm/opto/superword.cpp	Fri Nov 06 17:44:04 2009 -0800
@@ -990,8 +990,8 @@
 // (5) We know there is no dependence cycle, so there is no other case;
 // (6) Finally, all memory ops in another single pack should be moved in the same direction.
 //
-// To schedule a load pack: the memory edge of every loads in the pack must be
-// the same as the memory edge of the last executed load in the pack
+// To schedule a load pack, we use the memory state of either the first or the last load in
+// the pack, based on the dependence constraint.
 void SuperWord::co_locate_pack(Node_List* pk) {
   if (pk->at(0)->is_Store()) {
     MemNode* first     = executed_first(pk)->as_Mem();
@@ -1076,15 +1076,32 @@
       current = my_mem->as_Mem();
     } // end while
   } else if (pk->at(0)->is_Load()) { //load
-    // all use the memory state that the last executed load uses
-    LoadNode* last_load  = executed_last(pk)->as_Load();
-    Node* last_mem       = last_load->in(MemNode::Memory);
-    _igvn.hash_delete(last_mem);
-    // Give each load same memory state as last
+    // all loads in the pack should have the same memory state. By default,
+    // we use the memory state of the last load. However, if any load could
+    // not be moved down due to the dependence constraint, we use the memory
+    // state of the first load.
+    Node* last_mem  = executed_last(pk)->in(MemNode::Memory);
+    Node* first_mem = executed_first(pk)->in(MemNode::Memory);
+    bool schedule_last = true;
+    for (uint i = 0; i < pk->size(); i++) {
+      Node* ld = pk->at(i);
+      for (Node* current = last_mem; current != ld->in(MemNode::Memory);
+           current=current->in(MemNode::Memory)) {
+        assert(current != first_mem, "corrupted memory graph");
+        if(current->is_Mem() && !independent(current, ld)){
+          schedule_last = false; // a later store depends on this load
+          break;
+        }
+      }
+    }
+
+    Node* mem_input = schedule_last ? last_mem : first_mem;
+    _igvn.hash_delete(mem_input);
+    // Give each load the same memory state
     for (uint i = 0; i < pk->size(); i++) {
       LoadNode* ld = pk->at(i)->as_Load();
       _igvn.hash_delete(ld);
-      ld->set_req(MemNode::Memory, last_mem);
+      ld->set_req(MemNode::Memory, mem_input);
       _igvn._worklist.push(ld);
     }
   }
@@ -1902,6 +1919,11 @@
   }
   // Match AddP(base, AddP(ptr, k*iv [+ invariant]), constant)
   Node* base = adr->in(AddPNode::Base);
+  // An unsafe reference cannot be aligned appropriately without runtime checking
+  if (base == NULL || base->bottom_type() == Type::TOP) {
+    assert(!valid(), "unsafe access");
+    return;
+  }
   for (int i = 0; i < 3; i++) {
     if (!scaled_iv_plus_offset(adr->in(AddPNode::Offset))) {
       assert(!valid(), "too complex");
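
The new load-pack logic keeps the memory state of the last executed load unless, walking the memory chain from that state down to a pack member's own memory input, it meets a store the load is not independent of; in that case every load gets the first load's memory state instead. The small Java model below mirrors that walk with toy node classes standing in for the C2 IR; all names are illustrative.

    import java.util.*;

    public class LoadPackScheduleModel {
        static class MemNodeModel {
            final String name;
            final boolean isStore;
            MemNodeModel memIn;                                  // the MemNode::Memory edge
            final Set<MemNodeModel> conflicts = new HashSet<>(); // nodes it is not independent of
            MemNodeModel(String name, boolean isStore) { this.name = name; this.isStore = isStore; }
        }

        static boolean independent(MemNodeModel a, MemNodeModel b) {
            return !a.conflicts.contains(b);
        }

        // True if every load in the pack may use lastMem; false if some store
        // between lastMem and a load's own memory input depends on that load.
        static boolean scheduleLast(List<MemNodeModel> loads, MemNodeModel lastMem) {
            for (MemNodeModel ld : loads) {
                for (MemNodeModel cur = lastMem; cur != ld.memIn; cur = cur.memIn) {
                    if (cur.isStore && !independent(cur, ld)) {
                        return false;    // a later store depends on this load
                    }
                }
            }
            return true;
        }

        public static void main(String[] args) {
            MemNodeModel mem0  = new MemNodeModel("mem0", false);
            MemNodeModel store = new MemNodeModel("store", true);
            store.memIn = mem0;
            MemNodeModel ld1 = new MemNodeModel("ld1", false); ld1.memIn = mem0;  // first load
            MemNodeModel ld2 = new MemNodeModel("ld2", false); ld2.memIn = store; // last load
            store.conflicts.add(ld1);      // the store overlaps ld1's location
            System.out.println("schedule at last load's memory state? "
                               + scheduleLast(Arrays.asList(ld1, ld2), ld2.memIn));
        }
    }
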
--- a/test/compiler/6636138/Test1.java	Fri Nov 06 19:57:55 2009 -0500
+++ b/test/compiler/6636138/Test1.java	Fri Nov 06 17:44:04 2009 -0800
@@ -45,7 +45,7 @@
         for (int i = 0; i < src.length; i++) {
             if (src[i] != ref[i]) {
                 System.out.println("Error: src and ref don't match at " + i);
-                System.exit(-1);
+                System.exit(97);
             }
         }
     }
--- a/test/compiler/6636138/Test2.java	Fri Nov 06 19:57:55 2009 -0500
+++ b/test/compiler/6636138/Test2.java	Fri Nov 06 17:44:04 2009 -0800
@@ -51,7 +51,7 @@
             int value = (i-1 + src.length)%src.length; // correct value after shifting
                 if (src[i] != value) {
                     System.out.println("Error: src["+i+"] should be "+ value + " instead of " + src[i]);
-                    System.exit(-1);
+                    System.exit(97);
                 }
         }
     }
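
Both tests now report failure with System.exit(97) instead of System.exit(-1). On POSIX systems only the low 8 bits of the status survive, so exit(-1) is reported as 255 and is easy to confuse with other abnormal terminations. A small, purely illustrative snippet of the pattern follows; the class name and system property are made up for the example.

    public class ExitStatusDemo {
        public static void main(String[] args) {
            // A negative status is reported modulo 256 by the OS, so -1 shows
            // up as 255; a distinct positive code such as 97 is unambiguous.
            boolean failed = Boolean.getBoolean("demo.fail");   // hypothetical switch
            if (failed) {
                System.out.println("Error: reporting failure with status 97");
                System.exit(97);
            }
            // Falling off main ends the VM with status 0 (success).
        }
    }
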
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6852078/Test6852078.java	Fri Nov 06 17:44:04 2009 -0800
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6852078
+ * @summary Disable SuperWord optimization for unsafe read/write
+ *
+ * @run main/othervm Test6852078
+ */
+
+import java.util.*;
+import java.nio.ByteBuffer;
+import com.sun.corba.se.impl.encoding.ByteBufferWithInfo;
+import com.sun.jndi.toolkit.corba.CorbaUtils;
+
+public class Test6852078 {
+
+    public Test6852078(String [] args) {
+
+        int capacity = 128;
+        ByteBuffer bb = ByteBuffer.allocateDirect(capacity);
+        ByteBufferWithInfo bbwi = new ByteBufferWithInfo( CorbaUtils.getOrb(null, -1, new Hashtable()), bb);
+        byte[] tmpBuf;
+        tmpBuf = new byte[bbwi.buflen];
+
+        for (int i = 0; i < capacity; i++)
+            tmpBuf[i] = bbwi.byteBuffer.get(i);
+    }
+
+    public static void main(String [] args) {
+        for (int i=0; i<2000; i++) {
+            Test6852078 t = new Test6852078(args);
+        }
+    }
+}