changeset 1247:230fac611b50

Merge
author johnc
date Mon, 08 Feb 2010 09:58:32 -0800
parents ff3232b68fbb (current diff) deada8912c54 (diff)
children 95d21201c29a 38836cf1d8d2
files src/share/vm/c1/c1_Runtime1.cpp src/share/vm/includeDB_core
diffstat 11 files changed, 96 insertions(+), 31 deletions(-)
--- a/src/share/vm/c1/c1_Runtime1.cpp	Fri Feb 05 12:27:12 2010 -0800
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Mon Feb 08 09:58:32 2010 -0800
@@ -1075,6 +1075,7 @@
 };
 
 
+// Below length is the # elements copied.
 template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
                                           oopDesc* dst, T* dst_addr,
                                           int length) {
@@ -1083,22 +1084,22 @@
   // barrier. The assert will fail if this is not the case.
   // Note that we use the non-virtual inlineable variant of write_ref_array.
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_ref_array_opt(),
-         "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
   if (src == dst) {
     // same object, no check
+    bs->write_ref_array_pre(dst_addr, length);
     Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-    bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
-                                  (HeapWord*)(dst_addr + length)));
+    bs->write_ref_array((HeapWord*)dst_addr, length);
     return ac_ok;
   } else {
     klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
     klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
     if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
       // Elements are guaranteed to be subtypes, so no check necessary
+      bs->write_ref_array_pre(dst_addr, length);
       Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-      bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
-                                    (HeapWord*)(dst_addr + length)));
+      bs->write_ref_array((HeapWord*)dst_addr, length);
       return ac_ok;
     }
   }
@@ -1162,9 +1163,16 @@
 #endif
 
   if (num == 0) return;
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
+  if (UseCompressedOops) {
+    bs->write_ref_array_pre((narrowOop*)dst, num);
+  } else {
+    bs->write_ref_array_pre((oop*)dst, num);
+  }
   Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  bs->write_ref_array(MemRegion(dst, dst + num));
+  bs->write_ref_array(dst, num);
 JRT_END
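
The hunks above establish a fixed ordering for oop array copies: SATB
pre-barrier first, then the copy, then the card-marking post-barrier.
Below is a minimal sketch of that ordering; SimpleBarrierSet,
copy_oop_array, and the stub bodies are simplified stand-ins for
illustration, not HotSpot code.

#include <cstddef>
#include <cstring>

typedef void* oop;  // simplified stand-in for HotSpot's oopDesc*

struct SimpleBarrierSet {
  // SATB-style pre-barrier: record the values about to be overwritten.
  void write_ref_array_pre(oop* dst, int length) { /* enqueue old refs */ }
  // Post-barrier: dirty the cards covering the written region.
  void write_ref_array(oop* start, size_t count) { /* mark cards */ }
};

void copy_oop_array(SimpleBarrierSet* bs, oop* src, oop* dst, int length) {
  bs->write_ref_array_pre(dst, length);      // 1. before the copy
  memmove(dst, src, length * sizeof(oop));   // 2. the copy (handles overlap)
  bs->write_ref_array(dst, (size_t)length);  // 3. after the copy
}

Note that the pre-barrier must run before the destination slots are
overwritten, since G1's snapshot-at-the-beginning invariant needs the
old referents.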
 
 
--- a/src/share/vm/classfile/javaClasses.cpp	Fri Feb 05 12:27:12 2010 -0800
+++ b/src/share/vm/classfile/javaClasses.cpp	Mon Feb 08 09:58:32 2010 -0800
@@ -1121,10 +1121,23 @@
   }
 
   void flush() {
+    // The following appears to have been an optimization to avoid a
+    // barrier for each individual store into the _methods array, doing
+    // it instead once for the entire array after the series of writes.
+    // That optimization seems to have been lost when compressed oops
+    // were implemented. However, the extra card mark below was left in
+    // place, but is now redundant because the individual stores into the
+    // _methods array already execute the barrier code. CR 6918185 has
+    // been filed so that the original code may be restored by deferring
+    // the barriers until after the entire sequence of stores, thus
+    // re-enabling the intent of the original optimization. In the
+    // meantime, the redundant card mark below is disabled.
     if (_dirty && _methods != NULL) {
+#if 0
       BarrierSet* bs = Universe::heap()->barrier_set();
       assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
       bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
+#endif
       _dirty = false;
     }
   }
@@ -1168,9 +1181,7 @@
       method = mhandle();
     }
 
-     _methods->obj_at_put(_index, method);
-    // bad for UseCompressedOops
-    // *_methods->obj_at_addr(_index) = method;
+    _methods->obj_at_put(_index, method);
     _bcis->ushort_at_put(_index, bci);
     _index++;
     _dirty = true;
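
The comment added to flush() contrasts two barrier strategies: a barrier
on every element store versus one deferred batch barrier after all the
stores. A schematic sketch of the two shapes, using illustrative
stand-ins rather than HotSpot API:

typedef void* oop;

// Store plus immediate card mark (what obj_at_put effectively does today).
void store_with_barrier(oop* slot, oop v) { *slot = v; /* dirty card */ }
// Raw store with no barrier.
void plain_store(oop* slot, oop v)        { *slot = v; }
// One batch card mark covering the whole array.
void batch_barrier(oop* base, int n)      { /* dirty covering cards */ }

void fill_per_element(oop* methods, oop* vals, int n) {
  for (int i = 0; i < n; i++)
    store_with_barrier(&methods[i], vals[i]);  // barrier on every store
  // nothing left to do at flush(): each store already dirtied its card
}

void fill_deferred(oop* methods, oop* vals, int n) {
  for (int i = 0; i < n; i++)
    plain_store(&methods[i], vals[i]);         // raw stores only
  batch_barrier(methods, n);                   // single barrier at flush()
}

CR 6918185 tracks restoring the second, deferred shape.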
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Fri Feb 05 12:27:12 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Mon Feb 08 09:58:32 2010 -0800
@@ -300,7 +300,23 @@
   int count;
   jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
   assert(cached_ptr != NULL, "bad cached card ptr");
-  assert(!is_young_card(cached_ptr), "shouldn't get a card in young region");
+
+  if (is_young_card(cached_ptr)) {
+    // The region containing cached_ptr has been freed during a cleanup
+    // pause, reallocated, and tagged as young.
+    assert(cached_ptr != card_ptr, "shouldn't be");
+
+    // We've just inserted a new old-gen card pointer into the card count
+    // cache and evicted the previous contents of that count slot.
+    // The evicted card pointer has been determined to be in a young region
+    // and so cannot be the newly inserted card pointer (that will be
+    // in an old region).
+    // The count for the newly inserted card will be set to zero during
+    // the insertion, so we don't want to defer cleaning the newly
+    // inserted card pointer.
+    assert(*defer == false, "deferring non-hot card");
+    return NULL;
+  }
 
   // The card pointer we obtained from card count cache is not hot
  // so do not store it in the cache; return it for immediate
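
The new early return changes the function's contract: a NULL result now
also covers the case where the evicted card lies in a recycled young
region and needs no further processing. A rough sketch of that shape,
with stubbed helpers standing in for the real card count cache:

#include <cstddef>

typedef signed char jbyte;

static bool is_young_card(jbyte*) { return false; }        // stub
static jbyte* add_card_count(jbyte* p, int* c, bool* d) {  // stub
  *c = 0; *d = false; return p;
}

jbyte* cache_insert(jbyte* card_ptr, int* count, bool* defer) {
  jbyte* cached_ptr = add_card_count(card_ptr, count, defer);
  if (is_young_card(cached_ptr)) {
    // The evicted entry's region was freed and reallocated as young
    // since it was cached; its card needs no refinement, so drop it.
    return NULL;
  }
  return cached_ptr;  // not hot: hand back for immediate refinement
}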
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Feb 05 12:27:12 2010 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Feb 08 09:58:32 2010 -0800
@@ -2505,6 +2505,7 @@
 }
 
 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
+  // always_do_update_barrier = false;
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
   // Call allocation profiler
   AllocationProfiler::iterate_since_last_gc();
@@ -2518,6 +2519,7 @@
   // is set.
   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
                         "derived pointer present"));
+  // always_do_update_barrier = true;
 }
 
 void G1CollectedHeap::do_collection_pause() {
--- a/src/share/vm/includeDB_core	Fri Feb 05 12:27:12 2010 -0800
+++ b/src/share/vm/includeDB_core	Mon Feb 08 09:58:32 2010 -0800
@@ -175,6 +175,7 @@
 arguments.cpp                           management.hpp
 arguments.cpp                           oop.inline.hpp
 arguments.cpp                           os_<os_family>.inline.hpp
+arguments.cpp                           referenceProcessor.hpp
 arguments.cpp                           universe.inline.hpp
 arguments.cpp                           vm_version_<arch>.hpp
 
--- a/src/share/vm/memory/barrierSet.hpp	Fri Feb 05 12:27:12 2010 -0800
+++ b/src/share/vm/memory/barrierSet.hpp	Mon Feb 08 09:58:32 2010 -0800
@@ -124,8 +124,6 @@
   // Below length is the # array elements being written
   virtual void write_ref_array_pre(      oop* dst, int length) {}
   virtual void write_ref_array_pre(narrowOop* dst, int length) {}
-  // Below MemRegion mr is expected to be HeapWord-aligned
-  inline void write_ref_array(MemRegion mr);
   // Below count is the # array elements being written, starting
   // at the address "start", which may not necessarily be HeapWord-aligned
   inline void write_ref_array(HeapWord* start, size_t count);
--- a/src/share/vm/memory/barrierSet.inline.hpp	Fri Feb 05 12:27:12 2010 -0800
+++ b/src/share/vm/memory/barrierSet.inline.hpp	Mon Feb 08 09:58:32 2010 -0800
@@ -42,16 +42,6 @@
   }
 }
 
-void BarrierSet::write_ref_array(MemRegion mr) {
-  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start() , "Unaligned start");
-  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),    "Unaligned end"  );
-  if (kind() == CardTableModRef) {
-    ((CardTableModRefBS*)this)->inline_write_ref_array(mr);
-  } else {
-    write_ref_array_work(mr);
-  }
-}
-
 // count is number of array elements being written
 void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
   assert(count <= (size_t)max_intx, "count too large");
@@ -61,12 +51,12 @@
   // strictly necessary for current uses, but a case of good hygiene and,
   // if you will, aesthetics) and the second upward (this is essential for
   // current uses) to a HeapWord boundary, so we mark all cards overlapping
-  // this write. In the event that this evolves in the future to calling a
+  // this write. If this evolves in the future to calling a
   // logging barrier of narrow oop granularity, like the pre-barrier for G1
   // (mentioned here merely by way of example), we will need to change this
-  // interface, much like the pre-barrier one above, so it is "exactly precise"
-  // (if i may be allowed the adverbial redundancy for emphasis) and does not
-  // include narrow oop slots not included in the original write interval.
+  // interface, so it is "exactly precise" (if I may be allowed the adverbial
+  // redundancy for emphasis) and does not include narrow oop slots not
+  // included in the original write interval.
   HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
   HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
   // If compressed oops were not being used, these should already be aligned
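
To make the alignment step concrete: with compressed oops, a write of
narrowOop slots can start or end mid-HeapWord, so the interval is widened
outward to word boundaries before card marking. A small worked sketch
(align_size_down/up are reimplemented here so the snippet stands alone;
the constants assume a 64-bit VM):

#include <cstdint>
#include <cstdio>

typedef uint32_t narrowOop;        // 4-byte compressed oop slot
const uintptr_t HeapWordSize = 8;  // 64-bit VM

static uintptr_t align_size_down(uintptr_t x, uintptr_t a) { return x & ~(a - 1); }
static uintptr_t align_size_up  (uintptr_t x, uintptr_t a) { return (x + a - 1) & ~(a - 1); }

int main() {
  // Suppose a copy wrote 3 narrowOop slots starting mid-word at 0x1004.
  uintptr_t start = 0x1004;                          // not HeapWord-aligned
  uintptr_t end   = start + 3 * sizeof(narrowOop);   // 0x1010
  uintptr_t aligned_start = align_size_down(start, HeapWordSize);  // 0x1000
  uintptr_t aligned_end   = align_size_up  (end,   HeapWordSize);  // 0x1010
  // Cards covering [0x1000, 0x1010) are marked: one narrowOop slot below
  // the written interval is swept in, which is harmless for an imprecise
  // card-marking barrier but is exactly what an "exactly precise" logging
  // barrier could not tolerate.
  printf("[%lx, %lx)\n", (unsigned long)aligned_start, (unsigned long)aligned_end);
  return 0;
}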
--- a/src/share/vm/memory/referenceProcessor.hpp	Fri Feb 05 12:27:12 2010 -0800
+++ b/src/share/vm/memory/referenceProcessor.hpp	Mon Feb 08 09:58:32 2010 -0800
@@ -263,10 +263,13 @@
     int                parallel_gc_threads = 1,
     bool               mt_processing = false,
     bool               discovered_list_needs_barrier = false);
+
   // RefDiscoveryPolicy values
-  enum {
+  enum DiscoveryPolicy {
     ReferenceBasedDiscovery = 0,
-    ReferentBasedDiscovery  = 1
+    ReferentBasedDiscovery  = 1,
+    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
+    DiscoveryPolicyMax      = ReferentBasedDiscovery
   };
 
   static void init_statics();
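
Aliasing the first and last enumerators as Min/Max is a common sentinel
pattern: a range check written against the sentinels stays correct when
new policies are appended between them. A minimal sketch (validate_policy
is illustrative; the actual check is the verify_interval call added to
arguments.cpp below):

enum DiscoveryPolicy {
  ReferenceBasedDiscovery = 0,
  ReferentBasedDiscovery  = 1,
  DiscoveryPolicyMin      = ReferenceBasedDiscovery,
  DiscoveryPolicyMax      = ReferentBasedDiscovery
};

// Appending a new policy and pointing DiscoveryPolicyMax at it keeps
// this check correct without further edits.
bool validate_policy(int v) {
  return v >= DiscoveryPolicyMin && v <= DiscoveryPolicyMax;
}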
--- a/src/share/vm/runtime/arguments.cpp	Fri Feb 05 12:27:12 2010 -0800
+++ b/src/share/vm/runtime/arguments.cpp	Mon Feb 08 09:58:32 2010 -0800
@@ -1487,6 +1487,20 @@
 //===========================================================================================================
 // Parsing of main arguments
 
+bool Arguments::verify_interval(uintx val, uintx min,
+                                uintx max, const char* name) {
+  // Returns true iff val is in the inclusive interval [min..max],
+  // false otherwise.
+  if (val >= min && val <= max) {
+    return true;
+  }
+  jio_fprintf(defaultStream::error_stream(),
+              "%s of " UINTX_FORMAT " is invalid; must be between " UINTX_FORMAT
+              " and " UINTX_FORMAT "\n",
+              name, val, min, max);
+  return false;
+}
+
 bool Arguments::verify_percentage(uintx value, const char* name) {
   if (value <= 100) {
     return true;
@@ -1723,6 +1737,16 @@
     status = false;
   }
 
+  status = status && verify_interval(RefDiscoveryPolicy,
+                                     ReferenceProcessor::DiscoveryPolicyMin,
+                                     ReferenceProcessor::DiscoveryPolicyMax,
+                                     "RefDiscoveryPolicy");
+
+  // Limit the lower bound of this flag to 1 as it is used in a division
+  // expression.
+  status = status && verify_interval(TLABWasteTargetPercent,
+                                     1, 100, "TLABWasteTargetPercent");
+
   return status;
 }
 
--- a/src/share/vm/runtime/arguments.hpp	Fri Feb 05 12:27:12 2010 -0800
+++ b/src/share/vm/runtime/arguments.hpp	Mon Feb 08 09:58:32 2010 -0800
@@ -336,6 +336,8 @@
   static bool is_bad_option(const JavaVMOption* option, jboolean ignore) {
     return is_bad_option(option, ignore, NULL);
   }
+  static bool verify_interval(uintx val, uintx min,
+                              uintx max, const char* name);
   static bool verify_percentage(uintx value, const char* name);
   static void describe_range_error(ArgsRange errcode);
   static ArgsRange check_memory_size(julong size, julong min_size);
--- a/src/share/vm/runtime/stubRoutines.cpp	Fri Feb 05 12:27:12 2010 -0800
+++ b/src/share/vm/runtime/stubRoutines.cpp	Mon Feb 08 09:58:32 2010 -0800
@@ -196,11 +196,19 @@
 // Default versions of arraycopy functions
 //
 
+static void gen_arraycopy_barrier_pre(oop* dest, size_t count) {
+    assert(count != 0, "count should be non-zero");
+    assert(count <= (size_t)max_intx, "count too large");
+    BarrierSet* bs = Universe::heap()->barrier_set();
+    assert(bs->has_write_ref_array_pre_opt(), "Must have pre-barrier opt");
+    bs->write_ref_array_pre(dest, (int)count);
+}
+
 static void gen_arraycopy_barrier(oop* dest, size_t count) {
     assert(count != 0, "count should be non-zero");
     BarrierSet* bs = Universe::heap()->barrier_set();
     assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-    bs->write_ref_array(MemRegion((HeapWord*)dest, (HeapWord*)(dest + count)));
+    bs->write_ref_array((HeapWord*)dest, count);
 }
 
 JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
@@ -240,6 +248,7 @@
   SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
+  gen_arraycopy_barrier_pre(dest, count);
   Copy::conjoint_oops_atomic(src, dest, count);
   gen_arraycopy_barrier(dest, count);
 JRT_END
@@ -281,6 +290,7 @@
   SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
+  gen_arraycopy_barrier_pre((oop *) dest, count);
   Copy::arrayof_conjoint_oops(src, dest, count);
   gen_arraycopy_barrier((oop *) dest, count);
 JRT_END