changeset 10869:d29cb0e3dfd9

Merge
author andrew
date Fri, 27 Nov 2020 03:51:51 +0000
parents f9f19940bf72 (current diff) 3fcf88579cd6 (diff)
children 3a93682b0b44
files
diffstat 18 files changed, 156 insertions(+), 104 deletions(-)
--- a/THIRD_PARTY_README	Tue Nov 24 05:05:37 2020 +0000
+++ b/THIRD_PARTY_README	Fri Nov 27 03:51:51 2020 +0000
@@ -155,11 +155,11 @@
 -------------------------------------------------------------------------------
 
 %% This notice is provided with respect to CUP Parser Generator for 
-Java 0.10k, which may be included with JRE 8, JDK 8, and OpenJDK 8.
+Java 0.10b, which may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
-Copyright 1996-1999 by Scott Hudson, Frank Flannery, C. Scott Ananian
+Copyright 1996-2015 by Scott Hudson, Frank Flannery, C. Scott Ananian, Michael Petter
 
 Permission to use, copy, modify, and distribute this software and its
 documentation for any purpose and without fee is hereby granted, provided
@@ -3028,7 +3028,7 @@
   Apache Commons Math 3.2
   Apache Derby 10.11.1.2
   Apache Jakarta BCEL 5.1 
-  Apache Santuario XML Security for Java 2.1.1
+  Apache Santuario XML Security for Java 2.1.3
   Apache Xalan-Java 2.7.2
   Apache Xerces Java 2.10.0 
   Apache XML Resolver 1.1 
--- a/src/os/linux/vm/osContainer_linux.cpp	Tue Nov 24 05:05:37 2020 +0000
+++ b/src/os/linux/vm/osContainer_linux.cpp	Fri Nov 27 03:51:51 2020 +0000
@@ -34,12 +34,15 @@
 
 bool  OSContainer::_is_initialized   = false;
 bool  OSContainer::_is_containerized = false;
+int   OSContainer::_active_processor_count = 1;
 julong _unlimited_memory;
 
 class CgroupSubsystem: CHeapObj<mtInternal> {
  friend class OSContainer;
 
  private:
+    volatile jlong _next_check_counter;
+
     /* mountinfo contents */
     char *_root;
     char *_mount_point;
@@ -52,6 +55,7 @@
       _root = os::strdup(root);
       _mount_point = os::strdup(mountpoint);
       _path = NULL;
+      _next_check_counter = min_jlong;
     }
 
     /*
@@ -101,6 +105,14 @@
     }
 
     char *subsystem_path() { return _path; }
+
+    bool cache_has_expired() {
+      return os::elapsed_counter() > _next_check_counter;
+    }
+
+    void set_cache_expiry_time(jlong timeout) {
+      _next_check_counter = os::elapsed_counter() + timeout;
+    }
 };
 
 CgroupSubsystem* memory = NULL;
@@ -584,6 +596,17 @@
   int cpu_count, limit_count;
   int result;
 
+  // We use a cache with a timeout to avoid performing expensive
+  // computations in the event this function is called frequently.
+  // [See 8227006].
+  if (!cpu->cache_has_expired()) {
+    if (PrintContainerInfo) {
+      tty->print_cr("OSContainer::active_processor_count (cached): %d", OSContainer::_active_processor_count);
+    }
+
+    return OSContainer::_active_processor_count;
+  }
+
   cpu_count = limit_count = os::Linux::active_processor_count();
   int quota  = cpu_quota();
   int period = cpu_period();
@@ -622,6 +645,11 @@
   if (PrintContainerInfo) {
     tty->print_cr("OSContainer::active_processor_count: %d", result);
   }
+
+  // Update the value and reset the cache timeout
+  OSContainer::_active_processor_count = result;
+  cpu->set_cache_expiry_time(OSCONTAINER_CACHE_TIMEOUT);
+
   return result;
 }
 
--- a/src/os/linux/vm/osContainer_linux.hpp	Tue Nov 24 05:05:37 2020 +0000
+++ b/src/os/linux/vm/osContainer_linux.hpp	Fri Nov 27 03:51:51 2020 +0000
@@ -31,11 +31,15 @@
 
 #define OSCONTAINER_ERROR (-2)
 
+// 20ms timeout between re-reads of _active_processor_count.
+#define OSCONTAINER_CACHE_TIMEOUT (NANOSECS_PER_SEC/50)
+
 class OSContainer: AllStatic {
 
  private:
   static bool   _is_initialized;
   static bool   _is_containerized;
+  static int    _active_processor_count;
 
  public:
   static void init();
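
The two hunks above (osContainer_linux.cpp and osContainer_linux.hpp) wrap the expensive cgroup CPU reads in a 20ms expiry cache keyed off os::elapsed_counter(). Below is a minimal standalone sketch of the same pattern, with std::chrono standing in for the HotSpot time source; CachedCpuCount and read_cpu_limit are illustrative names, not part of the patch:

#include <chrono>
#include <cstdio>

class CachedCpuCount {
 private:
  using Clock = std::chrono::steady_clock;
  Clock::time_point _next_check;  // analogous to _next_check_counter
  int _cached_value;              // analogous to _active_processor_count

  // Stand-in for the expensive computation (cpu_quota(), cpu_period(), ...).
  static int read_cpu_limit() { return 4; }

 public:
  CachedCpuCount() : _next_check(Clock::time_point::min()), _cached_value(1) {}

  bool cache_has_expired() const { return Clock::now() > _next_check; }

  void set_cache_expiry_time(Clock::duration timeout) {
    _next_check = Clock::now() + timeout;
  }

  int active_processor_count() {
    if (!cache_has_expired()) {
      return _cached_value;            // fast path: serve the cached result
    }
    _cached_value = read_cpu_limit();  // slow path: recompute
    // 20ms, mirroring OSCONTAINER_CACHE_TIMEOUT (NANOSECS_PER_SEC/50).
    set_cache_expiry_time(std::chrono::milliseconds(20));
    return _cached_value;
  }
};

int main() {
  CachedCpuCount cpu;
  std::printf("%d\n", cpu.active_processor_count());  // recomputes
  std::printf("%d\n", cpu.active_processor_count());  // cached for ~20ms
  return 0;
}

As in the patch, the first call always recomputes because the expiry time starts at its minimum value.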
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Tue Nov 24 05:05:37 2020 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Nov 27 03:51:51 2020 +0000
@@ -998,18 +998,13 @@
       // and the klass read.
       OrderAccess::loadload();
 
-      // must read from what 'p' points to in each loop.
-      Klass* k = ((volatile oopDesc*)p)->klass_or_null();
+      // Ensure klass read before size.
+      Klass* k = oop(p)->klass_or_null_acquire();
       if (k != NULL) {
         assert(k->is_klass(), "Should really be klass oop.");
         oop o = (oop)p;
         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 
-        // Bugfix for systems with weak memory model (PPC64/IA64).
-        // The object o may be an array. Acquire to make sure that the array
-        // size (third word) is consistent.
-        OrderAccess::acquire();
-
         size_t res = o->size_given_klass(k);
         res = adjustObjectSize(res);
         assert(res != 0, "Block size should not be 0");
@@ -1057,21 +1052,13 @@
       // and the klass read.
       OrderAccess::loadload();
 
-      // must read from what 'p' points to in each loop.
-      Klass* k = ((volatile oopDesc*)p)->klass_or_null();
-      // We trust the size of any object that has a non-NULL
-      // klass and (for those in the perm gen) is parsable
-      // -- irrespective of its conc_safe-ty.
+      // Ensure klass read before size.
+      Klass* k = oop(p)->klass_or_null_acquire();
       if (k != NULL) {
         assert(k->is_klass(), "Should really be klass oop.");
         oop o = (oop)p;
         assert(o->is_oop(), "Should be an oop");
 
-        // Bugfix for systems with weak memory model (PPC64/IA64).
-        // The object o may be an array. Acquire to make sure that the array
-        // size (third word) is consistent.
-        OrderAccess::acquire();
-
         size_t res = o->size_given_klass(k);
         res = adjustObjectSize(res);
         assert(res != 0, "Block size should not be 0");
@@ -1124,7 +1111,7 @@
   // and the klass read.
   OrderAccess::loadload();
 
-  Klass* k = oop(p)->klass_or_null();
+  Klass* k = oop(p)->klass_or_null_acquire();
   if (k != NULL) {
     // Ignore mark word because it may have been used to
     // chain together promoted objects (the last one
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Nov 24 05:05:37 2020 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Nov 27 03:51:51 2020 +0000
@@ -6728,7 +6728,7 @@
 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
   size_t sz = 0;
   oop p = (oop)addr;
-  if (p->klass_or_null() != NULL) {
+  if (p->klass_or_null_acquire() != NULL) {
     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
   } else {
     sz = block_size_using_printezis_bits(addr);
@@ -7186,7 +7186,7 @@
   }
   if (_bitMap->isMarked(addr)) {
     // it's marked; is it potentially uninitialized?
-    if (p->klass_or_null() != NULL) {
+    if (p->klass_or_null_acquire() != NULL) {
         // an initialized object; ignore mark word in verification below
         // since we are running concurrent with mutators
         assert(p->is_oop(true), "should be an oop");
@@ -7227,7 +7227,7 @@
     }
   } else {
     // Either a not yet marked object or an uninitialized object
-    if (p->klass_or_null() == NULL) {
+    if (p->klass_or_null_acquire() == NULL) {
       // An uninitialized object, skip to the next card, since
       // we may not be able to read its P-bits yet.
       assert(size == 0, "Initial value");
@@ -7438,7 +7438,7 @@
     assert(_skipBits == 0, "tautology");
     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
     oop p = oop(addr);
-    if (p->klass_or_null() == NULL) {
+    if (p->klass_or_null_acquire() == NULL) {
       DEBUG_ONLY(if (!_verifying) {)
         // We re-dirty the cards on which this object lies and increase
         // the _threshold so that we'll come back to scan this object
@@ -7458,7 +7458,7 @@
           if (_threshold < end_card_addr) {
             _threshold = end_card_addr;
           }
-          if (p->klass_or_null() != NULL) {
+          if (p->klass_or_null_acquire() != NULL) {
             // Redirty the range of cards...
             _mut->mark_range(redirty_range);
           } // ...else the setting of klass will dirty the card anyway.
@@ -7609,7 +7609,7 @@
     assert(_skip_bits == 0, "tautology");
     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
     oop p = oop(addr);
-    if (p->klass_or_null() == NULL) {
+    if (p->klass_or_null_acquire() == NULL) {
       // in the case of Clean-on-Enter optimization, redirty card
       // and avoid clearing card by increasing  the threshold.
       return true;
@@ -8596,7 +8596,7 @@
            "alignment problem");
 
 #ifdef ASSERT
-      if (oop(addr)->klass_or_null() != NULL) {
+      if (oop(addr)->klass_or_null_acquire() != NULL) {
         // Ignore mark word because we are running concurrent with mutators
         assert(oop(addr)->is_oop(true), "live block should be an oop");
         assert(size ==
@@ -8607,7 +8607,7 @@
 
   } else {
     // This should be an initialized object that's alive.
-    assert(oop(addr)->klass_or_null() != NULL,
+    assert(oop(addr)->klass_or_null_acquire() != NULL,
            "Should be an initialized object");
     // Ignore mark word because we are running concurrent with mutators
     assert(oop(addr)->is_oop(true), "live block should be an oop");
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Nov 24 05:05:37 2020 +0000
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Nov 27 03:51:51 2020 +0000
@@ -568,20 +568,18 @@
   // fail arbitrarily). We tell the iteration code to perform this
   // filtering when it has been determined that there has been an actual
   // allocation in this region and making it safe to check the young type.
-  bool filter_young = true;
 
-  HeapWord* stop_point =
+  bool card_processed =
     r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                         &filter_then_update_rs_oop_cl,
-                                        filter_young,
                                         card_ptr);
 
-  // If stop_point is non-null, then we encountered an unallocated region
-  // (perhaps the unfilled portion of a TLAB.)  For now, we'll dirty the
-  // card and re-enqueue: if we put off the card until a GC pause, then the
-  // unallocated portion will be filled in.  Alternatively, we might try
-  // the full complexity of the technique used in "regular" precleaning.
-  if (stop_point != NULL) {
+  // If unable to process the card then we encountered an unparsable
+  // part of the heap (e.g. a partially allocated object).  Redirty
+  // and re-enqueue: if we put off the card until a GC pause, then the
+  // allocation will have completed.
+  if (!card_processed) {
+    assert(!_g1->is_gc_active(), "Unparsable heap during GC");
     // The card might have gotten re-dirtied and re-enqueued while we
     // worked.  (In fact, it's pretty likely.)
     if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Nov 24 05:05:37 2020 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Nov 27 03:51:51 2020 +0000
@@ -407,19 +407,10 @@
   return NULL;
 }
 
-HeapWord*
-HeapRegion::
-oops_on_card_seq_iterate_careful(MemRegion mr,
-                                 FilterOutOfRegionClosure* cl,
-                                 bool filter_young,
-                                 jbyte* card_ptr) {
-  // Currently, we should only have to clean the card if filter_young
-  // is true and vice versa.
-  if (filter_young) {
-    assert(card_ptr != NULL, "pre-condition");
-  } else {
-    assert(card_ptr == NULL, "pre-condition");
-  }
+bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
+                                                  FilterOutOfRegionClosure* cl,
+                                                  jbyte* card_ptr) {
+  assert(card_ptr != NULL, "pre-condition");
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   // If we're within a stop-world GC, then we might look at a card in a
@@ -430,7 +421,9 @@
   } else {
     mr = mr.intersection(used_region());
   }
-  if (mr.is_empty()) return NULL;
+  if (mr.is_empty()) {
+    return true;
+  }
   // Otherwise, find the obj that extends onto mr.start().
 
   // The intersection of the incoming mr (for the card) and the
@@ -439,27 +432,21 @@
   // G1CollectedHeap.cpp that allocates a new region sets the
   // is_young tag on the region before allocating. Thus we
   // safely know if this region is young.
-  if (is_young() && filter_young) {
-    return NULL;
+  if (is_young()) {
+    return true;
   }
 
-  assert(!is_young(), "check value of filter_young");
-
   // We can only clean the card here, after we make the decision that
-  // the card is not young. And we only clean the card if we have been
-  // asked to (i.e., card_ptr != NULL).
-  if (card_ptr != NULL) {
-    *card_ptr = CardTableModRefBS::clean_card_val();
-    // We must complete this write before we do any of the reads below.
-    OrderAccess::storeload();
-  }
+  // the card is not young.
+  *card_ptr = CardTableModRefBS::clean_card_val();
+  // We must complete this write before we do any of the reads below.
+  OrderAccess::storeload();
 
   // Cache the boundaries of the memory region in some const locals
   HeapWord* const start = mr.start();
   HeapWord* const end = mr.end();
 
-  // We used to use "block_start_careful" here.  But we're actually happy
-  // to update the BOT while we do this...
+  // Update BOT as needed while finding start of (potential) object.
   HeapWord* cur = block_start(start);
   assert(cur <= start, "Postcondition");
 
@@ -471,7 +458,9 @@
     obj = oop(cur);
     if (obj->klass_or_null() == NULL) {
       // Ran into an unparseable point.
-      return cur;
+      assert(!g1h->is_gc_active(),
+             err_msg("Unparsable heap during GC at " PTR_FORMAT, p2i(cur)));
+      return false;
     }
     // Otherwise...
     next = cur + block_size(cur);
@@ -488,7 +477,9 @@
     assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
     if (obj->klass_or_null() == NULL) {
       // Ran into an unparseable point.
-      return cur;
+      assert(!g1h->is_gc_active(),
+             err_msg("Unparsable heap during GC at " PTR_FORMAT, p2i(cur)));
+      return false;
     }
 
     // Advance the current pointer. "obj" still points to the object to iterate.
@@ -507,7 +498,7 @@
     }
   } while (cur < end);
 
-  return NULL;
+  return true;
 }
 
 // Code roots support
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Tue Nov 24 05:05:37 2020 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Nov 27 03:51:51 2020 +0000
@@ -718,16 +718,17 @@
   HeapWord*
   object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
 
-  // filter_young: if true and the region is a young region then we
-  // skip the iteration.
-  // card_ptr: if not NULL, and we decide that the card is not young
-  // and we iterate over it, we'll clean the card before we start the
-  // iteration.
-  HeapWord*
-  oops_on_card_seq_iterate_careful(MemRegion mr,
-                                   FilterOutOfRegionClosure* cl,
-                                   bool filter_young,
-                                   jbyte* card_ptr);
+  // Iterate over the card in the card designated by card_ptr,
+  // applying cl to all references in the region.
+  // mr: the memory region covered by the card.
+  // card_ptr: if we decide that the card is not young and we iterate
+  // over it, we'll clean the card before we start the iteration.
+  // Returns true if card was successfully processed, false if an
+  // unparsable part of the heap was encountered, which should only
+  // happen when invoked concurrently with the mutator.
+  bool oops_on_card_seq_iterate_careful(MemRegion mr,
+                                        FilterOutOfRegionClosure* cl,
+                                        jbyte* card_ptr);
 
   size_t recorded_rs_length() const        { return _recorded_rs_length; }
   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
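
Under the new signature the caller no longer inspects a stop point; it only needs the success bit, and the g1RemSet.cpp hunk above redirties the card when it gets false. A minimal standalone sketch of that contract follows; Block, scan_card and the card states are illustrative, not HotSpot types:

#include <cstdio>
#include <vector>

enum CardState { kDirty, kClean };

struct Block {
  bool published;  // analogous to klass_or_null() != NULL
  int size;
};

// Returns true if every block on the card was parsable, false otherwise
// (mirrors the new oops_on_card_seq_iterate_careful() return value).
bool scan_card(const std::vector<Block>& blocks) {
  for (const Block& b : blocks) {
    if (!b.published) {
      return false;  // unparsable point: give up on the whole card
    }
    // ... apply the closure to the b.size words of this block ...
  }
  return true;
}

int main() {
  std::vector<Block> card = {{true, 2}, {false, 0}};
  CardState state = kClean;
  if (!scan_card(card)) {
    state = kDirty;  // redirty and re-enqueue, as in concurrent refinement
  }
  std::printf("%s\n", state == kDirty ? "redirtied" : "processed");
  return 0;
}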
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Tue Nov 24 05:05:37 2020 +0000
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Fri Nov 27 03:51:51 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -286,8 +286,6 @@
     return NULL;
   }
 
-  AllocTracer::send_allocation_in_new_tlab_event(klass, obj, new_tlab_size * HeapWordSize, size * HeapWordSize, Thread::current());
-
   if (ZeroTLAB) {
     // ..and clear it.
     Copy::zero_to_words(obj, new_tlab_size);
--- a/src/share/vm/gc_interface/collectedHeap.inline.hpp	Tue Nov 24 05:05:37 2020 +0000
+++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp	Fri Nov 27 03:51:51 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,8 +65,22 @@
   }
 }
 
-// Support for jvmti and dtrace
+inline void send_jfr_allocation_event(KlassHandle klass, HeapWord* obj, size_t size) {
+  Thread* t = Thread::current();
+  ThreadLocalAllocBuffer& tlab = t->tlab();
+  if (obj == tlab.start()) {
+    // allocate in new TLAB
+    size_t new_tlab_size = tlab.hard_size_bytes();
+    AllocTracer::send_allocation_in_new_tlab_event(klass, obj, new_tlab_size, size * HeapWordSize, t);
+  } else if (!tlab.in_used(obj)) {
+    // allocate outside TLAB
+    AllocTracer::send_allocation_outside_tlab_event(klass, obj, size * HeapWordSize, t);
+  }
+}
+
+// Support for jvmti, dtrace and jfr
 inline void post_allocation_notify(KlassHandle klass, oop obj, int size) {
+  send_jfr_allocation_event(klass, (HeapWord*)obj, size);
   // support low memory notifications (no-op if not enabled)
   LowMemoryDetector::detect_low_memory_for_collected_pools();
 
@@ -137,8 +151,6 @@
            "Unexpected exception, will result in uninitialized storage");
     THREAD->incr_allocated_bytes(size * HeapWordSize);
 
-    AllocTracer::send_allocation_outside_tlab_event(klass, result, size * HeapWordSize, Thread::current());
-
     return result;
   }
 
--- a/src/share/vm/memory/threadLocalAllocBuffer.hpp	Tue Nov 24 05:05:37 2020 +0000
+++ b/src/share/vm/memory/threadLocalAllocBuffer.hpp	Fri Nov 27 03:51:51 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -120,6 +120,8 @@
   size_t free() const                            { return pointer_delta(end(), top()); }
   // Don't discard tlab if remaining space is larger than this.
   size_t refill_waste_limit() const              { return _refill_waste_limit; }
+  size_t hard_size_bytes() const                 { return pointer_delta(hard_end(), start(), 1); }
+  bool in_used(HeapWord* addr) const             { return addr >= start() && addr < top(); }
 
   // Allocate size HeapWords. The memory is NOT initialized to zero.
   inline HeapWord* allocate(size_t size);
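
The two new helpers let send_jfr_allocation_event() in collectedHeap.inline.hpp classify an allocation address against the thread's TLAB: an object at start() means the allocation just refilled a new TLAB, and an address outside [start(), top()) means the allocation bypassed the TLAB entirely. A standalone sketch of that classification, assuming 8-byte HeapWords; Tlab and classify are illustrative stand-ins, not ThreadLocalAllocBuffer itself:

#include <cstdio>

typedef unsigned long HeapWordAddr;  // word-indexed address, for illustration

struct Tlab {
  HeapWordAddr start;     // first word of the buffer
  HeapWordAddr top;       // next free word (end of the used portion)
  HeapWordAddr hard_end;  // physical end of the buffer

  // Mirror the new ThreadLocalAllocBuffer helpers.
  unsigned long hard_size_bytes() const { return (hard_end - start) * 8; }
  bool in_used(HeapWordAddr addr) const { return addr >= start && addr < top; }
};

const char* classify(const Tlab& tlab, HeapWordAddr obj) {
  if (obj == tlab.start) {
    return "new TLAB";      // first object in a freshly allocated TLAB
  }
  if (!tlab.in_used(obj)) {
    return "outside TLAB";  // direct heap allocation, TLAB bypassed
  }
  return "inside TLAB";     // ordinary fast-path allocation, no event
}

int main() {
  Tlab tlab = {1000, 1040, 1128};
  std::printf("%s\n", classify(tlab, 1000));  // new TLAB
  std::printf("%s\n", classify(tlab, 1016));  // inside TLAB
  std::printf("%s\n", classify(tlab, 5000));  // outside TLAB
  return 0;
}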
--- a/src/share/vm/oops/oop.hpp	Tue Nov 24 05:05:37 2020 +0000
+++ b/src/share/vm/oops/oop.hpp	Fri Nov 27 03:51:51 2020 +0000
@@ -83,6 +83,7 @@
 
   Klass* klass() const;
   Klass* klass_or_null() const volatile;
+  Klass* klass_or_null_acquire() const volatile;
   Klass** klass_addr();
   narrowKlass* compressed_klass_addr();
 
--- a/src/share/vm/oops/oop.inline.hpp	Tue Nov 24 05:05:37 2020 +0000
+++ b/src/share/vm/oops/oop.inline.hpp	Fri Nov 27 03:51:51 2020 +0000
@@ -78,7 +78,6 @@
 }
 
 inline Klass* oopDesc::klass_or_null() const volatile {
-  // can be NULL in CMS
   if (UseCompressedClassPointers) {
     return Klass::decode_klass(_metadata._compressed_klass);
   } else {
@@ -86,6 +85,17 @@
   }
 }
 
+inline Klass* oopDesc::klass_or_null_acquire() const volatile {
+  if (UseCompressedClassPointers) {
+    // Workaround for non-const load_acquire parameter.
+    const volatile narrowKlass* addr = &_metadata._compressed_klass;
+    volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
+    return Klass::decode_klass(OrderAccess::load_acquire(xaddr));
+  } else {
+    return (Klass*)OrderAccess::load_ptr_acquire(&_metadata._klass);
+  }
+}
+
 inline int oopDesc::klass_gap_offset_in_bytes() {
   assert(UseCompressedClassPointers, "only applicable to compressed klass pointers");
   return oopDesc::klass_offset_in_bytes() + sizeof(narrowKlass);
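
klass_or_null_acquire() folds the previously separate OrderAccess::acquire() (removed from compactibleFreeListSpace.cpp above) into the klass load itself: the acquire load pairs with the release store performed when an object's klass pointer is published, so a reader that observes a non-NULL klass is guaranteed to also observe the object body written before it, such as an array length. A hedged standalone sketch of that pairing, with std::atomic standing in for OrderAccess; Object, init_object and block_size are illustrative names only:

#include <atomic>
#include <cassert>
#include <thread>

struct Object {
  std::atomic<int*> klass{nullptr};  // stands in for _metadata._klass
  int length = 0;                    // e.g. an array length, written first
};

void init_object(Object* o, int* k) {
  o->length = 3;                                 // plain store of the body
  o->klass.store(k, std::memory_order_release);  // publish: klass last
}

int block_size(const Object* o) {
  // Acquire pairs with the release above: observing a non-null klass
  // guarantees the body stores that preceded it are visible too.
  int* k = o->klass.load(std::memory_order_acquire);
  if (k == nullptr) {
    return 0;  // not yet initialized; callers retry or fall back (P-bits)
  }
  return o->length;  // safe to read after the acquire
}

int main() {
  Object o;
  int klass_storage = 42;
  std::thread writer([&] { init_object(&o, &klass_storage); });
  int sz = block_size(&o);  // either 0 (unpublished) or 3, never garbage
  assert(sz == 0 || sz == 3);
  writer.join();
  assert(block_size(&o) == 3);
  return 0;
}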
--- a/src/share/vm/opto/c2_globals.hpp	Tue Nov 24 05:05:37 2020 +0000
+++ b/src/share/vm/opto/c2_globals.hpp	Fri Nov 27 03:51:51 2020 +0000
@@ -60,10 +60,10 @@
 
 #define C2_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct) \
                                                                             \
-  develop(bool, StressLCM, false,                                           \
+  diagnostic(bool, StressLCM, false,                                        \
           "Randomize instruction scheduling in LCM")                        \
                                                                             \
-  develop(bool, StressGCM, false,                                           \
+  diagnostic(bool, StressGCM, false,                                        \
           "Randomize instruction scheduling in GCM")                        \
                                                                             \
   notproduct(intx, CompileZapFirst, 0,                                      \
--- a/src/share/vm/runtime/objectMonitor.cpp	Tue Nov 24 05:05:37 2020 +0000
+++ b/src/share/vm/runtime/objectMonitor.cpp	Fri Nov 27 03:51:51 2020 +0000
@@ -2316,6 +2316,7 @@
   _next     = NULL;
   _prev     = NULL;
   _notified = 0;
+  _notifier_tid = 0;
   TState    = TS_RUN ;
   _thread   = thread;
   _event    = thread->_ParkEvent ;
--- a/test/compiler/membars/DekkerTest.java	Tue Nov 24 05:05:37 2020 +0000
+++ b/test/compiler/membars/DekkerTest.java	Fri Nov 27 03:51:51 2020 +0000
@@ -25,9 +25,15 @@
  * @test
  * @bug 8007898
  * @summary Incorrect optimization of Memory Barriers in Matcher::post_store_load_barrier().
- * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest
- * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest
- * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest
+ * @run main/othervm -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+IgnoreUnrecognizedVMOptions
+ *      -XX:-TieredCompilation -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM
+ *      DekkerTest
+ * @run main/othervm -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+IgnoreUnrecognizedVMOptions
+ *      -XX:-TieredCompilation -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM
+ *      DekkerTest
+ * @run main/othervm -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+IgnoreUnrecognizedVMOptions
+ *      -XX:-TieredCompilation -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM
+ *      DekkerTest
  * @author Martin Doerr martin DOT doerr AT sap DOT com
  *
  * Run 3 times since the failure is intermittent.
--- a/test/runtime/containers/docker/DockerBasicTest.java	Tue Nov 24 05:05:37 2020 +0000
+++ b/test/runtime/containers/docker/DockerBasicTest.java	Fri Nov 27 03:51:51 2020 +0000
@@ -37,8 +37,6 @@
 
 public class DockerBasicTest {
     private static final String imageNameAndTag = "jdk8-internal:test";
-    // Diganostics: set to false to examine image after the test
-    private static final boolean removeImageAfterTest = true;
 
     public static void main(String[] args) throws Exception {
         if (!DockerTestUtils.canTestDocker()) {
@@ -50,8 +48,9 @@
             testJavaVersion();
             testHelloDocker();
         } finally {
-            if (removeImageAfterTest)
+            if (!DockerTestUtils.RETAIN_IMAGE_AFTER_TEST) {
                 DockerTestUtils.removeDockerImage(imageNameAndTag);
+            }
         }
     }
 
--- a/test/testlibrary/com/oracle/java/testlibrary/DockerTestUtils.java	Tue Nov 24 05:05:37 2020 +0000
+++ b/test/testlibrary/com/oracle/java/testlibrary/DockerTestUtils.java	Fri Nov 27 03:51:51 2020 +0000
@@ -47,8 +47,23 @@
     private static boolean isDockerEngineAvailable = false;
     private static boolean wasDockerEngineChecked = false;
 
-    // Diagnostics: set to true to enable more diagnostic info
-    private static final boolean DEBUG = false;
+    // Use this property to specify docker location on your system.
+    // E.g.: "/usr/local/bin/docker".
+    private static final String DOCKER_COMMAND =
+        System.getProperty("jdk.test.docker.command", "docker");
+
+    // Set this property to true to retain image after test. By default
+    // images are removed after test execution completes.
+    // Retaining the image can be useful for diagnostics and image inspection.
+    // E.g.: start image interactively: docker run -it <IMAGE_NAME>.
+    public static final boolean RETAIN_IMAGE_AFTER_TEST =
+        Boolean.getBoolean("jdk.test.docker.retain.image");
+
+    // Path to a JDK under test.
+    // This may be useful when developing tests on non-Linux platforms.
+    public static final String JDK_UNDER_TEST =
+        System.getProperty("jdk.test.docker.jdk", Utils.TEST_JDK);
+
 
     /**
      * Optimized check of whether the docker engine is available in a given
@@ -96,7 +111,7 @@
      */
     private static boolean isDockerEngineAvailableCheck() throws Exception {
         try {
-            execute("docker", "ps")
+            execute(DOCKER_COMMAND, "ps")
                 .shouldHaveExitValue(0)
                 .shouldContain("CONTAINER")
                 .shouldContain("IMAGE");
@@ -127,7 +142,7 @@
             throw new RuntimeException("The docker build directory already exists: " + buildDir);
         }
 
-        Path jdkSrcDir = Paths.get(Utils.TEST_JDK);
+        Path jdkSrcDir = Paths.get(JDK_UNDER_TEST);
         Path jdkDstDir = buildDir.resolve("jdk");
 
         Files.createDirectories(jdkDstDir);
@@ -157,7 +172,7 @@
                            DockerfileConfig.getBaseImageVersion());
 
         // Build the docker
-        execute("docker", "build", "--no-cache", "--tag", imageName, buildDir.toString())
+        execute(DOCKER_COMMAND, "build", "--no-cache", "--tag", imageName, buildDir.toString())
             .shouldHaveExitValue(0)
             .shouldContain("Successfully built");
     }
@@ -174,7 +189,7 @@
     public static OutputAnalyzer dockerRunJava(DockerRunOptions opts) throws Exception {
         ArrayList<String> cmd = new ArrayList<>();
 
-        cmd.add("docker");
+        cmd.add(DOCKER_COMMAND);
         cmd.add("run");
         if (opts.tty)
             cmd.add("--tty=true");
@@ -200,11 +215,10 @@
      * Remove docker image
      *
      * @param imageNameAndTag name and tag of the image to remove
-     * @return output of the command
      * @throws Exception
      */
-    public static OutputAnalyzer removeDockerImage(String imageNameAndTag) throws Exception {
-        return execute("docker", "rmi", "--force", imageNameAndTag);
+    public static void removeDockerImage(String imageNameAndTag) throws Exception {
+        execute(DOCKER_COMMAND, "rmi", "--force", imageNameAndTag);
     }