changeset 3771:0c49af52ff2c

Merge
author jwilhelm
date Mon, 26 Mar 2012 13:22:38 +0200
parents bc8881f974b8 (current diff) cc74fa5a91a9 (diff)
children 08bbee70226f
files
diffstat 13 files changed, 134 insertions(+), 64 deletions(-)
--- a/make/jprt.properties	Fri Mar 23 22:34:22 2012 -0700
+++ b/make/jprt.properties	Mon Mar 26 13:22:38 2012 +0200
@@ -446,6 +446,7 @@
 
 jprt.test.targets.jdk8=${jprt.test.targets.standard}
 jprt.test.targets.jdk7=${jprt.test.targets.standard}
+jprt.test.targets.jdk7u4=${jprt.test.targets.jdk7}
 jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
 
 # The default test/Makefile targets that should be run
@@ -505,5 +506,6 @@
 
 jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
+jprt.make.rule.test.targets.jdk7u4=${jprt.make.rule.test.targets.jdk7}
 jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
 
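Note: the two jdk7u4 lines above simply alias the existing jdk7 target lists, so the indirect lookups ${jprt.test.targets.${jprt.tools.default.release}} and ${jprt.make.rule.test.targets.${jprt.tools.default.release}} resolve for a jdk7u4 release without duplicating any target definitions.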
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Fri Mar 23 22:34:22 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Mon Mar 26 13:22:38 2012 +0200
@@ -285,7 +285,7 @@
   // that the result is the same during all mixed GCs that follow a cycle.
 
   const size_t region_num = (size_t) _length;
-  const size_t gc_num = (size_t) G1MaxMixedGCNum;
+  const size_t gc_num = (size_t) G1MixedGCCountTarget;
   size_t result = region_num / gc_num;
   // emulate ceiling
   if (result * gc_num < region_num) {
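Flag rename aside, this function sizes each mixed-GC increment as the ceiling of region_num / gc_num. A minimal standalone sketch of the idiom (not HotSpot code):

    #include <cstddef>

    // Integer ceiling division: the smallest n such that n * gc_num >= region_num.
    static size_t ceil_div(size_t region_num, size_t gc_num) {
      size_t result = region_num / gc_num;  // floor division
      if (result * gc_num < region_num) {   // a remainder was truncated
        result += 1;                        // round up
      }
      return result;
    }
    // e.g. ceil_div(10, 4) == 3, so a 10-region candidate list is consumed
    // in at most 4 mixed GCs of up to 3 regions each.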
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Fri Mar 23 22:34:22 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Mon Mar 26 13:22:38 2012 +0200
@@ -155,7 +155,7 @@
 
           CMCheckpointRootsFinalClosure final_cl(_cm);
           sprintf(verbose_str, "GC remark");
-          VM_CGC_Operation op(&final_cl, verbose_str);
+          VM_CGC_Operation op(&final_cl, verbose_str, true /* needs_pll */);
           VMThread::execute(&op);
         }
         if (cm()->restart_for_overflow() &&
@@ -189,7 +189,7 @@
 
         CMCleanUp cl_cl(_cm);
         sprintf(verbose_str, "GC cleanup");
-        VM_CGC_Operation op(&cl_cl, verbose_str);
+        VM_CGC_Operation op(&cl_cl, verbose_str, false /* needs_pll */);
         VMThread::execute(&op);
       } else {
         // We don't want to update the marking status if a GC pause
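VM_CGC_Operation now takes a needs_pll argument: the remark pause passes true because it performs java.lang.ref reference processing and so must hold the pending list lock (PLL), while the cleanup pause passes false and can skip the acquire/release round-trip with the surrogate locker thread (see the vm_operations_g1 changes below).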
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Mar 23 22:34:22 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Mar 26 13:22:38 2012 +0200
@@ -993,7 +993,7 @@
     // iteration (after taking the Heap_lock).
     result = _mutator_alloc_region.attempt_allocation(word_size,
                                                       false /* bot_updates */);
-    if (result != NULL ){
+    if (result != NULL) {
       return result;
     }
 
@@ -2437,20 +2437,22 @@
                                  true,  /* should_initiate_conc_mark */
                                  g1_policy()->max_pause_time_ms(),
                                  cause);
+
       VMThread::execute(&op);
       if (!op.pause_succeeded()) {
-        // Another GC got scheduled and prevented us from scheduling
-        // the initial-mark GC. It's unlikely that the GC that
-        // pre-empted us was also an initial-mark GC. So, we'll retry
-        // the initial-mark GC.
-
         if (full_gc_count_before == total_full_collections()) {
-          retry_gc = true;
+          retry_gc = op.should_retry_gc();
         } else {
           // A Full GC happened while we were trying to schedule the
           // initial-mark GC. No point in starting a new cycle given
           // that the whole heap was collected anyway.
         }
+
+        if (retry_gc) {
+          if (GC_locker::is_active_and_needs_gc()) {
+            GC_locker::stall_until_clear();
+          }
+        }
       }
     } else {
       if (cause == GCCause::_gc_locker
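Condensed, the new control flow around an initial-mark request looks roughly like the loop below (names taken from the hunk above; surrounding details elided):

    bool retry_gc;
    do {
      retry_gc = false;
      VM_G1IncCollectionPause op(gc_count_before, 0 /* word_size */,
                                 true /* should_initiate_conc_mark */,
                                 g1_policy()->max_pause_time_ms(), cause);
      VMThread::execute(&op);
      if (!op.pause_succeeded() &&
          full_gc_count_before == total_full_collections()) {
        // The operation itself decides whether a retry makes sense.
        retry_gc = op.should_retry_gc();
      }
      if (retry_gc && GC_locker::is_active_and_needs_gc()) {
        GC_locker::stall_until_clear();  // wait out the GC locker, then retry
      }
    } while (retry_gc);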
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Mar 23 22:34:22 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Mar 26 13:22:38 2012 +0200
@@ -2608,7 +2608,7 @@
   size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes();
   size_t capacity_bytes = _g1->capacity();
   double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
-  double threshold = (double) G1OldReclaimableThresholdPercent;
+  double threshold = (double) G1HeapWastePercent;
   if (perc < threshold) {
     ergo_verbose4(ErgoMixedGCs,
               false_action_str,
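The renamed flag keeps the same test: mixed GCs continue only while reclaimable space is at least G1HeapWastePercent of the heap, i.e. G1 is willing to leave up to that share uncollected. A sketch of the cut-off; with the default of 5 on a 1 GB heap the threshold is about 51 MiB:

    // Continue mixed GCs only while reclaimable space is at least
    // heap_waste_percent of capacity (sketch of the test above).
    bool continue_mixed_gcs(size_t reclaimable_bytes, size_t capacity_bytes,
                            uintx heap_waste_percent /* G1HeapWastePercent */) {
      double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
      return perc >= (double) heap_waste_percent;
    }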
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri Mar 23 22:34:22 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Mon Mar 26 13:22:38 2012 +0200
@@ -299,17 +299,16 @@
           "Percentage (0-100) of the heap size to use as maximum "          \
           "young gen size.")                                                \
                                                                             \
-  develop(uintx, G1OldCSetRegionLiveThresholdPercent, 95,                   \
+  develop(uintx, G1OldCSetRegionLiveThresholdPercent, 90,                   \
           "Threshold for regions to be added to the collection set. "       \
           "Regions with more live bytes that this will not be collected.")  \
                                                                             \
-  develop(uintx, G1OldReclaimableThresholdPercent, 1,                       \
-          "Threshold for the remaining old reclaimable bytes, expressed "   \
-          "as a percentage of the heap size. If the old reclaimable bytes " \
-          "are under this we will not collect them with more mixed GCs.")   \
+  product(uintx, G1HeapWastePercent, 5,                                     \
+          "Amount of space, expressed as a percentage of the heap size, "   \
+          "that G1 is willing not to collect to avoid expensive GCs.")      \
                                                                             \
-  develop(uintx, G1MaxMixedGCNum, 4,                                        \
-          "The maximum desired number of mixed GCs after a marking cycle.") \
+  product(uintx, G1MixedGCCountTarget, 4,                                   \
+          "The target number of mixed GCs after a marking cycle.")          \
                                                                             \
   develop(uintx, G1OldCSetRegionThresholdPercent, 10,                       \
           "An upper bound for the number of old CSet regions expressed "    \
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Mar 23 22:34:22 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Mon Mar 26 13:22:38 2012 +0200
@@ -34,7 +34,8 @@
 VM_G1CollectForAllocation::VM_G1CollectForAllocation(
                                                   unsigned int gc_count_before,
                                                   size_t word_size)
-  : VM_G1OperationWithAllocRequest(gc_count_before, word_size) {
+  : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
+                                   GCCause::_allocation_failure) {
   guarantee(word_size > 0, "an allocation should always be requested");
 }
 
@@ -57,9 +58,10 @@
                                       bool           should_initiate_conc_mark,
                                       double         target_pause_time_ms,
                                       GCCause::Cause gc_cause)
-  : VM_G1OperationWithAllocRequest(gc_count_before, word_size),
+  : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
     _should_initiate_conc_mark(should_initiate_conc_mark),
     _target_pause_time_ms(target_pause_time_ms),
+    _should_retry_gc(false),
     _full_collections_completed_before(0) {
   guarantee(target_pause_time_ms > 0.0,
             err_msg("target_pause_time_ms = %1.6lf should be positive",
@@ -70,6 +72,22 @@
   _gc_cause = gc_cause;
 }
 
+bool VM_G1IncCollectionPause::doit_prologue() {
+  bool res = VM_GC_Operation::doit_prologue();
+  if (!res) {
+    if (_should_initiate_conc_mark) {
+      // The prologue can fail for a couple of reasons. The first is that another GC
+      // got scheduled and prevented the scheduling of the initial mark GC. The
+      // second is that the GC locker may be active and the heap can't be expanded.
+      // In both cases we want to retry the GC so that the initial mark pause is
+      // actually scheduled. In the second case, however, we should stall until
+      // the GC locker is no longer active and then retry the initial mark GC.
+      _should_retry_gc = true;
+    }
+  }
+  return res;
+}
+
 void VM_G1IncCollectionPause::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark ||
@@ -106,11 +124,25 @@
     // next GC pause to be an initial mark; it returns false if a
     // marking cycle is already in progress.
     //
-    // If a marking cycle is already in progress just return and skip
-    // the pause - the requesting thread should block in doit_epilogue
-    // until the marking cycle is complete.
+    // If a marking cycle is already in progress just return and skip the
+    // pause below - if the reason for requesting this initial mark pause
+    // was due to a System.gc() then the requesting thread should block in
+    // doit_epilogue() until the marking cycle is complete.
+    //
+    // If this initial mark pause was requested as part of a humongous
+    // allocation then we know that the marking cycle must just have
+    // been started by another thread (possibly also allocating a humongous
+    // object) as there was no active marking cycle when the requesting
+    // thread checked before calling collect() in
+    // attempt_allocation_humongous(). Retrying the GC, in this case,
+    // will cause the requesting thread to spin inside collect() until the
+    // just started marking cycle is complete - which may be a while. So
+    // we do NOT retry the GC.
     if (!res) {
-      assert(_word_size == 0, "ExplicitGCInvokesConcurrent shouldn't be allocating");
+      assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
+      if (_gc_cause != GCCause::_g1_humongous_allocation) {
+        _should_retry_gc = true;
+      }
       return;
     }
   }
@@ -123,6 +155,13 @@
                                       true /* expect_null_cur_alloc_region */);
   } else {
     assert(_result == NULL, "invariant");
+    if (!_pause_succeeded) {
+      // Another possible reason for the pause not being successful
+      // is that, again, the GC locker is active (and has become active
+      // since the prologue was executed). In this case we should retry
+      // the pause after waiting for the GC locker to become inactive.
+      _should_retry_gc = true;
+    }
   }
 }
 
@@ -168,6 +207,7 @@
 }
 
 void VM_CGC_Operation::acquire_pending_list_lock() {
+  assert(_needs_pll, "don't call this otherwise");
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
   ConcurrentMarkThread::slt()->
@@ -175,6 +215,7 @@
 }
 
 void VM_CGC_Operation::release_and_notify_pending_list_lock() {
+  assert(_needs_pll, "don't call this otherwise");
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
   ConcurrentMarkThread::slt()->
@@ -198,7 +239,9 @@
 bool VM_CGC_Operation::doit_prologue() {
   // Note the relative order of the locks must match that in
   // VM_GC_Operation::doit_prologue() or deadlocks can occur
-  acquire_pending_list_lock();
+  if (_needs_pll) {
+    acquire_pending_list_lock();
+  }
 
   Heap_lock->lock();
   SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
@@ -210,5 +253,7 @@
   // VM_GC_Operation::doit_epilogue()
   SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
   Heap_lock->unlock();
-  release_and_notify_pending_list_lock();
+  if (_needs_pll) {
+    release_and_notify_pending_list_lock();
+  }
 }
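Taken together, _should_retry_gc is set in three places: in doit_prologue() when the prologue fails for an initial-mark request (another GC was scheduled first, or the GC locker blocked it); in doit() when a marking cycle is already in progress and the cause is not a humongous allocation; and after the pause when _pause_succeeded is false (typically because the GC locker went active after the prologue ran). The caller in g1CollectedHeap.cpp reads the flag via should_retry_gc() and stalls on the GC locker before retrying, as shown earlier.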
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Fri Mar 23 22:34:22 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Mon Mar 26 13:22:38 2012 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,8 +43,9 @@
 
 public:
   VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
-                                 size_t       word_size)
-    : VM_GC_Operation(gc_count_before, GCCause::_allocation_failure),
+                                 size_t       word_size,
+                                 GCCause::Cause gc_cause)
+    : VM_GC_Operation(gc_count_before, gc_cause),
       _word_size(word_size), _result(NULL), _pause_succeeded(false) { }
   HeapWord* result() { return _result; }
   bool pause_succeeded() { return _pause_succeeded; }
@@ -77,6 +78,7 @@
 class VM_G1IncCollectionPause: public VM_G1OperationWithAllocRequest {
 private:
   bool         _should_initiate_conc_mark;
+  bool         _should_retry_gc;
   double       _target_pause_time_ms;
   unsigned int _full_collections_completed_before;
 public:
@@ -86,11 +88,13 @@
                           double         target_pause_time_ms,
                           GCCause::Cause gc_cause);
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
+  virtual bool doit_prologue();
   virtual void doit();
   virtual void doit_epilogue();
   virtual const char* name() const {
     return "garbage-first incremental collection pause";
   }
+  bool should_retry_gc() const { return _should_retry_gc; }
 };
 
 // Concurrent GC stop-the-world operations such as remark and cleanup;
@@ -98,6 +102,7 @@
 class VM_CGC_Operation: public VM_Operation {
   VoidClosure* _cl;
   const char* _printGCMessage;
+  bool _needs_pll;
 
 protected:
   // java.lang.ref.Reference support
@@ -105,8 +110,8 @@
   void release_and_notify_pending_list_lock();
 
 public:
-  VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg)
-    : _cl(cl), _printGCMessage(printGCMsg) { }
+  VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg, bool needs_pll)
+    : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll) { }
   virtual VMOp_Type type() const { return VMOp_CGC_Operation; }
   virtual void doit();
   virtual bool doit_prologue();
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Fri Mar 23 22:34:22 2012 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Mon Mar 26 13:22:38 2012 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
 
+#include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 
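psOldGen.hpp is now included directly rather than picked up transitively, presumably because the inline promotion code in this header refers to the old generation; relying on another header to drag in the declaration is fragile.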
--- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Fri Mar 23 22:34:22 2012 -0700
+++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Mon Mar 26 13:22:38 2012 +0200
@@ -1,6 +1,6 @@
 
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -91,29 +91,37 @@
     MutableSpace *s = ls->space();
     if (s->top() < top()) { // For all spaces preceding the one containing top()
       if (s->free_in_words() > 0) {
-        size_t area_touched_words = pointer_delta(s->end(), s->top());
-        CollectedHeap::fill_with_object(s->top(), area_touched_words);
+        intptr_t cur_top = (intptr_t)s->top();
+        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
+        while (words_left_to_fill > 0) {
+          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
+          assert(words_to_fill >= CollectedHeap::min_fill_size(),
+            err_msg("Remaining size ("SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
+            words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size()));
+          CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
+          if (!os::numa_has_static_binding()) {
+            size_t touched_words = words_to_fill;
 #ifndef ASSERT
-        if (!ZapUnusedHeapArea) {
-          area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
-                                    area_touched_words);
-        }
+            if (!ZapUnusedHeapArea) {
+              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
+                touched_words);
+            }
 #endif
-        if (!os::numa_has_static_binding()) {
-          MemRegion invalid;
-          HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
-          HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
-                                                       os::vm_page_size());
-          if (crossing_start != crossing_end) {
-            // If object header crossed a small page boundary we mark the area
-            // as invalid rounding it to a page_size().
-            HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
-            HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
-                                 s->end());
-            invalid = MemRegion(start, end);
+            MemRegion invalid;
+            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
+            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
+            if (crossing_start != crossing_end) {
+              // If object header crossed a small page boundary we mark the area
+              // as invalid rounding it to a page_size().
+              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
+              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
+              invalid = MemRegion(start, end);
+            }
+
+            ls->add_invalid_region(invalid);
           }
-
-          ls->add_invalid_region(invalid);
+          cur_top = cur_top + (words_to_fill * HeapWordSize);
+          words_left_to_fill -= words_to_fill;
         }
       }
     } else {
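The key change: a single fill_with_object() call cannot exceed filler_array_max_size() words, so large free tails are now filled in a loop of maximal chunks rather than with one oversized object. A minimal sketch of the chunking (assuming the HotSpot helpers named in the hunk):

    // Fill [top, end) with filler objects one chunk at a time, never
    // exceeding the maximum filler array size (sketch, not drop-in code).
    void fill_in_chunks(HeapWord* top, HeapWord* end) {
      size_t words_left = pointer_delta(end, top);
      while (words_left > 0) {
        size_t chunk = MIN2(words_left, CollectedHeap::filler_array_max_size());
        CollectedHeap::fill_with_object(top, chunk);  // one filler per chunk
        top += chunk;
        words_left -= chunk;
      }
    }
    // The NUMA bookkeeping in the real loop additionally marks any page a
    // filler header touches as invalid so it can be re-bound to the right node.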
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Fri Mar 23 22:34:22 2012 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Mon Mar 26 13:22:38 2012 +0200
@@ -85,7 +85,7 @@
   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
   const size_t elements_per_word = HeapWordSize / sizeof(jint);
   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
-                                             max_len * elements_per_word);
+                                             max_len / elements_per_word);
 
   _barrier_set = NULL;
   _is_gc_active = false;
@@ -303,10 +303,6 @@
   return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
 }
 
-size_t CollectedHeap::filler_array_max_size() {
-  return _filler_array_max_size;
-}
-
 #ifdef ASSERT
 void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
 {
@@ -333,6 +329,7 @@
 
   const size_t payload_size = words - filler_array_hdr_size();
   const size_t len = payload_size * HeapWordSize / sizeof(jint);
+  assert((int)len >= 0, err_msg("size too large " SIZE_FORMAT " becomes %d", words, (int)len));
 
   // Set the length first for concurrent GC.
   ((arrayOop)start)->set_length((int)len);
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Fri Mar 23 22:34:22 2012 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Mon Mar 26 13:22:38 2012 +0200
@@ -128,7 +128,6 @@
   // Reinitialize tlabs before resuming mutators.
   virtual void resize_all_tlabs();
 
- protected:
   // Allocate from the current thread's TLAB, with broken-out slow path.
   inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
   static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size);
@@ -169,7 +168,6 @@
   // Filler object utilities.
   static inline size_t filler_array_hdr_size();
   static inline size_t filler_array_min_size();
-  static inline size_t filler_array_max_size();
 
   DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
   DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)
@@ -197,6 +195,10 @@
     G1CollectedHeap
   };
 
+  static inline size_t filler_array_max_size() {
+    return _filler_array_max_size;
+  }
+
   virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }
 
   /**
--- a/test/Makefile	Fri Mar 23 22:34:22 2012 -0700
+++ b/test/Makefile	Mon Mar 26 13:22:38 2012 +0200
@@ -26,6 +26,8 @@
 # Makefile to run various jdk tests
 #
 
+GETMIXEDPATH=echo
+
 # Get OS/ARCH specifics
 OSNAME = $(shell uname -s)
 ifeq ($(OSNAME), SunOS)
@@ -60,7 +62,14 @@
     ARCH = i586
   endif
 endif
-ifeq ($(OSNAME), Windows_NT)
+ifeq ($(PLATFORM),)
+  # detect whether we're running in MKS or cygwin
+  ifeq ($(OSNAME), Windows_NT) # MKS
+    GETMIXEDPATH=dosname -s
+  endif
+  ifeq ($(findstring CYGWIN,$(OSNAME)), CYGWIN)
+    GETMIXEDPATH=cygpath -m -s
+  endif
   PLATFORM = windows
   SLASH_JAVA = J:
   ifeq ($(word 1, $(PROCESSOR_IDENTIFIER)),ia64)
@@ -234,11 +243,11 @@
 	$(JTREG) -a -v:fail,error               \
           $(JTREG_KEY_OPTION)                   \
           $(EXTRA_JTREG_OPTIONS)                \
-          -r:$(ABS_TEST_OUTPUT_DIR)/JTreport    \
-          -w:$(ABS_TEST_OUTPUT_DIR)/JTwork      \
-          -jdk:$(PRODUCT_HOME)                  \
+          -r:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTreport    \
+          -w:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTwork      \
+          -jdk:$(shell $(GETMIXEDPATH) "$(PRODUCT_HOME)")                  \
           $(JAVA_OPTIONS:%=-vmoption:%)         \
-          $(TEST_ROOT)/sanity                   \
+          $(shell $(GETMIXEDPATH) "$(TEST_ROOT)")/sanity                   \
 	  || $(BUNDLE_UP_FAILED)
 	$(BUNDLE_UP)
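GETMIXEDPATH defaults to echo (a no-op) and is only redefined on Windows: dosname -s under MKS and cygpath -m -s under Cygwin both convert a POSIX-style path into a short, forward-slash Windows path (e.g. /cygdrive/c/long name/dir becomes something like C:/LONGNA~1/dir) that the Java-based jtreg harness can consume. The new PLATFORM guard also fixes Cygwin detection, since uname -s there reports CYGWIN_NT-* rather than Windows_NT.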