changeset 6027:81528bb814f8

merge
author adinn
date Thu, 04 Dec 2014 14:30:02 +0000
parents 205e1ae8868b (current diff) 0c2099cd04cd (diff)
children 6712ee98b46e
files
diffstat 29 files changed, 434 insertions(+), 183 deletions(-)
--- a/.hgtags	Thu Nov 27 11:27:10 2014 +0000
+++ b/.hgtags	Thu Dec 04 14:30:02 2014 +0000
@@ -775,3 +775,4 @@
 4722cfd15c8386321c8e857951b3cb55461e858b icedtea-2.6pre09
 c8417820ac943736822e7b84518b5aca80f39593 icedtea-2.6pre10
 e13857ecc7870c28dbebca79ff36612693dac157 icedtea-2.6pre11
+9d2b485d2a58ea57ab2b3c06b2128f456ab39a38 jdk7u80-b03
--- a/make/bsd/makefiles/mapfile-vers-debug	Thu Nov 27 11:27:10 2014 +0000
+++ b/make/bsd/makefiles/mapfile-vers-debug	Thu Dec 04 14:30:02 2014 +0000
@@ -158,6 +158,7 @@
                 _JVM_GetStackTraceElement
                 _JVM_GetSystemPackage
                 _JVM_GetSystemPackages
+                _JVM_GetTemporaryDirectory
                 _JVM_GetThreadStateNames
                 _JVM_GetThreadStateValues
                 _JVM_GetVersionInfo
--- a/make/bsd/makefiles/mapfile-vers-product	Thu Nov 27 11:27:10 2014 +0000
+++ b/make/bsd/makefiles/mapfile-vers-product	Thu Dec 04 14:30:02 2014 +0000
@@ -158,6 +158,7 @@
                 _JVM_GetStackTraceElement
                 _JVM_GetSystemPackage
                 _JVM_GetSystemPackages
+                _JVM_GetTemporaryDirectory
                 _JVM_GetThreadStateNames
                 _JVM_GetThreadStateValues
                 _JVM_GetVersionInfo
--- a/make/linux/makefiles/mapfile-vers-debug	Thu Nov 27 11:27:10 2014 +0000
+++ b/make/linux/makefiles/mapfile-vers-debug	Thu Dec 04 14:30:02 2014 +0000
@@ -123,7 +123,7 @@
                 JVM_GetClassModifiers;
                 JVM_GetClassName;
                 JVM_GetClassNameUTF;
-		JVM_GetClassSignature;
+                JVM_GetClassSignature;
                 JVM_GetClassSigners;
                 JVM_GetComponentType;
                 JVM_GetDeclaredClasses;
@@ -160,6 +160,7 @@
                 JVM_GetStackTraceElement;
                 JVM_GetSystemPackage;
                 JVM_GetSystemPackages;
+                JVM_GetTemporaryDirectory;
                 JVM_GetThreadStateNames;
                 JVM_GetThreadStateValues;
                 JVM_GetVersionInfo;
--- a/make/linux/makefiles/mapfile-vers-product	Thu Nov 27 11:27:10 2014 +0000
+++ b/make/linux/makefiles/mapfile-vers-product	Thu Dec 04 14:30:02 2014 +0000
@@ -160,6 +160,7 @@
                 JVM_GetStackTraceElement;
                 JVM_GetSystemPackage;
                 JVM_GetSystemPackages;
+                JVM_GetTemporaryDirectory;
                 JVM_GetThreadStateNames;
                 JVM_GetThreadStateValues;
                 JVM_GetVersionInfo;
--- a/make/solaris/makefiles/mapfile-vers	Thu Nov 27 11:27:10 2014 +0000
+++ b/make/solaris/makefiles/mapfile-vers	Thu Dec 04 14:30:02 2014 +0000
@@ -160,6 +160,7 @@
 		JVM_GetStackTraceElement;
 		JVM_GetSystemPackage;
 		JVM_GetSystemPackages;
+		JVM_GetTemporaryDirectory;
 		JVM_GetThreadStateNames;
 		JVM_GetThreadStateValues;
 		JVM_GetVersionInfo;
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Dec 04 14:30:02 2014 +0000
@@ -1144,50 +1144,81 @@
     // Hoist any int/ptr/long's in the first 6 to int regs.
     // Hoist any flt/dbl's in the first 16 dbl regs.
     int j = 0;                  // Count of actual args, not HALVES
-    for( int i=0; i<total_args_passed; i++, j++ ) {
-      switch( sig_bt[i] ) {
+    VMRegPair param_array_reg;  // location of the argument in the parameter array
+    for (int i = 0; i < total_args_passed; i++, j++) {
+      param_array_reg.set_bad();
+      switch (sig_bt[i]) {
       case T_BOOLEAN:
       case T_BYTE:
       case T_CHAR:
       case T_INT:
       case T_SHORT:
-        regs[i].set1( int_stk_helper( j ) ); break;
+        regs[i].set1(int_stk_helper(j));
+        break;
       case T_LONG:
-        assert( sig_bt[i+1] == T_VOID, "expecting half" );
+        assert(sig_bt[i+1] == T_VOID, "expecting half");
       case T_ADDRESS: // raw pointers, like current thread, for VM calls
       case T_ARRAY:
       case T_OBJECT:
-        regs[i].set2( int_stk_helper( j ) );
+        regs[i].set2(int_stk_helper(j));
         break;
       case T_FLOAT:
-        if ( j < 16 ) {
-          // V9ism: floats go in ODD registers
-          regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
-        } else {
-          // V9ism: floats go in ODD stack slot
-          regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
+        // Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
+        // http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
+        //
+        // "When a callee prototype exists, and does not indicate variable arguments,
+        // floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
+        // will be promoted to floating-point registers"
+        //
+        // "Promoted" means that the argument is located in two places: an unused
+        // spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live
+        // float register.  In most cases, there are 6 or fewer arguments of any type,
+        // and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive)
+        // serve as shadow slots.  Per the spec floating point registers %d6 to %d16
+        // require slots beyond that (up to %sp+BIAS+248).
+        //
+        {
+          // V9ism: floats go in ODD registers and stack slots
+          int float_index = 1 + (j << 1);
+          param_array_reg.set1(VMRegImpl::stack2reg(float_index));
+          if (j < 16) {
+            regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
+          } else {
+            regs[i] = param_array_reg;
+          }
         }
         break;
       case T_DOUBLE:
-        assert( sig_bt[i+1] == T_VOID, "expecting half" );
-        if ( j < 16 ) {
-          // V9ism: doubles go in EVEN/ODD regs
-          regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
-        } else {
-          // V9ism: doubles go in EVEN/ODD stack slots
-          regs[i].set2(VMRegImpl::stack2reg(j<<1));
+        {
+          assert(sig_bt[i + 1] == T_VOID, "expecting half");
+          // V9ism: doubles go in EVEN/ODD regs and stack slots
+          int double_index = (j << 1);
+          param_array_reg.set2(VMRegImpl::stack2reg(double_index));
+          if (j < 16) {
+            regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
+          } else {
+            // V9ism: doubles go in EVEN/ODD stack slots
+            regs[i] = param_array_reg;
+          }
         }
         break;
-      case T_VOID:  regs[i].set_bad(); j--; break; // Do not count HALVES
+      case T_VOID:
+        regs[i].set_bad();
+        j--;
+        break; // Do not count HALVES
       default:
         ShouldNotReachHere();
       }
-      if (regs[i].first()->is_stack()) {
-        int off =  regs[i].first()->reg2stack();
+      // Keep track of the deepest parameter array slot.
+      if (!param_array_reg.first()->is_valid()) {
+        param_array_reg = regs[i];
+      }
+      if (param_array_reg.first()->is_stack()) {
+        int off = param_array_reg.first()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
-      if (regs[i].second()->is_stack()) {
-        int off =  regs[i].second()->reg2stack();
+      if (param_array_reg.second()->is_stack()) {
+        int off = param_array_reg.second()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
     }
@@ -1195,8 +1226,8 @@
 #else // _LP64
     // V8 convention: first 6 things in O-regs, rest on stack.
     // Alignment is willy-nilly.
-    for( int i=0; i<total_args_passed; i++ ) {
-      switch( sig_bt[i] ) {
+    for (int i = 0; i < total_args_passed; i++) {
+      switch (sig_bt[i]) {
       case T_ADDRESS: // raw pointers, like current thread, for VM calls
       case T_ARRAY:
       case T_BOOLEAN:
@@ -1206,23 +1237,23 @@
       case T_INT:
       case T_OBJECT:
       case T_SHORT:
-        regs[i].set1( int_stk_helper( i ) );
+        regs[i].set1(int_stk_helper(i));
         break;
       case T_DOUBLE:
       case T_LONG:
-        assert( sig_bt[i+1] == T_VOID, "expecting half" );
-        regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
+        assert(sig_bt[i + 1] == T_VOID, "expecting half");
+        regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
         break;
       case T_VOID: regs[i].set_bad(); break;
       default:
         ShouldNotReachHere();
       }
       if (regs[i].first()->is_stack()) {
-        int off =  regs[i].first()->reg2stack();
+        int off = regs[i].first()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
       if (regs[i].second()->is_stack()) {
-        int off =  regs[i].second()->reg2stack();
+        int off = regs[i].second()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
     }
@@ -1375,11 +1406,10 @@
     const Register rOop = src.first()->as_Register();
     const Register rHandle = L5;
     int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
-    int offset = oop_slot*VMRegImpl::stack_slot_size;
-    Label skip;
+    int offset = oop_slot * VMRegImpl::stack_slot_size;
     __ st_ptr(rOop, SP, offset + STACK_BIAS);
     if (is_receiver) {
-      *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
+       *receiver_offset = offset;
     }
     map->set_oop(VMRegImpl::stack2reg(oop_slot));
     __ add(SP, offset + STACK_BIAS, rHandle);
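The float/double arithmetic in the hunk above is compact; here is a minimal standalone C++ sketch of the same index math, assuming only what the diff states (floats occupy ODD slots, doubles EVEN/ODD pairs starting at an even index, and only the first 16 such arguments are promoted to registers). All names below are illustrative, not HotSpot's:

    #include <cstdio>

    int main() {
      // Argument j of float type uses odd slot 1 + 2*j; a double uses the
      // even/odd pair starting at 2*j. For j < 16 the value lives in a
      // float register and the parameter array slot is only a shadow.
      for (int j = 14; j < 18; j++) {
        int float_index  = 1 + (j << 1);
        int double_index = (j << 1);
        printf("arg %2d: float slot %2d, double slot %2d, value lives in %s\n",
               j, float_index, double_index,
               (j < 16) ? "a register (slot is a shadow)"
                        : "the parameter array slot");
      }
      return 0;
    }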
--- a/src/cpu/sparc/vm/sparc.ad	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/cpu/sparc/vm/sparc.ad	Thu Dec 04 14:30:02 2014 +0000
@@ -1990,7 +1990,7 @@
 // to implement the UseStrictFP mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = false;
 
-// Are floats conerted to double when stored to stack during deoptimization?
+// Are floats converted to double when stored to stack during deoptimization?
 // Sparc does not handle callee-save floats.
 bool Matcher::float_in_double() { return false; }
 
@@ -3228,7 +3228,7 @@
 //         are owned by the CALLEE.  Holes should not be nessecary in the
 //         incoming area, as the Java calling convention is completely under
 //         the control of the AD file.  Doubles can be sorted and packed to
-//         avoid holes.  Holes in the outgoing arguments may be nessecary for
+//         avoid holes.  Holes in the outgoing arguments may be necessary for
 //         varargs C calling conventions.
 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 //         even aligned with pad0 as needed.
@@ -3294,7 +3294,7 @@
   %}
 
   // Body of function which returns an OptoRegs array locating
-  // arguments either in registers or in stack slots for callin
+  // arguments either in registers or in stack slots for calling
   // C.
   c_calling_convention %{
     // This is obviously always outgoing
--- a/src/cpu/x86/vm/frame_x86.inline.hpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/cpu/x86/vm/frame_x86.inline.hpp	Thu Dec 04 14:30:02 2014 +0000
@@ -296,14 +296,18 @@
   return true;
 }
 
-
+inline oop frame::saved_oop_result(RegisterMap* map) const {
+  oop* result_adr = (oop *)map->location(rax->as_VMReg());
+  guarantee(result_adr != NULL, "bad register save location");
 
-inline oop frame::saved_oop_result(RegisterMap* map) const       {
-  return *((oop*) map->location(rax->as_VMReg()));
+  return (*result_adr);
 }
 
 inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
-  *((oop*) map->location(rax->as_VMReg())) = obj;
+  oop* result_adr = (oop *)map->location(rax->as_VMReg());
+  guarantee(result_adr != NULL, "bad register save location");
+
+  *result_adr = obj;
 }
 
 #endif // CPU_X86_VM_FRAME_X86_INLINE_HPP
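The frame_x86 hunk above replaces a bare dereference with a guarded one. A short sketch of the pattern, with guarantee() approximated by a plain check-and-abort and all names hypothetical:

    #include <cstdio>
    #include <cstdlib>

    typedef void* oop;                       // stand-in for HotSpot's oop

    // Stand-in for map->location(rax->as_VMReg()): may legitimately be
    // NULL when the register was not saved in this RegisterMap.
    static oop* location(bool saved) {
      static oop slot = NULL;
      return saved ? &slot : NULL;
    }

    static oop saved_oop_result(bool saved) {
      oop* result_adr = location(saved);
      if (result_adr == NULL) {              // cf. guarantee(...)
        fprintf(stderr, "bad register save location\n");
        abort();                             // fail loudly, not via segfault
      }
      return *result_adr;
    }

    int main() {
      (void)saved_oop_result(true);          // fine
      (void)saved_oop_result(false);         // aborts with a clear message
      return 0;
    }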
--- a/src/share/vm/classfile/javaClasses.cpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/classfile/javaClasses.cpp	Thu Dec 04 14:30:02 2014 +0000
@@ -578,6 +578,7 @@
       instanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, CHECK_NULL);
     }
     // set the classLoader field in the java_lang_Class instance
+    assert(class_loader() == k->class_loader(), "should be same");
     set_class_loader(mirror(), class_loader());
     return mirror();
   } else {
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Dec 04 14:30:02 2014 +0000
@@ -2778,10 +2778,12 @@
   }
 }
 
-void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl) {
-  assert(fl->count() == 0, "Precondition.");
-  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
-         "Precondition");
+// Used by par_get_chunk_of_blocks() for the chunks from the
+// indexed_free_lists.  Looks for a chunk whose size is a multiple
+// of "word_sz" and, if found, splits it into "word_sz" chunks and
+// adds them to the free list "fl".  "n" is the maximum number of
+// chunks to be added to "fl".
+bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, FreeList<FreeChunk>* fl) {
 
   // We'll try all multiples of word_sz in the indexed set, starting with
   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
@@ -2862,11 +2864,15 @@
                         Mutex::_no_safepoint_check_flag);
         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
         _indexedFreeList[word_sz].set_split_births(births);
-        return;
+        return true;
       }
     }
+    return found;
   }
-  // Otherwise, we'll split a block from the dictionary.
+}
+
+FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
+
   FreeChunk* fc = NULL;
   FreeChunk* rem_fc = NULL;
   size_t rem;
@@ -2878,16 +2884,12 @@
                                   _dictionary->min_size()),
                                   FreeBlockDictionary<FreeChunk>::atLeast);
       if (fc != NULL) {
-        _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
-        dictionary()->dict_census_udpate(fc->size(),
-                                       true /*split*/,
-                                       false /*birth*/);
         break;
       } else {
         n--;
       }
     }
-    if (fc == NULL) return;
+    if (fc == NULL) return NULL;
     // Otherwise, split up that block.
     assert((ssize_t)n >= 1, "Control point invariant");
     assert(fc->is_free(), "Error: should be a free block");
@@ -2909,10 +2911,14 @@
     // dictionary and return, leaving "fl" empty.
     if (n == 0) {
       returnChunkToDictionary(fc);
-      assert(fl->count() == 0, "We never allocated any blocks");
-      return;
+      return NULL;
     }
 
+    _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
+    dictionary()->dict_census_udpate(fc->size(),
+                                     true /*split*/,
+                                     false /*birth*/);
+
     // First return the remainder, if any.
     // Note that we hold the lock until we decide if we're going to give
     // back the remainder to the dictionary, since a concurrent allocation
@@ -2945,7 +2951,24 @@
     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
     smallSplitBirth(rem);
   }
-  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
+  assert(n * word_sz == fc->size(),
+    err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by "
+    SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
+    fc->size(), n, word_sz));
+  return fc;
+}
+
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, FreeList<FreeChunk>* fl) {
+
+  FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
+
+  if (fc == NULL) {
+    return;
+  }
+
+  size_t n = fc->size() / word_sz;
+
+  assert((ssize_t)n > 0, "Consistency");
   // Now do the splitting up.
   // Must do this in reverse order, so that anybody attempting to
   // access the main chunk sees it as a single free block until we
@@ -2993,6 +3016,20 @@
   assert(fl->tail()->next() == NULL, "List invariant.");
 }
 
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl) {
+  assert(fl->count() == 0, "Precondition.");
+  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
+         "Precondition");
+
+  if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
+    // Got it
+    return;
+  }
+
+  // Otherwise, we'll split a block from the dictionary.
+  par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
+}
+
 // Set up the space's par_seq_tasks structure for work claiming
 // for parallel rescan. See CMSParRemarkTask where this is currently used.
 // XXX Need to suitably abstract and generalize this and the next
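The dictionary path above ends by splitting one large chunk into n blocks of word_sz words, and the surviving comment notes the split must run in reverse. A sketch of why, assuming only that detail from the diff (sizes and names are illustrative):

    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t word_sz = 64, n = 4;
      size_t block_words = n * word_sz;   // the chunk picked by
                                          // get_n_way_chunk_to_split()
      // Carve chunks off the tail first: a concurrent reader that reaches
      // the head of the big block always sees one valid (shrinking) free
      // block, never a half-built chunk list.
      for (size_t k = n; k-- > 1; ) {
        block_words -= word_sz;
        printf("carved chunk %zu; head block is now %zu words\n",
               k, block_words);
      }
      printf("head block itself becomes chunk 0 (%zu words)\n", block_words);
      return 0;
    }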
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Dec 04 14:30:02 2014 +0000
@@ -171,6 +171,20 @@
   // list of size "word_sz", and must now be decremented.
   void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl);
 
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // indexed_free_lists.
+  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, FreeList<FreeChunk>* fl);
+
+  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
+  // evenly splittable into "n" "word_sz" chunks.  Returns that
+  // evenly splittable chunk.  May split a larger chunk to get the
+  // evenly splittable chunk.
+  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);
+
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // dictionary.
+  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, FreeList<FreeChunk>* fl);
+
   // Allocation helper functions
   // Allocate using a strategy that takes from the indexed free lists
   // first.  This allocation strategy assumes a companion sweeping
--- a/src/share/vm/oops/arrayKlass.cpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/oops/arrayKlass.cpp	Thu Dec 04 14:30:02 2014 +0000
@@ -124,7 +124,7 @@
   ResourceMark rm(THREAD);
   k->initialize_supers(super_klass(), CHECK);
   k->vtable()->initialize_vtable(false, CHECK);
-  java_lang_Class::create_mirror(k, CHECK);
+  java_lang_Class::create_mirror(k, Handle(THREAD, k->class_loader()), CHECK);
 }
 
 objArrayOop arrayKlass::compute_secondary_supers(int num_extra_slots, TRAPS) {
--- a/src/share/vm/oops/methodDataOop.hpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/oops/methodDataOop.hpp	Thu Dec 04 14:30:02 2014 +0000
@@ -1394,7 +1394,7 @@
   // Whole-method sticky bits and flags
 public:
   enum {
-    _trap_hist_limit    = 17,   // decoupled from Deoptimization::Reason_LIMIT
+    _trap_hist_limit    = 18,   // decoupled from Deoptimization::Reason_LIMIT
     _trap_hist_mask     = max_jubyte,
     _extra_data_count   = 4     // extra DataLayout headers, for trap history
   }; // Public flag values
--- a/src/share/vm/oops/objArrayKlass.cpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/oops/objArrayKlass.cpp	Thu Dec 04 14:30:02 2014 +0000
@@ -202,6 +202,15 @@
   }
 }
 
+// Protect against _bottom_klass being null when called from Universe::fixup_mirrors
+oop objArrayKlass::class_loader() const {
+  if (bottom_klass() != NULL) {
+    return Klass::cast(bottom_klass())->class_loader();
+  } else {
+    return NULL;
+  }
+}
+
 
 klassOop objArrayKlass::array_klass_impl(bool or_null, int n, TRAPS) {
   objArrayKlassHandle h_this(THREAD, as_klassOop());
--- a/src/share/vm/oops/objArrayKlass.hpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/oops/objArrayKlass.hpp	Thu Dec 04 14:30:02 2014 +0000
@@ -68,7 +68,7 @@
   // Compute protection domain
   oop protection_domain() { return Klass::cast(bottom_klass())->protection_domain(); }
   // Compute class loader
-  oop class_loader() const { return Klass::cast(bottom_klass())->class_loader(); }
+  oop class_loader() const;
 
  private:
   // Either oop or narrowOop depending on UseCompressedOops.
--- a/src/share/vm/opto/c2_globals.hpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/opto/c2_globals.hpp	Thu Dec 04 14:30:02 2014 +0000
@@ -461,6 +461,9 @@
   product(bool, DoEscapeAnalysis, true,                                     \
           "Perform escape analysis")                                        \
                                                                             \
+  product(double, EscapeAnalysisTimeout, 20. DEBUG_ONLY(+40.),              \
+          "Abort EA when it reaches time limit (in sec)")                   \
+                                                                            \
   develop(bool, ExitEscapeAnalysisOnTimeout, true,                          \
           "Exit or throw assert in EA when it reaches time limit")          \
                                                                             \
--- a/src/share/vm/opto/doCall.cpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/opto/doCall.cpp	Thu Dec 04 14:30:02 2014 +0000
@@ -735,10 +735,16 @@
     // each arm of the Phi.  If I know something clever about the exceptions
     // I'm loading the class from, I can replace the LoadKlass with the
     // klass constant for the exception oop.
-    if( ex_node->is_Phi() ) {
-      ex_klass_node = new (C) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
-      for( uint i = 1; i < ex_node->req(); i++ ) {
-        Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
+    if (ex_node->is_Phi()) {
+      ex_klass_node = new (C) PhiNode(ex_node->in(0), TypeKlassPtr::OBJECT);
+      for (uint i = 1; i < ex_node->req(); i++) {
+        Node* ex_in = ex_node->in(i);
+        if (ex_in == top() || ex_in == NULL) {
+          // This path was not taken.
+          ex_klass_node->init_req(i, top());
+          continue;
+        }
+        Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
         Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
         ex_klass_node->init_req( i, k );
       }
--- a/src/share/vm/opto/escape.cpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/opto/escape.cpp	Thu Dec 04 14:30:02 2014 +0000
@@ -37,6 +37,8 @@
 
 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
+  _in_worklist(C->comp_arena()),
+  _next_pidx(0),
   _collecting(true),
   _verify(false),
   _compile(C),
@@ -120,13 +122,19 @@
   if (C->root() != NULL) {
     ideal_nodes.push(C->root());
   }
+  // Processed ideal nodes are unique on the ideal_nodes list,
+  // but several ideal nodes are mapped to the phantom_obj.
+  // To avoid duplicate entries on the following worklists,
+  // add the phantom_obj to them only once.
+  ptnodes_worklist.append(phantom_obj);
+  java_objects_worklist.append(phantom_obj);
   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
     Node* n = ideal_nodes.at(next);
     // Create PointsTo nodes and add them to Connection Graph. Called
     // only once per ideal node since ideal_nodes is Unique_Node list.
     add_node_to_connection_graph(n, &delayed_worklist);
     PointsToNode* ptn = ptnode_adr(n->_idx);
-    if (ptn != NULL) {
+    if (ptn != NULL && ptn != phantom_obj) {
       ptnodes_worklist.append(ptn);
       if (ptn->is_JavaObject()) {
         java_objects_worklist.append(ptn->as_JavaObject());
@@ -395,7 +403,7 @@
     }
     case Op_CreateEx: {
       // assume that all exception objects globally escape
-      add_java_object(n, PointsToNode::GlobalEscape);
+      map_ideal_node(n, phantom_obj);
       break;
     }
     case Op_LoadKlass:
@@ -1016,13 +1024,8 @@
   // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
   // Set limit to 20 to catch situation when something did go wrong and
   // bailout Escape Analysis.
-  // Also limit build time to 30 sec (60 in debug VM).
+  // Also limit build time to 20 sec (60 in debug VM); see the EscapeAnalysisTimeout flag.
 #define CG_BUILD_ITER_LIMIT 20
-#ifdef ASSERT
-#define CG_BUILD_TIME_LIMIT 60.0
-#else
-#define CG_BUILD_TIME_LIMIT 30.0
-#endif
 
   // Propagate GlobalEscape and ArgEscape escape states and check that
   // we still have non-escaping objects. The method pushs on _worklist
@@ -1033,12 +1036,13 @@
   // Now propagate references to all JavaObject nodes.
   int java_objects_length = java_objects_worklist.length();
   elapsedTimer time;
+  bool timeout = false;
   int new_edges = 1;
   int iterations = 0;
   do {
     while ((new_edges > 0) &&
-          (iterations++   < CG_BUILD_ITER_LIMIT) &&
-          (time.seconds() < CG_BUILD_TIME_LIMIT)) {
+           (iterations++ < CG_BUILD_ITER_LIMIT)) {
+      double start_time = time.seconds();
       time.start();
       new_edges = 0;
       // Propagate references to phantom_object for nodes pushed on _worklist
@@ -1047,7 +1051,26 @@
       for (int next = 0; next < java_objects_length; ++next) {
         JavaObjectNode* ptn = java_objects_worklist.at(next);
         new_edges += add_java_object_edges(ptn, true);
+
+#define SAMPLE_SIZE 4
+        if ((next % SAMPLE_SIZE) == 0) {
+          // Every SAMPLE_SIZE iterations, estimate how long the remaining
+          // graph construction will take.
+          time.stop();
+          double stop_time = time.seconds();
+          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
+          double time_until_end = time_per_iter * (double)(java_objects_length - next);
+          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
+            timeout = true;
+            break; // Timeout
+          }
+          start_time = stop_time;
+          time.start();
+        }
+#undef SAMPLE_SIZE
+
       }
+      if (timeout) break;
       if (new_edges > 0) {
         // Update escape states on each iteration if graph was updated.
         if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
@@ -1055,9 +1078,12 @@
         }
       }
       time.stop();
+      if (time.seconds() >= EscapeAnalysisTimeout) {
+        timeout = true;
+        break;
+      }
     }
-    if ((iterations     < CG_BUILD_ITER_LIMIT) &&
-        (time.seconds() < CG_BUILD_TIME_LIMIT)) {
+    if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
       time.start();
       // Find fields which have unknown value.
       int fields_length = oop_fields_worklist.length();
@@ -1070,18 +1096,21 @@
         }
       }
       time.stop();
+      if (time.seconds() >= EscapeAnalysisTimeout) {
+        timeout = true;
+        break;
+      }
     } else {
       new_edges = 0; // Bailout
     }
   } while (new_edges > 0);
 
   // Bailout if passed limits.
-  if ((iterations     >= CG_BUILD_ITER_LIMIT) ||
-      (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
+  if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
     Compile* C = _compile;
     if (C->log() != NULL) {
       C->log()->begin_elem("connectionGraph_bailout reason='reached ");
-      C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time");
+      C->log()->text("%s", timeout ? "time" : "iterations");
       C->log()->end_elem(" limit'");
     }
     assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
@@ -1098,7 +1127,6 @@
 #endif
 
 #undef CG_BUILD_ITER_LIMIT
-#undef CG_BUILD_TIME_LIMIT
 
   // Find fields initialized by NULL for non-escaping Allocations.
   int non_escaped_length = non_escaped_worklist.length();
@@ -1222,8 +1250,8 @@
       }
     }
   }
-  while(_worklist.length() > 0) {
-    PointsToNode* use = _worklist.pop();
+  for (int l = 0; l < _worklist.length(); l++) {
+    PointsToNode* use = _worklist.at(l);
     if (PointsToNode::is_base_use(use)) {
       // Add reference from jobj to field and from field to jobj (field's base).
       use = PointsToNode::get_use_node(use)->as_Field();
@@ -1270,6 +1298,8 @@
       add_field_uses_to_worklist(use->as_Field());
     }
   }
+  _worklist.clear();
+  _in_worklist.Reset();
   return new_edges;
 }
 
@@ -1838,7 +1868,7 @@
     return;
   }
   Compile* C = _compile;
-  ptadr = new (C->comp_arena()) LocalVarNode(C, n, es);
+  ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
   _nodes.at_put(n->_idx, ptadr);
 }
 
@@ -1849,7 +1879,7 @@
     return;
   }
   Compile* C = _compile;
-  ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es);
+  ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
   _nodes.at_put(n->_idx, ptadr);
 }
 
@@ -1865,7 +1895,7 @@
     es = PointsToNode::GlobalEscape;
   }
   Compile* C = _compile;
-  FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop);
+  FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
   _nodes.at_put(n->_idx, field);
 }
 
@@ -1879,7 +1909,7 @@
     return;
   }
   Compile* C = _compile;
-  ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es);
+  ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
   _nodes.at_put(n->_idx, ptadr);
   // Add edge from arraycopy node to source object.
   (void)add_edge(ptadr, src);
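The sampling logic added to escape.cpp above projects the total build time from the last SAMPLE_SIZE iterations and bails out before the work is actually done. A self-contained sketch of the same estimate, using std::chrono in place of HotSpot's elapsedTimer; the constants and the fake workload are assumptions:

    #include <chrono>
    #include <cstdio>

    int main() {
      const int SAMPLE_SIZE = 4;             // as in the hunk above
      const double timeout_sec = 20.0;       // cf. EscapeAnalysisTimeout
      const int total = 1000000;             // stand-in for worklist length
      volatile double sink = 0.0;
      auto t0 = std::chrono::steady_clock::now();
      double start_time = 0.0;
      for (int next = 0; next < total; ++next) {
        for (int k = 0; k < 100; ++k) sink = sink + k;   // one unit of work
        if ((next % SAMPLE_SIZE) == 0) {
          double now = std::chrono::duration<double>(
              std::chrono::steady_clock::now() - t0).count();
          double time_per_iter  = (now - start_time) / SAMPLE_SIZE;
          double time_until_end = time_per_iter * (total - next);
          if (now + time_until_end >= timeout_sec) {
            printf("projected to exceed %.0fs; bailing out at %d\n",
                   timeout_sec, next);
            return 1;
          }
          start_time = now;
        }
      }
      return 0;
    }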
--- a/src/share/vm/opto/escape.hpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/opto/escape.hpp	Thu Dec 04 14:30:02 2014 +0000
@@ -125,6 +125,8 @@
 class FieldNode;
 class ArraycopyNode;
 
+class ConnectionGraph;
+
 // ConnectionGraph nodes
 class PointsToNode : public ResourceObj {
   GrowableArray<PointsToNode*> _edges; // List of nodes this node points to
@@ -137,6 +139,7 @@
 
   Node* const        _node;  // Ideal node corresponding to this PointsTo node.
   const int           _idx;  // Cached ideal node's _idx
+  const uint         _pidx;  // Index of this node
 
 public:
   typedef enum {
@@ -165,17 +168,9 @@
   } NodeFlags;
 
 
-  PointsToNode(Compile *C, Node* n, EscapeState es, NodeType type):
-    _edges(C->comp_arena(), 2, 0, NULL),
-    _uses (C->comp_arena(), 2, 0, NULL),
-    _node(n),
-    _idx(n->_idx),
-    _type((u1)type),
-    _escape((u1)es),
-    _fields_escape((u1)es),
-    _flags(ScalarReplaceable) {
-    assert(n != NULL && es != UnknownEscape, "sanity");
-  }
+  inline PointsToNode(ConnectionGraph* CG, Node* n, EscapeState es, NodeType type);
+
+  uint        pidx()   const { return _pidx; }
 
   Node* ideal_node()   const { return _node; }
   int          idx()   const { return _idx; }
@@ -243,14 +238,14 @@
 
 class LocalVarNode: public PointsToNode {
 public:
-  LocalVarNode(Compile *C, Node* n, EscapeState es):
-    PointsToNode(C, n, es, LocalVar) {}
+  LocalVarNode(ConnectionGraph *CG, Node* n, EscapeState es):
+    PointsToNode(CG, n, es, LocalVar) {}
 };
 
 class JavaObjectNode: public PointsToNode {
 public:
-  JavaObjectNode(Compile *C, Node* n, EscapeState es):
-    PointsToNode(C, n, es, JavaObject) {
+  JavaObjectNode(ConnectionGraph *CG, Node* n, EscapeState es):
+    PointsToNode(CG, n, es, JavaObject) {
       if (es > NoEscape)
         set_scalar_replaceable(false);
     }
@@ -262,8 +257,8 @@
   const bool  _is_oop; // Field points to object
         bool  _has_unknown_base; // Has phantom_object base
 public:
-  FieldNode(Compile *C, Node* n, EscapeState es, int offs, bool is_oop):
-    PointsToNode(C, n, es, Field),
+  FieldNode(ConnectionGraph *CG, Node* n, EscapeState es, int offs, bool is_oop):
+    PointsToNode(CG, n, es, Field),
     _offset(offs), _is_oop(is_oop),
     _has_unknown_base(false) {}
 
@@ -284,8 +279,8 @@
 
 class ArraycopyNode: public PointsToNode {
 public:
-  ArraycopyNode(Compile *C, Node* n, EscapeState es):
-    PointsToNode(C, n, es, Arraycopy) {}
+  ArraycopyNode(ConnectionGraph *CG, Node* n, EscapeState es):
+    PointsToNode(CG, n, es, Arraycopy) {}
 };
 
 // Iterators for PointsTo node's edges:
@@ -323,11 +318,14 @@
 
 
 class ConnectionGraph: public ResourceObj {
+  friend class PointsToNode;
 private:
   GrowableArray<PointsToNode*>  _nodes; // Map from ideal nodes to
                                         // ConnectionGraph nodes.
 
   GrowableArray<PointsToNode*>  _worklist; // Nodes to be processed
+  VectorSet                  _in_worklist;
+  uint                         _next_pidx;
 
   bool            _collecting; // Indicates whether escape information
                                // is still being collected. If false,
@@ -353,6 +351,8 @@
   }
   uint nodes_size() const { return _nodes.length(); }
 
+  uint next_pidx() { return _next_pidx++; }
+
   // Add nodes to ConnectionGraph.
   void add_local_var(Node* n, PointsToNode::EscapeState es);
   void add_java_object(Node* n, PointsToNode::EscapeState es);
@@ -396,15 +396,26 @@
   int add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist);
 
   // Put node on worklist if it is (or was) not there.
-  void add_to_worklist(PointsToNode* pt) {
-    _worklist.push(pt);
-    return;
+  inline void add_to_worklist(PointsToNode* pt) {
+    PointsToNode* ptf = pt;
+    uint pidx_bias = 0;
+    if (PointsToNode::is_base_use(pt)) {
+      // Create a separate entry in _in_worklist for a marked base edge
+      // because _worklist may have an entry for a normal edge pointing
+      // to the same node. To separate them, use _next_pidx as a bias.
+      ptf = PointsToNode::get_use_node(pt)->as_Field();
+      pidx_bias = _next_pidx;
+    }
+    if (!_in_worklist.test_set(ptf->pidx() + pidx_bias)) {
+      _worklist.append(pt);
+    }
   }
 
   // Put on worklist all uses of this node.
-  void add_uses_to_worklist(PointsToNode* pt) {
-    for (UseIterator i(pt); i.has_next(); i.next())
-      _worklist.push(i.get());
+  inline void add_uses_to_worklist(PointsToNode* pt) {
+    for (UseIterator i(pt); i.has_next(); i.next()) {
+      add_to_worklist(i.get());
+    }
   }
 
   // Put on worklist all field's uses and related field nodes.
@@ -517,8 +528,8 @@
  }
   // Helper functions
   bool   is_oop_field(Node* n, int offset, bool* unsafe);
- static Node* get_addp_base(Node *addp);
- static Node* find_second_addp(Node* addp, Node* n);
+  static Node* get_addp_base(Node *addp);
+  static Node* find_second_addp(Node* addp, Node* n);
   // offset of a field reference
   int address_offset(Node* adr, PhaseTransform *phase);
 
@@ -587,4 +598,17 @@
 #endif
 };
 
+inline PointsToNode::PointsToNode(ConnectionGraph *CG, Node* n, EscapeState es, NodeType type):
+  _edges(CG->_compile->comp_arena(), 2, 0, NULL),
+  _uses (CG->_compile->comp_arena(), 2, 0, NULL),
+  _node(n),
+  _idx(n->_idx),
+  _pidx(CG->next_pidx()),
+  _type((u1)type),
+  _escape((u1)es),
+  _fields_escape((u1)es),
+  _flags(ScalarReplaceable) {
+  assert(n != NULL && es != UnknownEscape, "sanity");
+}
+
 #endif // SHARE_VM_OPTO_ESCAPE_HPP
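The add_to_worklist() rewrite above keeps _worklist duplicate-free with a VectorSet indexed by pidx, giving marked base uses a disjoint key range by biasing with _next_pidx. A sketch of the same scheme with std::vector<bool> standing in for VectorSet; all names are illustrative:

    #include <cstdio>
    #include <vector>

    int main() {
      const unsigned next_pidx = 100;        // pidx values are 0..99
      std::vector<bool> in_worklist(2 * next_pidx, false);
      std::vector<unsigned> worklist;

      // Approximates test_set(): read the old bit, set it, push only if
      // it was clear.
      auto add = [&](unsigned pidx, bool is_base_use) {
        unsigned key = pidx + (is_base_use ? next_pidx : 0);
        if (!in_worklist[key]) {
          in_worklist[key] = true;
          worklist.push_back(pidx);
        }
      };

      add(7, false);
      add(7, false);                         // duplicate: dropped
      add(7, true);                          // base use: separate key, kept
      printf("worklist length = %zu\n", worklist.size());   // prints 2
      return 0;
    }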
--- a/src/share/vm/opto/loopopts.cpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/opto/loopopts.cpp	Thu Dec 04 14:30:02 2014 +0000
@@ -1393,7 +1393,8 @@
         // loop.  Happens if people set a loop-exit flag; then test the flag
         // in the loop to break the loop, then test is again outside of the
         // loop to determine which way the loop exited.
-        if( use->is_If() || use->is_CMove() ) {
+        // Loop predicate If node connects to Bool node through Opaque1 node.
+        if (use->is_If() || use->is_CMove() || C->is_predicate_opaq(use)) {
           // Since this code is highly unlikely, we lazily build the worklist
           // of such Nodes to go split.
           if( !split_if_set )
--- a/src/share/vm/opto/parse2.cpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/opto/parse2.cpp	Thu Dec 04 14:30:02 2014 +0000
@@ -895,53 +895,12 @@
 // if a path is never taken, its controlling comparison is
 // already acting in a stable fashion.  If the comparison
 // seems stable, we will put an expensive uncommon trap
-// on the untaken path.  To be conservative, and to allow
-// partially executed counted loops to be compiled fully,
-// we will plant uncommon traps only after pointer comparisons.
+// on the untaken path.
 bool Parse::seems_stable_comparison(BoolTest::mask btest, Node* cmp) {
-  for (int depth = 4; depth > 0; depth--) {
-    // The following switch can find CmpP here over half the time for
-    // dynamic language code rich with type tests.
-    // Code using counted loops or array manipulations (typical
-    // of benchmarks) will have many (>80%) CmpI instructions.
-    switch (cmp->Opcode()) {
-    case Op_CmpP:
-      // A never-taken null check looks like CmpP/BoolTest::eq.
-      // These certainly should be closed off as uncommon traps.
-      if (btest == BoolTest::eq)
-        return true;
-      // A never-failed type check looks like CmpP/BoolTest::ne.
-      // Let's put traps on those, too, so that we don't have to compile
-      // unused paths with indeterminate dynamic type information.
-      if (ProfileDynamicTypes)
-        return true;
-      return false;
-
-    case Op_CmpI:
-      // A small minority (< 10%) of CmpP are masked as CmpI,
-      // as if by boolean conversion ((p == q? 1: 0) != 0).
-      // Detect that here, even if it hasn't optimized away yet.
-      // Specifically, this covers the 'instanceof' operator.
-      if (btest == BoolTest::ne || btest == BoolTest::eq) {
-        if (_gvn.type(cmp->in(2))->singleton() &&
-            cmp->in(1)->is_Phi()) {
-          PhiNode* phi = cmp->in(1)->as_Phi();
-          int true_path = phi->is_diamond_phi();
-          if (true_path > 0 &&
-              _gvn.type(phi->in(1))->singleton() &&
-              _gvn.type(phi->in(2))->singleton()) {
-            // phi->region->if_proj->ifnode->bool->cmp
-            BoolNode* bol = phi->in(0)->in(1)->in(0)->in(1)->as_Bool();
-            btest = bol->_test._test;
-            cmp = bol->in(1);
-            continue;
-          }
-        }
-      }
-      return false;
-    }
+  if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) {
+    return false;
   }
-  return false;
+  return true;
 }
 
 //-------------------------------repush_if_args--------------------------------
@@ -1180,32 +1139,8 @@
   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
 
   if (seems_never_taken(prob) && seems_stable_comparison(btest, c)) {
-    // If this might possibly turn into an implicit null check,
-    // and the null has never yet been seen, we need to generate
-    // an uncommon trap, so as to recompile instead of suffering
-    // with very slow branches.  (We'll get the slow branches if
-    // the program ever changes phase and starts seeing nulls here.)
-    //
-    // We do not inspect for a null constant, since a node may
-    // optimize to 'null' later on.
-    //
-    // Null checks, and other tests which expect inequality,
-    // show btest == BoolTest::eq along the non-taken branch.
-    // On the other hand, type tests, must-be-null tests,
-    // and other tests which expect pointer equality,
-    // show btest == BoolTest::ne along the non-taken branch.
-    // We prune both types of branches if they look unused.
     repush_if_args();
-    // We need to mark this branch as taken so that if we recompile we will
-    // see that it is possible. In the tiered system the interpreter doesn't
-    // do profiling and by the time we get to the lower tier from the interpreter
-    // the path may be cold again. Make sure it doesn't look untaken
-    if (is_fallthrough) {
-      profile_not_taken_branch(!ProfileInterpreter);
-    } else {
-      profile_taken_branch(iter().get_dest(), !ProfileInterpreter);
-    }
-    uncommon_trap(Deoptimization::Reason_unreached,
+    uncommon_trap(Deoptimization::Reason_unstable_if,
                   Deoptimization::Action_reinterpret,
                   NULL,
                   (is_fallthrough ? "taken always" : "taken never"));
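The parse2.cpp hunk above collapses the old opcode-by-opcode heuristics: any never-taken branch now gets an unstable_if trap unless deopt history says the site has trapped too often already. A toy model of that policy, with the trap bookkeeping faked by a counter (the limit is a made-up stand-in for too_many_traps()):

    #include <cstdio>

    static int traps_recorded = 0;
    static const int per_site_limit = 4;     // hypothetical trap limit

    static bool seems_stable_comparison() {
      // New policy: stable unless this site already trapped too often.
      return traps_recorded < per_site_limit;
    }

    int main() {
      for (int recompile = 0; recompile < 6; recompile++) {
        if (seems_stable_comparison())
          printf("compile %d: emit Reason_unstable_if trap\n", recompile);
        else
          printf("compile %d: generate the real branch\n", recompile);
        traps_recorded++;                    // pretend the trap fired
      }
      return 0;
    }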
--- a/src/share/vm/prims/jvm.cpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/prims/jvm.cpp	Thu Dec 04 14:30:02 2014 +0000
@@ -391,6 +391,23 @@
 JVM_END
 
 
+/*
+ * Return the temporary directory that the VM uses for the attach
+ * and perf data files.
+ *
+ * It is important that this directory is well-known and the
+ * same for all VM instances. It cannot be affected by configuration
+ * variables such as java.io.tmpdir.
+ */
+JVM_ENTRY(jstring, JVM_GetTemporaryDirectory(JNIEnv *env))
+  JVMWrapper("JVM_GetTemporaryDirectory");
+  HandleMark hm(THREAD);
+  const char* temp_dir = os::get_temp_directory();
+  Handle h = java_lang_String::create_from_platform_dependent_str(temp_dir, CHECK_NULL);
+  return (jstring) JNIHandles::make_local(env, h());
+JVM_END
+
+
 // java.lang.Runtime /////////////////////////////////////////////////////////////////////////
 
 extern volatile jint vm_created;
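For context, a hedged sketch of how native JDK code could consume the new export; the extern declaration matches the jvm.h hunk in this changeset, while the wrapping JNI function and its class are hypothetical (the translation unit would link against the JVM):

    #include <jni.h>

    extern "C" jstring JNICALL JVM_GetTemporaryDirectory(JNIEnv *env);

    // Hypothetical binding: returns the VM's well-known temp directory
    // (attach/perf data files), unaffected by java.io.tmpdir.
    extern "C" JNIEXPORT jstring JNICALL
    Java_example_TempDir_vmTempDir(JNIEnv *env, jclass) {
      return JVM_GetTemporaryDirectory(env);
    }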
--- a/src/share/vm/prims/jvm.h	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/prims/jvm.h	Thu Dec 04 14:30:02 2014 +0000
@@ -1480,6 +1480,9 @@
 JNIEXPORT jobject JNICALL
 JVM_InitAgentProperties(JNIEnv *env, jobject agent_props);
 
+JNIEXPORT jstring JNICALL
+JVM_GetTemporaryDirectory(JNIEnv *env);
+
 /* Generics reflection support.
  *
  * Returns information about the given class's EnclosingMethod
--- a/src/share/vm/runtime/deoptimization.cpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/runtime/deoptimization.cpp	Thu Dec 04 14:30:02 2014 +0000
@@ -1805,7 +1805,8 @@
   "div0_check",
   "age",
   "predicate",
-  "loop_limit_check"
+  "loop_limit_check",
+  "unstable_if"
 };
 const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
   // Note:  Keep this in sync. with enum DeoptAction.
--- a/src/share/vm/runtime/deoptimization.hpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/runtime/deoptimization.hpp	Thu Dec 04 14:30:02 2014 +0000
@@ -59,6 +59,7 @@
     Reason_age,                   // nmethod too old; tier threshold reached
     Reason_predicate,             // compiler generated predicate failed
     Reason_loop_limit_check,      // compiler generated loop limits check failed
+    Reason_unstable_if,           // a branch predicted always false was taken
     Reason_LIMIT,
     // Note:  Keep this enum in sync. with _trap_reason_name.
     Reason_RECORDED_LIMIT = Reason_bimorphic  // some are not recorded per bc
@@ -311,6 +312,8 @@
       return reason;
     else if (reason == Reason_div0_check) // null check due to divide-by-zero?
       return Reason_null_check;           // recorded per BCI as a null check
+    else if (reason == Reason_unstable_if)
+      return Reason_intrinsic;
     else
       return Reason_none;
   }
--- a/src/share/vm/runtime/vmStructs.cpp	Thu Nov 27 11:27:10 2014 +0000
+++ b/src/share/vm/runtime/vmStructs.cpp	Thu Dec 04 14:30:02 2014 +0000
@@ -2454,6 +2454,7 @@
   declare_constant(Deoptimization::Reason_age)                            \
   declare_constant(Deoptimization::Reason_predicate)                      \
   declare_constant(Deoptimization::Reason_loop_limit_check)               \
+  declare_constant(Deoptimization::Reason_unstable_if)                    \
   declare_constant(Deoptimization::Reason_LIMIT)                          \
   declare_constant(Deoptimization::Reason_RECORDED_LIMIT)                 \
                                                                           \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/exceptions/CatchInlineExceptions.java	Thu Dec 04 14:30:02 2014 +0000
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8059299
+ * @summary assert(adr_type != NULL) failed: expecting TypeKlassPtr
+ * @run main/othervm -Xbatch CatchInlineExceptions
+ */
+
+class Exception1 extends Exception {};
+class Exception2 extends Exception {};
+
+public class CatchInlineExceptions {
+    private static int counter0;
+    private static int counter1;
+    private static int counter2;
+    private static int counter;
+
+    static void foo(int i) throws Exception {
+        if ((i & 1023) == 2) {
+            counter0++;
+            throw new Exception2();
+        }
+    }
+
+    static void test(int i) throws Exception {
+        try {
+           foo(i);
+        }
+        catch (Exception e) {
+            if (e instanceof Exception1) {
+                counter1++;
+            } else if (e instanceof Exception2) {
+                counter2++;
+            }
+            counter++;
+            throw e;
+        }
+    }
+
+    public static void main(String[] args) throws Throwable {
+        for (int i = 0; i < 15000; i++) {
+            try {
+                test(i);
+            } catch (Exception e) {
+                // expected
+            }
+        }
+        if (counter1 != 0) {
+            throw new RuntimeException("Failed: counter1(" + counter1  + ") != 0");
+        }
+        if (counter2 != counter0) {
+            throw new RuntimeException("Failed: counter2(" + counter2  + ") != counter0(" + counter0  + ")");
+        }
+        if (counter2 != counter) {
+            throw new RuntimeException("Failed: counter2(" + counter2  + ") != counter(" + counter  + ")");
+        }
+        System.out.println("TEST PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/LoadClass/ShowClassLoader.java	Thu Dec 04 14:30:02 2014 +0000
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key regression
+ * @bug 8058927
+ * @summary Make sure array class has the right class loader
+ * @run main ShowClassLoader
+ */
+
+public class ShowClassLoader {
+
+    public static void main(String[] args) {
+        Object[] oa = new Object[0];
+        ShowClassLoader[] sa = new ShowClassLoader[0];
+
+        System.out.println("Classloader for Object[] is " + oa.getClass().getClassLoader());
+        System.out.println("Classloader for SCL[] is " + sa.getClass().getClassLoader() );
+
+        if (sa.getClass().getClassLoader() == null) {
+            throw new RuntimeException("Wrong class loader");
+        }
+    }
+}