changeset 7337:d8f133adf05d jdk8u60-b11

Merge
author asaha
date Tue, 14 Apr 2015 13:02:21 -0700
parents 48fa04e21c87 (current diff) 459a71db33dc (diff)
children fc3cd1db10e2
files .hgtags make/hotspot_version src/share/vm/code/dependencies.cpp src/share/vm/oops/instanceKlass.cpp src/share/vm/oops/instanceKlass.hpp src/share/vm/runtime/arguments.cpp
diffstat 33 files changed, 468 insertions(+), 235 deletions(-)
--- a/.hgtags	Fri Apr 10 11:38:00 2015 -0700
+++ b/.hgtags	Tue Apr 14 13:02:21 2015 -0700
@@ -631,3 +631,4 @@
 f1058b5c6294235d8ad032dcc72c8f8bc202cb5a jdk8u60-b09
 57a14c3927eba6372d909ae164fa90bb9b6a6ce4 hs25.60-b10
 8e4518dc2b38957072704ffe4cbf29f046dc9325 jdk8u60-b10
+64a32bc18e88eed6131ed036dc3e10e566ef339b hs25.60-b11
--- a/agent/src/share/classes/com/sun/java/swing/ui/CommonToolBar.java	Fri Apr 10 11:38:00 2015 -0700
+++ b/agent/src/share/classes/com/sun/java/swing/ui/CommonToolBar.java	Tue Apr 14 13:02:21 2015 -0700
@@ -46,7 +46,7 @@
     {
         this.manager = manager;
         statusBar = status;
-        buttonSize = new Dimension(CommonUI.buttconPrefSize);
+        buttonSize = new Dimension(CommonUI.getButtconPrefSize());
         buttonInsets = new Insets(0, 0, 0, 0);
         addComponents();
     }
--- a/agent/src/share/classes/com/sun/java/swing/ui/CommonUI.java	Fri Apr 10 11:38:00 2015 -0700
+++ b/agent/src/share/classes/com/sun/java/swing/ui/CommonUI.java	Tue Apr 14 13:02:21 2015 -0700
@@ -373,20 +373,25 @@
         comp.setCursor(Cursor.getPredefinedCursor(0));
     }
 
-    public static final int BUTTON_WIDTH = 100;
-    public static final int BUTTON_HEIGHT = 26;
-    public static final int BUTTCON_WIDTH = 28;
-    public static final int BUTTCON_HEIGHT = 28;
-    public static final int SM_BUTTON_WIDTH = 72;
-    public static final int SM_BUTTON_HEIGHT = 26;
-    public static final int LABEL_WIDTH = 100;
-    public static final int LABEL_HEIGHT = 20;
-    public static final int TEXT_WIDTH = 150;
-    public static final int TEXT_HEIGHT = 20;
-    public static Dimension buttonPrefSize = new Dimension(100, 26);
-    public static Dimension buttconPrefSize = new Dimension(28, 28);
-    public static Dimension smbuttonPrefSize = new Dimension(72, 26);
-    public static Dimension labelPrefSize = new Dimension(100, 20);
-    public static Dimension textPrefSize = new Dimension(150, 20);
+    public static Dimension getButtconPrefSize()
+    {
+        return buttconPrefSize;
+    }
+
+    private static final int BUTTON_WIDTH = 100;
+    private static final int BUTTON_HEIGHT = 26;
+    private static final int BUTTCON_WIDTH = 28;
+    private static final int BUTTCON_HEIGHT = 28;
+    private static final int SM_BUTTON_WIDTH = 72;
+    private static final int SM_BUTTON_HEIGHT = 26;
+    private static final int LABEL_WIDTH = 100;
+    private static final int LABEL_HEIGHT = 20;
+    private static final int TEXT_WIDTH = 150;
+    private static final int TEXT_HEIGHT = 20;
+    private static final Dimension buttonPrefSize = new Dimension(100, 26);
+    private static final Dimension buttconPrefSize = new Dimension(28, 28);
+    private static final Dimension smbuttonPrefSize = new Dimension(72, 26);
+    private static final Dimension labelPrefSize = new Dimension(100, 20);
+    private static final Dimension textPrefSize = new Dimension(150, 20);
 
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java	Fri Apr 10 11:38:00 2015 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -799,6 +799,18 @@
         writeObjectID(klass.getJavaMirror());
 
         ClassData cd = (ClassData) classDataCache.get(klass);
+        if (cd == null) {
+            // The class is not present in the system dictionary, probably Lambda.
+            // Add it to cache here
+            if (klass instanceof InstanceKlass) {
+                InstanceKlass ik = (InstanceKlass) klass;
+                List fields = getInstanceFields(ik);
+                int instSize = getSizeForFields(fields);
+                cd = new ClassData(instSize, fields);
+                classDataCache.put(ik, cd);
+            }
+        }
+
         if (Assert.ASSERTS_ENABLED) {
             Assert.that(cd != null, "can not get class data for " + klass.getName().asString() + klass.getAddress());
         }
--- a/make/hotspot_version	Fri Apr 10 11:38:00 2015 -0700
+++ b/make/hotspot_version	Tue Apr 14 13:02:21 2015 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=60
-HS_BUILD_NUMBER=10
+HS_BUILD_NUMBER=11
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/src/share/vm/classfile/classLoaderData.cpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/classfile/classLoaderData.cpp	Tue Apr 14 13:02:21 2015 -0700
@@ -320,27 +320,6 @@
   }
 }
 
-#ifdef ASSERT
-class AllAliveClosure : public OopClosure {
-  BoolObjectClosure* _is_alive_closure;
-  bool _found_dead;
- public:
-  AllAliveClosure(BoolObjectClosure* is_alive_closure) : _is_alive_closure(is_alive_closure), _found_dead(false) {}
-  template <typename T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      if (!_is_alive_closure->do_object_b(obj)) {
-        _found_dead = true;
-      }
-    }
-  }
-  void do_oop(oop* p)       { do_oop_work<oop>(p); }
-  void do_oop(narrowOop* p) { do_oop_work<narrowOop>(p); }
-  bool found_dead()         { return _found_dead; }
-};
-#endif
-
 oop ClassLoaderData::keep_alive_object() const {
   assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
   return is_anonymous() ? _klasses->java_mirror() : class_loader();
@@ -350,15 +329,6 @@
   bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
       || is_alive_closure->do_object_b(keep_alive_object());
 
-#ifdef ASSERT
-  if (alive) {
-    AllAliveClosure all_alive_closure(is_alive_closure);
-    KlassToOopClosure klass_closure(&all_alive_closure);
-    const_cast<ClassLoaderData*>(this)->oops_do(&all_alive_closure, &klass_closure, false);
-    assert(!all_alive_closure.found_dead(), err_msg("Found dead oop in alive cld: " PTR_FORMAT, p2i(this)));
-  }
-#endif
-
   return alive;
 }
 
--- a/src/share/vm/classfile/javaClasses.cpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/classfile/javaClasses.cpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1278,7 +1278,8 @@
 }
 
 static inline bool version_matches(Method* method, int version) {
-  return (method->constants()->version() == version && version < MAX_VERSION);
+  assert(version < MAX_VERSION, "version is too big");
+  return method != NULL && (method->constants()->version() == version);
 }
 
 static inline int get_line_number(Method* method, int bci) {
@@ -1308,6 +1309,7 @@
   typeArrayOop    _methods;
   typeArrayOop    _bcis;
   objArrayOop     _mirrors;
+  typeArrayOop    _cprefs; // needed to insulate method name against redefinition
   int             _index;
   No_Safepoint_Verifier _nsv;
 
@@ -1315,8 +1317,9 @@
 
   enum {
     trace_methods_offset = java_lang_Throwable::trace_methods_offset,
-    trace_bcis_offset = java_lang_Throwable::trace_bcis_offset,
+    trace_bcis_offset    = java_lang_Throwable::trace_bcis_offset,
     trace_mirrors_offset = java_lang_Throwable::trace_mirrors_offset,
+    trace_cprefs_offset  = java_lang_Throwable::trace_cprefs_offset,
     trace_next_offset    = java_lang_Throwable::trace_next_offset,
     trace_size           = java_lang_Throwable::trace_size,
     trace_chunk_size     = java_lang_Throwable::trace_chunk_size
@@ -1338,9 +1341,14 @@
     assert(mirrors != NULL, "mirror array should be initialized in backtrace");
     return mirrors;
   }
+  static typeArrayOop get_cprefs(objArrayHandle chunk) {
+    typeArrayOop cprefs = typeArrayOop(chunk->obj_at(trace_cprefs_offset));
+    assert(cprefs != NULL, "cprefs array should be initialized in backtrace");
+    return cprefs;
+  }
 
   // constructor for new backtrace
-  BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _mirrors(NULL) {
+  BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _mirrors(NULL), _cprefs(NULL) {
     expand(CHECK);
     _backtrace = _head;
     _index = 0;
@@ -1350,6 +1358,7 @@
     _methods = get_methods(backtrace);
     _bcis = get_bcis(backtrace);
     _mirrors = get_mirrors(backtrace);
+    _cprefs = get_cprefs(backtrace);
     assert(_methods->length() == _bcis->length() &&
            _methods->length() == _mirrors->length(),
            "method and source information arrays should match");
@@ -1375,17 +1384,22 @@
     objArrayOop mirrors = oopFactory::new_objectArray(trace_chunk_size, CHECK);
     objArrayHandle new_mirrors(THREAD, mirrors);
 
+    typeArrayOop cprefs = oopFactory::new_shortArray(trace_chunk_size, CHECK);
+    typeArrayHandle new_cprefs(THREAD, cprefs);
+
     if (!old_head.is_null()) {
       old_head->obj_at_put(trace_next_offset, new_head());
     }
     new_head->obj_at_put(trace_methods_offset, new_methods());
     new_head->obj_at_put(trace_bcis_offset, new_bcis());
     new_head->obj_at_put(trace_mirrors_offset, new_mirrors());
+    new_head->obj_at_put(trace_cprefs_offset, new_cprefs());
 
     _head    = new_head();
     _methods = new_methods();
     _bcis = new_bcis();
     _mirrors = new_mirrors();
+    _cprefs  = new_cprefs();
     _index = 0;
   }
 
@@ -1405,8 +1419,9 @@
       method = mhandle();
     }
 
-    _methods->short_at_put(_index, method->method_idnum());
+    _methods->short_at_put(_index, method->orig_method_idnum());
     _bcis->int_at_put(_index, merge_bci_and_version(bci, method->constants()->version()));
+    _cprefs->short_at_put(_index, method->name_index());
 
     // We need to save the mirrors in the backtrace to keep the class
     // from being unloaded while we still have this stack trace.
@@ -1419,27 +1434,26 @@
 
 // Print stack trace element to resource allocated buffer
 char* java_lang_Throwable::print_stack_element_to_buffer(Handle mirror,
-                                  int method_id, int version, int bci) {
+                                  int method_id, int version, int bci, int cpref) {
 
   // Get strings and string lengths
   InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(mirror()));
   const char* klass_name  = holder->external_name();
   int buf_len = (int)strlen(klass_name);
 
-  // The method id may point to an obsolete method, can't get more stack information
-  Method* method = holder->method_with_idnum(method_id);
-  if (method == NULL) {
-    char* buf = NEW_RESOURCE_ARRAY(char, buf_len + 64);
-    // This is what the java code prints in this case - added Redefined
-    sprintf(buf, "\tat %s.null (Redefined)", klass_name);
-    return buf;
-  }
-
-  char* method_name = method->name()->as_C_string();
+  Method* method = holder->method_with_orig_idnum(method_id, version);
+
+  // The method can be NULL if the requested class version is gone
+  Symbol* sym = (method != NULL) ? method->name() : holder->constants()->symbol_at(cpref);
+  char* method_name = sym->as_C_string();
   buf_len += (int)strlen(method_name);
 
+  // Use a specific ik version as a holder since the mirror might
+  // refer to a version that is now obsolete and no longer accessible
+  // via the previous versions list.
+  holder = holder->get_klass_version(version);
   char* source_file_name = NULL;
-  if (version_matches(method, version)) {
+  if (holder != NULL) {
     Symbol* source = holder->source_file_name();
     if (source != NULL) {
       source_file_name = source->as_C_string();
@@ -1481,17 +1495,18 @@
 }
 
 void java_lang_Throwable::print_stack_element(outputStream *st, Handle mirror,
-                                              int method_id, int version, int bci) {
+                                              int method_id, int version, int bci, int cpref) {
   ResourceMark rm;
-  char* buf = print_stack_element_to_buffer(mirror, method_id, version, bci);
+  char* buf = print_stack_element_to_buffer(mirror, method_id, version, bci, cpref);
   st->print_cr("%s", buf);
 }
 
 void java_lang_Throwable::print_stack_element(outputStream *st, methodHandle method, int bci) {
   Handle mirror = method->method_holder()->java_mirror();
-  int method_id = method->method_idnum();
+  int method_id = method->orig_method_idnum();
   int version = method->constants()->version();
-  print_stack_element(st, mirror, method_id, version, bci);
+  int cpref = method->name_index();
+  print_stack_element(st, mirror, method_id, version, bci, cpref);
 }
 
 const char* java_lang_Throwable::no_stack_trace_message() {
@@ -1516,6 +1531,7 @@
       typeArrayHandle methods (THREAD, BacktraceBuilder::get_methods(result));
       typeArrayHandle bcis (THREAD, BacktraceBuilder::get_bcis(result));
       objArrayHandle mirrors (THREAD, BacktraceBuilder::get_mirrors(result));
+      typeArrayHandle cprefs (THREAD, BacktraceBuilder::get_cprefs(result));
 
       int length = methods()->length();
       for (int index = 0; index < length; index++) {
@@ -1525,7 +1541,8 @@
         int method = methods->short_at(index);
         int version = version_at(bcis->int_at(index));
         int bci = bci_at(bcis->int_at(index));
-        print_stack_element(st, mirror, method, version, bci);
+        int cpref = cprefs->short_at(index);
+        print_stack_element(st, mirror, method, version, bci, cpref);
       }
       result = objArrayHandle(THREAD, objArrayOop(result->obj_at(trace_next_offset)));
     }
@@ -1809,29 +1826,30 @@
   if (chunk == NULL) {
     THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
   }
-  // Get method id, bci, version and mirror from chunk
+  // Get method id, bci, version, mirror and cpref from chunk
   typeArrayOop methods = BacktraceBuilder::get_methods(chunk);
   typeArrayOop bcis = BacktraceBuilder::get_bcis(chunk);
   objArrayOop mirrors = BacktraceBuilder::get_mirrors(chunk);
+  typeArrayOop cprefs = BacktraceBuilder::get_cprefs(chunk);
 
   assert(methods != NULL && bcis != NULL && mirrors != NULL, "sanity check");
 
   int method = methods->short_at(chunk_index);
   int version = version_at(bcis->int_at(chunk_index));
   int bci = bci_at(bcis->int_at(chunk_index));
+  int cpref = cprefs->short_at(chunk_index);
   Handle mirror(THREAD, mirrors->obj_at(chunk_index));
 
   // Chunk can be partial full
   if (mirror.is_null()) {
     THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
   }
-
-  oop element = java_lang_StackTraceElement::create(mirror, method, version, bci, CHECK_0);
+  oop element = java_lang_StackTraceElement::create(mirror, method, version, bci, cpref, CHECK_0);
   return element;
 }
 
 oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
-                                        int version, int bci, TRAPS) {
+                                        int version, int bci, int cpref, TRAPS) {
   // Allocate java.lang.StackTraceElement instance
   Klass* k = SystemDictionary::StackTraceElement_klass();
   assert(k != NULL, "must be loaded in 1.4+");
@@ -1848,17 +1866,13 @@
   oop classname = StringTable::intern((char*) str, CHECK_0);
   java_lang_StackTraceElement::set_declaringClass(element(), classname);
 
-  Method* method = holder->method_with_idnum(method_id);
-  // Method on stack may be obsolete because it was redefined so cannot be
-  // found by idnum.
-  if (method == NULL) {
-    // leave name and fileName null
-    java_lang_StackTraceElement::set_lineNumber(element(), -1);
-    return element();
-  }
+  Method* method = holder->method_with_orig_idnum(method_id, version);
+
+  // The method can be NULL if the requested class version is gone
+  Symbol* sym = (method != NULL) ? method->name() : holder->constants()->symbol_at(cpref);
 
   // Fill in method name
-  oop methodname = StringTable::intern(method->name(), CHECK_0);
+  oop methodname = StringTable::intern(sym, CHECK_0);
   java_lang_StackTraceElement::set_methodName(element(), methodname);
 
   if (!version_matches(method, version)) {
@@ -1867,6 +1881,11 @@
     java_lang_StackTraceElement::set_lineNumber(element(), -1);
   } else {
     // Fill in source file name and line number.
+    // Use a specific ik version as a holder since the mirror might
+    // refer to a version that is now obsolete and no longer accessible
+    // via the previous versions list.
+    holder = holder->get_klass_version(version);
+    assert(holder != NULL, "sanity check");
     Symbol* source = holder->source_file_name();
     if (ShowHiddenFrames && source == NULL)
       source = vmSymbols::unknown_class_name();
@@ -1881,8 +1900,9 @@
 
 oop java_lang_StackTraceElement::create(methodHandle method, int bci, TRAPS) {
   Handle mirror (THREAD, method->method_holder()->java_mirror());
-  int method_id = method->method_idnum();
-  return create(mirror, method_id, method->constants()->version(), bci, THREAD);
+  int method_id = method->orig_method_idnum();
+  int cpref = method->name_index();
+  return create(mirror, method_id, method->constants()->version(), bci, cpref, THREAD);
 }
 
 void java_lang_reflect_AccessibleObject::compute_offsets() {
--- a/src/share/vm/classfile/javaClasses.hpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/classfile/javaClasses.hpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -484,8 +484,9 @@
     trace_methods_offset = 0,
     trace_bcis_offset    = 1,
     trace_mirrors_offset = 2,
-    trace_next_offset    = 3,
-    trace_size           = 4,
+    trace_cprefs_offset  = 3,
+    trace_next_offset    = 4,
+    trace_size           = 5,
     trace_chunk_size     = 32
   };
 
@@ -496,7 +497,7 @@
   static int static_unassigned_stacktrace_offset;
 
   // Printing
-  static char* print_stack_element_to_buffer(Handle mirror, int method, int version, int bci);
+  static char* print_stack_element_to_buffer(Handle mirror, int method, int version, int bci, int cpref);
   // StackTrace (programmatic access, new since 1.4)
   static void clear_stacktrace(oop throwable);
   // No stack trace available
@@ -517,7 +518,7 @@
   static oop message(Handle throwable);
   static void set_message(oop throwable, oop value);
   static void print_stack_element(outputStream *st, Handle mirror, int method,
-                                  int version, int bci);
+                                  int version, int bci, int cpref);
   static void print_stack_element(outputStream *st, methodHandle method, int bci);
   static void print_stack_usage(Handle stream);
 
@@ -1326,7 +1327,7 @@
   static void set_lineNumber(oop element, int value);
 
   // Create an instance of StackTraceElement
-  static oop create(Handle mirror, int method, int version, int bci, TRAPS);
+  static oop create(Handle mirror, int method, int version, int bci, int cpref, TRAPS);
   static oop create(methodHandle method, int bci, TRAPS);
 
   // Debugging
--- a/src/share/vm/code/dependencies.cpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/code/dependencies.cpp	Tue Apr 14 13:02:21 2015 -0700
@@ -811,7 +811,13 @@
     assert((uint)n <= (uint)_num_participants, "oob");
     Method* fm = _found_methods[n];
     assert(n == _num_participants || fm != NULL, "proper usage");
-    assert(fm == NULL || fm->method_holder() == _participants[n], "sanity");
+    if (fm != NULL && fm->method_holder() != _participants[n]) {
+      // Default methods from interfaces can be added to classes. In
+      // that case the holder of the method is not the class but the
+      // interface where it's defined.
+      assert(fm->is_default_method(), "sanity");
+      return NULL;
+    }
     return fm;
   }
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -130,8 +130,8 @@
 class VM_GenCollectFullConcurrent: public VM_GC_Operation {
   bool _disabled_icms;
  public:
-  VM_GenCollectFullConcurrent(unsigned int gc_count_before,
-                              unsigned int full_gc_count_before,
+  VM_GenCollectFullConcurrent(uint gc_count_before,
+                              uint full_gc_count_before,
                               GCCause::Cause gc_cause)
     : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
       _disabled_icms(false)
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -353,7 +353,7 @@
   HeapRegion* lists[] = {_head,   _survivor_head};
   const char* names[] = {"YOUNG", "SURVIVOR"};
 
-  for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
+  for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
     HeapRegion *curr = lists[list];
     if (curr == NULL)
@@ -827,8 +827,8 @@
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
 
-  unsigned int dummy_gc_count_before;
-  int dummy_gclocker_retry_count = 0;
+  uint dummy_gc_count_before;
+  uint dummy_gclocker_retry_count = 0;
   return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
 }
 
@@ -838,8 +838,8 @@
   assert_heap_not_locked_and_not_at_safepoint();
 
   // Loop until the allocation is satisfied, or unsatisfied after GC.
-  for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
-    unsigned int gc_count_before;
+  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
+    uint gc_count_before;
 
     HeapWord* result = NULL;
     if (!isHumongous(word_size)) {
@@ -891,8 +891,8 @@
 
 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                                    AllocationContext_t context,
-                                                   unsigned int *gc_count_before_ret,
-                                                   int* gclocker_retry_count_ret) {
+                                                   uint* gc_count_before_ret,
+                                                   uint* gclocker_retry_count_ret) {
   // Make sure you read the note in attempt_allocation_humongous().
 
   assert_heap_not_locked_and_not_at_safepoint();
@@ -909,7 +909,7 @@
   HeapWord* result = NULL;
   for (int try_count = 1; /* we'll return */; try_count += 1) {
     bool should_try_gc;
-    unsigned int gc_count_before;
+    uint gc_count_before;
 
     {
       MutexLockerEx x(Heap_lock);
@@ -953,7 +953,7 @@
     if (should_try_gc) {
       bool succeeded;
       result = do_collection_pause(word_size, gc_count_before, &succeeded,
-          GCCause::_g1_inc_collection_pause);
+                                   GCCause::_g1_inc_collection_pause);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         return result;
@@ -1007,8 +1007,8 @@
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
-                                                        unsigned int * gc_count_before_ret,
-                                                        int* gclocker_retry_count_ret) {
+                                                        uint* gc_count_before_ret,
+                                                        uint* gclocker_retry_count_ret) {
   // The structure of this method has a lot of similarities to
   // attempt_allocation_slow(). The reason these two were not merged
   // into a single one is that such a method would require several "if
@@ -1041,7 +1041,7 @@
   HeapWord* result = NULL;
   for (int try_count = 1; /* we'll return */; try_count += 1) {
     bool should_try_gc;
-    unsigned int gc_count_before;
+    uint gc_count_before;
 
     {
       MutexLockerEx x(Heap_lock);
@@ -1079,7 +1079,7 @@
 
       bool succeeded;
       result = do_collection_pause(word_size, gc_count_before, &succeeded,
-          GCCause::_g1_humongous_allocation);
+                                   GCCause::_g1_humongous_allocation);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         return result;
@@ -1887,7 +1887,7 @@
   assert(n_rem_sets > 0, "Invariant.");
 
   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
-  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
+  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
 
   for (int i = 0; i < n_queues; i++) {
@@ -2473,9 +2473,9 @@
 void G1CollectedHeap::collect(GCCause::Cause cause) {
   assert_heap_not_locked();
 
-  unsigned int gc_count_before;
-  unsigned int old_marking_count_before;
-  unsigned int full_gc_count_before;
+  uint gc_count_before;
+  uint old_marking_count_before;
+  uint full_gc_count_before;
   bool retry_gc;
 
   do {
@@ -3613,7 +3613,7 @@
 }
 
 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
-                                               unsigned int gc_count_before,
+                                               uint gc_count_before,
                                                bool* succeeded,
                                                GCCause::Cause gc_cause) {
   assert_heap_not_locked_and_not_at_safepoint();
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
   /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -343,11 +343,11 @@
 
   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
   // concurrent cycles) we have started.
-  volatile unsigned int _old_marking_cycles_started;
+  volatile uint _old_marking_cycles_started;
 
   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
   // concurrent cycles) we have completed.
-  volatile unsigned int _old_marking_cycles_completed;
+  volatile uint _old_marking_cycles_completed;
 
   bool _concurrent_cycle_started;
   bool _heap_summary_sent;
@@ -515,22 +515,22 @@
   // the mutator alloc region without taking the Heap_lock. This
   // should only be used for non-humongous allocations.
   inline HeapWord* attempt_allocation(size_t word_size,
-                                      unsigned int* gc_count_before_ret,
-                                      int* gclocker_retry_count_ret);
+                                      uint* gc_count_before_ret,
+                                      uint* gclocker_retry_count_ret);
 
   // Second-level mutator allocation attempt: take the Heap_lock and
   // retry the allocation attempt, potentially scheduling a GC
   // pause. This should only be used for non-humongous allocations.
   HeapWord* attempt_allocation_slow(size_t word_size,
                                     AllocationContext_t context,
-                                    unsigned int* gc_count_before_ret,
-                                    int* gclocker_retry_count_ret);
+                                    uint* gc_count_before_ret,
+                                    uint* gclocker_retry_count_ret);
 
   // Takes the Heap_lock and attempts a humongous allocation. It can
   // potentially schedule a GC pause.
   HeapWord* attempt_allocation_humongous(size_t word_size,
-                                         unsigned int* gc_count_before_ret,
-                                         int* gclocker_retry_count_ret);
+                                         uint* gc_count_before_ret,
+                                         uint* gclocker_retry_count_ret);
 
   // Allocation attempt that should be called during safepoints (e.g.,
   // at the end of a successful GC). expect_null_mutator_alloc_region
@@ -701,7 +701,7 @@
   // +ExplicitGCInvokesConcurrent).
   void increment_old_marking_cycles_completed(bool concurrent);
 
-  unsigned int old_marking_cycles_completed() {
+  uint old_marking_cycles_completed() {
     return _old_marking_cycles_completed;
   }
 
@@ -760,7 +760,7 @@
   // methods that call do_collection_pause() release the Heap_lock
   // before the call, so it's easy to read gc_count_before just before.
   HeapWord* do_collection_pause(size_t         word_size,
-                                unsigned int   gc_count_before,
+                                uint           gc_count_before,
                                 bool*          succeeded,
                                 GCCause::Cause gc_cause);
 
@@ -983,7 +983,7 @@
   // The heap region entry for a given worker is valid iff
   // the associated time stamp value matches the current value
   // of G1CollectedHeap::_gc_time_stamp.
-  unsigned int* _worker_cset_start_region_time_stamp;
+  uint* _worker_cset_start_region_time_stamp;
 
   volatile bool _free_regions_coming;
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -131,8 +131,8 @@
 }
 
 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
-                                                     unsigned int* gc_count_before_ret,
-                                                     int* gclocker_retry_count_ret) {
+                                                     uint* gc_count_before_ret,
+                                                     uint* gclocker_retry_count_ret) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,12 +34,11 @@
 #include "gc_implementation/g1/vm_operations_g1.hpp"
 #include "runtime/interfaceSupport.hpp"
 
-VM_G1CollectForAllocation::VM_G1CollectForAllocation(
-                                                  unsigned int gc_count_before,
-                                                  size_t word_size)
+VM_G1CollectForAllocation::VM_G1CollectForAllocation(uint gc_count_before,
+                                                     size_t word_size)
   : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
                                    GCCause::_allocation_failure) {
-  guarantee(word_size > 0, "an allocation should always be requested");
+  guarantee(word_size != 0, "An allocation should always be requested with this operation.");
 }
 
 void VM_G1CollectForAllocation::doit() {
@@ -57,12 +56,11 @@
   g1h->do_full_collection(false /* clear_all_soft_refs */);
 }
 
-VM_G1IncCollectionPause::VM_G1IncCollectionPause(
-                                      unsigned int   gc_count_before,
-                                      size_t         word_size,
-                                      bool           should_initiate_conc_mark,
-                                      double         target_pause_time_ms,
-                                      GCCause::Cause gc_cause)
+VM_G1IncCollectionPause::VM_G1IncCollectionPause(uint           gc_count_before,
+                                                 size_t         word_size,
+                                                 bool           should_initiate_conc_mark,
+                                                 double         target_pause_time_ms,
+                                                 GCCause::Cause gc_cause)
   : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
     _should_initiate_conc_mark(should_initiate_conc_mark),
     _target_pause_time_ms(target_pause_time_ms),
@@ -75,7 +73,7 @@
 }
 
 bool VM_G1IncCollectionPause::doit_prologue() {
-  bool res = VM_GC_Operation::doit_prologue();
+  bool res = VM_G1OperationWithAllocRequest::doit_prologue();
   if (!res) {
     if (_should_initiate_conc_mark) {
       // The prologue can fail for a couple of reasons. The first is that another GC
@@ -169,7 +167,7 @@
 }
 
 void VM_G1IncCollectionPause::doit_epilogue() {
-  VM_GC_Operation::doit_epilogue();
+  VM_G1OperationWithAllocRequest::doit_epilogue();
 
   // If the pause was initiated by a System.gc() and
   // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,20 +36,17 @@
 //     - VM_G1CollectForAllocation
 //     - VM_G1IncCollectionPause
 
-class VM_G1OperationWithAllocRequest: public VM_GC_Operation {
+class VM_G1OperationWithAllocRequest : public VM_CollectForAllocation {
 protected:
-  size_t    _word_size;
-  HeapWord* _result;
   bool      _pause_succeeded;
   AllocationContext_t _allocation_context;
 
 public:
-  VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
-                                 size_t       word_size,
+  VM_G1OperationWithAllocRequest(uint           gc_count_before,
+                                 size_t         word_size,
                                  GCCause::Cause gc_cause)
-    : VM_GC_Operation(gc_count_before, gc_cause),
-      _word_size(word_size), _result(NULL), _pause_succeeded(false) { }
-  HeapWord* result() { return _result; }
+    : VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
+      _pause_succeeded(false) {}
   bool pause_succeeded() { return _pause_succeeded; }
   void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
   AllocationContext_t  allocation_context() { return _allocation_context; }
@@ -57,8 +54,8 @@
 
 class VM_G1CollectFull: public VM_GC_Operation {
 public:
-  VM_G1CollectFull(unsigned int gc_count_before,
-                   unsigned int full_gc_count_before,
+  VM_G1CollectFull(uint gc_count_before,
+                   uint full_gc_count_before,
                    GCCause::Cause cause)
     : VM_GC_Operation(gc_count_before, cause, full_gc_count_before, true) { }
   virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
@@ -70,7 +67,7 @@
 
 class VM_G1CollectForAllocation: public VM_G1OperationWithAllocRequest {
 public:
-  VM_G1CollectForAllocation(unsigned int gc_count_before,
+  VM_G1CollectForAllocation(uint         gc_count_before,
                             size_t       word_size);
   virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
   virtual void doit();
@@ -84,9 +81,9 @@
   bool         _should_initiate_conc_mark;
   bool         _should_retry_gc;
   double       _target_pause_time_ms;
-  unsigned int _old_marking_cycles_completed_before;
+  uint         _old_marking_cycles_completed_before;
 public:
-  VM_G1IncCollectionPause(unsigned int   gc_count_before,
+  VM_G1IncCollectionPause(uint           gc_count_before,
                           size_t         word_size,
                           bool           should_initiate_conc_mark,
                           double         target_pause_time_ms,
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -261,7 +261,7 @@
 
   uint loop_count = 0;
   uint gc_count = 0;
-  int gclocker_stalled_count = 0;
+  uint gclocker_stalled_count = 0;
 
   while (result == NULL) {
     // We don't want to have multiple collections for a single filled generation.
@@ -521,8 +521,8 @@
   assert(!Heap_lock->owned_by_self(),
     "this thread should not own the Heap_lock");
 
-  unsigned int gc_count      = 0;
-  unsigned int full_gc_count = 0;
+  uint gc_count      = 0;
+  uint full_gc_count = 0;
   {
     MutexLocker ml(Heap_lock);
     // This value is guarded by the Heap_lock
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,10 @@
 #include "utilities/dtrace.hpp"
 
 // The following methods are used by the parallel scavenge collector
-VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t size,
-                                                      unsigned int gc_count) :
-  VM_GC_Operation(gc_count, GCCause::_allocation_failure),
-  _size(size),
-  _result(NULL)
-{
+VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t word_size,
+                                                             uint gc_count) :
+    VM_CollectForAllocation(word_size, gc_count, GCCause::_allocation_failure) {
+  assert(word_size != 0, "An allocation should always be requested with this operation.");
 }
 
 void VM_ParallelGCFailedAllocation::doit() {
@@ -47,7 +45,7 @@
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
 
   GCCauseSetter gccs(heap, _gc_cause);
-  _result = heap->failed_mem_allocate(_size);
+  _result = heap->failed_mem_allocate(_word_size);
 
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
@@ -55,8 +53,8 @@
 }
 
 // Only used for System.gc() calls
-VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(unsigned int gc_count,
-                                             unsigned int full_gc_count,
+VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(uint gc_count,
+                                             uint full_gc_count,
                                              GCCause::Cause gc_cause) :
   VM_GC_Operation(gc_count, gc_cause, full_gc_count, true /* full */)
 {
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,26 +29,19 @@
 #include "gc_implementation/shared/vmGCOperations.hpp"
 #include "gc_interface/gcCause.hpp"
 
-class VM_ParallelGCFailedAllocation: public VM_GC_Operation {
- private:
-  size_t    _size;
-  HeapWord* _result;
-
+class VM_ParallelGCFailedAllocation : public VM_CollectForAllocation {
  public:
-  VM_ParallelGCFailedAllocation(size_t size, unsigned int gc_count);
+  VM_ParallelGCFailedAllocation(size_t word_size, uint gc_count);
 
   virtual VMOp_Type type() const {
     return VMOp_ParallelGCFailedAllocation;
   }
   virtual void doit();
-
-  HeapWord* result() const       { return _result; }
 };
 
 class VM_ParallelGCSystemGC: public VM_GC_Operation {
  public:
-  VM_ParallelGCSystemGC(unsigned int gc_count, unsigned int full_gc_count,
-                        GCCause::Cause gc_cause);
+  VM_ParallelGCSystemGC(uint gc_count, uint full_gc_count, GCCause::Cause gc_cause);
   virtual VMOp_Type type() const { return VMOp_ParallelGCSystemGC; }
   virtual void doit();
 };
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Tue Apr 14 13:02:21 2015 -0700
@@ -193,10 +193,10 @@
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
-  _res = gch->satisfy_failed_allocation(_size, _tlab);
-  assert(gch->is_in_reserved_or_null(_res), "result not in heap");
+  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
+  assert(gch->is_in_reserved_or_null(_result), "result not in heap");
 
-  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
+  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
 }
@@ -209,6 +209,18 @@
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
 }
 
+VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
+                                                                 size_t size,
+                                                                 Metaspace::MetadataType mdtype,
+                                                                 uint gc_count_before,
+                                                                 uint full_gc_count_before,
+                                                                 GCCause::Cause gc_cause)
+    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
+      _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
+  assert(_size != 0, "An allocation should always be requested with this operation.");
+  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
+}
+
 // Returns true iff concurrent GCs unloads metadata.
 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
 #if INCLUDE_ALL_GCS
@@ -313,3 +325,11 @@
     set_gc_locked();
   }
 }
+
+VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
+    : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size) {
+  // Only report if operation was really caused by an allocation.
+  if (_word_size != 0) {
+    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
+  }
+}
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_VMGCOPERATIONS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_VMGCOPERATIONS_HPP
 
+#include "gc_implementation/shared/gcId.hpp"
 #include "memory/heapInspection.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/jniHandles.hpp"
@@ -38,11 +39,12 @@
 //  VM_Operation
 //      VM_GC_Operation
 //          VM_GC_HeapInspection
-//          VM_GenCollectForAllocation
 //          VM_GenCollectFull
 //          VM_GenCollectFullConcurrent
-//          VM_ParallelGCFailedAllocation
 //          VM_ParallelGCSystemGC
+//          VM_CollectForAllocation
+//              VM_GenCollectForAllocation
+//              VM_ParallelGCFailedAllocation
 //  VM_GC_Operation
 //   - implements methods common to all classes in the hierarchy:
 //     prevents multiple gc requests and manages lock on heap;
@@ -51,6 +53,7 @@
 //   - prints class histogram on SIGBREAK if PrintClassHistogram
 //     is specified; and also the attach "inspectheap" operation
 //
+//  VM_CollectForAllocation
 //  VM_GenCollectForAllocation
 //  VM_ParallelGCFailedAllocation
 //   - this operation is invoked when allocation is failed;
@@ -66,13 +69,13 @@
 
 class VM_GC_Operation: public VM_Operation {
  protected:
-  BasicLock     _pending_list_basic_lock; // for refs pending list notification (PLL)
-  unsigned int  _gc_count_before;         // gc count before acquiring PLL
-  unsigned int  _full_gc_count_before;    // full gc count before acquiring PLL
-  bool          _full;                    // whether a "full" collection
-  bool          _prologue_succeeded;      // whether doit_prologue succeeded
+  BasicLock      _pending_list_basic_lock; // for refs pending list notification (PLL)
+  uint           _gc_count_before;         // gc count before acquiring PLL
+  uint           _full_gc_count_before;    // full gc count before acquiring PLL
+  bool           _full;                    // whether a "full" collection
+  bool           _prologue_succeeded;      // whether doit_prologue succeeded
   GCCause::Cause _gc_cause;                // the putative cause for this gc op
-  bool          _gc_locked;               // will be set if gc was locked
+  bool           _gc_locked;               // will be set if gc was locked
 
   virtual bool skip_operation() const;
 
@@ -81,9 +84,9 @@
   void release_and_notify_pending_list_lock();
 
  public:
-  VM_GC_Operation(unsigned int gc_count_before,
+  VM_GC_Operation(uint gc_count_before,
                   GCCause::Cause _cause,
-                  unsigned int full_gc_count_before = 0,
+                  uint full_gc_count_before = 0,
                   bool full = false) {
     _full = full;
     _prologue_succeeded = false;
@@ -160,38 +163,45 @@
   bool collect();
 };
 
+class VM_CollectForAllocation : public VM_GC_Operation {
+ protected:
+  size_t    _word_size; // Size of object to be allocated (in number of words)
+  HeapWord* _result;    // Allocation result (NULL if allocation failed)
 
-class VM_GenCollectForAllocation: public VM_GC_Operation {
+ public:
+  VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause);
+
+  HeapWord* result() const {
+    return _result;
+  }
+};
+
+class VM_GenCollectForAllocation : public VM_CollectForAllocation {
  private:
-  HeapWord*   _res;
-  size_t      _size;                       // size of object to be allocated.
   bool        _tlab;                       // alloc is of a tlab.
  public:
-  VM_GenCollectForAllocation(size_t size,
+  VM_GenCollectForAllocation(size_t word_size,
                              bool tlab,
-                             unsigned int gc_count_before)
-    : VM_GC_Operation(gc_count_before, GCCause::_allocation_failure),
-      _size(size),
+                             uint gc_count_before)
+    : VM_CollectForAllocation(word_size, gc_count_before, GCCause::_allocation_failure),
       _tlab(tlab) {
-    _res = NULL;
+    assert(word_size != 0, "An allocation should always be requested with this operation.");
   }
   ~VM_GenCollectForAllocation()  {}
   virtual VMOp_Type type() const { return VMOp_GenCollectForAllocation; }
   virtual void doit();
-  HeapWord* result() const       { return _res; }
 };
 
-
 // VM operation to invoke a collection of the heap as a
 // GenCollectedHeap heap.
 class VM_GenCollectFull: public VM_GC_Operation {
  private:
   int _max_level;
  public:
-  VM_GenCollectFull(unsigned int gc_count_before,
-                    unsigned int full_gc_count_before,
+  VM_GenCollectFull(uint gc_count_before,
+                    uint full_gc_count_before,
                     GCCause::Cause gc_cause,
-                      int max_level)
+                    int max_level)
     : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
       _max_level(max_level) { }
   ~VM_GenCollectFull() {}
@@ -208,12 +218,9 @@
  public:
   VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                   size_t size, Metaspace::MetadataType mdtype,
-                                      unsigned int gc_count_before,
-                                      unsigned int full_gc_count_before,
-                                      GCCause::Cause gc_cause)
-    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
-      _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
-  }
+                                  uint gc_count_before,
+                                  uint full_gc_count_before,
+                                  GCCause::Cause gc_cause);
   virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
   virtual void doit();
   MetaWord* result() const       { return _result; }
--- a/src/share/vm/gc_interface/allocTracer.cpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/gc_interface/allocTracer.cpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/shared/gcId.hpp"
 #include "gc_interface/allocTracer.hpp"
 #include "trace/tracing.hpp"
 #include "runtime/handles.hpp"
@@ -46,3 +47,12 @@
     event.commit();
   }
 }
+
+void AllocTracer::send_allocation_requiring_gc_event(size_t size, const GCId& gcId) {
+  EventAllocationRequiringGC event;
+  if (event.should_commit()) {
+    event.set_gcId(gcId.id());
+    event.set_size(size);
+    event.commit();
+  }
+}
--- a/src/share/vm/gc_interface/allocTracer.hpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/gc_interface/allocTracer.hpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
   public:
     static void send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size);
     static void send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size);
+    static void send_allocation_requiring_gc_event(size_t size, const GCId& gcId);
 };
 
 #endif /* SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP */
--- a/src/share/vm/memory/collectorPolicy.cpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/memory/collectorPolicy.cpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -656,7 +656,7 @@
 
   // Loop until the allocation is satisified,
   // or unsatisfied after GC.
-  for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
+  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
     HandleMark hm; // discard any handles allocated in each iteration
 
     // First allocation attempt is lock-free.
@@ -670,7 +670,7 @@
         return result;
       }
     }
-    unsigned int gc_count_before;  // read inside the Heap_lock locked region
+    uint gc_count_before;  // read inside the Heap_lock locked region
     {
       MutexLocker ml(Heap_lock);
       if (PrintGC && Verbose) {
--- a/src/share/vm/oops/instanceKlass.cpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/oops/instanceKlass.cpp	Tue Apr 14 13:02:21 2015 -0700
@@ -3747,6 +3747,22 @@
 } // end has_previous_version()
 
 
+InstanceKlass* InstanceKlass::get_klass_version(int version) {
+  if (constants()->version() == version) {
+    return this;
+  }
+  PreviousVersionWalker pvw(Thread::current(), (InstanceKlass*)this);
+  for (PreviousVersionNode * pv_node = pvw.next_previous_version();
+       pv_node != NULL; pv_node = pvw.next_previous_version()) {
+    ConstantPool* prev_cp = pv_node->prev_constant_pool();
+    if (prev_cp->version() == version) {
+      return prev_cp->pool_holder();
+    }
+  }
+  return NULL; // None found
+}
+
+
 Method* InstanceKlass::method_with_idnum(int idnum) {
   Method* m = NULL;
   if (idnum < methods()->length()) {
@@ -3765,6 +3781,37 @@
   return m;
 }
 
+
+Method* InstanceKlass::method_with_orig_idnum(int idnum) {
+  if (idnum >= methods()->length()) {
+    return NULL;
+  }
+  Method* m = methods()->at(idnum);
+  if (m != NULL && m->orig_method_idnum() == idnum) {
+    return m;
+  }
+  // Obsolete method idnum does not match the original idnum
+  for (int index = 0; index < methods()->length(); ++index) {
+    m = methods()->at(index);
+    if (m->orig_method_idnum() == idnum) {
+      return m;
+    }
+  }
+  // None found, return null for the caller to handle.
+  return NULL;
+}
+
+
+Method* InstanceKlass::method_with_orig_idnum(int idnum, int version) {
+  InstanceKlass* holder = get_klass_version(version);
+  if (holder == NULL) {
+    return NULL; // The version of klass is gone, no method is found
+  }
+  Method* method = holder->method_with_orig_idnum(idnum);
+  return method;
+}
+
+
 jint InstanceKlass::get_cached_class_file_len() {
   return VM_RedefineClasses::get_cached_class_file_len(_cached_class_file);
 }
--- a/src/share/vm/oops/instanceKlass.hpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/oops/instanceKlass.hpp	Tue Apr 14 13:02:21 2015 -0700
@@ -358,6 +358,8 @@
   Array<Method*>* methods() const          { return _methods; }
   void set_methods(Array<Method*>* a)      { _methods = a; }
   Method* method_with_idnum(int idnum);
+  Method* method_with_orig_idnum(int idnum);
+  Method* method_with_orig_idnum(int idnum, int version);
 
   // method ordering
   Array<int>* method_ordering() const     { return _method_ordering; }
@@ -658,6 +660,7 @@
     return _previous_versions;
   }
 
+  InstanceKlass* get_klass_version(int version);
   static void purge_previous_versions(InstanceKlass* ik);
 
   // JVMTI: Support for caching a class file before it is modified by an agent that can do retransformation
--- a/src/share/vm/opto/type.cpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/opto/type.cpp	Tue Apr 14 13:02:21 2015 -0700
@@ -3950,7 +3950,9 @@
            (tap->_klass_is_exact && !tap->klass()->is_subtype_of(klass())) ||
            // 'this' is exact and super or unrelated:
            (this->_klass_is_exact && !klass()->is_subtype_of(tap->klass())))) {
-      tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
+      if (above_centerline(ptr)) {
+        tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
+      }
       return make(NotNull, NULL, tary, lazy_klass, false, off, InstanceBot);
     }
 
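
This hunk now widens the array element type to BOTTOM only above the centerline before returning the NotNull array type; TestMeetExactConstantArrays.java, added at the end of this changeset (bug 8075587), is the regression test for the meet of two exact constant array types.
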
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Tue Apr 14 13:02:21 2015 -0700
@@ -3979,14 +3979,13 @@
     // the_class doesn't have a cache yet so copy it
     the_class->set_cached_class_file(scratch_class->get_cached_class_file());
   }
-#ifndef PRODUCT
-  else {
-    assert(the_class->get_cached_class_file_bytes() ==
-      scratch_class->get_cached_class_file_bytes(), "cache ptrs must match");
-    assert(the_class->get_cached_class_file_len() ==
-      scratch_class->get_cached_class_file_len(), "cache lens must match");
+  else if (scratch_class->get_cached_class_file_bytes() !=
+           the_class->get_cached_class_file_bytes()) {
+    // The same class can be present twice in the scratch classes list or there
+    // are multiple concurrent RetransformClasses calls on different threads.
+    // In such cases we have to deallocate the scratch_class's cached_class_file_bytes.
+    os::free(scratch_class->get_cached_class_file_bytes());
   }
-#endif
 
   // NULL out in scratch class to not delete twice.  The class to be redefined
   // always owns these bytes.
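
The else-if branch above replaces the old debug-only asserts: when the same class appears twice in the scratch-class list, or several threads retransform the same class concurrently, the duplicate cached class file bytes are now freed instead of firing an assert (the pointer is then NULLed out below so the bytes are not deleted twice). A minimal sketch of the concurrent case, written as a retransformation-capable Java agent, follows; the class name ConcurrentRetransformAgent and the choice of String.class are illustrative only, and the jar must declare Premain-Class and Can-Retransform-Classes: true in its manifest.

    // Illustrative sketch only, not part of this changeset: two threads call
    // Instrumentation.retransformClasses() on the same class at the same time,
    // which is one of the situations the new deallocation branch handles.
    import java.lang.instrument.ClassFileTransformer;
    import java.lang.instrument.Instrumentation;
    import java.security.ProtectionDomain;

    public class ConcurrentRetransformAgent {
        public static void premain(String agentArgs, final Instrumentation inst) {
            // Register a retransformation-capable transformer. Returning null
            // means "no transformation"; the original class bytes are kept.
            inst.addTransformer(new ClassFileTransformer() {
                public byte[] transform(ClassLoader loader, String className,
                                        Class<?> classBeingRedefined,
                                        ProtectionDomain pd, byte[] classfileBuffer) {
                    return null;
                }
            }, /* canRetransform = */ true);

            Runnable retransform = new Runnable() {
                public void run() {
                    try {
                        // Both threads race to retransform the same class.
                        inst.retransformClasses(String.class);
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            };
            new Thread(retransform).start();
            new Thread(retransform).start();
        }
    }
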
--- a/src/share/vm/runtime/arguments.cpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Tue Apr 14 13:02:21 2015 -0700
@@ -3837,8 +3837,8 @@
       CommandLineFlags::printFlags(tty, false);
       vm_exit(0);
     }
+    if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
 #if INCLUDE_NMT
-    if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
       // The launcher did not setup nmt environment variable properly.
       if (!MemTracker::check_launcher_nmt_support(tail)) {
         warning("Native Memory Tracking did not setup properly, using wrong launcher?");
@@ -3853,8 +3853,12 @@
       } else {
         vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
       }
+#else
+      jio_fprintf(defaultStream::error_stream(),
+        "Native Memory Tracking is not supported in this VM\n");
+      return JNI_ERR;
+#endif
     }
-#endif
 
 
 #ifndef PRODUCT
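
Hoisting the match_option() call out of the #if INCLUDE_NMT region changes behaviour on VMs built without NMT: the option is now explicitly rejected with the error message above and JNI_ERR, instead of falling through to whatever the later option handling would do. A purely illustrative probe (the class name NmtFlagProbe is made up here) that launches a child VM with the flag and echoes its output:

    // Hypothetical helper, not part of this changeset: start a child VM with
    // -XX:NativeMemoryTracking=summary and print whatever it reports. On a VM
    // built without NMT the child now prints the "not supported" error and
    // refuses to start.
    import java.io.BufferedReader;
    import java.io.InputStreamReader;

    public class NmtFlagProbe {
        public static void main(String[] args) throws Exception {
            String java = System.getProperty("java.home") + "/bin/java";
            ProcessBuilder pb = new ProcessBuilder(
                java, "-XX:NativeMemoryTracking=summary", "-version");
            pb.redirectErrorStream(true);          // merge stderr into stdout
            Process p = pb.start();
            BufferedReader r = new BufferedReader(
                new InputStreamReader(p.getInputStream()));
            for (String line; (line = r.readLine()) != null; ) {
                System.out.println(line);
            }
            System.out.println("exit code: " + p.waitFor());
        }
    }
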
--- a/src/share/vm/runtime/globals.hpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/runtime/globals.hpp	Tue Apr 14 13:02:21 2015 -0700
@@ -1494,7 +1494,7 @@
           "How much the GC can expand the eden by while the GC locker "     \
           "is active (as a percentage)")                                    \
                                                                             \
-  diagnostic(intx, GCLockerRetryAllocationCount, 2,                         \
+  diagnostic(uintx, GCLockerRetryAllocationCount, 2,                        \
           "Number of times to retry allocations when "                      \
           "blocked by the GC locker")                                       \
                                                                             \
--- a/src/share/vm/runtime/mutexLocker.cpp	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/runtime/mutexLocker.cpp	Tue Apr 14 13:02:21 2015 -0700
@@ -282,10 +282,10 @@
 
 #ifdef INCLUDE_TRACE
   def(JfrMsg_lock                  , Monitor, leaf,        true);
-  def(JfrBuffer_lock               , Mutex,   nonleaf+1,   true);
-  def(JfrThreadGroups_lock         , Mutex,   nonleaf+1,   true);
-  def(JfrStream_lock               , Mutex,   nonleaf+2,   true);
-  def(JfrStacktrace_lock           , Mutex,   special,     true );
+  def(JfrBuffer_lock               , Mutex,   leaf,        true);
+  def(JfrThreadGroups_lock         , Mutex,   leaf,        true);
+  def(JfrStream_lock               , Mutex,   nonleaf,     true);
+  def(JfrStacktrace_lock           , Mutex,   special,     true);
 #endif
 
 #ifndef SUPPORTS_NATIVE_CX8
--- a/src/share/vm/trace/trace.xml	Fri Apr 10 11:38:00 2015 -0700
+++ b/src/share/vm/trace/trace.xml	Tue Apr 14 13:02:21 2015 -0700
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="utf-8"?>
 <!--
- Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -352,6 +352,12 @@
       <value type="UTF8" field="name" label="Name" />
     </event>
 
+    <event id="AllocationRequiringGC" path="vm/gc/detailed/allocation_requiring_gc" label="Allocation Requiring GC"
+           has_thread="true" has_stacktrace="true"  is_instant="true">
+      <value type="UINT" field="gcId"  label="Pending GC ID" relation="GC_ID" />
+      <value type="BYTES64" field="size" label="Allocation Size" />
+    </event>
+
     <!-- Compiler events -->
 
     <event id="Compilation" path="vm/compiler/compilation" label="Compilation"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/inlining/DefaultMethodsDependencies.java	Tue Apr 14 13:02:21 2015 -0700
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8069263
+ * @summary Class hierarchy dependencies must be recorded correctly when the resolved target of a call is a default method
+ * @run main/othervm -XX:-BackgroundCompilation -XX:CompileOnly=DefaultMethodsDependencies::test -XX:CompileOnly=DefaultMethodsDependencies$I2::m1 DefaultMethodsDependencies
+ *
+ */
+
+public class DefaultMethodsDependencies {
+
+    interface I1 {
+        void m1();
+        // triggers processing of default methods in C1
+        default void m2() {
+        }
+    }
+
+    interface I2 extends I1 {
+        // added to C2 as default method
+        default void m1() {
+        }
+    }
+
+    static abstract class C1 implements I1 {
+    }
+
+    static class C2 extends C1 implements I2 {
+    }
+
+    static void test(C1 obj) {
+        obj.m1();
+    }
+
+    public static void main(String[] args) {
+        C2 obj = new C2();
+        for (int i = 0; i < 20000; i++) {
+            test(obj);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/types/TestMeetExactConstantArrays.java	Tue Apr 14 13:02:21 2015 -0700
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8075587
+ * @summary meet of 2 constant arrays results in bottom
+ * @run main/othervm TestMeetExactConstantArrays
+ *
+ */
+
+public class TestMeetExactConstantArrays {
+    public abstract static class NumbersHolder {
+        public Number[] getNumbers() {
+            return null;
+        }
+    }
+
+    public static class IntegersHolder extends NumbersHolder {
+        private static final Integer[] integers = { new Integer(1) };
+
+        public Number[] getNumbers() {
+            return integers;
+        }
+    }
+
+    public static class LongsHolder extends NumbersHolder {
+        private static final Long[] longs = { new Long(1) };
+
+        public Number[] getNumbers() {
+            return longs;
+        }
+    }
+
+    public static final void loopNumbers(NumbersHolder numbersHolder) {
+        Number[] numbers = numbersHolder.getNumbers();
+        for (int i = 0; i < numbers.length; i++) {
+            numbers[i].longValue();
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        for (int i = 0; i < 10000; i++) {
+            IntegersHolder integersHolder = new IntegersHolder();
+            LongsHolder longsHolder = new LongsHolder();
+            loopNumbers(integersHolder);
+            loopNumbers(longsHolder);
+        }
+    }
+}