# HG changeset patch
# User asaha
# Date 1415295589 28800
# Node ID b1cf34d57e7846a2e1ff17bd32e3463b62d7d31a
# Parent 5ca2ea5eeff04194afcdfff5774acf5adb6923ea
# Parent fc1348524f6536c38bfcbd4884b96b8e62200936
Merge

diff -r 5ca2ea5eeff0 -r b1cf34d57e78 .hgtags
--- a/.hgtags	Fri Oct 31 17:09:14 2014 -0700
+++ b/.hgtags	Thu Nov 06 09:39:49 2014 -0800
@@ -553,7 +553,13 @@
 401cbaa475b4efe53153119ab87a82b217980a7f jdk8u31-b03
 060cdf93040c1bfa5fdf580da5e9999042632cc8 jdk8u31-b04
 6e56d7f1634f6c4cd4196e699c06e6ca2e6d6efb jdk8u31-b05
+271a32147391d08b0f338d9353330e2b5584d580 jdk8u31-b06
+e9f815c3f21cf2febd8e3c185917c1519aa52d9a jdk8u31-b07
 1b3abbeee961dee49780c0e4af5337feb918c555 jdk8u40-b10
 f10fe402dfb1543723b4b117a7cba3ea3d4159f1 hs25.40-b15
 99372b2fee0eb8b3452f47230e84aa6e97003184 jdk8u40-b11
+8b9ec2da541a74ac698560b6a2bc45fccb789919 hs25.40-b16
+6b93bf9ea3ea57ed0fe53cfedb2f9ab912c324e5 jdk8u40-b12
+521e269ae1daa9df1cb0835b97aa76bdf340fcb2 hs25.40-b17
+86307d47790785398d0695acc361bccaefe25f94 jdk8u40-b13
 b95f13f05f553309cd74d6ccf8fcedb259c6716c jdk8u45-b00
diff -r 5ca2ea5eeff0 -r b1cf34d57e78 make/excludeSrc.make
--- a/make/excludeSrc.make	Fri Oct 31 17:09:14 2014 -0700
+++ b/make/excludeSrc.make	Thu Nov 06 09:39:49 2014 -0800
@@ -97,6 +97,7 @@
 	ageTable.cpp \
 	collectorCounters.cpp \
 	cSpaceCounters.cpp \
+	gcId.cpp \
 	gcPolicyCounters.cpp \
 	gcStats.cpp \
 	gcTimer.cpp \
diff -r 5ca2ea5eeff0 -r b1cf34d57e78 make/hotspot_version
diff -r 5ca2ea5eeff0 -r b1cf34d57e78 make/jprt.properties
--- a/make/jprt.properties	Fri Oct 31 17:09:14 2014 -0700
+++ b/make/jprt.properties	Thu Nov 06 09:39:49 2014 -0800
@@ -374,21 +374,25 @@
   ${jprt.my.windows.i586}-fastdebug-c2-internalvmtests, \
   ${jprt.my.windows.x64}-fastdebug-c2-internalvmtests

-jprt.make.rule.test.targets.standard.wbapi = \
-  ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-wbapitest, \
-  ${jprt.my.solaris.x64}-{product|fastdebug}-c2-wbapitest, \
-  ${jprt.my.linux.i586}-{product|fastdebug}-c2-wbapitest, \
-  ${jprt.my.linux.x64}-{product|fastdebug}-c2-wbapitest, \
-  ${jprt.my.windows.i586}-{product|fastdebug}-c2-wbapitest, \
-  ${jprt.my.windows.x64}-{product|fastdebug}-c2-wbapitest, \
-  ${jprt.my.linux.i586}-{product|fastdebug}-c1-wbapitest, \
-  ${jprt.my.windows.i586}-{product|fastdebug}-c1-wbapitest
+jprt.make.rule.test.targets.standard.reg.group = \
+  ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GROUP, \
+  ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GROUP, \
+  ${jprt.my.linux.i586}-{product|fastdebug}-c2-GROUP, \
+  ${jprt.my.linux.x64}-{product|fastdebug}-c2-GROUP, \
+  ${jprt.my.windows.i586}-{product|fastdebug}-c2-GROUP, \
+  ${jprt.my.windows.x64}-{product|fastdebug}-c2-GROUP, \
+  ${jprt.my.linux.i586}-{product|fastdebug}-c1-GROUP, \
+  ${jprt.my.windows.i586}-{product|fastdebug}-c1-GROUP

 jprt.make.rule.test.targets.standard = \
   ${jprt.make.rule.test.targets.standard.client}, \
   ${jprt.make.rule.test.targets.standard.server}, \
   ${jprt.make.rule.test.targets.standard.internalvmtests}, \
-  ${jprt.make.rule.test.targets.standard.wbapi}
+  ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_wbapitest}, \
+  ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_compiler}, \
+  ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_gc}, \
+  ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime}, \
+  ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_serviceability}

 jprt.make.rule.test.targets.embedded = \
   ${jprt.make.rule.test.targets.standard.client}
diff -r 5ca2ea5eeff0 -r b1cf34d57e78
make/solaris/makefiles/vm.make --- a/make/solaris/makefiles/vm.make Fri Oct 31 17:09:14 2014 -0700 +++ b/make/solaris/makefiles/vm.make Thu Nov 06 09:39:49 2014 -0800 @@ -141,7 +141,7 @@ LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc -ldemangle endif # sparcWorks -LIBS += -lkstat -lpicl +LIBS += -lkstat # By default, link the *.o into the library, not the executable. LINK_INTO$(LINK_INTO) = LIBJVM diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/cpu/sparc/vm/sparc.ad diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/cpu/x86/vm/vm_version_x86.cpp --- a/src/cpu/x86/vm/vm_version_x86.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/cpu/x86/vm/vm_version_x86.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -873,14 +873,19 @@ if (supports_bmi1()) { // tzcnt does not require VEX prefix if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) { - UseCountTrailingZerosInstruction = true; + if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) { + // Don't use tzcnt if BMI1 is switched off on command line. + UseCountTrailingZerosInstruction = false; + } else { + UseCountTrailingZerosInstruction = true; + } } } else if (UseCountTrailingZerosInstruction) { warning("tzcnt instruction is not available on this CPU"); FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false); } - // BMI instructions use an encoding with VEX prefix. + // BMI instructions (except tzcnt) use an encoding with VEX prefix. // VEX prefix is generated only when AVX > 0. if (supports_bmi1() && supports_avx()) { if (FLAG_IS_DEFAULT(UseBMI1Instructions)) { diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp --- a/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -31,18 +31,51 @@ #include #include #include +#include +#include extern "C" static int PICL_get_l1_data_cache_line_size_helper(picl_nodehdl_t nodeh, void *result); extern "C" static int PICL_get_l2_cache_line_size_helper(picl_nodehdl_t nodeh, void *result); +// Functions from the library we need (signatures should match those in picl.h) +extern "C" { + typedef int (*picl_initialize_func_t)(void); + typedef int (*picl_shutdown_func_t)(void); + typedef int (*picl_get_root_func_t)(picl_nodehdl_t *nodehandle); + typedef int (*picl_walk_tree_by_class_func_t)(picl_nodehdl_t rooth, + const char *classname, void *c_args, + int (*callback_fn)(picl_nodehdl_t hdl, void *args)); + typedef int (*picl_get_prop_by_name_func_t)(picl_nodehdl_t nodeh, const char *nm, + picl_prophdl_t *ph); + typedef int (*picl_get_propval_func_t)(picl_prophdl_t proph, void *valbuf, size_t sz); + typedef int (*picl_get_propinfo_func_t)(picl_prophdl_t proph, picl_propinfo_t *pi); +} + class PICL { + // Pointers to functions in the library + picl_initialize_func_t _picl_initialize; + picl_shutdown_func_t _picl_shutdown; + picl_get_root_func_t _picl_get_root; + picl_walk_tree_by_class_func_t _picl_walk_tree_by_class; + picl_get_prop_by_name_func_t _picl_get_prop_by_name; + picl_get_propval_func_t _picl_get_propval; + picl_get_propinfo_func_t _picl_get_propinfo; + // Handle to the library that is returned by dlopen + void *_dl_handle; + + bool open_library(); + void close_library(); + + template bool bind(FuncType& func, const char* name); + bool bind_library_functions(); + // Get a value of the integer property. The value in the tree can be either 32 or 64 bit // depending on the platform. The result is converted to int. 
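
A note on the libpicl change above: instead of linking against -lpicl (dropped from vm.make earlier in this patch), the SPARC feature detection now opens libpicl.so.1 with dlopen and resolves each entry point with dlsym, degrading gracefully when the library is absent. Below is a minimal standalone sketch of that binding pattern, not HotSpot code; it uses libm and cos as stand-in library and symbol so it runs anywhere, and the soname "libm.so.6" is an assumption about a glibc system.

    // Standalone analogue of the lazy-binding pattern used for libpicl:
    // open the library at runtime, resolve the symbol we need, and fall
    // back quietly if either step fails.
    #include <dlfcn.h>
    #include <cstdio>

    typedef double (*cos_func_t)(double);

    int main() {
      void* handle = dlopen("libm.so.6", RTLD_LAZY);
      if (handle == NULL) {
        fprintf(stderr, "library missing; fall back to defaults\n");
        return 0;
      }
      cos_func_t my_cos = reinterpret_cast<cos_func_t>(dlsym(handle, "cos"));
      if (my_cos == NULL) {              // unexpected API change
        dlclose(handle);
        return 0;
      }
      printf("cos(0) = %f\n", my_cos(0.0));
      dlclose(handle);
      return 0;
    }

The same shape appears in PICL::open_library()/bind_library_functions() below: one typedef per entry point, one dlsym per typedef, and a single failure path that closes the handle.
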
- static int get_int_property(picl_nodehdl_t nodeh, const char* name, int* result) { + int get_int_property(picl_nodehdl_t nodeh, const char* name, int* result) { picl_propinfo_t pinfo; picl_prophdl_t proph; - if (picl_get_prop_by_name(nodeh, name, &proph) != PICL_SUCCESS || - picl_get_propinfo(proph, &pinfo) != PICL_SUCCESS) { + if (_picl_get_prop_by_name(nodeh, name, &proph) != PICL_SUCCESS || + _picl_get_propinfo(proph, &pinfo) != PICL_SUCCESS) { return PICL_FAILURE; } @@ -52,13 +85,13 @@ } if (pinfo.size == sizeof(int64_t)) { int64_t val; - if (picl_get_propval(proph, &val, sizeof(int64_t)) != PICL_SUCCESS) { + if (_picl_get_propval(proph, &val, sizeof(int64_t)) != PICL_SUCCESS) { return PICL_FAILURE; } *result = static_cast(val); } else if (pinfo.size == sizeof(int32_t)) { int32_t val; - if (picl_get_propval(proph, &val, sizeof(int32_t)) != PICL_SUCCESS) { + if (_picl_get_propval(proph, &val, sizeof(int32_t)) != PICL_SUCCESS) { return PICL_FAILURE; } *result = static_cast(val); @@ -72,6 +105,7 @@ // Visitor and a state machine that visits integer properties and verifies that the // values are the same. Stores the unique value observed. class UniqueValueVisitor { + PICL *_picl; enum { INITIAL, // Start state, no assignments happened ASSIGNED, // Assigned a value @@ -79,7 +113,7 @@ } _state; int _value; public: - UniqueValueVisitor() : _state(INITIAL) { } + UniqueValueVisitor(PICL* picl) : _picl(picl), _state(INITIAL) { } int value() { assert(_state == ASSIGNED, "Precondition"); return _value; @@ -96,9 +130,10 @@ static int visit(picl_nodehdl_t nodeh, const char* name, void *arg) { UniqueValueVisitor *state = static_cast(arg); + PICL* picl = state->_picl; assert(!state->is_inconsistent(), "Precondition"); int curr; - if (PICL::get_int_property(nodeh, name, &curr) == PICL_SUCCESS) { + if (picl->get_int_property(nodeh, name, &curr) == PICL_SUCCESS) { if (!state->is_assigned()) { // first iteration state->set_value(curr); } else if (curr != state->value()) { // following iterations @@ -122,32 +157,36 @@ return UniqueValueVisitor::visit(nodeh, "l2-cache-line-size", state); } - PICL() : _L1_data_cache_line_size(0), _L2_cache_line_size(0) { - if (picl_initialize() == PICL_SUCCESS) { + PICL() : _L1_data_cache_line_size(0), _L2_cache_line_size(0), _dl_handle(NULL) { + if (!open_library()) { + return; + } + if (_picl_initialize() == PICL_SUCCESS) { picl_nodehdl_t rooth; - if (picl_get_root(&rooth) == PICL_SUCCESS) { - UniqueValueVisitor L1_state; + if (_picl_get_root(&rooth) == PICL_SUCCESS) { + UniqueValueVisitor L1_state(this); // Visit all "cpu" class instances - picl_walk_tree_by_class(rooth, "cpu", &L1_state, PICL_get_l1_data_cache_line_size_helper); + _picl_walk_tree_by_class(rooth, "cpu", &L1_state, PICL_get_l1_data_cache_line_size_helper); if (L1_state.is_initial()) { // Still initial, iteration found no values // Try walk all "core" class instances, it might be a Fujitsu machine - picl_walk_tree_by_class(rooth, "core", &L1_state, PICL_get_l1_data_cache_line_size_helper); + _picl_walk_tree_by_class(rooth, "core", &L1_state, PICL_get_l1_data_cache_line_size_helper); } if (L1_state.is_assigned()) { // Is there a value? 
_L1_data_cache_line_size = L1_state.value(); } - UniqueValueVisitor L2_state; - picl_walk_tree_by_class(rooth, "cpu", &L2_state, PICL_get_l2_cache_line_size_helper); + UniqueValueVisitor L2_state(this); + _picl_walk_tree_by_class(rooth, "cpu", &L2_state, PICL_get_l2_cache_line_size_helper); if (L2_state.is_initial()) { - picl_walk_tree_by_class(rooth, "core", &L2_state, PICL_get_l2_cache_line_size_helper); + _picl_walk_tree_by_class(rooth, "core", &L2_state, PICL_get_l2_cache_line_size_helper); } if (L2_state.is_assigned()) { _L2_cache_line_size = L2_state.value(); } } - picl_shutdown(); + _picl_shutdown(); } + close_library(); } unsigned int L1_data_cache_line_size() const { return _L1_data_cache_line_size; } @@ -161,6 +200,43 @@ return PICL::get_l2_cache_line_size(nodeh, result); } +template +bool PICL::bind(FuncType& func, const char* name) { + func = reinterpret_cast(dlsym(_dl_handle, name)); + return func != NULL; +} + +bool PICL::bind_library_functions() { + assert(_dl_handle != NULL, "library should be open"); + return bind(_picl_initialize, "picl_initialize" ) && + bind(_picl_shutdown, "picl_shutdown" ) && + bind(_picl_get_root, "picl_get_root" ) && + bind(_picl_walk_tree_by_class, "picl_walk_tree_by_class") && + bind(_picl_get_prop_by_name, "picl_get_prop_by_name" ) && + bind(_picl_get_propval, "picl_get_propval" ) && + bind(_picl_get_propinfo, "picl_get_propinfo" ); +} + +bool PICL::open_library() { + _dl_handle = dlopen("libpicl.so.1", RTLD_LAZY); + if (_dl_handle == NULL) { + warning("PICL (libpicl.so.1) is missing. Performance will not be optimal."); + return false; + } + if (!bind_library_functions()) { + assert(false, "unexpected PICL API change"); + close_library(); + return false; + } + return true; +} + +void PICL::close_library() { + assert(_dl_handle != NULL, "library should be open"); + dlclose(_dl_handle); + _dl_handle = NULL; +} + // We need to keep these here as long as we have to build on Solaris // versions before 10. #ifndef SI_ARCHITECTURE_32 diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/c1/c1_globals.hpp --- a/src/share/vm/c1/c1_globals.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/c1/c1_globals.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -287,9 +287,6 @@ develop(bool, InstallMethods, true, \ "Install methods at the end of successful compilations") \ \ - product(intx, CompilationRepeat, 0, \ - "Number of times to recompile method before returning result") \ - \ develop(intx, NMethodSizeLimit, (64*K)*wordSize, \ "Maximum size of a compiled method.") \ \ diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/ci/ciEnv.cpp --- a/src/share/vm/ci/ciEnv.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/ci/ciEnv.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -559,7 +559,12 @@ oop obj = cpool->resolved_references()->obj_at(cache_index); if (obj != NULL) { ciObject* ciobj = get_object(obj); - return ciConstant(T_OBJECT, ciobj); + if (ciobj->is_array()) { + return ciConstant(T_ARRAY, ciobj); + } else { + assert(ciobj->is_instance(), "should be an instance"); + return ciConstant(T_OBJECT, ciobj); + } } index = cpool->object_to_cp_index(cache_index); } @@ -586,8 +591,12 @@ } } ciObject* constant = get_object(string); - assert (constant->is_instance(), "must be an instance, or not? "); - return ciConstant(T_OBJECT, constant); + if (constant->is_array()) { + return ciConstant(T_ARRAY, constant); + } else { + assert (constant->is_instance(), "must be an instance, or not? 
"); + return ciConstant(T_OBJECT, constant); + } } else if (tag.is_klass() || tag.is_unresolved_klass()) { // 4881222: allow ldc to take a class type ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore_will_link, accessor); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/ci/ciTypeFlow.cpp --- a/src/share/vm/ci/ciTypeFlow.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/ci/ciTypeFlow.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -730,7 +730,7 @@ if (obj->is_null_object()) { push_null(); } else { - assert(obj->is_instance(), "must be java_mirror of klass"); + assert(obj->is_instance() || obj->is_array(), "must be java_mirror of klass"); push_object(obj->klass()); } } else { diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/classfile/classFileParser.cpp --- a/src/share/vm/classfile/classFileParser.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/classfile/classFileParser.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -2529,7 +2529,7 @@ Array* ClassFileParser::parse_methods(bool is_interface, AccessFlags* promoted_flags, bool* has_final_method, - bool* has_default_methods, + bool* declares_default_methods, TRAPS) { ClassFileStream* cfs = stream(); cfs->guarantee_more(2, CHECK_NULL); // length @@ -2548,11 +2548,11 @@ if (method->is_final()) { *has_final_method = true; } - if (is_interface && !(*has_default_methods) - && !method->is_abstract() && !method->is_static() - && !method->is_private()) { - // default method - *has_default_methods = true; + // declares_default_methods: declares concrete instance methods, any access flags + // used for interface initialization, and default method inheritance analysis + if (is_interface && !(*declares_default_methods) + && !method->is_abstract() && !method->is_static()) { + *declares_default_methods = true; } _methods->at_put(index, method()); } @@ -3691,6 +3691,7 @@ JvmtiCachedClassFileData *cached_class_file = NULL; Handle class_loader(THREAD, loader_data->class_loader()); bool has_default_methods = false; + bool declares_default_methods = false; ResourceMark rm(THREAD); ClassFileStream* cfs = stream(); @@ -3928,8 +3929,11 @@ Array* methods = parse_methods(access_flags.is_interface(), &promoted_flags, &has_final_method, - &has_default_methods, + &declares_default_methods, CHECK_(nullHandle)); + if (declares_default_methods) { + has_default_methods = true; + } // Additional attributes ClassAnnotationCollector parsed_annotations; @@ -4072,6 +4076,7 @@ this_klass->set_minor_version(minor_version); this_klass->set_major_version(major_version); this_klass->set_has_default_methods(has_default_methods); + this_klass->set_declares_default_methods(declares_default_methods); if (!host_klass.is_null()) { assert (this_klass->is_anonymous(), "should be the same"); @@ -4165,8 +4170,12 @@ tty->print("[Loaded %s from %s]\n", this_klass->external_name(), cfs->source()); } else if (class_loader.is_null()) { - if (THREAD->is_Java_thread()) { - Klass* caller = ((JavaThread*)THREAD)->security_get_caller_class(1); + Klass* caller = + THREAD->is_Java_thread() + ? 
((JavaThread*)THREAD)->security_get_caller_class(1) + : NULL; + // caller can be NULL, for example, during a JVMTI VM_Init hook + if (caller != NULL) { tty->print("[Loaded %s by instance of %s]\n", this_klass->external_name(), InstanceKlass::cast(caller)->external_name()); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/classfile/classFileParser.hpp --- a/src/share/vm/classfile/classFileParser.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/classfile/classFileParser.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -247,7 +247,7 @@ Array* parse_methods(bool is_interface, AccessFlags* promoted_flags, bool* has_final_method, - bool* has_default_method, + bool* declares_default_methods, TRAPS); intArray* sort_methods(Array* methods); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/compiler/compileBroker.cpp --- a/src/share/vm/compiler/compileBroker.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/compiler/compileBroker.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -183,9 +183,8 @@ long CompileBroker::_peak_compilation_time = 0; -CompileQueue* CompileBroker::_c2_method_queue = NULL; -CompileQueue* CompileBroker::_c1_method_queue = NULL; -CompileTask* CompileBroker::_task_free_list = NULL; +CompileQueue* CompileBroker::_c2_compile_queue = NULL; +CompileQueue* CompileBroker::_c1_compile_queue = NULL; GrowableArray* CompileBroker::_compiler_threads = NULL; @@ -253,13 +252,56 @@ // By convention, the compiling thread is responsible for // recycling a non-blocking CompileTask. - CompileBroker::free_task(task); + CompileTask::free(task); } } -// ------------------------------------------------------------------ -// CompileTask::initialize +CompileTask* CompileTask::_task_free_list = NULL; +#ifdef ASSERT +int CompileTask::_num_allocated_tasks = 0; +#endif +/** + * Allocate a CompileTask, from the free list if possible. + */ +CompileTask* CompileTask::allocate() { + MutexLocker locker(CompileTaskAlloc_lock); + CompileTask* task = NULL; + + if (_task_free_list != NULL) { + task = _task_free_list; + _task_free_list = task->next(); + task->set_next(NULL); + } else { + task = new CompileTask(); + DEBUG_ONLY(_num_allocated_tasks++;) + assert (_num_allocated_tasks < 10000, "Leaking compilation tasks?"); + task->set_next(NULL); + task->set_is_free(true); + } + assert(task->is_free(), "Task must be free."); + task->set_is_free(false); + return task; +} + + +/** + * Add a task to the free list. + */ +void CompileTask::free(CompileTask* task) { + MutexLocker locker(CompileTaskAlloc_lock); + if (!task->is_free()) { + task->set_code(NULL); + assert(!task->lock()->is_locked(), "Should not be locked when freed"); + JNIHandles::destroy_global(task->_method_holder); + JNIHandles::destroy_global(task->_hot_method_holder); + + task->set_is_free(true); + task->set_next(_task_free_list); + _task_free_list = task; + } +} + void CompileTask::initialize(int compile_id, methodHandle method, int osr_bci, @@ -318,15 +360,6 @@ if (nm == NULL) _code_handle = NULL; // drop the handle also } -// ------------------------------------------------------------------ -// CompileTask::free -void CompileTask::free() { - set_code(NULL); - assert(!_lock->is_locked(), "Should not be locked when freed"); - JNIHandles::destroy_global(_method_holder); - JNIHandles::destroy_global(_hot_method_holder); -} - void CompileTask::mark_on_stack() { // Mark these methods as something redefine classes cannot remove. 
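
The CompileTask::allocate()/CompileTask::free() pair introduced above moves the task free list from CompileBroker into CompileTask itself: allocation takes CompileTaskAlloc_lock, reuses a recycled task when one is available, and free() pushes the task back instead of deleting it. A minimal sketch of that mutex-guarded free-list discipline, using standard C++ and made-up names rather than HotSpot's Mutex and CompileTask:

    #include <mutex>
    #include <cassert>

    struct Task {
      Task* next = nullptr;
      bool  is_free = true;
      // ... payload would go here ...
    };

    static std::mutex task_alloc_lock;
    static Task*      task_free_list = nullptr;

    Task* allocate_task() {
      std::lock_guard<std::mutex> guard(task_alloc_lock);
      Task* task;
      if (task_free_list != nullptr) {   // reuse a recycled task
        task = task_free_list;
        task_free_list = task->next;
        task->next = nullptr;
      } else {                           // free list empty: allocate a new one
        task = new Task();
      }
      assert(task->is_free);
      task->is_free = false;
      return task;
    }

    void free_task(Task* task) {
      std::lock_guard<std::mutex> guard(task_alloc_lock);
      if (!task->is_free) {              // push back onto the free list
        task->is_free = true;
        task->next = task_free_list;
        task_free_list = task;
      }
    }

Recycling keeps allocation cheap and makes the debug-only leak check above (the _num_allocated_tasks assertion) meaningful.
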
@@ -594,9 +627,12 @@ -// Add a CompileTask to a CompileQueue +/** + * Add a CompileTask to a CompileQueue + */ void CompileQueue::add(CompileTask* task) { assert(lock()->owned_by_self(), "must own lock"); + assert(!CompileBroker::is_compilation_disabled_forever(), "Do not add task if compilation is turned off forever"); task->set_next(NULL); task->set_prev(NULL); @@ -618,9 +654,7 @@ // Mark the method as being in the compile queue. task->method()->set_queued_for_compilation(); - if (CIPrintCompileQueue) { - print(); - } + NOT_PRODUCT(print();) if (LogCompilation && xtty != NULL) { task->log_task_queued(); @@ -630,14 +664,32 @@ lock()->notify_all(); } -void CompileQueue::delete_all() { - assert(lock()->owned_by_self(), "must own lock"); - if (_first != NULL) { - for (CompileTask* task = _first; task != NULL; task = task->next()) { - delete task; +/** + * Empties compilation queue by putting all compilation tasks onto + * a freelist. Furthermore, the method wakes up all threads that are + * waiting on a compilation task to finish. This can happen if background + * compilation is disabled. + */ +void CompileQueue::free_all() { + MutexLocker mu(lock()); + CompileTask* next = _first; + + // Iterate over all tasks in the compile queue + while (next != NULL) { + CompileTask* current = next; + next = current->next(); + { + // Wake up thread that blocks on the compile task. + MutexLocker ct_lock(current->lock()); + current->lock()->notify(); } - _first = NULL; + // Put the task back on the freelist. + CompileTask::free(current); } + _first = NULL; + + // Wake up all threads that block on the queue. + lock()->notify_all(); } // ------------------------------------------------------------------ @@ -767,18 +819,24 @@ } } -// ------------------------------------------------------------------ -// CompileQueue::print +#ifndef PRODUCT +/** + * Print entire compilation queue. 
+ */ void CompileQueue::print() { - tty->print_cr("Contents of %s", name()); - tty->print_cr("----------------------"); - CompileTask* task = _first; - while (task != NULL) { - task->print_line(); - task = task->next(); + if (CIPrintCompileQueue) { + ttyLocker ttyl; + tty->print_cr("Contents of %s", name()); + tty->print_cr("----------------------"); + CompileTask* task = _first; + while (task != NULL) { + task->print_line(); + task = task->next(); + } + tty->print_cr("----------------------"); } - tty->print_cr("----------------------"); } +#endif // PRODUCT CompilerCounters::CompilerCounters(const char* thread_name, int instance, TRAPS) { @@ -851,9 +909,6 @@ _compilers[1] = new SharkCompiler(); #endif // SHARK - // Initialize the CompileTask free list - _task_free_list = NULL; - // Start the CompilerThreads init_compiler_threads(c1_count, c2_count); // totalTime performance counter is always created as it is required @@ -1046,11 +1101,11 @@ #endif // !ZERO && !SHARK // Initialize the compilation queue if (c2_compiler_count > 0) { - _c2_method_queue = new CompileQueue("C2MethodQueue", MethodCompileQueue_lock); + _c2_compile_queue = new CompileQueue("C2 CompileQueue", MethodCompileQueue_lock); _compilers[1]->set_num_compiler_threads(c2_compiler_count); } if (c1_compiler_count > 0) { - _c1_method_queue = new CompileQueue("C1MethodQueue", MethodCompileQueue_lock); + _c1_compile_queue = new CompileQueue("C1 CompileQueue", MethodCompileQueue_lock); _compilers[0]->set_num_compiler_threads(c1_compiler_count); } @@ -1065,7 +1120,7 @@ sprintf(name_buffer, "C2 CompilerThread%d", i); CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK); // Shark and C2 - CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, _compilers[1], CHECK); + CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], CHECK); _compiler_threads->append(new_thread); } @@ -1074,7 +1129,7 @@ sprintf(name_buffer, "C1 CompilerThread%d", i); CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK); // C1 - CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, _compilers[0], CHECK); + CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], CHECK); _compiler_threads->append(new_thread); } @@ -1084,14 +1139,19 @@ } -// Set the methods on the stack as on_stack so that redefine classes doesn't -// reclaim them +/** + * Set the methods on the stack as on_stack so that redefine classes doesn't + * reclaim them. This method is executed at a safepoint. + */ void CompileBroker::mark_on_stack() { - if (_c2_method_queue != NULL) { - _c2_method_queue->mark_on_stack(); + assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); + // Since we are at a safepoint, we do not need a lock to access + // the compile queues. + if (_c2_compile_queue != NULL) { + _c2_compile_queue->mark_on_stack(); } - if (_c1_method_queue != NULL) { - _c1_method_queue->mark_on_stack(); + if (_c1_compile_queue != NULL) { + _c1_compile_queue->mark_on_stack(); } } @@ -1107,7 +1167,7 @@ const char* comment, Thread* thread) { // do nothing if compiler thread(s) is not available - if (!_initialized ) { + if (!_initialized) { return; } @@ -1154,7 +1214,7 @@ // If this method is already in the compile queue, then // we do not block the current thread. 
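
CompileQueue::free_all() above replaces delete_all(): rather than deleting queued tasks, it recycles them onto the free list, wakes any thread blocked on an individual task, and finally wakes everything blocked on the queue lock so compiler threads can exit. A small sketch of that drain-and-wake shape, with illustrative standard-library types in place of HotSpot's Monitor:

    #include <condition_variable>
    #include <mutex>
    #include <vector>

    struct QueuedTask {
      std::mutex              lock;
      std::condition_variable cond;
      bool                    abandoned = false;
    };

    struct Queue {
      std::mutex               lock;
      std::condition_variable  cond;
      std::vector<QueuedTask*> tasks;

      void free_all() {
        std::lock_guard<std::mutex> guard(lock);
        for (QueuedTask* t : tasks) {
          {
            std::lock_guard<std::mutex> task_guard(t->lock);
            t->abandoned = true;         // let the waiter see why it was woken
            t->cond.notify_one();        // wake the thread blocked on this task
          }
          // a real implementation would move t onto a free list here
        }
        tasks.clear();
        cond.notify_all();               // wake threads blocked on the queue itself
      }
    };
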
- if (compilation_is_in_queue(method, osr_bci)) { + if (compilation_is_in_queue(method)) { // We may want to decay our counter a bit here to prevent // multiple denied requests for compilation. This is an // open compilation policy issue. Note: The other possibility, @@ -1193,7 +1253,7 @@ // Make sure the method has not slipped into the queues since // last we checked; note that those checks were "fast bail-outs". // Here we need to be more careful, see 14012000 below. - if (compilation_is_in_queue(method, osr_bci)) { + if (compilation_is_in_queue(method)) { return; } @@ -1214,7 +1274,7 @@ } // Should this thread wait for completion of the compile? - blocking = is_compile_blocking(method, osr_bci); + blocking = is_compile_blocking(); // We will enter the compilation in the queue. // 14012000: Note that this sets the queued_for_compile bits in @@ -1406,19 +1466,17 @@ } -// ------------------------------------------------------------------ -// CompileBroker::compilation_is_in_queue -// -// See if this compilation is already requested. -// -// Implementation note: there is only a single "is in queue" bit -// for each method. This means that the check below is overly -// conservative in the sense that an osr compilation in the queue -// will block a normal compilation from entering the queue (and vice -// versa). This can be remedied by a full queue search to disambiguate -// cases. If it is deemed profitible, this may be done. -bool CompileBroker::compilation_is_in_queue(methodHandle method, - int osr_bci) { +/** + * See if this compilation is already requested. + * + * Implementation note: there is only a single "is in queue" bit + * for each method. This means that the check below is overly + * conservative in the sense that an osr compilation in the queue + * will block a normal compilation from entering the queue (and vice + * versa). This can be remedied by a full queue search to disambiguate + * cases. If it is deemed profitable, this may be done. + */ +bool CompileBroker::compilation_is_in_queue(methodHandle method) { return method->queued_for_compilation(); } @@ -1498,13 +1556,11 @@ #endif } - -// ------------------------------------------------------------------ -// CompileBroker::is_compile_blocking -// -// Should the current thread be blocked until this compilation request -// has been fulfilled? -bool CompileBroker::is_compile_blocking(methodHandle method, int osr_bci) { +/** + * Should the current thread block until this compilation request + * has been fulfilled? + */ +bool CompileBroker::is_compile_blocking() { assert(!InstanceRefKlass::owns_pending_list_lock(JavaThread::current()), "possible deadlock"); return !BackgroundCompilation; } @@ -1532,7 +1588,7 @@ int hot_count, const char* comment, bool blocking) { - CompileTask* new_task = allocate_task(); + CompileTask* new_task = CompileTask::allocate(); new_task->initialize(compile_id, method, osr_bci, comp_level, hot_method, hot_count, comment, blocking); @@ -1541,75 +1597,52 @@ } -// ------------------------------------------------------------------ -// CompileBroker::allocate_task -// -// Allocate a CompileTask, from the free list if possible. 
-CompileTask* CompileBroker::allocate_task() { - MutexLocker locker(CompileTaskAlloc_lock); - CompileTask* task = NULL; - if (_task_free_list != NULL) { - task = _task_free_list; - _task_free_list = task->next(); - task->set_next(NULL); - } else { - task = new CompileTask(); - task->set_next(NULL); - } - return task; -} - - -// ------------------------------------------------------------------ -// CompileBroker::free_task -// -// Add a task to the free list. -void CompileBroker::free_task(CompileTask* task) { - MutexLocker locker(CompileTaskAlloc_lock); - task->free(); - task->set_next(_task_free_list); - _task_free_list = task; -} - - -// ------------------------------------------------------------------ -// CompileBroker::wait_for_completion -// -// Wait for the given method CompileTask to complete. +/** + * Wait for the compilation task to complete. + */ void CompileBroker::wait_for_completion(CompileTask* task) { if (CIPrintCompileQueue) { + ttyLocker ttyl; tty->print_cr("BLOCKING FOR COMPILE"); } assert(task->is_blocking(), "can only wait on blocking task"); - JavaThread *thread = JavaThread::current(); + JavaThread* thread = JavaThread::current(); thread->set_blocked_on_compilation(true); methodHandle method(thread, task->method()); { MutexLocker waiter(task->lock(), thread); - while (!task->is_complete()) + while (!task->is_complete() && !is_compilation_disabled_forever()) { task->lock()->wait(); + } } + + thread->set_blocked_on_compilation(false); + if (is_compilation_disabled_forever()) { + CompileTask::free(task); + return; + } + // It is harmless to check this status without the lock, because // completion is a stable property (until the task object is recycled). assert(task->is_complete(), "Compilation should have completed"); assert(task->code_handle() == NULL, "must be reset"); - thread->set_blocked_on_compilation(false); - // By convention, the waiter is responsible for recycling a // blocking CompileTask. Since there is only one waiter ever // waiting on a CompileTask, we know that no one else will // be using this CompileTask; we can free it. - free_task(task); + CompileTask::free(task); } -// Initialize compiler thread(s) + compiler object(s). The postcondition -// of this function is that the compiler runtimes are initialized and that -//compiler threads can start compiling. +/** + * Initialize compiler thread(s) + compiler object(s). The postcondition + * of this function is that the compiler runtimes are initialized and that + * compiler threads can start compiling. + */ bool CompileBroker::init_compiler_runtime() { CompilerThread* thread = CompilerThread::current(); AbstractCompiler* comp = thread->compiler(); @@ -1646,7 +1679,6 @@ disable_compilation_forever(); // If compiler initialization failed, no compiler thread that is specific to a // particular compiler runtime will ever start to compile methods. - shutdown_compiler_runtime(comp, thread); return false; } @@ -1660,9 +1692,11 @@ return true; } -// If C1 and/or C2 initialization failed, we shut down all compilation. -// We do this to keep things simple. This can be changed if it ever turns out to be -// a problem. +/** + * If C1 and/or C2 initialization failed, we shut down all compilation. + * We do this to keep things simple. This can be changed if it ever turns + * out to be a problem. 
+ */ void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) { // Free buffer blob, if allocated if (thread->get_buffer_blob() != NULL) { @@ -1674,28 +1708,25 @@ // There are two reasons for shutting down the compiler // 1) compiler runtime initialization failed // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing - warning("Shutting down compiler %s (no space to run compilers)", comp->name()); + warning("%s initialization failed. Shutting down all compilers", comp->name()); // Only one thread per compiler runtime object enters here // Set state to shut down comp->set_shut_down(); - MutexLocker mu(MethodCompileQueue_lock, thread); - CompileQueue* queue; - if (_c1_method_queue != NULL) { - _c1_method_queue->delete_all(); - queue = _c1_method_queue; - _c1_method_queue = NULL; - delete _c1_method_queue; + // Delete all queued compilation tasks to make compiler threads exit faster. + if (_c1_compile_queue != NULL) { + _c1_compile_queue->free_all(); } - if (_c2_method_queue != NULL) { - _c2_method_queue->delete_all(); - queue = _c2_method_queue; - _c2_method_queue = NULL; - delete _c2_method_queue; + if (_c2_compile_queue != NULL) { + _c2_compile_queue->free_all(); } + // Set flags so that we continue execution with using interpreter only. + UseCompiler = false; + UseInterpreter = true; + // We could delete compiler runtimes also. However, there are references to // the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) which then // fail. This can be done later if necessary. @@ -1781,22 +1812,6 @@ if (method()->number_of_breakpoints() == 0) { // Compile the method. if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) { -#ifdef COMPILER1 - // Allow repeating compilations for the purpose of benchmarking - // compile speed. This is not useful for customers. 
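
Related to the shutdown path above: wait_for_completion() now waits until the task is complete or compilation has been disabled forever, and in the latter case simply frees the task instead of asserting completion, so a Java thread blocked on a compile cannot hang after the compilers shut down. A sketch of that wait-with-cancellation predicate, again with stand-in types rather than HotSpot's Monitor:

    #include <condition_variable>
    #include <mutex>
    #include <atomic>

    struct BlockingTask {
      std::mutex              lock;
      std::condition_variable cond;
      bool                    complete = false;
    };

    std::atomic<bool> compilation_disabled_forever{false};

    void wait_for_completion(BlockingTask& task) {
      std::unique_lock<std::mutex> guard(task.lock);
      task.cond.wait(guard, [&] {
        return task.complete || compilation_disabled_forever.load();
      });
      // if compilation was disabled, the caller just recycles the task and
      // continues in the interpreter instead of asserting completion
    }

Whoever flips the cancellation flag must also wake the waiters, which is exactly what CompileQueue::free_all() does.
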
- if (CompilationRepeat != 0) { - int compile_count = CompilationRepeat; - while (compile_count > 0) { - invoke_compiler_on_method(task); - nmethod* nm = method->code(); - if (nm != NULL) { - nm->make_zombie(); - method->clear_code(); - } - compile_count--; - } - } -#endif /* COMPILER1 */ invoke_compiler_on_method(task); } else { // After compilation is disabled, remove remaining methods from queue diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/compiler/compileBroker.hpp --- a/src/share/vm/compiler/compileBroker.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/compiler/compileBroker.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -40,6 +40,11 @@ friend class VMStructs; private: + static CompileTask* _task_free_list; +#ifdef ASSERT + static int _num_allocated_tasks; +#endif + Monitor* _lock; uint _compile_id; Method* _method; @@ -52,7 +57,7 @@ int _num_inlined_bytecodes; nmethodLocker* _code_handle; // holder of eventual result CompileTask* _next, *_prev; - + bool _is_free; // Fields used for logging why the compilation was initiated: jlong _time_queued; // in units of os::elapsed_counter() Method* _hot_method; // which method actually triggered this task @@ -70,7 +75,8 @@ methodHandle hot_method, int hot_count, const char* comment, bool is_blocking); - void free(); + static CompileTask* allocate(); + static void free(CompileTask* task); int compile_id() const { return _compile_id; } Method* method() const { return _method; } @@ -99,6 +105,8 @@ void set_next(CompileTask* next) { _next = next; } CompileTask* prev() const { return _prev; } void set_prev(CompileTask* prev) { _prev = prev; } + bool is_free() const { return _is_free; } + void set_is_free(bool val) { _is_free = val; } private: static void print_compilation_impl(outputStream* st, Method* method, int compile_id, int comp_level, @@ -225,8 +233,8 @@ // Redefine Classes support void mark_on_stack(); - void delete_all(); - void print(); + void free_all(); + NOT_PRODUCT (void print();) ~CompileQueue() { assert (is_empty(), " Compile Queue must be empty"); @@ -279,9 +287,8 @@ static int _last_compile_level; static char _last_method_compiled[name_buffer_length]; - static CompileQueue* _c2_method_queue; - static CompileQueue* _c1_method_queue; - static CompileTask* _task_free_list; + static CompileQueue* _c2_compile_queue; + static CompileQueue* _c1_compile_queue; static GrowableArray* _compiler_threads; @@ -334,7 +341,7 @@ static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count); static bool compilation_is_complete (methodHandle method, int osr_bci, int comp_level); static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level); - static bool is_compile_blocking (methodHandle method, int osr_bci); + static bool is_compile_blocking (); static void preload_classes (methodHandle method, TRAPS); static CompileTask* create_compile_task(CompileQueue* queue, @@ -346,8 +353,6 @@ int hot_count, const char* comment, bool blocking); - static CompileTask* allocate_task(); - static void free_task(CompileTask* task); static void wait_for_completion(CompileTask* task); static void invoke_compiler_on_method(CompileTask* task); @@ -365,8 +370,8 @@ const char* comment, Thread* thread); static CompileQueue* compile_queue(int comp_level) { - if (is_c2_compile(comp_level)) return _c2_method_queue; - if (is_c1_compile(comp_level)) return _c1_method_queue; + if (is_c2_compile(comp_level)) return _c2_compile_queue; + if (is_c1_compile(comp_level)) return _c1_compile_queue; return NULL; } static bool 
init_compiler_runtime(); @@ -384,7 +389,7 @@ return NULL; } - static bool compilation_is_in_queue(methodHandle method, int osr_bci); + static bool compilation_is_in_queue(methodHandle method); static int queue_size(int comp_level) { CompileQueue *q = compile_queue(comp_level); return q != NULL ? q->size() : 0; diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -2478,6 +2478,7 @@ unsigned int gc_count_before; unsigned int old_marking_count_before; + unsigned int full_gc_count_before; bool retry_gc; do { @@ -2488,6 +2489,7 @@ // Read the GC count while holding the Heap_lock gc_count_before = total_collections(); + full_gc_count_before = total_full_collections(); old_marking_count_before = _old_marking_cycles_started; } @@ -2532,7 +2534,7 @@ VMThread::execute(&op); } else { // Schedule a Full GC. - VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause); + VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); VMThread::execute(&op); } } diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -1249,7 +1249,7 @@ // The same as above but assume that the caller holds the Heap_lock. void collect_locked(GCCause::Cause cause); - virtual void copy_allocation_context_stats(const jint* contexts, + virtual bool copy_allocation_context_stats(const jint* contexts, jlong* totals, jbyte* accuracy, jint len); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/gc_implementation/g1/g1CollectedHeap_ext.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap_ext.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap_ext.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -25,8 +25,9 @@ #include "precompiled.hpp" #include "gc_implementation/g1/g1CollectedHeap.hpp" -void G1CollectedHeap::copy_allocation_context_stats(const jint* contexts, +bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts, jlong* totals, jbyte* accuracy, jint len) { + return false; } diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/gc_implementation/g1/vm_operations_g1.hpp --- a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -60,7 +60,7 @@ VM_G1CollectFull(unsigned int gc_count_before, unsigned int full_gc_count_before, GCCause::Cause cause) - : VM_GC_Operation(gc_count_before, cause, full_gc_count_before) { } + : VM_GC_Operation(gc_count_before, cause, full_gc_count_before, true) { } virtual VMOp_Type type() const { return VMOp_G1CollectFull; } virtual void doit(); virtual const char* name() const { diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/gc_interface/collectedHeap.hpp --- a/src/share/vm/gc_interface/collectedHeap.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/gc_interface/collectedHeap.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -641,10 +641,13 @@ // For each context in contexts, set the corresponding entries in the totals // and accuracy arrays to the current values held by the statistics. Each // array should be of length len. 
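
For the collectorPolicy.cpp change above, where the generation alignment is now always made large-page aware via lcm(os::large_page_size(), alignment), here is a tiny worked example of the computation; the 2M page size and 64K card-table constraint are example values only, not measured ones:

    #include <cstdint>
    #include <cstdio>

    uint64_t gcd(uint64_t a, uint64_t b) { return b == 0 ? a : gcd(b, a % b); }
    uint64_t lcm(uint64_t a, uint64_t b) { return a / gcd(a, b) * b; }

    int main() {
      uint64_t card_table_alignment = 64 * 1024;        // example remembered-set constraint
      uint64_t large_page_size      = 2 * 1024 * 1024;  // e.g. 2M pages
      uint64_t alignment = lcm(large_page_size, card_table_alignment);
      printf("alignment = %llu\n", (unsigned long long)alignment);  // 2097152
      return 0;
    }
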
- virtual void copy_allocation_context_stats(const jint* contexts, + // Returns true if there are more stats available. + virtual bool copy_allocation_context_stats(const jint* contexts, jlong* totals, jbyte* accuracy, - jint len) { } + jint len) { + return false; + } /////////////// Unit tests /////////////// diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/memory/collectorPolicy.cpp --- a/src/share/vm/memory/collectorPolicy.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/memory/collectorPolicy.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -183,13 +183,9 @@ // Requirements of any new remembered set implementations must be added here. size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable); - // Parallel GC does its own alignment of the generations to avoid requiring a - // large page (256M on some platforms) for the permanent generation. The - // other collectors should also be updated to do their own alignment and then - // this use of lcm() should be removed. - if (UseLargePages && !UseParallelGC) { - // in presence of large pages we have to make sure that our - // alignment is large page aware + if (UseLargePages) { + // In presence of large pages we have to make sure that our + // alignment is large page aware. alignment = lcm(os::large_page_size(), alignment); } diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/memory/metaspace.cpp --- a/src/share/vm/memory/metaspace.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/memory/metaspace.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -3141,7 +3141,7 @@ MetaspaceGC::initialize(); // Initialize the alignment for shared spaces. - int max_alignment = os::vm_page_size(); + int max_alignment = os::vm_allocation_granularity(); size_t cds_total = 0; MetaspaceShared::set_max_alignment(max_alignment); @@ -3155,6 +3155,16 @@ SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); + // the min_misc_code_size estimate is based on MetaspaceShared::generate_vtable_methods() + uintx min_misc_code_size = align_size_up( + (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) * + (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size, + max_alignment); + + if (SharedMiscCodeSize < min_misc_code_size) { + report_out_of_shared_space(SharedMiscCode); + } + // Initialize with the sum of the shared space sizes. The read-only // and read write metaspace chunks will be allocated out of this and the // remainder is the misc code and data chunks. diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/memory/metaspaceShared.hpp --- a/src/share/vm/memory/metaspaceShared.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/memory/metaspaceShared.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -57,11 +57,16 @@ static bool _archive_loading_failed; public: enum { - vtbl_list_size = 17, // number of entries in the shared space vtable list. - num_virtuals = 200 // maximum number of virtual functions - // If virtual functions are added to Metadata, - // this number needs to be increased. Also, - // SharedMiscCodeSize will need to be increased. + vtbl_list_size = 17, // number of entries in the shared space vtable list. + num_virtuals = 200, // maximum number of virtual functions + // If virtual functions are added to Metadata, + // this number needs to be increased. Also, + // SharedMiscCodeSize will need to be increased. 
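
The metaspace.cpp hunk above adds a sanity check that SharedMiscCodeSize is at least large enough for the generated vtable stubs. A back-of-the-envelope version of that bound, using the constants defined in metaspaceShared.hpp just below; the 64-bit pointer size and the 64K alignment are assumptions for illustration (the real alignment is whatever os::vm_allocation_granularity() returns):

    #include <cstddef>
    #include <cstdio>

    static size_t align_size_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);   // alignment is a power of two
    }

    int main() {
      const size_t num_virtuals          = 200;
      const size_t vtbl_list_size        = 17;
      const size_t vtbl_method_size      = 16;
      const size_t vtbl_common_code_size = 1024;
      const size_t max_alignment         = 64 * 1024;     // example only

      size_t min_misc_code_size = align_size_up(
          num_virtuals * vtbl_list_size * (sizeof(void*) + vtbl_method_size)
              + vtbl_common_code_size,
          max_alignment);
      printf("min_misc_code_size = %zu bytes\n", min_misc_code_size);  // 131072
      return 0;
    }
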
+ // The following 2 sizes were based on + // MetaspaceShared::generate_vtable_methods() + vtbl_method_size = 16, // conservative size of the mov1 and jmp instructions + // for the x64 platform + vtbl_common_code_size = (1*K) // conservative size of the "common_code" for the x64 platform }; enum { diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/oops/instanceKlass.cpp --- a/src/share/vm/oops/instanceKlass.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/oops/instanceKlass.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -780,6 +780,41 @@ } } +// Eagerly initialize superinterfaces that declare default methods (concrete instance: any access) +void InstanceKlass::initialize_super_interfaces(instanceKlassHandle this_oop, TRAPS) { + if (this_oop->has_default_methods()) { + for (int i = 0; i < this_oop->local_interfaces()->length(); ++i) { + Klass* iface = this_oop->local_interfaces()->at(i); + InstanceKlass* ik = InstanceKlass::cast(iface); + if (ik->should_be_initialized()) { + if (ik->has_default_methods()) { + ik->initialize_super_interfaces(ik, THREAD); + } + // Only initialize() interfaces that "declare" concrete methods. + // has_default_methods drives searching superinterfaces since it + // means has_default_methods in its superinterface hierarchy + if (!HAS_PENDING_EXCEPTION && ik->declares_default_methods()) { + ik->initialize(THREAD); + } + if (HAS_PENDING_EXCEPTION) { + Handle e(THREAD, PENDING_EXCEPTION); + CLEAR_PENDING_EXCEPTION; + { + EXCEPTION_MARK; + // Locks object, set state, and notify all waiting threads + this_oop->set_initialization_state_and_notify( + initialization_error, THREAD); + + // ignore any exception thrown, superclass initialization error is + // thrown below + CLEAR_PENDING_EXCEPTION; + } + THROW_OOP(e()); + } + } + } + } +} void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) { // Make sure klass is linked (verified) before initialization @@ -859,33 +894,11 @@ } } + // Recursively initialize any superinterfaces that declare default methods + // Only need to recurse if has_default_methods which includes declaring and + // inheriting default methods if (this_oop->has_default_methods()) { - // Step 7.5: initialize any interfaces which have default methods - for (int i = 0; i < this_oop->local_interfaces()->length(); ++i) { - Klass* iface = this_oop->local_interfaces()->at(i); - InstanceKlass* ik = InstanceKlass::cast(iface); - if (ik->has_default_methods() && ik->should_be_initialized()) { - ik->initialize(THREAD); - - if (HAS_PENDING_EXCEPTION) { - Handle e(THREAD, PENDING_EXCEPTION); - CLEAR_PENDING_EXCEPTION; - { - EXCEPTION_MARK; - // Locks object, set state, and notify all waiting threads - this_oop->set_initialization_state_and_notify( - initialization_error, THREAD); - - // ignore any exception thrown, superclass initialization error is - // thrown below - CLEAR_PENDING_EXCEPTION; - } - DTRACE_CLASSINIT_PROBE_WAIT( - super__failed, InstanceKlass::cast(this_oop()), -1, wait); - THROW_OOP(e()); - } - } - } + this_oop->initialize_super_interfaces(this_oop, CHECK); } // Step 8 diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/oops/instanceKlass.hpp --- a/src/share/vm/oops/instanceKlass.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/oops/instanceKlass.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -229,12 +229,13 @@ bool _has_unloaded_dependent; enum { - _misc_rewritten = 1 << 0, // methods rewritten. 
- _misc_has_nonstatic_fields = 1 << 1, // for sizing with UseCompressedOops - _misc_should_verify_class = 1 << 2, // allow caching of preverification - _misc_is_anonymous = 1 << 3, // has embedded _host_klass field - _misc_is_contended = 1 << 4, // marked with contended annotation - _misc_has_default_methods = 1 << 5 // class/superclass/implemented interfaces has default methods + _misc_rewritten = 1 << 0, // methods rewritten. + _misc_has_nonstatic_fields = 1 << 1, // for sizing with UseCompressedOops + _misc_should_verify_class = 1 << 2, // allow caching of preverification + _misc_is_anonymous = 1 << 3, // has embedded _host_klass field + _misc_is_contended = 1 << 4, // marked with contended annotation + _misc_has_default_methods = 1 << 5, // class/superclass/implemented interfaces has default methods + _misc_declares_default_methods = 1 << 6 // directly declares default methods (any access) }; u2 _misc_flags; u2 _minor_version; // minor version number of class file @@ -680,6 +681,17 @@ } } + bool declares_default_methods() const { + return (_misc_flags & _misc_declares_default_methods) != 0; + } + void set_declares_default_methods(bool b) { + if (b) { + _misc_flags |= _misc_declares_default_methods; + } else { + _misc_flags &= ~_misc_declares_default_methods; + } + } + // for adding methods, ConstMethod::UNSET_IDNUM means no more ids available inline u2 next_method_idnum(); void set_initial_method_idnum(u2 value) { _idnum_allocated_count = value; } @@ -1046,6 +1058,7 @@ static bool link_class_impl (instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS); static bool verify_code (instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS); static void initialize_impl (instanceKlassHandle this_oop, TRAPS); + static void initialize_super_interfaces (instanceKlassHandle this_oop, TRAPS); static void eager_initialize_impl (instanceKlassHandle this_oop); static void set_initialization_state_and_notify_impl (instanceKlassHandle this_oop, ClassState state, TRAPS); static void call_class_initializer_impl (instanceKlassHandle this_oop, TRAPS); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/oops/objArrayOop.hpp --- a/src/share/vm/oops/objArrayOop.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/oops/objArrayOop.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -45,9 +45,10 @@ private: // Give size of objArrayOop in HeapWords minus the header static int array_size(int length) { - const int OopsPerHeapWord = HeapWordSize/heapOopSize; + const uint OopsPerHeapWord = HeapWordSize/heapOopSize; assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0), "Else the following (new) computation would be in error"); + uint res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord; #ifdef ASSERT // The old code is left in for sanity-checking; it'll // go away pretty soon. XXX @@ -55,16 +56,15 @@ // oop->length() * HeapWordsPerOop; // With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer. 
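
A quick check of the arithmetic change in objArrayOopDesc::array_size() above: doing the round-up in unsigned arithmetic keeps the computation well defined even for the largest array lengths, where length + OopsPerHeapWord - 1 would overflow a signed int. The sketch assumes compressed oops on a 64-bit VM (4-byte heap oops in 8-byte heap words, so OopsPerHeapWord is 2); it is not HotSpot code:

    #include <climits>
    #include <cstdio>

    int main() {
      const unsigned OopsPerHeapWord = 8 / 4;             // HeapWordSize / heapOopSize
      int length = INT_MAX;                               // worst-case array length

      // (length + OopsPerHeapWord - 1) would overflow as a signed int here;
      // widening to unsigned makes the round-up well defined.
      unsigned words = ((unsigned)length + OopsPerHeapWord - 1) / OopsPerHeapWord;
      printf("heap words for length %d: %u\n", length, words);  // 1073741824
      return 0;
    }
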
// The oop elements are aligned up to wordSize - const int HeapWordsPerOop = heapOopSize/HeapWordSize; - int old_res; + const uint HeapWordsPerOop = heapOopSize/HeapWordSize; + uint old_res; if (HeapWordsPerOop > 0) { old_res = length * HeapWordsPerOop; } else { - old_res = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord; + old_res = align_size_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord; } + assert(res == old_res, "Inconsistency between old and new."); #endif // ASSERT - int res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord; - assert(res == old_res, "Inconsistency between old and new."); return res; } diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/oops/typeArrayOop.hpp --- a/src/share/vm/oops/typeArrayOop.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/oops/typeArrayOop.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -150,7 +150,7 @@ DEBUG_ONLY(BasicType etype = Klass::layout_helper_element_type(lh)); assert(length <= arrayOopDesc::max_array_length(etype), "no overflow"); - julong size_in_bytes = length; + julong size_in_bytes = (juint)length; size_in_bytes <<= element_shift; size_in_bytes += instance_header_size; julong size_in_words = ((size_in_bytes + (HeapWordSize-1)) >> LogHeapWordSize); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/opto/c2_globals.hpp --- a/src/share/vm/opto/c2_globals.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/opto/c2_globals.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -473,6 +473,9 @@ product(bool, DoEscapeAnalysis, true, \ "Perform escape analysis") \ \ + product(double, EscapeAnalysisTimeout, 20. DEBUG_ONLY(+40.), \ + "Abort EA when it reaches time limit (in sec)") \ + \ develop(bool, ExitEscapeAnalysisOnTimeout, true, \ "Exit or throw assert in EA when it reaches time limit") \ \ diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/opto/callGenerator.cpp --- a/src/share/vm/opto/callGenerator.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/opto/callGenerator.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -862,7 +862,7 @@ call_does_dispatch, vtable_index); // out-parameters // We lack profiling at this call but type speculation may // provide us with a type - speculative_receiver_type = receiver_type->speculative_type(); + speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL; } CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/opto/coalesce.cpp --- a/src/share/vm/opto/coalesce.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/opto/coalesce.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -281,9 +281,11 @@ Block *pred = _phc._cfg.get_block_for_node(b->pred(j)); Node *copy; assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach"); - // Rematerialize constants instead of copying them - if( m->is_Mach() && m->as_Mach()->is_Con() && - m->as_Mach()->rematerialize() ) { + // Rematerialize constants instead of copying them. + // We do this only for immediate constants, we avoid constant table loads + // because that will unsafely extend the live range of the constant table base. + if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() && + m->as_Mach()->rematerialize()) { copy = m->clone(); // Insert the copy in the predecessor basic block pred->add_inst(copy); @@ -317,8 +319,8 @@ assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach"); // At this point it is unsafe to extend live ranges (6550579). 
// Rematerialize only constants as we do for Phi above. - if(m->is_Mach() && m->as_Mach()->is_Con() && - m->as_Mach()->rematerialize()) { + if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() && + m->as_Mach()->rematerialize()) { copy = m->clone(); // Insert the copy in the basic block, just before us b->insert_node(copy, l++); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/opto/doCall.cpp --- a/src/share/vm/opto/doCall.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/opto/doCall.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -799,10 +799,16 @@ // each arm of the Phi. If I know something clever about the exceptions // I'm loading the class from, I can replace the LoadKlass with the // klass constant for the exception oop. - if( ex_node->is_Phi() ) { - ex_klass_node = new (C) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT ); - for( uint i = 1; i < ex_node->req(); i++ ) { - Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() ); + if (ex_node->is_Phi()) { + ex_klass_node = new (C) PhiNode(ex_node->in(0), TypeKlassPtr::OBJECT); + for (uint i = 1; i < ex_node->req(); i++) { + Node* ex_in = ex_node->in(i); + if (ex_in == top() || ex_in == NULL) { + // This path was not taken. + ex_klass_node->init_req(i, top()); + continue; + } + Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes()); Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) ); ex_klass_node->init_req( i, k ); } diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/opto/escape.cpp --- a/src/share/vm/opto/escape.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/opto/escape.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -37,6 +37,8 @@ ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) : _nodes(C->comp_arena(), C->unique(), C->unique(), NULL), + _in_worklist(C->comp_arena()), + _next_pidx(0), _collecting(true), _verify(false), _compile(C), @@ -124,13 +126,19 @@ if (C->root() != NULL) { ideal_nodes.push(C->root()); } + // Processed ideal nodes are unique on ideal_nodes list + // but several ideal nodes are mapped to the phantom_obj. + // To avoid duplicated entries on the following worklists + // add the phantom_obj only once to them. + ptnodes_worklist.append(phantom_obj); + java_objects_worklist.append(phantom_obj); for( uint next = 0; next < ideal_nodes.size(); ++next ) { Node* n = ideal_nodes.at(next); // Create PointsTo nodes and add them to Connection Graph. Called // only once per ideal node since ideal_nodes is Unique_Node list. add_node_to_connection_graph(n, &delayed_worklist); PointsToNode* ptn = ptnode_adr(n->_idx); - if (ptn != NULL) { + if (ptn != NULL && ptn != phantom_obj) { ptnodes_worklist.append(ptn); if (ptn->is_JavaObject()) { java_objects_worklist.append(ptn->as_JavaObject()); @@ -414,7 +422,7 @@ } case Op_CreateEx: { // assume that all exception objects globally escape - add_java_object(n, PointsToNode::GlobalEscape); + map_ideal_node(n, phantom_obj); break; } case Op_LoadKlass: @@ -1065,13 +1073,8 @@ // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler. // Set limit to 20 to catch situation when something did go wrong and // bailout Escape Analysis. - // Also limit build time to 30 sec (60 in debug VM). + // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag. 
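
The escape.cpp change above retires the hard-coded CG_BUILD_TIME_LIMIT in favor of the new EscapeAnalysisTimeout flag and, every SAMPLE_SIZE java objects, extrapolates how long the current pass would still take so it can bail out before the budget is blown rather than after. A standalone sketch of that sample-and-project idea; the work items and the timeout value are placeholders, not the connection-graph types:

    #include <chrono>
    #include <cstddef>
    #include <vector>

    bool process_all(const std::vector<int>& items, double timeout_seconds) {
      using clock = std::chrono::steady_clock;
      const int kSampleSize = 4;
      auto pass_start  = clock::now();
      auto chunk_start = pass_start;

      for (std::size_t i = 0; i < items.size(); i++) {
        // ... do the real per-item work here ...

        if (i > 0 && i % kSampleSize == 0) {
          auto now = clock::now();
          double elapsed  = std::chrono::duration<double>(now - pass_start).count();
          double per_item =
              std::chrono::duration<double>(now - chunk_start).count() / kSampleSize;
          double projected_total = elapsed + per_item * (items.size() - i);
          if (projected_total >= timeout_seconds) {
            return false;                // timeout: abandon the analysis
          }
          chunk_start = now;
        }
      }
      return true;
    }
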
#define CG_BUILD_ITER_LIMIT 20 -#ifdef ASSERT -#define CG_BUILD_TIME_LIMIT 60.0 -#else -#define CG_BUILD_TIME_LIMIT 30.0 -#endif // Propagate GlobalEscape and ArgEscape escape states and check that // we still have non-escaping objects. The method pushs on _worklist @@ -1082,12 +1085,13 @@ // Now propagate references to all JavaObject nodes. int java_objects_length = java_objects_worklist.length(); elapsedTimer time; + bool timeout = false; int new_edges = 1; int iterations = 0; do { while ((new_edges > 0) && - (iterations++ < CG_BUILD_ITER_LIMIT) && - (time.seconds() < CG_BUILD_TIME_LIMIT)) { + (iterations++ < CG_BUILD_ITER_LIMIT)) { + double start_time = time.seconds(); time.start(); new_edges = 0; // Propagate references to phantom_object for nodes pushed on _worklist @@ -1096,7 +1100,26 @@ for (int next = 0; next < java_objects_length; ++next) { JavaObjectNode* ptn = java_objects_worklist.at(next); new_edges += add_java_object_edges(ptn, true); + +#define SAMPLE_SIZE 4 + if ((next % SAMPLE_SIZE) == 0) { + // Each 4 iterations calculate how much time it will take + // to complete graph construction. + time.stop(); + double stop_time = time.seconds(); + double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE; + double time_until_end = time_per_iter * (double)(java_objects_length - next); + if ((start_time + time_until_end) >= EscapeAnalysisTimeout) { + timeout = true; + break; // Timeout + } + start_time = stop_time; + time.start(); + } +#undef SAMPLE_SIZE + } + if (timeout) break; if (new_edges > 0) { // Update escape states on each iteration if graph was updated. if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) { @@ -1104,9 +1127,12 @@ } } time.stop(); + if (time.seconds() >= EscapeAnalysisTimeout) { + timeout = true; + break; + } } - if ((iterations < CG_BUILD_ITER_LIMIT) && - (time.seconds() < CG_BUILD_TIME_LIMIT)) { + if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) { time.start(); // Find fields which have unknown value. int fields_length = oop_fields_worklist.length(); @@ -1119,18 +1145,21 @@ } } time.stop(); + if (time.seconds() >= EscapeAnalysisTimeout) { + timeout = true; + break; + } } else { new_edges = 0; // Bailout } } while (new_edges > 0); // Bailout if passed limits. - if ((iterations >= CG_BUILD_ITER_LIMIT) || - (time.seconds() >= CG_BUILD_TIME_LIMIT)) { + if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) { Compile* C = _compile; if (C->log() != NULL) { C->log()->begin_elem("connectionGraph_bailout reason='reached "); - C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time"); + C->log()->text("%s", timeout ? "time" : "iterations"); C->log()->end_elem(" limit'"); } assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d", @@ -1147,7 +1176,6 @@ #endif #undef CG_BUILD_ITER_LIMIT -#undef CG_BUILD_TIME_LIMIT // Find fields initialized by NULL for non-escaping Allocations. int non_escaped_length = non_escaped_worklist.length(); @@ -1271,8 +1299,8 @@ } } } - while(_worklist.length() > 0) { - PointsToNode* use = _worklist.pop(); + for (int l = 0; l < _worklist.length(); l++) { + PointsToNode* use = _worklist.at(l); if (PointsToNode::is_base_use(use)) { // Add reference from jobj to field and from field to jobj (field's base). 
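The escape.cpp hunk above replaces the fixed CG_BUILD_TIME_LIMIT with a sampling scheme: every SAMPLE_SIZE objects the loop projects how long the remaining work would take and bails out if the projection exceeds EscapeAnalysisTimeout. A standalone sketch of that idea, using std::chrono in place of elapsedTimer and a 20-second budget mirroring the new flag's product default; everything else is illustrative.

#include <chrono>
#include <cstdio>

static bool process_all(int total_items, double budget_seconds) {
  using clock = std::chrono::steady_clock;
  const int kSampleSize = 4;
  clock::time_point start = clock::now();
  clock::time_point sample_start = start;

  for (int i = 0; i < total_items; i++) {
    // ... one unit of work elided ...
    if (i > 0 && (i % kSampleSize) == 0) {
      clock::time_point now = clock::now();
      double elapsed   = std::chrono::duration<double>(now - start).count();
      double per_item  = std::chrono::duration<double>(now - sample_start).count() / kSampleSize;
      double projected = elapsed + per_item * (total_items - i);
      if (projected >= budget_seconds) {
        printf("bailing out at item %d (projected %.1f s)\n", i, projected);
        return false;   // caller treats this like EA's timeout bailout
      }
      sample_start = now;
    }
  }
  return true;
}

int main() {
  return process_all(100000, 20.0) ? 0 : 1;
}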
use = PointsToNode::get_use_node(use)->as_Field(); @@ -1319,6 +1347,8 @@ add_field_uses_to_worklist(use->as_Field()); } } + _worklist.clear(); + _in_worklist.Reset(); return new_edges; } @@ -1898,7 +1928,7 @@ return; } Compile* C = _compile; - ptadr = new (C->comp_arena()) LocalVarNode(C, n, es); + ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); _nodes.at_put(n->_idx, ptadr); } @@ -1909,7 +1939,7 @@ return; } Compile* C = _compile; - ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es); + ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); _nodes.at_put(n->_idx, ptadr); } @@ -1925,7 +1955,7 @@ es = PointsToNode::GlobalEscape; } Compile* C = _compile; - FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop); + FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); _nodes.at_put(n->_idx, field); } @@ -1939,7 +1969,7 @@ return; } Compile* C = _compile; - ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es); + ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); _nodes.at_put(n->_idx, ptadr); // Add edge from arraycopy node to source object. (void)add_edge(ptadr, src); @@ -2839,6 +2869,13 @@ continue; } } + + const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); + if (t == NULL) + continue; // not a TypeOopPtr + if (!t->klass_is_exact()) + continue; // not an unique type + if (alloc->is_Allocate()) { // Set the scalar_replaceable flag for allocation // so it could be eliminated. @@ -2857,10 +2894,7 @@ // - not determined to be ineligible by escape analysis set_map(alloc, n); set_map(n, alloc); - const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); - if (t == NULL) - continue; // not a TypeOopPtr - const TypeOopPtr* tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni); + const TypeOopPtr* tinst = t->cast_to_instance_id(ni); igvn->hash_delete(n); igvn->set_type(n, tinst); n->raise_bottom_type(tinst); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/opto/escape.hpp --- a/src/share/vm/opto/escape.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/opto/escape.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -125,6 +125,8 @@ class FieldNode; class ArraycopyNode; +class ConnectionGraph; + // ConnectionGraph nodes class PointsToNode : public ResourceObj { GrowableArray _edges; // List of nodes this node points to @@ -137,6 +139,7 @@ Node* const _node; // Ideal node corresponding to this PointsTo node. 
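The add_java_object_edges change above switches from popping the worklist to indexed iteration followed by an explicit clear of both the list and the membership set. An illustrative sketch of that pattern with standard-library stand-ins for GrowableArray and VectorSet:

#include <vector>
#include <unordered_set>
#include <cstdio>

struct Worklist {
  std::vector<int> items;
  std::unordered_set<int> in_list;   // plays the role of _in_worklist

  void add(int v) {
    if (in_list.insert(v).second) {  // append only if not already queued
      items.push_back(v);
    }
  }
};

int main() {
  Worklist wl;
  wl.add(1); wl.add(2);
  int processed = 0;
  for (size_t i = 0; i < wl.items.size(); i++) {  // size() re-read each pass
    int v = wl.items[i];
    processed++;
    if (v < 4) {
      wl.add(v + 10);   // entries appended mid-walk are still visited
    }
  }
  printf("processed %d entries\n", processed);
  wl.items.clear();     // reset for the next call, like _worklist.clear()
  wl.in_list.clear();   // and _in_worklist.Reset()
  return 0;
}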
const int _idx; // Cached ideal node's _idx + const uint _pidx; // Index of this node public: typedef enum { @@ -165,17 +168,9 @@ } NodeFlags; - PointsToNode(Compile *C, Node* n, EscapeState es, NodeType type): - _edges(C->comp_arena(), 2, 0, NULL), - _uses (C->comp_arena(), 2, 0, NULL), - _node(n), - _idx(n->_idx), - _type((u1)type), - _escape((u1)es), - _fields_escape((u1)es), - _flags(ScalarReplaceable) { - assert(n != NULL && es != UnknownEscape, "sanity"); - } + inline PointsToNode(ConnectionGraph* CG, Node* n, EscapeState es, NodeType type); + + uint pidx() const { return _pidx; } Node* ideal_node() const { return _node; } int idx() const { return _idx; } @@ -243,14 +238,14 @@ class LocalVarNode: public PointsToNode { public: - LocalVarNode(Compile *C, Node* n, EscapeState es): - PointsToNode(C, n, es, LocalVar) {} + LocalVarNode(ConnectionGraph *CG, Node* n, EscapeState es): + PointsToNode(CG, n, es, LocalVar) {} }; class JavaObjectNode: public PointsToNode { public: - JavaObjectNode(Compile *C, Node* n, EscapeState es): - PointsToNode(C, n, es, JavaObject) { + JavaObjectNode(ConnectionGraph *CG, Node* n, EscapeState es): + PointsToNode(CG, n, es, JavaObject) { if (es > NoEscape) set_scalar_replaceable(false); } @@ -262,8 +257,8 @@ const bool _is_oop; // Field points to object bool _has_unknown_base; // Has phantom_object base public: - FieldNode(Compile *C, Node* n, EscapeState es, int offs, bool is_oop): - PointsToNode(C, n, es, Field), + FieldNode(ConnectionGraph *CG, Node* n, EscapeState es, int offs, bool is_oop): + PointsToNode(CG, n, es, Field), _offset(offs), _is_oop(is_oop), _has_unknown_base(false) {} @@ -284,8 +279,8 @@ class ArraycopyNode: public PointsToNode { public: - ArraycopyNode(Compile *C, Node* n, EscapeState es): - PointsToNode(C, n, es, Arraycopy) {} + ArraycopyNode(ConnectionGraph *CG, Node* n, EscapeState es): + PointsToNode(CG, n, es, Arraycopy) {} }; // Iterators for PointsTo node's edges: @@ -323,11 +318,14 @@ class ConnectionGraph: public ResourceObj { + friend class PointsToNode; private: GrowableArray _nodes; // Map from ideal nodes to // ConnectionGraph nodes. GrowableArray _worklist; // Nodes to be processed + VectorSet _in_worklist; + uint _next_pidx; bool _collecting; // Indicates whether escape information // is still being collected. If false, @@ -353,6 +351,8 @@ } uint nodes_size() const { return _nodes.length(); } + uint next_pidx() { return _next_pidx++; } + // Add nodes to ConnectionGraph. void add_local_var(Node* n, PointsToNode::EscapeState es); void add_java_object(Node* n, PointsToNode::EscapeState es); @@ -396,15 +396,26 @@ int add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist); // Put node on worklist if it is (or was) not there. - void add_to_worklist(PointsToNode* pt) { - _worklist.push(pt); - return; + inline void add_to_worklist(PointsToNode* pt) { + PointsToNode* ptf = pt; + uint pidx_bias = 0; + if (PointsToNode::is_base_use(pt)) { + // Create a separate entry in _in_worklist for a marked base edge + // because _worklist may have an entry for a normal edge pointing + // to the same node. To separate them use _next_pidx as bias. + ptf = PointsToNode::get_use_node(pt)->as_Field(); + pidx_bias = _next_pidx; + } + if (!_in_worklist.test_set(ptf->pidx() + pidx_bias)) { + _worklist.append(pt); + } } // Put on worklist all uses of this node. 
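The add_to_worklist rewrite above keeps a single VectorSet for membership but probes it at pidx for normal entries and at pidx plus a bias (_next_pidx) for marked base-use entries, so the two forms of the same node do not collide. A small sketch of that bias trick, with a toy bitset standing in for VectorSet:

#include <vector>
#include <cstdio>

struct BitSet {
  std::vector<bool> bits;
  // Returns the previous value and sets the bit (like VectorSet::test_set).
  bool test_set(size_t i) {
    if (i >= bits.size()) bits.resize(i + 1, false);
    bool old = bits[i];
    bits[i] = true;
    return old;
  }
};

int main() {
  const size_t next_pidx = 100;   // assume 100 node indices handed out so far
  BitSet in_worklist;

  size_t node_pidx = 7;
  bool queued_normal = in_worklist.test_set(node_pidx);              // normal entry
  bool queued_marked = in_worklist.test_set(node_pidx + next_pidx);  // base-use entry

  // Both print 0 on the first add: the two forms occupy distinct bits.
  printf("normal already queued: %d, marked already queued: %d\n",
         (int)queued_normal, (int)queued_marked);
  return 0;
}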
- void add_uses_to_worklist(PointsToNode* pt) { - for (UseIterator i(pt); i.has_next(); i.next()) - _worklist.push(i.get()); + inline void add_uses_to_worklist(PointsToNode* pt) { + for (UseIterator i(pt); i.has_next(); i.next()) { + add_to_worklist(i.get()); + } } // Put on worklist all field's uses and related field nodes. @@ -517,8 +528,8 @@ } // Helper functions bool is_oop_field(Node* n, int offset, bool* unsafe); - static Node* get_addp_base(Node *addp); - static Node* find_second_addp(Node* addp, Node* n); + static Node* get_addp_base(Node *addp); + static Node* find_second_addp(Node* addp, Node* n); // offset of a field reference int address_offset(Node* adr, PhaseTransform *phase); @@ -587,4 +598,17 @@ #endif }; +inline PointsToNode::PointsToNode(ConnectionGraph *CG, Node* n, EscapeState es, NodeType type): + _edges(CG->_compile->comp_arena(), 2, 0, NULL), + _uses (CG->_compile->comp_arena(), 2, 0, NULL), + _node(n), + _idx(n->_idx), + _pidx(CG->next_pidx()), + _type((u1)type), + _escape((u1)es), + _fields_escape((u1)es), + _flags(ScalarReplaceable) { + assert(n != NULL && es != UnknownEscape, "sanity"); +} + #endif // SHARE_VM_OPTO_ESCAPE_HPP diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/prims/whitebox.cpp --- a/src/share/vm/prims/whitebox.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/prims/whitebox.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -331,7 +331,36 @@ } WB_END +WB_ENTRY(jboolean, WB_NMTChangeTrackingLevel(JNIEnv* env)) + // Test that we can downgrade NMT levels but not upgrade them. + if (MemTracker::tracking_level() == NMT_off) { + MemTracker::transition_to(NMT_off); + return MemTracker::tracking_level() == NMT_off; + } else { + assert(MemTracker::tracking_level() == NMT_detail, "Should start out as detail tracking"); + MemTracker::transition_to(NMT_summary); + assert(MemTracker::tracking_level() == NMT_summary, "Should be summary now"); + // Can't go to detail once NMT is set to summary. + MemTracker::transition_to(NMT_detail); + assert(MemTracker::tracking_level() == NMT_summary, "Should still be summary now"); + + // Shutdown sets tracking level to minimal. + MemTracker::shutdown(); + assert(MemTracker::tracking_level() == NMT_minimal, "Should be minimal now"); + + // Once the tracking level is minimal, we cannot increase to summary. + // The code ignores this request instead of asserting because if the malloc site + // table overflows in another thread, it tries to change the code to summary. + MemTracker::transition_to(NMT_summary); + assert(MemTracker::tracking_level() == NMT_minimal, "Should still be minimal now"); + + // Really can never go up to detail, verify that the code would never do this. 
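The WB_NMTChangeTrackingLevel entry above exercises the invariant that NMT tracking levels may only move downward (detail to summary to minimal) and that upgrade requests are ignored. A standalone sketch of that invariant, with an illustrative enum and function that are not the real MemTracker API:

#include <cstdio>

enum TrackingLevel { kOff = 0, kMinimal = 1, kSummary = 2, kDetail = 3 };

static TrackingLevel g_level = kDetail;

// Returns the level actually in effect after the request.
static TrackingLevel transition_to(TrackingLevel requested) {
  if (requested <= g_level) {
    g_level = requested;   // downgrades are honored
  }
  return g_level;          // upgrades are silently ignored
}

int main() {
  printf("-> summary: now %d\n", transition_to(kSummary));  // 2
  printf("-> detail : now %d\n", transition_to(kDetail));   // still 2
  printf("-> minimal: now %d\n", transition_to(kMinimal));  // 1
  printf("-> summary: now %d\n", transition_to(kSummary));  // still 1
  return 0;
}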
+ MemTracker::transition_to(NMT_detail); + assert(MemTracker::tracking_level() == NMT_minimal, "Should still be minimal now"); + return MemTracker::tracking_level() == NMT_minimal; + } +WB_END #endif // INCLUDE_NMT static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) { @@ -936,6 +965,7 @@ {CC"NMTReleaseMemory", CC"(JJ)V", (void*)&WB_NMTReleaseMemory }, {CC"NMTOverflowHashBucket", CC"(J)V", (void*)&WB_NMTOverflowHashBucket}, {CC"NMTIsDetailSupported",CC"()Z", (void*)&WB_NMTIsDetailSupported}, + {CC"NMTChangeTrackingLevel", CC"()Z", (void*)&WB_NMTChangeTrackingLevel}, #endif // INCLUDE_NMT {CC"deoptimizeAll", CC"()V", (void*)&WB_DeoptimizeAll }, {CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Executable;Z)I", diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/runtime/advancedThresholdPolicy.cpp --- a/src/share/vm/runtime/advancedThresholdPolicy.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -451,7 +451,7 @@ if (should_create_mdo(mh(), level)) { create_mdo(mh, thread); } - if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) { + if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { CompLevel next_level = call_event(mh(), level); if (next_level != level) { compile(mh, InvocationEntryBci, next_level, thread); @@ -475,7 +475,7 @@ CompLevel next_osr_level = loop_event(imh(), level); CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level(); // At the very least compile the OSR version - if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_osr_level != level) { + if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) { compile(imh, bci, next_osr_level, thread); } @@ -509,7 +509,7 @@ nm->make_not_entrant(); } } - if (!CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) { + if (!CompileBroker::compilation_is_in_queue(mh)) { // Fix up next_level if necessary to avoid deopts if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) { next_level = CompLevel_full_profile; @@ -521,7 +521,7 @@ } else { cur_level = comp_level(imh()); next_level = call_event(imh(), cur_level); - if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) { + if (!CompileBroker::compilation_is_in_queue(imh) && (next_level != cur_level)) { compile(imh, InvocationEntryBci, next_level, thread); } } diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/runtime/arguments.cpp --- a/src/share/vm/runtime/arguments.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/runtime/arguments.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -66,7 +66,7 @@ #endif // INCLUDE_ALL_GCS // Note: This is a special bug reporting site for the JVM -#define DEFAULT_VENDOR_URL_BUG "http://bugreport.sun.com/bugreport/crash.jsp" +#define DEFAULT_VENDOR_URL_BUG "http://bugreport.java.com/bugreport/crash.jsp" #define DEFAULT_JAVA_LAUNCHER "generic" // Disable options not supported in this release, with a warning if they @@ -300,6 +300,7 @@ { "UseStringCache", JDK_Version::jdk(8), JDK_Version::jdk(9) }, { "UseOldInlining", JDK_Version::jdk(9), JDK_Version::jdk(10) }, { "AutoShutdownNMT", JDK_Version::jdk(9), JDK_Version::jdk(10) }, + { "CompilationRepeat", JDK_Version::jdk(8), JDK_Version::jdk(9) }, #ifdef PRODUCT { "DesiredMethodLimit", JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) }, @@ -1144,6 +1145,32 @@ } } +/** + * Returns the minimum number of compiler threads needed to run the 
JVM. The following + * configurations are possible. + * + * 1) The JVM is build using an interpreter only. As a result, the minimum number of + * compiler threads is 0. + * 2) The JVM is build using the compiler(s) and tiered compilation is disabled. As + * a result, either C1 or C2 is used, so the minimum number of compiler threads is 1. + * 3) The JVM is build using the compiler(s) and tiered compilation is enabled. However, + * the option "TieredStopAtLevel < CompLevel_full_optimization". As a result, only + * C1 can be used, so the minimum number of compiler threads is 1. + * 4) The JVM is build using the compilers and tiered compilation is enabled. The option + * 'TieredStopAtLevel = CompLevel_full_optimization' (the default value). As a result, + * the minimum number of compiler threads is 2. + */ +int Arguments::get_min_number_of_compiler_threads() { +#if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK) + return 0; // case 1 +#else + if (!TieredCompilation || (TieredStopAtLevel < CompLevel_full_optimization)) { + return 1; // case 2 or case 3 + } + return 2; // case 4 (tiered) +#endif +} + #if INCLUDE_ALL_GCS static void disable_adaptive_size_policy(const char* collector_name) { if (UseAdaptiveSizePolicy) { @@ -2461,6 +2488,12 @@ status &= verify_interval(SafepointPollOffset, 0, os::vm_page_size() - BytesPerWord, "SafepointPollOffset"); #endif + int min_number_of_compiler_threads = get_min_number_of_compiler_threads(); + // The default CICompilerCount's value is CI_COMPILER_COUNT. + assert(min_number_of_compiler_threads <= CI_COMPILER_COUNT, "minimum should be less or equal default number"); + // Check the minimum number of compiler threads + status &=verify_min_value(CICompilerCount, min_number_of_compiler_threads, "CICompilerCount"); + return status; } @@ -3617,6 +3650,8 @@ bool settings_file_specified = false; bool needs_hotspotrc_warning = false; + ArgumentsExt::process_options(args); + const char* flags_file; int index; for (index = 0; index < args->nOptions; index++) { diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/runtime/arguments.hpp --- a/src/share/vm/runtime/arguments.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/runtime/arguments.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -327,6 +327,7 @@ // Tiered static void set_tiered_flags(); + static int get_min_number_of_compiler_threads(); // CMS/ParNew garbage collectors static void set_parnew_gc_flags(); static void set_cms_and_parnew_gc_flags(); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/runtime/arguments_ext.hpp --- a/src/share/vm/runtime/arguments_ext.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/runtime/arguments_ext.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -34,6 +34,7 @@ static inline bool check_gc_consistency_user(); static inline bool check_gc_consistency_ergo(); static inline bool check_vm_args_consistency(); + static void process_options(const JavaVMInitArgs* args) {} }; void ArgumentsExt::select_gc_ergonomically() { diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/runtime/globals.cpp --- a/src/share/vm/runtime/globals.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/runtime/globals.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -243,6 +243,11 @@ return is_unlocked_ext(); } +void Flag::unlock_diagnostic() { + assert(is_diagnostic(), "sanity"); + _flags = Flags(_flags & ~KIND_DIAGNOSTIC); +} + // Get custom message for this locked flag, or return NULL if // none is available. 
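The arguments.cpp hunk above derives a minimum compiler-thread count from the build and tiering configuration and then verifies CICompilerCount against it. A hedged sketch of that check, where the booleans stand in for the build macros and the TieredCompilation/TieredStopAtLevel flags:

#include <cstdio>

static int min_compiler_threads(bool has_compiler, bool tiered, bool stops_before_full_opt) {
  if (!has_compiler)                    return 0;  // interpreter-only build
  if (!tiered || stops_before_full_opt) return 1;  // only C1 or only C2 in use
  return 2;                                        // full tiered: C1 and C2
}

static bool verify_min_value(int value, int min, const char* name) {
  if (value < min) {
    fprintf(stderr, "%s (%d) must be at least %d\n", name, value, min);
    return false;
  }
  return true;
}

int main() {
  int ci_compiler_count = 1;   // e.g. -XX:CICompilerCount=1 on a tiered build
  int min = min_compiler_threads(true, true, false);
  bool ok = verify_min_value(ci_compiler_count, min, "CICompilerCount");
  printf("configuration %s\n", ok ? "accepted" : "rejected");
  return ok ? 0 : 2;
}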
void Flag::get_locked_message(char* buf, int buflen) const { diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/runtime/globals.hpp --- a/src/share/vm/runtime/globals.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/runtime/globals.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -313,6 +313,8 @@ bool is_writeable_ext() const; bool is_external_ext() const; + void unlock_diagnostic(); + void get_locked_message(char*, int) const; void get_locked_message_ext(char*, int) const; diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/runtime/os.hpp --- a/src/share/vm/runtime/os.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/runtime/os.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -214,13 +214,14 @@ // Interface for detecting multiprocessor system static inline bool is_MP() { -#if !INCLUDE_NMT - assert(_processor_count > 0, "invalid processor count"); - return _processor_count > 1 || AssumeMP; -#else - // NMT needs atomic operations before this initialization. - return true; -#endif + // During bootstrap if _processor_count is not yet initialized + // we claim to be MP as that is safest. If any platform has a + // stub generator that might be triggered in this phase and for + // which being declared MP when in fact not, is a problem - then + // the bootstrap routine for the stub generator needs to check + // the processor count directly and leave the bootstrap routine + // in place until called after initialization has ocurred. + return (_processor_count != 1) || AssumeMP; } static julong available_memory(); static julong physical_memory(); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/runtime/simpleThresholdPolicy.cpp --- a/src/share/vm/runtime/simpleThresholdPolicy.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/runtime/simpleThresholdPolicy.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -239,7 +239,7 @@ if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) { return; } - if (!CompileBroker::compilation_is_in_queue(mh, bci)) { + if (!CompileBroker::compilation_is_in_queue(mh)) { if (PrintTieredEvents) { print_event(COMPILE, mh, mh, bci, level); } @@ -378,7 +378,7 @@ // Handle the invocation event. void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh, CompLevel level, nmethod* nm, JavaThread* thread) { - if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) { + if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { CompLevel next_level = call_event(mh(), level); if (next_level != level) { compile(mh, InvocationEntryBci, next_level, thread); @@ -391,8 +391,8 @@ void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh, int bci, CompLevel level, nmethod* nm, JavaThread* thread) { // If the method is already compiling, quickly bail out. - if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) { - // Use loop event as an opportinity to also check there's been + if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { + // Use loop event as an opportunity to also check there's been // enough calls. 
CompLevel cur_level = comp_level(mh()); CompLevel next_level = call_event(mh(), cur_level); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/services/mallocTracker.cpp --- a/src/share/vm/services/mallocTracker.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/services/mallocTracker.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -51,14 +51,6 @@ return amount; } - -void MallocMemorySnapshot::reset() { - _tracking_header.reset(); - for (int index = 0; index < mt_number_of_types; index ++) { - _malloc[index].reset(); - } -} - // Make adjustment by subtracting chunks used by arenas // from total chunks to get total free chunck size void MallocMemorySnapshot::make_adjustment() { @@ -116,14 +108,9 @@ bool MallocTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) { assert(from != NMT_off, "Can not transition from off state"); assert(to != NMT_off, "Can not transition to off state"); - if (from == NMT_minimal) { - MallocMemorySummary::reset(); - } + assert (from != NMT_minimal, "cannot transition from minimal state"); - if (to == NMT_detail) { - assert(from == NMT_minimal || from == NMT_summary, "Just check"); - return MallocSiteTable::initialize(); - } else if (from == NMT_detail) { + if (from == NMT_detail) { assert(to == NMT_minimal || to == NMT_summary, "Just check"); MallocSiteTable::shutdown(); } diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/services/mallocTracker.hpp --- a/src/share/vm/services/mallocTracker.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/services/mallocTracker.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -51,14 +51,6 @@ DEBUG_ONLY(_peak_size = 0;) } - // Reset counters - void reset() { - _size = 0; - _count = 0; - DEBUG_ONLY(_peak_size = 0;) - DEBUG_ONLY(_peak_count = 0;) - } - inline void allocate(size_t sz) { Atomic::add(1, (volatile MemoryCounterType*)&_count); if (sz > 0) { @@ -124,11 +116,6 @@ _arena.resize(sz); } - void reset() { - _malloc.reset(); - _arena.reset(); - } - inline size_t malloc_size() const { return _malloc.size(); } inline size_t malloc_count() const { return _malloc.count();} inline size_t arena_size() const { return _arena.size(); } @@ -176,8 +163,6 @@ return s->by_type(mtThreadStack)->malloc_count(); } - void reset(); - void copy_to(MallocMemorySnapshot* s) { s->_tracking_header = _tracking_header; for (int index = 0; index < mt_number_of_types; index ++) { @@ -240,11 +225,6 @@ return as_snapshot()->malloc_overhead()->size(); } - // Reset all counters to zero - static void reset() { - as_snapshot()->reset(); - } - static MallocMemorySnapshot* as_snapshot() { return (MallocMemorySnapshot*)_snapshot; } diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/services/memBaseline.hpp --- a/src/share/vm/services/memBaseline.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/services/memBaseline.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -90,10 +90,6 @@ _class_count(0) { } - ~MemBaseline() { - reset(); - } - bool baseline(bool summaryOnly = true); BaselineType baseline_type() const { return _baseline_type; } @@ -169,8 +165,7 @@ // reset the baseline for reuse void reset() { _baseline_type = Not_baselined; - _malloc_memory_snapshot.reset(); - _virtual_memory_snapshot.reset(); + // _malloc_memory_snapshot and _virtual_memory_snapshot are copied over. 
_class_count = 0; _malloc_sites.clear(); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/services/memReporter.hpp --- a/src/share/vm/services/memReporter.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/services/memReporter.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -96,20 +96,6 @@ size_t _class_count; public: - // Report summary tracking data from global snapshots directly. - // This constructor is used for final reporting and hs_err reporting. - MemSummaryReporter(MallocMemorySnapshot* malloc_snapshot, - VirtualMemorySnapshot* vm_snapshot, outputStream* output, - size_t class_count = 0, size_t scale = K) : - MemReporterBase(output, scale), - _malloc_snapshot(malloc_snapshot), - _vm_snapshot(vm_snapshot) { - if (class_count == 0) { - _class_count = InstanceKlass::number_of_instance_classes(); - } else { - _class_count = class_count; - } - } // This constructor is for normal reporting from a recent baseline. MemSummaryReporter(MemBaseline& baseline, outputStream* output, size_t scale = K) : MemReporterBase(output, scale), diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/services/memTracker.cpp --- a/src/share/vm/services/memTracker.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/services/memTracker.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -144,11 +144,9 @@ } -// Shutdown can only be issued via JCmd, and NMT JCmd is serialized -// by lock +// Shutdown can only be issued via JCmd, and NMT JCmd is serialized by lock void MemTracker::shutdown() { - // We can only shutdown NMT to minimal tracking level if it is - // ever on. + // We can only shutdown NMT to minimal tracking level if it is ever on. if (tracking_level () > NMT_minimal) { transition_to(NMT_minimal); } @@ -157,45 +155,36 @@ bool MemTracker::transition_to(NMT_TrackingLevel level) { NMT_TrackingLevel current_level = tracking_level(); + assert(level != NMT_off || current_level == NMT_off, "Cannot transition NMT to off"); + if (current_level == level) { return true; } else if (current_level > level) { - // Downgrade tracking level, we want to lower the tracking - // level first + // Downgrade tracking level, we want to lower the tracking level first _tracking_level = level; // Make _tracking_level visible immediately. OrderAccess::fence(); VirtualMemoryTracker::transition(current_level, level); MallocTracker::transition(current_level, level); - - if (level == NMT_minimal) _baseline.reset(); } else { - VirtualMemoryTracker::transition(current_level, level); - MallocTracker::transition(current_level, level); - - _tracking_level = level; - // Make _tracking_level visible immediately. - OrderAccess::fence(); + // Upgrading tracking level is not supported and has never been supported. + // Allocating and deallocating malloc tracking structures is not thread safe and + // leads to inconsistencies unless a lot coarser locks are added. 
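The MemTracker::transition_to change above keeps only the downgrade path: the lowered level is published first (with a fence) so concurrent recorders back off before the detailed bookkeeping is dismantled, and upgrades are rejected as inherently racy. A minimal sketch of that ordering, using std::atomic as a stand-in for OrderAccess::fence and simplified stand-in types:

#include <atomic>
#include <cstdio>

enum Level { kMinimal = 1, kSummary = 2, kDetail = 3 };

static std::atomic<int> g_tracking_level(kDetail);

static void tear_down_detail_structures() {
  // Safe only once no thread can still record detail events.
  printf("detail structures released\n");
}

static bool transition_to(Level level) {
  int current = g_tracking_level.load(std::memory_order_acquire);
  if (current == level) return true;
  if (current > level) {
    // Publish the lower level first so concurrent recorders stop ...
    g_tracking_level.store(level, std::memory_order_seq_cst);
    // ... then it is safe to dismantle the more detailed bookkeeping.
    if (current == kDetail) tear_down_detail_structures();
    return true;
  }
  return false;   // upgrades not supported: would race with live recorders
}

int main() {
  transition_to(kSummary);
  printf("level now %d\n", g_tracking_level.load());
  return 0;
}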
} - return true; } -void MemTracker::final_report(outputStream* output) { - assert(output != NULL, "No output stream"); - if (tracking_level() >= NMT_summary) { - MallocMemorySnapshot* malloc_memory_snapshot = - MallocMemorySummary::as_snapshot(); - malloc_memory_snapshot->make_adjustment(); - - VirtualMemorySnapshot* virtual_memory_snapshot = - VirtualMemorySummary::as_snapshot(); - - MemSummaryReporter rptr(malloc_memory_snapshot, - virtual_memory_snapshot, output); - rptr.report(); - // shutdown NMT, the data no longer accurate - shutdown(); +void MemTracker::report(bool summary_only, outputStream* output) { + assert(output != NULL, "No output stream"); + MemBaseline baseline; + if (baseline.baseline(summary_only)) { + if (summary_only) { + MemSummaryReporter rpt(baseline, output); + rpt.report(); + } else { + MemDetailReporter rpt(baseline, output); + rpt.report(); + } } } diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/services/memTracker.hpp --- a/src/share/vm/services/memTracker.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/services/memTracker.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -70,6 +70,7 @@ static inline void release_thread_stack(void* addr, size_t size) { } static void final_report(outputStream*) { } + static void error_report(outputStream*) { } }; #else @@ -270,13 +271,20 @@ // other tools. static inline Mutex* query_lock() { return _query_lock; } - // Make a final report and shutdown. - // This function generates summary report without creating snapshots, - // to avoid additional memory allocation. It uses native memory summary - // counters, and makes adjustment to them, once the adjustment is made, - // the counters are no longer accurate. As the result, this function - // should only be used for final reporting before shutting down. - static void final_report(outputStream*); + // Make a final report or report for hs_err file. + static void error_report(outputStream* output) { + if (tracking_level() >= NMT_summary) { + report(true, output); // just print summary for error case. + } + } + + static void final_report(outputStream* output) { + NMT_TrackingLevel level = tracking_level(); + if (level >= NMT_summary) { + report(level == NMT_summary, output); + } + } + // Stored baseline static inline MemBaseline& get_baseline() { @@ -291,6 +299,7 @@ private: static NMT_TrackingLevel init_tracking_level(); + static void report(bool summary_only, outputStream* output); private: // Tracking level diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/services/virtualMemoryTracker.cpp --- a/src/share/vm/services/virtualMemoryTracker.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/services/virtualMemoryTracker.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -443,26 +443,28 @@ bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) { assert(_reserved_regions != NULL, "Sanity check"); ThreadCritical tc; - LinkedListNode* head = _reserved_regions->head(); - while (head != NULL) { - const ReservedMemoryRegion* rgn = head->peek(); - if (!walker->do_allocation_site(rgn)) { - return false; + // Check that the _reserved_regions haven't been deleted. + if (_reserved_regions != NULL) { + LinkedListNode* head = _reserved_regions->head(); + while (head != NULL) { + const ReservedMemoryRegion* rgn = head->peek(); + if (!walker->do_allocation_site(rgn)) { + return false; + } + head = head->next(); } - head = head->next(); - } + } return true; } // Transition virtual memory tracking level. 
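The walk_virtual_memory hunk above re-checks _reserved_regions after entering ThreadCritical, because a concurrent transition to NMT_minimal may have deleted the list between the caller's earlier check and the walk. An illustrative sketch of that check-again-under-the-lock pattern, with std::mutex and a toy list standing in for ThreadCritical and the VM's LinkedList:

#include <mutex>
#include <cstdio>

struct Node { int value; Node* next; };

static std::mutex g_lock;           // plays the role of ThreadCritical
static Node* g_regions = nullptr;   // may be deleted and nulled by another thread

static bool walk_regions(bool (*visit)(const Node*)) {
  std::lock_guard<std::mutex> guard(g_lock);
  if (g_regions == nullptr) {       // re-check under the lock: list may be gone
    return true;                    // nothing to walk is not an error
  }
  for (Node* n = g_regions; n != nullptr; n = n->next) {
    if (!visit(n)) return false;
  }
  return true;
}

static bool print_node(const Node* n) { printf("region %d\n", n->value); return true; }

int main() {
  Node b{2, nullptr}, a{1, &b};
  g_regions = &a;
  walk_regions(print_node);
  g_regions = nullptr;              // simulate teardown by another path
  walk_regions(print_node);         // still safe
  return 0;
}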
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) { - if (from == NMT_minimal) { - assert(to == NMT_summary || to == NMT_detail, "Just check"); - VirtualMemorySummary::reset(); - } else if (to == NMT_minimal) { + assert (from != NMT_minimal, "cannot convert from the lowest tracking level to anything"); + if (to == NMT_minimal) { assert(from == NMT_summary || from == NMT_detail, "Just check"); // Clean up virtual memory tracking data structures. ThreadCritical tc; + // Check for potential race with other thread calling transition if (_reserved_regions != NULL) { delete _reserved_regions; _reserved_regions = NULL; diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/services/virtualMemoryTracker.hpp --- a/src/share/vm/services/virtualMemoryTracker.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/services/virtualMemoryTracker.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -62,11 +62,6 @@ _committed -= sz; } - void reset() { - _reserved = 0; - _committed = 0; - } - inline size_t reserved() const { return _reserved; } inline size_t committed() const { return _committed; } }; @@ -123,12 +118,6 @@ return amount; } - inline void reset() { - for (int index = 0; index < mt_number_of_types; index ++) { - _virtual_memory[index].reset(); - } - } - void copy_to(VirtualMemorySnapshot* s) { for (int index = 0; index < mt_number_of_types; index ++) { s->_virtual_memory[index] = _virtual_memory[index]; @@ -174,10 +163,6 @@ as_snapshot()->copy_to(s); } - static inline void reset() { - as_snapshot()->reset(); - } - static VirtualMemorySnapshot* as_snapshot() { return (VirtualMemorySnapshot*)_snapshot; } diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/utilities/debug.cpp --- a/src/share/vm/utilities/debug.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/utilities/debug.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -266,17 +266,19 @@ "native memory for metadata", "shared read only space", "shared read write space", - "shared miscellaneous data space" + "shared miscellaneous data space", + "shared miscellaneous code space" }; static const char* flag[] = { "Metaspace", "SharedReadOnlySize", "SharedReadWriteSize", - "SharedMiscDataSize" + "SharedMiscDataSize", + "SharedMiscCodeSize" }; warning("\nThe %s is not large enough\n" - "to preload requested classes. Use -XX:%s=\n" + "to preload requested classes. Use -XX:%s=\n" "to increase the initial size of %s.\n", name[shared_space], flag[shared_space], name[shared_space]); exit(2); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/utilities/debug.hpp --- a/src/share/vm/utilities/debug.hpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/utilities/debug.hpp Thu Nov 06 09:39:49 2014 -0800 @@ -246,7 +246,8 @@ SharedPermGen, SharedReadOnly, SharedReadWrite, - SharedMiscData + SharedMiscData, + SharedMiscCode }; void report_out_of_shared_space(SharedSpaceType space_type); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 src/share/vm/utilities/vmError.cpp --- a/src/share/vm/utilities/vmError.cpp Fri Oct 31 17:09:14 2014 -0700 +++ b/src/share/vm/utilities/vmError.cpp Thu Nov 06 09:39:49 2014 -0800 @@ -775,7 +775,7 @@ STEP(228, "(Native Memory Tracking)" ) if (_verbose) { - MemTracker::final_report(st); + MemTracker::error_report(st); } STEP(230, "" ) diff -r 5ca2ea5eeff0 -r b1cf34d57e78 test/Makefile --- a/test/Makefile Fri Oct 31 17:09:14 2014 -0700 +++ b/test/Makefile Thu Nov 06 09:39:49 2014 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1995, 2014, Oracle and/or its affiliates. 
All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -23,14 +23,36 @@ # # -# Makefile to run various jdk tests +# Makefile to run various hotspot tests # GETMIXEDPATH=echo -# Get OS/ARCH specifics -OSNAME = $(shell uname -s) -ifeq ($(OSNAME), SunOS) +# Utilities used +AWK = awk +CAT = cat +CD = cd +CHMOD = chmod +CP = cp +CUT = cut +DIRNAME = dirname +ECHO = echo +EGREP = egrep +EXPAND = expand +FIND = find +MKDIR = mkdir +PWD = pwd +SED = sed +SORT = sort +TEE = tee +UNAME = uname +UNIQ = uniq +WC = wc +ZIP = zip + +# Get OS name from uname (Cygwin inexplicably adds _NT-5.1) +UNAME_S := $(shell $(UNAME) -s | $(CUT) -f1 -d_) +ifeq ($(UNAME_S), SunOS) PLATFORM = solaris SLASH_JAVA = /java ARCH = $(shell uname -p) @@ -38,7 +60,7 @@ ARCH=i586 endif endif -ifeq ($(OSNAME), Linux) +ifeq ($(UNAME_S), Linux) PLATFORM = linux SLASH_JAVA = /java ARCH = $(shell uname -m) @@ -46,7 +68,7 @@ ARCH = i586 endif endif -ifeq ($(OSNAME), Darwin) +ifeq ($(UNAME_S), Darwin) PLATFORM = bsd SLASH_JAVA = /java ARCH = $(shell uname -m) @@ -54,7 +76,7 @@ ARCH = i586 endif endif -ifeq ($(findstring BSD,$(OSNAME)), BSD) +ifeq ($(findstring BSD,$(UNAME_S)), BSD) PLATFORM = bsd SLASH_JAVA = /java ARCH = $(shell uname -m) @@ -63,12 +85,12 @@ endif endif ifeq ($(PLATFORM),) - # detect wether we're running in MKS or cygwin - ifeq ($(OSNAME), Windows_NT) # MKS + # detect whether we're running in MKS or cygwin + ifeq ($(UNAME_S), Windows_NT) # MKS GETMIXEDPATH=dosname -s endif - ifeq ($(findstring CYGWIN,$(OSNAME)), CYGWIN) - GETMIXEDPATH=cygpath -m -s + ifeq ($(findstring CYGWIN,$(UNAME_S)), CYGWIN) + GETMIXEDPATH=cygpath -m endif PLATFORM = windows SLASH_JAVA = J: @@ -92,13 +114,6 @@ SLASH_JAVA = $(ALT_SLASH_JAVA) endif -# Utilities used -CD = cd -CP = cp -ECHO = echo -MKDIR = mkdir -ZIP = zip - # Root of this test area (important to use full paths in some places) TEST_ROOT := $(shell pwd) @@ -136,21 +151,82 @@ endif # How to create the test bundle (pass or fail, we want to create this) -BUNDLE_UP = ( $(MKDIR) -p `dirname $(ARCHIVE_BUNDLE)` \ - && $(CD) $(ABS_TEST_OUTPUT_DIR) \ - && $(ZIP) -q -r $(ARCHIVE_BUNDLE) . ) -BUNDLE_UP_FAILED = ( exitCode=$$? && $(BUNDLE_UP) && exit $${exitCode} ) +# Follow command with ";$(BUNDLE_UP_AND_EXIT)", so it always gets executed. +ZIP_UP_RESULTS = ( $(MKDIR) -p `$(DIRNAME) $(ARCHIVE_BUNDLE)` \ + && $(CD) $(ABS_TEST_OUTPUT_DIR) \ + && $(CHMOD) -R a+r . \ + && $(ZIP) -q -r $(ARCHIVE_BUNDLE) . ) + +# important results files +SUMMARY_TXT = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/JTreport/text/summary.txt") +STATS_TXT_NAME = Stats.txt +STATS_TXT = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/$(STATS_TXT_NAME)") +RUNLIST = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/runlist.txt") +PASSLIST = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/passlist.txt") +FAILLIST = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/faillist.txt") +EXITCODE = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/exitcode.txt") + +TESTEXIT = \ + if [ ! -s $(EXITCODE) ] ; then \ + $(ECHO) "ERROR: EXITCODE file not filled in."; \ + $(ECHO) "1" > $(EXITCODE); \ + fi ; \ + testExitCode=`$(CAT) $(EXITCODE)`; \ + $(ECHO) "EXIT CODE: $${testExitCode}"; \ + exit $${testExitCode} + +BUNDLE_UP_AND_EXIT = \ +( \ + jtregExitCode=$$? 
&& \ + _summary="$(SUMMARY_TXT)"; \ + $(RM) -f $(STATS_TXT) $(RUNLIST) $(PASSLIST) $(FAILLIST) $(EXITCODE); \ + $(ECHO) "$${jtregExitCode}" > $(EXITCODE); \ + if [ -r "$${_summary}" ] ; then \ + $(ECHO) "Summary: $(UNIQUE_DIR)" > $(STATS_TXT); \ + $(EXPAND) $${_summary} | $(EGREP) -v ' Not run\.' > $(RUNLIST); \ + $(EGREP) ' Passed\.' $(RUNLIST) \ + | $(EGREP) -v ' Error\.' \ + | $(EGREP) -v ' Failed\.' > $(PASSLIST); \ + ( $(EGREP) ' Failed\.' $(RUNLIST); \ + $(EGREP) ' Error\.' $(RUNLIST); \ + $(EGREP) -v ' Passed\.' $(RUNLIST) ) \ + | $(SORT) | $(UNIQ) > $(FAILLIST); \ + if [ $${jtregExitCode} != 0 -o -s $(FAILLIST) ] ; then \ + $(EXPAND) $(FAILLIST) \ + | $(CUT) -d' ' -f1 \ + | $(SED) -e 's@^@FAILED: @' >> $(STATS_TXT); \ + if [ $${jtregExitCode} = 0 ] ; then \ + jtregExitCode=1; \ + fi; \ + fi; \ + runc="`$(CAT) $(RUNLIST) | $(WC) -l | $(AWK) '{print $$1;}'`"; \ + passc="`$(CAT) $(PASSLIST) | $(WC) -l | $(AWK) '{print $$1;}'`"; \ + failc="`$(CAT) $(FAILLIST) | $(WC) -l | $(AWK) '{print $$1;}'`"; \ + exclc="FIXME CODETOOLS-7900176"; \ + $(ECHO) "TEST STATS: name=$(UNIQUE_DIR) run=$${runc} pass=$${passc} fail=$${failc}" \ + >> $(STATS_TXT); \ + else \ + $(ECHO) "Missing file: $${_summary}" >> $(STATS_TXT); \ + fi; \ + if [ -f $(STATS_TXT) ] ; then \ + $(CAT) $(STATS_TXT); \ + fi; \ + $(ZIP_UP_RESULTS) ; \ + $(TESTEXIT) \ +) ################################################################ # Default make rule (runs jtreg_tests) -all: jtreg_tests +all: hotspot_all @$(ECHO) "Testing completed successfully" -# Support "hotspot_" prefixed test make targets too -# The hotspot_% targets are for example invoked by the top level Makefile +# Support "hotspot_" prefixed test make targets (too) +# The hotspot_% targets are used by the top level Makefile +# Unless explicitly defined below, hotspot_ is interpreted as a jtreg test group name hotspot_%: - $(MAKE) $* + $(ECHO) "Running tests: $@" + $(MAKE) -j 1 TEST_SELECTION=":$@" UNIQUE_DIR=$@ jtreg_tests; # Prep for output prep: clean @@ -168,41 +244,64 @@ # Expect JT_HOME to be set for jtreg tests. 
(home for jtreg) ifndef JT_HOME - JT_HOME = $(SLASH_JAVA)/re/jtreg/4.0/promoted/latest/binaries/jtreg -endif -ifdef JPRT_JTREG_HOME - JT_HOME = $(JPRT_JTREG_HOME) + JT_HOME = $(SLASH_JAVA)/re/jtreg/4.1/promoted/latest/binaries/jtreg + ifdef JPRT_JTREG_HOME + JT_HOME = $(JPRT_JTREG_HOME) + endif endif -# Expect JPRT to set TESTDIRS to the jtreg test dirs -JTREG_TESTDIRS = demo/jvmti/gctest demo/jvmti/hprof +# When called from JPRT the TESTDIRS variable is set to the jtreg tests to run ifdef TESTDIRS - JTREG_TESTDIRS = $(TESTDIRS) + TEST_SELECTION = $(TESTDIRS) +endif + +ifdef CONCURRENCY + EXTRA_JTREG_OPTIONS += -concurrency:$(CONCURRENCY) endif # Default JTREG to run JTREG = $(JT_HOME)/bin/jtreg +# Only run automatic tests +JTREG_BASIC_OPTIONS += -a +# Report details on all failed or error tests, times too +JTREG_BASIC_OPTIONS += -v:fail,error,time +# Retain all files for failing tests +JTREG_BASIC_OPTIONS += -retain:fail,error +# Ignore tests are not run and completely silent about it +JTREG_IGNORE_OPTION = -ignore:quiet +JTREG_BASIC_OPTIONS += $(JTREG_IGNORE_OPTION) +# Add any extra options +JTREG_BASIC_OPTIONS += $(EXTRA_JTREG_OPTIONS) +# Set other vm and test options +JTREG_TEST_OPTIONS = $(JAVA_ARGS:%=-javaoptions:%) $(JAVA_OPTIONS:%=-vmoption:%) $(JAVA_VM_ARGS:%=-vmoption:%) + # Option to tell jtreg to not run tests marked with "ignore" ifeq ($(PLATFORM), windows) JTREG_KEY_OPTION = -k:!ignore else JTREG_KEY_OPTION = -k:\!ignore endif - -#EXTRA_JTREG_OPTIONS = +JTREG_BASIC_OPTIONS += $(JTREG_KEY_OPTION) + +# Make sure jtreg exists +$(JTREG): $(JT_HOME) -jtreg_tests: prep $(JT_HOME) $(PRODUCT_HOME) $(JTREG) - $(JTREG) -a -v:fail,error \ - $(JTREG_KEY_OPTION) \ - $(EXTRA_JTREG_OPTIONS) \ - -r:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTreport \ - -w:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTwork \ - -jdk:$(shell $(GETMIXEDPATH) "$(PRODUCT_HOME)") \ - $(JAVA_OPTIONS:%=-vmoption:%) \ - $(JTREG_TESTDIRS) \ - || $(BUNDLE_UP_FAILED) - $(BUNDLE_UP) +jtreg_tests: prep $(PRODUCT_HOME) $(JTREG) + ( \ + ( JT_HOME=$(shell $(GETMIXEDPATH) "$(JT_HOME)"); \ + export JT_HOME; \ + $(shell $(GETMIXEDPATH) "$(JTREG)") \ + $(JTREG_BASIC_OPTIONS) \ + -r:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/JTreport") \ + -w:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/JTwork") \ + -jdk:$(shell $(GETMIXEDPATH) "$(PRODUCT_HOME)") \ + $(JTREG_EXCLUSIONS) \ + $(JTREG_TEST_OPTIONS) \ + $(TEST_SELECTION) \ + ) ; \ + $(BUNDLE_UP_AND_EXIT) \ + ) 2>&1 | $(TEE) $(ABS_TEST_OUTPUT_DIR)/output.txt ; $(TESTEXIT) PHONY_LIST += jtreg_tests @@ -210,7 +309,7 @@ # clienttest (make sure various basic java client options work) -clienttest: prep $(PRODUCT_HOME) +hotspot_clienttest clienttest: prep $(PRODUCT_HOME) $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -version $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -help $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -X @@ -218,73 +317,38 @@ $(RM) $(PRODUCT_HOME)/jre/bin/client/classes.jsa $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -Xshare:dump -PHONY_LIST += clienttest +PHONY_LIST += hotspot_clienttest clienttest + +################################################################ + +# minimaltest (make sure various basic java minimal options work) + +hotspot_minimaltest minimaltest: prep $(PRODUCT_HOME) + $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -version + $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -help + $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -X + +PHONY_LIST += hotspot_minimaltest minimaltest ################################################################ # servertest (make 
sure various basic java server options work) -servertest: prep $(PRODUCT_HOME) +hotspot_servertest servertest: prep $(PRODUCT_HOME) $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -version $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -help $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -X -PHONY_LIST += servertest +PHONY_LIST += hotspot_servertest servertest ################################################################ # internalvmtests (run internal unit tests inside the VM) -internalvmtests: prep $(PRODUCT_HOME) +hotspot_internalvmtests internalvmtests: prep $(PRODUCT_HOME) $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -XX:+ExecuteInternalVMTests -version -PHONY_LIST += internalvmtests - -################################################################ - -# wbapitest (make sure the whitebox testing api classes work - -wbapitest: prep $(JT_HOME) $(PRODUCT_HOME) $(JTREG) - $(JTREG) -a -v:fail,error \ - $(JTREG_KEY_OPTION) \ - $(EXTRA_JTREG_OPTIONS) \ - -r:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTreport \ - -w:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTwork \ - -jdk:$(shell $(GETMIXEDPATH) "$(PRODUCT_HOME)") \ - $(JAVA_OPTIONS:%=-vmoption:%) \ - $(shell $(GETMIXEDPATH) "$(TEST_ROOT)")/sanity \ - || $(BUNDLE_UP_FAILED) - $(BUNDLE_UP) - -PHONY_LIST += wbapitest - -################################################################ - -# packtest - -# Expect JPRT to set JPRT_PACKTEST_HOME. -PACKTEST_HOME = /net/jprt-web.sfbay.sun.com/jprt/allproducts/packtest -ifdef JPRT_PACKTEST_HOME - PACKTEST_HOME = $(JPRT_PACKTEST_HOME) -endif - -#EXTRA_PACKTEST_OPTIONS = - -packtest: prep $(PACKTEST_HOME)/ptest $(PRODUCT_HOME) - ( $(CD) $(PACKTEST_HOME) && \ - $(PACKTEST_HOME)/ptest \ - -t "$(PRODUCT_HOME)" \ - $(PACKTEST_STRESS_OPTION) \ - $(EXTRA_PACKTEST_OPTIONS) \ - -W $(ABS_TEST_OUTPUT_DIR) \ - $(JAVA_OPTIONS:%=-J %) \ - ) || $(BUNDLE_UP_FAILED) - $(BUNDLE_UP) - -packtest_stress: PACKTEST_STRESS_OPTION=-s -packtest_stress: packtest - -PHONY_LIST += packtest packtest_stress +PHONY_LIST += hotspot_internalvmtests internalvmtests ################################################################ @@ -292,4 +356,3 @@ .PHONY: all clean prep $(PHONY_LIST) ################################################################ - diff -r 5ca2ea5eeff0 -r b1cf34d57e78 test/TEST.groups --- a/test/TEST.groups Fri Oct 31 17:09:14 2014 -0700 +++ b/test/TEST.groups Thu Nov 06 09:39:49 2014 -0800 @@ -125,6 +125,27 @@ -:needs_jdk +# When called from top level the test suites use the hotspot_ prefix +hotspot_wbapitest = \ + sanity/ + +hotspot_compiler = \ + sanity/ExecuteInternalVMTests.java + +hotspot_gc = \ + sanity/ExecuteInternalVMTests.java + +hotspot_runtime = \ + sanity/ExecuteInternalVMTests.java + +hotspot_serviceability = \ + sanity/ExecuteInternalVMTests.java + +hotspot_all = \ + :hotspot_compiler \ + :hotspot_gc \ + :hotspot_runtime \ + :hotspot_serviceability # Tests that require compact3 API's # needs_compact3 = \ diff -r 5ca2ea5eeff0 -r b1cf34d57e78 test/compiler/ciReplay/common.sh --- a/test/compiler/ciReplay/common.sh Fri Oct 31 17:09:14 2014 -0700 +++ b/test/compiler/ciReplay/common.sh Thu Nov 06 09:39:49 2014 -0800 @@ -213,7 +213,7 @@ -XX:VMThreadStackSize=512 \ -XX:CompilerThreadStackSize=512 \ -XX:ParallelGCThreads=1 \ - -XX:CICompilerCount=1 \ + -XX:CICompilerCount=2 \ -Xcomp \ -XX:CICrashAt=1 \ -XX:+CreateMinidumpOnCrash \ diff -r 5ca2ea5eeff0 -r b1cf34d57e78 test/compiler/exceptions/CatchInlineExceptions.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ 
b/test/compiler/exceptions/CatchInlineExceptions.java Thu Nov 06 09:39:49 2014 -0800 @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8059299 + * @summary assert(adr_type != NULL) failed: expecting TypeKlassPtr + * @run main/othervm -Xbatch CatchInlineExceptions + */ + +class Exception1 extends Exception {}; +class Exception2 extends Exception {}; + +public class CatchInlineExceptions { + private static int counter0; + private static int counter1; + private static int counter2; + private static int counter; + + static void foo(int i) throws Exception { + if ((i & 1023) == 2) { + counter0++; + throw new Exception2(); + } + } + + static void test(int i) throws Exception { + try { + foo(i); + } + catch (Exception e) { + if (e instanceof Exception1) { + counter1++; + } else if (e instanceof Exception2) { + counter2++; + } + counter++; + throw e; + } + } + + public static void main(String[] args) throws Throwable { + for (int i = 0; i < 15000; i++) { + try { + test(i); + } catch (Exception e) { + // expected + } + } + if (counter1 != 0) { + throw new RuntimeException("Failed: counter1(" + counter1 + ") != 0"); + } + if (counter2 != counter0) { + throw new RuntimeException("Failed: counter2(" + counter2 + ") != counter0(" + counter0 + ")"); + } + if (counter2 != counter) { + throw new RuntimeException("Failed: counter2(" + counter2 + ") != counter(" + counter + ")"); + } + System.out.println("TEST PASSED"); + } +} diff -r 5ca2ea5eeff0 -r b1cf34d57e78 test/compiler/jsr292/NullConstantReceiver.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/jsr292/NullConstantReceiver.java Thu Nov 06 09:39:49 2014 -0800 @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8059556 + * @run main/othervm -Xbatch NullConstantReceiver + */ + +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; + +public class NullConstantReceiver { + static final MethodHandle target; + static { + try { + target = MethodHandles.lookup().findVirtual(NullConstantReceiver.class, "test", MethodType.methodType(void.class)); + } catch (ReflectiveOperationException e) { + throw new Error(e); + } + } + + public void test() {} + + static void run() throws Throwable { + target.invokeExact((NullConstantReceiver) null); + } + + public static void main(String[] args) throws Throwable { + for (int i = 0; i<15000; i++) { + try { + run(); + } catch (NullPointerException e) { + // expected + continue; + } + throw new AssertionError("NPE wasn't thrown"); + } + System.out.println("TEST PASSED"); + } +} diff -r 5ca2ea5eeff0 -r b1cf34d57e78 test/compiler/jsr292/VMAnonymousClasses.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/jsr292/VMAnonymousClasses.java Thu Nov 06 09:39:49 2014 -0800 @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test + * @bug 8058828 + * @run main/bootclasspath -Xbatch VMAnonymousClasses + */ + +import jdk.internal.org.objectweb.asm.ClassWriter; +import jdk.internal.org.objectweb.asm.MethodVisitor; +import jdk.internal.org.objectweb.asm.Opcodes; +import sun.misc.Unsafe; + +import java.lang.invoke.ConstantCallSite; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.invoke.MutableCallSite; +import java.lang.invoke.VolatileCallSite; + +public class VMAnonymousClasses { + static final String TEST_METHOD_NAME = "constant"; + + static final Unsafe UNSAFE = Unsafe.getUnsafe(); + + static int getConstantPoolSize(byte[] classFile) { + // The first few bytes: + // u4 magic; + // u2 minor_version; + // u2 major_version; + // u2 constant_pool_count; + return ((classFile[8] & 0xFF) << 8) | (classFile[9] & 0xFF); + } + + static void test(Object value) throws ReflectiveOperationException { + System.out.printf("Test: %s", value != null ? value.getClass() : "null"); + + ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_MAXS | ClassWriter.COMPUTE_FRAMES); + cw.visit(Opcodes.V1_8, Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER, "Test", null, "java/lang/Object", null); + + MethodVisitor mv = cw.visitMethod(Opcodes.ACC_STATIC | Opcodes.ACC_PUBLIC, TEST_METHOD_NAME, "()Ljava/lang/Object;", null, null); + + String placeholder = "CONSTANT"; + int index = cw.newConst(placeholder); + mv.visitLdcInsn(placeholder); + mv.visitInsn(Opcodes.ARETURN); + + mv.visitMaxs(0, 0); + mv.visitEnd(); + + byte[] classFile = cw.toByteArray(); + + Object[] cpPatches = new Object[getConstantPoolSize(classFile)]; + cpPatches[index] = value; + + Class test = UNSAFE.defineAnonymousClass(VMAnonymousClasses.class, classFile, cpPatches); + + Object expectedResult = (value != null) ? value : placeholder; + for (int i = 0; i<15000; i++) { + Object result = test.getMethod(TEST_METHOD_NAME).invoke(null); + if (result != expectedResult) { + throw new AssertionError(String.format("Wrong value returned: %s != %s", value, result)); + } + } + System.out.println(" PASSED"); + } + + public static void main(String[] args) throws ReflectiveOperationException { + // Objects + test(new Object()); + test("TEST"); + test(new VMAnonymousClasses()); + test(null); + + // Class + test(String.class); + + // Arrays + test(new boolean[0]); + test(new byte[0]); + test(new char[0]); + test(new short[0]); + test(new int[0]); + test(new long[0]); + test(new float[0]); + test(new double[0]); + test(new Object[0]); + + // Multi-dimensional arrays + test(new byte[0][0]); + test(new Object[0][0]); + + // MethodHandle-related + MethodType mt = MethodType.methodType(void.class, String[].class); + MethodHandle mh = MethodHandles.lookup().findStatic(VMAnonymousClasses.class, "main", mt); + test(mt); + test(mh); + test(new ConstantCallSite(mh)); + test(new MutableCallSite(MethodType.methodType(void.class))); + test(new VolatileCallSite(MethodType.methodType(void.class))); + + System.out.println("TEST PASSED"); + } +} diff -r 5ca2ea5eeff0 -r b1cf34d57e78 test/compiler/membars/DekkerTest.java --- a/test/compiler/membars/DekkerTest.java Fri Oct 31 17:09:14 2014 -0700 +++ b/test/compiler/membars/DekkerTest.java Thu Nov 06 09:39:49 2014 -0800 @@ -25,9 +25,9 @@ * @test * @bug 8007898 * @summary Incorrect optimization of Memory Barriers in Matcher::post_store_load_barrier(). 
- * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM DekkerTest - * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM DekkerTest - * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM DekkerTest + * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest + * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest + * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest * @author Martin Doerr martin DOT doerr AT sap DOT com * * Run 3 times since the failure is intermittent. diff -r 5ca2ea5eeff0 -r b1cf34d57e78 test/compiler/startup/NumCompilerThreadsCheck.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/startup/NumCompilerThreadsCheck.java Thu Nov 06 09:39:49 2014 -0800 @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8034775 + * @summary Ensures correct minimal number of compiler threads (provided by -XX:CICompilerCount=) + * @library /testlibrary + */ +import com.oracle.java.testlibrary.*; + +public class NumCompilerThreadsCheck { + + public static void main(String[] args) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:CICompilerCount=-1"); + OutputAnalyzer out = new OutputAnalyzer(pb.start()); + + String expectedOutput = "CICompilerCount of -1 is invalid"; + out.shouldContain(expectedOutput); + + if (isZeroVm()) { + String expectedLowWaterMarkText = "must be at least 0"; + out.shouldContain(expectedLowWaterMarkText); + } + } + + private static boolean isZeroVm() { + String vmName = System.getProperty("java.vm.name"); + if (vmName == null) { + throw new RuntimeException("No VM name"); + } + if (vmName.toLowerCase().contains("zero")) { + return true; + } + return false; + } +} diff -r 5ca2ea5eeff0 -r b1cf34d57e78 test/runtime/NMT/ChangeTrackingLevel.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/NMT/ChangeTrackingLevel.java Thu Nov 06 09:39:49 2014 -0800 @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8059100 + * @summary Test that you can decrease NMT tracking level but not increase it. + * @key nmt + * @library /testlibrary /testlibrary/whitebox + * @build ChangeTrackingLevel + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail ChangeTrackingLevel + */ + +import com.oracle.java.testlibrary.*; +import sun.hotspot.WhiteBox; + +public class ChangeTrackingLevel { + + public static WhiteBox wb = WhiteBox.getWhiteBox(); + public static void main(String args[]) throws Exception { + boolean testChangeLevel = wb.NMTChangeTrackingLevel(); + if (testChangeLevel) { + System.out.println("NMT level change test passed."); + } else { + // it also fails if the VM asserts. + throw new RuntimeException("NMT level change test failed"); + } + } +}; diff -r 5ca2ea5eeff0 -r b1cf34d57e78 test/runtime/NMT/PrintNMTStatistics.java --- a/test/runtime/NMT/PrintNMTStatistics.java Fri Oct 31 17:09:14 2014 -0700 +++ b/test/runtime/NMT/PrintNMTStatistics.java Thu Nov 06 09:39:49 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2014 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,42 +24,40 @@ /* * @test * @key nmt regression - * @bug 8005936 - * @summary Make sure PrintNMTStatistics works on normal JVM exit - * @library /testlibrary /testlibrary/whitebox - * @build PrintNMTStatistics - * @run main ClassFileInstaller sun.hotspot.WhiteBox - * @run main PrintNMTStatistics + * @bug 8005936 8058606 + * @summary Verify PrintNMTStatistics on normal JVM exit for detail and summary tracking level + * @library /testlibrary */ import com.oracle.java.testlibrary.*; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import sun.hotspot.WhiteBox; - public class PrintNMTStatistics { - public static void main(String args[]) throws Exception { - - // We start a new java process running with an argument and use WB API to ensure - // we have data for NMT on VM exit - if (args.length > 0) { - return; - } + public static void main(String args[]) throws Exception { ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( - "-XX:+UnlockDiagnosticVMOptions", - "-Xbootclasspath/a:.", - "-XX:+WhiteBoxAPI", - "-XX:NativeMemoryTracking=summary", - "-XX:+PrintNMTStatistics", - "PrintNMTStatistics", - "test"); + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+PrintNMTStatistics", + "-XX:NativeMemoryTracking=detail", + "-version"); + + OutputAnalyzer output_detail = new OutputAnalyzer(pb.start()); + output_detail.shouldContain("Virtual memory map:"); + output_detail.shouldContain("Details:"); + output_detail.shouldNotContain("error"); + output_detail.shouldHaveExitValue(0); - OutputAnalyzer output = new OutputAnalyzer(pb.start()); - output.shouldContain("Java Heap (reserved="); - output.shouldNotContain("error"); - output.shouldHaveExitValue(0); - } + ProcessBuilder pb1 = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:+PrintNMTStatistics", + "-XX:NativeMemoryTracking=summary", + "-version"); + + OutputAnalyzer output_summary = new OutputAnalyzer(pb1.start()); + output_summary.shouldContain("Java Heap (reserved="); + output_summary.shouldNotContain("Virtual memory map:"); + output_summary.shouldNotContain("Details:"); + output_summary.shouldNotContain("error"); + output_summary.shouldHaveExitValue(0); + } } diff -r 5ca2ea5eeff0 -r b1cf34d57e78 test/runtime/SharedArchiveFile/LimitSharedSizes.java --- a/test/runtime/SharedArchiveFile/LimitSharedSizes.java Fri Oct 31 17:09:14 2014 -0700 +++ b/test/runtime/SharedArchiveFile/LimitSharedSizes.java Thu Nov 06 09:39:49 2014 -0800 @@ -51,9 +51,12 @@ // Known issue, JDK-8038422 (assert() on Windows) // new SharedSizeTestData("-XX:SharedMiscDataSize", "500k", "miscellaneous data"), - // This will cause a VM crash; commenting out for now; see bug JDK-8038268 - // @ignore JDK-8038268 - // new SharedSizeTestData("-XX:SharedMiscCodeSize", "20k", "miscellaneous code"), + // Too small of a misc code size should not cause a vm crash. + // It should result in the following error message: + // The shared miscellaneous code space is not large enough + // to preload requested classes. Use -XX:SharedMiscCodeSize= + // to increase the initial size of shared miscellaneous code space. 
+ new SharedSizeTestData("-XX:SharedMiscCodeSize", "20k", "miscellaneous code"), // these values are larger than default ones, but should // be acceptable and not cause failure diff -r 5ca2ea5eeff0 -r b1cf34d57e78 test/runtime/lambda-features/InvokespecialInterface.java --- a/test/runtime/lambda-features/InvokespecialInterface.java Fri Oct 31 17:09:14 2014 -0700 +++ b/test/runtime/lambda-features/InvokespecialInterface.java Thu Nov 06 09:39:49 2014 -0800 @@ -33,11 +33,12 @@ import java.util.function.*; import java.util.*; +public class InvokespecialInterface { interface I { default void imethod() { System.out.println("I::imethod"); } } -class C implements I { +static class C implements I { public void foo() { I.super.imethod(); } // invokespecial InterfaceMethod public void bar() { I i = this; i.imethod(); } // invokeinterface same public void doSomeInvokedynamic() { @@ -48,7 +49,6 @@ } } -public class InvokespecialInterface { public static void main(java.lang.String[] unused) { // need to create C and call I::foo() C c = new C(); diff -r 5ca2ea5eeff0 -r b1cf34d57e78 test/runtime/lambda-features/TestInterfaceInit.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/lambda-features/TestInterfaceInit.java Thu Nov 06 09:39:49 2014 -0800 @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* + * @test + * @bug 8034275 + * @summary [JDK 8u40] Test interface initialization: only for interfaces declaring default methods + * @run main TestInterfaceInit + */ +import java.util.List; +import java.util.Arrays; +import java.util.ArrayList; + +public class TestInterfaceInit { + + static List<Class<?>> cInitOrder = new ArrayList<>(); + + // Declares a default method and initializes + interface I { + boolean v = TestInterfaceInit.out(I.class); + default void x() {} + } + + // Declares a default method and initializes + interface J extends I { + boolean v = TestInterfaceInit.out(J.class); + default void x() {} + } + // No default method, does not initialize + interface JN extends J { + boolean v = TestInterfaceInit.out(JN.class); + } + + // Declares a default method and initializes + interface K extends I { + boolean v = TestInterfaceInit.out(K.class); + default void x() {} + } + + // No default method, does not initialize + interface KN extends K { + boolean v = TestInterfaceInit.out(KN.class); + } + + interface L extends JN, KN { + boolean v = TestInterfaceInit.out(L.class); + default void x() {} + } + + public static void main(String[] args) { + // Trigger initialization + boolean v = L.v; + + List<Class<?>> expectedCInitOrder = Arrays.asList(I.class,J.class,K.class,L.class); + if (!cInitOrder.equals(expectedCInitOrder)) { + throw new RuntimeException(String.format("Class initialization array %s not equal to expected array %s", cInitOrder, expectedCInitOrder)); + } + } + + static boolean out(Class<?> c) { + System.out.println("#: initializing " + c.getName()); + cInitOrder.add(c); + return true; + } + +} diff -r 5ca2ea5eeff0 -r b1cf34d57e78 test/runtime/lambda-features/TestInterfaceOrder.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/lambda-features/TestInterfaceOrder.java Thu Nov 06 09:39:49 2014 -0800 @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ * + */ + +/* + * @test + * @bug 8034275 + * @summary [JDK 8u40] Test interface initialization order + * @run main TestInterfaceOrder + */ + +import java.util.List; +import java.util.Arrays; +import java.util.ArrayList; + +public class TestInterfaceOrder { + static List<Class<?>> cInitOrder = new ArrayList<>(); + + public static void main(java.lang.String[] args) { + //Trigger initialization + C c = new C(); + + List<Class<?>> expectedCInitOrder = Arrays.asList(I.class, J.class, A.class, K.class, B.class, L.class, C.class); + if (!cInitOrder.equals(expectedCInitOrder)) { + throw new RuntimeException(String.format("Class initialization order %s not equal to expected order %s", cInitOrder, expectedCInitOrder)); + } + } + + interface I { + boolean v = TestInterfaceOrder.out(I.class); + default void i() {} + } + + interface J extends I { + boolean v = TestInterfaceOrder.out(J.class); + default void j() {} + } + + static class A implements J { + static boolean v = TestInterfaceOrder.out(A.class); + } + + interface K extends I { + boolean v = TestInterfaceOrder.out(K.class); + default void k() {} + } + + static class B extends A implements K { + static boolean v = TestInterfaceOrder.out(B.class); + } + + interface L { + boolean v = TestInterfaceOrder.out(L.class); + default void l() {} + } + + static class C extends B implements L { + static boolean v = TestInterfaceOrder.out(C.class); + } + + + static boolean out(Class<?> c) { + System.out.println("#: initializing " + c.getName()); + cInitOrder.add(c); + return true; + } + +} diff -r 5ca2ea5eeff0 -r b1cf34d57e78 test/testlibrary/whitebox/sun/hotspot/WhiteBox.java --- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Fri Oct 31 17:09:14 2014 -0700 +++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Thu Nov 06 09:39:49 2014 -0800 @@ -101,6 +101,7 @@ public native void NMTOverflowHashBucket(long num); public native long NMTMallocWithPseudoStack(long size, int index); public native boolean NMTIsDetailSupported(); + public native boolean NMTChangeTrackingLevel(); // Compiler public native void deoptimizeAll();
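The final hunk above adds a new WhiteBox entry point, NMTChangeTrackingLevel(), which the ChangeTrackingLevel test earlier in this changeset drives. As a minimal sketch of a standalone caller (the class name NMTLevelSketch is an illustrative assumption, not part of the changeset; the VM flags mirror those used by the ChangeTrackingLevel test and are assumed to be required here as well):

import sun.hotspot.WhiteBox;

// Minimal sketch: run with -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
// -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail so that the WhiteBox API and
// NMT detail tracking are available before the hook is called.
public class NMTLevelSketch {
    public static void main(String[] args) {
        WhiteBox wb = WhiteBox.getWhiteBox();
        // The native hook runs the level-transition checks inside the VM and reports
        // the overall result; true means tracking could only be decreased, not raised.
        if (!wb.NMTChangeTrackingLevel()) {
            throw new RuntimeException("NMT tracking level transition check failed");
        }
        System.out.println("NMT tracking level transition check passed.");
    }
}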