changeset 7909:9cff03eab05d

Merge
author anoll
date Fri, 31 Oct 2014 11:46:11 +0000
parents 327c00d0f091 (current diff) e903867b97e6 (diff)
children 521e269ae1da
files
diffstat 13 files changed, 331 insertions(+), 221 deletions(-)
--- a/src/share/vm/compiler/compileBroker.cpp	Thu May 08 12:49:21 2014 +0200
+++ b/src/share/vm/compiler/compileBroker.cpp	Fri Oct 31 11:46:11 2014 +0000
@@ -183,9 +183,8 @@
 
 long CompileBroker::_peak_compilation_time       = 0;
 
-CompileQueue* CompileBroker::_c2_method_queue    = NULL;
-CompileQueue* CompileBroker::_c1_method_queue    = NULL;
-CompileTask*  CompileBroker::_task_free_list     = NULL;
+CompileQueue* CompileBroker::_c2_compile_queue   = NULL;
+CompileQueue* CompileBroker::_c1_compile_queue   = NULL;
 
 GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;
 
@@ -253,13 +252,56 @@
 
     // By convention, the compiling thread is responsible for
     // recycling a non-blocking CompileTask.
-    CompileBroker::free_task(task);
+    CompileTask::free(task);
   }
 }
 
 
-// ------------------------------------------------------------------
-// CompileTask::initialize
+CompileTask*  CompileTask::_task_free_list = NULL;
+#ifdef ASSERT
+int CompileTask::_num_allocated_tasks = 0;
+#endif
+/**
+ * Allocate a CompileTask, from the free list if possible.
+ */
+CompileTask* CompileTask::allocate() {
+  MutexLocker locker(CompileTaskAlloc_lock);
+  CompileTask* task = NULL;
+
+  if (_task_free_list != NULL) {
+    task = _task_free_list;
+    _task_free_list = task->next();
+    task->set_next(NULL);
+  } else {
+    task = new CompileTask();
+    DEBUG_ONLY(_num_allocated_tasks++;)
+    assert(_num_allocated_tasks < 10000, "Leaking compilation tasks?");
+    task->set_next(NULL);
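+    // New tasks start out marked free so the invariant check below passes.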
+    task->set_is_free(true);
+  }
+  assert(task->is_free(), "Task must be free.");
+  task->set_is_free(false);
+  return task;
+}
+
+
+/**
+ * Add a task to the free list.
+ */
+void CompileTask::free(CompileTask* task) {
+  MutexLocker locker(CompileTaskAlloc_lock);
+  if (!task->is_free()) {
+    task->set_code(NULL);
+    assert(!task->lock()->is_locked(), "Should not be locked when freed");
+    JNIHandles::destroy_global(task->_method_holder);
+    JNIHandles::destroy_global(task->_hot_method_holder);
+
+    task->set_is_free(true);
+    task->set_next(_task_free_list);
+    _task_free_list = task;
+  }
+}
+
 void CompileTask::initialize(int compile_id,
                              methodHandle method,
                              int osr_bci,
@@ -318,15 +360,6 @@
   if (nm == NULL)  _code_handle = NULL;  // drop the handle also
 }
 
-// ------------------------------------------------------------------
-// CompileTask::free
-void CompileTask::free() {
-  set_code(NULL);
-  assert(!_lock->is_locked(), "Should not be locked when freed");
-  JNIHandles::destroy_global(_method_holder);
-  JNIHandles::destroy_global(_hot_method_holder);
-}
-
 
 void CompileTask::mark_on_stack() {
   // Mark these methods as something redefine classes cannot remove.
@@ -594,9 +627,12 @@
 
 
 
-// Add a CompileTask to a CompileQueue
+/**
+ * Add a CompileTask to a CompileQueue
+ */
 void CompileQueue::add(CompileTask* task) {
   assert(lock()->owned_by_self(), "must own lock");
+  assert(!CompileBroker::is_compilation_disabled_forever(), "Do not add task if compilation is turned off forever");
 
   task->set_next(NULL);
   task->set_prev(NULL);
@@ -618,9 +654,7 @@
   // Mark the method as being in the compile queue.
   task->method()->set_queued_for_compilation();
 
-  if (CIPrintCompileQueue) {
-    print();
-  }
+  NOT_PRODUCT(print();)
 
   if (LogCompilation && xtty != NULL) {
     task->log_task_queued();
@@ -630,14 +664,32 @@
   lock()->notify_all();
 }
 
-void CompileQueue::delete_all() {
-  assert(lock()->owned_by_self(), "must own lock");
-  if (_first != NULL) {
-    for (CompileTask* task = _first; task != NULL; task = task->next()) {
-      delete task;
+/**
+ * Empties the compilation queue by putting all compilation tasks onto
+ * a freelist. Furthermore, the method wakes up all threads that are
+ * waiting on a compilation task to finish. This can happen if background
+ * compilation is disabled.
+ */
+void CompileQueue::free_all() {
+  MutexLocker mu(lock());
+  CompileTask* next = _first;
+
+  // Iterate over all tasks in the compile queue
+  while (next != NULL) {
+    CompileTask* current = next;
+    next = current->next();
+    {
+      // Wake up thread that blocks on the compile task.
+      MutexLocker ct_lock(current->lock());
+      current->lock()->notify();
     }
-    _first = NULL;
+    // Put the task back on the freelist.
+    CompileTask::free(current);
   }
+  _first = NULL;
+
+  // Wake up all threads that block on the queue.
+  lock()->notify_all();
 }
 
 // ------------------------------------------------------------------
@@ -767,18 +819,24 @@
   }
 }
 
-// ------------------------------------------------------------------
-// CompileQueue::print
+#ifndef PRODUCT
+/**
+ * Print entire compilation queue.
+ */
 void CompileQueue::print() {
-  tty->print_cr("Contents of %s", name());
-  tty->print_cr("----------------------");
-  CompileTask* task = _first;
-  while (task != NULL) {
-    task->print_line();
-    task = task->next();
+  if (CIPrintCompileQueue) {
+    ttyLocker ttyl;
+    tty->print_cr("Contents of %s", name());
+    tty->print_cr("----------------------");
+    CompileTask* task = _first;
+    while (task != NULL) {
+      task->print_line();
+      task = task->next();
+    }
+    tty->print_cr("----------------------");
   }
-  tty->print_cr("----------------------");
 }
+#endif // PRODUCT
 
 CompilerCounters::CompilerCounters(const char* thread_name, int instance, TRAPS) {
 
@@ -851,9 +909,6 @@
   _compilers[1] = new SharkCompiler();
 #endif // SHARK
 
-  // Initialize the CompileTask free list
-  _task_free_list = NULL;
-
   // Start the CompilerThreads
   init_compiler_threads(c1_count, c2_count);
   // totalTime performance counter is always created as it is required
@@ -1046,11 +1101,11 @@
 #endif // !ZERO && !SHARK
   // Initialize the compilation queue
   if (c2_compiler_count > 0) {
-    _c2_method_queue  = new CompileQueue("C2MethodQueue",  MethodCompileQueue_lock);
+    _c2_compile_queue  = new CompileQueue("C2 CompileQueue",  MethodCompileQueue_lock);
     _compilers[1]->set_num_compiler_threads(c2_compiler_count);
   }
   if (c1_compiler_count > 0) {
-    _c1_method_queue  = new CompileQueue("C1MethodQueue",  MethodCompileQueue_lock);
+    _c1_compile_queue  = new CompileQueue("C1 CompileQueue",  MethodCompileQueue_lock);
     _compilers[0]->set_num_compiler_threads(c1_compiler_count);
   }
 
@@ -1065,7 +1120,7 @@
     sprintf(name_buffer, "C2 CompilerThread%d", i);
     CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
     // Shark and C2
-    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, _compilers[1], CHECK);
+    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], CHECK);
     _compiler_threads->append(new_thread);
   }
 
@@ -1074,7 +1129,7 @@
     sprintf(name_buffer, "C1 CompilerThread%d", i);
     CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
     // C1
-    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, _compilers[0], CHECK);
+    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], CHECK);
     _compiler_threads->append(new_thread);
   }
 
@@ -1084,14 +1139,19 @@
 }
 
 
-// Set the methods on the stack as on_stack so that redefine classes doesn't
-// reclaim them
+/**
+ * Set the methods on the stack as on_stack so that redefine classes doesn't
+ * reclaim them. This method is executed at a safepoint.
+ */
 void CompileBroker::mark_on_stack() {
-  if (_c2_method_queue != NULL) {
-    _c2_method_queue->mark_on_stack();
+  assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
+  // Since we are at a safepoint, we do not need a lock to access
+  // the compile queues.
+  if (_c2_compile_queue != NULL) {
+    _c2_compile_queue->mark_on_stack();
   }
-  if (_c1_method_queue != NULL) {
-    _c1_method_queue->mark_on_stack();
+  if (_c1_compile_queue != NULL) {
+    _c1_compile_queue->mark_on_stack();
   }
 }
 
@@ -1107,7 +1167,7 @@
                                         const char* comment,
                                         Thread* thread) {
   // do nothing if compiler thread(s) is not available
-  if (!_initialized ) {
+  if (!_initialized) {
     return;
   }
 
@@ -1154,7 +1214,7 @@
 
   // If this method is already in the compile queue, then
   // we do not block the current thread.
-  if (compilation_is_in_queue(method, osr_bci)) {
+  if (compilation_is_in_queue(method)) {
     // We may want to decay our counter a bit here to prevent
     // multiple denied requests for compilation.  This is an
     // open compilation policy issue. Note: The other possibility,
@@ -1193,7 +1253,7 @@
     // Make sure the method has not slipped into the queues since
     // last we checked; note that those checks were "fast bail-outs".
     // Here we need to be more careful, see 14012000 below.
-    if (compilation_is_in_queue(method, osr_bci)) {
+    if (compilation_is_in_queue(method)) {
       return;
     }
 
@@ -1214,7 +1274,7 @@
     }
 
     // Should this thread wait for completion of the compile?
-    blocking = is_compile_blocking(method, osr_bci);
+    blocking = is_compile_blocking();
 
     // We will enter the compilation in the queue.
     // 14012000: Note that this sets the queued_for_compile bits in
@@ -1406,19 +1466,17 @@
 }
 
 
-// ------------------------------------------------------------------
-// CompileBroker::compilation_is_in_queue
-//
-// See if this compilation is already requested.
-//
-// Implementation note: there is only a single "is in queue" bit
-// for each method.  This means that the check below is overly
-// conservative in the sense that an osr compilation in the queue
-// will block a normal compilation from entering the queue (and vice
-// versa).  This can be remedied by a full queue search to disambiguate
-// cases.  If it is deemed profitible, this may be done.
-bool CompileBroker::compilation_is_in_queue(methodHandle method,
-                                            int          osr_bci) {
+/**
+ * See if this compilation is already requested.
+ *
+ * Implementation note: there is only a single "is in queue" bit
+ * for each method.  This means that the check below is overly
+ * conservative in the sense that an osr compilation in the queue
+ * will block a normal compilation from entering the queue (and vice
+ * versa).  This can be remedied by a full queue search to disambiguate
+ * cases.  If it is deemed profitable, this may be done.
+ */
+bool CompileBroker::compilation_is_in_queue(methodHandle method) {
   return method->queued_for_compilation();
 }
 
@@ -1498,13 +1556,11 @@
 #endif
 }
 
-
-// ------------------------------------------------------------------
-// CompileBroker::is_compile_blocking
-//
-// Should the current thread be blocked until this compilation request
-// has been fulfilled?
-bool CompileBroker::is_compile_blocking(methodHandle method, int osr_bci) {
+/**
+ * Should the current thread block until this compilation request
+ * has been fulfilled?
+ */
+bool CompileBroker::is_compile_blocking() {
   assert(!InstanceRefKlass::owns_pending_list_lock(JavaThread::current()), "possible deadlock");
   return !BackgroundCompilation;
 }
@@ -1532,7 +1588,7 @@
                                               int           hot_count,
                                               const char*   comment,
                                               bool          blocking) {
-  CompileTask* new_task = allocate_task();
+  CompileTask* new_task = CompileTask::allocate();
   new_task->initialize(compile_id, method, osr_bci, comp_level,
                        hot_method, hot_count, comment,
                        blocking);
@@ -1541,75 +1597,52 @@
 }
 
 
-// ------------------------------------------------------------------
-// CompileBroker::allocate_task
-//
-// Allocate a CompileTask, from the free list if possible.
-CompileTask* CompileBroker::allocate_task() {
-  MutexLocker locker(CompileTaskAlloc_lock);
-  CompileTask* task = NULL;
-  if (_task_free_list != NULL) {
-    task = _task_free_list;
-    _task_free_list = task->next();
-    task->set_next(NULL);
-  } else {
-    task = new CompileTask();
-    task->set_next(NULL);
-  }
-  return task;
-}
-
-
-// ------------------------------------------------------------------
-// CompileBroker::free_task
-//
-// Add a task to the free list.
-void CompileBroker::free_task(CompileTask* task) {
-  MutexLocker locker(CompileTaskAlloc_lock);
-  task->free();
-  task->set_next(_task_free_list);
-  _task_free_list = task;
-}
-
-
-// ------------------------------------------------------------------
-// CompileBroker::wait_for_completion
-//
-// Wait for the given method CompileTask to complete.
+/**
+ *  Wait for the compilation task to complete.
+ */
 void CompileBroker::wait_for_completion(CompileTask* task) {
   if (CIPrintCompileQueue) {
+    ttyLocker ttyl;
     tty->print_cr("BLOCKING FOR COMPILE");
   }
 
   assert(task->is_blocking(), "can only wait on blocking task");
 
-  JavaThread *thread = JavaThread::current();
+  JavaThread* thread = JavaThread::current();
   thread->set_blocked_on_compilation(true);
 
   methodHandle method(thread, task->method());
   {
     MutexLocker waiter(task->lock(), thread);
 
-    while (!task->is_complete())
+    while (!task->is_complete() && !is_compilation_disabled_forever()) {
       task->lock()->wait();
+    }
   }
+
+  thread->set_blocked_on_compilation(false);
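+  // If compilation was turned off forever while we waited, the task will
+  // never complete; recycle it and bail out.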
+  if (is_compilation_disabled_forever()) {
+    CompileTask::free(task);
+    return;
+  }
+
   // It is harmless to check this status without the lock, because
   // completion is a stable property (until the task object is recycled).
   assert(task->is_complete(), "Compilation should have completed");
   assert(task->code_handle() == NULL, "must be reset");
 
-  thread->set_blocked_on_compilation(false);
-
   // By convention, the waiter is responsible for recycling a
   // blocking CompileTask. Since there is only one waiter ever
   // waiting on a CompileTask, we know that no one else will
   // be using this CompileTask; we can free it.
-  free_task(task);
+  CompileTask::free(task);
 }
 
-// Initialize compiler thread(s) + compiler object(s). The postcondition
-// of this function is that the compiler runtimes are initialized and that
-//compiler threads can start compiling.
+/**
+ * Initialize compiler thread(s) + compiler object(s). The postcondition
+ * of this function is that the compiler runtimes are initialized and that
+ * compiler threads can start compiling.
+ */
 bool CompileBroker::init_compiler_runtime() {
   CompilerThread* thread = CompilerThread::current();
   AbstractCompiler* comp = thread->compiler();
@@ -1646,7 +1679,6 @@
     disable_compilation_forever();
     // If compiler initialization failed, no compiler thread that is specific to a
     // particular compiler runtime will ever start to compile methods.
-
     shutdown_compiler_runtime(comp, thread);
     return false;
   }
@@ -1660,9 +1692,11 @@
   return true;
 }
 
-// If C1 and/or C2 initialization failed, we shut down all compilation.
-// We do this to keep things simple. This can be changed if it ever turns out to be
-// a problem.
+/**
+ * If C1 and/or C2 initialization failed, we shut down all compilation.
+ * We do this to keep things simple. This can be changed if it ever turns
+ * out to be a problem.
+ */
 void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
   // Free buffer blob, if allocated
   if (thread->get_buffer_blob() != NULL) {
@@ -1674,28 +1708,25 @@
     // There are two reasons for shutting down the compiler
     // 1) compiler runtime initialization failed
     // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing
-    warning("Shutting down compiler %s (no space to run compilers)", comp->name());
+    warning("%s initialization failed. Shutting down all compilers", comp->name());
 
     // Only one thread per compiler runtime object enters here
     // Set state to shut down
     comp->set_shut_down();
 
-    MutexLocker mu(MethodCompileQueue_lock, thread);
-    CompileQueue* queue;
-    if (_c1_method_queue != NULL) {
-      _c1_method_queue->delete_all();
-      queue = _c1_method_queue;
-      _c1_method_queue = NULL;
-      delete _c1_method_queue;
+    // Delete all queued compilation tasks to make compiler threads exit faster.
+    if (_c1_compile_queue != NULL) {
+      _c1_compile_queue->free_all();
     }
 
-    if (_c2_method_queue != NULL) {
-      _c2_method_queue->delete_all();
-      queue = _c2_method_queue;
-      _c2_method_queue = NULL;
-      delete _c2_method_queue;
+    if (_c2_compile_queue != NULL) {
+      _c2_compile_queue->free_all();
     }
 
+    // Set flags so that we continue execution with the interpreter only.
+    UseCompiler    = false;
+    UseInterpreter = true;
+
     // We could delete compiler runtimes also. However, there are references to
     // the compiler runtime(s) (e.g.,  nmethod::is_compiled_by_c1()) which then
     // fail. This can be done later if necessary.
--- a/src/share/vm/compiler/compileBroker.hpp	Thu May 08 12:49:21 2014 +0200
+++ b/src/share/vm/compiler/compileBroker.hpp	Fri Oct 31 11:46:11 2014 +0000
@@ -40,6 +40,11 @@
   friend class VMStructs;
 
  private:
+  static CompileTask* _task_free_list;
+#ifdef ASSERT
+  static int          _num_allocated_tasks;
+#endif
+
   Monitor*     _lock;
   uint         _compile_id;
   Method*      _method;
@@ -52,7 +57,7 @@
   int          _num_inlined_bytecodes;
   nmethodLocker* _code_handle;  // holder of eventual result
   CompileTask* _next, *_prev;
-
+  bool         _is_free;
   // Fields used for logging why the compilation was initiated:
   jlong        _time_queued;  // in units of os::elapsed_counter()
   Method*      _hot_method;   // which method actually triggered this task
@@ -70,7 +75,8 @@
                   methodHandle hot_method, int hot_count, const char* comment,
                   bool is_blocking);
 
-  void free();
+  static CompileTask* allocate();
+  static void         free(CompileTask* task);
 
   int          compile_id() const                { return _compile_id; }
   Method*      method() const                    { return _method; }
@@ -99,6 +105,8 @@
   void         set_next(CompileTask* next)       { _next = next; }
   CompileTask* prev() const                      { return _prev; }
   void         set_prev(CompileTask* prev)       { _prev = prev; }
+  bool         is_free() const                   { return _is_free; }
+  void         set_is_free(bool val)             { _is_free = val; }
 
 private:
   static void  print_compilation_impl(outputStream* st, Method* method, int compile_id, int comp_level,
@@ -225,8 +233,8 @@
 
   // Redefine Classes support
   void mark_on_stack();
-  void delete_all();
-  void         print();
+  void free_all();
+  NOT_PRODUCT(void print();)
 
   ~CompileQueue() {
     assert (is_empty(), " Compile Queue must be empty");
@@ -279,9 +287,8 @@
   static int  _last_compile_level;
   static char _last_method_compiled[name_buffer_length];
 
-  static CompileQueue* _c2_method_queue;
-  static CompileQueue* _c1_method_queue;
-  static CompileTask* _task_free_list;
+  static CompileQueue* _c2_compile_queue;
+  static CompileQueue* _c1_compile_queue;
 
   static GrowableArray<CompilerThread*>* _compiler_threads;
 
@@ -334,7 +341,7 @@
   static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
   static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
   static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
-  static bool is_compile_blocking      (methodHandle method, int osr_bci);
+  static bool is_compile_blocking      ();
   static void preload_classes          (methodHandle method, TRAPS);
 
   static CompileTask* create_compile_task(CompileQueue* queue,
@@ -346,8 +353,6 @@
                                           int           hot_count,
                                           const char*   comment,
                                           bool          blocking);
-  static CompileTask* allocate_task();
-  static void free_task(CompileTask* task);
   static void wait_for_completion(CompileTask* task);
 
   static void invoke_compiler_on_method(CompileTask* task);
@@ -365,8 +370,8 @@
                                   const char* comment,
                                   Thread* thread);
   static CompileQueue* compile_queue(int comp_level) {
-    if (is_c2_compile(comp_level)) return _c2_method_queue;
-    if (is_c1_compile(comp_level)) return _c1_method_queue;
+    if (is_c2_compile(comp_level)) return _c2_compile_queue;
+    if (is_c1_compile(comp_level)) return _c1_compile_queue;
     return NULL;
   }
   static bool init_compiler_runtime();
@@ -384,7 +389,7 @@
     return NULL;
   }
 
-  static bool compilation_is_in_queue(methodHandle method, int osr_bci);
+  static bool compilation_is_in_queue(methodHandle method);
   static int queue_size(int comp_level) {
     CompileQueue *q = compile_queue(comp_level);
     return q != NULL ? q->size() : 0;
--- a/src/share/vm/memory/collectorPolicy.cpp	Thu May 08 12:49:21 2014 +0200
+++ b/src/share/vm/memory/collectorPolicy.cpp	Fri Oct 31 11:46:11 2014 +0000
@@ -183,13 +183,9 @@
   // Requirements of any new remembered set implementations must be added here.
   size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
 
-  // Parallel GC does its own alignment of the generations to avoid requiring a
-  // large page (256M on some platforms) for the permanent generation.  The
-  // other collectors should also be updated to do their own alignment and then
-  // this use of lcm() should be removed.
-  if (UseLargePages && !UseParallelGC) {
-      // in presence of large pages we have to make sure that our
-      // alignment is large page aware
+  if (UseLargePages) {
+      // In presence of large pages we have to make sure that our
+      // alignment is large page aware.
       alignment = lcm(os::large_page_size(), alignment);
   }
 
--- a/src/share/vm/memory/metaspace.cpp	Thu May 08 12:49:21 2014 +0200
+++ b/src/share/vm/memory/metaspace.cpp	Fri Oct 31 11:46:11 2014 +0000
@@ -3155,6 +3155,16 @@
     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
 
+    // The min_misc_code_size estimate is based on MetaspaceShared::generate_vtable_methods().
+    uintx min_misc_code_size = align_size_up(
+      (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) *
+        (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size,
+          max_alignment);
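+    // One pointer plus one generated stub per vtable entry, plus the common
+    // code, rounded up to max_alignment.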
+
+    if (SharedMiscCodeSize < min_misc_code_size) {
+      report_out_of_shared_space(SharedMiscCode);
+    }
+
     // Initialize with the sum of the shared space sizes.  The read-only
     // and read write metaspace chunks will be allocated out of this and the
     // remainder is the misc code and data chunks.
--- a/src/share/vm/memory/metaspaceShared.hpp	Thu May 08 12:49:21 2014 +0200
+++ b/src/share/vm/memory/metaspaceShared.hpp	Fri Oct 31 11:46:11 2014 +0000
@@ -57,11 +57,16 @@
   static bool _archive_loading_failed;
  public:
   enum {
-    vtbl_list_size = 17, // number of entries in the shared space vtable list.
-    num_virtuals = 200   // maximum number of virtual functions
-                         // If virtual functions are added to Metadata,
-                         // this number needs to be increased.  Also,
-                         // SharedMiscCodeSize will need to be increased.
+    vtbl_list_size         = 17,   // number of entries in the shared space vtable list.
+    num_virtuals           = 200,  // maximum number of virtual functions
+                                   // If virtual functions are added to Metadata,
+                                   // this number needs to be increased.  Also,
+                                   // SharedMiscCodeSize will need to be increased.
+                                   // The following 2 sizes were based on
+                                   // MetaspaceShared::generate_vtable_methods()
+    vtbl_method_size       = 16,   // conservative size of the movl and jmp instructions
+                                   // for the x64 platform
+    vtbl_common_code_size  = (1*K) // conservative size of the "common_code" for the x64 platform
   };
 
   enum {
--- a/src/share/vm/opto/c2_globals.hpp	Thu May 08 12:49:21 2014 +0200
+++ b/src/share/vm/opto/c2_globals.hpp	Fri Oct 31 11:46:11 2014 +0000
@@ -473,6 +473,9 @@
   product(bool, DoEscapeAnalysis, true,                                     \
           "Perform escape analysis")                                        \
                                                                             \
+  product(double, EscapeAnalysisTimeout, 20. DEBUG_ONLY(+40.),              \
+          "Abort EA when it reaches time limit (in sec)")                   \
+                                                                            \
   develop(bool, ExitEscapeAnalysisOnTimeout, true,                          \
           "Exit or throw assert in EA when it reaches time limit")          \
                                                                             \
--- a/src/share/vm/opto/escape.cpp	Thu May 08 12:49:21 2014 +0200
+++ b/src/share/vm/opto/escape.cpp	Fri Oct 31 11:46:11 2014 +0000
@@ -37,6 +37,8 @@
 
 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
+  _in_worklist(C->comp_arena()),
+  _next_pidx(0),
   _collecting(true),
   _verify(false),
   _compile(C),
@@ -124,13 +126,19 @@
   if (C->root() != NULL) {
     ideal_nodes.push(C->root());
   }
+  // Processed ideal nodes are unique on ideal_nodes list
+  // but several ideal nodes are mapped to the phantom_obj.
+  // To avoid duplicate entries on the following worklists,
+  // add the phantom_obj to them only once.
+  ptnodes_worklist.append(phantom_obj);
+  java_objects_worklist.append(phantom_obj);
   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
     Node* n = ideal_nodes.at(next);
     // Create PointsTo nodes and add them to Connection Graph. Called
     // only once per ideal node since ideal_nodes is Unique_Node list.
     add_node_to_connection_graph(n, &delayed_worklist);
     PointsToNode* ptn = ptnode_adr(n->_idx);
-    if (ptn != NULL) {
+    if (ptn != NULL && ptn != phantom_obj) {
       ptnodes_worklist.append(ptn);
       if (ptn->is_JavaObject()) {
         java_objects_worklist.append(ptn->as_JavaObject());
@@ -414,7 +422,7 @@
     }
     case Op_CreateEx: {
       // assume that all exception objects globally escape
-      add_java_object(n, PointsToNode::GlobalEscape);
+      map_ideal_node(n, phantom_obj);
       break;
     }
     case Op_LoadKlass:
@@ -1065,13 +1073,8 @@
   // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
   // Set limit to 20 to catch situation when something did go wrong and
   // bailout Escape Analysis.
-  // Also limit build time to 30 sec (60 in debug VM).
+  // Also limit build time to 20 sec (60 in debug VM); see the EscapeAnalysisTimeout flag.
 #define CG_BUILD_ITER_LIMIT 20
-#ifdef ASSERT
-#define CG_BUILD_TIME_LIMIT 60.0
-#else
-#define CG_BUILD_TIME_LIMIT 30.0
-#endif
 
   // Propagate GlobalEscape and ArgEscape escape states and check that
   // we still have non-escaping objects. The method pushs on _worklist
@@ -1082,12 +1085,13 @@
   // Now propagate references to all JavaObject nodes.
   int java_objects_length = java_objects_worklist.length();
   elapsedTimer time;
+  bool timeout = false;
   int new_edges = 1;
   int iterations = 0;
   do {
     while ((new_edges > 0) &&
-          (iterations++   < CG_BUILD_ITER_LIMIT) &&
-          (time.seconds() < CG_BUILD_TIME_LIMIT)) {
+           (iterations++ < CG_BUILD_ITER_LIMIT)) {
+      double start_time = time.seconds();
       time.start();
       new_edges = 0;
       // Propagate references to phantom_object for nodes pushed on _worklist
@@ -1096,7 +1100,26 @@
       for (int next = 0; next < java_objects_length; ++next) {
         JavaObjectNode* ptn = java_objects_worklist.at(next);
         new_edges += add_java_object_edges(ptn, true);
+
+#define SAMPLE_SIZE 4
+        if ((next % SAMPLE_SIZE) == 0) {
+          // Every SAMPLE_SIZE iterations, estimate how long it will take
+          // to complete graph construction.
+          time.stop();
+          double stop_time = time.seconds();
+          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
+          double time_until_end = time_per_iter * (double)(java_objects_length - next);
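+          // Bail out if the projected total time would exceed the limit.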
+          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
+            timeout = true;
+            break; // Timeout
+          }
+          start_time = stop_time;
+          time.start();
+        }
+#undef SAMPLE_SIZE
+
       }
+      if (timeout) break;
       if (new_edges > 0) {
         // Update escape states on each iteration if graph was updated.
         if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
@@ -1104,9 +1127,12 @@
         }
       }
       time.stop();
+      if (time.seconds() >= EscapeAnalysisTimeout) {
+        timeout = true;
+        break;
+      }
     }
-    if ((iterations     < CG_BUILD_ITER_LIMIT) &&
-        (time.seconds() < CG_BUILD_TIME_LIMIT)) {
+    if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
       time.start();
       // Find fields which have unknown value.
       int fields_length = oop_fields_worklist.length();
@@ -1119,18 +1145,21 @@
         }
       }
       time.stop();
+      if (time.seconds() >= EscapeAnalysisTimeout) {
+        timeout = true;
+        break;
+      }
     } else {
       new_edges = 0; // Bailout
     }
   } while (new_edges > 0);
 
   // Bailout if passed limits.
-  if ((iterations     >= CG_BUILD_ITER_LIMIT) ||
-      (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
+  if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
     Compile* C = _compile;
     if (C->log() != NULL) {
       C->log()->begin_elem("connectionGraph_bailout reason='reached ");
-      C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time");
+      C->log()->text("%s", timeout ? "time" : "iterations");
       C->log()->end_elem(" limit'");
     }
     assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
@@ -1147,7 +1176,6 @@
 #endif
 
 #undef CG_BUILD_ITER_LIMIT
-#undef CG_BUILD_TIME_LIMIT
 
   // Find fields initialized by NULL for non-escaping Allocations.
   int non_escaped_length = non_escaped_worklist.length();
@@ -1271,8 +1299,8 @@
       }
     }
   }
-  while(_worklist.length() > 0) {
-    PointsToNode* use = _worklist.pop();
+  for (int l = 0; l < _worklist.length(); l++) {
+    PointsToNode* use = _worklist.at(l);
     if (PointsToNode::is_base_use(use)) {
       // Add reference from jobj to field and from field to jobj (field's base).
       use = PointsToNode::get_use_node(use)->as_Field();
@@ -1319,6 +1347,8 @@
       add_field_uses_to_worklist(use->as_Field());
     }
   }
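+  // Reset the worklist and its membership set for the next call.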
+  _worklist.clear();
+  _in_worklist.Reset();
   return new_edges;
 }
 
@@ -1898,7 +1928,7 @@
     return;
   }
   Compile* C = _compile;
-  ptadr = new (C->comp_arena()) LocalVarNode(C, n, es);
+  ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
   _nodes.at_put(n->_idx, ptadr);
 }
 
@@ -1909,7 +1939,7 @@
     return;
   }
   Compile* C = _compile;
-  ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es);
+  ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
   _nodes.at_put(n->_idx, ptadr);
 }
 
@@ -1925,7 +1955,7 @@
     es = PointsToNode::GlobalEscape;
   }
   Compile* C = _compile;
-  FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop);
+  FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
   _nodes.at_put(n->_idx, field);
 }
 
@@ -1939,7 +1969,7 @@
     return;
   }
   Compile* C = _compile;
-  ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es);
+  ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
   _nodes.at_put(n->_idx, ptadr);
   // Add edge from arraycopy node to source object.
   (void)add_edge(ptadr, src);
--- a/src/share/vm/opto/escape.hpp	Thu May 08 12:49:21 2014 +0200
+++ b/src/share/vm/opto/escape.hpp	Fri Oct 31 11:46:11 2014 +0000
@@ -125,6 +125,8 @@
 class FieldNode;
 class ArraycopyNode;
 
+class ConnectionGraph;
+
 // ConnectionGraph nodes
 class PointsToNode : public ResourceObj {
   GrowableArray<PointsToNode*> _edges; // List of nodes this node points to
@@ -137,6 +139,7 @@
 
   Node* const        _node;  // Ideal node corresponding to this PointsTo node.
   const int           _idx;  // Cached ideal node's _idx
+  const uint         _pidx;  // Index of this node
 
 public:
   typedef enum {
@@ -165,17 +168,9 @@
   } NodeFlags;
 
 
-  PointsToNode(Compile *C, Node* n, EscapeState es, NodeType type):
-    _edges(C->comp_arena(), 2, 0, NULL),
-    _uses (C->comp_arena(), 2, 0, NULL),
-    _node(n),
-    _idx(n->_idx),
-    _type((u1)type),
-    _escape((u1)es),
-    _fields_escape((u1)es),
-    _flags(ScalarReplaceable) {
-    assert(n != NULL && es != UnknownEscape, "sanity");
-  }
+  inline PointsToNode(ConnectionGraph* CG, Node* n, EscapeState es, NodeType type);
+
+  uint        pidx()   const { return _pidx; }
 
   Node* ideal_node()   const { return _node; }
   int          idx()   const { return _idx; }
@@ -243,14 +238,14 @@
 
 class LocalVarNode: public PointsToNode {
 public:
-  LocalVarNode(Compile *C, Node* n, EscapeState es):
-    PointsToNode(C, n, es, LocalVar) {}
+  LocalVarNode(ConnectionGraph *CG, Node* n, EscapeState es):
+    PointsToNode(CG, n, es, LocalVar) {}
 };
 
 class JavaObjectNode: public PointsToNode {
 public:
-  JavaObjectNode(Compile *C, Node* n, EscapeState es):
-    PointsToNode(C, n, es, JavaObject) {
+  JavaObjectNode(ConnectionGraph *CG, Node* n, EscapeState es):
+    PointsToNode(CG, n, es, JavaObject) {
       if (es > NoEscape)
         set_scalar_replaceable(false);
     }
@@ -262,8 +257,8 @@
   const bool  _is_oop; // Field points to object
         bool  _has_unknown_base; // Has phantom_object base
 public:
-  FieldNode(Compile *C, Node* n, EscapeState es, int offs, bool is_oop):
-    PointsToNode(C, n, es, Field),
+  FieldNode(ConnectionGraph *CG, Node* n, EscapeState es, int offs, bool is_oop):
+    PointsToNode(CG, n, es, Field),
     _offset(offs), _is_oop(is_oop),
     _has_unknown_base(false) {}
 
@@ -284,8 +279,8 @@
 
 class ArraycopyNode: public PointsToNode {
 public:
-  ArraycopyNode(Compile *C, Node* n, EscapeState es):
-    PointsToNode(C, n, es, Arraycopy) {}
+  ArraycopyNode(ConnectionGraph *CG, Node* n, EscapeState es):
+    PointsToNode(CG, n, es, Arraycopy) {}
 };
 
 // Iterators for PointsTo node's edges:
@@ -323,11 +318,14 @@
 
 
 class ConnectionGraph: public ResourceObj {
+  friend class PointsToNode;
 private:
   GrowableArray<PointsToNode*>  _nodes; // Map from ideal nodes to
                                         // ConnectionGraph nodes.
 
   GrowableArray<PointsToNode*>  _worklist; // Nodes to be processed
+  VectorSet                  _in_worklist;
+  uint                         _next_pidx;
 
   bool            _collecting; // Indicates whether escape information
                                // is still being collected. If false,
@@ -353,6 +351,8 @@
   }
   uint nodes_size() const { return _nodes.length(); }
 
+  uint next_pidx() { return _next_pidx++; }
+
   // Add nodes to ConnectionGraph.
   void add_local_var(Node* n, PointsToNode::EscapeState es);
   void add_java_object(Node* n, PointsToNode::EscapeState es);
@@ -396,15 +396,26 @@
   int add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist);
 
   // Put node on worklist if it is (or was) not there.
-  void add_to_worklist(PointsToNode* pt) {
-    _worklist.push(pt);
-    return;
+  inline void add_to_worklist(PointsToNode* pt) {
+    PointsToNode* ptf = pt;
+    uint pidx_bias = 0;
+    if (PointsToNode::is_base_use(pt)) {
+      // Create a separate entry in _in_worklist for a marked base edge
+      // because _worklist may have an entry for a normal edge pointing
+      // to the same node. To separate them use _next_pidx as bias.
+      ptf = PointsToNode::get_use_node(pt)->as_Field();
+      pidx_bias = _next_pidx;
+    }
+    if (!_in_worklist.test_set(ptf->pidx() + pidx_bias)) {
+      _worklist.append(pt);
+    }
   }
 
   // Put on worklist all uses of this node.
-  void add_uses_to_worklist(PointsToNode* pt) {
-    for (UseIterator i(pt); i.has_next(); i.next())
-      _worklist.push(i.get());
+  inline void add_uses_to_worklist(PointsToNode* pt) {
+    for (UseIterator i(pt); i.has_next(); i.next()) {
+      add_to_worklist(i.get());
+    }
   }
 
   // Put on worklist all field's uses and related field nodes.
@@ -517,8 +528,8 @@
  }
   // Helper functions
   bool   is_oop_field(Node* n, int offset, bool* unsafe);
- static Node* get_addp_base(Node *addp);
- static Node* find_second_addp(Node* addp, Node* n);
+  static Node* get_addp_base(Node *addp);
+  static Node* find_second_addp(Node* addp, Node* n);
   // offset of a field reference
   int address_offset(Node* adr, PhaseTransform *phase);
 
@@ -587,4 +598,17 @@
 #endif
 };
 
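+// Defined here, after ConnectionGraph, because the constructor uses
+// CG->_compile and CG->next_pidx().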
+inline PointsToNode::PointsToNode(ConnectionGraph *CG, Node* n, EscapeState es, NodeType type):
+  _edges(CG->_compile->comp_arena(), 2, 0, NULL),
+  _uses (CG->_compile->comp_arena(), 2, 0, NULL),
+  _node(n),
+  _idx(n->_idx),
+  _pidx(CG->next_pidx()),
+  _type((u1)type),
+  _escape((u1)es),
+  _fields_escape((u1)es),
+  _flags(ScalarReplaceable) {
+  assert(n != NULL && es != UnknownEscape, "sanity");
+}
+
 #endif // SHARE_VM_OPTO_ESCAPE_HPP
--- a/src/share/vm/runtime/advancedThresholdPolicy.cpp	Thu May 08 12:49:21 2014 +0200
+++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp	Fri Oct 31 11:46:11 2014 +0000
@@ -451,7 +451,7 @@
   if (should_create_mdo(mh(), level)) {
     create_mdo(mh, thread);
   }
-  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
+  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
     CompLevel next_level = call_event(mh(), level);
     if (next_level != level) {
       compile(mh, InvocationEntryBci, next_level, thread);
@@ -475,7 +475,7 @@
     CompLevel next_osr_level = loop_event(imh(), level);
     CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
     // At the very least compile the OSR version
-    if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_osr_level != level) {
+    if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
       compile(imh, bci, next_osr_level, thread);
     }
 
@@ -509,7 +509,7 @@
           nm->make_not_entrant();
         }
       }
-      if (!CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
+      if (!CompileBroker::compilation_is_in_queue(mh)) {
         // Fix up next_level if necessary to avoid deopts
         if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
           next_level = CompLevel_full_profile;
@@ -521,7 +521,7 @@
     } else {
       cur_level = comp_level(imh());
       next_level = call_event(imh(), cur_level);
-      if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) {
+      if (!CompileBroker::compilation_is_in_queue(imh) && (next_level != cur_level)) {
         compile(imh, InvocationEntryBci, next_level, thread);
       }
     }
--- a/src/share/vm/runtime/simpleThresholdPolicy.cpp	Thu May 08 12:49:21 2014 +0200
+++ b/src/share/vm/runtime/simpleThresholdPolicy.cpp	Fri Oct 31 11:46:11 2014 +0000
@@ -239,7 +239,7 @@
   if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
     return;
   }
-  if (!CompileBroker::compilation_is_in_queue(mh, bci)) {
+  if (!CompileBroker::compilation_is_in_queue(mh)) {
     if (PrintTieredEvents) {
       print_event(COMPILE, mh, mh, bci, level);
     }
@@ -378,7 +378,7 @@
 // Handle the invocation event.
 void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
                                               CompLevel level, nmethod* nm, JavaThread* thread) {
-  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
+  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
     CompLevel next_level = call_event(mh(), level);
     if (next_level != level) {
       compile(mh, InvocationEntryBci, next_level, thread);
@@ -391,8 +391,8 @@
 void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
                                                      int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
   // If the method is already compiling, quickly bail out.
-  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
-    // Use loop event as an opportinity to also check there's been
+  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
+    // Use loop event as an opportunity to also check there's been
     // enough calls.
     CompLevel cur_level = comp_level(mh());
     CompLevel next_level = call_event(mh(), cur_level);
--- a/src/share/vm/utilities/debug.cpp	Thu May 08 12:49:21 2014 +0200
+++ b/src/share/vm/utilities/debug.cpp	Fri Oct 31 11:46:11 2014 +0000
@@ -266,17 +266,19 @@
     "native memory for metadata",
     "shared read only space",
     "shared read write space",
-    "shared miscellaneous data space"
+    "shared miscellaneous data space",
+    "shared miscellaneous code space"
   };
   static const char* flag[] = {
     "Metaspace",
     "SharedReadOnlySize",
     "SharedReadWriteSize",
-    "SharedMiscDataSize"
+    "SharedMiscDataSize",
+    "SharedMiscCodeSize"
   };
 
    warning("\nThe %s is not large enough\n"
-           "to preload requested classes. Use -XX:%s=\n"
+           "to preload requested classes. Use -XX:%s=<size>\n"
            "to increase the initial size of %s.\n",
            name[shared_space], flag[shared_space], name[shared_space]);
    exit(2);
--- a/src/share/vm/utilities/debug.hpp	Thu May 08 12:49:21 2014 +0200
+++ b/src/share/vm/utilities/debug.hpp	Fri Oct 31 11:46:11 2014 +0000
@@ -246,7 +246,8 @@
   SharedPermGen,
   SharedReadOnly,
   SharedReadWrite,
-  SharedMiscData
+  SharedMiscData,
+  SharedMiscCode
 };
 
 void report_out_of_shared_space(SharedSpaceType space_type);
--- a/test/runtime/SharedArchiveFile/LimitSharedSizes.java	Thu May 08 12:49:21 2014 +0200
+++ b/test/runtime/SharedArchiveFile/LimitSharedSizes.java	Fri Oct 31 11:46:11 2014 +0000
@@ -51,9 +51,12 @@
         // Known issue, JDK-8038422 (assert() on Windows)
         // new SharedSizeTestData("-XX:SharedMiscDataSize", "500k",    "miscellaneous data"),
 
-        // This will cause a VM crash; commenting out for now; see bug JDK-8038268
-        // @ignore JDK-8038268
-        // new SharedSizeTestData("-XX:SharedMiscCodeSize", "20k",     "miscellaneous code"),
+        // A misc code size that is too small should not cause a VM crash.
+        // It should result in the following error message:
+        // The shared miscellaneous code space is not large enough
+        // to preload requested classes. Use -XX:SharedMiscCodeSize=
+        // to increase the initial size of shared miscellaneous code space.
+        new SharedSizeTestData("-XX:SharedMiscCodeSize", "20k",     "miscellaneous code"),
 
         // these values are larger than default ones, but should
         // be acceptable and not cause failure