changeset 4806:f75faf51e8c4

7158805: Better rewriting of nested subroutine calls
Reviewed-by: mschoene, coleenp
author hseigel
date Thu, 07 Mar 2013 11:49:38 -0500
parents 6c560f9ebb3e
children b295e132102d
files src/share/vm/memory/allocation.cpp src/share/vm/memory/allocation.hpp src/share/vm/oops/generateOopMap.cpp
diffstat 3 files changed, 46 insertions(+), 27 deletions(-)
--- a/src/share/vm/memory/allocation.cpp	Wed Apr 17 10:12:42 2013 -0700
+++ b/src/share/vm/memory/allocation.cpp	Thu Mar 07 11:49:38 2013 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -248,7 +248,7 @@
    ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
 
   // Allocate a new chunk from the pool (might expand the pool)
-  _NOINLINE_ void* allocate(size_t bytes) {
+  _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
     assert(bytes == _size, "bad size");
     void* p = NULL;
     // No VM lock can be taken inside ThreadCritical lock, so os::malloc
@@ -258,9 +258,9 @@
       p = get_first();
     }
     if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
-    if (p == NULL)
+    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, "ChunkPool::allocate");
-
+    }
     return p;
   }
 
@@ -357,7 +357,7 @@
 //--------------------------------------------------------------------------------------
 // Chunk implementation
 
-void* Chunk::operator new(size_t requested_size, size_t length) {
+void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) {
   // requested_size is equal to sizeof(Chunk) but in order for the arena
   // allocations to come out aligned as expected the size must be aligned
   // to expected arena alignment.
@@ -365,13 +365,14 @@
   assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
   size_t bytes = ARENA_ALIGN(requested_size) + length;
   switch (length) {
-   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
-   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
-   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
+   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
+   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
+   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
    default: {
-     void *p =  os::malloc(bytes, mtChunk, CALLER_PC);
-     if (p == NULL)
+     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
+     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
        vm_exit_out_of_memory(bytes, "Chunk::new");
+     }
      return p;
    }
   }
@@ -425,7 +426,7 @@
 Arena::Arena(size_t init_size) {
   size_t round_size = (sizeof (char *)) - 1;
   init_size = (init_size+round_size) & ~round_size;
-  _first = _chunk = new (init_size) Chunk(init_size);
+  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
   set_size_in_bytes(init_size);
@@ -433,7 +434,7 @@
 }
 
 Arena::Arena() {
-  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
+  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
   set_size_in_bytes(Chunk::init_size);
@@ -540,12 +541,9 @@
   size_t len = MAX2(x, (size_t) Chunk::size);
 
   Chunk *k = _chunk;            // Get filled-up chunk address
-  _chunk = new (len) Chunk(len);
+  _chunk = new (alloc_failmode, len) Chunk(len);
 
   if (_chunk == NULL) {
-    if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
-      signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
-    }
     return NULL;
   }
   if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
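
The allocation.cpp hunks above thread an AllocFailType argument from Arena::grow() down through Chunk::operator new and ChunkPool::allocate(), so an out-of-memory condition can come back to the caller as NULL instead of ending the VM via vm_exit_out_of_memory(). Below is a minimal standalone sketch of that control flow, using invented names (AllocFailMode, pool_allocate) rather than the HotSpot types:

// Illustration only: a stripped-down analogue of the AllocFailType plumbing
// added above. EXIT_OOM aborts on failure; RETURN_NULL hands NULL back to the
// caller. AllocFailMode and pool_allocate are invented names, not HotSpot's.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

enum class AllocFailMode { EXIT_OOM, RETURN_NULL };

static void* pool_allocate(std::size_t bytes, AllocFailMode mode) {
  void* p = std::malloc(bytes);                    // stands in for os::malloc / pool reuse
  if (p == nullptr && mode == AllocFailMode::EXIT_OOM) {
    std::fprintf(stderr, "out of memory allocating %zu bytes\n", bytes);
    std::abort();                                  // stands in for vm_exit_out_of_memory
  }
  return p;                                        // may be NULL under RETURN_NULL
}

int main() {
  // A caller that prefers a soft failure, as Arena::grow() now does:
  void* p = pool_allocate(64, AllocFailMode::RETURN_NULL);
  if (p == nullptr) {
    std::puts("allocation failed; caller recovers instead of exiting");
    return 1;
  }
  std::puts("allocation succeeded");
  std::free(p);
  return 0;
}
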
--- a/src/share/vm/memory/allocation.hpp	Wed Apr 17 10:12:42 2013 -0700
+++ b/src/share/vm/memory/allocation.hpp	Thu Mar 07 11:49:38 2013 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -274,7 +274,7 @@
   Chunk*       _next;     // Next Chunk in list
   const size_t _len;      // Size of this Chunk
  public:
-  void* operator new(size_t size, size_t length);
+  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length);
   void  operator delete(void* p);
   Chunk(size_t length);
 
@@ -337,10 +337,15 @@
 
   void signal_out_of_memory(size_t request, const char* whence) const;
 
-  void check_for_overflow(size_t request, const char* whence) const {
+  bool check_for_overflow(size_t request, const char* whence,
+      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
     if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
+      if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
+        return false;
+      }
       signal_out_of_memory(request, whence);
     }
+    return true;
  }
 
  public:
@@ -364,7 +369,8 @@
     assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2");
     x = ARENA_ALIGN(x);
     debug_only(if (UseMallocOnly) return malloc(x);)
-    check_for_overflow(x, "Arena::Amalloc");
+    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
+      return NULL;
     NOT_PRODUCT(inc_bytes_allocated(x);)
     if (_hwm + x > _max) {
       return grow(x, alloc_failmode);
@@ -378,7 +384,8 @@
   void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
     assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
     debug_only(if (UseMallocOnly) return malloc(x);)
-    check_for_overflow(x, "Arena::Amalloc_4");
+    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
+      return NULL;
     NOT_PRODUCT(inc_bytes_allocated(x);)
     if (_hwm + x > _max) {
       return grow(x, alloc_failmode);
@@ -399,7 +406,8 @@
     size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
     x += delta;
 #endif
-    check_for_overflow(x, "Arena::Amalloc_D");
+    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
+      return NULL;
     NOT_PRODUCT(inc_bytes_allocated(x);)
     if (_hwm + x > _max) {
       return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
@@ -539,6 +547,9 @@
 #define NEW_RESOURCE_ARRAY(type, size)\
   (type*) resource_allocate_bytes((size) * sizeof(type))
 
+#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\
+  (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL)
+
 #define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
   (type*) resource_allocate_bytes(thread, (size) * sizeof(type))
 
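In allocation.hpp, check_for_overflow() now returns a bool and takes the failure mode, letting each Amalloc variant bail out with NULL when the requested size would wrap the high-water mark, and NEW_RESOURCE_ARRAY_RETURN_NULL exposes the RETURN_NULL strategy to resource-array callers. A minimal standalone sketch of the overflow-guarded bump allocation, with invented names (ArenaLike, amalloc) standing in for the real Arena:

// Illustration only: an overflow guard that returns bool, so the bump-pointer
// allocator can return NULL instead of terminating. ArenaLike and amalloc are
// invented names; the real Arena::Amalloc also grows a chunk list on demand.
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct ArenaLike {
  char* _hwm;   // high-water mark of the current chunk
  char* _max;   // end of the current chunk

  // True if _hwm + request cannot wrap around the address space.
  bool check_for_overflow(std::size_t request) const {
    return UINTPTR_MAX - request >= reinterpret_cast<std::uintptr_t>(_hwm);
  }

  void* amalloc(std::size_t x) {
    if (!check_for_overflow(x)) return nullptr;   // soft failure, like RETURN_NULL
    if (_hwm + x > _max) return nullptr;          // the real code would grow() here
    char* old = _hwm;
    _hwm += x;
    return old;
  }
};

int main() {
  char buffer[128];
  ArenaLike a{buffer, buffer + sizeof(buffer)};
  void* ok  = a.amalloc(64);
  void* big = a.amalloc(SIZE_MAX);                // trips the overflow guard
  std::printf("ok=%p big=%p\n", ok, big);
  return 0;
}
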
--- a/src/share/vm/oops/generateOopMap.cpp	Wed Apr 17 10:12:42 2013 -0700
+++ b/src/share/vm/oops/generateOopMap.cpp	Thu Mar 07 11:49:38 2013 -0500
@@ -642,11 +642,21 @@
 // CellType handling methods
 //
 
+// Allocate memory and throw LinkageError if failure.
+#define ALLOC_RESOURCE_ARRAY(var, type, count) \
+  var = NEW_RESOURCE_ARRAY_RETURN_NULL(type, count);              \
+  if (var == NULL) {                                              \
+    report_error("Cannot reserve enough memory to analyze this method"); \
+    return;                                                       \
+  }
+
+
 void GenerateOopMap::init_state() {
   _state_len     = _max_locals + _max_stack + _max_monitors;
-  _state         = NEW_RESOURCE_ARRAY(CellTypeState, _state_len);
+  ALLOC_RESOURCE_ARRAY(_state, CellTypeState, _state_len);
   memset(_state, 0, _state_len * sizeof(CellTypeState));
-  _state_vec_buf = NEW_RESOURCE_ARRAY(char, MAX3(_max_locals, _max_stack, _max_monitors) + 1/*for null terminator char */);
+  int count = MAX3(_max_locals, _max_stack, _max_monitors) + 1/*for null terminator char */;
+  ALLOC_RESOURCE_ARRAY(_state_vec_buf, char, count);
 }
 
 void GenerateOopMap::make_context_uninitialized() {
@@ -905,7 +915,7 @@
   // But cumbersome since we don't know the stack heights yet.  (Nor the
   // monitor stack heights...)
 
-  _basic_blocks = NEW_RESOURCE_ARRAY(BasicBlock, _bb_count);
+  ALLOC_RESOURCE_ARRAY(_basic_blocks, BasicBlock, _bb_count);
 
   // Make a pass through the bytecodes.  Count the number of monitorenters.
   // This can be used as an upper bound on the monitor stack depth in programs
@@ -976,8 +986,8 @@
     return;
   }
 
-  CellTypeState *basicBlockState =
-      NEW_RESOURCE_ARRAY(CellTypeState, bbNo * _state_len);
+  CellTypeState *basicBlockState;
+  ALLOC_RESOURCE_ARRAY(basicBlockState, CellTypeState, bbNo * _state_len);
   memset(basicBlockState, 0, bbNo * _state_len * sizeof(CellTypeState));
 
   // Make a pass over the basicblocks and assign their state vectors.
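
The generateOopMap.cpp change routes these allocations through the ALLOC_RESOURCE_ARRAY macro: allocate with NEW_RESOURCE_ARRAY_RETURN_NULL, and on NULL call report_error() and return from the enclosing (void) method, rather than exiting the VM when analysis of a method needs more memory than is available. A minimal standalone sketch of that allocate-or-report-and-return shape, with invented names (MY_ALLOC_OR_BAIL, init_state_like):

// Illustration only: the shape of an allocate-or-report-and-return macro like
// ALLOC_RESOURCE_ARRAY. MY_ALLOC_OR_BAIL, report_error and init_state_like are
// invented names; the real macro uses NEW_RESOURCE_ARRAY_RETURN_NULL.
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>

static void report_error(const char* msg) {       // stand-in for GenerateOopMap::report_error
  std::fprintf(stderr, "error: %s\n", msg);
}

// The trailing 'return' exits the *calling* function, so every use site must
// be a void function, as init_state() and the other call sites above are.
#define MY_ALLOC_OR_BAIL(var, type, count)                                \
  var = static_cast<type*>(std::malloc((count) * sizeof(type)));          \
  if (var == NULL) {                                                      \
    report_error("Cannot reserve enough memory to analyze this method");  \
    return;                                                               \
  }

static void init_state_like(std::size_t len) {
  int* state;
  MY_ALLOC_OR_BAIL(state, int, len);              // on failure: report and return early
  std::memset(state, 0, len * sizeof(int));
  std::printf("allocated %zu ints\n", len);
  std::free(state);
}

int main() {
  init_state_like(16);
  return 0;
}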