patches/security/20130618/7158805-nested_subroutine_rewriting.patch @ 2907:d7eca687b7d2

Add 2013/06/18 security patches.

2013-06-22  Andrew John Hughes  <gnu.andrew@member.fsf.org>

	* patches/idresolver_fix.patch:
	Removed. Part of 6469266.
	* Makefile.am:
	(SECURITY_PATCHES): Add new ones.
	(SPECIAL_SECURITY_PATCH_1): Renamed from SPECIAL_SECURITY_PATCH.
	(SPECIAL_SECURITY_PATCH_2): Add 8009071, which needs to be
	applied after some AWT backports.
	(ICEDTEA_PATCHES): Use SPECIAL_SECURITY_PATCH_{1,2}.
	Move 8005615, 8007393 & 8007611 to SECURITY_PATCHES as they
	must be applied before 8004584. Add 7171223 to end.
	* patches/openjdk/6307603-xrender-01.patch,
	* patches/openjdk/6469266-xmlsec_1.4.2.patch,
	* patches/openjdk/6656651-windows_lcd_glyphs.patch,
	* patches/openjdk/6786028-wcag_bold_tags.patch,
	* patches/openjdk/6786682-wcag_lang.patch,
	* patches/openjdk/6786688-wcag_table.patch,
	* patches/openjdk/6786690-wcag_dl.patch,
	* patches/openjdk/6802694-no_deprecated.patch,
	* patches/openjdk/6851834-restructure.patch,
	* patches/openjdk/6888167-medialib_memory_leaks.patch,
	* patches/openjdk/6961178-doclet_xml.patch,
	* patches/openjdk/6990754-use_native_memory_for_symboltable.patch,
	* patches/openjdk/7006270-regressions.patch,
	* patches/openjdk/7008809-report_class_in_arraystoreexception.patch,
	* patches/openjdk/7014851-unused_parallel_compaction_code.patch,
	* patches/openjdk/7017732-move_static_fields_to_class.patch,
	* patches/openjdk/7036747-elfstringtable.patch,
	* patches/openjdk/7086585-flexible_field_injection.patch,
	* patches/openjdk/7171223-strict_aliasing.patch,
	* patches/openjdk/7195301-no_instanceof_node.patch,
	* patches/security/20130618/6741606-apache_santuario.patch,
	* patches/security/20130618/7158805-nested_subroutine_rewriting.patch,
	* patches/security/20130618/7170730-windows_network_stack.patch,
	* patches/security/20130618/8000638-improve_deserialization.patch,
	* patches/security/20130618/8000642-better_transportation_handling.patch,
	* patches/security/20130618/8001032-restrict_object_access-corba.patch,
	* patches/security/20130618/8001032-restrict_object_access-jdk.patch,
	* patches/security/20130618/8001033-refactor_address_handling.patch,
	* patches/security/20130618/8001034-memory_management.patch,
	* patches/security/20130618/8001038-resourcefully_handle_resources.patch,
	* patches/security/20130618/8001043-clarify_definition_restrictions.patch,
	* patches/security/20130618/8001309-better_handling_of_annotation_interfaces.patch,
	* patches/security/20130618/8001318-6_fixup.patch,
	* patches/security/20130618/8001318-socket_getlocaladdress_consistency.patch,
	* patches/security/20130618/8001330-checking_order_improvement.patch,
	* patches/security/20130618/8001330-improve_checking_order.patch,
	* patches/security/20130618/8003703-update_rmi_connection_dialog.patch,
	* patches/security/20130618/8004584-augment_applet_contextualization.patch,
	* patches/security/20130618/8005007-better_glyph_processing.patch,
	* patches/security/20130618/8006328-6_fixup.patch,
	* patches/security/20130618/8006328-sound_class_robustness.patch,
	* patches/security/20130618/8006611-improve_scripting.patch,
	* patches/security/20130618/8007467-improve_jmx_internal_api_robustness.patch,
	* patches/security/20130618/8007471-6_fixup.patch,
	* patches/security/20130618/8007471-improve_mbean_notifications.patch,
	* patches/security/20130618/8007812-getenclosingmethod.patch,
	* patches/security/20130618/8008120-improve_jmx_class_checking.patch,
	* patches/security/20130618/8008124-better_compliance_testing.patch,
	* patches/security/20130618/8008128-better_jmx_api_coherence.patch,
	* patches/security/20130618/8008132-better_serialization.patch,
	* patches/security/20130618/8008585-jmx_data_handling.patch,
	* patches/security/20130618/8008593-better_urlclassloader.patch,
	* patches/security/20130618/8008603-jmx_provider_provision.patch,
	* patches/security/20130618/8008611-6_fixup.patch,
	* patches/security/20130618/8008611-jmx_annotations.patch,
	* patches/security/20130618/8008615-jmx_internal_api_robustness.patch,
	* patches/security/20130618/8008623-mbeanserver_handling.patch,
	* patches/security/20130618/8008744-6741606_rework.patch,
	* patches/security/20130618/8008982-jmx_interface_changes.patch,
	* patches/security/20130618/8009004-rmi_connection_improvement.patch,
	* patches/security/20130618/8009013-t2k_glyphs.patch,
	* patches/security/20130618/8009034-jmx_notification_improvement.patch,
	* patches/security/20130618/8009038-jmx_notification_support_improvement.patch,
	* patches/security/20130618/8009067-improve_key_storing.patch,
	* patches/security/20130618/8009071-improve_shape_handling.patch,
	* patches/security/20130618/8009235-improve_tsa_data_handling.patch,
	* patches/security/20130618/8009554-serialjavaobject.patch,
	* patches/security/20130618/8011243-improve_imaginglib.patch,
	* patches/security/20130618/8011248-better_component_rasters.patch,
	* patches/security/20130618/8011253-better_short_component_rasters.patch,
	* patches/security/20130618/8011257-better_byte_component_rasters.patch,
	* patches/security/20130618/8011557-improve_reflection.patch,
	* patches/security/20130618/8012375-javadoc_framing.patch,
	* patches/security/20130618/8012421-better_positioning.patch,
	* patches/security/20130618/8012438-better_image_validation.patch,
	* patches/security/20130618/8012597-better_image_channel_validation.patch,
	* patches/security/20130618/8012601-better_layout_validation.patch,
	* patches/security/20130618/8014281-better_xml_signature_checking.patch,
	* patches/security/20130618/8015997-more_javadoc_framing.patch,
	* patches/security/20130618/diamond_fix.patch,
	* patches/security/20130618/handle_npe.patch,
	* patches/security/20130618/hs_merge-01.patch,
	* patches/security/20130618/hs_merge-02.patch,
	* patches/security/20130618/hs_merge-03.patch,
	* patches/security/20130618/hs_merge-04.patch,
	* patches/security/20130618/javac_issue.patch,
	* patches/security/20130618/langtools_generics.patch,
	* patches/security/20130618/langtools_merge-01.patch,
	* patches/security/20130618/langtools_merge-02.patch,
	* patches/security/20130618/langtools_merge-03.patch:
	2013/06/18 security patches.
author Andrew John Hughes <gnu.andrew@redhat.com>
date Sat, 22 Jun 2013 16:38:24 -0500
parents
children 49e2fd0a8dfd

# HG changeset patch
# User andrew
# Date 1371562757 18000
# Node ID 684f0c17ce15a3e012e9b73d618af1462f8c2d64
# Parent  f5eac6fae49e04b673a8307058ce3b22750da0a9
7158805: Better rewriting of nested subroutine calls
Reviewed-by: mschoene, coleenp
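
The patch threads an allocation-failure policy through HotSpot's chunk, arena and resource-area allocators: each affected entry point gains an AllocFailType parameter that defaults to AllocFailStrategy::EXIT_OOM (the old behaviour of calling vm_exit_out_of_memory on failure) but can be set to AllocFailStrategy::RETURN_NULL so the caller gets a NULL result back instead. GenerateOopMap then opts into RETURN_NULL, so a method whose nested subroutines would need an enormous analysis state is reported with "Cannot reserve enough memory to analyze this method" instead of taking the whole VM down. What follows is a minimal standalone sketch of that pattern, not HotSpot code: only the AllocFailStrategy/AllocFailType names come from the patch, while the allocate() helper and main() are invented for illustration.

  #include <cstddef>
  #include <cstdio>
  #include <cstdlib>

  // Standalone model of the failure policy introduced by the patch.
  class AllocFailStrategy {
   public:
    enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
  };
  typedef AllocFailStrategy::AllocFailEnum AllocFailType;

  // Simplified allocator: abort on failure by default, or hand the caller
  // a NULL when RETURN_NULL is requested.
  static void* allocate(std::size_t bytes,
                        AllocFailType mode = AllocFailStrategy::EXIT_OOM) {
    void* p = std::malloc(bytes);
    if (p == NULL && mode == AllocFailStrategy::EXIT_OOM) {
      std::fprintf(stderr, "out of memory (%zu bytes)\n", bytes);
      std::abort();               // stands in for vm_exit_out_of_memory()
    }
    return p;                     // may be NULL under RETURN_NULL
  }

  int main() {
    // A caller that can recover from failure, as GenerateOopMap now does.
    void* p = allocate(64, AllocFailStrategy::RETURN_NULL);
    if (p == NULL) {
      std::fprintf(stderr, "cannot reserve enough memory; giving up cleanly\n");
      return 1;
    }
    std::free(p);
    return 0;
  }

Keeping EXIT_OOM as the default argument is what lets the many existing call sites compile unchanged while the few recoverable paths opt in explicitly.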

diff --git a/src/share/vm/memory/allocation.cpp b/src/share/vm/memory/allocation.cpp
--- openjdk/hotspot/src/share/vm/memory/allocation.cpp
+++ openjdk/hotspot/src/share/vm/memory/allocation.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -199,7 +199,7 @@
    ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
 
   // Allocate a new chunk from the pool (might expand the pool)
-  void* allocate(size_t bytes) {
+  void* allocate(size_t bytes, AllocFailType alloc_failmode) {
     assert(bytes == _size, "bad size");
     void* p = NULL;
     { ThreadCritical tc;
@@ -207,9 +207,9 @@
       p = get_first();
       if (p == NULL) p = os::malloc(bytes);
     }
-    if (p == NULL)
+    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, "ChunkPool::allocate");
-
+    }
     return p;
   }
 
@@ -300,7 +300,7 @@
 //--------------------------------------------------------------------------------------
 // Chunk implementation
 
-void* Chunk::operator new(size_t requested_size, size_t length) {
+void* Chunk::operator new(size_t requested_size, AllocFailType alloc_failmode, size_t length) {
   // requested_size is equal to sizeof(Chunk) but in order for the arena
   // allocations to come out aligned as expected the size must be aligned
   // to expected arean alignment.
@@ -308,13 +308,14 @@
   assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
   size_t bytes = ARENA_ALIGN(requested_size) + length;
   switch (length) {
-   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
-   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
-   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
+   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
+   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
+   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
    default: {
-     void *p =  os::malloc(bytes);
-     if (p == NULL)
+     void* p = os::malloc(bytes);
+     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
        vm_exit_out_of_memory(bytes, "Chunk::new");
+     }
      return p;
    }
   }
@@ -367,14 +368,14 @@
 Arena::Arena(size_t init_size) {
   size_t round_size = (sizeof (char *)) - 1;
   init_size = (init_size+round_size) & ~round_size;
-  _first = _chunk = new (init_size) Chunk(init_size);
+  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
   set_size_in_bytes(init_size);
 }
 
 Arena::Arena() {
-  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
+  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
   set_size_in_bytes(Chunk::init_size);
@@ -427,15 +428,15 @@
 }
 
 // Grow a new Chunk
-void* Arena::grow( size_t x ) {
+void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
   // Get minimal required size.  Either real big, or even bigger for giant objs
   size_t len = MAX2(x, (size_t) Chunk::size);
 
   Chunk *k = _chunk;            // Get filled-up chunk address
-  _chunk = new (len) Chunk(len);
+  _chunk = new (alloc_failmode, len) Chunk(len);
 
   if (_chunk == NULL) {
-    signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
+    return NULL;
   }
 
   if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
@@ -451,13 +452,16 @@
 
 
 // Reallocate storage in Arena.
-void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size) {
+void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
   assert(new_size >= 0, "bad size");
   if (new_size == 0) return NULL;
 #ifdef ASSERT
   if (UseMallocOnly) {
     // always allocate a new object  (otherwise we'll free this one twice)
-    char* copy = (char*)Amalloc(new_size);
+    char* copy = (char*)Amalloc(new_size, alloc_failmode);
+    if (copy == NULL) {
+      return NULL;
+    }
     size_t n = MIN2(old_size, new_size);
     if (n > 0) memcpy(copy, old_ptr, n);
     Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
@@ -483,7 +487,10 @@
   }
 
   // Oops, got to relocate guts
-  void *new_ptr = Amalloc(new_size);
+  void *new_ptr = Amalloc(new_size, alloc_failmode);
+  if (new_ptr == NULL) {
+    return NULL;
+  }
   memcpy( new_ptr, c_old, old_size );
   Afree(c_old,old_size);        // Mostly done to keep stats accurate
   return new_ptr;
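
Within allocation.cpp the behavioural change is that ChunkPool::allocate(), Chunk::operator new(), Arena::grow() and Arena::Arealloc() now propagate failure upward as NULL when RETURN_NULL is requested, instead of calling vm_exit_out_of_memory() or signal_out_of_memory(); in Arealloc() the NULL is returned before the old block is copied or freed, so the caller still owns its original data. A hypothetical, self-contained sketch of that propagation follows (arealloc_sketch() is an invented name, and plain malloc/free stand in for the arena).

  #include <cstddef>
  #include <cstdlib>
  #include <cstring>

  // Models Arena::Arealloc under AllocFailStrategy::RETURN_NULL: if the
  // fresh allocation fails, hand the NULL straight back and leave the
  // caller's old block untouched instead of exiting the process.
  static void* arealloc_sketch(void* old_ptr, std::size_t old_size,
                               std::size_t new_size) {
    void* new_ptr = std::malloc(new_size);   // stands in for Amalloc(new_size, alloc_failmode)
    if (new_ptr == NULL) {
      return NULL;                           // failure propagates; old_ptr is still valid
    }
    std::memcpy(new_ptr, old_ptr, old_size < new_size ? old_size : new_size);
    std::free(old_ptr);                      // stands in for Afree(old_ptr, old_size)
    return new_ptr;
  }

A NULL return therefore means "keep using the old block", which is the contract a RETURN_NULL caller of the patched Arealloc() has to honour.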
diff --git a/src/share/vm/memory/allocation.hpp b/src/share/vm/memory/allocation.hpp
--- openjdk/hotspot/src/share/vm/memory/allocation.hpp
+++ openjdk/hotspot/src/share/vm/memory/allocation.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,12 @@
 #define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
 #define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
 
+class AllocFailStrategy {
+public:
+  enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
+};
+typedef AllocFailStrategy::AllocFailEnum AllocFailType;
+
 // All classes in the virtual machine must be subclassed
 // by one of the following allocation classes:
 //
@@ -152,7 +158,7 @@
   Chunk*       _next;     // Next Chunk in list
   const size_t _len;      // Size of this Chunk
  public:
-  void* operator new(size_t size, size_t length);
+  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length);
   void  operator delete(void* p);
   Chunk(size_t length);
 
@@ -200,7 +206,8 @@
   Chunk *_first;                // First chunk
   Chunk *_chunk;                // current chunk
   char *_hwm, *_max;            // High water mark and max in current chunk
-  void* grow(size_t x);         // Get a new Chunk of at least size x
+  // Get a new Chunk of at least size x
+  void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
   NOT_PRODUCT(size_t _size_in_bytes;) // Size of arena (used for memory usage tracing)
   NOT_PRODUCT(static size_t _bytes_allocated;) // total #bytes allocated since start
   friend class AllocStats;
@@ -209,10 +216,15 @@
 
   void signal_out_of_memory(size_t request, const char* whence) const;
 
-  void check_for_overflow(size_t request, const char* whence) const {
+  bool check_for_overflow(size_t request, const char* whence,
+      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
     if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
+      if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
+        return false;
+      }
       signal_out_of_memory(request, whence);
     }
+    return true;
   }
 
  public:
@@ -224,14 +236,15 @@
   char* hwm() const             { return _hwm; }
 
   // Fast allocate in the arena.  Common case is: pointer test + increment.
-  void* Amalloc(size_t x) {
+  void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
     assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2");
     x = ARENA_ALIGN(x);
     debug_only(if (UseMallocOnly) return malloc(x);)
-    check_for_overflow(x, "Arena::Amalloc");
+    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
+      return NULL;
     NOT_PRODUCT(_bytes_allocated += x);
     if (_hwm + x > _max) {
-      return grow(x);
+      return grow(x, alloc_failmode);
     } else {
       char *old = _hwm;
       _hwm += x;
@@ -239,13 +252,14 @@
     }
   }
   // Further assume size is padded out to words
-  void *Amalloc_4(size_t x) {
+  void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
     assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
     debug_only(if (UseMallocOnly) return malloc(x);)
-    check_for_overflow(x, "Arena::Amalloc_4");
+    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
+      return NULL;
     NOT_PRODUCT(_bytes_allocated += x);
     if (_hwm + x > _max) {
-      return grow(x);
+      return grow(x, alloc_failmode);
     } else {
       char *old = _hwm;
       _hwm += x;
@@ -255,7 +269,7 @@
 
   // Allocate with 'double' alignment. It is 8 bytes on sparc.
   // In other cases Amalloc_D() should be the same as Amalloc_4().
-  void* Amalloc_D(size_t x) {
+  void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
     assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
     debug_only(if (UseMallocOnly) return malloc(x);)
 #if defined(SPARC) && !defined(_LP64)
@@ -263,10 +277,11 @@
     size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
     x += delta;
 #endif
-    check_for_overflow(x, "Arena::Amalloc_D");
+    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
+      return NULL;
     NOT_PRODUCT(_bytes_allocated += x);
     if (_hwm + x > _max) {
-      return grow(x); // grow() returns a result aligned >= 8 bytes.
+      return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
     } else {
       char *old = _hwm;
       _hwm += x;
@@ -286,7 +301,8 @@
     if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
   }
 
-  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size );
+  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
+     AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
 
   // Move contents of this arena into an empty arena
   Arena *move_contents(Arena *empty_arena);
@@ -328,9 +344,12 @@
 
 
 //%note allocation_1
-extern char* resource_allocate_bytes(size_t size);
-extern char* resource_allocate_bytes(Thread* thread, size_t size);
-extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size);
+extern char* resource_allocate_bytes(size_t size,
+    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
+extern char* resource_allocate_bytes(Thread* thread, size_t size,
+    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
+extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size,
+    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
 extern void resource_free_bytes( char *old, size_t size );
 
 //----------------------------------------------------------------------
@@ -376,6 +395,13 @@
       DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
       return res;
   }
+
+  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) {
+    address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
+    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
+    return res;
+  }
+
   void  operator delete(void* p);
 };
 
@@ -386,6 +412,9 @@
 #define NEW_RESOURCE_ARRAY(type, size)\
   (type*) resource_allocate_bytes((size) * sizeof(type))
 
+#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\
+  (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL)
+
 #define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
   (type*) resource_allocate_bytes(thread, (size) * sizeof(type))
 
diff --git a/src/share/vm/memory/allocation.inline.hpp b/src/share/vm/memory/allocation.inline.hpp
--- openjdk/hotspot/src/share/vm/memory/allocation.inline.hpp
+++ openjdk/hotspot/src/share/vm/memory/allocation.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,25 +34,29 @@
 
 
 // allocate using malloc; will fail if no memory available
-inline char* AllocateHeap(size_t size, const char* name = NULL) {
+inline char* AllocateHeap(size_t size, const char* name = NULL,
+     AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
   char* p = (char*) os::malloc(size);
   #ifdef ASSERT
   if (PrintMallocFree) trace_heap_malloc(size, name, p);
   #else
   Unused_Variable(name);
   #endif
-  if (p == NULL) vm_exit_out_of_memory(size, name);
+  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM)
+    vm_exit_out_of_memory(size, "AllocateHeap");
   return p;
 }
 
-inline char* ReallocateHeap(char *old, size_t size, const char* name = NULL) {
+inline char* ReallocateHeap(char *old, size_t size, const char* name = NULL,
+    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
   char* p = (char*) os::realloc(old,size);
   #ifdef ASSERT
   if (PrintMallocFree) trace_heap_malloc(size, name, p);
   #else
   Unused_Variable(name);
   #endif
-  if (p == NULL) vm_exit_out_of_memory(size, name);
+  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM)
+    vm_exit_out_of_memory(size, "ReallocateHeap");
   return p;
 }
 
diff --git a/src/share/vm/memory/resourceArea.cpp b/src/share/vm/memory/resourceArea.cpp
--- openjdk/hotspot/src/share/vm/memory/resourceArea.cpp
+++ openjdk/hotspot/src/share/vm/memory/resourceArea.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,15 +42,16 @@
 // The following routines are declared in allocation.hpp and used everywhere:
 
 // Allocation in thread-local resource area
-extern char* resource_allocate_bytes(size_t size) {
-  return Thread::current()->resource_area()->allocate_bytes(size);
+extern char* resource_allocate_bytes(size_t size, AllocFailType alloc_failmode) {
+  return Thread::current()->resource_area()->allocate_bytes(size, alloc_failmode);
 }
-extern char* resource_allocate_bytes(Thread* thread, size_t size) {
-  return thread->resource_area()->allocate_bytes(size);
+extern char* resource_allocate_bytes(Thread* thread, size_t size, AllocFailType alloc_failmode) {
+  return thread->resource_area()->allocate_bytes(size, alloc_failmode);
 }
 
-extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size){
-  return (char*)Thread::current()->resource_area()->Arealloc(old, old_size, new_size);
+extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size,
+    AllocFailType alloc_failmode){
+  return (char*)Thread::current()->resource_area()->Arealloc(old, old_size, new_size, alloc_failmode);
 }
 
 extern void resource_free_bytes( char *old, size_t size ) {
diff --git a/src/share/vm/memory/resourceArea.hpp b/src/share/vm/memory/resourceArea.hpp
--- openjdk/hotspot/src/share/vm/memory/resourceArea.hpp
+++ openjdk/hotspot/src/share/vm/memory/resourceArea.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@
     debug_only(_nesting = 0;);
   }
 
-  char* allocate_bytes(size_t size) {
+  char* allocate_bytes(size_t size, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
 #ifdef ASSERT
     if (_nesting < 1 && !_warned++)
       fatal("memory leak: allocating without ResourceMark");
@@ -74,7 +74,7 @@
       return (*save = (char*)os::malloc(size));
     }
 #endif
-    return (char*)Amalloc(size);
+    return (char*)Amalloc(size, alloc_failmode);
   }
 
   debug_only(int nesting() const { return _nesting; });
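
resourceArea.cpp and resourceArea.hpp complete the plumbing: resource_allocate_bytes() and resource_reallocate_bytes() forward the failure mode to ResourceArea::allocate_bytes() and Arena::Arealloc(), and allocate_bytes() passes it on to Amalloc(). The end-to-end path for a recoverable allocation is therefore NEW_RESOURCE_ARRAY_RETURN_NULL -> resource_allocate_bytes(..., RETURN_NULL) -> ResourceArea::allocate_bytes() -> Arena::Amalloc() -> Arena::grow(), with every step returning NULL rather than exiting.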
diff --git a/src/share/vm/oops/generateOopMap.cpp b/src/share/vm/oops/generateOopMap.cpp
--- openjdk/hotspot/src/share/vm/oops/generateOopMap.cpp
+++ openjdk/hotspot/src/share/vm/oops/generateOopMap.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -643,11 +643,20 @@
 // CellType handling methods
 //
 
+// Allocate memory and throw LinkageError if failure.
+#define ALLOC_RESOURCE_ARRAY(var, type, count) \
+  var = NEW_RESOURCE_ARRAY_RETURN_NULL(type, count);              \
+  if (var == NULL) {                                              \
+    report_error("Cannot reserve enough memory to analyze this method"); \
+    return;                                                       \
+  }
+
 void GenerateOopMap::init_state() {
   _state_len     = _max_locals + _max_stack + _max_monitors;
-  _state         = NEW_RESOURCE_ARRAY(CellTypeState, _state_len);
+  ALLOC_RESOURCE_ARRAY(_state, CellTypeState, _state_len);
   memset(_state, 0, _state_len * sizeof(CellTypeState));
-  _state_vec_buf = NEW_RESOURCE_ARRAY(char, MAX3(_max_locals, _max_stack, _max_monitors) + 1/*for null terminator char */);
+  int count = MAX3(_max_locals, _max_stack, _max_monitors) + 1/*for null terminator char */;
+  ALLOC_RESOURCE_ARRAY(_state_vec_buf, char, count)
 }
 
 void GenerateOopMap::make_context_uninitialized() {
@@ -905,7 +914,7 @@
   // But cumbersome since we don't know the stack heights yet.  (Nor the
   // monitor stack heights...)
 
-  _basic_blocks = NEW_RESOURCE_ARRAY(BasicBlock, _bb_count);
+  ALLOC_RESOURCE_ARRAY(_basic_blocks, BasicBlock, _bb_count);
 
   // Make a pass through the bytecodes.  Count the number of monitorenters.
   // This can be used an upper bound on the monitor stack depth in programs
@@ -976,8 +985,8 @@
     return;
   }
 
-  CellTypeState *basicBlockState =
-      NEW_RESOURCE_ARRAY(CellTypeState, bbNo * _state_len);
+  CellTypeState *basicBlockState;
+  ALLOC_RESOURCE_ARRAY(basicBlockState, CellTypeState, bbNo * _state_len);
   memset(basicBlockState, 0, bbNo * _state_len * sizeof(CellTypeState));
 
   // Make a pass over the basicblocks and assign their state vectors.
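
GenerateOopMap is the consumer of all this plumbing: the large analysis arrays (_state, _state_vec_buf, _basic_blocks and basicBlockState) are now allocated through ALLOC_RESOURCE_ARRAY, whose own comment says the failure is ultimately reported as a LinkageError rather than causing a VM abort. Expanded by hand, each use is just the macro body from above; for example the _basic_blocks site becomes the following (every identifier is from the patch, only the expansion itself is added for illustration).

  // Hand-expansion of ALLOC_RESOURCE_ARRAY(_basic_blocks, BasicBlock, _bb_count).
  _basic_blocks = NEW_RESOURCE_ARRAY_RETURN_NULL(BasicBlock, _bb_count);
  if (_basic_blocks == NULL) {
    report_error("Cannot reserve enough memory to analyze this method");
    return;
  }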