Mercurial > hg > release > icedtea6-1.11
view patches/security/20130618/7158805-nested_subroutine_rewriting-it6.patch @ 2911:ea7bce0f610a
Summary: Backport 20130618 sec fixes 7158805 and 8001330
added patches/security/20130618/7158805-nested_subroutine_rewriting-it6.patch
added patches/security/20130618/8001330-checking_order_improvement-it6.patch
changed ChangeLog
changed Makefile.am
author   | chrisphi
date     | Wed, 26 Jun 2013 15:10:59 -0400
parents  |
children |
line wrap: on
line source
*** openjdk/hotspot/src/share/vm/memory/allocation.cpp 2011-11-14 17:07:35.000000000 -0500 --- openjdk/hotspot/src/share/vm/memory/allocation.cpp 2013-06-25 14:55:54.749915166 -0400 *************** *** 1,5 **** /* ! * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- 1,5 ---- /* ! * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it *************** *** 199,205 **** ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; } // Allocate a new chunk from the pool (might expand the pool) ! void* allocate(size_t bytes) { assert(bytes == _size, "bad size"); void* p = NULL; { ThreadCritical tc; --- 199,205 ---- ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; } // Allocate a new chunk from the pool (might expand the pool) ! void* allocate(size_t bytes, AllocFailType alloc_failmode) { assert(bytes == _size, "bad size"); void* p = NULL; { ThreadCritical tc; *************** *** 207,215 **** p = get_first(); if (p == NULL) p = os::malloc(bytes); } ! if (p == NULL) vm_exit_out_of_memory(bytes, "ChunkPool::allocate"); ! return p; } --- 207,215 ---- p = get_first(); if (p == NULL) p = os::malloc(bytes); } ! if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) { vm_exit_out_of_memory(bytes, "ChunkPool::allocate"); ! } return p; } *************** *** 300,306 **** //-------------------------------------------------------------------------------------- // Chunk implementation ! 
void* Chunk::operator new(size_t requested_size, size_t length) { // requested_size is equal to sizeof(Chunk) but in order for the arena // allocations to come out aligned as expected the size must be aligned // to expected arean alignment. --- 300,306 ---- //-------------------------------------------------------------------------------------- // Chunk implementation ! void* Chunk::operator new(size_t requested_size, AllocFailType alloc_failmode, size_t length) { // requested_size is equal to sizeof(Chunk) but in order for the arena // allocations to come out aligned as expected the size must be aligned // to expected arean alignment. *************** *** 308,320 **** assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment"); size_t bytes = ARENA_ALIGN(requested_size) + length; switch (length) { ! case Chunk::size: return ChunkPool::large_pool()->allocate(bytes); ! case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes); ! case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes); default: { ! void *p = os::malloc(bytes); ! if (p == NULL) vm_exit_out_of_memory(bytes, "Chunk::new"); return p; } } --- 308,321 ---- assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment"); size_t bytes = ARENA_ALIGN(requested_size) + length; switch (length) { ! case Chunk::size: return ChunkPool::large_pool()->allocate(bytes, alloc_failmode); ! case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode); ! case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes, alloc_failmode); default: { ! void* p = os::malloc(bytes); ! if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) { vm_exit_out_of_memory(bytes, "Chunk::new"); + } return p; } } *************** *** 367,380 **** Arena::Arena(size_t init_size) { size_t round_size = (sizeof (char *)) - 1; init_size = (init_size+round_size) & ~round_size; ! 
_first = _chunk = new (init_size) Chunk(init_size); _hwm = _chunk->bottom(); // Save the cached hwm, max _max = _chunk->top(); set_size_in_bytes(init_size); } Arena::Arena() { ! _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size); _hwm = _chunk->bottom(); // Save the cached hwm, max _max = _chunk->top(); set_size_in_bytes(Chunk::init_size); --- 368,381 ---- Arena::Arena(size_t init_size) { size_t round_size = (sizeof (char *)) - 1; init_size = (init_size+round_size) & ~round_size; ! _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size); _hwm = _chunk->bottom(); // Save the cached hwm, max _max = _chunk->top(); set_size_in_bytes(init_size); } Arena::Arena() { ! _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size); _hwm = _chunk->bottom(); // Save the cached hwm, max _max = _chunk->top(); set_size_in_bytes(Chunk::init_size); *************** *** 427,441 **** } // Grow a new Chunk ! void* Arena::grow( size_t x ) { // Get minimal required size. Either real big, or even bigger for giant objs size_t len = MAX2(x, (size_t) Chunk::size); Chunk *k = _chunk; // Get filled-up chunk address ! _chunk = new (len) Chunk(len); if (_chunk == NULL) { ! signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow"); } if (k) k->set_next(_chunk); // Append new chunk to end of linked list --- 428,442 ---- } // Grow a new Chunk ! void* Arena::grow(size_t x, AllocFailType alloc_failmode) { // Get minimal required size. Either real big, or even bigger for giant objs size_t len = MAX2(x, (size_t) Chunk::size); Chunk *k = _chunk; // Get filled-up chunk address ! _chunk = new (alloc_failmode, len) Chunk(len); if (_chunk == NULL) { ! return NULL; } if (k) k->set_next(_chunk); // Append new chunk to end of linked list *************** *** 451,463 **** // Reallocate storage in Arena. ! 
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size) { assert(new_size >= 0, "bad size"); if (new_size == 0) return NULL; #ifdef ASSERT if (UseMallocOnly) { // always allocate a new object (otherwise we'll free this one twice) ! char* copy = (char*)Amalloc(new_size); size_t n = MIN2(old_size, new_size); if (n > 0) memcpy(copy, old_ptr, n); Afree(old_ptr,old_size); // Mostly done to keep stats accurate --- 452,467 ---- // Reallocate storage in Arena. ! void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) { assert(new_size >= 0, "bad size"); if (new_size == 0) return NULL; #ifdef ASSERT if (UseMallocOnly) { // always allocate a new object (otherwise we'll free this one twice) ! char* copy = (char*)Amalloc(new_size, alloc_failmode); ! if (copy == NULL) { ! return NULL; ! } size_t n = MIN2(old_size, new_size); if (n > 0) memcpy(copy, old_ptr, n); Afree(old_ptr,old_size); // Mostly done to keep stats accurate *************** *** 483,489 **** } // Oops, got to relocate guts ! void *new_ptr = Amalloc(new_size); memcpy( new_ptr, c_old, old_size ); Afree(c_old,old_size); // Mostly done to keep stats accurate return new_ptr; --- 487,496 ---- } // Oops, got to relocate guts ! void *new_ptr = Amalloc(new_size, alloc_failmode); ! if (new_ptr == NULL) { ! return NULL; ! } memcpy( new_ptr, c_old, old_size ); Afree(c_old,old_size); // Mostly done to keep stats accurate return new_ptr; *** openjdk/hotspot/src/share/vm/memory/allocation.hpp 2011-11-14 17:07:35.000000000 -0500 --- openjdk/hotspot/src/share/vm/memory/allocation.hpp 2013-06-25 15:13:06.325141250 -0400 *************** *** 1,5 **** /* ! * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- 1,5 ---- /* ! * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it *************** *** 34,43 **** --- 34,51 ---- #include "opto/c2_globals.hpp" #endif + #include <new> + #define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1) #define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1)) #define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK) + class AllocFailStrategy { + public: + enum AllocFailEnum { EXIT_OOM, RETURN_NULL }; + }; + typedef AllocFailStrategy::AllocFailEnum AllocFailType; + // All classes in the virtual machine must be subclassed // by one of the following allocation classes: // *************** *** 152,158 **** Chunk* _next; // Next Chunk in list const size_t _len; // Size of this Chunk public: ! void* operator new(size_t size, size_t length); void operator delete(void* p); Chunk(size_t length); --- 160,166 ---- Chunk* _next; // Next Chunk in list const size_t _len; // Size of this Chunk public: ! void* operator new(size_t size, AllocFailType alloc_failmode, size_t length); void operator delete(void* p); Chunk(size_t length); *************** *** 200,206 **** Chunk *_first; // First chunk Chunk *_chunk; // current chunk char *_hwm, *_max; // High water mark and max in current chunk ! void* grow(size_t x); // Get a new Chunk of at least size x NOT_PRODUCT(size_t _size_in_bytes;) // Size of arena (used for memory usage tracing) NOT_PRODUCT(static size_t _bytes_allocated;) // total #bytes allocated since start friend class AllocStats; --- 208,215 ---- Chunk *_first; // First chunk Chunk *_chunk; // current chunk char *_hwm, *_max; // High water mark and max in current chunk ! // Get a new Chunk of at least size x ! 
void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); NOT_PRODUCT(size_t _size_in_bytes;) // Size of arena (used for memory usage tracing) NOT_PRODUCT(static size_t _bytes_allocated;) // total #bytes allocated since start friend class AllocStats; *************** *** 209,218 **** void signal_out_of_memory(size_t request, const char* whence) const; ! void check_for_overflow(size_t request, const char* whence) const { if (UINTPTR_MAX - request < (uintptr_t)_hwm) { signal_out_of_memory(request, whence); } } public: --- 218,232 ---- void signal_out_of_memory(size_t request, const char* whence) const; ! bool check_for_overflow(size_t request, const char* whence, ! AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const { if (UINTPTR_MAX - request < (uintptr_t)_hwm) { + if (alloc_failmode == AllocFailStrategy::RETURN_NULL) { + return false; + } signal_out_of_memory(request, whence); } + return true; } public: *************** *** 224,237 **** char* hwm() const { return _hwm; } // Fast allocate in the arena. Common case is: pointer test + increment. ! void* Amalloc(size_t x) { assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2"); x = ARENA_ALIGN(x); debug_only(if (UseMallocOnly) return malloc(x);) ! check_for_overflow(x, "Arena::Amalloc"); NOT_PRODUCT(_bytes_allocated += x); if (_hwm + x > _max) { ! return grow(x); } else { char *old = _hwm; _hwm += x; --- 238,252 ---- char* hwm() const { return _hwm; } // Fast allocate in the arena. Common case is: pointer test + increment. ! void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) { assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2"); x = ARENA_ALIGN(x); debug_only(if (UseMallocOnly) return malloc(x);) ! if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode)) ! return NULL; NOT_PRODUCT(_bytes_allocated += x); if (_hwm + x > _max) { ! 
return grow(x, alloc_failmode); } else { char *old = _hwm; _hwm += x; *************** *** 239,251 **** } } // Further assume size is padded out to words ! void *Amalloc_4(size_t x) { assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); debug_only(if (UseMallocOnly) return malloc(x);) ! check_for_overflow(x, "Arena::Amalloc_4"); NOT_PRODUCT(_bytes_allocated += x); if (_hwm + x > _max) { ! return grow(x); } else { char *old = _hwm; _hwm += x; --- 254,267 ---- } } // Further assume size is padded out to words ! void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) { assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); debug_only(if (UseMallocOnly) return malloc(x);) ! if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode)) ! return NULL; NOT_PRODUCT(_bytes_allocated += x); if (_hwm + x > _max) { ! return grow(x, alloc_failmode); } else { char *old = _hwm; _hwm += x; *************** *** 255,261 **** // Allocate with 'double' alignment. It is 8 bytes on sparc. // In other cases Amalloc_D() should be the same as Amalloc_4(). ! void* Amalloc_D(size_t x) { assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); debug_only(if (UseMallocOnly) return malloc(x);) #if defined(SPARC) && !defined(_LP64) --- 271,277 ---- // Allocate with 'double' alignment. It is 8 bytes on sparc. // In other cases Amalloc_D() should be the same as Amalloc_4(). ! void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) { assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); debug_only(if (UseMallocOnly) return malloc(x);) #if defined(SPARC) && !defined(_LP64) *************** *** 263,272 **** size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm; x += delta; #endif ! check_for_overflow(x, "Arena::Amalloc_D"); NOT_PRODUCT(_bytes_allocated += x); if (_hwm + x > _max) { ! return grow(x); // grow() returns a result aligned >= 8 bytes. 
} else { char *old = _hwm; _hwm += x; --- 279,289 ---- size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm; x += delta; #endif ! if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode)) ! return NULL; NOT_PRODUCT(_bytes_allocated += x); if (_hwm + x > _max) { ! return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes. } else { char *old = _hwm; _hwm += x; *************** *** 286,292 **** if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr; } ! void *Arealloc( void *old_ptr, size_t old_size, size_t new_size ); // Move contents of this arena into an empty arena Arena *move_contents(Arena *empty_arena); --- 303,310 ---- if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr; } ! void *Arealloc( void *old_ptr, size_t old_size, size_t new_size, ! AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); // Move contents of this arena into an empty arena Arena *move_contents(Arena *empty_arena); *************** *** 328,336 **** //%note allocation_1 ! extern char* resource_allocate_bytes(size_t size); ! extern char* resource_allocate_bytes(Thread* thread, size_t size); ! extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size); extern void resource_free_bytes( char *old, size_t size ); //---------------------------------------------------------------------- --- 346,357 ---- //%note allocation_1 ! extern char* resource_allocate_bytes(size_t size, ! AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); ! extern char* resource_allocate_bytes(Thread* thread, size_t size, ! AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); ! extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size, ! 
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); extern void resource_free_bytes( char *old, size_t size ); //---------------------------------------------------------------------- *************** *** 376,381 **** --- 397,409 ---- DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);) return res; } + + void* operator new(size_t size, const std::nothrow_t& nothrow_constant) { + address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL); + DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);) + return res; + } + void operator delete(void* p); }; *************** *** 386,391 **** --- 414,422 ---- #define NEW_RESOURCE_ARRAY(type, size)\ (type*) resource_allocate_bytes((size) * sizeof(type)) + #define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\ + (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL) + #define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\ (type*) resource_allocate_bytes(thread, (size) * sizeof(type)) *** openjdk/hotspot/src/share/vm/memory/allocation.inline.hpp 2011-11-14 17:07:35.000000000 -0500 --- openjdk/hotspot/src/share/vm/memory/allocation.inline.hpp 2013-06-25 14:55:54.751915115 -0400 *************** *** 1,5 **** /* ! * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- 1,5 ---- /* ! * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it *************** *** 34,58 **** // allocate using malloc; will fail if no memory available ! inline char* AllocateHeap(size_t size, const char* name = NULL) { char* p = (char*) os::malloc(size); #ifdef ASSERT if (PrintMallocFree) trace_heap_malloc(size, name, p); #else Unused_Variable(name); #endif ! 
if (p == NULL) vm_exit_out_of_memory(size, name); return p; } ! inline char* ReallocateHeap(char *old, size_t size, const char* name = NULL) { char* p = (char*) os::realloc(old,size); #ifdef ASSERT if (PrintMallocFree) trace_heap_malloc(size, name, p); #else Unused_Variable(name); #endif ! if (p == NULL) vm_exit_out_of_memory(size, name); return p; } --- 34,62 ---- // allocate using malloc; will fail if no memory available ! inline char* AllocateHeap(size_t size, const char* name = NULL, ! AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) { char* p = (char*) os::malloc(size); #ifdef ASSERT if (PrintMallocFree) trace_heap_malloc(size, name, p); #else Unused_Variable(name); #endif ! if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) ! vm_exit_out_of_memory(size, "AllocateHeap"); return p; } ! inline char* ReallocateHeap(char *old, size_t size, const char* name = NULL, ! AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) { char* p = (char*) os::realloc(old,size); #ifdef ASSERT if (PrintMallocFree) trace_heap_malloc(size, name, p); #else Unused_Variable(name); #endif ! if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) ! vm_exit_out_of_memory(size, "ReallocateHeap"); return p; } *** openjdk/hotspot/src/share/vm/memory/resourceArea.cpp 2011-11-14 17:07:36.000000000 -0500 --- openjdk/hotspot/src/share/vm/memory/resourceArea.cpp 2013-06-25 14:55:54.787914183 -0400 *************** *** 1,5 **** /* ! * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- 1,5 ---- /* ! * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it *************** *** 42,56 **** // The following routines are declared in allocation.hpp and used everywhere: // Allocation in thread-local resource area ! extern char* resource_allocate_bytes(size_t size) { ! return Thread::current()->resource_area()->allocate_bytes(size); } ! extern char* resource_allocate_bytes(Thread* thread, size_t size) { ! return thread->resource_area()->allocate_bytes(size); } ! extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size){ ! return (char*)Thread::current()->resource_area()->Arealloc(old, old_size, new_size); } extern void resource_free_bytes( char *old, size_t size ) { --- 42,57 ---- // The following routines are declared in allocation.hpp and used everywhere: // Allocation in thread-local resource area ! extern char* resource_allocate_bytes(size_t size, AllocFailType alloc_failmode) { ! return Thread::current()->resource_area()->allocate_bytes(size, alloc_failmode); } ! extern char* resource_allocate_bytes(Thread* thread, size_t size, AllocFailType alloc_failmode) { ! return thread->resource_area()->allocate_bytes(size, alloc_failmode); } ! extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size, ! AllocFailType alloc_failmode){ ! return (char*)Thread::current()->resource_area()->Arealloc(old, old_size, new_size, alloc_failmode); } extern void resource_free_bytes( char *old, size_t size ) { *** openjdk/hotspot/src/share/vm/memory/resourceArea.hpp 2011-11-14 17:07:36.000000000 -0500 --- openjdk/hotspot/src/share/vm/memory/resourceArea.hpp 2013-06-25 14:55:54.789914131 -0400 *************** *** 1,5 **** /* ! * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- 1,5 ---- /* ! * Copyright (c) 1997, 2013, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it *************** *** 64,70 **** debug_only(_nesting = 0;); } ! char* allocate_bytes(size_t size) { #ifdef ASSERT if (_nesting < 1 && !_warned++) fatal("memory leak: allocating without ResourceMark"); --- 64,70 ---- debug_only(_nesting = 0;); } ! char* allocate_bytes(size_t size, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) { #ifdef ASSERT if (_nesting < 1 && !_warned++) fatal("memory leak: allocating without ResourceMark"); *************** *** 74,80 **** return (*save = (char*)os::malloc(size)); } #endif ! return (char*)Amalloc(size); } debug_only(int nesting() const { return _nesting; }); --- 74,80 ---- return (*save = (char*)os::malloc(size)); } #endif ! return (char*)Amalloc(size, alloc_failmode); } debug_only(int nesting() const { return _nesting; }); *** openjdk/hotspot/src/share/vm/oops/generateOopMap.cpp 2011-11-14 17:07:36.000000000 -0500 --- openjdk/hotspot/src/share/vm/oops/generateOopMap.cpp 2013-06-25 14:55:54.790914103 -0400 *************** *** 1,5 **** /* ! * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- 1,5 ---- /* ! * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it *************** *** 643,653 **** // CellType handling methods // void GenerateOopMap::init_state() { _state_len = _max_locals + _max_stack + _max_monitors; ! _state = NEW_RESOURCE_ARRAY(CellTypeState, _state_len); memset(_state, 0, _state_len * sizeof(CellTypeState)); ! 
_state_vec_buf = NEW_RESOURCE_ARRAY(char, MAX3(_max_locals, _max_stack, _max_monitors) + 1/*for null terminator char */); } void GenerateOopMap::make_context_uninitialized() { --- 643,662 ---- // CellType handling methods // + // Allocate memory and throw LinkageError if failure. + #define ALLOC_RESOURCE_ARRAY(var, type, count) \ + var = NEW_RESOURCE_ARRAY_RETURN_NULL(type, count); \ + if (var == NULL) { \ + report_error("Cannot reserve enough memory to analyze this method"); \ + return; \ + } + void GenerateOopMap::init_state() { _state_len = _max_locals + _max_stack + _max_monitors; ! ALLOC_RESOURCE_ARRAY(_state, CellTypeState, _state_len); memset(_state, 0, _state_len * sizeof(CellTypeState)); ! int count = MAX3(_max_locals, _max_stack, _max_monitors) + 1/*for null terminator char */; ! ALLOC_RESOURCE_ARRAY(_state_vec_buf, char, count) } void GenerateOopMap::make_context_uninitialized() { *************** *** 905,911 **** // But cumbersome since we don't know the stack heights yet. (Nor the // monitor stack heights...) ! _basic_blocks = NEW_RESOURCE_ARRAY(BasicBlock, _bb_count); // Make a pass through the bytecodes. Count the number of monitorenters. // This can be used an upper bound on the monitor stack depth in programs --- 914,920 ---- // But cumbersome since we don't know the stack heights yet. (Nor the // monitor stack heights...) ! ALLOC_RESOURCE_ARRAY(_basic_blocks, BasicBlock, _bb_count); // Make a pass through the bytecodes. Count the number of monitorenters. // This can be used an upper bound on the monitor stack depth in programs *************** *** 976,983 **** return; } ! CellTypeState *basicBlockState = ! NEW_RESOURCE_ARRAY(CellTypeState, bbNo * _state_len); memset(basicBlockState, 0, bbNo * _state_len * sizeof(CellTypeState)); // Make a pass over the basicblocks and assign their state vectors. --- 985,992 ---- return; } ! CellTypeState *basicBlockState; ! 
ALLOC_RESOURCE_ARRAY(basicBlockState, CellTypeState, bbNo * _state_len); memset(basicBlockState, 0, bbNo * _state_len * sizeof(CellTypeState)); // Make a pass over the basicblocks and assign their state vectors.