# HG changeset patch
# User jcoomes
# Date 1306565749 25200
# Node ID b36598cf2c62c7d9b3e4774db0fd861f1be16406
# Parent  d920485ae93b6cb09ac133089ad6692804bfca38
# Parent  8cbcd406c42e7429cb1734d5d79c9e71a409a324
Merge

diff -r d920485ae93b -r b36598cf2c62 src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp    Thu May 26 20:19:48 2011 -0700
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp    Fri May 27 23:55:49 2011 -0700
@@ -47,7 +47,7 @@
 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
   // of 128-bits operands for SSE instructions.
-  jlong *operand = (jlong*)(((long)adr)&((long)(~0xF)));
+  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
   // Store the value to a 128-bits operand.
   operand[0] = lo;
   operand[1] = hi;
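The hunk above replaces a cast through long with intptr_t when masking off the low four address bits. On LLP64 targets such as 64-bit Windows, long is only 32 bits wide, so the old expression could truncate the upper half of a 64-bit pointer before the mask was applied; intptr_t is defined to be pointer-sized everywhere. A minimal standalone sketch of the portable idiom, outside HotSpot (align16 and the buffer are illustrative, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    // Round a pointer down to the nearest 16-byte boundary. uintptr_t is
    // guaranteed wide enough to hold a pointer; long is not (it stays
    // 32 bits on LLP64 platforms such as Win64, truncating the address).
    static long long* align16(long long* p) {
      return (long long*)((uintptr_t)p & ~(uintptr_t)0xF);
    }

    int main(void) {
      long long buf[4];
      long long* aligned = align16(buf + 1);
      printf("%p -> %p\n", (void*)(buf + 1), (void*)aligned);
      return 0;
    }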
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/ci/ciObject.cpp
--- a/src/share/vm/ci/ciObject.cpp      Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/ci/ciObject.cpp      Fri May 27 23:55:49 2011 -0700
@@ -187,7 +187,7 @@
 // ciObject::can_be_constant
 bool ciObject::can_be_constant() {
   if (ScavengeRootsInCode >= 1)  return true;  // now everybody can encode as a constant
-  return handle() == NULL || !is_scavengable();
+  return handle() == NULL || is_perm();
 }
 
 // ------------------------------------------------------------------
@@ -204,7 +204,7 @@
       return true;
     }
   }
-  return handle() == NULL || !is_scavengable();
+  return handle() == NULL || is_perm();
 }
 
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/ci/ciObject.hpp
--- a/src/share/vm/ci/ciObject.hpp      Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/ci/ciObject.hpp      Fri May 27 23:55:49 2011 -0700
@@ -108,7 +108,7 @@
   int hash();
 
   // Tells if this oop has an encoding as a constant.
-  // True if is_scavengable is false.
+  // True if is_perm is true.
   // Also true if ScavengeRootsInCode is non-zero.
   // If it does not have an encoding, the compiler is responsible for
   // making other arrangements for dealing with the oop.
@@ -116,7 +116,7 @@
   bool can_be_constant();
 
   // Tells if this oop should be made a constant.
-  // True if is_scavengable is false or ScavengeRootsInCode > 1.
+  // True if is_perm is true or ScavengeRootsInCode > 1.
   bool should_be_constant();
 
   // Is this object guaranteed to be in the permanent part of the heap?
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp   Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp   Fri May 27 23:55:49 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -407,6 +407,11 @@
   void save_sweep_limit() {
     _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                    unallocated_block() : end();
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
+                             " for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
+                             _sweep_limit, bottom(), end());
+    }
   }
   NOT_PRODUCT(
     void clear_sweep_limit() { _sweep_limit = NULL; }
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp     Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp     Fri May 27 23:55:49 2011 -0700
@@ -7888,60 +7888,64 @@
   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
          "sweep _limit out of bounds");
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("\n====================\nStarting new sweep\n");
-  }
-}
-
-// We need this destructor to reclaim any space at the end
-// of the space, which do_blk below may not yet have added back to
-// the free lists.
+    gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
+                           _limit);
+  }
+}
+
+void SweepClosure::print_on(outputStream* st) const {
+  tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
+                _sp->bottom(), _sp->end());
+  tty->print_cr("_limit = " PTR_FORMAT, _limit);
+  tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
+  NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
+  tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
+                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
+}
+
+#ifndef PRODUCT
+// Assertion checking only: no useful work in product mode --
+// however, if any of the flags below become product flags,
+// you may need to review this code to see if it needs to be
+// enabled in product mode.
 SweepClosure::~SweepClosure() {
   assert_lock_strong(_freelistLock);
   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
          "sweep _limit out of bounds");
-  // Flush any remaining coterminal free run as a single
-  // coalesced chunk to the appropriate free list.
   if (inFreeRange()) {
-    assert(freeFinger() < _limit, "freeFinger points too high");
-    flush_cur_free_chunk(freeFinger(), pointer_delta(_limit, freeFinger()));
-    if (CMSTraceSweeper) {
-      gclog_or_tty->print("Sweep: last chunk: ");
-      gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") [coalesced:"SIZE_FORMAT"]\n",
-                          freeFinger(), pointer_delta(_limit, freeFinger()), lastFreeRangeCoalesced());
-    }
-  } // else nothing to flush
-  NOT_PRODUCT(
-    if (Verbose && PrintGC) {
-      gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
-                          SIZE_FORMAT " bytes",
-                          _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
-      gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
-                             SIZE_FORMAT" bytes "
-        "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
-        _numObjectsLive, _numWordsLive*sizeof(HeapWord),
-        _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
-      size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
-                          sizeof(HeapWord);
-      gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
-
-      if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
-        size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
-        size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
-        size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
-        gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
-        gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
-                            indexListReturnedBytes);
-        gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
-                               dictReturnedBytes);
-      }
-    }
-  )
-  // Now, in debug mode, just null out the sweep_limit
-  NOT_PRODUCT(_sp->clear_sweep_limit();)
+    warning("inFreeRange() should have been reset; dumping state of SweepClosure");
+    print();
+    ShouldNotReachHere();
+  }
+  if (Verbose && PrintGC) {
+    gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
+                        _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
+    gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
+                           SIZE_FORMAT" bytes "
+      "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
+      _numObjectsLive, _numWordsLive*sizeof(HeapWord),
+      _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
+    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
+                        * sizeof(HeapWord);
+    gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
+
+    if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
+      size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
+      size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
+      size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
+      gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
+      gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
+                          indexListReturnedBytes);
+      gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
+                             dictReturnedBytes);
+    }
+  }
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("end of sweep\n================\n");
-  }
-}
+    gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
+                           _limit);
+  }
+}
+#endif  // PRODUCT
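The rewritten destructor above no longer quietly flushes a leftover free range; any such leftover is now a bug, so the destructor warns, dumps the closure state through the new print_on(), and aborts. A small sketch of this report-then-abort debugging pattern in isolation (SweepState and check_reset are illustrative, not HotSpot code):

    #include <cstdio>
    #include <cstdlib>

    struct SweepState {
      int in_free_range;
      void print_on(FILE* st) const {       // analogous to print_on(outputStream*)
        fprintf(st, "_inFreeRange = %d\n", in_free_range);
      }
    };

    // Instead of a bare assert, emit a warning, dump the relevant state,
    // then abort, so the failure leaves enough context behind to diagnose;
    // this mirrors warning() + print() + ShouldNotReachHere() above.
    static void check_reset(const SweepState& s) {
      if (s.in_free_range) {
        fprintf(stderr, "warning: inFreeRange() should have been reset\n");
        s.print_on(stderr);
        abort();
      }
    }

    int main() {
      SweepState s = { 0 };
      check_reset(s);   // passes silently: state was properly reset
      return 0;
    }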
@@ -8001,15 +8005,17 @@
   // we started the sweep, it may no longer be one because heap expansion
   // may have caused us to coalesce the block ending at the address _limit
   // with a newly expanded chunk (this happens when _limit was set to the
-  // previous _end of the space), so we may have stepped past _limit; see CR 6977970.
+  // previous _end of the space), so we may have stepped past _limit:
+  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
   if (addr >= _limit) { // we have swept up to or past the limit: finish up
     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
            "sweep _limit out of bounds");
     assert(addr < _sp->end(), "addr out of bounds");
-    // Flush any remaining coterminal free run as a single
+    // Flush any free range we might be holding as a single
     // coalesced chunk to the appropriate free list.
     if (inFreeRange()) {
-      assert(freeFinger() < _limit, "finger points too high");
+      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
+             err_msg("freeFinger() " PTR_FORMAT " is out-of-bounds", freeFinger()));
       flush_cur_free_chunk(freeFinger(),
                            pointer_delta(addr, freeFinger()));
       if (CMSTraceSweeper) {
@@ -8033,7 +8039,16 @@
     res = fc->size();
     do_already_free_chunk(fc);
     debug_only(_sp->verifyFreeLists());
-    assert(res == fc->size(), "Don't expect the size to change");
+    // If we flush the chunk at hand in lookahead_and_flush()
+    // and it's coalesced with a preceding chunk, then the
+    // process of "mangling" the payload of the coalesced block
+    // will cause erasure of the size information from the
+    // (erstwhile) header of all the coalesced blocks but the
+    // first, so the first disjunct in the assert will not hold
+    // in that specific case (in which case the second disjunct
+    // will hold).
+    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
+           "Otherwise the size info doesn't change at this step");
     NOT_PRODUCT(
       _numObjectsAlreadyFree++;
       _numWordsAlreadyFree += res;
@@ -8103,7 +8118,7 @@
 //
 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
-  size_t size = fc->size();
+  const size_t size = fc->size();
   // Chunks that cannot be coalesced are not in the
   // free lists.
   if (CMSTestInFreeList && !fc->cantCoalesce()) {
@@ -8112,7 +8127,7 @@
   }
   // a chunk that is already free, should not have been
   // marked in the bit map
-  HeapWord* addr = (HeapWord*) fc;
+  HeapWord* const addr = (HeapWord*) fc;
   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
   // Verify that the bit map has no bits marked between
   // addr and purported end of this block.
@@ -8149,7 +8164,7 @@
     }
   } else {
     // the midst of a free range, we are coalescing
-    debug_only(record_free_block_coalesced(fc);)
+    print_free_block_coalesced(fc);
     if (CMSTraceSweeper) {
       gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
     }
@@ -8173,6 +8188,10 @@
         }
       }
     }
+    // Note that if the chunk is not coalescable (the else arm
+    // below), we unconditionally flush, without needing to do
+    // a "lookahead," as we do below.
+    if (inFreeRange()) lookahead_and_flush(fc, size);
   } else {
     // Code path common to both original and adaptive free lists.
 
@@ -8191,8 +8210,8 @@
   // This is a chunk of garbage. It is not in any free list.
   // Add it to a free list or let it possibly be coalesced into
   // a larger chunk.
-  HeapWord* addr = (HeapWord*) fc;
-  size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
+  HeapWord* const addr = (HeapWord*) fc;
+  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
 
   if (_sp->adaptive_freelists()) {
     // Verify that the bit map has no bits marked between
@@ -8205,7 +8224,6 @@
       // start of a new free range
       assert(size > 0, "A free range should have a size");
       initialize_free_range(addr, false);
-
     } else {
       // this will be swept up when we hit the end of the
       // free range
@@ -8235,6 +8253,9 @@
     // addr and purported end of just dead object.
     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
   }
+  assert(_limit >= addr + size,
+         "A freshly garbage chunk can't possibly straddle over _limit");
+  if (inFreeRange()) lookahead_and_flush(fc, size);
   return size;
 }
 
@@ -8284,8 +8305,8 @@
          (!_collector->should_unload_classes()
           || oop(addr)->is_parsable()),
          "Should be an initialized object");
-  // Note that there are objects used during class redefinition
-  // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite()
+  // Note that there are objects used during class redefinition,
+  // e.g. merge_cp in VM_RedefineClasses::merge_cp_and_rewrite(),
   // which are discarded with their is_conc_safe state still
   // false. These object may be floating garbage so may be
   // seen here. If they are floating garbage their size
@@ -8307,7 +8328,7 @@
                                                  size_t chunkSize) {
   // do_post_free_or_garbage_chunk() should only be called in the case
   // of the adaptive free list allocator.
-  bool fcInFreeLists = fc->isFree();
+  const bool fcInFreeLists = fc->isFree();
   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
   assert((HeapWord*)fc <= _limit, "sweep invariant");
   if (CMSTestInFreeList && fcInFreeLists) {
@@ -8318,11 +8339,11 @@
     gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
   }
 
-  HeapWord* addr = (HeapWord*) fc;
+  HeapWord* const fc_addr = (HeapWord*) fc;
 
   bool coalesce;
-  size_t left  = pointer_delta(addr, freeFinger());
-  size_t right = chunkSize;
+  const size_t left  = pointer_delta(fc_addr, freeFinger());
+  const size_t right = chunkSize;
   switch (FLSCoalescePolicy) {
     // numeric value forms a coalition aggressiveness metric
     case 0:  { // never coalesce
@@ -8355,15 +8376,15 @@
   // If the chunk is in a free range and either we decided to coalesce above
   // or the chunk is near the large block at the end of the heap
   // (isNearLargestChunk() returns true), then coalesce this chunk.
-  bool doCoalesce = inFreeRange() &&
-    (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
+  const bool doCoalesce = inFreeRange()
+                          && (coalesce || _g->isNearLargestChunk(fc_addr));
   if (doCoalesce) {
     // Coalesce the current free range on the left with the new
     // chunk on the right. If either is on a free list,
     // it must be removed from the list and stashed in the closure.
     if (freeRangeInFreeLists()) {
-      FreeChunk* ffc = (FreeChunk*)freeFinger();
-      assert(ffc->size() == pointer_delta(addr, freeFinger()),
+      FreeChunk* const ffc = (FreeChunk*)freeFinger();
+      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
              "Size of free range is inconsistent with chunk size.");
       if (CMSTestInFreeList) {
         assert(_sp->verifyChunkInFreeLists(ffc),
@@ -8380,13 +8401,14 @@
       _sp->removeFreeChunkFromFreeLists(fc);
     }
     set_lastFreeRangeCoalesced(true);
+    print_free_block_coalesced(fc);
   } else {  // not in a free range and/or should not coalesce
     // Return the current free range and start a new one.
     if (inFreeRange()) {
       // In a free range but cannot coalesce with the right hand chunk.
       // Put the current free range into the free lists.
       flush_cur_free_chunk(freeFinger(),
-                           pointer_delta(addr, freeFinger()));
+                           pointer_delta(fc_addr, freeFinger()));
     }
     // Set up for new free range. Pass along whether the right hand
     // chunk is in the free lists.
"); + } + assert(freeFinger() < eob, "Error"); + flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger())); + } +} + void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) { assert(inFreeRange(), "Should only be called if currently in a free range."); assert(size > 0, @@ -8419,6 +8477,8 @@ } _sp->addChunkAndRepairOffsetTable(chunk, size, lastFreeRangeCoalesced()); + } else if (CMSTraceSweeper) { + gclog_or_tty->print_cr("Already in free list: nothing to flush"); } set_inFreeRange(false); set_freeRangeInFreeLists(false); @@ -8477,13 +8537,14 @@ bool debug_verifyChunkInFreeLists(FreeChunk* fc) { return debug_cms_space->verifyChunkInFreeLists(fc); } - -void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const { +#endif + +void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const { if (CMSTraceSweeper) { - gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size()); - } -} -#endif + gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")", + fc, fc->size()); + } +} // CMSIsAliveClosure bool CMSIsAliveClosure::do_object_b(oop obj) { diff -r d920485ae93b -r b36598cf2c62 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Thu May 26 20:19:48 2011 -0700 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Fri May 27 23:55:49 2011 -0700 @@ -1701,9 +1701,9 @@ CMSCollector* _collector; // collector doing the work ConcurrentMarkSweepGeneration* _g; // Generation being swept CompactibleFreeListSpace* _sp; // Space being swept - HeapWord* _limit;// the address at which the sweep should stop because - // we do not expect blocks eligible for sweeping past - // that address. + HeapWord* _limit;// the address at or above which the sweep should stop + // because we do not expect newly garbage blocks + // eligible for sweeping past that address. Mutex* _freelistLock; // Free list lock (in space) CMSBitMap* _bitMap; // Marking bit map (in // generation) @@ -1750,6 +1750,10 @@ void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize); // Process a free chunk during sweeping. void do_already_free_chunk(FreeChunk *fc); + // Work method called when processing an already free or a + // freshly garbage chunk to do a lookahead and possibly a + // premptive flush if crossing over _limit. + void lookahead_and_flush(FreeChunk* fc, size_t chunkSize); // Process a garbage chunk during sweeping. size_t do_garbage_chunk(FreeChunk *fc); // Process a live chunk during sweeping. @@ -1758,8 +1762,6 @@ // Accessors. 
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp     Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp     Fri May 27 23:55:49 2011 -0700
@@ -1701,9 +1701,9 @@
   CMSCollector*                  _collector;  // collector doing the work
   ConcurrentMarkSweepGeneration* _g;    // Generation being swept
   CompactibleFreeListSpace*      _sp;   // Space being swept
-  HeapWord*                      _limit;// the address at which the sweep should stop because
-                                        // we do not expect blocks eligible for sweeping past
-                                        // that address.
+  HeapWord*                      _limit;// the address at or above which the sweep should stop
+                                        // because we do not expect newly garbage blocks
+                                        // eligible for sweeping past that address.
   Mutex*                         _freelistLock; // Free list lock (in space)
   CMSBitMap*                     _bitMap; // Marking bit map (in
                                           // generation)
@@ -1750,6 +1750,10 @@
   void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
   // Process a free chunk during sweeping.
   void do_already_free_chunk(FreeChunk *fc);
+  // Work method called when processing an already free or a
+  // freshly garbage chunk to do a lookahead and possibly a
+  // preemptive flush if crossing over _limit.
+  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
   // Process a garbage chunk during sweeping.
   size_t do_garbage_chunk(FreeChunk *fc);
   // Process a live chunk during sweeping.
@@ -1758,8 +1762,6 @@
   // Accessors.
   HeapWord* freeFinger() const          { return _freeFinger; }
   void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
-  size_t freeRangeSize() const          { return _freeRangeSize; }
-  void set_freeRangeSize(size_t v)      { _freeRangeSize = v; }
   bool inFreeRange()    const           { return _inFreeRange; }
   void set_inFreeRange(bool v)          { _inFreeRange = v; }
   bool lastFreeRangeCoalesced() const   { return _lastFreeRangeCoalesced; }
@@ -1779,14 +1781,16 @@
   void do_yield_work(HeapWord* addr);
 
   // Debugging/Printing
-  void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
+  void print_free_block_coalesced(FreeChunk* fc) const;
 
  public:
   SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
                CMSBitMap* bitMap, bool should_yield);
-  ~SweepClosure();
+  ~SweepClosure() PRODUCT_RETURN;
 
   size_t do_blk_careful(HeapWord* addr);
+  void print() const { print_on(tty); }
+  void print_on(outputStream *st) const;
 };
 
 // Closures related to weak references processing
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/interpreter/interpreterRuntime.cpp
--- a/src/share/vm/interpreter/interpreterRuntime.cpp   Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp   Fri May 27 23:55:49 2011 -0700
@@ -139,9 +139,15 @@
   ResourceMark rm(thread);
   methodHandle m (thread, method(thread));
   Bytecode_loadconstant ldc(m, bci(thread));
-  oop result = ldc.resolve_constant(THREAD);
-  DEBUG_ONLY(ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc.cache_index()));
-  assert(result == cpce->f1(), "expected result for assembly code");
+  oop result = ldc.resolve_constant(CHECK);
+#ifdef ASSERT
+  {
+    // The bytecode wrappers aren't GC-safe so construct a new one
+    Bytecode_loadconstant ldc2(m, bci(thread));
+    ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc2.cache_index());
+    assert(result == cpce->f1(), "expected result for assembly code");
+  }
+#endif
 }
 IRT_END
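The interpreterRuntime.cpp hunk above does two things: resolve_constant() is now called with CHECK so a pending exception returns immediately, and the assertion code builds a fresh Bytecode_loadconstant wrapper because resolving the constant may trigger a GC that moves the underlying methodOop, leaving the first wrapper's cached state stale. The hazard is the same as holding any derived pointer across an operation that can relocate its base; a plain C++ analogy using std::vector (an analogy only, not HotSpot code):

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> v = {1, 2, 3};
      int* elem = &v[0];   // cached interior pointer, like the first ldc wrapper
      v.resize(1000);      // may reallocate, like a GC moving the methodOop
      // Using the old 'elem' now would be undefined behavior; re-derive it,
      // just as the patch constructs a fresh Bytecode_loadconstant (ldc2)
      // after resolve_constant() might have run a GC.
      elem = &v[0];
      printf("%d\n", *elem);
      return 0;
    }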
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/interpreter/rewriter.cpp
--- a/src/share/vm/interpreter/rewriter.cpp     Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/interpreter/rewriter.cpp     Fri May 27 23:55:49 2011 -0700
@@ -63,6 +63,15 @@
   _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0);
 }
 
+// Unrewrite the bytecodes if an error occurs.
+void Rewriter::restore_bytecodes() {
+  int len = _methods->length();
+
+  for (int i = len-1; i >= 0; i--) {
+    methodOop method = (methodOop)_methods->obj_at(i);
+    scan_method(method, true);
+  }
+}
 
 // Creates a constant pool cache given a CPC map
 void Rewriter::make_constant_pool_cache(TRAPS) {
@@ -133,57 +142,94 @@
 
 // Rewrite a classfile-order CP index into a native-order CPC index.
-void Rewriter::rewrite_member_reference(address bcp, int offset) {
+void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
   address p = bcp + offset;
-  int cp_index    = Bytes::get_Java_u2(p);
-  int cache_index = cp_entry_to_cp_cache(cp_index);
-  Bytes::put_native_u2(p, cache_index);
+  if (!reverse) {
+    int cp_index    = Bytes::get_Java_u2(p);
+    int cache_index = cp_entry_to_cp_cache(cp_index);
+    Bytes::put_native_u2(p, cache_index);
+  } else {
+    int cache_index = Bytes::get_native_u2(p);
+    int pool_index = cp_cache_entry_pool_index(cache_index);
+    Bytes::put_Java_u2(p, pool_index);
+  }
 }
 
-void Rewriter::rewrite_invokedynamic(address bcp, int offset) {
+void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
   address p = bcp + offset;
-  assert(p[-1] == Bytecodes::_invokedynamic, "");
-  int cp_index = Bytes::get_Java_u2(p);
-  int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
-  int cpc2 = add_secondary_cp_cache_entry(cpc);
+  assert(p[-1] == Bytecodes::_invokedynamic, "not invokedynamic bytecode");
+  if (!reverse) {
+    int cp_index = Bytes::get_Java_u2(p);
+    int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
+    int cpc2 = add_secondary_cp_cache_entry(cpc);
 
-  // Replace the trailing four bytes with a CPC index for the dynamic
-  // call site.  Unlike other CPC entries, there is one per bytecode,
-  // not just one per distinct CP entry.  In other words, the
-  // CPC-to-CP relation is many-to-one for invokedynamic entries.
-  // This means we must use a larger index size than u2 to address
-  // all these entries.  That is the main reason invokedynamic
-  // must have a five-byte instruction format.  (Of course, other JVM
-  // implementations can use the bytes for other purposes.)
-  Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
-  // Note: We use native_u4 format exclusively for 4-byte indexes.
+    // Replace the trailing four bytes with a CPC index for the dynamic
+    // call site.  Unlike other CPC entries, there is one per bytecode,
+    // not just one per distinct CP entry.  In other words, the
+    // CPC-to-CP relation is many-to-one for invokedynamic entries.
+    // This means we must use a larger index size than u2 to address
+    // all these entries.  That is the main reason invokedynamic
+    // must have a five-byte instruction format.  (Of course, other JVM
+    // implementations can use the bytes for other purposes.)
+    Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
+    // Note: We use native_u4 format exclusively for 4-byte indexes.
+  } else {
+    int cache_index = constantPoolCacheOopDesc::decode_secondary_index(
+                        Bytes::get_native_u4(p));
+    int secondary_index = cp_cache_secondary_entry_main_index(cache_index);
+    int pool_index = cp_cache_entry_pool_index(secondary_index);
+    assert(_pool->tag_at(pool_index).is_invoke_dynamic(), "wrong index");
+    // zero out 4 bytes
+    Bytes::put_Java_u4(p, 0);
+    Bytes::put_Java_u2(p, pool_index);
+  }
 }
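Both reverse paths above rely on one property: the forward rewrite (a classfile-order constant pool index replaced by a native-order cache index) is invertible, so restore_bytecodes() can recover the original operand from the rewritten bytes alone, with no saved copy. A self-contained sketch of that round trip; the get/put helpers are hand-rolled stand-ins for HotSpot's Bytes:: accessors, and the +100 mapping is a toy substitute for cp_entry_to_cp_cache() / cp_cache_entry_pool_index():

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // "Java order" is big-endian; the native accessors just memcpy,
    // so they match whatever the host byte order happens to be.
    static uint16_t get_Java_u2(const uint8_t* p) { return (uint16_t)((p[0] << 8) | p[1]); }
    static void put_Java_u2(uint8_t* p, uint16_t v) { p[0] = (uint8_t)(v >> 8); p[1] = (uint8_t)v; }
    static uint16_t get_native_u2(const uint8_t* p) { uint16_t v; std::memcpy(&v, p, 2); return v; }
    static void put_native_u2(uint8_t* p, uint16_t v) { std::memcpy(p, &v, 2); }

    int main() {
      uint8_t operand[2];
      put_Java_u2(operand, 42);                          // classfile order: CP index
      uint16_t cache_index = get_Java_u2(operand) + 100; // toy cp_entry_to_cp_cache()
      put_native_u2(operand, cache_index);               // forward rewrite
      uint16_t pool_index = get_native_u2(operand) - 100; // toy reverse mapping
      put_Java_u2(operand, pool_index);                  // reverse rewrite
      assert(get_Java_u2(operand) == 42);                // original bytes recovered
      return 0;
    }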
 
 // Rewrite some ldc bytecodes to _fast_aldc
-void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide) {
-  assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "");
-  address p = bcp + offset;
-  int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
-  constantTag tag = _pool->tag_at(cp_index).value();
-  if (tag.is_method_handle() || tag.is_method_type()) {
-    int cache_index = cp_entry_to_cp_cache(cp_index);
-    if (is_wide) {
-      (*bcp) = Bytecodes::_fast_aldc_w;
-      assert(cache_index == (u2)cache_index, "");
-      Bytes::put_native_u2(p, cache_index);
-    } else {
-      (*bcp) = Bytecodes::_fast_aldc;
-      assert(cache_index == (u1)cache_index, "");
-      (*p) = (u1)cache_index;
+void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
+                                 bool reverse) {
+  if (!reverse) {
+    assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "not ldc bytecode");
+    address p = bcp + offset;
+    int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
+    constantTag tag = _pool->tag_at(cp_index).value();
+    if (tag.is_method_handle() || tag.is_method_type()) {
+      int cache_index = cp_entry_to_cp_cache(cp_index);
+      if (is_wide) {
+        (*bcp) = Bytecodes::_fast_aldc_w;
+        assert(cache_index == (u2)cache_index, "index overflow");
+        Bytes::put_native_u2(p, cache_index);
+      } else {
+        (*bcp) = Bytecodes::_fast_aldc;
+        assert(cache_index == (u1)cache_index, "index overflow");
+        (*p) = (u1)cache_index;
+      }
+    }
+  } else {
+    Bytecodes::Code rewritten_bc =
+      (is_wide ? Bytecodes::_fast_aldc_w : Bytecodes::_fast_aldc);
+    if ((*bcp) == rewritten_bc) {
+      address p = bcp + offset;
+      int cache_index = is_wide ? Bytes::get_native_u2(p) : (u1)(*p);
+      int pool_index = cp_cache_entry_pool_index(cache_index);
+      if (is_wide) {
+        (*bcp) = Bytecodes::_ldc_w;
+        assert(pool_index == (u2)pool_index, "index overflow");
+        Bytes::put_Java_u2(p, pool_index);
+      } else {
+        (*bcp) = Bytecodes::_ldc;
+        assert(pool_index == (u1)pool_index, "index overflow");
+        (*p) = (u1)pool_index;
+      }
     }
   }
 }
 
 // Rewrites a method given the index_map information
-void Rewriter::scan_method(methodOop method) {
+void Rewriter::scan_method(methodOop method, bool reverse) {
 
   int nof_jsrs = 0;
   bool has_monitor_bytecodes = false;
@@ -236,6 +282,13 @@
 #endif
           break;
         }
+        case Bytecodes::_fast_linearswitch:
+        case Bytecodes::_fast_binaryswitch: {
+#ifndef CC_INTERP
+          (*bcp) = Bytecodes::_lookupswitch;
+#endif
+          break;
+        }
         case Bytecodes::_getstatic      : // fall through
         case Bytecodes::_putstatic      : // fall through
         case Bytecodes::_getfield       : // fall through
@@ -244,16 +297,18 @@
         case Bytecodes::_invokespecial  : // fall through
         case Bytecodes::_invokestatic   :
         case Bytecodes::_invokeinterface:
-          rewrite_member_reference(bcp, prefix_length+1);
+          rewrite_member_reference(bcp, prefix_length+1, reverse);
          break;
        case Bytecodes::_invokedynamic:
-          rewrite_invokedynamic(bcp, prefix_length+1);
+          rewrite_invokedynamic(bcp, prefix_length+1, reverse);
          break;
        case Bytecodes::_ldc:
-          maybe_rewrite_ldc(bcp, prefix_length+1, false);
+        case Bytecodes::_fast_aldc:
+          maybe_rewrite_ldc(bcp, prefix_length+1, false, reverse);
          break;
        case Bytecodes::_ldc_w:
-          maybe_rewrite_ldc(bcp, prefix_length+1, true);
+        case Bytecodes::_fast_aldc_w:
+          maybe_rewrite_ldc(bcp, prefix_length+1, true, reverse);
          break;
        case Bytecodes::_jsr            : // fall through
        case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
@@ -273,12 +328,13 @@
   if (nof_jsrs > 0) {
     method->set_has_jsrs();
     // Second pass will revisit this method.
-    assert(method->has_jsrs(), "");
+    assert(method->has_jsrs(), "didn't we just set this?");
   }
 }
 
 // After constant pool is created, revisit methods containing jsrs.
 methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
+  ResourceMark rm(THREAD);
   ResolveOopMapConflicts romc(method);
   methodHandle original_method = method;
   method = romc.do_potential_rewrite(CHECK_(methodHandle()));
@@ -300,7 +356,6 @@
   return method;
 }
 
-
 void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
   ResourceMark rm(THREAD);
   Rewriter rw(klass, klass->constants(), klass->methods(), CHECK);
@@ -343,34 +398,57 @@
   }
 
   // rewrite methods, in two passes
-  int i, len = _methods->length();
+  int len = _methods->length();
 
-  for (i = len; --i >= 0; ) {
+  for (int i = len-1; i >= 0; i--) {
     methodOop method = (methodOop)_methods->obj_at(i);
     scan_method(method);
   }
 
   // allocate constant pool cache, now that we've seen all the bytecodes
-  make_constant_pool_cache(CHECK);
+  make_constant_pool_cache(THREAD);
+
+  // Restore bytecodes to their unrewritten state if there are exceptions
+  // rewriting bytecodes or allocating the cpCache
+  if (HAS_PENDING_EXCEPTION) {
+    restore_bytecodes();
+    return;
+  }
+}
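The tail of Rewriter::rewrite() above is the recovery protocol: make_constant_pool_cache() is now called with THREAD instead of CHECK so control returns even when it fails, and a pending exception triggers restore_bytecodes() before the method returns. Because the rewrite is invertible (the reverse=true paths above), nothing has to be copied up front. A toy model of the same shape (ToyRewriter and kBias are illustrative only):

    #include <cstdio>
    #include <vector>

    struct ToyRewriter {
      std::vector<int> codes;
      enum { kBias = 1000 };   // toy stand-in for the CP-to-cache mapping

      // Forward pass when reverse == false, exact inverse when true;
      // mirrors scan_method(method, reverse).
      void scan(bool reverse) {
        for (size_t i = 0; i < codes.size(); i++)
          codes[i] += reverse ? -kBias : kBias;
      }

      // Mirrors Rewriter::rewrite(): transform in place, then roll back
      // in place if a later step fails.
      bool rewrite(bool later_step_fails) {
        scan(false);
        if (later_step_fails) {   // HAS_PENDING_EXCEPTION in the patch
          scan(true);             // restore_bytecodes()
          return false;
        }
        return true;
      }
    };

    int main() {
      ToyRewriter r;
      r.codes.push_back(18);      // e.g. an ldc opcode
      r.rewrite(true);            // simulated cpCache allocation failure
      printf("%d\n", r.codes[0]); // prints 18: bytecodes restored
      return 0;
    }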
 
-  for (i = len; --i >= 0; ) {
-    methodHandle m(THREAD, (methodOop)_methods->obj_at(i));
+// Relocate jsr/rets in a method.  This can't be done with the rewriter
+// stage because it can throw other exceptions, leaving the bytecodes
+// pointing at constant pool cache entries.
+// Link and check jvmti dependencies while we're iterating over the methods.
+// JSR292 code calls with a different set of methods, so two entry points.
+void Rewriter::relocate_and_link(instanceKlassHandle this_oop, TRAPS) {
+  objArrayHandle methods(THREAD, this_oop->methods());
+  relocate_and_link(this_oop, methods, THREAD);
+}
+
+void Rewriter::relocate_and_link(instanceKlassHandle this_oop,
+                                 objArrayHandle methods, TRAPS) {
+  int len = methods->length();
+  for (int i = len-1; i >= 0; i--) {
+    methodHandle m(THREAD, (methodOop)methods->obj_at(i));
 
     if (m->has_jsrs()) {
       m = rewrite_jsrs(m, CHECK);
       // Method might have gotten rewritten.
-      _methods->obj_at_put(i, m());
+      methods->obj_at_put(i, m());
     }
 
-    // Set up method entry points for compiler and interpreter.
+    // Set up method entry points for compiler and interpreter.
     m->link_method(m, CHECK);
 
+    // This is for JVMTI and unrelated to relocator but the last thing we do
 #ifdef ASSERT
     if (StressMethodComparator) {
       static int nmc = 0;
       for (int j = i; j >= 0 && j >= i-4; j--) {
         if ((++nmc % 1000) == 0)  tty->print_cr("Have run MethodComparator %d times...", nmc);
-        bool z = MethodComparator::methods_EMCP(m(), (methodOop)_methods->obj_at(j));
+        bool z = MethodComparator::methods_EMCP(m(),
+                   (methodOop)methods->obj_at(j));
         if (j == i && !z) {
           tty->print("MethodComparator FAIL: "); m->print(); m->print_codes();
           assert(z, "method must compare equal to itself");
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/interpreter/rewriter.hpp
--- a/src/share/vm/interpreter/rewriter.hpp     Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/interpreter/rewriter.hpp     Fri May 27 23:55:49 2011 -0700
@@ -85,13 +85,15 @@
 
   void compute_index_maps();
   void make_constant_pool_cache(TRAPS);
-  void scan_method(methodOop m);
-  methodHandle rewrite_jsrs(methodHandle m, TRAPS);
+  void scan_method(methodOop m, bool reverse = false);
   void rewrite_Object_init(methodHandle m, TRAPS);
-  void rewrite_member_reference(address bcp, int offset);
-  void rewrite_invokedynamic(address bcp, int offset);
-  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide);
+  void rewrite_member_reference(address bcp, int offset, bool reverse = false);
+  void rewrite_invokedynamic(address bcp, int offset, bool reverse = false);
+  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false);
+  // Revert bytecodes in case of an exception.
+  void restore_bytecodes();
+  static methodHandle rewrite_jsrs(methodHandle m, TRAPS);
 public:
   // Driver routine:
   static void rewrite(instanceKlassHandle klass, TRAPS);
@@ -100,6 +102,13 @@
   enum {
     _secondary_entry_tag = nth_bit(30)
   };
+
+  // Second pass, not gated by is_rewritten flag
+  static void relocate_and_link(instanceKlassHandle klass, TRAPS);
+  // JSR292 version to call with its own methods.
+  static void relocate_and_link(instanceKlassHandle klass,
+                                objArrayHandle methods, TRAPS);
+
 };
 
 #endif // SHARE_VM_INTERPRETER_REWRITER_HPP
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/memory/blockOffsetTable.cpp
--- a/src/share/vm/memory/blockOffsetTable.cpp  Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/memory/blockOffsetTable.cpp  Fri May 27 23:55:49 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -566,11 +566,17 @@
     q = n;
     n += _sp->block_size(n);
     assert(n > q,
-           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT " _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
-                   n, last, _sp->bottom(), _sp->end()));
+           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT ","
+                   " while querying blk_start(" PTR_FORMAT ")"
+                   " on _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
+                   n, last, addr, _sp->bottom(), _sp->end()));
   }
-  assert(q <= addr, err_msg("wrong order for current (" INTPTR_FORMAT ") <= arg (" INTPTR_FORMAT ")", q, addr));
-  assert(addr <= n, err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")", addr, n));
+  assert(q <= addr,
+         err_msg("wrong order for current (" INTPTR_FORMAT ") <= arg (" INTPTR_FORMAT ")",
+                 q, addr));
+  assert(addr <= n,
+         err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")",
+                 addr, n));
   return q;
 }
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/oops/instanceKlass.cpp
--- a/src/share/vm/oops/instanceKlass.cpp       Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/oops/instanceKlass.cpp       Fri May 27 23:55:49 2011 -0700
@@ -335,6 +335,9 @@
       this_oop->rewrite_class(CHECK_false);
     }
 
+    // relocate jsrs and link methods after they are all rewritten
+    this_oop->relocate_and_link_methods(CHECK_false);
+
     // Initialize the vtable and interface table after
     // methods have been rewritten since rewrite may
     // fabricate new methodOops.
@@ -365,17 +368,8 @@
 
 
 // Rewrite the byte codes of all of the methods of a class.
-// Three cases:
-//    During the link of a newly loaded class.
-//    During the preloading of classes to be written to the shared spaces.
-//      - Rewrite the methods and update the method entry points.
-//
-//    During the link of a class in the shared spaces.
-//      - The methods were already rewritten, update the metho entry points.
-//
 // The rewriter must be called exactly once. Rewriting must happen after
 // verification but before the first method of the class is executed.
-
 void instanceKlass::rewrite_class(TRAPS) {
   assert(is_loaded(), "must be loaded");
   instanceKlassHandle this_oop(THREAD, this->as_klassOop());
@@ -383,10 +377,19 @@
     assert(this_oop()->is_shared(), "rewriting an unshared class?");
     return;
   }
-  Rewriter::rewrite(this_oop, CHECK); // No exception can happen here
+  Rewriter::rewrite(this_oop, CHECK);
   this_oop->set_rewritten();
 }
 
+// Now relocate and link method entry points after class is rewritten.
+// This is outside is_rewritten flag. In case of an exception, it can be
+// executed more than once.
+void instanceKlass::relocate_and_link_methods(TRAPS) {
+  assert(is_loaded(), "must be loaded");
+  instanceKlassHandle this_oop(THREAD, this->as_klassOop());
+  Rewriter::relocate_and_link(this_oop, CHECK);
+}
+
 
 void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
   // Make sure klass is linked (verified) before initialization
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/oops/instanceKlass.hpp
--- a/src/share/vm/oops/instanceKlass.hpp       Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/oops/instanceKlass.hpp       Fri May 27 23:55:49 2011 -0700
@@ -392,6 +392,7 @@
   bool link_class_or_fail(TRAPS); // returns false on failure
   void unlink_class();
   void rewrite_class(TRAPS);
+  void relocate_and_link_methods(TRAPS);
   methodOop class_initializer();
 
   // set the class to initialized if no static initializer is present
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/oops/methodOop.cpp
--- a/src/share/vm/oops/methodOop.cpp   Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/oops/methodOop.cpp   Fri May 27 23:55:49 2011 -0700
@@ -693,7 +693,10 @@
 // Called when the method_holder is getting linked. Setup entrypoints so the method
 // is ready to be called from interpreter, compiler, and vtables.
 void methodOopDesc::link_method(methodHandle h_method, TRAPS) {
-  assert(_i2i_entry == NULL, "should only be called once");
+  // If the code cache is full, we may reenter this function for the
+  // leftover methods that weren't linked.
+  if (_i2i_entry != NULL) return;
+
   assert(_adapter == NULL, "init'd to NULL" );
   assert( _code == NULL, "nothing compiled yet" );
 
@@ -717,7 +720,7 @@
   // called from the vtable.  We need adapters on such methods that get loaded
   // later.  Ditto for mega-morphic itable calls.  If this proves to be a
   // problem we'll make these lazily later.
-  (void) make_adapters(h_method, CHECK);
+  if (UseCompiler) (void) make_adapters(h_method, CHECK);
 
   // ONLY USE the h_method now as make_adapter may have blocked
 
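The link_method() hunk above turns a hard assertion into an early return, making linking idempotent: if a previous linking pass died partway (for instance, the code cache filled up while making adapters), relocate_and_link() can run again and methods that already received entry points are simply skipped. A generic sketch of that idiom (Method and link here are illustrative, not the HotSpot types):

    #include <cstdio>

    struct Method {
      void* i2i_entry;   // set only once linking has fully succeeded

      // Idempotent link step, mirroring the early return the patch adds in
      // methodOopDesc::link_method(): re-entry is a no-op for methods that
      // were already linked on a previous attempt.
      bool link(bool resources_available) {
        if (i2i_entry != 0) return true;        // already linked: nothing to do
        if (!resources_available) return false; // e.g. code cache full; retry later
        i2i_entry = (void*)1;                   // commit last, after all steps work
        return true;
      }
    };

    int main() {
      Method m = { 0 };
      m.link(false);   // first pass fails (code cache "full")
      m.link(true);    // a later pass links the leftover method
      printf("linked: %d\n", m.i2i_entry != 0);
      return 0;
    }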
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/opto/cfgnode.cpp
--- a/src/share/vm/opto/cfgnode.cpp     Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/opto/cfgnode.cpp     Fri May 27 23:55:49 2011 -0700
@@ -1556,7 +1556,9 @@
   Node *top = phase->C->top();
   bool new_phi = (outcnt() == 0); // transforming new Phi
-  assert(!can_reshape || !new_phi, "for igvn new phi should be hooked");
+  // No change for igvn if new phi is not hooked
+  if (new_phi && can_reshape)
+    return NULL;
 
   // The are 2 situations when only one valid phi's input is left
   // (in addition to Region input).
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/opto/loopTransform.cpp
--- a/src/share/vm/opto/loopTransform.cpp       Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/opto/loopTransform.cpp       Fri May 27 23:55:49 2011 -0700
@@ -1292,9 +1292,23 @@
   }
   assert(new_limit != NULL, "");
   // Replace in loop test.
-  _igvn.hash_delete(cmp);
-  cmp->set_req(2, new_limit);
-
+  assert(loop_end->in(1)->in(1) == cmp, "sanity");
+  if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) {
+    // Don't need to create new test since only one user.
+    _igvn.hash_delete(cmp);
+    cmp->set_req(2, new_limit);
+  } else {
+    // Create new test since it is shared.
+    Node* ctrl2 = loop_end->in(0);
+    Node* cmp2  = cmp->clone();
+    cmp2->set_req(2, new_limit);
+    register_new_node(cmp2, ctrl2);
+    Node* bol2 = loop_end->in(1)->clone();
+    bol2->set_req(1, cmp2);
+    register_new_node(bol2, ctrl2);
+    _igvn.hash_delete(loop_end);
+    loop_end->set_req(1, bol2);
+  }
   // Step 3: Find the min-trip test guaranteed before a 'main' loop.
   // Make it a 1-trip test (means at least 2 trips).
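The loopTransform.cpp hunk above closes an IGVN hazard: the loop-exit Cmp/Bool pair can be shared with other tests, and editing it in place would silently retarget every user. When outcnt() shows more than one user, the patch clones the nodes, edits the clones, and re-points only this loop's exit. That is ordinary copy-on-write for shared nodes in a DAG; a small generic illustration (ExprNode and retarget are illustrative, not C2 APIs):

    #include <cstdio>

    // Minimal expression node: one operand edge plus a user count,
    // mimicking the outcnt()-based decision in the hunk above.
    struct ExprNode {
      const char* name;
      ExprNode*   operand;
      int         outcnt;    // number of users of this node
    };

    // Point 'test' at new_limit on behalf of ONE user: mutate in place
    // when we are the sole user, otherwise edit a private clone.
    ExprNode* retarget(ExprNode* test, ExprNode* new_limit) {
      if (test->outcnt == 1) {
        test->operand = new_limit;        // safe: nobody else sees this node
        return test;
      }
      ExprNode* clone = new ExprNode(*test);  // (leaked here for brevity)
      clone->outcnt  = 1;
      clone->operand = new_limit;
      test->outcnt--;                     // one user moved off the original
      return clone;
    }

    int main() {
      ExprNode limit     = { "limit", 0, 2 };
      ExprNode new_limit = { "new_limit", 0, 1 };
      ExprNode cmp       = { "cmp", &limit, 2 };   // shared by two tests
      ExprNode* mine = retarget(&cmp, &new_limit);
      printf("original -> %s, mine -> %s\n",
             cmp.operand->name, mine->operand->name);
      return 0;
    }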
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/opto/output.cpp
--- a/src/share/vm/opto/output.cpp      Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/opto/output.cpp      Fri May 27 23:55:49 2011 -0700
@@ -911,7 +911,7 @@
         }
       } else {
         const TypePtr *tp = obj_node->bottom_type()->make_ptr();
-        scval = new ConstantOopWriteValue(tp->is_instptr()->const_oop()->constant_encoding());
+        scval = new ConstantOopWriteValue(tp->is_oopptr()->const_oop()->constant_encoding());
       }
 
       OptoReg::Name box_reg = BoxLockNode::stack_slot(box_node);
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/opto/stringopts.cpp
--- a/src/share/vm/opto/stringopts.cpp  Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/opto/stringopts.cpp  Fri May 27 23:55:49 2011 -0700
@@ -768,6 +768,7 @@
           tty->cr();
         }
 #endif
+        fail = true;
         break;
       } else if (ptr->is_Proj() && ptr->in(0)->is_Initialize()) {
         ptr = ptr->in(0)->in(0);
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/prims/jvmtiRedefineClasses.cpp
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp       Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp       Fri May 27 23:55:49 2011 -0700
@@ -992,6 +992,9 @@
   }
 
   Rewriter::rewrite(scratch_class, THREAD);
+  if (!HAS_PENDING_EXCEPTION) {
+    Rewriter::relocate_and_link(scratch_class, THREAD);
+  }
   if (HAS_PENDING_EXCEPTION) {
     Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name();
     CLEAR_PENDING_EXCEPTION;
diff -r d920485ae93b -r b36598cf2c62 src/share/vm/prims/methodHandleWalk.cpp
--- a/src/share/vm/prims/methodHandleWalk.cpp   Thu May 26 20:19:48 2011 -0700
+++ b/src/share/vm/prims/methodHandleWalk.cpp   Fri May 27 23:55:49 2011 -0700
@@ -1399,6 +1399,7 @@
   objArrayHandle methods(THREAD, m_array);
   methods->obj_at_put(0, m());
   Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(empty));  // Use fake class.
+  Rewriter::relocate_and_link(_target_klass(), methods, CHECK_(empty));  // Use fake class.
 
   // Set the invocation counter's count to the invoke count of the
   // original call site.