# HG changeset patch
# User rkennke
# Date 1443717560 -7200
# Node ID 0d5b3847ab03e9ea7452998be7f3516b37501e7e
# Parent  42defc20a38cd2d7aa75db6cde758b18445de0aa
First round of cleanup.

diff -r 42defc20a38c -r 0d5b3847ab03 src/cpu/x86/vm/shenandoahBarrierSet_x86.cpp
--- a/src/cpu/x86/vm/shenandoahBarrierSet_x86.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/cpu/x86/vm/shenandoahBarrierSet_x86.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -2,6 +2,7 @@
   Copyright 2015 Red Hat, Inc. and/or its affiliates.
  */
 
+#include "gc/shenandoah/brooksPointer.hpp"
 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
 
 #include "asm/macroAssembler.hpp"
@@ -255,4 +256,9 @@
   __ bind(done);
 }
 
+void ShenandoahHeap::compile_prepare_oop(MacroAssembler* masm, Register obj) {
+  __ incrementq(obj, BrooksPointer::BROOKS_POINTER_OBJ_SIZE * HeapWordSize);
+  __ movptr(Address(obj, -1 * HeapWordSize), obj);
+}
 #endif
+
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/c1/c1_LIRGenerator.cpp
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -41,8 +41,6 @@
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc/shenandoah/shenandoahBarrierSet.hpp"
-#include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/g1/heapRegion.hpp"
 #endif // INCLUDE_ALL_GCS
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/classfile/defaultMethods.cpp
--- a/src/share/vm/classfile/defaultMethods.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/classfile/defaultMethods.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -390,20 +390,6 @@
   Symbol* get_exception_message() { return _exception_message; }
   Symbol* get_exception_name() { return _exception_name; }
 
-  // Return true if the specified klass has a static method that matches
-  // the name and signature of the target method.
-  bool has_matching_static(InstanceKlass* root) {
-    if (_members.length() > 0) {
-      Pair<Method*,QualifiedState> entry = _members.at(0);
-      Method* impl = root->find_method(entry.first->name(),
-                                       entry.first->signature());
-      if ((impl != NULL) && impl->is_static()) {
-        return true;
-      }
-    }
-    return false;
-  }
-
   // Either sets the target or the exception error message
   void determine_target(InstanceKlass* root, TRAPS) {
     if (has_target() || throws_exception()) {
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/classfile/symbolTable.cpp
--- a/src/share/vm/classfile/symbolTable.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/classfile/symbolTable.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -51,11 +51,6 @@
 
 CompactHashtable<Symbol*, char> SymbolTable::_shared_table;
 
-Symbol* SymbolTable::resolve_symbol(Symbol* sym) {
-  // TODO: Casting Symbol* to oopDesc*/oop seems weird.
-  return (Symbol*) (oopDesc*) oopDesc::bs()->maybe_resolve_oop((oop) sym);
-}
-
 Symbol* SymbolTable::allocate_symbol(const u1* name, int len, bool c_heap, TRAPS) {
   assert (len <= Symbol::max_length(), "should be checked by caller");
 
@@ -116,7 +111,7 @@
       if (entry->is_shared() && !use_alternate_hashcode()) {
         break;
       }
-      Symbol* s = resolve_symbol(entry->literal());
+      Symbol* s = entry->literal();
       (*memory_total) += s->size();
       (*processed)++;
       assert(s != NULL, "just checking");
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/classfile/symbolTable.hpp
--- a/src/share/vm/classfile/symbolTable.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/classfile/symbolTable.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -94,8 +94,6 @@
   // shared symbol table.
   static CompactHashtable<Symbol*, char> _shared_table;
 
-  static Symbol* resolve_symbol(Symbol* sym);
-
   Symbol* allocate_symbol(const u1* name, int len, bool c_heap, TRAPS); // Assumes no characters larger than 0x7F
 
   // Adding elements
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/code/codeBlob.cpp
--- a/src/share/vm/code/codeBlob.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/code/codeBlob.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -28,7 +28,6 @@
 #include "code/codeCacheExtensions.hpp"
 #include "code/relocInfo.hpp"
 #include "compiler/disassembler.hpp"
-#include "gc/shared/barrierSet.hpp"
 #include "interpreter/bytecode.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/heap.hpp"
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/compiler/oopMap.cpp
--- a/src/share/vm/compiler/oopMap.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/compiler/oopMap.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -262,9 +262,6 @@
   assert( i < len, "oopmap not found" );
 
   OopMap* m = at(i);
-  if (m->offset() != pc_offset) {
-    tty->print_cr("oopmap not found, pc_offset: %d, m->offset(): %d", pc_offset, m->offset());
-  }
   assert( m->offset() == pc_offset, "oopmap not found" );
   return m;
 }
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/gc/g1/heapRegionBounds.inline.hpp
--- a/src/share/vm/gc/g1/heapRegionBounds.inline.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/gc/g1/heapRegionBounds.inline.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -22,8 +22,8 @@
  *
  */
 
-#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_INLINE_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_INLINE_HPP
+#ifndef SHARE_VM_GC_G1_HEAPREGIONBOUNDS_INLINE_HPP
+#define SHARE_VM_GC_G1_HEAPREGIONBOUNDS_INLINE_HPP
 
 #include "gc/g1/heapRegionBounds.hpp"
 
@@ -39,4 +39,4 @@
   return TARGET_REGION_NUMBER;
 }
 
-#endif
+#endif // SHARE_VM_GC_G1_HEAPREGIONBOUNDS_INLINE_HPP
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/gc/serial/genMarkSweep.hpp
--- a/src/share/vm/gc/serial/genMarkSweep.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/gc/serial/genMarkSweep.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -31,7 +31,6 @@
   friend class VM_MarkSweep;
   friend class ShenandoahMarkCompact;
   friend class G1MarkSweep;
- public:
   static void invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs);
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/gc/shared/collectedHeap.cpp
--- a/src/share/vm/gc/shared/collectedHeap.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/gc/shared/collectedHeap.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -319,7 +319,7 @@
 #endif // ASSERT
   }
   thread->tlab().fill(obj, obj + size, new_tlab_size);
-  return obj;
+  return Universe::heap()->tlab_post_allocation_setup(obj);
 }
 
 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
@@ -452,7 +452,7 @@
 #endif // ASSERT
 
 void
-CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap, bool gc_init)
+CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
 {
   assert(words >= filler_array_min_size(), "too small for an array");
   assert(words <= filler_array_max_size(), "too big for a single object");
@@ -463,28 +463,28 @@
   // Set the length first for concurrent GC.
   ((arrayOop)start)->set_length((int)len);
 
-  post_allocation_setup_common(Universe::intArrayKlassObj(), start, gc_init);
+  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
   DEBUG_ONLY(zap_filler_array(start, words, zap);)
 }
 
 void
-CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap, bool gc_init)
+CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
 {
   assert(words <= filler_array_max_size(), "too big for a single object");
 
   if (words >= filler_array_min_size()) {
-    fill_with_array(start, words, zap, gc_init);
+    fill_with_array(start, words, zap);
   } else if (words > 0) {
     assert(words == min_fill_size(), "unaligned size");
-    post_allocation_setup_common(SystemDictionary::Object_klass(), start, gc_init);
+    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
   }
 }
 
-void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap, bool gc_init)
+void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
 {
   DEBUG_ONLY(fill_args_check(start, words);)
   HandleMark hm;  // Free handles before leaving.
-  fill_with_object_impl(start, words, zap, gc_init);
+  fill_with_object_impl(start, words, zap);
 }
 
 void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
@@ -632,7 +632,7 @@
 }
 #endif
 
-HeapWord* CollectedHeap::tlab_post_allocation_setup(HeapWord* obj, bool new_obj) {
+HeapWord* CollectedHeap::tlab_post_allocation_setup(HeapWord* obj) {
   return obj;
 }
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/gc/shared/collectedHeap.hpp
--- a/src/share/vm/gc/shared/collectedHeap.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/gc/shared/collectedHeap.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -136,7 +136,6 @@
   // Allocate from the current thread's TLAB, with broken-out slow path.
   inline static HeapWord* allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size);
-  inline static HeapWord* allocate_from_tlab_work(KlassHandle klass, Thread* thread, size_t size);
   static HeapWord* allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size);
 
   // Allocate an uninitialized block of the given size, or returns NULL if
@@ -148,7 +147,7 @@
   inline static HeapWord* common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS);
 
   // Helper functions for (VM) allocation.
-  inline static void post_allocation_setup_common(KlassHandle klass, HeapWord* obj, bool gc_init = true);
+  inline static void post_allocation_setup_common(KlassHandle klass, HeapWord* obj);
   inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
                                                             HeapWord* objPtr);
 
@@ -169,10 +168,10 @@
   // Fill with a single array; caller must ensure filler_array_min_size() <=
   // words <= filler_array_max_size().
-  static void fill_with_array(HeapWord* start, size_t words, bool zap = true, bool gc_init = true);
+  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
 
   // Fill with a single object (either an int array or a java.lang.Object).
-  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true, bool gc_init = true);
+  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
 
   virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 
@@ -197,11 +196,7 @@
 
   virtual Name kind() const = 0;
 
-  // TLAB Post-allocation setup, specific to GC.
-  virtual HeapWord* tlab_post_allocation_setup(HeapWord* obj, bool new_obj = true);
-
-  // Collector specific initialization.
-  virtual void post_allocation_collector_specific_setup(HeapWord* obj) { }
+  virtual HeapWord* tlab_post_allocation_setup(HeapWord* obj);
 
   /**
    * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
@@ -333,12 +328,12 @@
 
   static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);
 
-  static void fill_with_object(HeapWord* start, size_t words, bool zap = true, bool gc_init = true);
-  static void fill_with_object(MemRegion region, bool zap = true, bool gc_init = true) {
-    fill_with_object(region.start(), region.word_size(), zap, gc_init);
+  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
+  static void fill_with_object(MemRegion region, bool zap = true) {
+    fill_with_object(region.start(), region.word_size(), zap);
   }
-  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true, bool gc_init = true) {
-    fill_with_object(start, pointer_delta(end, start), zap, gc_init);
+  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
+    fill_with_object(start, pointer_delta(end, start), zap);
   }
 
   // Return the address "addr" aligned by "alignment_in_bytes" if such
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/gc/shared/collectedHeap.inline.hpp
--- a/src/share/vm/gc/shared/collectedHeap.inline.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/gc/shared/collectedHeap.inline.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -39,12 +39,9 @@
 // Inline allocation implementations.
 
 void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
-                                                 HeapWord* obj, bool gc_init) {
+                                                 HeapWord* obj) {
   post_allocation_setup_no_klass_install(klass, obj);
   post_allocation_install_obj_klass(klass, oop(obj));
-  if (gc_init) {
-    Universe::heap()->post_allocation_collector_specific_setup(obj);
-  }
 }
 
 void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
@@ -182,17 +179,11 @@
 HeapWord* CollectedHeap::allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size) {
   assert(UseTLAB, "should use UseTLAB");
+  size += Universe::heap()->oop_extra_words();
-  HeapWord* obj = allocate_from_tlab_work(klass, thread, size);
+  HeapWord* obj = thread->tlab().allocate(size);
   if (obj != NULL) {
     obj = Universe::heap()->tlab_post_allocation_setup(obj);
-  }
-  return obj;
-}
-
-HeapWord* CollectedHeap::allocate_from_tlab_work(KlassHandle klass, Thread* thread, size_t size) {
-  HeapWord* obj = thread->tlab().allocate(size);
-  if (obj != NULL) {
     return obj;
   }
   // Otherwise...
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/gc/shared/referenceProcessor.cpp
--- a/src/share/vm/gc/shared/referenceProcessor.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/gc/shared/referenceProcessor.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -446,8 +446,8 @@
   _next = discovered;
   _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
   _referent = java_lang_ref_Reference::referent(_ref);
-  _referent = oopDesc::bs()->resolve_oop(_referent);
-
+  // _referent = oopDesc::bs()->resolve_oop(_referent);
+  assert(_referent == oopDesc::bs()->resolve_oop(_referent), "expect forwarded referent");
   assert(Universe::heap()->is_in_reserved_or_null(_referent),
          "Wrong oop found in java.lang.Reference object");
   assert(allow_null_referent ?
@@ -647,10 +647,13 @@
 ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
   BarrierSet* bs = oopDesc::bs();
   oop obj = NULL;
-  oop next = bs->resolve_and_maybe_copy_oop(refs_list.head());
+  // oop next = bs->resolve_and_maybe_copy_oop(refs_list.head());
+  oop next = refs_list.head();
   while (next != obj) {
     obj = next;
-    next = bs->resolve_and_maybe_copy_oop(java_lang_ref_Reference::discovered(obj));
+    assert(obj == oopDesc::bs()->resolve_oop(obj), "expect forwarded obj");
+    // next = bs->resolve_and_maybe_copy_oop(java_lang_ref_Reference::discovered(obj));
+    next = java_lang_ref_Reference::discovered(obj);
     java_lang_ref_Reference::set_discovered_raw(obj, NULL);
   }
   refs_list.set_head(NULL);
@@ -965,9 +968,8 @@
                                               oop obj, HeapWord* discovered_addr) {
   assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
-  // First we must make sure this object is only enqueued once. CAS in a non null
-  // discovered_addr
+  // discovered_addr.
   oop current_head = refs_list.head();
   // The last ref must have its discovered field pointing to itself.
   oop next_discovered = (current_head != NULL) ? current_head : obj;
@@ -978,9 +980,9 @@
     // This thread just won the right to enqueue the object.
     // We have separate lists for enqueueing, so no synchronization
     // is necessary.
-    refs_list.set_head(obj);
     refs_list.inc_length(1);
+
     if (TraceReferenceGC) {
       gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                              p2i(obj), obj->klass()->internal_name());
@@ -1059,11 +1061,8 @@
   // We only discover references whose referents are not (yet)
   // known to be strongly reachable.
   if (is_alive_non_header() != NULL) {
-    verify_referent(obj);
-    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
-      return false;  // referent is reachable
     }
   }
@@ -1137,7 +1136,6 @@
   // We do a raw store here: the field will be visited later when processing
   // the discovered references.
   oop current_head = list->head();
-
   // The last ref must have its discovered field pointing to itself.
   oop next_discovered = (current_head != NULL) ? current_head : obj;
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/gc/shared/space.inline.hpp
--- a/src/share/vm/gc/shared/space.inline.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/gc/shared/space.inline.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -81,7 +81,7 @@
   // We're sure to be here before any objects are compacted into this
   // space, so this is a good time to initialize this:
   space->set_compaction_top(space->bottom());
-
+
   if (cp->space == NULL) {
     assert(cp->gen != NULL, "need a generation");
     assert(cp->threshold == NULL, "just checking");
@@ -120,7 +120,6 @@
     const intx interval = PrefetchScanIntervalInBytes;
 
     while (q < t) {
-
       assert(!space->scanned_block_is_obj(q) ||
              space->make_oop(q)->mark()->is_marked() ||
             oopDesc::bs()->resolve_oop(space->make_oop(q))->mark()->is_marked() ||
@@ -179,7 +178,6 @@
     }
 
     assert(q == t, "just checking");
-
     if (liveRange != NULL) {
       liveRange->set_end(q);
     }
@@ -195,7 +193,7 @@
 
 template <class SpaceType>
 inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
-  // adjust all the interior pointers to point at the new locations of objectsH
+  // adjust all the interior pointers to point at the new locations of objects
   // Used by MarkSweep::mark_sweep_phase3()
 
   HeapWord* q = space->bottom();
@@ -229,7 +227,7 @@
     } else {
       // $$$ This is funky.  Using this to read the previously written
       // LiveRange.  See also use below.
-      q = (HeapWord*) oop(space->_first_dead)->mark()->decode_pointer();
+      q = (HeapWord*)oop(space->_first_dead)->mark()->decode_pointer();
     }
   }
 
@@ -266,7 +264,7 @@
   HeapWord* q = space->bottom();
   HeapWord* const t = space->_end_of_live;
   debug_only(HeapWord* prev_q = NULL);
-
+
   if (q < t && space->_first_dead > q && !space->make_oop(q)->is_gc_marked()) {
 #ifdef ASSERT // Debug only
     // we have a chunk of the space which hasn't moved and we've reinitialized
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/gc/shared/threadLocalAllocBuffer.cpp
--- a/src/share/vm/gc/shared/threadLocalAllocBuffer.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/gc/shared/threadLocalAllocBuffer.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -100,7 +100,7 @@
   } else {
     assert(_number_of_refills == 0 && _fast_refill_waste == 0 &&
-           _slow_refill_waste == 0, // && _gc_waste == 0,
+           _slow_refill_waste == 0 && _gc_waste == 0,
            "tlab stats == 0");
   }
   global_stats()->update_slow_allocations(_slow_allocations);
@@ -122,8 +122,8 @@
     }
   }
 
-  HeapWord* obj = Universe::heap()->tlab_post_allocation_setup(top(), false);
-  CollectedHeap::fill_with_object(obj, hard_end(), retire, false);
+  HeapWord* obj = Universe::heap()->tlab_post_allocation_setup(top());
+  CollectedHeap::fill_with_object(obj, hard_end(), retire);
   if (retire || ZeroTLAB) {  // "Reset" the TLAB
     set_start(NULL);
@@ -204,13 +204,12 @@
                     NULL,                    // top
                     NULL);                   // end
 
+  set_desired_size(initial_desired_size());
+
   // Following check is needed because at startup the main (primordial)
   // thread is initialized before the heap is.  The initialization for
   // this thread is redone in startup_initialization below.
   if (Universe::heap() != NULL) {
-
-    set_desired_size(initial_desired_size());
-
     size_t capacity   = Universe::heap()->tlab_capacity(myThread()) / HeapWordSize;
     double alloc_frac = desired_size() * target_refills() / (double) capacity;
     _allocation_fraction.sample(alloc_frac);
@@ -318,6 +317,7 @@
   }
 }
 
+
 GlobalTLABStats::GlobalTLABStats() :
   _allocating_threads_avg(TLABAllocationWeight) {
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/gc/shared/threadLocalAllocBuffer.hpp
--- a/src/share/vm/gc/shared/threadLocalAllocBuffer.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/gc/shared/threadLocalAllocBuffer.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -57,9 +57,9 @@
   unsigned  _gc_waste;
   unsigned  _slow_allocations;
 
-  bool      _gclab;
+  AdaptiveWeightedAverage _allocation_fraction;  // fraction of eden allocated in tlabs
-  AdaptiveWeightedAverage _allocation_fraction;  // fraction of eden allocated in tlabs
+  bool      _gclab;
 
   void set_start(HeapWord* start)                { _start = start; }
   void set_end(HeapWord* end)                    { _end = end; }
@@ -102,12 +102,6 @@
     // do nothing.  tlabs must be inited by initialize() calls
   }
 
-  // Resize based on amount of allocation, etc.
-  void resize();
-
-  void accumulate_statistics();
-  void initialize_statistics();
-
   static const size_t min_size()                 { return align_object_size(MinTLABSize / HeapWordSize) + alignment_reserve(); }
   static const size_t max_size()                 { assert(_max_size != 0, "max_size not set up"); return _max_size; }
   static void set_max_size(size_t max_size)      { _max_size = max_size; }
@@ -127,6 +121,12 @@
   // Allocate size HeapWords. The memory is NOT initialized to zero.
   inline HeapWord* allocate(size_t size);
 
+  // Resize based on amount of allocation, etc.
+  void resize();
+
+  void accumulate_statistics();
+  void initialize_statistics();
+
   // Rolls back a single allocation of the given size.
   void rollback(size_t size);
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/gc/shenandoah/shenandoahHeap.cpp
--- a/src/share/vm/gc/shenandoah/shenandoahHeap.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/gc/shenandoah/shenandoahHeap.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -2380,19 +2380,6 @@
   _update_references_in_progress = update_refs_in_progress;
 }
 
-void ShenandoahHeap::post_allocation_collector_specific_setup(HeapWord* hw) {
-  oop obj = oop(hw);
-
-  // Assuming for now that objects can't be created already locked
-  assert(! obj->has_displaced_mark(), "hopefully new objects don't have displaced mark");
-  // tty->print_cr("post_allocation_collector_specific_setup:: "PTR_FORMAT, p2i(obj));
-
-  if (_concurrent_mark_in_progress
-      || (shenandoahPolicy()->update_refs_early() && _evacuation_in_progress)) {
-    mark_current_no_checks(obj);
-  }
-}
-
 void ShenandoahHeap::verify_copy(oop p,oop c){
   assert(p != oopDesc::bs()->resolve_oop(p), "forwarded correctly");
   assert(oopDesc::bs()->resolve_oop(p) == c, "verify pointer is correct");
@@ -2528,9 +2515,9 @@
   return return_val;
 }
 
-HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj, bool new_obj) {
+HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
   HeapWord* result = obj + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-  initialize_brooks_ptr(obj, result, new_obj);
+  initialize_brooks_ptr(obj, result);
   return result;
 }
 
@@ -2578,13 +2565,6 @@
 }
 
-#ifndef CC_INTERP
-void ShenandoahHeap::compile_prepare_oop(MacroAssembler* masm, Register obj) {
-  __ incrementq(obj, BrooksPointer::BROOKS_POINTER_OBJ_SIZE * HeapWordSize);
-  __ movptr(Address(obj, -1 * HeapWordSize), obj);
-}
-#endif
-
 bool ShenandoahIsAliveClosure:: do_object_b(oop obj) {
 
   ShenandoahHeap* sh = ShenandoahHeap::heap();
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/gc/shenandoah/shenandoahHeap.hpp
--- a/src/share/vm/gc/shenandoah/shenandoahHeap.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/gc/shenandoah/shenandoahHeap.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -217,7 +217,7 @@
   void resize_all_tlabs();
   void accumulate_statistics_all_gclabs();
 
-  HeapWord* tlab_post_allocation_setup(HeapWord* obj, bool new_obj);
+  HeapWord* tlab_post_allocation_setup(HeapWord* obj);
 
   uint oop_extra_words();
 
@@ -274,8 +274,6 @@
 
   bool is_bitmap_clear();
 
-  virtual void post_allocation_collector_specific_setup(HeapWord* obj);
-
   void mark_object_live(oop obj, bool enqueue);
 
   void prepare_for_concurrent_evacuation();
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/interpreter/bytecodeInterpreter.cpp
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -24,7 +24,6 @@
 
 // no precompiled headers
 #include "classfile/vmSymbols.hpp"
-#include "gc/shared/barrierSet.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/bytecodeInterpreter.hpp"
@@ -1798,7 +1797,7 @@
         BasicObjectLock* entry = NULL;
         while (most_recent != limit ) {
           if (most_recent->obj() == NULL) entry = most_recent;
-          else if (oopDesc::bs()->resolve_oop(most_recent->obj()) == lockee) break;
+          else if (most_recent->obj() == lockee) break;
           most_recent++;
         }
         if (entry != NULL) {
@@ -1901,7 +1900,7 @@
         BasicObjectLock* limit = istate->monitor_base();
         BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
         while (most_recent != limit ) {
-          if (oopDesc::bs()->resolve_oop(most_recent->obj()) == lockee) {
+          if (most_recent->obj() == lockee) {
             BasicLock* lock = most_recent->lock();
             markOop header = lock->displaced_header();
             most_recent->set_obj(NULL);
@@ -1973,7 +1972,7 @@
           oop obj;
           if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
             Klass* k = cache->f1_as_klass();
-            obj = Universe::heap()->barrier_set()->resolve_oop(k->java_mirror());
+            obj = k->java_mirror();
             MORE_STACK(1);  // Assume single slot push
           } else {
             obj = (oop) STACK_OBJECT(-1);
@@ -2088,7 +2087,7 @@
           }
           if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
             Klass* k = cache->f1_as_klass();
-            obj = Universe::heap()->barrier_set()->resolve_oop(k->java_mirror());
+            obj = k->java_mirror();
           } else {
             --count;
             obj = (oop) STACK_OBJECT(count);
@@ -2153,10 +2152,7 @@
             Klass* k_entry = (Klass*) entry;
             assert(k_entry->oop_is_instance(), "Should be InstanceKlass");
             InstanceKlass* ik = (InstanceKlass*) k_entry;
-            // TODO: How can we do fastpath allocation with a clean GC interface? This
-            // code assumes a bunch of things about the GC, and the setup code is
-            // sensitive to changes in setup code in CollectedHeap.
-            if (false && ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
+            if (ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
               size_t obj_size = ik->size_helper();
               oop result = NULL;
               // If the TLAB isn't pre-zeroed then we'll have to do it
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/memory/universe.hpp
--- a/src/share/vm/memory/universe.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/memory/universe.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -351,7 +351,6 @@
 
   // The particular choice of collected heap.
   static CollectedHeap* heap() { return _collectedHeap; }
-  static CollectedHeap** heap_addr() { return &_collectedHeap; }
 
   // For UseCompressedOops
   // Narrow Oop encoding mode:
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/oops/instanceKlass.cpp
--- a/src/share/vm/oops/instanceKlass.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/oops/instanceKlass.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -1527,7 +1527,6 @@
 Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature, OverpassLookupMode overpass_mode) const {
   OverpassLookupMode overpass_local_mode = overpass_mode;
   Klass* klass = const_cast<InstanceKlass*>(this);
-  bool dont_ignore_overpasses = true;  // For the class being searched, find its overpasses.
   while (klass != NULL) {
     Method* method = InstanceKlass::cast(klass)->find_method_impl(name, signature, overpass_local_mode, find_static, find_private);
     if (method != NULL) {
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/oops/oop.hpp
--- a/src/share/vm/oops/oop.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/oops/oop.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -174,10 +174,6 @@
   static oop load_decode_heap_oop(narrowOop* p);
   static oop load_decode_heap_oop(oop* p);
 
-#ifdef ASSERT_DISABLED
-  static void shenandoah_check_store_value(oop v);
-#endif
-
   // Store an oop into the heap.
   static void store_heap_oop(narrowOop* p, narrowOop v);
   static void store_heap_oop(oop* p, oop v);
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/oops/oop.inline.hpp
--- a/src/share/vm/oops/oop.inline.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/oops/oop.inline.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -218,42 +218,20 @@
 }
 
 // Store already encoded heap oop into the heap.
-inline void oopDesc::store_heap_oop(oop* p, oop v) {
-#ifdef ASSERT_DISABLED
-  shenandoah_check_store_value(v);
-#endif
-  *p = v;
-}
+inline void oopDesc::store_heap_oop(oop* p, oop v) { *p = v; }
 inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
 
 // Encode and store a heap oop.
 inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
-#ifdef ASSERT_DISABLED
-  shenandoah_check_store_value(v);
-#endif
-
   *p = encode_heap_oop_not_null(v);
 }
-inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) {
-#ifdef ASSERT_DISABLED
-  shenandoah_check_store_value(v);
-#endif
-  *p = v;
-}
+inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
 
 // Encode and store a heap oop allowing for null.
 inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
-#ifdef ASSERT_DISABLED
-  shenandoah_check_store_value(v);
-#endif
   *p = encode_heap_oop(v);
 }
-inline void oopDesc::encode_store_heap_oop(oop* p, oop v) {
-#ifdef ASSERT_DISABLED
-  shenandoah_check_store_value(v);
-#endif
-  *p = v;
-}
+inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }
 
 // Store heap oop as is for volatile fields.
 inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
@@ -682,7 +660,6 @@
   if (prebarrier) {
     update_barrier_set_pre((oop*)dest, exchange_value);
   }
-
   return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
   }
 }
@@ -712,10 +689,6 @@
   if (!Universe::heap()->is_in_reserved(obj)) return false;
   // obj is aligned and accessible in heap
   if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;
-  Klass* klass = obj->klass();
-  if (! Metaspace::contains(klass)) {
-    return false;
-  }
 
   // Header verification: the mark is typically non-NULL. If we're
   // at a safepoint, it must not be null.
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/opto/c2_globals.hpp
--- a/src/share/vm/opto/c2_globals.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/opto/c2_globals.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -480,7 +480,7 @@
   notproduct(bool, PrintEliminateLocks, false,                              \
           "Print out when locks are eliminated")                           \
                                                                             \
-  product(bool, EliminateAutoBox, false,                                    \
+  product(bool, EliminateAutoBox, true,                                     \
          "Control optimizations for autobox elimination")                  \
                                                                             \
   diagnostic(bool, UseImplicitStableValues, true,                           \
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/opto/chaitin.cpp
--- a/src/share/vm/opto/chaitin.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/opto/chaitin.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -280,25 +280,7 @@
     Node* proj = orig->raw_out(i);
     if (proj->is_MachProj()) {
       assert(proj->outcnt() == 0, "only kill projections are expected here");
-
-#ifdef ASSERT
-      if (UseShenandoahGC && _cfg.get_block_for_node(proj) != borig) {
-        tty->print_cr("WARNING: block of original node doesn't match block of kill projection (NULL) in Shenandoah. Consider fixing this in chaitin.cpp PhaseChaitin::clone_projs().");
-        /*
-        tty->print_cr("orig:");
-        orig->dump(3);
-
-        orig->raw_out(i+1)->dump(3);
-
-        tty->print_cr("\nproj:");
-        proj->dump(3);
-        tty->print_cr("\nblock(orig):");
-        borig->dump();
-        tty->print_cr("");
-        */
-      }
-#endif
-      assert(_cfg.get_block_for_node(proj) == borig || UseShenandoahGC, "incorrect block for kill projections");
+      assert(_cfg.get_block_for_node(proj) == borig, "incorrect block for kill projections");
       found_projs++;
       // Copy kill projections after the cloned node
       Node* kills = proj->clone();
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/opto/graphKit.cpp
--- a/src/share/vm/opto/graphKit.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/opto/graphKit.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -24,8 +24,6 @@
 
 #include "precompiled.hpp"
 #include "compiler/compileLog.hpp"
-#include "gc/shenandoah/shenandoahBarrierSet.hpp"
-#include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/barrierSet.hpp"
@@ -3243,8 +3241,6 @@
     return;
   }
 
-  // obj = shenandoah_write_barrier(obj);
-
   // Memory barrier to avoid floating things down past the locked region
   insert_mem_bar(Op_MemBarReleaseLock);
 
@@ -4307,13 +4303,6 @@
   const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
   int offset_field_idx = C->get_alias_index(offset_field_type);
 
-  // TODO: This is only a workaround and is probably not needed, because
-  // the value never changes (strings are immutable). However, if we leave
-  // that out, it tries to 'see' the stored value (from the initializer) and
-  // fails because no stores have been captured. I don't know yet, why. So we
-  // leave this here as workaround for now. The other option would be
-  // to leave this barrier here in any case, and let C2 optimize it away
-  // if it can prove that the object is immutable.
   str = shenandoah_read_barrier(str);
 
   return make_load(ctrl,
@@ -4332,7 +4321,6 @@
   const TypePtr* count_field_type = string_type->add_offset(count_offset);
   int count_field_idx = C->get_alias_index(count_field_type);
 
-  // TODO: See comment in load_String_offset().
   str = shenandoah_read_barrier(str);
 
   return make_load(ctrl,
@@ -4353,7 +4341,6 @@
                                                      ciTypeArrayKlass::make(T_CHAR), true, 0);
   int value_field_idx = C->get_alias_index(value_field_type);
 
-  // TODO: See comment in load_String_offset().
   str = shenandoah_read_barrier(str);
 
   Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
@@ -4372,7 +4359,6 @@
   const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
   int offset_field_idx = C->get_alias_index(offset_field_type);
 
-  // TODO: See comment in load_String_offset().
   // TODO: Use incoming ctrl.
   str = shenandoah_write_barrier(str);
 
@@ -4386,7 +4372,6 @@
                                                      false, NULL, 0);
   const TypePtr* value_field_type = string_type->add_offset(value_offset);
 
-  // TODO: See comment in load_String_offset().
   // TODO: Use incoming ctrl.
   str = shenandoah_write_barrier(str);
   value = shenandoah_read_barrier_nomem(value);
@@ -4402,7 +4387,6 @@
   const TypePtr* count_field_type = string_type->add_offset(count_offset);
   int count_field_idx = C->get_alias_index(count_field_type);
 
-  // TODO: See comment in load_String_offset().
   // TODO: Use incoming ctrl.
   str = shenandoah_write_barrier(str);
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/opto/library_call.cpp
--- a/src/share/vm/opto/library_call.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/opto/library_call.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -28,7 +28,6 @@
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
 #include "compiler/compileLog.hpp"
-#include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/shenandoah/shenandoahRuntime.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "opto/addnode.hpp"
@@ -2753,7 +2752,6 @@
   if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
     oldval = _gvn.makecon(TypePtr::NULL_PTR);
   }
-
   // The only known value which might get overwritten is oldval.
   pre_barrier(false /* do_load */,
               control(), NULL, NULL, max_juint, NULL, NULL,
@@ -3224,8 +3222,7 @@
 // Given a klass oop, load its java mirror (a java.lang.Class oop).
 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
-  Node* ld = make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
-  return ld;
+  return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
 }
 
 //-----------------------load_klass_from_mirror_common-------------------------
@@ -3544,11 +3541,16 @@
 bool LibraryCallKit::inline_native_subtype_check() {
   // Pull both arguments off the stack.
   Node* args[2];                   // two java.lang.Class mirrors: superc, subc
+  args[0] = argument(0);
+  args[1] = argument(1);
+
   // We need write barriers here, because for primitive types we later compare
   // the two Class objects using ==, and those would give false negatives
   // if one obj is in from-space, and one in to-space.
-  args[0] = shenandoah_write_barrier(argument(0));
-  args[1] = shenandoah_write_barrier(argument(1));
+  // TODO: Consider doing improved == comparison that only needs read barriers
+  // on the false-path.
+  args[0] = shenandoah_write_barrier(args[0]);
+  args[1] = shenandoah_write_barrier(args[1]);
   Node* klasses[2];                // corresponding Klasses: superk, subk
   klasses[0] = klasses[1] = top();
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/opto/locknode.cpp
--- a/src/share/vm/opto/locknode.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/opto/locknode.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -184,7 +184,6 @@
 
   // Null check; get casted pointer.
   Node* obj = null_check(peek());
-
  // Check for locking null object
   if (stopped()) return;
 
@@ -204,6 +203,5 @@
   // Because monitors are guaranteed paired (else we bail out), we know
   // the matching Lock for this Unlock.  Hence we know there is no need
   // for a null check on Unlock.
-  Node* obj = map()->peek_monitor_obj();
-  shared_unlock(map()->peek_monitor_box(), obj);
+  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
 }
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/opto/loopopts.cpp
--- a/src/share/vm/opto/loopopts.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/opto/loopopts.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -977,7 +977,6 @@
   // Replace 'n' with the new phi
   split_mem_thru_phi(n, n_blk, phi);
   _igvn.replace_node( n, phi );
-
   // Moved a load around the loop, 'en-registering' something.
   if (n_blk->is_Loop() && n->is_Load() &&
       !phi->in(LoopNode::LoopBackControl)->is_Load())
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/opto/macro.cpp
--- a/src/share/vm/opto/macro.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/opto/macro.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -2715,7 +2715,7 @@
   while (macro_idx >= 0) {
     Node * n = C->macro_node(macro_idx);
     assert(n->is_macro(), "only macro nodes expected here");
-    if (_igvn.type(n) == Type::TOP || n->in(0)->is_top()) {
+    if (_igvn.type(n) == Type::TOP || n->in(0)->is_top() ) {
      // node is unreachable, so don't try to expand it
      C->remove_macro_node(n);
     } else if (n->is_ArrayCopy()){
@@ -2733,7 +2733,7 @@
     int macro_count = C->macro_count();
     Node * n = C->macro_node(macro_count-1);
     assert(n->is_macro(), "only macro nodes expected here");
-    if (_igvn.type(n) == Type::TOP || n->in(0)->is_top()) {
+    if (_igvn.type(n) == Type::TOP || n->in(0)->is_top() ) {
      // node is unreachable, so don't try to expand it
      C->remove_macro_node(n);
      continue;
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/opto/macro.hpp
--- a/src/share/vm/opto/macro.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/opto/macro.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -42,7 +42,6 @@
   Node* intcon(jint con) const { return _igvn.intcon(con); }
   Node* longcon(jlong con) const { return _igvn.longcon(con); }
   Node* makecon(const Type *t) const { return _igvn.makecon(t); }
-  Node* zerocon(BasicType bt) const { return _igvn.zerocon(bt); }
   Node* basic_plus_adr(Node* base, int offset) {
     return (offset == 0)? base: basic_plus_adr(base, MakeConX(offset));
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/opto/memnode.cpp
--- a/src/share/vm/opto/memnode.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/opto/memnode.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -967,7 +967,6 @@
     // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
     if (store_Opcode() != st->Opcode())
       return NULL;
-    //tty->print_cr("can_see_stored_value 1");
     return st->in(MemNode::ValueIn);
   }
 
@@ -981,7 +980,6 @@
     // (This is one of the few places where a generic PhaseTransform
     // can create new nodes.  Think of it as lazily manifesting
     // virtually pre-existing constants.)
-    // tty->print_cr("can_see_stored_value 2");
     return phase->zerocon(memory_type());
   }
 
@@ -1008,8 +1006,6 @@
         base->as_Proj()->_con == TypeFunc::Parms &&
         base->in(0)->is_CallStaticJava() &&
         base->in(0)->as_CallStaticJava()->is_boxing_method()) {
-
-      //tty->print_cr("can_see_stored_value 3");
       return base->in(0)->in(TypeFunc::Parms);
     }
   }
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/opto/node.hpp
--- a/src/share/vm/opto/node.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/opto/node.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -690,6 +690,10 @@
     assert(c <= _max_classes, "invalid node class");
     _class_id = c; // cast out const
   }
+  void init_flags(jushort fl) {
+    assert(fl <= _max_flags, "invalid node flag");
+    _flags |= fl;
+  }
   void clear_flag(jushort fl) {
     assert(fl <= _max_flags, "invalid node flag");
     _flags &= ~fl;
@@ -699,10 +703,6 @@
   const jushort class_id() const { return _class_id; }
   const jushort flags() const { return _flags; }
-  void init_flags(jushort fl) {
-    assert(fl <= _max_flags, "invalid node flag");
-    _flags |= fl;
-  }
   void add_flag(jushort fl) { init_flags(fl); }
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/opto/parse1.cpp
--- a/src/share/vm/opto/parse1.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/opto/parse1.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -132,8 +132,7 @@
   }
   default: ShouldNotReachHere();
   }
-  l = _gvn.transform(l);
-  return l;
+  return _gvn.transform(l);
 }
 
 // Helper routine to prevent the interpreter from handing
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/opto/parse2.cpp
--- a/src/share/vm/opto/parse2.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/opto/parse2.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -55,7 +55,6 @@
   dec_sp(2);                  // Pop array and index
   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
   Node* ld = make_load(control(), adr, elem, elem_type, adr_type, MemNode::unordered);
-  assert(elem_type != T_ARRAY, "doesn't happen, right?");
   push(ld);
 }
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/opto/superword.cpp
--- a/src/share/vm/opto/superword.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/opto/superword.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -2074,7 +2074,7 @@
     Node *n = lpt()->_body.at(i);
     set_bb_idx(n, i); // Create a temporary map
     if (in_bb(n)) {
-      if (n->is_LoadStore() || n->is_MergeMem() || n->is_ShenandoahBarrier() ||
+      if (n->is_LoadStore() || n->is_MergeMem() ||
          (n->is_Proj() && !n->as_Proj()->is_CFG())) {
        // Bailout if the loop has LoadStore, MergeMem or data Proj
        // nodes. Superword optimization does not work with them.
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/prims/jvm.cpp
--- a/src/share/vm/prims/jvm.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/prims/jvm.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -637,9 +637,6 @@
                                  (size_t)align_object_size(size) / HeapWordsPerLong);
   // Clear the header
   new_obj_oop->init_mark();
-  // TODO: Find a nicer way to hook up Shenandoah's special handling of
-  // age bits.
-  Universe::heap()->post_allocation_collector_specific_setup((HeapWord*) new_obj_oop);
 
   // Store check (mark entire object and let gc sort it out)
   BarrierSet* bs = Universe::heap()->barrier_set();
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/prims/unsafe.cpp
--- a/src/share/vm/prims/unsafe.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/prims/unsafe.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -74,6 +74,7 @@
 
 #define UnsafeWrapper(arg) /*nothing, for the present*/
 
+
 inline void* addr_from_java(jlong addr) {
   // This assert fails in a variety of ways on 32-bit systems.
   // It is impossible to predict whether native code that converts
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/runtime/globals.hpp
--- a/src/share/vm/runtime/globals.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/runtime/globals.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -1637,7 +1637,7 @@
   product(bool, ShenandoahPrintCollectionSet, false,                        \
           "Print the collection set before each GC phase")                  \
                                                                             \
-  product(bool, UseParallelGC, false,                                       \
+  product(bool, UseParallelGC, false,                                       \
           "Use the Parallel Scavenge garbage collector")                    \
                                                                             \
   product(bool, UseParallelOldGC, false,                                    \
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/runtime/mutexLocker.cpp
--- a/src/share/vm/runtime/mutexLocker.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/runtime/mutexLocker.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -267,6 +267,7 @@
   if (UseConcMarkSweepGC) {
     def(SLT_lock                   , Monitor, nonleaf,     false, Monitor::_safepoint_check_never);      // used in CMS GC for locking PLL lock
   }
+  def(Heap_lock                    , Monitor, nonleaf+1,   false, Monitor::_safepoint_check_sometimes);
   def(ShenandoahHeap_lock          , Monitor, special,     false, Monitor::_safepoint_check_never);
   def(JfieldIdCreation_lock        , Mutex  , nonleaf+1,   true,  Monitor::_safepoint_check_always);     // jfieldID, Used in VM_Operation
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/runtime/mutexLocker.hpp
--- a/src/share/vm/runtime/mutexLocker.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/runtime/mutexLocker.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -351,17 +351,13 @@
   Monitor * _mutex;
   bool _reentrant;
  public:
-  VerifyMutexLocker(Monitor * mutex, bool no_safepoint_check = !Mutex::_no_safepoint_check_flag) {
+  VerifyMutexLocker(Monitor * mutex) {
     _mutex = mutex;
     _reentrant = mutex->owned_by_self();
     if (!_reentrant) {
       // We temp. disable strict safepoint checking, while we require the lock
       FlagSetting fs(StrictSafepointChecks, false);
-      if (no_safepoint_check == Mutex::_no_safepoint_check_flag) {
-        _mutex->lock_without_safepoint_check();
-      } else {
-        _mutex->lock();
-      }
+      _mutex->lock();
     }
   }
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/runtime/os.hpp
--- a/src/share/vm/runtime/os.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/runtime/os.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -436,7 +436,6 @@
     vm_thread,
     cgc_thread,        // Concurrent GC thread
     pgc_thread,        // Parallel GC thread
-    shenandoah_thread,
     java_thread,
     compiler_thread,
     watcher_thread,
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/runtime/sharedRuntime.cpp
--- a/src/share/vm/runtime/sharedRuntime.cpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Thu Oct 01 18:39:20 2015 +0200
@@ -1834,7 +1834,8 @@
 // Handles the uncommon cases of monitor unlocking in compiled code
 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock, JavaThread * THREAD))
   oop obj(_obj);
-  obj = oopDesc::bs()->resolve_oop(obj);
+  obj = oopDesc::bs()->resolve_oop(obj);
+
   assert(JavaThread::current() == THREAD, "invariant");
   // I'm not convinced we need the code contained by MIGHT_HAVE_PENDING anymore
   // testing was unable to ever fire the assert that guarded it so I have removed it.
   assert(!HAS_PENDING_EXCEPTION, "Do we need code below anymore?");
diff -r 42defc20a38c -r 0d5b3847ab03 src/share/vm/services/memoryService.hpp
--- a/src/share/vm/services/memoryService.hpp	Thu Oct 01 12:58:23 2015 +0200
+++ b/src/share/vm/services/memoryService.hpp	Thu Oct 01 18:39:20 2015 +0200
@@ -116,7 +116,6 @@
                                             size_t max_size,
                                             bool support_usage_threshold);
 
-  static void add_gen_collected_heap_info(GenCollectedHeap* heap);
   static void add_parallel_scavenge_heap_info(ParallelScavengeHeap* heap);
   static void add_g1_heap_info(G1CollectedHeap* g1h);